diff --git a/cmd/aro/operator.go b/cmd/aro/operator.go
index 0481c5b6127..7cfa23f29b8 100644
--- a/cmd/aro/operator.go
+++ b/cmd/aro/operator.go
@@ -75,8 +75,6 @@ func operator(ctx context.Context, log *logrus.Entry) error {
 	mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
 		HealthProbeBindAddress: ":8080",
-		MetricsBindAddress:     "0", // disabled
-		Port:                   8443,
 	})
 	if err != nil {
 		return err
diff --git a/go.mod b/go.mod
index b6c3827397e..5dd1478b93e 100644
--- a/go.mod
+++ b/go.mod
@@ -30,7 +30,7 @@ require (
 	github.com/containers/image/v5 v5.32.2
 	github.com/containers/podman/v5 v5.2.4
 	github.com/coreos/go-oidc/v3 v3.11.0
-	github.com/coreos/go-semver v0.3.0
+	github.com/coreos/go-semver v0.3.1
 	github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09
 	github.com/coreos/ignition/v2 v2.14.0
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
@@ -40,7 +40,7 @@ require (
 	github.com/go-test/deep v1.1.0
 	github.com/gofrs/uuid v4.4.0+incompatible
 	github.com/golang-jwt/jwt/v4 v4.5.1
-	github.com/google/gnostic v0.5.7-v3refs
+	github.com/google/gnostic-models v0.6.8
 	github.com/google/go-cmp v0.6.0
 	github.com/google/uuid v1.6.0
 	github.com/gorilla/csrf v1.7.2
@@ -61,16 +61,16 @@ require (
 	github.com/onsi/gomega v1.34.1
 	github.com/open-policy-agent/frameworks/constraint v0.0.0-20221109005544-7de84dff5081
 	github.com/opencontainers/runtime-spec v1.2.0
-	github.com/openshift/api v0.0.0-20240103200955-7ca3a4634e46
-	github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c
-	github.com/openshift/cloud-credential-operator v0.0.0-20240910012137-a0245d57d1e6
+	github.com/openshift/api v0.0.0-20240830023148-b7d0481c9094
+	github.com/openshift/client-go v0.0.0-20240510131258-f646d5f29250
+	github.com/openshift/cloud-credential-operator v0.0.0-20250107183716-87a85dc8e12e
 	github.com/openshift/hive/apis v0.0.0-20250212001559-5d3f4d77dc90
-	github.com/openshift/library-go v0.0.0-20230620084201-504ca4bd5a83
+	github.com/openshift/library-go v0.0.0-20240711192904-190fec8c3f09
 	github.com/openshift/machine-config-operator v0.0.0-00010101000000-000000000000
 	github.com/pires/go-proxyproto v0.6.2
 	github.com/pkg/errors v0.9.1
-	github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.50.0
-	github.com/prometheus-operator/prometheus-operator/pkg/client v0.48.1
+	github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0
+	github.com/prometheus-operator/prometheus-operator/pkg/client v0.74.0
 	github.com/prometheus/client_golang v1.19.0
 	github.com/prometheus/common v0.51.1
 	github.com/serge1peshcoff/selenium-go-conditions v0.0.0-20170824121757-5afbdb74596b
@@ -88,15 +88,15 @@ require (
 	golang.org/x/sync v0.11.0
 	golang.org/x/text v0.22.0
 	golang.org/x/tools v0.24.0
-	k8s.io/api v0.31.1
-	k8s.io/apiextensions-apiserver v0.27.2
-	k8s.io/apimachinery v0.31.1
-	k8s.io/cli-runtime v0.25.16
-	k8s.io/client-go v0.27.3
-	k8s.io/kubectl v0.24.17
-	k8s.io/kubernetes v1.28.4
+	k8s.io/api v0.29.14
+	k8s.io/apiextensions-apiserver v0.29.14
+	k8s.io/apimachinery v0.29.14
+	k8s.io/cli-runtime v0.29.14
+	k8s.io/client-go v0.29.14
+	k8s.io/kubectl v0.29.14
+	k8s.io/kubernetes v1.29.14
 	k8s.io/utils v0.0.0-20240921022957-49e7df575cb6
-	sigs.k8s.io/controller-runtime v0.15.0
+	sigs.k8s.io/controller-runtime v0.17.2
 	sigs.k8s.io/yaml v1.4.0
 )
@@ -116,7 +116,7 @@ require (
 	github.com/Microsoft/hcsshim v0.12.5 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
-
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed // indirect + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -148,13 +148,12 @@ require ( github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/elazarl/goproxy v1.7.2 // indirect - github.com/emicklei/go-restful/v3 v3.10.1 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/emicklei/go-restful/v3 v3.12.0 // indirect + github.com/evanphx/json-patch v5.9.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.8.0 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.8.0 // indirect - github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-json-experiment/json v0.0.0-20240418180308-af2d5061e6c2 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -177,14 +176,14 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.0.1 // indirect - github.com/google/cel-go v0.12.6 // indirect + github.com/google/cel-go v0.17.7 // indirect github.com/google/go-containerregistry v0.20.0 // indirect github.com/google/go-intervals v0.0.2 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/googleapis/gnostic v0.6.8 // indirect github.com/gorilla/schema v1.4.1 // indirect + github.com/gorilla/websocket v1.5.0 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/imdario/mergo v0.3.15 // indirect @@ -220,6 +219,7 @@ require ( github.com/morikuni/aec v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/nxadm/tail v1.4.11 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect @@ -239,7 +239,7 @@ require ( github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/robfig/cron v1.2.0 // indirect - github.com/russross/blackfriday v1.6.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sagikazarmark/locafero v0.6.0 // indirect github.com/sanity-io/litter v1.5.5 // indirect github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect @@ -263,7 +263,7 @@ require ( github.com/vbauerster/mpb/v8 v8.7.5 // indirect github.com/vmihailenco/msgpack/v4 v4.3.13 // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect - github.com/xlab/treeprint v1.1.0 // indirect + github.com/xlab/treeprint v1.2.0 // indirect github.com/yosida95/uritemplate/v3 v3.0.2 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect @@ 
-272,13 +272,13 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd // indirect + go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/mod v0.20.0 // indirect golang.org/x/sys v0.30.0 // indirect golang.org/x/term v0.29.0 // indirect golang.org/x/time v0.5.0 // indirect - gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect @@ -288,22 +288,24 @@ require ( gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.26.2 // indirect - k8s.io/component-base v0.27.2 // indirect + k8s.io/apiserver v0.29.14 // indirect + k8s.io/component-base v0.29.14 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-aggregator v0.27.1 // indirect - k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect + k8s.io/kube-aggregator v0.29.14 // indirect + k8s.io/kube-openapi v0.0.0-20240322212309-b815d8309940 // indirect sigs.k8s.io/json v0.0.0-20241009153224-e386a8af8d30 // indirect - sigs.k8s.io/kube-storage-version-migrator v0.0.4 // indirect - sigs.k8s.io/kustomize/api v0.12.1 // indirect - sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect + sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 // indirect + sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect + sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect tags.cncf.io/container-device-interface v0.8.0 // indirect ) // strip cloud.google.com/go dependencies -- these are not required as dependencies exclude ( + cloud.google.com/go v0.34.0 cloud.google.com/go v0.37.4 + cloud.google.com/go v0.38.0 cloud.google.com/go v0.41.0 cloud.google.com/go v0.44.1 cloud.google.com/go v0.44.2 @@ -311,12 +313,14 @@ exclude ( cloud.google.com/go v0.45.1 cloud.google.com/go v0.46.3 cloud.google.com/go v0.50.0 + cloud.google.com/go v0.51.0 cloud.google.com/go v0.52.0 cloud.google.com/go v0.53.0 cloud.google.com/go v0.54.0 cloud.google.com/go v0.56.0 cloud.google.com/go v0.57.0 cloud.google.com/go v0.58.0 + cloud.google.com/go v0.65.0 cloud.google.com/go v0.75.0 cloud.google.com/go v0.81.0 cloud.google.com/go v0.97.0 @@ -461,6 +465,7 @@ exclude ( cloud.google.com/go/compute v1.19.0 cloud.google.com/go/compute v1.19.1 cloud.google.com/go/compute/metadata v0.1.0 + cloud.google.com/go/compute/metadata v0.2.0 cloud.google.com/go/compute/metadata v0.2.1 cloud.google.com/go/compute/metadata v0.2.3 cloud.google.com/go/contactcenterinsights v1.3.0 @@ -937,8 +942,23 @@ exclude ( google.golang.org/api v0.59.0 google.golang.org/api v0.61.0 google.golang.org/api v0.62.0 + google.golang.org/appengine v0.0.0 + google.golang.org/appengine v1.0.0 google.golang.org/appengine v1.1.0 + google.golang.org/appengine v1.2.0 + google.golang.org/appengine v1.3.0 + google.golang.org/appengine v1.4.0 + google.golang.org/appengine v1.5.0 + google.golang.org/appengine v1.6.0 + google.golang.org/appengine v1.6.1 + google.golang.org/appengine v1.6.2 + google.golang.org/appengine 
v1.6.3 + google.golang.org/appengine v1.6.4 + google.golang.org/appengine v1.6.5 + google.golang.org/appengine v1.6.6 + google.golang.org/appengine v1.6.7 google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8 + google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215 @@ -946,6 +966,10 @@ exclude ( google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece google.golang.org/genproto v0.0.0-20200610104632-a5b850bcf112 google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154 + google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 + google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc + google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // trim dependency tree from old grpcs google.golang.org/grpc v1.17.0 google.golang.org/grpc v1.19.0 @@ -982,6 +1006,9 @@ exclude ( google.golang.org/protobuf v1.30.0 ) +// exclude Azure SDKs that we are not compatible with +exclude github.com/Azure/azure-sdk-for-go v68.0.0+incompatible + exclude ( // exclude Azure SDKs that we are not compatible with github.com/Azure/azure-sdk-for-go v48.0.0+incompatible @@ -999,7 +1026,6 @@ exclude ( github.com/Azure/azure-sdk-for-go v67.2.0+incompatible github.com/Azure/azure-sdk-for-go v67.3.0+incompatible github.com/Azure/azure-sdk-for-go v67.4.0+incompatible - github.com/Azure/azure-sdk-for-go v68.0.0+incompatible github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795 github.com/Azure/go-autorest/autorest v0.9.0 @@ -1080,136 +1106,58 @@ exclude ( github.com/pkg/errors v0.8.1 github.com/pkg/sftp v1.10.1 github.com/pkg/sftp v1.13.1 - // remove old prometheus deps - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.44.1 - github.com/prometheus/client_golang v1.0.0 - github.com/prometheus/client_golang v1.11.0 - github.com/prometheus/client_golang v1.11.1 - github.com/prometheus/client_golang v1.12.1 - github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 - github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 - github.com/prometheus/client_model v0.2.0 + github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0 github.com/prometheus/client_model v0.3.0 - github.com/prometheus/common v0.4.1 - github.com/prometheus/common v0.10.0 - github.com/prometheus/common v0.15.0 - github.com/prometheus/common v0.26.0 - github.com/prometheus/common v0.28.0 - github.com/prometheus/common v0.32.1 - github.com/prometheus/procfs v0.0.2 - github.com/prometheus/procfs v0.6.0 - github.com/prometheus/procfs v0.7.3 - github.com/russross/blackfriday v1.5.2 - github.com/sirupsen/logrus v1.4.1 - github.com/sirupsen/logrus v1.6.0 - github.com/sirupsen/logrus v1.7.0 - github.com/sirupsen/logrus v1.8.1 - github.com/sirupsen/logrus v1.9.0 - // trip dependency tree from old cobra - github.com/spf13/cobra v0.0.5 - github.com/spf13/cobra v1.0.0 - github.com/spf13/cobra v1.1.1 - github.com/spf13/cobra v1.1.3 - github.com/spf13/cobra v1.2.1 - github.com/spf13/cobra v1.4.0 - github.com/spf13/pflag v1.0.3 - github.com/spf13/pflag v1.0.5 - github.com/stretchr/objx v0.1.0 - github.com/stretchr/objx v0.1.1 - github.com/stretchr/objx v0.2.0 - github.com/stretchr/objx v0.4.0 - github.com/stretchr/testify v1.2.2 - github.com/stretchr/testify 
v1.3.0 - github.com/stretchr/testify v1.4.0 - github.com/stretchr/testify v1.5.1 - github.com/stretchr/testify v1.6.1 - github.com/stretchr/testify v1.7.0 - github.com/stretchr/testify v1.7.1 - github.com/stretchr/testify v1.8.0 - github.com/stretchr/testify v1.8.1 - github.com/stretchr/testify v1.8.2 - github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f - go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 - // trim dependency tree from old opencensus - go.opencensus.io v0.20.1 - go.opencensus.io v0.20.2 - go.opencensus.io v0.21.0 - go.opencensus.io v0.22.0 - go.opencensus.io v0.22.2 - go.opencensus.io v0.22.3 - go.opencensus.io v0.22.4 go.opencensus.io v0.22.5 - go.opencensus.io v0.23.0 - // old otel deps - go.opentelemetry.io/contrib v0.20.0 - go.opentelemetry.io/otel v0.20.0 - go.opentelemetry.io/otel/metric v0.20.0 - go.opentelemetry.io/otel/sdk v0.20.0 - go.opentelemetry.io/otel/sdk/metric v0.20.0 - go.opentelemetry.io/otel/trace v0.20.0 - go.opentelemetry.io/proto/otlp v0.7.0 go.opentelemetry.io/proto/otlp v0.19.0 - go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 - go.uber.org/atomic v1.4.0 - go.uber.org/atomic v1.7.0 - go.uber.org/goleak v1.1.10 - go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 - go.uber.org/goleak v1.1.12 - go.uber.org/goleak v1.2.0 - go.uber.org/multierr v1.1.0 - go.uber.org/multierr v1.6.0 - go.uber.org/zap v1.10.0 - go.uber.org/zap v1.17.0 - go.uber.org/zap v1.19.0 - go.uber.org/zap v1.19.1 - gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 - gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 - gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f + golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 + golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 + golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 gopkg.in/yaml.v2 v2.2.1 gopkg.in/yaml.v2 v2.2.2 gopkg.in/yaml.v2 v2.2.4 gopkg.in/yaml.v2 v2.2.8 gopkg.in/yaml.v2 v2.3.0 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c - gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b - gopkg.in/yaml.v3 v3.0.0 ) -// trim old golang.org/x/ and github.com/golang/ items +// exclude some old crypto/serialisation libs, even though we won't use them, +// just to make sure they don't come up in our tree exclude ( - github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e - // exclude github.com/golang/protobuf < 1.3.2 https://nvd.nist.gov/vuln/detail/CVE-2021-3121 - github.com/golang/protobuf v1.0.0 - github.com/golang/protobuf v1.1.1 + github.com/golang-jwt/jwt/v4 v4.0.0 + github.com/golang-jwt/jwt/v4 v4.5.0 github.com/golang/protobuf v1.2.0 - github.com/golang/protobuf v1.2.1 - github.com/golang/protobuf v1.3.0 - github.com/golang/protobuf v1.3.1 + github.com/golang/protobuf v1.3.2 + github.com/golang/protobuf v1.3.4 + github.com/golang/protobuf v1.3.5 github.com/golang/protobuf v1.4.2 github.com/golang/protobuf v1.4.3 - github.com/golang/protobuf v1.5.0 github.com/golang/protobuf v1.5.2 - go.uber.org/mock v1.4.4 - golang.org/x/arch v0.0.0-20180920145803-b19384d3c130 + github.com/golang/protobuf v1.5.3 + github.com/google/uuid v1.3.0 + github.com/gorilla/websocket v1.4.2 + github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 + 
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e + github.com/mailru/easyjson v0.7.6 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f - golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 - golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa - golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 - golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838 + golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa + golang.org/x/crypto v0.1.0 golang.org/x/crypto v0.6.0 golang.org/x/crypto v0.14.0 golang.org/x/lint v0.0.0-20190409202823-959b441ac422 golang.org/x/lint v0.0.0-20190930215403-16217165b5de - golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 golang.org/x/mod v0.4.2 golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 @@ -1426,129 +1374,17 @@ exclude ( golang.org/x/tools v0.3.0 golang.org/x/tools v0.6.0 golang.org/x/tools v0.7.0 - golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 - golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 ) -// exclude ancient k8s versions +// exclude some old k8s deps that make weird branches exclude ( - k8s.io/api v0.0.0 - k8s.io/api v0.18.0-beta.2 - k8s.io/api v0.18.3 - k8s.io/api v0.19.2 - k8s.io/api v0.19.3 - k8s.io/api v0.19.4 - k8s.io/api v0.20.0 - k8s.io/api v0.20.6 - k8s.io/api v0.21.0 - k8s.io/api v0.21.1 - k8s.io/api v0.22.1 - k8s.io/api v0.23.0 - k8s.io/api v0.23.1 - k8s.io/apiextensions-apiserver v0.0.0 - k8s.io/apiextensions-apiserver v0.18.0-beta.2 - k8s.io/apiextensions-apiserver v0.18.3 - k8s.io/apiextensions-apiserver v0.19.2 - k8s.io/apiextensions-apiserver v0.19.3 - k8s.io/apiextensions-apiserver v0.21.0 - k8s.io/apiextensions-apiserver v0.21.1 - k8s.io/apiextensions-apiserver v0.22.1 - k8s.io/apiextensions-apiserver v0.23.0 - k8s.io/apiextensions-apiserver v0.23.1 - k8s.io/apiextensions-apiserver v0.23.5 - k8s.io/apimachinery v0.0.0 - k8s.io/apimachinery v0.18.0-beta.2 - k8s.io/apimachinery v0.18.3 - k8s.io/apimachinery v0.19.2 - k8s.io/apimachinery v0.19.3 - k8s.io/apimachinery v0.19.4 - k8s.io/apimachinery v0.20.0 - k8s.io/apimachinery v0.20.2 - k8s.io/apimachinery v0.20.6 - k8s.io/apimachinery v0.21.0 - k8s.io/apimachinery v0.21.1 - k8s.io/apimachinery v0.22.1 - k8s.io/apimachinery v0.23.0 - k8s.io/apimachinery v0.23.1 - k8s.io/apimachinery v0.23.5 - k8s.io/apiserver v0.0.0 - k8s.io/apiserver v0.20.6 - k8s.io/apiserver v0.21.0 - k8s.io/apiserver v0.22.1 - k8s.io/apiserver v0.23.0 - k8s.io/apiserver v0.23.1 - k8s.io/apiserver v0.23.5 - k8s.io/cli-runtime v0.0.0 - k8s.io/cli-runtime v0.21.0 - k8s.io/cli-runtime v0.23.0 - k8s.io/cli-runtime v0.23.1 - k8s.io/client-go v0.0.0 - k8s.io/client-go v0.18.0-beta.2 - k8s.io/client-go v0.19.2 - k8s.io/client-go v0.19.3 - k8s.io/client-go v0.19.4 - k8s.io/client-go v0.20.0 - k8s.io/client-go v0.20.6 - k8s.io/client-go v0.21.0 - k8s.io/client-go v0.21.1 - k8s.io/client-go v0.22.1 - k8s.io/client-go v0.23.0 - k8s.io/client-go v0.23.1 - k8s.io/client-go v0.23.5 - k8s.io/code-generator v0.0.0 - 
k8s.io/code-generator v0.18.0-beta.2 - k8s.io/code-generator v0.19.7 - k8s.io/code-generator v0.20.0 - k8s.io/code-generator v0.21.0 - k8s.io/code-generator v0.23.0 - k8s.io/component-base v0.0.0 - k8s.io/component-base v0.19.2 - k8s.io/component-base v0.19.4 - k8s.io/component-base v0.20.6 - k8s.io/component-base v0.21.0 - k8s.io/component-base v0.21.1 - k8s.io/component-base v0.22.1 - k8s.io/component-base v0.23.0 - k8s.io/component-base v0.23.1 - k8s.io/component-base v0.23.5 - k8s.io/controller-manager v0.0.0 - k8s.io/cri-api v0.0.0 - k8s.io/cri-api v0.20.6 - k8s.io/gengo v0.0.0-20201113003025-83324d819ded + k8s.io/api v0.23.3 + k8s.io/apimachinery v0.23.3 + k8s.io/code-generator v0.23.3 k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c - k8s.io/klog v1.0.0 - k8s.io/klog/v2 v2.0.0 - k8s.io/klog/v2 v2.2.0 - k8s.io/klog/v2 v2.4.0 - k8s.io/klog/v2 v2.8.0 - k8s.io/klog/v2 v2.9.0 - k8s.io/klog/v2 v2.30.0 + k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 k8s.io/klog/v2 v2.40.1 - k8s.io/klog/v2 v2.60.1 - k8s.io/klog/v2 v2.70.1 - k8s.io/kube-aggregator v0.0.0 - k8s.io/kube-aggregator v0.18.0-beta.2 - k8s.io/kube-aggregator v0.23.0 - k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e - k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf - k8s.io/kube-scheduler v0.0.0 - k8s.io/kubectl v0.0.0 - k8s.io/kubectl v0.21.0 - k8s.io/kubectl v0.22.0 - k8s.io/kubectl v0.23.0 - k8s.io/kubectl v0.23.1 - k8s.io/kubelet v0.0.0 - k8s.io/legacy-cloud-providers v0.0.0 - k8s.io/metrics v0.0.0 - k8s.io/mount-utils v0.0.0 - k8s.io/pod-security-admission v0.0.0 - k8s.io/sample-apiserver v0.0.0 - k8s.io/system-validators v1.6.0 - k8s.io/utils v0.0.0-20201110183641-67b214c5f920 - k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 k8s.io/utils v0.0.0-20210802155522-efc7438f0176 k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b k8s.io/utils v0.0.0-20211116205334-6203023598ed @@ -1577,40 +1413,12 @@ exclude ( sigs.k8s.io/structured-merge-diff/v4 v4.1.2 sigs.k8s.io/structured-merge-diff/v4 v4.2.0 sigs.k8s.io/structured-merge-diff/v4 v4.2.1 - sigs.k8s.io/yaml v1.2.0 -) - -// k8s.io and sigs.k8s.io pins -replace ( - k8s.io/api => k8s.io/api v0.25.16 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.25.16 - k8s.io/apimachinery => k8s.io/apimachinery v0.25.16 - k8s.io/apiserver => k8s.io/apiserver v0.25.16 - k8s.io/client-go => k8s.io/client-go v0.25.16 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.25.16 // required for k8s.io/kubernetes - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.25.16 // required for k8s.io/kubernetes - k8s.io/code-generator => k8s.io/code-generator v0.25.16 - k8s.io/component-base => k8s.io/component-base v0.25.16 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.25.16 // required for k8s.io/kubernetes - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.25.16 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.25.16 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.25.16 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.25.16 - k8s.io/kubectl => k8s.io/kubectl v0.25.16 - k8s.io/kubernetes => k8s.io/kubernetes v1.25.16 - sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.11.2 - sigs.k8s.io/kustomize/api => sigs.k8s.io/kustomize/api v0.11.2 - sigs.k8s.io/kustomize/kyaml => sigs.k8s.io/kustomize/kyaml v0.13.3 - sigs.k8s.io/structured-merge-diff => sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 + sigs.k8s.io/yaml v1.3.0 ) // OpenShift pins replace ( 
- github.com/googleapis/gnostic => github.com/google/gnostic v0.5.5 - github.com/openshift/api => github.com/openshift/api v0.0.0-20240103200955-7ca3a4634e46 - github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c github.com/openshift/hive/apis => github.com/openshift/hive/apis v0.0.0-20231116161336-9dd47f8bfa1f - github.com/openshift/library-go => github.com/openshift/library-go v0.0.0-20230222114049-eac44a078a6e github.com/openshift/machine-config-operator => github.com/openshift/machine-config-operator v0.0.1-0.20230908201248-46b93e64dea6 go.mongodb.org/mongo-driver => go.mongodb.org/mongo-driver v1.9.4 google.golang.org/grpc => google.golang.org/grpc v1.56.3 diff --git a/go.sum b/go.sum index 60eafb2dcce..971727d8025 100644 --- a/go.sum +++ b/go.sum @@ -88,7 +88,6 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/hcsshim v0.12.5 h1:bpTInLlDy/nDRWFVcefDZZ1+U8tS+rz3MxjKgu9boo0= github.com/Microsoft/hcsshim v0.12.5/go.mod h1:tIUGego4G1EN5Hb6KC90aDYiUI2dqLSTTOCjVNpOgZ8= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= @@ -96,8 +95,8 @@ github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAU github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed h1:ue9pVfIcP+QMEjfgo/Ez4ZjNZfonGgR6NgjMaJMu1Cg= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= @@ -169,8 +168,9 @@ github.com/containers/storage v1.55.1/go.mod h1:28cB81IDk+y7ok60Of6u52RbCeBRucbF github.com/coreos/go-json v0.0.0-20211020211907-c63f628265de/go.mod h1:lryFBkhadOfv8Jue2Vr/f/Yviw8h1DQPQojbXqEChY0= github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI= github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= 
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 h1:OoRAFlvDGCUqDLampLQjk0yeeSGdF9zzst/3G9IkBbc= github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09/go.mod h1:m2r/smMKsKwgMSAoFKHaa68ImdCSNuKE1MxvQ64xuCQ= @@ -212,20 +212,16 @@ github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= -github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= -github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= -github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= +github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= -github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= +github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -237,13 +233,8 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= -github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0= github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= @@ -254,18 +245,15 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= -github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= @@ -278,7 +266,6 @@ github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5 github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= @@ -303,7 +290,6 @@ github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1 github.com/gofrs/uuid 
v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= @@ -313,22 +299,18 @@ github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0L github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.12.6 h1:kjeKudqV0OygrAqA9fX6J55S8gj+Jre2tckIm5RoG4M= -github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= -github.com/google/gnostic v0.5.5 h1:xaJtlbPCF2oT4Aidl/Al5W6lRq7g5+biHTihznoaa7k= -github.com/google/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= +github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.20.0 h1:wRqHpOeVh3DnenOrPy9xDOLdnLatiGuuNRVelR2gSbg= @@ -344,12 +326,10 @@ github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= 
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI= github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E= @@ -358,9 +338,12 @@ github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kX github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/gorilla/sessions v1.2.2 h1:lqzMYz6bOfvn2WriPUjNByzeXIlVzURcPmgMczkmTjY= github.com/gorilla/sessions v1.2.2/go.mod h1:ePLdVu+jbEgHH+KWw8I1z2wqd0BAdAQh/8LRvBeoNcQ= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= @@ -372,8 +355,6 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -407,7 +388,6 @@ github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQ github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod 
h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -422,10 +402,6 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= @@ -458,7 +434,6 @@ github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPn github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= @@ -485,6 +460,7 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -494,23 +470,15 @@ github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JX github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= 
-github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= -github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/open-policy-agent/frameworks/constraint v0.0.0-20221109005544-7de84dff5081 h1:LcxhUNtgAf1dvHIDfOI/sO0LQALSxQCBHxTZ/5CrBxo= @@ -527,18 +495,18 @@ github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc h1: github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc/go.mod h1:8tx1helyqhUC65McMm3x7HmOex8lO2/v9zPuxmKHurs= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= -github.com/openshift/api v0.0.0-20240103200955-7ca3a4634e46 h1:mnrBzHjjqYKw2uinOVXL9Eplj3+QaQwJ3SaWAs8l6cc= -github.com/openshift/api v0.0.0-20240103200955-7ca3a4634e46/go.mod h1:aQ6LDasvHMvHZXqLHnX2GRmnfTWCF/iIwz8EMTTIE9A= -github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c h1:CV76yFOTXmq9VciBR3Bve5ZWzSxdft7gaMVB3kS0rwg= -github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c/go.mod h1:lFMO8mLHXWFzSdYvGNo8ivF9SfF6zInA8ZGw4phRnUE= -github.com/openshift/cloud-credential-operator v0.0.0-20240910012137-a0245d57d1e6 h1:O6pXh1gieoLIpT7SJXS07evcY6a65ZizIedH3ltrdQE= -github.com/openshift/cloud-credential-operator v0.0.0-20240910012137-a0245d57d1e6/go.mod h1:rkcI1Oo6VPN1A6WOcWOXIuVp8dMMahFnfgzYUt0X1aA= +github.com/openshift/api v0.0.0-20240830023148-b7d0481c9094 h1:J1wuGhVxpsHykZBa6Beb1gQ96Ptej9AE/BvwCBiRj1E= +github.com/openshift/api v0.0.0-20240830023148-b7d0481c9094/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4= +github.com/openshift/client-go v0.0.0-20240510131258-f646d5f29250 h1:WQ0e89xebsNeChdYw8QrxggYkFtEQOaLBEKJDkMmcUk= +github.com/openshift/client-go v0.0.0-20240510131258-f646d5f29250/go.mod h1:tjGjTE59N1+5W0YE4Wqf0zJpFLIY4d+EpMihDKOn1oA= +github.com/openshift/cloud-credential-operator v0.0.0-20250107183716-87a85dc8e12e h1:MQvqTFHJ8/S19CDjc7espw8YKnBzRhjflJcQBIEo3jI= +github.com/openshift/cloud-credential-operator v0.0.0-20250107183716-87a85dc8e12e/go.mod h1:b2+g5xlPxBO58w6SbwhBM6uWNeJUDzjWx/9I4nzzxrQ= github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 h1:cHyxR+Y8rAMT6m1jQCaYGRwikqahI0OjjUDhFNf3ySQ= github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87/go.mod 
h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= github.com/openshift/hive/apis v0.0.0-20231116161336-9dd47f8bfa1f h1:CedFPNwCSYbjo7rCr9YrJs/0OrBf3m8sLlUZw66VDwY= github.com/openshift/hive/apis v0.0.0-20231116161336-9dd47f8bfa1f/go.mod h1:RRH8lt09SAiPECNdsbh7Gun0lkcRWi1nYKq6tDp5WxQ= -github.com/openshift/library-go v0.0.0-20230222114049-eac44a078a6e h1:Q5Lcxl/4MdNqj0b2cEi0mzk/Ls9JXvGkCTw7qnVSjCo= -github.com/openshift/library-go v0.0.0-20230222114049-eac44a078a6e/go.mod h1:AMZwYwSdbvALDl3QobEzcJ2IeDO7DYLsr42izKzh524= +github.com/openshift/library-go v0.0.0-20240711192904-190fec8c3f09 h1:2Ed1mxJTRFpbHIynxkuIyADeWdtyQ6NEZkTaf0wG39M= +github.com/openshift/library-go v0.0.0-20240711192904-190fec8c3f09/go.mod h1:lFwyRj0XjUf25Da3Q00y+KuaxCWTJ6YzYPDX1+96nco= github.com/openshift/machine-config-operator v0.0.1-0.20230908201248-46b93e64dea6 h1:9aif5Z7d6VJOsvqpDrn5HHaK9aR463OJadLxE4YL/dg= github.com/openshift/machine-config-operator v0.0.1-0.20230908201248-46b93e64dea6/go.mod h1:zXXyi68fZLTGiIamAIwETIJdts9FCdyi9OCpnxh0N84= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= @@ -556,16 +524,15 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0= github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.50.0 h1:eIYVhtUPLDah0nhcHaWItFM595UAGVFKECaWoW02FUA= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.50.0/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg= -github.com/prometheus-operator/prometheus-operator/pkg/client v0.48.1 h1:i3bwALeHBJaLRw+z/8IPujQpAAVwqXpyLrftdhysFrk= -github.com/prometheus-operator/prometheus-operator/pkg/client v0.48.1/go.mod h1:k4BrWlVQQsvBiTcDnKEMgyh/euRxyxgrHdur/ZX/sdA= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0 h1:AHzMWDxNiAVscJL6+4wkvFRTpMnJqiaZFEKA/osaBXE= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0/go.mod h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA= +github.com/prometheus-operator/prometheus-operator/pkg/client v0.74.0 h1:SyBTzvFuVshDNjDVALs6+NgOy3qh8/xlAsyqB1SzHbI= +github.com/prometheus-operator/prometheus-operator/pkg/client v0.74.0/go.mod h1:FlcnLo14zQxL6P1yPrV22kYBqyAT0ZRRytv98+B7lBQ= github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= @@ -584,8 +551,7 @@ github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfm github.com/rogpeppe/fastuuid v1.2.0/go.mod 
h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= -github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk= github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0= @@ -599,7 +565,6 @@ github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbm github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= github.com/serge1peshcoff/selenium-go-conditions v0.0.0-20170824121757-5afbdb74596b h1:jLwzNAxsHzKw5sHju7bUk0iQSynZxWAOtnXD5d37Vto= github.com/serge1peshcoff/selenium-go-conditions v0.0.0-20170824121757-5afbdb74596b/go.mod h1:noHZFMVoy0oY+ICCojiGUgv+/ecK+1M6huoUVWAIJoU= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/sigstore/fulcio v1.4.5 h1:WWNnrOknD0DbruuZWCbN+86WRROpEl3Xts+WT2Ek1yc= @@ -620,6 +585,7 @@ github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace h1:9PNP1jnUjRhfmGMlkXHjYPishpcw4jpSt/V/xYY3FMA= github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.20.0-alpha.6 h1:f65Cr/+2qk4GfHC0xqT/isoupQppwN5+VLRztUGTDbY= @@ -628,10 +594,20 @@ github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLy github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= @@ -675,18 +651,26 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= -github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= -github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k= +go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI= +go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0= +go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U= +go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao= +go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= go.mongodb.org/mongo-driver v1.9.4 h1:qXWlnK2WCOWSxJ/Hm3XyYOGKv3ujA2btBsCyuIFvQjc= go.mongodb.org/mongo-driver v1.9.4/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp 
v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= @@ -694,6 +678,8 @@ go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= @@ -704,8 +690,8 @@ go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= -go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd h1:Uo/x0Ir5vQJ+683GXB9Ug+4fcjsbp7z7Ul8UaZbhsRM= -go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= @@ -714,7 +700,6 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= @@ -722,7 +707,6 @@ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbR golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= @@ -731,7 +715,7 @@ golang.org/x/sync v0.11.0 
h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= @@ -740,15 +724,10 @@ golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= -gomodules.xyz/jsonpatch/v2 v2.3.0 h1:8NFhfS6gzxNqjLIYnZxg319wZ5Qjnx4m/CcX+Klzazc= -gomodules.xyz/jsonpatch/v2 v2.3.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 h1:MuYw1wJzT+ZkybKfaOXKp5hJiZDn2iHaXRw0mRYdHSc= google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4/go.mod h1:px9SlOOZBg1wM1zdnr8jEL4CNGUBZ+ZKYtNPApNQc4c= google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk= @@ -757,6 +736,9 @@ google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -772,49 +754,46 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= 
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= -k8s.io/api v0.25.16 h1:6VsSxn0vCdHuAVh82EZCKoM457LuUpQQb6YXfQunz7Q= -k8s.io/api v0.25.16/go.mod h1:7pehrlB/DJ0qzxicMELX7IZlgL6J5lnSOBPey/cuGBs= -k8s.io/apiextensions-apiserver v0.25.16 h1:M03IXtKn2dptWxGtqrh3ZK2xeIFTCKsZBSaUpRCFQV8= -k8s.io/apiextensions-apiserver v0.25.16/go.mod h1:RJVnHbx5/rIU+xAfdgauaAI8hSC2Pmx3/olTc9Eq2Vs= -k8s.io/apimachinery v0.25.16 h1:Mk5h4zbHVZh4ZkgRnciYdLRqOREt2dAGTJesDY0WO7U= -k8s.io/apimachinery v0.25.16/go.mod h1:34oJjP2pnWhz64k0GETsMvDeAp2A2v+gKa/u3tV/+6k= -k8s.io/apiserver v0.25.16 h1:RClMqIiPc0QsGy63cRbif3FwD0b+Bw1o4sRLMmWTufk= -k8s.io/apiserver v0.25.16/go.mod h1:Hdo/mBliyyE7jN0LjToUNrRXq20Wc3nOVSXwC1Fxa5M= -k8s.io/cli-runtime v0.25.16 h1:u6Uzd1ZHAa9N8y0nXvevTn7+h+KSBJ1M0NAxD+wPVy8= -k8s.io/cli-runtime v0.25.16/go.mod h1:aGnXwnp70LXzW7lyK4pOdWMTeunz1JL42JxcoW4B40c= -k8s.io/client-go v0.25.16 h1:Lf8gd4vP9hR2sDuSU+9id9Mj23u8X58nESZ5m1JcI24= -k8s.io/client-go v0.25.16/go.mod h1:6y5hyLavOErqO5LWUzk5rwKpRqmwZsE3oe3RW0cgs9s= -k8s.io/code-generator v0.25.16/go.mod h1:rv2rN7kkHpAMnEULzXSC380Tsj1rN7nkpzVqT59ptjk= -k8s.io/component-base v0.25.16 h1:ywaMQRJoCCQwWSM6z9jC+RJWBY/CcJDVgnyyIhuNAkQ= -k8s.io/component-base v0.25.16/go.mod h1:ZMFrcDjbpM12U5hvZ8bXgPf9U2Wrp0McSGMhoUkA9Mo= -k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/api v0.29.14 h1:JWFh5ufowH3Y6tCgEzY3URVJHb27f0tEDEej0nCjWDw= +k8s.io/api v0.29.14/go.mod h1:IV8YqKxMm8JGLBLlHM13Npn5lCITH10XYipWEW+YEOQ= +k8s.io/apiextensions-apiserver v0.29.14 h1:gw9WXrZJPu5kpI1UC+Wf8BVe9PWwRUB/UZXU8VzBsq4= +k8s.io/apiextensions-apiserver v0.29.14/go.mod h1:TJ51W+HKW2XqTtAsEFOz1/OohsMtekbKaTXh8ldioL4= +k8s.io/apimachinery v0.29.14 h1:IDhwnGNCp836SLOwW1SoEfFNV77wxIklhxeAHX9vmSo= +k8s.io/apimachinery v0.29.14/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= +k8s.io/apiserver v0.29.14 h1:XTo9lDDsG4NkywEuzG0SDLrBv6+AyVIpqVzZjg+pWRs= +k8s.io/apiserver v0.29.14/go.mod h1:jC0HqUfqFKMp111xs97CXkf8XTQXtnbukRuuwDH74yE= +k8s.io/cli-runtime v0.29.14 h1:CJis59afFwxzV4RBKR6rpS94C5onIypVsRqq4hyydPk= +k8s.io/cli-runtime v0.29.14/go.mod h1:PBDI2lTaFj/qdNx5C6HGlCH+KMmtl1JVJcmCLMveTc4= +k8s.io/client-go v0.29.14 h1:OSnzZ9DClaFRgl3zMAY2kGZhNjdGJkEb+RDz+MW2h6k= +k8s.io/client-go v0.29.14/go.mod h1:XtZt5n5UxKfPJ+sCoTPcEavWgZbLFFxMnAFFRQGK1RY= +k8s.io/component-base v0.29.14 h1:SF1DWN7bc2VloJ/ysegGoi/aHnopEo81aw9CslhqXIw= +k8s.io/component-base v0.29.14/go.mod h1:FoK1PHhFTaEQVvQLw29/Uyfd8Ug0qUKHrUcXIXJ1VxI= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-aggregator v0.25.16 h1:yvWWF2bHyuNQ/dMwgWcbh3MkYl1hwl2A7dMwvMMwMIg= -k8s.io/kube-aggregator v0.25.16/go.mod h1:fmC08uLMNFUQj1pvtCYL0cTjhq+KDC9o31p51aH9dLE= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= -k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= -k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= -k8s.io/kubectl v0.25.16 h1:6TK2I58rj3Og6CZlkc6j2ARICag25ko9ALsr5w/Oe6A= -k8s.io/kubectl v0.25.16/go.mod h1:mmyKlLfVqm6s6zcKNwK8f36Vf5MAmrmudRko9/wibNA= -k8s.io/kubernetes v1.25.16 
h1:pdtEdre7aInDvXas+mWzpTiK/7cpcaGRyYB+Bedr2+Q= -k8s.io/kubernetes v1.25.16/go.mod h1:ZrRRMmY0nDNxlnPnJopZRhF2i0tzfB3+DnitJcDe8Vw= +k8s.io/kube-aggregator v0.29.14 h1:RxT+HsddIHtbVmjqyLViMNPGQOM4RY3j87UwI8vMxKw= +k8s.io/kube-aggregator v0.29.14/go.mod h1:Bq2rKUjiTWcQQRkCljvp24ZF+YFefs0CLIeJEbUj7fU= +k8s.io/kube-openapi v0.0.0-20240322212309-b815d8309940 h1:qVoMaQV5t62UUvHe16Q3eb2c5HPzLHYzsi0Tu/xLndo= +k8s.io/kube-openapi v0.0.0-20240322212309-b815d8309940/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kubectl v0.29.14 h1:aNsAv4S8s6ldP8pPejkgPdNBBTeL/9vnl6j72F3b1z4= +k8s.io/kubectl v0.29.14/go.mod h1:/fKgBbMs87f5iWI2s2K4IFofr9tpYZ8kE9Q2m6zA4OY= +k8s.io/kubernetes v1.29.14 h1:mbk5M/WaJr/AX7p3p2/7eJjrtI+I3AKu7IEjI8+Ny/U= +k8s.io/kubernetes v1.29.14/go.mod h1:L6/pfKQZ6Tv2O8gyT4OxhGZp+nNsjV54xtNodRoup9k= k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.11.2 h1:H5GTxQl0Mc9UjRJhORusqfJCIjBO8UtUxGggCwL1rLA= -sigs.k8s.io/controller-runtime v0.11.2/go.mod h1:P6QCzrEjLaZGqHsfd+os7JQ+WFZhvB8MRFsn4dWF7O4= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= +sigs.k8s.io/controller-runtime v0.17.2 h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0= +sigs.k8s.io/controller-runtime v0.17.2/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s= sigs.k8s.io/json v0.0.0-20241009153224-e386a8af8d30 h1:ObU1vgTtAle8WwCKgcDkPjLJYwlazQpIjzSA0asMhy4= sigs.k8s.io/json v0.0.0-20241009153224-e386a8af8d30/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kube-storage-version-migrator v0.0.4 h1:qsCecgZHgdismlTt8xCmS/3numvpxrj58RWJeIg76wc= -sigs.k8s.io/kube-storage-version-migrator v0.0.4/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw= -sigs.k8s.io/kustomize/api v0.11.2 h1:6YvCJHFDwsLwAX7zNHBxMZi3k7dGIXI8G9l0saYQI0E= -sigs.k8s.io/kustomize/api v0.11.2/go.mod h1:GZuhith5YcqxIDe0GnRJNx5xxPTjlwaLTt/e+ChUtJA= -sigs.k8s.io/kustomize/kyaml v0.13.3 h1:tNNQIC+8cc+aXFTVg+RtQAOsjwUdYBZRAgYOVI3RBc4= -sigs.k8s.io/kustomize/kyaml v0.13.3/go.mod h1:/ya3Gk4diiQzlE4mBh7wykyLRFZNvqlbh+JnwQ9Vhrc= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 h1:PFWFSkpArPNJxFX4ZKWAk9NSeRoZaXschn+ULa4xVek= +sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= +sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= +sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= +sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U= +sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sigs.k8s.io/yaml v1.4.0 
h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= tags.cncf.io/container-device-interface v0.8.0 h1:8bCFo/g9WODjWx3m6EYl3GfUG31eKJbaggyBDxEldRc= diff --git a/hack/update-go-module-dependencies.sh b/hack/update-go-module-dependencies.sh deleted file mode 100755 index 819ef5fa695..00000000000 --- a/hack/update-go-module-dependencies.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -ex - -# Background: https://groups.google.com/forum/#!topic/golang-nuts/51-D_YFC78k -# -# TLDR: OCP consists of many repos where for each release a new release branch gets created (release-X.Y). -# When we update vendors we want to get latest changes from the release branch for all of the dependencies. -# With Go modules we can't easily do it, but there is a workaround which consists of multiple steps: -# 1. Get the latest commit from the branch using `go list -mod=mod -m MODULE@release-x.y`. -# 2. Using `sed`, transform output of the above command into format accepted by `go mod edit -replace`. -# 3. Modify `go.mod` by calling `go mod edit -replace`. -# -# This needs to happen for each module that uses this branching strategy: all these repos -# live under github.com/openshift organisation. -# -# There are however, some exceptions: -# * Some repos under github.com/openshift do not use this strategy. -# We should skip them in this script and manage directly with `go mod`. -# * Some dependencies pin their own dependencies to older commits. -# For example, dependency Foo from release-4.7 branch requires -# dependency Bar at older commit which is -# not compatible with Bar@release-4.7. - -for x in vendor/github.com/openshift/*; do - case $x in - # Review the list of special cases on each release. - - # Do not update Hive: it is not part of OCP - vendor/github.com/openshift/hive) - ;; - - # This repo doesn't follow release-x.y branching strategy - # We skip it and let go mod resolve it - vendor/github.com/openshift/custom-resource-status) - ;; - - *) - go mod edit -replace ${x##vendor/}=$(go list -mod=mod -m ${x##vendor/}@release-4.10 | sed -e 's/ /@/') - ;; - esac -done - -go get -u ./... 
- diff --git a/pkg/cluster/delete.go b/pkg/cluster/delete.go index 538ac79db65..700d019044d 100644 --- a/pkg/cluster/delete.go +++ b/pkg/cluster/delete.go @@ -332,13 +332,13 @@ func (m *manager) deleteGatewayAndWait(ctx context.Context) error { } m.log.Info("waiting for gateway record deletion") - return wait.PollImmediateUntil(15*time.Second, func() (bool, error) { + return wait.PollUntilContextCancel(timeoutCtx, 15*time.Second, true, func(ctx context.Context) (bool, error) { _, err := m.dbGateway.Get(ctx, m.doc.OpenShiftCluster.Properties.NetworkProfile.GatewayPrivateLinkID) - if err != nil && cosmosdb.IsErrorStatusCode(err, http.StatusNotFound) /* already gone */ { - return true, nil + if err != nil && cosmosdb.IsErrorStatusCode(err, http.StatusNotFound) { + return true, err } return false, nil - }, timeoutCtx.Done()) + }) } func (m *manager) deleteGateway(ctx context.Context) error { diff --git a/pkg/cluster/deploybaseresources.go b/pkg/cluster/deploybaseresources.go index b9a3427421e..11c2ecdf280 100644 --- a/pkg/cluster/deploybaseresources.go +++ b/pkg/cluster/deploybaseresources.go @@ -355,7 +355,7 @@ func (m *manager) _attachNSGs(ctx context.Context, timeout time.Duration, pollIn // NSG since the inner loop is tolerant of that, and since we are attaching // the same NSG the only allowed failure case is when the NSG cannot be // attached to begin with, so it shouldn't happen in practice. - _ = wait.PollImmediateUntil(pollInterval, func() (bool, error) { + _ = wait.PollUntilContextCancel(timeoutCtx, pollInterval, true, func(ctx context.Context) (bool, error) { var c bool c, innerErr = func() (bool, error) { for _, subnetID := range []string{ @@ -412,9 +412,8 @@ func (m *manager) _attachNSGs(ctx context.Context, timeout time.Duration, pollIn } return true, nil }() - return c, innerErr - }, timeoutCtx.Done()) + }) return innerErr } diff --git a/pkg/cluster/install.go b/pkg/cluster/install.go index d5f2d29de24..c47fe385b9d 100644 --- a/pkg/cluster/install.go +++ b/pkg/cluster/install.go @@ -15,6 +15,7 @@ import ( kruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" @@ -632,7 +633,12 @@ func (m *manager) initializeKubernetesClients(ctx context.Context) error { return err } - mapper, err := apiutil.NewDynamicRESTMapper(restConfig, apiutil.WithLazyDiscovery) + httpClient, err := rest.HTTPClientFor(restConfig) + if err != nil { + return err + } + + mapper, err := apiutil.NewDynamicRESTMapper(restConfig, httpClient) if err != nil { return err } diff --git a/pkg/deploy/predeploy.go b/pkg/deploy/predeploy.go index f3881e9f181..39c3d19aa0e 100644 --- a/pkg/deploy/predeploy.go +++ b/pkg/deploy/predeploy.go @@ -570,9 +570,9 @@ func (d *deployer) restartOldScaleset(ctx context.Context, vmssName string, lbHe } func (d *deployer) waitForReadiness(ctx context.Context, vmssName string, vmInstanceID string) error { - return wait.PollImmediateUntil(10*time.Second, func() (bool, error) { + return wait.PollUntilContextCancel(ctx, 10*time.Second, true, func(ctx context.Context) (bool, error) { return d.isVMInstanceHealthy(ctx, d.config.RPResourceGroupName, vmssName, vmInstanceID), nil - }, ctx.Done()) + }) } func (d *deployer) isVMInstanceHealthy(ctx context.Context, resourceGroupName string, vmssName string, vmInstanceID string) bool { diff --git a/pkg/deploy/predeploy_test.go b/pkg/deploy/predeploy_test.go index d654489d453..86ea438ba07 
100644 --- a/pkg/deploy/predeploy_test.go +++ b/pkg/deploy/predeploy_test.go @@ -1661,7 +1661,7 @@ func TestWaitForReadiness(t *testing.T) { cancel: cancelFastTimeout, }, mocks: []mock{getInstanceViewMock(unhealthyVMSS)}, - wantErr: "timed out waiting for the condition", + wantErr: "context deadline exceeded", }, { name: "run successfully after confirming healthy status", diff --git a/pkg/deploy/upgrade_gateway.go b/pkg/deploy/upgrade_gateway.go index c9cb28c4c35..9e6af4306b3 100644 --- a/pkg/deploy/upgrade_gateway.go +++ b/pkg/deploy/upgrade_gateway.go @@ -39,7 +39,7 @@ func (d *deployer) gatewayWaitForReadiness(ctx context.Context, vmssName string) } d.log.Printf("waiting for %s instances to be healthy", vmssName) - return wait.PollImmediateUntil(10*time.Second, func() (bool, error) { + return wait.PollUntilContextCancel(ctx, 10*time.Second, true, func(ctx context.Context) (bool, error) { for _, vm := range scalesetVMs { if !d.isVMInstanceHealthy(ctx, d.config.GatewayResourceGroupName, vmssName, *vm.InstanceID) { return false, nil @@ -47,7 +47,7 @@ func (d *deployer) gatewayWaitForReadiness(ctx context.Context, vmssName string) } return true, nil - }, ctx.Done()) + }) } func (d *deployer) gatewayRemoveOldScalesets(ctx context.Context) error { diff --git a/pkg/deploy/upgrade_rp.go b/pkg/deploy/upgrade_rp.go index cf9a845473b..46737445227 100644 --- a/pkg/deploy/upgrade_rp.go +++ b/pkg/deploy/upgrade_rp.go @@ -39,7 +39,7 @@ func (d *deployer) rpWaitForReadiness(ctx context.Context, vmssName string) erro } d.log.Printf("waiting for %s instances to be healthy", vmssName) - return wait.PollImmediateUntil(10*time.Second, func() (bool, error) { + return wait.PollUntilContextCancel(ctx, 10*time.Second, true, func(ctx context.Context) (bool, error) { for _, vm := range scalesetVMs { if !d.isVMInstanceHealthy(ctx, d.config.RPResourceGroupName, vmssName, *vm.InstanceID) { return false, nil @@ -47,7 +47,7 @@ func (d *deployer) rpWaitForReadiness(ctx context.Context, vmssName string) erro } return true, nil - }, ctx.Done()) + }) } func (d *deployer) rpRemoveOldScalesets(ctx context.Context) error { diff --git a/pkg/frontend/adminactions/kubeactions.go b/pkg/frontend/adminactions/kubeactions.go index c9737510f7e..0d6c3f6f716 100644 --- a/pkg/frontend/adminactions/kubeactions.go +++ b/pkg/frontend/adminactions/kubeactions.go @@ -19,6 +19,7 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "github.com/Azure/ARO-RP/pkg/api" @@ -59,7 +60,12 @@ func NewKubeActions(log *logrus.Entry, env env.Interface, oc *api.OpenShiftClust return nil, err } - mapper, err := apiutil.NewDynamicRESTMapper(restConfig, apiutil.WithLazyDiscovery) + httpClient, err := rest.HTTPClientFor(restConfig) + if err != nil { + return nil, err + } + + mapper, err := apiutil.NewDynamicRESTMapper(restConfig, httpClient) if err != nil { return nil, err } diff --git a/pkg/hive/manager_test.go b/pkg/hive/manager_test.go index b2e21e7b4c4..4f775c2d264 100644 --- a/pkg/hive/manager_test.go +++ b/pkg/hive/manager_test.go @@ -661,7 +661,7 @@ func TestListSyncSet(t *testing.T) { t.Fatal(err) } - if result != nil && reflect.DeepEqual(result, syncsetTest) { + if result != nil && !reflect.DeepEqual(result, syncsetTest) { t.Fatal("Unexpected syncset list returned", result) } }) @@ -702,7 +702,7 @@ func TestGetSyncSet(t *testing.T) { t.Fatal(err) } - if result != nil && reflect.DeepEqual(result, syncsetTest) { + if result != 
nil && !reflect.DeepEqual(result, syncsetTest) { t.Fatal("Unexpected syncset is returned", result) } }) diff --git a/pkg/mimo/actuator/task.go b/pkg/mimo/actuator/task.go index 493282d7c0b..f2643145afa 100644 --- a/pkg/mimo/actuator/task.go +++ b/pkg/mimo/actuator/task.go @@ -10,6 +10,7 @@ import ( "github.com/Azure/go-autorest/autorest" "github.com/sirupsen/logrus" + "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" @@ -89,7 +90,12 @@ func (t *th) ClientHelper() (clienthelper.Interface, error) { return nil, err } - mapper, err := apiutil.NewDynamicRESTMapper(restConfig, apiutil.WithLazyDiscovery) + httpClient, err := rest.HTTPClientFor(restConfig) + if err != nil { + return nil, err + } + + mapper, err := apiutil.NewDynamicRESTMapper(restConfig, httpClient) if err != nil { return nil, err } diff --git a/pkg/mimo/steps/cluster/operatorflags_test.go b/pkg/mimo/steps/cluster/operatorflags_test.go index d0801fcbad8..d7671b807c4 100644 --- a/pkg/mimo/steps/cluster/operatorflags_test.go +++ b/pkg/mimo/steps/cluster/operatorflags_test.go @@ -49,6 +49,10 @@ func TestOperatorFlags(t *testing.T) { Name: arov1alpha1.SingletonClusterName, ResourceVersion: "1000", }, + TypeMeta: metav1.TypeMeta{ + Kind: "Cluster", + APIVersion: arov1alpha1.SchemeGroupVersion.String(), + }, Spec: arov1alpha1.ClusterSpec{ OperatorFlags: arov1alpha1.OperatorFlags{ "foo": "bar", diff --git a/pkg/monitor/cluster/cluster.go b/pkg/monitor/cluster/cluster.go index 9b7755c568f..e63a3f7f513 100644 --- a/pkg/monitor/cluster/cluster.go +++ b/pkg/monitor/cluster/cluster.go @@ -115,8 +115,13 @@ func NewMonitor(log *logrus.Entry, restConfig *rest.Config, oc *api.OpenShiftClu return nil, err } + httpClient, err := rest.HTTPClientFor(restConfig) + if err != nil { + return nil, err + } + // lazy discovery will not attempt to reach out to the apiserver immediately - mapper, err := apiutil.NewDynamicRESTMapper(restConfig, apiutil.WithLazyDiscovery) + mapper, err := apiutil.NewDynamicRESTMapper(restConfig, httpClient) if err != nil { return nil, err } @@ -163,8 +168,13 @@ func getHiveClientSet(hiveRestConfig *rest.Config) (client.Client, error) { return nil, nil } + httpClient, err := rest.HTTPClientFor(hiveRestConfig) + if err != nil { + return nil, err + } + // lazy discovery will not attempt to reach out to the apiserver immediately - mapper, err := apiutil.NewDynamicRESTMapper(hiveRestConfig, apiutil.WithLazyDiscovery) + mapper, err := apiutil.NewDynamicRESTMapper(hiveRestConfig, httpClient) if err != nil { return nil, err } diff --git a/pkg/operator/controllers/banner/banner_controller.go b/pkg/operator/controllers/banner/banner_controller.go index 9d7662776e5..d8063cbeeeb 100644 --- a/pkg/operator/controllers/banner/banner_controller.go +++ b/pkg/operator/controllers/banner/banner_controller.go @@ -15,7 +15,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" consolev1 "github.com/openshift/api/console/v1" @@ -69,7 +68,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&arov1alpha1.Cluster{}, builder.WithPredicates(predicate.And(predicates.AROCluster, predicate.GenerationChangedPredicate{}))). 
// watching ConsoleNotifications in case a user edits it - Watches(&source.Kind{Type: &consolev1.ConsoleNotification{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(aroBannerPredicate)). + Watches(&consolev1.ConsoleNotification{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(aroBannerPredicate)). Named(ControllerName). Complete(r) } diff --git a/pkg/operator/controllers/base/aro_controller_test.go b/pkg/operator/controllers/base/aro_controller_test.go index 28fe677686f..36403835b20 100644 --- a/pkg/operator/controllers/base/aro_controller_test.go +++ b/pkg/operator/controllers/base/aro_controller_test.go @@ -119,18 +119,16 @@ func TestConditions(t *testing.T) { }, } { t.Run(tt.name, func(t *testing.T) { - client := ctrlfake.NewClientBuilder(). - WithObjects( - &arov1alpha1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: arov1alpha1.SingletonClusterName, - }, - Status: arov1alpha1.ClusterStatus{ - Conditions: tt.start, - OperatorVersion: "unknown", - }, - }, - ).Build() + cluster := &arov1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: arov1alpha1.SingletonClusterName, + }, + Status: arov1alpha1.ClusterStatus{ + Conditions: tt.start, + OperatorVersion: "unknown", + }, + } + client := ctrlfake.NewClientBuilder().WithObjects(cluster).WithStatusSubresource(cluster).Build() controller := AROController{ Log: logrus.NewEntry(logrus.StandardLogger()), diff --git a/pkg/operator/controllers/checkers/clusterdnschecker/controller.go b/pkg/operator/controllers/checkers/clusterdnschecker/controller.go index 502e3f7f49d..c097a798742 100644 --- a/pkg/operator/controllers/checkers/clusterdnschecker/controller.go +++ b/pkg/operator/controllers/checkers/clusterdnschecker/controller.go @@ -16,7 +16,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" operatorv1 "github.com/openshift/api/operator/v1" @@ -127,7 +126,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { builder := ctrl.NewControllerManagedBy(mgr). For(&arov1alpha1.Cluster{}, builder.WithPredicates(predicate.And(predicates.AROCluster, predicate.GenerationChangedPredicate{}))). 
Watches( - &source.Kind{Type: &operatorv1.DNS{}}, + &operatorv1.DNS{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(defaultClusterDNSPredicate), ) diff --git a/pkg/operator/controllers/checkers/clusterdnschecker/controller_test.go b/pkg/operator/controllers/checkers/clusterdnschecker/controller_test.go index d2033b7a713..05c13055070 100644 --- a/pkg/operator/controllers/checkers/clusterdnschecker/controller_test.go +++ b/pkg/operator/controllers/checkers/clusterdnschecker/controller_test.go @@ -103,7 +103,7 @@ func TestReconcile(t *testing.T) { instance.Spec.OperatorFlags[operator.CheckerEnabled] = operator.FlagFalse } - clientFake := fake.NewClientBuilder().WithObjects(instance).Build() + clientFake := fake.NewClientBuilder().WithObjects(instance).WithStatusSubresource(instance).Build() r := &Reconciler{ log: utillog.GetLogger(), diff --git a/pkg/operator/controllers/checkers/ingresscertificatechecker/controller.go b/pkg/operator/controllers/checkers/ingresscertificatechecker/controller.go index ca95130b459..66d13b4e108 100644 --- a/pkg/operator/controllers/checkers/ingresscertificatechecker/controller.go +++ b/pkg/operator/controllers/checkers/ingresscertificatechecker/controller.go @@ -17,7 +17,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" configv1 "github.com/openshift/api/config/v1" operatorv1 "github.com/openshift/api/operator/v1" @@ -128,12 +127,12 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { builder := ctrl.NewControllerManagedBy(mgr). For(&arov1alpha1.Cluster{}, builder.WithPredicates(predicate.And(predicates.AROCluster, predicate.GenerationChangedPredicate{}))). Watches( - &source.Kind{Type: &operatorv1.IngressController{}}, + &operatorv1.IngressController{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(defaultIngressControllerPredicate), ). 
Watches( - &source.Kind{Type: &configv1.ClusterVersion{}}, + &configv1.ClusterVersion{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicates.ClusterVersion), ) diff --git a/pkg/operator/controllers/checkers/ingresscertificatechecker/controller_test.go b/pkg/operator/controllers/checkers/ingresscertificatechecker/controller_test.go index 84700f854e0..3b2c738f5f2 100644 --- a/pkg/operator/controllers/checkers/ingresscertificatechecker/controller_test.go +++ b/pkg/operator/controllers/checkers/ingresscertificatechecker/controller_test.go @@ -84,7 +84,7 @@ func TestReconcile(t *testing.T) { instance.Spec.OperatorFlags[operator.CheckerEnabled] = operator.FlagFalse } - clientFake := fake.NewClientBuilder().WithObjects(instance).Build() + clientFake := fake.NewClientBuilder().WithObjects(instance).WithStatusSubresource(instance).Build() r := &Reconciler{ log: utillog.GetLogger(), diff --git a/pkg/operator/controllers/checkers/internetchecker/controller_test.go b/pkg/operator/controllers/checkers/internetchecker/controller_test.go index ce298be3c18..b1be6e623aa 100644 --- a/pkg/operator/controllers/checkers/internetchecker/controller_test.go +++ b/pkg/operator/controllers/checkers/internetchecker/controller_test.go @@ -90,7 +90,7 @@ func TestReconcile(t *testing.T) { instance.Spec.OperatorFlags[operator.CheckerEnabled] = operator.FlagFalse } - clientFake := fake.NewClientBuilder().WithObjects(instance).Build() + clientFake := fake.NewClientBuilder().WithObjects(instance).WithStatusSubresource(instance).Build() r := &Reconciler{ log: utillog.GetLogger(), diff --git a/pkg/operator/controllers/checkers/serviceprincipalchecker/controller.go b/pkg/operator/controllers/checkers/serviceprincipalchecker/controller.go index b1c3363451c..37eaf405974 100644 --- a/pkg/operator/controllers/checkers/serviceprincipalchecker/controller.go +++ b/pkg/operator/controllers/checkers/serviceprincipalchecker/controller.go @@ -17,7 +17,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" operatorv1 "github.com/openshift/api/operator/v1" @@ -124,7 +123,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { builder := ctrl.NewControllerManagedBy(mgr). For(&arov1alpha1.Cluster{}, builder.WithPredicates(predicate.And(predicates.AROCluster, predicate.GenerationChangedPredicate{}))). 
Watches( - &source.Kind{Type: &corev1.Secret{}}, + &corev1.Secret{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(clusterSPPredicate), ) diff --git a/pkg/operator/controllers/checkers/serviceprincipalchecker/controller_test.go b/pkg/operator/controllers/checkers/serviceprincipalchecker/controller_test.go index 98e91c06486..b60ad16935e 100644 --- a/pkg/operator/controllers/checkers/serviceprincipalchecker/controller_test.go +++ b/pkg/operator/controllers/checkers/serviceprincipalchecker/controller_test.go @@ -84,7 +84,7 @@ func TestReconcile(t *testing.T) { instance.Spec.OperatorFlags[operator.CheckerEnabled] = operator.FlagFalse } - clientFake := fake.NewClientBuilder().WithObjects(instance).Build() + clientFake := fake.NewClientBuilder().WithObjects(instance).WithStatusSubresource(instance).Build() r := &Reconciler{ log: utillog.GetLogger(), diff --git a/pkg/operator/controllers/cloudproviderconfig/cloudproviderconfig_controller.go b/pkg/operator/controllers/cloudproviderconfig/cloudproviderconfig_controller.go index fd33afe31a0..a028f841644 100644 --- a/pkg/operator/controllers/cloudproviderconfig/cloudproviderconfig_controller.go +++ b/pkg/operator/controllers/cloudproviderconfig/cloudproviderconfig_controller.go @@ -19,7 +19,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" "github.com/Azure/ARO-RP/pkg/operator" arov1alpha1 "github.com/Azure/ARO-RP/pkg/operator/apis/aro.openshift.io/v1alpha1" @@ -173,7 +172,7 @@ func (r *CloudProviderConfigReconciler) SetupWithManager(mgr ctrl.Manager) error return ctrl.NewControllerManagedBy(mgr). For(&arov1alpha1.Cluster{}, builder.WithPredicates(predicate.And(predicates.AROCluster, predicate.GenerationChangedPredicate{}))). Watches( - &source.Kind{Type: &corev1.ConfigMap{}}, + &corev1.ConfigMap{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(cloudProviderConfigPredicate), ). diff --git a/pkg/operator/controllers/clusteroperatoraro/clusteroperatoraro_controller_test.go b/pkg/operator/controllers/clusteroperatoraro/clusteroperatoraro_controller_test.go index 140681a6fcf..602f55fb9dd 100644 --- a/pkg/operator/controllers/clusteroperatoraro/clusteroperatoraro_controller_test.go +++ b/pkg/operator/controllers/clusteroperatoraro/clusteroperatoraro_controller_test.go @@ -231,8 +231,11 @@ func TestConditions(t *testing.T) { Conditions: tt.controllerConditions, }, } + + co := defaultOperator() clientFake := ctrlfake.NewClientBuilder(). - WithObjects(cluster). + WithObjects(cluster, co). + WithStatusSubresource(cluster, co). 
Build() r := NewReconciler(logrus.NewEntry(logrus.StandardLogger()), clientFake) @@ -274,3 +277,42 @@ func TestConditions(t *testing.T) { }) } } + +func defaultOperator() *configv1.ClusterOperator { + currentTime := metav1.Now() + return &configv1.ClusterOperator{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterOperatorName, + }, + Status: configv1.ClusterOperatorStatus{ + Versions: []configv1.OperandVersion{ + { + Name: "operator", + Version: version.GitCommit, + }, + }, + Conditions: []configv1.ClusterOperatorStatusCondition{ + { + Type: configv1.OperatorAvailable, + Status: configv1.ConditionFalse, + LastTransitionTime: currentTime, + Reason: reasonInitializing, + Message: "Operator is initializing", + }, + { + Type: configv1.OperatorProgressing, + Status: configv1.ConditionTrue, + LastTransitionTime: currentTime, + Reason: reasonInitializing, + Message: "Operator is initializing", + }, + { + Type: configv1.OperatorDegraded, + Status: configv1.ConditionFalse, + LastTransitionTime: currentTime, + Reason: reasonAsExpected, + }, + }, + }, + } +} diff --git a/pkg/operator/controllers/cpms/cpms_controller.go b/pkg/operator/controllers/cpms/cpms_controller.go index aebee30437c..58ecf1c0cd4 100644 --- a/pkg/operator/controllers/cpms/cpms_controller.go +++ b/pkg/operator/controllers/cpms/cpms_controller.go @@ -13,7 +13,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" machinev1 "github.com/openshift/api/machine/v1" @@ -93,7 +92,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&arov1alpha1.Cluster{}, builder.WithPredicates(predicate.And(aroClusterPredicate, predicate.GenerationChangedPredicate{}))). Watches( - &source.Kind{Type: &machinev1.ControlPlaneMachineSet{}}, + &machinev1.ControlPlaneMachineSet{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicate.GenerationChangedPredicate{}), // only watch for spec changes ). diff --git a/pkg/operator/controllers/dnsmasq/cluster_controller.go b/pkg/operator/controllers/dnsmasq/cluster_controller.go index ae96768e5fe..7dec48e04d1 100644 --- a/pkg/operator/controllers/dnsmasq/cluster_controller.go +++ b/pkg/operator/controllers/dnsmasq/cluster_controller.go @@ -17,7 +17,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" configv1 "github.com/openshift/api/config/v1" mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" @@ -105,7 +104,7 @@ func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { For(&arov1alpha1.Cluster{}, builder.WithPredicates(predicate.And(predicates.AROCluster, predicate.GenerationChangedPredicate{}))). Named(ClusterControllerName). Watches( - &source.Kind{Type: &configv1.ClusterVersion{}}, + &configv1.ClusterVersion{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(clusterVersionPredicate), ). 
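The controller and test changes in this patch repeat three API migrations that come with the bump to controller-runtime 0.17.2 and k8s.io/* 0.29: the builder's Watches now takes the watched client.Object directly instead of a &source.Kind{...} wrapper, apiutil.NewDynamicRESTMapper now takes an *http.Client built via rest.HTTPClientFor instead of options such as apiutil.WithLazyDiscovery, and wait.PollImmediateUntil is replaced by the context-aware wait.PollUntilContextCancel, which is why the expected timeout errors in tests change to "context deadline exceeded". The sketch below is not part of the patch; it is a minimal illustration of the new signatures, with hypothetical names (setupWatches, newMapper, waitReady) and placeholder watched types.

// Minimal, self-contained sketch of the three API migrations applied
// throughout this patch. The names and watched types here are hypothetical
// illustrations, not code from the repository.
package example

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// setupWatches: since controller-runtime v0.15 the builder's Watches method
// takes the watched object directly; the &source.Kind{Type: ...} wrapper and
// the pkg/source import are gone.
func setupWatches(mgr ctrl.Manager, r reconcile.Reconciler) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&corev1.ConfigMap{}).
		Watches(&corev1.Secret{}, &handler.EnqueueRequestForObject{},
			builder.WithPredicates(predicate.GenerationChangedPredicate{})).
		Complete(r)
}

// newMapper: apiutil.NewDynamicRESTMapper now takes an *http.Client derived
// from the rest.Config instead of functional options like WithLazyDiscovery.
func newMapper(cfg *rest.Config) (meta.RESTMapper, error) {
	httpClient, err := rest.HTTPClientFor(cfg)
	if err != nil {
		return nil, err
	}
	return apiutil.NewDynamicRESTMapper(cfg, httpClient)
}

// waitReady: wait.PollUntilContextCancel replaces the deprecated
// wait.PollImmediateUntil; the condition receives a context and cancellation
// comes from ctx rather than a stop channel.
func waitReady(ctx context.Context, check func(context.Context) (bool, error)) error {
	return wait.PollUntilContextCancel(ctx, 10*time.Second, true, check)
}

The WithStatusSubresource(...) additions in the test fake clients follow from the same bump: since controller-runtime v0.15 the fake client only honours Status().Update() for objects explicitly registered as having a status subresource, so tests that read or write status must register those objects when building the client.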
diff --git a/pkg/operator/controllers/dnsmasq/cluster_controller_test.go b/pkg/operator/controllers/dnsmasq/cluster_controller_test.go index b301e1881aa..0cba37e5885 100644 --- a/pkg/operator/controllers/dnsmasq/cluster_controller_test.go +++ b/pkg/operator/controllers/dnsmasq/cluster_controller_test.go @@ -345,6 +345,7 @@ func TestClusterReconciler(t *testing.T) { client := testclienthelper.NewHookingClient(ctrlfake.NewClientBuilder(). WithObjects(tt.objects...). + WithStatusSubresource(tt.objects...). Build()).WithPostCreateHook(testclienthelper.TallyCountsAndKey(createTally)).WithPostUpdateHook(testclienthelper.TallyCountsAndKey(updateTally)) log := logrus.NewEntry(logrus.StandardLogger()) diff --git a/pkg/operator/controllers/dnsmasq/machineconfig_controller_test.go b/pkg/operator/controllers/dnsmasq/machineconfig_controller_test.go index 4419a43329d..dba03687cad 100644 --- a/pkg/operator/controllers/dnsmasq/machineconfig_controller_test.go +++ b/pkg/operator/controllers/dnsmasq/machineconfig_controller_test.go @@ -107,6 +107,7 @@ func TestMachineConfigReconciler(t *testing.T) { client := testclienthelper.NewHookingClient(ctrlfake.NewClientBuilder(). WithObjects(tt.objects...). + WithStatusSubresource(tt.objects...). Build()) client.WithPostCreateHook(testclienthelper.TallyCountsAndKey(createTally)).WithPostUpdateHook(testclienthelper.TallyCountsAndKey(updateTally)) @@ -272,21 +273,23 @@ func TestMachineConfigReconcilerNotUpgrading(t *testing.T) { createTally := make(map[string]int) updateTally := make(map[string]int) - client := testclienthelper.NewHookingClient(ctrlfake.NewClientBuilder(). - WithObjects(tt.objects...). - WithObjects(&configv1.ClusterVersion{ - ObjectMeta: metav1.ObjectMeta{ - Name: "version", - }, - Status: configv1.ClusterVersionStatus{ - History: []configv1.UpdateHistory{ - { - State: configv1.CompletedUpdate, - Version: "4.10.11", - }, + cluster := &configv1.ClusterVersion{ + ObjectMeta: metav1.ObjectMeta{ + Name: "version", + }, + Status: configv1.ClusterVersionStatus{ + History: []configv1.UpdateHistory{ + { + State: configv1.CompletedUpdate, + Version: "4.10.11", }, }, - }). + }, + } + client := testclienthelper.NewHookingClient(ctrlfake.NewClientBuilder(). + WithObjects(tt.objects...). + WithStatusSubresource(tt.objects...). + WithObjects(cluster). Build()) client.WithPostCreateHook(testclienthelper.TallyCountsAndKey(createTally)).WithPostUpdateHook(testclienthelper.TallyCountsAndKey(updateTally)) @@ -462,6 +465,7 @@ func TestMachineConfigReconcilerClusterUpgrading(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "custom", DeletionTimestamp: &transitionTime, + Finalizers: []string{"test-finalizer"}, }, Status: mcv1.MachineConfigPoolStatus{}, Spec: mcv1.MachineConfigPoolSpec{}, @@ -485,22 +489,25 @@ func TestMachineConfigReconcilerClusterUpgrading(t *testing.T) { createTally := make(map[string]int) updateTally := make(map[string]int) - client := testclienthelper.NewHookingClient(ctrlfake.NewClientBuilder(). - WithObjects(tt.objects...). 
- WithObjects(&configv1.ClusterVersion{ - ObjectMeta: metav1.ObjectMeta{ - Name: "version", - }, - Spec: configv1.ClusterVersionSpec{}, - Status: configv1.ClusterVersionStatus{ - Conditions: []configv1.ClusterOperatorStatusCondition{ - { - Type: configv1.OperatorProgressing, - Status: configv1.ConditionTrue, - }, + clusterversion := &configv1.ClusterVersion{ + ObjectMeta: metav1.ObjectMeta{ + Name: "version", + }, + Spec: configv1.ClusterVersionSpec{}, + Status: configv1.ClusterVersionStatus{ + Conditions: []configv1.ClusterOperatorStatusCondition{ + { + Type: configv1.OperatorProgressing, + Status: configv1.ConditionTrue, }, }, - }). + }, + } + + client := testclienthelper.NewHookingClient(ctrlfake.NewClientBuilder(). + WithObjects(tt.objects...). + WithStatusSubresource(tt.objects...). + WithObjects(clusterversion). Build()) client.WithPostCreateHook(testclienthelper.TallyCountsAndKey(createTally)).WithPostUpdateHook(testclienthelper.TallyCountsAndKey(updateTally)) diff --git a/pkg/operator/controllers/dnsmasq/machineconfigpool_controller_test.go b/pkg/operator/controllers/dnsmasq/machineconfigpool_controller_test.go index d831b58b8a6..45da2910c61 100644 --- a/pkg/operator/controllers/dnsmasq/machineconfigpool_controller_test.go +++ b/pkg/operator/controllers/dnsmasq/machineconfigpool_controller_test.go @@ -113,6 +113,7 @@ func TestMachineConfigPoolReconciler(t *testing.T) { client := testclienthelper.NewHookingClient(ctrlfake.NewClientBuilder(). WithObjects(tt.objects...). + WithStatusSubresource(tt.objects...). Build()) client.WithPostCreateHook(testclienthelper.TallyCountsAndKey(createTally)).WithPostUpdateHook(testclienthelper.TallyCountsAndKey(updateTally)) diff --git a/pkg/operator/controllers/etchosts/cluster_controller.go b/pkg/operator/controllers/etchosts/cluster_controller.go index 76039eb9dd3..bfe23425965 100644 --- a/pkg/operator/controllers/etchosts/cluster_controller.go +++ b/pkg/operator/controllers/etchosts/cluster_controller.go @@ -17,7 +17,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" @@ -199,10 +198,10 @@ func (r *EtcHostsClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { etcHostsBuilder := ctrl.NewControllerManagedBy(mgr). For(&arov1alpha1.Cluster{}, builder.WithPredicates(predicate.And(predicates.AROCluster, predicate.GenerationChangedPredicate{}))). - Watches(&source.Kind{Type: &mcv1.MachineConfigPool{}}, + Watches(&mcv1.MachineConfigPool{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). - Watches(&source.Kind{Type: &mcv1.MachineConfig{}}, + Watches(&mcv1.MachineConfig{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})) diff --git a/pkg/operator/controllers/etchosts/cluster_controller_test.go b/pkg/operator/controllers/etchosts/cluster_controller_test.go index 0c7692bbba1..20f03fc8765 100644 --- a/pkg/operator/controllers/etchosts/cluster_controller_test.go +++ b/pkg/operator/controllers/etchosts/cluster_controller_test.go @@ -351,7 +351,7 @@ func TestReconcileEtcHostsCluster(t *testing.T) { } var hook = logtest.NewLocal(logger) - clientBuilder := ctrlfake.NewClientBuilder().WithObjects(tt.objects...) 
+ clientBuilder := ctrlfake.NewClientBuilder().WithObjects(tt.objects...).WithStatusSubresource(tt.objects...) r := &EtcHostsClusterReconciler{ AROController: base.AROController{ diff --git a/pkg/operator/controllers/etchosts/machineconfig_controller.go b/pkg/operator/controllers/etchosts/machineconfig_controller.go index 147841579a4..51c83af528f 100644 --- a/pkg/operator/controllers/etchosts/machineconfig_controller.go +++ b/pkg/operator/controllers/etchosts/machineconfig_controller.go @@ -18,7 +18,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" @@ -145,10 +144,10 @@ func (r *EtcHostsMachineConfigReconciler) SetupWithManager(mgr ctrl.Manager) err etcHostsBuilder := ctrl.NewControllerManagedBy(mgr). For(&mcv1.MachineConfig{}). - Watches(&source.Kind{Type: &mcv1.MachineConfigPool{}}, + Watches(&mcv1.MachineConfigPool{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). - Watches(&source.Kind{Type: &arov1alpha1.Cluster{}}, + Watches(&arov1alpha1.Cluster{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicate.And(predicates.AROCluster, predicate.GenerationChangedPredicate{}))) diff --git a/pkg/operator/controllers/etchosts/machineconfig_controller_test.go b/pkg/operator/controllers/etchosts/machineconfig_controller_test.go index c0167829d95..bc77f54d1d8 100644 --- a/pkg/operator/controllers/etchosts/machineconfig_controller_test.go +++ b/pkg/operator/controllers/etchosts/machineconfig_controller_test.go @@ -171,7 +171,7 @@ func TestReconcileEtcHostsMachineConfig(t *testing.T) { } var hook = logtest.NewLocal(logger) - clientBuilder := ctrlfake.NewClientBuilder().WithObjects(tt.objects...) + clientBuilder := ctrlfake.NewClientBuilder().WithObjects(tt.objects...).WithStatusSubresource(tt.objects...) r := &EtcHostsMachineConfigReconciler{ AROController: base.AROController{ diff --git a/pkg/operator/controllers/guardrails/guardrails_controller.go b/pkg/operator/controllers/guardrails/guardrails_controller.go index b4af07499be..72f2ac03a14 100644 --- a/pkg/operator/controllers/guardrails/guardrails_controller.go +++ b/pkg/operator/controllers/guardrails/guardrails_controller.go @@ -98,9 +98,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl. timeoutCtx, cancel := context.WithTimeout(ctx, r.readinessTimeout) defer cancel() - err := wait.PollImmediateUntil(r.readinessPollTime, func() (bool, error) { + err := wait.PollUntilContextCancel(timeoutCtx, r.readinessPollTime, true, func(ctx context.Context) (bool, error) { return r.gatekeeperDeploymentIsReady(ctx, deployConfig) - }, timeoutCtx.Done()) + }) if err != nil { return reconcile.Result{}, fmt.Errorf("GateKeeper deployment timed out on Ready: %w", err) } @@ -113,9 +113,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl. 
return reconcile.Result{}, err } - err := wait.PollImmediateUntil(r.readinessPollTime, func() (bool, error) { + err := wait.PollUntilContextCancel(timeoutCtx, r.readinessPollTime, true, func(ctx context.Context) (bool, error) { return r.gkPolicyTemplate.IsConstraintTemplateReady(ctx, policyConfig) - }, timeoutCtx.Done()) + }) if err != nil { return reconcile.Result{}, fmt.Errorf("GateKeeper ConstraintTemplates timed out on creation: %w", err) } diff --git a/pkg/operator/controllers/guardrails/guardrails_controller_test.go b/pkg/operator/controllers/guardrails/guardrails_controller_test.go index f5f3191a8d8..71ff4c59144 100644 --- a/pkg/operator/controllers/guardrails/guardrails_controller_test.go +++ b/pkg/operator/controllers/guardrails/guardrails_controller_test.go @@ -135,7 +135,7 @@ func TestGuardRailsReconciler(t *testing.T) { md.EXPECT().CreateOrUpdate(gomock.Any(), cluster, expectedConfig).Return(nil) md.EXPECT().IsReady(gomock.Any(), gomock.Any(), gomock.Any()).Return(false, nil) }, - wantErr: "GateKeeper deployment timed out on Ready: timed out waiting for the condition", + wantErr: "GateKeeper deployment timed out on Ready: context deadline exceeded", }, { name: "managed, CreateOrUpdate() fails", diff --git a/pkg/operator/controllers/imageconfig/image_controller_test.go b/pkg/operator/controllers/imageconfig/image_controller_test.go index aeac0677cdc..2267f2815aa 100644 --- a/pkg/operator/controllers/imageconfig/image_controller_test.go +++ b/pkg/operator/controllers/imageconfig/image_controller_test.go @@ -289,7 +289,7 @@ func TestImageConfigReconciler(t *testing.T) { instance = tt.instance } - clientFake := ctrlfake.NewClientBuilder().WithObjects(instance, tt.image).Build() + clientFake := ctrlfake.NewClientBuilder().WithObjects(instance, tt.image).WithStatusSubresource(instance, tt.image).Build() r := NewReconciler(logrus.NewEntry(logrus.StandardLogger()), clientFake) request := ctrl.Request{} diff --git a/pkg/operator/controllers/ingress/ingress_controller_test.go b/pkg/operator/controllers/ingress/ingress_controller_test.go index 9466d56d87b..f6752e12ef1 100644 --- a/pkg/operator/controllers/ingress/ingress_controller_test.go +++ b/pkg/operator/controllers/ingress/ingress_controller_test.go @@ -167,7 +167,7 @@ func TestReconciler(t *testing.T) { clusterMock.Status.Conditions = append(clusterMock.Status.Conditions, tt.startConditions...) 
} - clientBuilder := ctrlfake.NewClientBuilder().WithObjects(clusterMock) + clientBuilder := ctrlfake.NewClientBuilder().WithObjects(clusterMock).WithStatusSubresource(clusterMock) if tt.ingressController != nil { clientBuilder = clientBuilder.WithObjects(tt.ingressController) } diff --git a/pkg/operator/controllers/machine/machine_controller_test.go b/pkg/operator/controllers/machine/machine_controller_test.go index 86e6548cb05..06556a2f427 100644 --- a/pkg/operator/controllers/machine/machine_controller_test.go +++ b/pkg/operator/controllers/machine/machine_controller_test.go @@ -162,7 +162,7 @@ func TestMachineReconciler(t *testing.T) { }, } - clientFake := fake.NewClientBuilder().WithObjects(&baseCluster).WithObjects(tt.objects...).Build() + clientFake := fake.NewClientBuilder().WithObjects(&baseCluster).WithStatusSubresource(&baseCluster).WithObjects(tt.objects...).Build() r := &Reconciler{ log: logrus.NewEntry(logrus.StandardLogger()), diff --git a/pkg/operator/controllers/machinehealthcheck/machinehealthcheck_controller.go b/pkg/operator/controllers/machinehealthcheck/machinehealthcheck_controller.go index 89af57b8051..c77282714ae 100644 --- a/pkg/operator/controllers/machinehealthcheck/machinehealthcheck_controller.go +++ b/pkg/operator/controllers/machinehealthcheck/machinehealthcheck_controller.go @@ -20,7 +20,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" configv1 "github.com/openshift/api/config/v1" machinev1beta1 "github.com/openshift/api/machine/v1beta1" @@ -174,7 +173,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { Owns(&machinev1beta1.MachineHealthCheck{}). Owns(&monitoringv1.PrometheusRule{}). Watches( - &source.Kind{Type: &configv1.ClusterVersion{}}, + &configv1.ClusterVersion{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicates.ClusterVersion), ). diff --git a/pkg/operator/controllers/machinehealthcheck/machinehealthcheck_controller_test.go b/pkg/operator/controllers/machinehealthcheck/machinehealthcheck_controller_test.go index 623b7637a10..1dc71e9ce85 100644 --- a/pkg/operator/controllers/machinehealthcheck/machinehealthcheck_controller_test.go +++ b/pkg/operator/controllers/machinehealthcheck/machinehealthcheck_controller_test.go @@ -266,7 +266,7 @@ func TestMachineHealthCheckReconciler(t *testing.T) { clientBuilder := ctrlfake.NewClientBuilder() if tt.instance != nil { - clientBuilder = clientBuilder.WithObjects(tt.instance) + clientBuilder = clientBuilder.WithObjects(tt.instance).WithStatusSubresource(tt.instance) } if tt.clusterversion == nil { clientBuilder = clientBuilder.WithObjects(clusterversionDefault) diff --git a/pkg/operator/controllers/machineset/machineset_controller_test.go b/pkg/operator/controllers/machineset/machineset_controller_test.go index a24cfee23ee..6ef3a7f19d4 100644 --- a/pkg/operator/controllers/machineset/machineset_controller_test.go +++ b/pkg/operator/controllers/machineset/machineset_controller_test.go @@ -215,6 +215,7 @@ func TestReconciler(t *testing.T) { clientFake := ctrlfake.NewClientBuilder(). WithObjects(instance). + WithStatusSubresource(instance). WithObjects(tt.machinesets...). 
Build() diff --git a/pkg/operator/controllers/monitoring/monitoring_controller.go b/pkg/operator/controllers/monitoring/monitoring_controller.go index bb4297dd014..bf4dc7215bd 100644 --- a/pkg/operator/controllers/monitoring/monitoring_controller.go +++ b/pkg/operator/controllers/monitoring/monitoring_controller.go @@ -21,7 +21,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" "sigs.k8s.io/yaml" "github.com/Azure/ARO-RP/pkg/api" @@ -216,7 +215,7 @@ func (r *MonitoringReconciler) SetupWithManager(mgr ctrl.Manager) error { // https://github.com/kubernetes-sigs/controller-runtime/issues/1173 // equivalent to For(&v1.ConfigMap{}, ...)., but can't call For multiple times on one builder Watches( - &source.Kind{Type: &corev1.ConfigMap{}}, + &corev1.ConfigMap{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(monitoringConfigMapPredicate), ). diff --git a/pkg/operator/controllers/monitoring/monitoring_controller_test.go b/pkg/operator/controllers/monitoring/monitoring_controller_test.go index d5c7345cdad..716c633ab6e 100644 --- a/pkg/operator/controllers/monitoring/monitoring_controller_test.go +++ b/pkg/operator/controllers/monitoring/monitoring_controller_test.go @@ -166,7 +166,7 @@ somethingElse: clientBuilder := ctrlfake.NewClientBuilder().WithObjects(instance) if tt.configMap != nil { - clientBuilder.WithObjects(tt.configMap) + clientBuilder.WithObjects(tt.configMap).WithStatusSubresource(tt.configMap) } r := &MonitoringReconciler{ @@ -203,7 +203,6 @@ func TestReconcilePVC(t *testing.T) { defaultProgressing := utilconditions.ControllerDefaultProgressing(ControllerName) defaultDegraded := utilconditions.ControllerDefaultDegraded(ControllerName) defaultConditions := []operatorv1.OperatorCondition{defaultAvailable, defaultProgressing, defaultDegraded} - volumeMode := corev1.PersistentVolumeFilesystem tests := []struct { name string pvcs []client.Object @@ -271,12 +270,6 @@ func TestReconcilePVC(t *testing.T) { }, ResourceVersion: "1", }, - Spec: corev1.PersistentVolumeClaimSpec{ - VolumeMode: &volumeMode, - }, - Status: corev1.PersistentVolumeClaimStatus{ - Phase: corev1.ClaimPending, - }, }, }, wantConditions: defaultConditions, @@ -298,7 +291,7 @@ func TestReconcilePVC(t *testing.T) { }, } - clientFake := ctrlfake.NewClientBuilder().WithObjects(instance).WithObjects(tt.pvcs...).Build() + clientFake := ctrlfake.NewClientBuilder().WithObjects(instance).WithStatusSubresource(instance).WithObjects(tt.pvcs...).Build() r := &MonitoringReconciler{ AROController: base.AROController{ diff --git a/pkg/operator/controllers/muo/muo_controller.go b/pkg/operator/controllers/muo/muo_controller.go index 1bfb4504492..5cce390ad72 100644 --- a/pkg/operator/controllers/muo/muo_controller.go +++ b/pkg/operator/controllers/muo/muo_controller.go @@ -21,7 +21,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" "github.com/Azure/ARO-RP/pkg/operator" arov1alpha1 "github.com/Azure/ARO-RP/pkg/operator/apis/aro.openshift.io/v1alpha1" @@ -154,9 +153,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl. 
timeoutCtx, cancel := context.WithTimeout(ctx, r.readinessTimeout) defer cancel() - err = wait.PollImmediateUntil(r.readinessPollTime, func() (bool, error) { + err = wait.PollUntilContextCancel(timeoutCtx, r.readinessPollTime, true, func(ctx context.Context) (bool, error) { return r.deployer.IsReady(ctx, "openshift-managed-upgrade-operator", "managed-upgrade-operator") - }, timeoutCtx.Done()) + }) if err != nil { return reconcile.Result{}, fmt.Errorf("managed Upgrade Operator deployment timed out on Ready: %w", err) } @@ -175,7 +174,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { muoBuilder := ctrl.NewControllerManagedBy(mgr). For(&arov1alpha1.Cluster{}, builder.WithPredicates(predicate.And(predicates.AROCluster, predicate.GenerationChangedPredicate{}))). Watches( - &source.Kind{Type: &corev1.Secret{}}, + &corev1.Secret{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicates.PullSecret), ) diff --git a/pkg/operator/controllers/muo/muo_controller_test.go b/pkg/operator/controllers/muo/muo_controller_test.go index 967a4e54906..5f390f060e9 100644 --- a/pkg/operator/controllers/muo/muo_controller_test.go +++ b/pkg/operator/controllers/muo/muo_controller_test.go @@ -212,7 +212,7 @@ func TestMUOReconciler(t *testing.T) { md.EXPECT().CreateOrUpdate(gomock.Any(), cluster, expectedConfig).Return(nil) md.EXPECT().IsReady(gomock.Any(), gomock.Any(), gomock.Any()).Return(false, nil) }, - wantErr: "managed Upgrade Operator deployment timed out on Ready: timed out waiting for the condition", + wantErr: "managed Upgrade Operator deployment timed out on Ready: context deadline exceeded", }, { name: "managed, could not parse cluster version fails", diff --git a/pkg/operator/controllers/node/node_controller_test.go b/pkg/operator/controllers/node/node_controller_test.go index 231c980a5dc..6bbe5ddc04f 100644 --- a/pkg/operator/controllers/node/node_controller_test.go +++ b/pkg/operator/controllers/node/node_controller_test.go @@ -418,7 +418,7 @@ func TestReconciler(t *testing.T) { if len(tt.startConditions) > 0 { cluster.Status.Conditions = append(cluster.Status.Conditions, tt.startConditions...) } - clientBuilder = clientBuilder.WithObjects(cluster) + clientBuilder = clientBuilder.WithObjects(cluster).WithStatusSubresource(cluster) } if tt.nodeObject != nil { diff --git a/pkg/operator/controllers/pullsecret/pullsecret_controller.go b/pkg/operator/controllers/pullsecret/pullsecret_controller.go index 53008b5ecb6..4b3936d7d2e 100644 --- a/pkg/operator/controllers/pullsecret/pullsecret_controller.go +++ b/pkg/operator/controllers/pullsecret/pullsecret_controller.go @@ -27,7 +27,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" "github.com/Azure/ARO-RP/pkg/operator" arov1alpha1 "github.com/Azure/ARO-RP/pkg/operator/apis/aro.openshift.io/v1alpha1" @@ -114,12 +113,19 @@ func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl. 
return reconcile.Result{}, err } + err = r.Client.Status().Update(ctx, instance) + if err != nil { + r.Log.Error(err) + return reconcile.Result{}, err + } + err = r.Client.Update(ctx, instance) if err == nil { r.ClearConditions(ctx) } else { r.SetDegraded(ctx, err) } + return reconcile.Result{}, err } @@ -130,7 +136,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { // https://github.com/kubernetes-sigs/controller-runtime/issues/1173 // equivalent to For(&v1.Secret{})., but can't call For multiple times on one builder Watches( - &source.Kind{Type: &corev1.Secret{}}, + &corev1.Secret{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicate.Or(predicates.PullSecret, predicates.BackupPullSecret)), ). diff --git a/pkg/operator/controllers/pullsecret/pullsecret_controller_test.go b/pkg/operator/controllers/pullsecret/pullsecret_controller_test.go index dbb23659bc4..40e50d511f4 100644 --- a/pkg/operator/controllers/pullsecret/pullsecret_controller_test.go +++ b/pkg/operator/controllers/pullsecret/pullsecret_controller_test.go @@ -314,7 +314,7 @@ func TestPullSecretReconciler(t *testing.T) { t.Run(tt.name, func(t *testing.T) { ctx := context.Background() - clientFake := ctrlfake.NewClientBuilder().WithObjects(tt.instance).WithObjects(tt.secrets...).Build() + clientFake := ctrlfake.NewClientBuilder().WithObjects(tt.instance).WithStatusSubresource(tt.instance).WithObjects(tt.secrets...).Build() assert.NotNil(t, clientFake) r := NewReconciler(logrus.NewEntry(logrus.StandardLogger()), clientFake) diff --git a/pkg/operator/controllers/storageaccounts/storageaccount_controller.go b/pkg/operator/controllers/storageaccounts/storageaccount_controller.go index 505ce5e390d..19410997ecb 100644 --- a/pkg/operator/controllers/storageaccounts/storageaccount_controller.go +++ b/pkg/operator/controllers/storageaccounts/storageaccount_controller.go @@ -16,7 +16,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" machinev1beta1 "github.com/openshift/api/machine/v1beta1" @@ -116,8 +115,8 @@ func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl. func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&arov1alpha1.Cluster{}, builder.WithPredicates(predicate.And(predicates.AROCluster, predicate.GenerationChangedPredicate{}))). - Watches(&source.Kind{Type: &machinev1beta1.Machine{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicates.MachineRoleMaster)). // to reconcile on master machine replacement - Watches(&source.Kind{Type: &machinev1beta1.MachineSet{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicates.MachineRoleWorker)). // to reconcile on worker machines + Watches(&machinev1beta1.Machine{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicates.MachineRoleMaster)). // to reconcile on master machine replacement + Watches(&machinev1beta1.MachineSet{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicates.MachineRoleWorker)). // to reconcile on worker machines Named(ControllerName). 
Complete(r) } diff --git a/pkg/operator/controllers/subnets/subnets_controller.go b/pkg/operator/controllers/subnets/subnets_controller.go index 389cf51e77e..f37da1f6990 100644 --- a/pkg/operator/controllers/subnets/subnets_controller.go +++ b/pkg/operator/controllers/subnets/subnets_controller.go @@ -18,7 +18,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" machinev1beta1 "github.com/openshift/api/machine/v1beta1" @@ -154,8 +153,8 @@ func (r *reconcileManager) reconcileSubnets(ctx context.Context) error { func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&arov1alpha1.Cluster{}, builder.WithPredicates(predicate.And(predicates.AROCluster, predicate.GenerationChangedPredicate{}))). - Watches(&source.Kind{Type: &machinev1beta1.Machine{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicates.MachineRoleMaster)). // to reconcile on master machine replacement - Watches(&source.Kind{Type: &machinev1beta1.MachineSet{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicates.MachineRoleWorker)). // to reconcile on worker machines + Watches(&machinev1beta1.Machine{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicates.MachineRoleMaster)). // to reconcile on master machine replacement + Watches(&machinev1beta1.MachineSet{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicates.MachineRoleWorker)). // to reconcile on worker machines Named(ControllerName). Complete(r) } diff --git a/pkg/operator/controllers/workaround/systemreserved.go b/pkg/operator/controllers/workaround/systemreserved.go index 59ae2ceadc8..cac310bac57 100644 --- a/pkg/operator/controllers/workaround/systemreserved.go +++ b/pkg/operator/controllers/workaround/systemreserved.go @@ -80,6 +80,10 @@ func (sr *systemreserved) Ensure(ctx context.Context) error { // Step 2. Create KubeletConfig CRD with appropriate limits. 
kc := &mcv1.KubeletConfig{ + TypeMeta: metav1.TypeMeta{ + Kind: "KubeletConfig", + APIVersion: "machineconfiguration.openshift.io/v1", + }, ObjectMeta: metav1.ObjectMeta{ Name: kubeletConfigName, }, @@ -138,6 +142,10 @@ func (sr *systemreserved) Remove(ctx context.Context) error { } kc := &mcv1.KubeletConfig{ + TypeMeta: metav1.TypeMeta{ + Kind: "KubeletConfig", + APIVersion: "machineconfiguration.openshift.io/v1", + }, ObjectMeta: metav1.ObjectMeta{ Name: kubeletConfigName, }, diff --git a/pkg/operator/controllers/workaround/systemreserved_test.go b/pkg/operator/controllers/workaround/systemreserved_test.go index 74a41e68f96..16dcbf1260a 100644 --- a/pkg/operator/controllers/workaround/systemreserved_test.go +++ b/pkg/operator/controllers/workaround/systemreserved_test.go @@ -83,6 +83,10 @@ func TestSystemreservedEnsure(t *testing.T) { }, }, kc: &mcv1.KubeletConfig{ + TypeMeta: metav1.TypeMeta{ + Kind: "KubeletConfig", + APIVersion: "machineconfiguration.openshift.io/v1", + }, ObjectMeta: metav1.ObjectMeta{ Name: kubeletConfigName, ResourceVersion: "1", @@ -99,6 +103,10 @@ func TestSystemreservedEnsure(t *testing.T) { }, }, kc: &mcv1.KubeletConfig{ + TypeMeta: metav1.TypeMeta{ + Kind: "KubeletConfig", + APIVersion: "machineconfiguration.openshift.io/v1", + }, ObjectMeta: metav1.ObjectMeta{ Name: kubeletConfigName, ResourceVersion: "1", diff --git a/pkg/operator/controllers/workaround/workaround_controller.go b/pkg/operator/controllers/workaround/workaround_controller.go index 4fed24e6b0e..8d484f676e8 100644 --- a/pkg/operator/controllers/workaround/workaround_controller.go +++ b/pkg/operator/controllers/workaround/workaround_controller.go @@ -16,7 +16,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" configv1 "github.com/openshift/api/config/v1" @@ -94,7 +93,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl. func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&arov1alpha1.Cluster{}, builder.WithPredicates(predicate.And(predicates.AROCluster, predicate.GenerationChangedPredicate{}))). - Watches(&source.Kind{Type: &configv1.ClusterVersion{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicates.ClusterVersion)). + Watches(&configv1.ClusterVersion{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicates.ClusterVersion)). Named(ControllerName). 
Complete(r) } diff --git a/pkg/operator/deploy/deploy.go b/pkg/operator/deploy/deploy.go index d4814a45439..3c39b5fa760 100644 --- a/pkg/operator/deploy/deploy.go +++ b/pkg/operator/deploy/deploy.go @@ -431,7 +431,7 @@ func (o *operator) applyDeployment(ctx context.Context, resources []kruntime.Obj return err } - err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { + err = wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) { crd, err := o.extensionscli.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, acc.GetName(), metav1.GetOptions{}) if err != nil { return false, err diff --git a/pkg/operator/deploy/staticresources/aro.openshift.io_clusters.yaml b/pkg/operator/deploy/staticresources/aro.openshift.io_clusters.yaml index 600488399cb..da0963a44a7 100644 --- a/pkg/operator/deploy/staticresources/aro.openshift.io_clusters.yaml +++ b/pkg/operator/deploy/staticresources/aro.openshift.io_clusters.yaml @@ -135,6 +135,8 @@ spec: type: string type: type: string + required: + - type type: object type: array operatorVersion: diff --git a/pkg/operator/deploy/staticresources/preview.aro.openshift.io_previewfeatures.yaml b/pkg/operator/deploy/staticresources/preview.aro.openshift.io_previewfeatures.yaml index 126914adf53..2006e70372a 100644 --- a/pkg/operator/deploy/staticresources/preview.aro.openshift.io_previewfeatures.yaml +++ b/pkg/operator/deploy/staticresources/preview.aro.openshift.io_previewfeatures.yaml @@ -108,6 +108,8 @@ spec: type: string type: type: string + required: + - type type: object type: array operatorVersion: diff --git a/pkg/util/arm/deploy_test.go b/pkg/util/arm/deploy_test.go index 95e5b096f21..7bec5cc1e3f 100644 --- a/pkg/util/arm/deploy_test.go +++ b/pkg/util/arm/deploy_test.go @@ -5,6 +5,7 @@ package arm import ( "context" + "errors" "testing" mgmtfeatures "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features" @@ -73,7 +74,7 @@ func TestDeployARMTemplate(t *testing.T) { Return(activeErr) dc.EXPECT(). Wait(ctx, resourceGroup, deploymentName). 
- Return(wait.ErrWaitTimeout) + Return(wait.ErrorInterrupted(errors.New("timed out waiting for the condition"))) }, wantErr: "timed out waiting for the condition", }, diff --git a/pkg/util/azureclient/azuresdk/azcertificates/helpers.go b/pkg/util/azureclient/azuresdk/azcertificates/helpers.go index 1a855c7f97c..0438d0a96ec 100644 --- a/pkg/util/azureclient/azuresdk/azcertificates/helpers.go +++ b/pkg/util/azureclient/azuresdk/azcertificates/helpers.go @@ -106,14 +106,14 @@ func WaitForCertificateOperation(parent context.Context, operation func(ctx cont ctx, cancel := context.WithTimeout(parent, 15*time.Minute) defer cancel() - err := wait.PollImmediateUntil(10*time.Second, func() (bool, error) { + err := wait.PollUntilContextCancel(ctx, 10*time.Second, true, func(context.Context) (bool, error) { op, err := operation(ctx) if err != nil { return false, err } return checkOperation(op) - }, ctx.Done()) + }) return err } diff --git a/pkg/util/azureclient/mgmt/features/deployments_addons.go b/pkg/util/azureclient/mgmt/features/deployments_addons.go index 8f8947bedf4..d438f8d8a55 100644 --- a/pkg/util/azureclient/mgmt/features/deployments_addons.go +++ b/pkg/util/azureclient/mgmt/features/deployments_addons.go @@ -48,7 +48,7 @@ func (c *deploymentsClient) DeleteAndWait(ctx context.Context, resourceGroupName } func (c *deploymentsClient) Wait(ctx context.Context, resourceGroupName string, deploymentName string) error { - return wait.Poll(c.Client.PollingDelay, c.Client.PollingDuration, func() (bool, error) { + return wait.PollUntilContextTimeout(ctx, c.Client.PollingDelay, c.Client.PollingDuration, true, func(ctx context.Context) (bool, error) { deployment, err := c.DeploymentsClient.Get(ctx, resourceGroupName, deploymentName) if err != nil { return false, err diff --git a/pkg/util/clienthelper/clienthelper.go b/pkg/util/clienthelper/clienthelper.go index eaf50a49ef3..4a7ce4b53ef 100644 --- a/pkg/util/clienthelper/clienthelper.go +++ b/pkg/util/clienthelper/clienthelper.go @@ -251,6 +251,7 @@ func Merge(old, new client.Object) (client.Object, bool, string, error) { _, injectTrustBundle := new.ObjectMeta.Labels["config.openshift.io/inject-trusted-cabundle"] if injectTrustBundle { caBundle, ext := old.Data["ca-bundle.crt"] + new.Data = make(map[string]string) if ext { new.Data["ca-bundle.crt"] = caBundle } diff --git a/pkg/util/clienthelper/clienthelper_test.go b/pkg/util/clienthelper/clienthelper_test.go index 43f6889d8da..c483a61edb2 100644 --- a/pkg/util/clienthelper/clienthelper_test.go +++ b/pkg/util/clienthelper/clienthelper_test.go @@ -1558,6 +1558,10 @@ func TestGetOne(t *testing.T) { Namespace: "somewhere", ResourceVersion: "1", }, + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, StringData: map[string]string{ "secret": "squirrels", }, diff --git a/pkg/util/cluster/aad.go b/pkg/util/cluster/aad.go index dcccbf81e15..3f8923b518a 100644 --- a/pkg/util/cluster/aad.go +++ b/pkg/util/cluster/aad.go @@ -51,9 +51,9 @@ func (c *Cluster) createServicePrincipal(ctx context.Context, appID string) (str defer cancel() // NOTE: Do not override err with the error returned by - // wait.PollImmediateUntil. Doing this will not propagate the latest error + // wait.PollUntilContextCancel. 
Doing this will not propagate the latest error // to the user in case when wait exceeds the timeout - _ = wait.PollImmediateUntil(10*time.Second, func() (bool, error) { + _ = wait.PollUntilContextCancel(timeoutCtx, 10*time.Second, true, func(ctx context.Context) (bool, error) { requestBody := msgraph_models.NewServicePrincipal() requestBody.SetAppId(&appID) result, err = c.spGraphClient.ServicePrincipals().Post(ctx, requestBody, nil) @@ -73,7 +73,7 @@ func (c *Cluster) createServicePrincipal(ctx context.Context, appID string) (str return false, nil } return err == nil, err - }, timeoutCtx.Done()) + }) if err != nil { return "", err } diff --git a/pkg/util/cluster/cluster.go b/pkg/util/cluster/cluster.go index 0dcd5dac306..f9ff3a8f930 100644 --- a/pkg/util/cluster/cluster.go +++ b/pkg/util/cluster/cluster.go @@ -1053,11 +1053,11 @@ func (c *Cluster) fixupNSGs(ctx context.Context, vnetResourceGroup, clusterName // very occasionally c.securitygroups.List returns an empty list in // production. No idea why. Let's try retrying it... var nsgs []*sdknetwork.SecurityGroup - err := wait.PollImmediateUntil(10*time.Second, func() (bool, error) { + err := wait.PollUntilContextCancel(timeoutCtx, 10*time.Second, true, func(ctx context.Context) (bool, error) { var err error nsgs, err = c.securitygroups.List(ctx, "aro-"+clusterName, nil) return len(nsgs) > 0, err - }, timeoutCtx.Done()) + }) if err != nil { return err } @@ -1175,14 +1175,14 @@ func (c *Cluster) ensureResourceGroupDeleted(ctx context.Context, resourceGroupN timeoutCtx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() - return wait.PollImmediateUntil(5*time.Second, func() (bool, error) { + return wait.PollUntilContextCancel(timeoutCtx, 5*time.Second, true, func(ctx context.Context) (bool, error) { _, err := c.groups.Get(ctx, resourceGroupName) if azureerrors.ResourceGroupNotFound(err) { c.log.Infof("finished deleting resource group %s", resourceGroupName) return true, nil } return false, fmt.Errorf("failed to delete resource group %s with %s", resourceGroupName, err) - }, timeoutCtx.Done()) + }) } func (c *Cluster) deleteResourceGroup(ctx context.Context, resourceGroup string) error { diff --git a/pkg/util/conditions/condition_test.go b/pkg/util/conditions/condition_test.go index 7490e57c5e9..df21062d40f 100644 --- a/pkg/util/conditions/condition_test.go +++ b/pkg/util/conditions/condition_test.go @@ -196,7 +196,7 @@ func TestSetCondition(t *testing.T) { }, } { t.Run(tt.name, func(t *testing.T) { - clientFake := fake.NewClientBuilder().WithObjects(&tt.cluster).Build() + clientFake := fake.NewClientBuilder().WithObjects(&tt.cluster).WithStatusSubresource(&tt.cluster).Build() err := SetCondition(ctx, clientFake, tt.input, role) utilerror.AssertErrorMessage(t, err, tt.wantErr) diff --git a/pkg/util/dynamichelper/discovery/cache_fallback_discovery_client_test.go b/pkg/util/dynamichelper/discovery/cache_fallback_discovery_client_test.go index 8678dbf2731..5303433ca8e 100644 --- a/pkg/util/dynamichelper/discovery/cache_fallback_discovery_client_test.go +++ b/pkg/util/dynamichelper/discovery/cache_fallback_discovery_client_test.go @@ -11,7 +11,7 @@ import ( "strings" "testing" - openapi_v2 "github.com/google/gnostic/openapiv2" + openapi_v2 "github.com/google/gnostic-models/openapiv2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kversion "k8s.io/apimachinery/pkg/version" @@ -215,3 +215,7 @@ func (c *fakeDiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { func (c *fakeDiscoveryClient) OpenAPIV3() openapi.Client { 
panic("unimplemented") } + +func (c *fakeDiscoveryClient) WithLegacy() discovery.DiscoveryInterface { + return nil +} diff --git a/pkg/util/steps/condition.go b/pkg/util/steps/condition.go index 18d41078d44..c740ac09303 100644 --- a/pkg/util/steps/condition.go +++ b/pkg/util/steps/condition.go @@ -81,25 +81,25 @@ func (c conditionStep) run(ctx context.Context, log *logrus.Entry) error { // runner.pollInterval, until the condition returns true or timeoutCtx's // timeout fires. Errors from `f` are returned directly unless the error // is ErrWaitTimeout. Internal ErrWaitTimeout errors are wrapped to avoid - // confusion with wait.PollImmediateUntil's own behavior of returning + // confusion with wait.PollUntilContextCancel's own behavior of returning // ErrWaitTimeout when the condition is not met. - err := wait.PollImmediateUntil(pollInterval, func() (bool, error) { + err := wait.PollUntilContextCancel(timeoutCtx, pollInterval, true, func(ctx context.Context) (bool, error) { // We use the outer context, not the timeout context, as we do not want // to time out the condition function itself, only stop retrying once // timeoutCtx's timeout has fired. cnd, cndErr := c.f(ctx) - if errors.Is(cndErr, wait.ErrWaitTimeout) { + if errors.Is(cndErr, wait.ErrorInterrupted(errors.New("timed out waiting for the condition"))) { return cnd, fmt.Errorf("condition encountered internal timeout: %w", cndErr) } return cnd, cndErr - }, timeoutCtx.Done()) + }) if err != nil && !c.fail { log.Warnf("step %s failed but has configured 'fail=%t'. Continuing. Error: %s", c, c.fail, err.Error()) return nil } - if errors.Is(err, wait.ErrWaitTimeout) { + if errors.Is(err, wait.ErrorInterrupted(errors.New("timed out waiting for the condition"))) { return enrichConditionTimeoutError(c.f, err) } return err diff --git a/pkg/util/steps/refreshing.go b/pkg/util/steps/refreshing.go index c569ef6d4b4..7db94b5fb84 100644 --- a/pkg/util/steps/refreshing.go +++ b/pkg/util/steps/refreshing.go @@ -67,7 +67,7 @@ func (s *authorizationRefreshingActionStep) run(ctx context.Context, log *logrus // Propagate the latest authorization error to the user, // rather than timeout error from PollImmediateUntil. - _ = wait.PollImmediateUntil(pollInterval, func() (bool, error) { + _ = wait.PollUntilContextCancel(timeoutCtx, pollInterval, true, func(ctx context.Context) (bool, error) { // We use the outer context, not the timeout context, as we do not want // to time out the condition function itself, only stop retrying once // timeoutCtx's timeout has fired. 
@@ -91,7 +91,7 @@ func (s *authorizationRefreshingActionStep) run(ctx context.Context, log *logrus log.Printf("non-auth error, giving up: %v", err) } return true, err - }, timeoutCtx.Done()) + }) return CreateActionableError(err) } diff --git a/pkg/util/steps/runner_test.go b/pkg/util/steps/runner_test.go index 1aa670ffce8..28a62f15085 100644 --- a/pkg/util/steps/runner_test.go +++ b/pkg/util/steps/runner_test.go @@ -14,8 +14,6 @@ import ( "github.com/sirupsen/logrus" "go.uber.org/mock/gomock" - "k8s.io/apimachinery/pkg/util/wait" - "github.com/Azure/ARO-RP/pkg/util/graph/graphsdk/models/odataerrors" "github.com/Azure/ARO-RP/pkg/util/pointerutils" utilerror "github.com/Azure/ARO-RP/test/util/error" @@ -42,7 +40,7 @@ func timingOutCondition(ctx context.Context) (bool, error) { return false, nil } func internalTimeoutCondition(ctx context.Context) (bool, error) { - return false, wait.ErrWaitTimeout + return false, context.DeadlineExceeded } func currentTimeFunc() time.Time { @@ -198,7 +196,7 @@ func TestStepRunner(t *testing.T) { "level": gomega.Equal(logrus.InfoLevel), }, { - "msg": gomega.Equal("step [Condition pkg/util/steps.alwaysFalseCondition, timeout 50ms] failed but has configured 'fail=false'. Continuing. Error: timed out waiting for the condition"), + "msg": gomega.Equal("step [Condition pkg/util/steps.alwaysFalseCondition, timeout 50ms] failed but has configured 'fail=false'. Continuing. Error: context deadline exceeded"), "level": gomega.Equal(logrus.WarnLevel), }, { @@ -231,11 +229,11 @@ func TestStepRunner(t *testing.T) { "level": gomega.Equal(logrus.InfoLevel), }, { - "msg": gomega.Equal("step [Condition pkg/util/steps.timingOutCondition, timeout 50ms] encountered error: timed out waiting for the condition"), + "msg": gomega.Equal("step [Condition pkg/util/steps.timingOutCondition, timeout 50ms] encountered error: context deadline exceeded"), "level": gomega.Equal(logrus.ErrorLevel), }, }, - wantErr: "timed out waiting for the condition", + wantErr: "context deadline exceeded", }, { name: "A Condition that returns a timeout error causes a different failure from a timed out Condition", @@ -261,11 +259,11 @@ func TestStepRunner(t *testing.T) { "level": gomega.Equal(logrus.InfoLevel), }, { - "msg": gomega.Equal("step [Condition pkg/util/steps.internalTimeoutCondition, timeout 50ms] encountered error: condition encountered internal timeout: timed out waiting for the condition"), + "msg": gomega.Equal("step [Condition pkg/util/steps.internalTimeoutCondition, timeout 50ms] encountered error: context deadline exceeded"), "level": gomega.Equal(logrus.ErrorLevel), }, }, - wantErr: "condition encountered internal timeout: timed out waiting for the condition", + wantErr: "context deadline exceeded", }, { name: "A Condition that does not return true in the timeout time causes a failure", @@ -286,11 +284,11 @@ func TestStepRunner(t *testing.T) { "level": gomega.Equal(logrus.InfoLevel), }, { - "msg": gomega.Equal("step [Condition pkg/util/steps.alwaysFalseCondition, timeout 50ms] encountered error: timed out waiting for the condition"), + "msg": gomega.Equal("step [Condition pkg/util/steps.alwaysFalseCondition, timeout 50ms] encountered error: context deadline exceeded"), "level": gomega.Equal(logrus.ErrorLevel), }, }, - wantErr: "timed out waiting for the condition", + wantErr: "context deadline exceeded", }, } { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/validate/dynamic/diskencryptionset.go b/pkg/validate/dynamic/diskencryptionset.go index 72024aa7004..fc76df7379b 100644 --- 
a/pkg/validate/dynamic/diskencryptionset.go +++ b/pkg/validate/dynamic/diskencryptionset.go @@ -12,8 +12,6 @@ import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "k8s.io/apimachinery/pkg/util/wait" - "github.com/Azure/ARO-RP/pkg/api" ) @@ -79,15 +77,17 @@ func (dv *dynamic) validateDiskEncryptionSetPermissions(ctx context.Context, des "Microsoft.Compute/diskEncryptionSets/read", }) - if err == wait.ErrWaitTimeout { - if dv.authorizerType == AuthorizerWorkloadIdentity { - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidWorkloadIdentityPermissions, path, fmt.Sprintf("The %s platform managed identity does not have required permissions on disk encryption set '%s'.", *operatorName, desr.String())) + if err != nil { + if err.Error() == context.Canceled.Error() { + if dv.authorizerType == AuthorizerWorkloadIdentity { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidWorkloadIdentityPermissions, path, fmt.Sprintf("The %s platform managed identity does not have required permissions on disk encryption set '%s'.", *operatorName, desr.String())) + } + return api.NewCloudError(http.StatusBadRequest, errCode, path, fmt.Sprintf("The %s service principal does not have Reader permission on disk encryption set '%s'.", dv.authorizerType, desr.String())) + } + if detailedErr, ok := err.(autorest.DetailedError); ok && + detailedErr.StatusCode == http.StatusNotFound { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidLinkedDiskEncryptionSet, path, fmt.Sprintf("The disk encryption set '%s' could not be found.", desr.String())) } - return api.NewCloudError(http.StatusBadRequest, errCode, path, fmt.Sprintf("The %s service principal does not have Reader permission on disk encryption set '%s'.", dv.authorizerType, desr.String())) - } - if detailedErr, ok := err.(autorest.DetailedError); ok && - detailedErr.StatusCode == http.StatusNotFound { - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidLinkedDiskEncryptionSet, path, fmt.Sprintf("The disk encryption set '%s' could not be found.", desr.String())) } return err diff --git a/pkg/validate/dynamic/dynamic.go b/pkg/validate/dynamic/dynamic.go index 02f2516fa0f..8c2128286f9 100644 --- a/pkg/validate/dynamic/dynamic.go +++ b/pkg/validate/dynamic/dynamic.go @@ -275,32 +275,32 @@ func (dv *dynamic) validateVnetPermissions(ctx context.Context, vnet azure.Resou vnet.String(), )) } - } - - if err == wait.ErrWaitTimeout { - return noPermissionsErr - } - if detailedErr, ok := err.(autorest.DetailedError); ok { - dv.log.Error(detailedErr) - switch detailedErr.StatusCode { - case http.StatusNotFound: - return api.NewCloudError( - http.StatusBadRequest, - api.CloudErrorCodeInvalidLinkedVNet, - "", - fmt.Sprintf( - errMsgVnetNotFound, - vnet.String(), - )) - case http.StatusForbidden: - noPermissionsErr.Message = fmt.Sprintf( - "%s\nOriginal error message: %s", - noPermissionsErr.Message, - detailedErr.Message, - ) + if err.Error() == context.Canceled.Error() { return noPermissionsErr } + if detailedErr, ok := err.(autorest.DetailedError); ok { + dv.log.Error(detailedErr) + + switch detailedErr.StatusCode { + case http.StatusNotFound: + return api.NewCloudError( + http.StatusBadRequest, + api.CloudErrorCodeInvalidLinkedVNet, + "", + fmt.Sprintf( + errMsgVnetNotFound, + vnet.String(), + )) + case http.StatusForbidden: + noPermissionsErr.Message = fmt.Sprintf( + "%s\nOriginal error message: %s", + noPermissionsErr.Message, + 
detailedErr.Message, + ) + return noPermissionsErr + } + } } return err } @@ -344,38 +344,40 @@ func (dv *dynamic) validateRouteTablePermissions(ctx context.Context, s Subnet) "Microsoft.Network/routeTables/read", "Microsoft.Network/routeTables/write", }) - if err == wait.ErrWaitTimeout { - if dv.authorizerType == AuthorizerWorkloadIdentity { + if err != nil { + if err.Error() == context.Canceled.Error() { + if dv.authorizerType == AuthorizerWorkloadIdentity { + return api.NewCloudError( + http.StatusBadRequest, + api.CloudErrorCodeInvalidWorkloadIdentityPermissions, + "", + fmt.Sprintf( + errMsgWIHasNoRequiredPermissionsOnRT, + *operatorName, + rtID, + )) + } return api.NewCloudError( http.StatusBadRequest, - api.CloudErrorCodeInvalidWorkloadIdentityPermissions, + errCode, "", fmt.Sprintf( - errMsgWIHasNoRequiredPermissionsOnRT, - *operatorName, + errMsgSPHasNoRequiredPermissionsOnRT, + dv.authorizerType, + rtID, + )) + } + if detailedErr, ok := err.(autorest.DetailedError); ok && + detailedErr.StatusCode == http.StatusNotFound { + return api.NewCloudError( + http.StatusBadRequest, + api.CloudErrorCodeInvalidLinkedRouteTable, + "", + fmt.Sprintf( + errMsgRTNotFound, rtID, )) } - return api.NewCloudError( - http.StatusBadRequest, - errCode, - "", - fmt.Sprintf( - errMsgSPHasNoRequiredPermissionsOnRT, - dv.authorizerType, - rtID, - )) - } - if detailedErr, ok := err.(autorest.DetailedError); ok && - detailedErr.StatusCode == http.StatusNotFound { - return api.NewCloudError( - http.StatusBadRequest, - api.CloudErrorCodeInvalidLinkedRouteTable, - "", - fmt.Sprintf( - errMsgRTNotFound, - rtID, - )) } return err } @@ -423,38 +425,40 @@ func (dv *dynamic) validateNatGatewayPermissions(ctx context.Context, s Subnet) "Microsoft.Network/natGateways/read", "Microsoft.Network/natGateways/write", }) - if err == wait.ErrWaitTimeout { - if dv.authorizerType == AuthorizerWorkloadIdentity { + if err != nil { + if err.Error() == context.Canceled.Error() { + if dv.authorizerType == AuthorizerWorkloadIdentity { + return api.NewCloudError( + http.StatusBadRequest, + api.CloudErrorCodeInvalidWorkloadIdentityPermissions, + "", + fmt.Sprintf( + errMsgWIHasNoRequiredPermissionsOnNatGW, + *operatorName, + ngID, + )) + } return api.NewCloudError( http.StatusBadRequest, - api.CloudErrorCodeInvalidWorkloadIdentityPermissions, + errCode, "", fmt.Sprintf( - errMsgWIHasNoRequiredPermissionsOnNatGW, - *operatorName, + errMsgSPHasNoRequiredPermissionsOnNatGW, + dv.authorizerType, + ngID, + )) + } + if detailedErr, ok := err.(autorest.DetailedError); ok && + detailedErr.StatusCode == http.StatusNotFound { + return api.NewCloudError( + http.StatusBadRequest, + api.CloudErrorCodeInvalidLinkedNatGateway, + "", + fmt.Sprintf( + errMsgNatGWNotFound, ngID, )) } - return api.NewCloudError( - http.StatusBadRequest, - errCode, - "", - fmt.Sprintf( - errMsgSPHasNoRequiredPermissionsOnNatGW, - dv.authorizerType, - ngID, - )) - } - if detailedErr, ok := err.(autorest.DetailedError); ok && - detailedErr.StatusCode == http.StatusNotFound { - return api.NewCloudError( - http.StatusBadRequest, - api.CloudErrorCodeInvalidLinkedNatGateway, - "", - fmt.Sprintf( - errMsgNatGWNotFound, - ngID, - )) } return err } @@ -469,7 +473,7 @@ func (dv *dynamic) validateActionsByOID(ctx context.Context, r *azure.Resource, c := closure{dv: dv, ctx: ctx, resource: r, actions: actions, oid: oid} - return wait.PollImmediateUntil(30*time.Second, c.usingCheckAccessV2, timeoutCtx.Done()) + return wait.PollUntilContextCancel(timeoutCtx, 30*time.Second, true, 
c.usingCheckAccessV2) } // closure is the closure used in PollImmediateUntil's ConditionalFunc @@ -501,7 +505,7 @@ func (c *closure) checkAccessAuthReqToken() error { } // usingCheckAccessV2 uses the new RBAC checkAccessV2 API -func (c closure) usingCheckAccessV2() (result bool, err error) { +func (c closure) usingCheckAccessV2(ctx context.Context) (result bool, err error) { c.dv.log.Info("validateActions with CheckAccessV2") var authReq *client.AuthorizationRequest @@ -891,33 +895,34 @@ func (dv *dynamic) validateNSGPermissions(ctx context.Context, nsgID string) err "Microsoft.Network/networkSecurityGroups/join/action", }) - if err == wait.ErrWaitTimeout { - errCode := api.CloudErrorCodeInvalidResourceProviderPermissions - if dv.authorizerType == AuthorizerClusterServicePrincipal { - errCode = api.CloudErrorCodeInvalidServicePrincipalPermissions - } else if dv.authorizerType == AuthorizerWorkloadIdentity { + if err != nil { + if err.Error() == context.Canceled.Error() { + errCode := api.CloudErrorCodeInvalidResourceProviderPermissions + if dv.authorizerType == AuthorizerClusterServicePrincipal { + errCode = api.CloudErrorCodeInvalidServicePrincipalPermissions + } else if dv.authorizerType == AuthorizerWorkloadIdentity { + return api.NewCloudError( + http.StatusBadRequest, + api.CloudErrorCodeInvalidWorkloadIdentityPermissions, + "", + fmt.Sprintf( + errMsgWIHasNoRequiredPermissionsOnNSG, + *operatorName, + nsgID, + )) + } return api.NewCloudError( http.StatusBadRequest, - api.CloudErrorCodeInvalidWorkloadIdentityPermissions, + errCode, "", fmt.Sprintf( - errMsgWIHasNoRequiredPermissionsOnNSG, - *operatorName, + errMsgSPHasNoRequiredPermissionsOnNSG, + dv.authorizerType, + *dv.appID, nsgID, )) } - return api.NewCloudError( - http.StatusBadRequest, - errCode, - "", - fmt.Sprintf( - errMsgSPHasNoRequiredPermissionsOnNSG, - dv.authorizerType, - *dv.appID, - nsgID, - )) } - return err } diff --git a/pkg/validate/dynamic/platformworkloadidentityprofile_test.go b/pkg/validate/dynamic/platformworkloadidentityprofile_test.go index 92d6c5c6f50..a52cd3d892f 100644 --- a/pkg/validate/dynamic/platformworkloadidentityprofile_test.go +++ b/pkg/validate/dynamic/platformworkloadidentityprofile_test.go @@ -201,7 +201,7 @@ func TestValidateClusterUserAssignedIdentity(t *testing.T) { cancel() }).Return(&msiNotAllowedActions, nil).AnyTimes() }, - wantErr: "timed out waiting for the condition", + wantErr: "context canceled", }, { name: "Fail - An action is missing for a platform identity", @@ -221,7 +221,7 @@ func TestValidateClusterUserAssignedIdentity(t *testing.T) { cancel() }).Return(&msiActionMissing, nil).AnyTimes() }, - wantErr: "timed out waiting for the condition", + wantErr: "context canceled", }, } { t.Run(tt.name, func(t *testing.T) { diff --git a/test/e2e/cluster.go b/test/e2e/cluster.go index 85323810a46..b64b8358f20 100644 --- a/test/e2e/cluster.go +++ b/test/e2e/cluster.go @@ -331,7 +331,7 @@ func createStatefulSet(ctx context.Context, cli kubernetes.Interface, namespace, corev1.ReadWriteOnce, }, StorageClassName: to.StringPtr(storageClass), - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: pvcStorage, }, diff --git a/test/util/clienthelper/hookclient.go b/test/util/clienthelper/hookclient.go index 2cfceacfe7a..68a584768d7 100644 --- a/test/util/clienthelper/hookclient.go +++ b/test/util/clienthelper/hookclient.go @@ -8,6 +8,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" 
"k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -31,6 +32,9 @@ type HookingClient struct { postPatchHook []hookFunc } +type HookingSubResourceClient struct { +} + var _ client.Client = &HookingClient{} // NewHookingClient creates a client which allows hooks to be added before and @@ -105,7 +109,7 @@ func (c *HookingClient) WithPrePatchHook(f hookFunc) *HookingClient { } // See [sigs.k8s.io/controller-runtime/pkg/client.Reader.Get] -func (c *HookingClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error { +func (c *HookingClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { for _, h := range c.preGetHook { err := h(key, obj) if err != nil { @@ -160,6 +164,18 @@ func (c *HookingClient) Create(ctx context.Context, obj client.Object, opts ...c return nil } +func (c *HookingClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return c.f.GroupVersionKindFor(obj) +} + +func (c *HookingClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return c.f.IsObjectNamespaced(obj) +} + +func (c *HookingClient) SubResource(input string) client.SubResourceClient { + return c.f.SubResource(input) +} + // See [sigs.k8s.io/controller-runtime/pkg/client.Writer.Delete] func (c *HookingClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { for _, h := range c.preDeleteHook { diff --git a/test/util/kubeadminkubeconfig/kubeconfig.go b/test/util/kubeadminkubeconfig/kubeconfig.go index 54d46d98013..94908dc0227 100644 --- a/test/util/kubeadminkubeconfig/kubeconfig.go +++ b/test/util/kubeadminkubeconfig/kubeconfig.go @@ -48,7 +48,7 @@ func Get(ctx context.Context, log *logrus.Entry, env env.Core, authorizer autore timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) defer cancel() - err = wait.PollImmediateUntil(time.Second, func() (bool, error) { + err = wait.PollUntilContextCancel(timeoutCtx, time.Second, true, func(ctx context.Context) (bool, error) { token, err = getAuthorizedToken(ctx, tokenURL, *creds.KubeadminUsername, *creds.KubeadminPassword) if err != nil { log.Print(err) @@ -56,7 +56,7 @@ func Get(ctx context.Context, log *logrus.Entry, env env.Core, authorizer autore } return true, nil - }, timeoutCtx.Done()) + }) if err != nil { return nil, err } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go deleted file mode 100644 index a4e2079e656..00000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "sync" - -var ATNInvalidAltNumber int - -type ATN struct { - // DecisionToState is the decision points for all rules, subrules, optional - // blocks, ()+, ()*, etc. Used to build DFA predictors for them. - DecisionToState []DecisionState - - // grammarType is the ATN type and is used for deserializing ATNs from strings. - grammarType int - - // lexerActions is referenced by action transitions in the ATN for lexer ATNs. - lexerActions []LexerAction - - // maxTokenType is the maximum value for any symbol recognized by a transition in the ATN. 
- maxTokenType int - - modeNameToStartState map[string]*TokensStartState - - modeToStartState []*TokensStartState - - // ruleToStartState maps from rule index to starting state number. - ruleToStartState []*RuleStartState - - // ruleToStopState maps from rule index to stop state number. - ruleToStopState []*RuleStopState - - // ruleToTokenType maps the rule index to the resulting token type for lexer - // ATNs. For parser ATNs, it maps the rule index to the generated bypass token - // type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was - // specified, and otherwise is nil. - ruleToTokenType []int - - states []ATNState - - mu sync.Mutex - stateMu sync.RWMutex - edgeMu sync.RWMutex -} - -func NewATN(grammarType int, maxTokenType int) *ATN { - return &ATN{ - grammarType: grammarType, - maxTokenType: maxTokenType, - modeNameToStartState: make(map[string]*TokensStartState), - } -} - -// NextTokensInContext computes the set of valid tokens that can occur starting -// in state s. If ctx is nil, the set of tokens will not include what can follow -// the rule surrounding s. In other words, the set will be restricted to tokens -// reachable staying within the rule of s. -func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet { - return NewLL1Analyzer(a).Look(s, nil, ctx) -} - -// NextTokensNoContext computes the set of valid tokens that can occur starting -// in s and staying in same rule. Token.EPSILON is in set if we reach end of -// rule. -func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet { - a.mu.Lock() - defer a.mu.Unlock() - iset := s.GetNextTokenWithinRule() - if iset == nil { - iset = a.NextTokensInContext(s, nil) - iset.readOnly = true - s.SetNextTokenWithinRule(iset) - } - return iset -} - -func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet { - if ctx == nil { - return a.NextTokensNoContext(s) - } - - return a.NextTokensInContext(s, ctx) -} - -func (a *ATN) addState(state ATNState) { - if state != nil { - state.SetATN(a) - state.SetStateNumber(len(a.states)) - } - - a.states = append(a.states, state) -} - -func (a *ATN) removeState(state ATNState) { - a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice -} - -func (a *ATN) defineDecisionState(s DecisionState) int { - a.DecisionToState = append(a.DecisionToState, s) - s.setDecision(len(a.DecisionToState) - 1) - - return s.getDecision() -} - -func (a *ATN) getDecisionState(decision int) DecisionState { - if len(a.DecisionToState) == 0 { - return nil - } - - return a.DecisionToState[decision] -} - -// getExpectedTokens computes the set of input symbols which could follow ATN -// state number stateNumber in the specified full parse context ctx and returns -// the set of potentially valid input symbols which could follow the specified -// state in the specified context. This method considers the complete parser -// context, but does not evaluate semantic predicates (i.e. all predicates -// encountered during the calculation are assumed true). If a path in the ATN -// exists from the starting state to the RuleStopState of the outermost context -// without Matching any symbols, Token.EOF is added to the returned set. -// -// A nil ctx defaults to ParserRuleContext.EMPTY. -// -// It panics if the ATN does not contain state stateNumber. 
-func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet { - if stateNumber < 0 || stateNumber >= len(a.states) { - panic("Invalid state number.") - } - - s := a.states[stateNumber] - following := a.NextTokens(s, nil) - - if !following.contains(TokenEpsilon) { - return following - } - - expected := NewIntervalSet() - - expected.addSet(following) - expected.removeOne(TokenEpsilon) - - for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { - invokingState := a.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - - following = a.NextTokens(rt.(*RuleTransition).followState, nil) - expected.addSet(following) - expected.removeOne(TokenEpsilon) - ctx = ctx.GetParent().(RuleContext) - } - - if following.contains(TokenEpsilon) { - expected.addOne(TokenEOF) - } - - return expected -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go deleted file mode 100644 index 2ef74926ecb..00000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just -// 3 kinds of errors: prediction errors, failed predicate errors, and -// mismatched input errors. In each case, the parser knows where it is -// in the input, where it is in the ATN, the rule invocation stack, -// and what kind of problem occurred. - -type RecognitionException interface { - GetOffendingToken() Token - GetMessage() string - GetInputStream() IntStream -} - -type BaseRecognitionException struct { - message string - recognizer Recognizer - offendingToken Token - offendingState int - ctx RuleContext - input IntStream -} - -func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException { - - // todo - // Error.call(this) - // - // if (!!Error.captureStackTrace) { - // Error.captureStackTrace(this, RecognitionException) - // } else { - // stack := NewError().stack - // } - // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int - - t := new(BaseRecognitionException) - - t.message = message - t.recognizer = recognizer - t.input = input - t.ctx = ctx - // The current {@link Token} when an error occurred. Since not all streams - // support accessing symbols by index, we have to track the {@link Token} - // instance itself. - t.offendingToken = nil - // Get the ATN state number the parser was in at the time the error - // occurred. For {@link NoViableAltException} and - // {@link LexerNoViableAltException} exceptions, this is the - // {@link DecisionState} number. For others, it is the state whose outgoing - // edge we couldn't Match. - t.offendingState = -1 - if t.recognizer != nil { - t.offendingState = t.recognizer.GetState() - } - - return t -} - -func (b *BaseRecognitionException) GetMessage() string { - return b.message -} - -func (b *BaseRecognitionException) GetOffendingToken() Token { - return b.offendingToken -} - -func (b *BaseRecognitionException) GetInputStream() IntStream { - return b.input -} - -//
If the state number is not known, b method returns -1.
- -// -// Gets the set of input symbols which could potentially follow the -// previously Matched symbol at the time b exception was panicn. -// -//If the set of expected tokens is not known and could not be computed, -// b method returns {@code nil}.
-// -// @return The set of token types that could potentially follow the current -// state in the ATN, or {@code nil} if the information is not available. -// / -func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { - if b.recognizer != nil { - return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) - } - - return nil -} - -func (b *BaseRecognitionException) String() string { - return b.message -} - -type LexerNoViableAltException struct { - *BaseRecognitionException - - startIndex int - deadEndConfigs ATNConfigSet -} - -func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException { - - l := new(LexerNoViableAltException) - - l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil) - - l.startIndex = startIndex - l.deadEndConfigs = deadEndConfigs - - return l -} - -func (l *LexerNoViableAltException) String() string { - symbol := "" - if l.startIndex >= 0 && l.startIndex < l.input.Size() { - symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex)) - } - return "LexerNoViableAltException" + symbol -} - -type NoViableAltException struct { - *BaseRecognitionException - - startToken Token - offendingToken Token - ctx ParserRuleContext - deadEndConfigs ATNConfigSet -} - -// Indicates that the parser could not decide which of two or more paths -// to take based upon the remaining input. It tracks the starting token -// of the offending input and also knows where the parser was -// in the various paths when the error. Reported by ReportNoViableAlternative() -// -func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { - - if ctx == nil { - ctx = recognizer.GetParserRuleContext() - } - - if offendingToken == nil { - offendingToken = recognizer.GetCurrentToken() - } - - if startToken == nil { - startToken = recognizer.GetCurrentToken() - } - - if input == nil { - input = recognizer.GetInputStream().(TokenStream) - } - - n := new(NoViableAltException) - n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) - - // Which configurations did we try at input.Index() that couldn't Match - // input.LT(1)?// - n.deadEndConfigs = deadEndConfigs - // The token object at the start index the input stream might - // not be buffering tokens so get a reference to it. (At the - // time the error occurred, of course the stream needs to keep a - // buffer all of the tokens but later we might not have access to those.) - n.startToken = startToken - n.offendingToken = offendingToken - - return n -} - -type InputMisMatchException struct { - *BaseRecognitionException -} - -// This signifies any kind of mismatched input exceptions such as -// when the current input does not Match the expected token. -// -func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { - - i := new(InputMisMatchException) - i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) - - i.offendingToken = recognizer.GetCurrentToken() - - return i - -} - -// A semantic predicate failed during validation. Validation of predicates -// occurs when normally parsing the alternative just like Matching a token. -// Disambiguating predicate evaluation occurs when we test a predicate during -// prediction. 
- -type FailedPredicateException struct { - *BaseRecognitionException - - ruleIndex int - predicateIndex int - predicate string -} - -func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { - - f := new(FailedPredicateException) - - f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) - - s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] - trans := s.GetTransitions()[0] - if trans2, ok := trans.(*PredicateTransition); ok { - f.ruleIndex = trans2.ruleIndex - f.predicateIndex = trans2.predIndex - } else { - f.ruleIndex = 0 - f.predicateIndex = 0 - } - f.predicate = predicate - f.offendingToken = recognizer.GetCurrentToken() - - return f -} - -func (f *FailedPredicateException) formatMessage(predicate, message string) string { - if message != "" { - return message - } - - return "failed predicate: {" + predicate + "}?" -} - -type ParseCancellationException struct { -} - -func NewParseCancellationException() *ParseCancellationException { - // Error.call(this) - // Error.captureStackTrace(this, ParseCancellationException) - return new(ParseCancellationException) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go deleted file mode 100644 index 2ab2f560521..00000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go +++ /dev/null @@ -1,718 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -type Parser interface { - Recognizer - - GetInterpreter() *ParserATNSimulator - - GetTokenStream() TokenStream - GetTokenFactory() TokenFactory - GetParserRuleContext() ParserRuleContext - SetParserRuleContext(ParserRuleContext) - Consume() Token - GetParseListeners() []ParseTreeListener - - GetErrorHandler() ErrorStrategy - SetErrorHandler(ErrorStrategy) - GetInputStream() IntStream - GetCurrentToken() Token - GetExpectedTokens() *IntervalSet - NotifyErrorListeners(string, Token, RecognitionException) - IsExpectedToken(int) bool - GetPrecedence() int - GetRuleInvocationStack(ParserRuleContext) []string -} - -type BaseParser struct { - *BaseRecognizer - - Interpreter *ParserATNSimulator - BuildParseTrees bool - - input TokenStream - errHandler ErrorStrategy - precedenceStack IntStack - ctx ParserRuleContext - - tracer *TraceListener - parseListeners []ParseTreeListener - _SyntaxErrors int -} - -// p.is all the parsing support code essentially most of it is error -// recovery stuff.// -func NewBaseParser(input TokenStream) *BaseParser { - - p := new(BaseParser) - - p.BaseRecognizer = NewBaseRecognizer() - - // The input stream. - p.input = nil - // The error handling strategy for the parser. The default value is a new - // instance of {@link DefaultErrorStrategy}. - p.errHandler = NewDefaultErrorStrategy() - p.precedenceStack = make([]int, 0) - p.precedenceStack.Push(0) - // The {@link ParserRuleContext} object for the currently executing rule. - // p.is always non-nil during the parsing process. - p.ctx = nil - // Specifies whether or not the parser should construct a parse tree during - // the parsing process. The default value is {@code true}. 
- p.BuildParseTrees = true - // When {@link //setTrace}{@code (true)} is called, a reference to the - // {@link TraceListener} is stored here so it can be easily removed in a - // later call to {@link //setTrace}{@code (false)}. The listener itself is - // implemented as a parser listener so p.field is not directly used by - // other parser methods. - p.tracer = nil - // The list of {@link ParseTreeListener} listeners registered to receive - // events during the parse. - p.parseListeners = nil - // The number of syntax errors Reported during parsing. p.value is - // incremented each time {@link //NotifyErrorListeners} is called. - p._SyntaxErrors = 0 - p.SetInputStream(input) - - return p -} - -// p.field maps from the serialized ATN string to the deserialized {@link -// ATN} with -// bypass alternatives. -// -// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() -// -var bypassAltsAtnCache = make(map[string]int) - -// reset the parser's state// -func (p *BaseParser) reset() { - if p.input != nil { - p.input.Seek(0) - } - p.errHandler.reset(p) - p.ctx = nil - p._SyntaxErrors = 0 - p.SetTrace(nil) - p.precedenceStack = make([]int, 0) - p.precedenceStack.Push(0) - if p.Interpreter != nil { - p.Interpreter.reset() - } -} - -func (p *BaseParser) GetErrorHandler() ErrorStrategy { - return p.errHandler -} - -func (p *BaseParser) SetErrorHandler(e ErrorStrategy) { - p.errHandler = e -} - -// Match current input symbol against {@code ttype}. If the symbol type -// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are -// called to complete the Match process. -// -//If the symbol type does not Match, -// {@link ANTLRErrorStrategy//recoverInline} is called on the current error -// strategy to attempt recovery. If {@link //getBuildParseTree} is -// {@code true} and the token index of the symbol returned by -// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to -// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
-// -// @param ttype the token type to Match -// @return the Matched symbol -// @panics RecognitionException if the current input symbol did not Match -// {@code ttype} and the error strategy could not recover from the -// mismatched symbol - -func (p *BaseParser) Match(ttype int) Token { - - t := p.GetCurrentToken() - - if t.GetTokenType() == ttype { - p.errHandler.ReportMatch(p) - p.Consume() - } else { - t = p.errHandler.RecoverInline(p) - if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol - p.ctx.AddErrorNode(t) - } - } - - return t -} - -// Match current input symbol as a wildcard. If the symbol type Matches -// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch} -// and {@link //consume} are called to complete the Match process. -// -//If the symbol type does not Match, -// {@link ANTLRErrorStrategy//recoverInline} is called on the current error -// strategy to attempt recovery. If {@link //getBuildParseTree} is -// {@code true} and the token index of the symbol returned by -// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to -// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
-// -// @return the Matched symbol -// @panics RecognitionException if the current input symbol did not Match -// a wildcard and the error strategy could not recover from the mismatched -// symbol - -func (p *BaseParser) MatchWildcard() Token { - t := p.GetCurrentToken() - if t.GetTokenType() > 0 { - p.errHandler.ReportMatch(p) - p.Consume() - } else { - t = p.errHandler.RecoverInline(p) - if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol - p.ctx.AddErrorNode(t) - } - } - return t -} - -func (p *BaseParser) GetParserRuleContext() ParserRuleContext { - return p.ctx -} - -func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) { - p.ctx = v -} - -func (p *BaseParser) GetParseListeners() []ParseTreeListener { - if p.parseListeners == nil { - return make([]ParseTreeListener, 0) - } - return p.parseListeners -} - -// Registers {@code listener} to receive events during the parsing process. -// -//To support output-preserving grammar transformations (including but not -// limited to left-recursion removal, automated left-factoring, and -// optimized code generation), calls to listener methods during the parse -// may differ substantially from calls made by -// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In -// particular, rule entry and exit events may occur in a different order -// during the parse than after the parser. In addition, calls to certain -// rule entry methods may be omitted.
-// -//With the following specific exceptions, calls to listener events are -// deterministic, i.e. for identical input the calls to listener -// methods will be the same.
-// -//If {@code listener} is {@code nil} or has not been added as a parse -// listener, p.method does nothing.
-// @param listener the listener to remove -// -func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { - - if p.parseListeners != nil { - - idx := -1 - for i, v := range p.parseListeners { - if v == listener { - idx = i - break - } - } - - if idx == -1 { - return - } - - // remove the listener from the slice - p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...) - - if len(p.parseListeners) == 0 { - p.parseListeners = nil - } - } -} - -// Remove all parse listeners. -func (p *BaseParser) removeParseListeners() { - p.parseListeners = nil -} - -// Notify any parse listeners of an enter rule event. -func (p *BaseParser) TriggerEnterRuleEvent() { - if p.parseListeners != nil { - ctx := p.ctx - for _, listener := range p.parseListeners { - listener.EnterEveryRule(ctx) - ctx.EnterRule(listener) - } - } -} - -// -// Notify any parse listeners of an exit rule event. -// -// @see //addParseListener -// -func (p *BaseParser) TriggerExitRuleEvent() { - if p.parseListeners != nil { - // reverse order walk of listeners - ctx := p.ctx - l := len(p.parseListeners) - 1 - - for i := range p.parseListeners { - listener := p.parseListeners[l-i] - ctx.ExitRule(listener) - listener.ExitEveryRule(ctx) - } - } -} - -func (p *BaseParser) GetInterpreter() *ParserATNSimulator { - return p.Interpreter -} - -func (p *BaseParser) GetATN() *ATN { - return p.Interpreter.atn -} - -func (p *BaseParser) GetTokenFactory() TokenFactory { - return p.input.GetTokenSource().GetTokenFactory() -} - -// Tell our token source and error strategy about a Newway to create tokens.// -func (p *BaseParser) setTokenFactory(factory TokenFactory) { - p.input.GetTokenSource().setTokenFactory(factory) -} - -// The ATN with bypass alternatives is expensive to create so we create it -// lazily. -// -// @panics UnsupportedOperationException if the current parser does not -// implement the {@link //getSerializedATN()} method. -// -func (p *BaseParser) GetATNWithBypassAlts() { - - // TODO - panic("Not implemented!") - - // serializedAtn := p.getSerializedATN() - // if (serializedAtn == nil) { - // panic("The current parser does not support an ATN with bypass alternatives.") - // } - // result := p.bypassAltsAtnCache[serializedAtn] - // if (result == nil) { - // deserializationOptions := NewATNDeserializationOptions(nil) - // deserializationOptions.generateRuleBypassTransitions = true - // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn) - // p.bypassAltsAtnCache[serializedAtn] = result - // } - // return result -} - -// The preferred method of getting a tree pattern. 
For example, here's a -// sample use: -// -//-// ParseTree t = parser.expr() -// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", -// MyParser.RULE_expr) -// ParseTreeMatch m = p.Match(t) -// String id = m.Get("ID") -//- -func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { - - panic("NewParseTreePatternMatcher not implemented!") - // - // if (lexer == nil) { - // if (p.GetTokenStream() != nil) { - // tokenSource := p.GetTokenStream().GetTokenSource() - // if _, ok := tokenSource.(ILexer); ok { - // lexer = tokenSource - // } - // } - // } - // if (lexer == nil) { - // panic("Parser can't discover a lexer to use") - // } - - // m := NewParseTreePatternMatcher(lexer, p) - // return m.compile(pattern, patternRuleIndex) -} - -func (p *BaseParser) GetInputStream() IntStream { - return p.GetTokenStream() -} - -func (p *BaseParser) SetInputStream(input TokenStream) { - p.SetTokenStream(input) -} - -func (p *BaseParser) GetTokenStream() TokenStream { - return p.input -} - -// Set the token stream and reset the parser.// -func (p *BaseParser) SetTokenStream(input TokenStream) { - p.input = nil - p.reset() - p.input = input -} - -// Match needs to return the current input symbol, which gets put -// into the label for the associated token ref e.g., x=ID. -// -func (p *BaseParser) GetCurrentToken() Token { - return p.input.LT(1) -} - -func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) { - if offendingToken == nil { - offendingToken = p.GetCurrentToken() - } - p._SyntaxErrors++ - line := offendingToken.GetLine() - column := offendingToken.GetColumn() - listener := p.GetErrorListenerDispatch() - listener.SyntaxError(p, offendingToken, line, column, msg, err) -} - -func (p *BaseParser) Consume() Token { - o := p.GetCurrentToken() - if o.GetTokenType() != TokenEOF { - p.GetInputStream().Consume() - } - hasListener := p.parseListeners != nil && len(p.parseListeners) > 0 - if p.BuildParseTrees || hasListener { - if p.errHandler.InErrorRecoveryMode(p) { - node := p.ctx.AddErrorNode(o) - if p.parseListeners != nil { - for _, l := range p.parseListeners { - l.VisitErrorNode(node) - } - } - - } else { - node := p.ctx.AddTokenNode(o) - if p.parseListeners != nil { - for _, l := range p.parseListeners { - l.VisitTerminal(node) - } - } - } - // node.invokingState = p.state - } - - return o -} - -func (p *BaseParser) addContextToParseTree() { - // add current context to parent if we have a parent - if p.ctx.GetParent() != nil { - p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx) - } -} - -func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) { - p.SetState(state) - p.ctx = localctx - p.ctx.SetStart(p.input.LT(1)) - if p.BuildParseTrees { - p.addContextToParseTree() - } - if p.parseListeners != nil { - p.TriggerEnterRuleEvent() - } -} - -func (p *BaseParser) ExitRule() { - p.ctx.SetStop(p.input.LT(-1)) - // trigger event on ctx, before it reverts to parent - if p.parseListeners != nil { - p.TriggerExitRuleEvent() - } - p.SetState(p.ctx.GetInvokingState()) - if p.ctx.GetParent() != nil { - p.ctx = p.ctx.GetParent().(ParserRuleContext) - } else { - p.ctx = nil - } -} - -func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { - localctx.SetAltNumber(altNum) - // if we have Newlocalctx, make sure we replace existing ctx - // that is previous child of parse tree - if p.BuildParseTrees && p.ctx != localctx { - if p.ctx.GetParent() != nil { - 
p.ctx.GetParent().(ParserRuleContext).RemoveLastChild() - p.ctx.GetParent().(ParserRuleContext).AddChild(localctx) - } - } - p.ctx = localctx -} - -// Get the precedence level for the top-most precedence rule. -// -// @return The precedence level for the top-most precedence rule, or -1 if -// the parser context is not nested within a precedence rule. - -func (p *BaseParser) GetPrecedence() int { - if len(p.precedenceStack) == 0 { - return -1 - } - - return p.precedenceStack[len(p.precedenceStack)-1] -} - -func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) { - p.SetState(state) - p.precedenceStack.Push(precedence) - p.ctx = localctx - p.ctx.SetStart(p.input.LT(1)) - if p.parseListeners != nil { - p.TriggerEnterRuleEvent() // simulates rule entry for - // left-recursive rules - } -} - -// -// Like {@link //EnterRule} but for recursive rules. - -func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) { - previous := p.ctx - previous.SetParent(localctx) - previous.SetInvokingState(state) - previous.SetStop(p.input.LT(-1)) - - p.ctx = localctx - p.ctx.SetStart(previous.GetStart()) - if p.BuildParseTrees { - p.ctx.AddChild(previous) - } - if p.parseListeners != nil { - p.TriggerEnterRuleEvent() // simulates rule entry for - // left-recursive rules - } -} - -func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { - p.precedenceStack.Pop() - p.ctx.SetStop(p.input.LT(-1)) - retCtx := p.ctx // save current ctx (return value) - // unroll so ctx is as it was before call to recursive method - if p.parseListeners != nil { - for p.ctx != parentCtx { - p.TriggerExitRuleEvent() - p.ctx = p.ctx.GetParent().(ParserRuleContext) - } - } else { - p.ctx = parentCtx - } - // hook into tree - retCtx.SetParent(parentCtx) - if p.BuildParseTrees && parentCtx != nil { - // add return ctx into invoking rule's tree - parentCtx.AddChild(retCtx) - } -} - -func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { - ctx := p.ctx - for ctx != nil { - if ctx.GetRuleIndex() == ruleIndex { - return ctx - } - ctx = ctx.GetParent().(ParserRuleContext) - } - return nil -} - -func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool { - return precedence >= p.precedenceStack[len(p.precedenceStack)-1] -} - -func (p *BaseParser) inContext(context ParserRuleContext) bool { - // TODO: useful in parser? - return false -} - -// -// Checks whether or not {@code symbol} can follow the current state in the -// ATN. The behavior of p.method is equivalent to the following, but is -// implemented such that the complete context-sensitive follow set does not -// need to be explicitly constructed. -// -//
-// return getExpectedTokens().contains(symbol) -//-// -// @param symbol the symbol type to check -// @return {@code true} if {@code symbol} can follow the current state in -// the ATN, otherwise {@code false}. - -func (p *BaseParser) IsExpectedToken(symbol int) bool { - atn := p.Interpreter.atn - ctx := p.ctx - s := atn.states[p.state] - following := atn.NextTokens(s, nil) - if following.contains(symbol) { - return true - } - if !following.contains(TokenEpsilon) { - return false - } - for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { - invokingState := atn.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - following = atn.NextTokens(rt.(*RuleTransition).followState, nil) - if following.contains(symbol) { - return true - } - ctx = ctx.GetParent().(ParserRuleContext) - } - if following.contains(TokenEpsilon) && symbol == TokenEOF { - return true - } - - return false -} - -// Computes the set of input symbols which could follow the current parser -// state and context, as given by {@link //GetState} and {@link //GetContext}, -// respectively. -// -// @see ATN//getExpectedTokens(int, RuleContext) -// -func (p *BaseParser) GetExpectedTokens() *IntervalSet { - return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) -} - -func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { - atn := p.Interpreter.atn - s := atn.states[p.state] - return atn.NextTokens(s, nil) -} - -// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// -func (p *BaseParser) GetRuleIndex(ruleName string) int { - var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] - if ok { - return ruleIndex - } - - return -1 -} - -// Return List<String> of the rule names in your parser instance -// leading up to a call to the current rule. You could override if -// you want more details such as the file/line info of where -// in the ATN a rule is invoked. -// -// this very useful for error messages. - -func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { - if c == nil { - c = p.ctx - } - stack := make([]string, 0) - for c != nil { - // compute what follows who invoked us - ruleIndex := c.GetRuleIndex() - if ruleIndex < 0 { - stack = append(stack, "n/a") - } else { - stack = append(stack, p.GetRuleNames()[ruleIndex]) - } - - vp := c.GetParent() - - if vp == nil { - break - } - - c = vp.(ParserRuleContext) - } - return stack -} - -// For debugging and other purposes.// -func (p *BaseParser) GetDFAStrings() string { - return fmt.Sprint(p.Interpreter.decisionToDFA) -} - -// For debugging and other purposes.// -func (p *BaseParser) DumpDFA() { - seenOne := false - for _, dfa := range p.Interpreter.decisionToDFA { - if dfa.numStates() > 0 { - if seenOne { - fmt.Println() - } - fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":") - fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames)) - seenOne = true - } - } -} - -func (p *BaseParser) GetSourceName() string { - return p.GrammarFileName -} - -// During a parse is sometimes useful to listen in on the rule entry and exit -// events as well as token Matches. p.is for quick and dirty debugging. 
-// -func (p *BaseParser) SetTrace(trace *TraceListener) { - if trace == nil { - p.RemoveParseListener(p.tracer) - p.tracer = nil - } else { - if p.tracer != nil { - p.RemoveParseListener(p.tracer) - } - p.tracer = NewTraceListener(p) - p.AddParseListener(p.tracer) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go deleted file mode 100644 index 96a03f02aa6..00000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go +++ /dev/null @@ -1,649 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. -package antlr - -import ( -"bytes" -"fmt" -) - - -// -// Useful for rewriting out a buffered input token stream after doing some -// augmentation or other manipulations on it. - -//
-// You can insert stuff, replace, and delete chunks. Note that the operations -// are done lazily--only if you convert the buffer to a {@link String} with -// {@link TokenStream#getText()}. This is very efficient because you are not -// moving data around all the time. As the buffer of tokens is converted to -// strings, the {@link #getText()} method(s) scan the input token stream and -// check to see if there is an operation at the current index. If so, the -// operation is done and then normal {@link String} rendering continues on the -// buffer. This is like having multiple Turing machine instruction streams -// (programs) operating on a single input tape. :)
-//- -// This rewriter makes no modifications to the token stream. It does not ask the -// stream to fill itself up nor does it advance the input cursor. The token -// stream {@link TokenStream#index()} will return the same value before and -// after any {@link #getText()} call.
- -//-// The rewriter only works on tokens that you have in the buffer and ignores the -// current input cursor. If you are buffering tokens on-demand, calling -// {@link #getText()} halfway through the input will only do rewrites for those -// tokens in the first half of the file.
- -//-// Since the operations are done lazily at {@link #getText}-time, operations do -// not screw up the token index values. That is, an insert operation at token -// index {@code i} does not change the index values for tokens -// {@code i}+1..n-1.
- -//-// Because operations never actually alter the buffer, you may always get the -// original token stream back without undoing anything. Since the instructions -// are queued up, you can easily simulate transactions and roll back any changes -// if there is an error just by removing instructions. For example,
- -//-// CharStream input = new ANTLRFileStream("input"); -// TLexer lex = new TLexer(input); -// CommonTokenStream tokens = new CommonTokenStream(lex); -// T parser = new T(tokens); -// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens); -// parser.startRule(); -//- -//
-// Then in the rules, you can execute (assuming rewriter is visible):
- -//-// Token t,u; -// ... -// rewriter.insertAfter(t, "text to put after t");} -// rewriter.insertAfter(u, "text after u");} -// System.out.println(rewriter.getText()); -//- -//
-// You can also have multiple "instruction streams" and get multiple rewrites -// from a single pass over the input. Just name the instruction streams and use -// that name again when printing the buffer. This could be useful for generating -// a C file and also its header file--all from the same buffer:
- -//-// rewriter.insertAfter("pass1", t, "text to put after t");} -// rewriter.insertAfter("pass2", u, "text after u");} -// System.out.println(rewriter.getText("pass1")); -// System.out.println(rewriter.getText("pass2")); -//- -//
-// If you don't use named rewrite streams, a "default" stream is used as the -// first example shows.
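// Illustrative sketch, not part of the vendored file: the lazy-execution model described
// above is visible in the RewriteOperation types defined below. A queued instruction only
// touches a bytes.Buffer when it is executed, e.g. (tokens here is a hypothetical TokenStream,
// and token 0 is assumed not to be EOF):
//
//	var buf bytes.Buffer
//	ins := NewInsertBeforeOp(0, "/*hdr*/ ", tokens)
//	next := ins.Execute(&buf)              // writes "/*hdr*/ " plus token 0's text, returns 1
//	rep := NewReplaceOp(next, next+2, "x", tokens)
//	_ = rep.Execute(&buf)                  // writes "x" in place of tokens 1..3, returns 4
//
// The rewriter itself queues such ops per named program (or the "default" one) and only
// replays them when getText() is called; the underlying token stream is never modified.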
- - - -const( - Default_Program_Name = "default" - Program_Init_Size = 100 - Min_Token_Index = 0 -) - -// Define the rewrite operation hierarchy - -type RewriteOperation interface { - // Execute the rewrite operation by possibly adding to the buffer. - // Return the index of the next token to operate on. - Execute(buffer *bytes.Buffer) int - String() string - GetInstructionIndex() int - GetIndex() int - GetText() string - GetOpName() string - GetTokens() TokenStream - SetInstructionIndex(val int) - SetIndex(int) - SetText(string) - SetOpName(string) - SetTokens(TokenStream) -} - -type BaseRewriteOperation struct { - //Current index of rewrites list - instruction_index int - //Token buffer index - index int - //Substitution text - text string - //Actual operation name - op_name string - //Pointer to token steam - tokens TokenStream -} - -func (op *BaseRewriteOperation)GetInstructionIndex() int{ - return op.instruction_index -} - -func (op *BaseRewriteOperation)GetIndex() int{ - return op.index -} - -func (op *BaseRewriteOperation)GetText() string{ - return op.text -} - -func (op *BaseRewriteOperation)GetOpName() string{ - return op.op_name -} - -func (op *BaseRewriteOperation)GetTokens() TokenStream{ - return op.tokens -} - -func (op *BaseRewriteOperation)SetInstructionIndex(val int){ - op.instruction_index = val -} - -func (op *BaseRewriteOperation)SetIndex(val int) { - op.index = val -} - -func (op *BaseRewriteOperation)SetText(val string){ - op.text = val -} - -func (op *BaseRewriteOperation)SetOpName(val string){ - op.op_name = val -} - -func (op *BaseRewriteOperation)SetTokens(val TokenStream) { - op.tokens = val -} - - -func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{ - return op.index -} - -func (op *BaseRewriteOperation) String() string { - return fmt.Sprintf("<%s@%d:\"%s\">", - op.op_name, - op.tokens.Get(op.GetIndex()), - op.text, - ) - -} - - -type InsertBeforeOp struct { - BaseRewriteOperation -} - -func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{ - return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{ - index:index, - text:text, - op_name:"InsertBeforeOp", - tokens:stream, - }} -} - -func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{ - buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ - buffer.WriteString(op.tokens.Get(op.index).GetText()) - } - return op.index+1 -} - -func (op *InsertBeforeOp) String() string { - return op.BaseRewriteOperation.String() -} - -// Distinguish between insert after/before to do the "insert afters" -// first and then the "insert befores" at same index. Implementation -// of "insert after" is "insert before index+1". - -type InsertAfterOp struct { - BaseRewriteOperation -} - -func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{ - return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{ - index:index+1, - text:text, - tokens:stream, - }} -} - -func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { - buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ - buffer.WriteString(op.tokens.Get(op.index).GetText()) - } - return op.index+1 -} - -func (op *InsertAfterOp) String() string { - return op.BaseRewriteOperation.String() -} - -// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp -// instructions. 
-type ReplaceOp struct{ - BaseRewriteOperation - LastIndex int -} - -func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp { - return &ReplaceOp{ - BaseRewriteOperation:BaseRewriteOperation{ - index:from, - text:text, - op_name:"ReplaceOp", - tokens:stream, - }, - LastIndex:to, - } -} - -func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{ - if op.text != ""{ - buffer.WriteString(op.text) - } - return op.LastIndex +1 -} - -func (op *ReplaceOp) String() string { - if op.text == "" { - return fmt.Sprintf("@@ -64,7 +61,6 @@ var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() //
// line line:charPositionInLine msg //-// func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go similarity index 99% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go index c4080dbfd18..5c0a637ba4a 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -23,7 +23,6 @@ type ErrorStrategy interface { // This is the default implementation of {@link ANTLRErrorStrategy} used for // error Reporting and recovery in ANTLR parsers. -// type DefaultErrorStrategy struct { errorRecoveryMode bool lastErrorIndex int @@ -61,12 +60,10 @@ func (d *DefaultErrorStrategy) reset(recognizer Parser) { d.endErrorCondition(recognizer) } -// // This method is called to enter error recovery mode when a recognition // exception is Reported. // // @param recognizer the parser instance -// func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) { d.errorRecoveryMode = true } @@ -75,28 +72,23 @@ func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool { return d.errorRecoveryMode } -// // This method is called to leave error recovery mode after recovering from // a recognition exception. // // @param recognizer -// func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) { d.errorRecoveryMode = false d.lastErrorStates = nil d.lastErrorIndex = -1 } -// // {@inheritDoc} // //
The default implementation simply calls {@link //endErrorCondition}.
-// func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { d.endErrorCondition(recognizer) } -// // {@inheritDoc} // //The default implementation returns immediately if the handler is already @@ -114,7 +106,6 @@ func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { //
The default implementation reSynchronizes the parser by consuming tokens // until we find one in the reSynchronization set--loosely the set of tokens // that can follow the current rule.
-// func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) { if d.lastErrorIndex == recognizer.GetInputStream().Index() && @@ -206,7 +196,6 @@ func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException // compare token set at the start of the loop and at each iteration. If for // some reason speed is suffering for you, you can turn off d // functionality by simply overriding d method as a blank { }. -// func (d *DefaultErrorStrategy) Sync(recognizer Parser) { // If already recovering, don't try to Sync if d.InErrorRecoveryMode(recognizer) { @@ -247,7 +236,6 @@ func (d *DefaultErrorStrategy) Sync(recognizer Parser) { // // @param recognizer the parser instance // @param e the recognition exception -// func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) { tokens := recognizer.GetTokenStream() var input string @@ -264,7 +252,6 @@ func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *N recognizer.NotifyErrorListeners(msg, e.offendingToken, e) } -// // This is called by {@link //ReportError} when the exception is an // {@link InputMisMatchException}. // @@ -272,14 +259,12 @@ func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *N // // @param recognizer the parser instance // @param e the recognition exception -// func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) { msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) + " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) recognizer.NotifyErrorListeners(msg, e.offendingToken, e) } -// // This is called by {@link //ReportError} when the exception is a // {@link FailedPredicateException}. // @@ -287,7 +272,6 @@ func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *Inpu // // @param recognizer the parser instance // @param e the recognition exception -// func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) { ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()] msg := "rule " + ruleName + " " + e.message @@ -310,7 +294,6 @@ func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *Faile // {@link Parser//NotifyErrorListeners}. // // @param recognizer the parser instance -// func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { if d.InErrorRecoveryMode(recognizer) { return @@ -339,7 +322,6 @@ func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { // {@link Parser//NotifyErrorListeners}. // // @param recognizer the parser instance -// func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { if d.InErrorRecoveryMode(recognizer) { return @@ -392,15 +374,14 @@ func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { // derivation: // //-// => ID '=' '(' INT ')' ('+' atom)* '' +// => ID '=' '(' INT ')' ('+' atom)* ” // ^ //// -// The attempt to Match {@code ')'} will fail when it sees {@code ''} and -// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==''} +// The attempt to Match {@code ')'} will fail when it sees {@code ”} and +// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==”} // is in the set of tokens that can follow the {@code ')'} token reference // in rule {@code atom}. It can assume that you forgot the {@code ')'}. 
-// func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { // SINGLE TOKEN DELETION MatchedSymbol := d.SingleTokenDeletion(recognizer) @@ -418,7 +399,6 @@ func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { panic(NewInputMisMatchException(recognizer)) } -// // This method implements the single-token insertion inline error recovery // strategy. It is called by {@link //recoverInline} if the single-token // deletion strategy fails to recover from the mismatched input. If this @@ -434,7 +414,6 @@ func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { // @param recognizer the parser instance // @return {@code true} if single-token insertion is a viable recovery // strategy for the current mismatched input, otherwise {@code false} -// func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { currentSymbolType := recognizer.GetTokenStream().LA(1) // if current token is consistent with what could come after current @@ -469,7 +448,6 @@ func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { // @return the successfully Matched {@link Token} instance if single-token // deletion successfully recovers from the mismatched input, otherwise // {@code nil} -// func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { NextTokenType := recognizer.GetTokenStream().LA(2) expecting := d.GetExpectedTokens(recognizer) @@ -507,7 +485,6 @@ func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { // a CommonToken of the appropriate type. The text will be the token. // If you change what tokens must be created by the lexer, // override d method to create the appropriate tokens. -// func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { currentSymbol := recognizer.GetCurrentToken() expecting := d.GetExpectedTokens(recognizer) @@ -546,7 +523,6 @@ func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet // the token). This is better than forcing you to override a method in // your token objects because you don't have to go modify your lexer // so that it creates a NewJava type. -// func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string { if t == nil { return "
If the state number is not known, b method returns -1.
+ +// Gets the set of input symbols which could potentially follow the +// previously Matched symbol at the time b exception was panicn. +// +//If the set of expected tokens is not known and could not be computed, +// b method returns {@code nil}.
+// +// @return The set of token types that could potentially follow the current +// state in the ATN, or {@code nil} if the information is not available. +// / +func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { + if b.recognizer != nil { + return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) + } + + return nil +} + +func (b *BaseRecognitionException) String() string { + return b.message +} + +type LexerNoViableAltException struct { + *BaseRecognitionException + + startIndex int + deadEndConfigs ATNConfigSet +} + +func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException { + + l := new(LexerNoViableAltException) + + l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil) + + l.startIndex = startIndex + l.deadEndConfigs = deadEndConfigs + + return l +} + +func (l *LexerNoViableAltException) String() string { + symbol := "" + if l.startIndex >= 0 && l.startIndex < l.input.Size() { + symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex)) + } + return "LexerNoViableAltException" + symbol +} + +type NoViableAltException struct { + *BaseRecognitionException + + startToken Token + offendingToken Token + ctx ParserRuleContext + deadEndConfigs ATNConfigSet +} + +// Indicates that the parser could not decide which of two or more paths +// to take based upon the remaining input. It tracks the starting token +// of the offending input and also knows where the parser was +// in the various paths when the error. Reported by ReportNoViableAlternative() +func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { + + if ctx == nil { + ctx = recognizer.GetParserRuleContext() + } + + if offendingToken == nil { + offendingToken = recognizer.GetCurrentToken() + } + + if startToken == nil { + startToken = recognizer.GetCurrentToken() + } + + if input == nil { + input = recognizer.GetInputStream().(TokenStream) + } + + n := new(NoViableAltException) + n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) + + // Which configurations did we try at input.Index() that couldn't Match + // input.LT(1)?// + n.deadEndConfigs = deadEndConfigs + // The token object at the start index the input stream might + // not be buffering tokens so get a reference to it. (At the + // time the error occurred, of course the stream needs to keep a + // buffer all of the tokens but later we might not have access to those.) + n.startToken = startToken + n.offendingToken = offendingToken + + return n +} + +type InputMisMatchException struct { + *BaseRecognitionException +} + +// This signifies any kind of mismatched input exceptions such as +// when the current input does not Match the expected token. +func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { + + i := new(InputMisMatchException) + i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + i.offendingToken = recognizer.GetCurrentToken() + + return i + +} + +// A semantic predicate failed during validation. Validation of predicates +// occurs when normally parsing the alternative just like Matching a token. +// Disambiguating predicate evaluation occurs when we test a predicate during +// prediction. 
+ +type FailedPredicateException struct { + *BaseRecognitionException + + ruleIndex int + predicateIndex int + predicate string +} + +func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { + + f := new(FailedPredicateException) + + f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] + trans := s.GetTransitions()[0] + if trans2, ok := trans.(*PredicateTransition); ok { + f.ruleIndex = trans2.ruleIndex + f.predicateIndex = trans2.predIndex + } else { + f.ruleIndex = 0 + f.predicateIndex = 0 + } + f.predicate = predicate + f.offendingToken = recognizer.GetCurrentToken() + + return f +} + +func (f *FailedPredicateException) formatMessage(predicate, message string) string { + if message != "" { + return message + } + + return "failed predicate: {" + predicate + "}?" +} + +type ParseCancellationException struct { +} + +func NewParseCancellationException() *ParseCancellationException { + // Error.call(this) + // Error.captureStackTrace(this, ParseCancellationException) + return new(ParseCancellationException) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go similarity index 92% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go index 842170c086c..bd6ad5efe3d 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go similarity index 96% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go index 5ff270f5368..a8b889cedb9 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go similarity index 82% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go index 438e0ea6e75..4778878bd0d 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
// Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go similarity index 98% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go index 1e9393adb60..c1e155e8180 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -223,6 +223,10 @@ func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []strin return i.toIndexString() } +func (i *IntervalSet) GetIntervals() []*Interval { + return i.intervals +} + func (i *IntervalSet) toCharString() string { names := make([]string, len(i.intervals)) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go new file mode 100644 index 00000000000..e5a74f0c6c4 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go @@ -0,0 +1,198 @@ +package antlr + +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +import ( + "sort" +) + +// Collectable is an interface that a struct should implement if it is to be +// usable as a key in these collections. +type Collectable[T any] interface { + Hash() int + Equals(other Collectable[T]) bool +} + +type Comparator[T any] interface { + Hash1(o T) int + Equals2(T, T) bool +} + +// JStore implements a container that allows the use of a struct to calculate the key +// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just +// serve the needs of the ANTLR Go runtime. +// +// For ease of porting the logic of the runtime from the master target (Java), this collection +// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals() +// function as the key. The values are stored in a standard go map which internally is a form of hashmap +// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with +// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't +// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and +// we understand the requirements, then this is fine - this is not a general purpose collection. +type JStore[T any, C Comparator[T]] struct { + store map[int][]T + len int + comparator Comparator[T] +} + +func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] { + + if comparator == nil { + panic("comparator cannot be nil") + } + + s := &JStore[T, C]{ + store: make(map[int][]T, 1), + comparator: comparator, + } + return s +} + +// Put will store given value in the collection. 
Note that the key for storage is generated from +// the value itself - this is specifically because that is what ANTLR needs - this would not be useful +// as any kind of general collection. +// +// If the key has a hash conflict, then the value will be added to the slice of values associated with the +// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is +// tested by calling the equals() method on the key. +// +// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true +// +// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false. +func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn + + kh := s.comparator.Hash1(value) + + for _, v1 := range s.store[kh] { + if s.comparator.Equals2(value, v1) { + return v1, true + } + } + s.store[kh] = append(s.store[kh], value) + s.len++ + return value, false +} + +// Get will return the value associated with the key - the type of the key is the same type as the value +// which would not generally be useful, but this is a specific thing for ANTLR where the key is +// generated using the object we are going to store. +func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn + + kh := s.comparator.Hash1(key) + + for _, v := range s.store[kh] { + if s.comparator.Equals2(key, v) { + return v, true + } + } + return key, false +} + +// Contains returns true if the given key is present in the store +func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn + + _, present := s.Get(key) + return present +} + +func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T { + vs := make([]T, 0, len(s.store)) + for _, v := range s.store { + vs = append(vs, v...) + } + sort.Slice(vs, func(i, j int) bool { + return less(vs[i], vs[j]) + }) + + return vs +} + +func (s *JStore[T, C]) Each(f func(T) bool) { + for _, e := range s.store { + for _, v := range e { + f(v) + } + } +} + +func (s *JStore[T, C]) Len() int { + return s.len +} + +func (s *JStore[T, C]) Values() []T { + vs := make([]T, 0, len(s.store)) + for _, e := range s.store { + for _, v := range e { + vs = append(vs, v) + } + } + return vs +} + +type entry[K, V any] struct { + key K + val V +} + +type JMap[K, V any, C Comparator[K]] struct { + store map[int][]*entry[K, V] + len int + comparator Comparator[K] +} + +func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] { + return &JMap[K, V, C]{ + store: make(map[int][]*entry[K, V], 1), + comparator: comparator, + } +} + +func (m *JMap[K, V, C]) Put(key K, val V) { + kh := m.comparator.Hash1(key) + + m.store[kh] = append(m.store[kh], &entry[K, V]{key, val}) + m.len++ +} + +func (m *JMap[K, V, C]) Values() []V { + vs := make([]V, 0, len(m.store)) + for _, e := range m.store { + for _, v := range e { + vs = append(vs, v.val) + } + } + return vs +} + +func (m *JMap[K, V, C]) Get(key K) (V, bool) { + + var none V + kh := m.comparator.Hash1(key) + for _, e := range m.store[kh] { + if m.comparator.Equals2(e.key, key) { + return e.val, true + } + } + return none, false +} + +func (m *JMap[K, V, C]) Len() int { + return len(m.store) +} + +func (m *JMap[K, V, C]) Delete(key K) { + kh := m.comparator.Hash1(key) + for i, e := range m.store[kh] { + if m.comparator.Equals2(e.key, key) { + m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...) 
+ m.len-- + return + } + } +} + +func (m *JMap[K, V, C]) Clear() { + m.store = make(map[int][]*entry[K, V]) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go similarity index 98% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go index b04f04572f1..6533f051645 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -232,8 +232,6 @@ func (b *BaseLexer) NextToken() Token { } return b.token } - - return nil } // Instruct the lexer to Skip creating a token for current lexer rule @@ -342,7 +340,7 @@ func (b *BaseLexer) GetCharIndex() int { } // Return the text Matched so far for the current token or any text override. -//Set the complete text of l token it wipes any previous changes to the text. +// Set the complete text of l token it wipes any previous changes to the text. func (b *BaseLexer) GetText() string { if b.text != "" { return b.text diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go similarity index 91% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go index 5a325be1372..111656c2952 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -21,8 +21,8 @@ type LexerAction interface { getActionType() int getIsPositionDependent() bool execute(lexer Lexer) - hash() int - equals(other LexerAction) bool + Hash() int + Equals(other LexerAction) bool } type BaseLexerAction struct { @@ -51,15 +51,14 @@ func (b *BaseLexerAction) getIsPositionDependent() bool { return b.isPositionDependent } -func (b *BaseLexerAction) hash() int { +func (b *BaseLexerAction) Hash() int { return b.actionType } -func (b *BaseLexerAction) equals(other LexerAction) bool { +func (b *BaseLexerAction) Equals(other LexerAction) bool { return b == other } -// // Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}. // //The {@code Skip} command does not have any parameters, so l action is @@ -85,7 +84,8 @@ func (l *LexerSkipAction) String() string { return "skip" } -// Implements the {@code type} lexer action by calling {@link Lexer//setType} +// Implements the {@code type} lexer action by calling {@link Lexer//setType} +// // with the assigned type. 
type LexerTypeAction struct { *BaseLexerAction @@ -104,14 +104,14 @@ func (l *LexerTypeAction) execute(lexer Lexer) { lexer.SetType(l.thetype) } -func (l *LexerTypeAction) hash() int { +func (l *LexerTypeAction) Hash() int { h := murmurInit(0) h = murmurUpdate(h, l.actionType) h = murmurUpdate(h, l.thetype) return murmurFinish(h, 2) } -func (l *LexerTypeAction) equals(other LexerAction) bool { +func (l *LexerTypeAction) Equals(other LexerAction) bool { if l == other { return true } else if _, ok := other.(*LexerTypeAction); !ok { @@ -148,14 +148,14 @@ func (l *LexerPushModeAction) execute(lexer Lexer) { lexer.PushMode(l.mode) } -func (l *LexerPushModeAction) hash() int { +func (l *LexerPushModeAction) Hash() int { h := murmurInit(0) h = murmurUpdate(h, l.actionType) h = murmurUpdate(h, l.mode) return murmurFinish(h, 2) } -func (l *LexerPushModeAction) equals(other LexerAction) bool { +func (l *LexerPushModeAction) Equals(other LexerAction) bool { if l == other { return true } else if _, ok := other.(*LexerPushModeAction); !ok { @@ -245,14 +245,14 @@ func (l *LexerModeAction) execute(lexer Lexer) { lexer.SetMode(l.mode) } -func (l *LexerModeAction) hash() int { +func (l *LexerModeAction) Hash() int { h := murmurInit(0) h = murmurUpdate(h, l.actionType) h = murmurUpdate(h, l.mode) return murmurFinish(h, 2) } -func (l *LexerModeAction) equals(other LexerAction) bool { +func (l *LexerModeAction) Equals(other LexerAction) bool { if l == other { return true } else if _, ok := other.(*LexerModeAction); !ok { @@ -303,7 +303,7 @@ func (l *LexerCustomAction) execute(lexer Lexer) { lexer.Action(nil, l.ruleIndex, l.actionIndex) } -func (l *LexerCustomAction) hash() int { +func (l *LexerCustomAction) Hash() int { h := murmurInit(0) h = murmurUpdate(h, l.actionType) h = murmurUpdate(h, l.ruleIndex) @@ -311,13 +311,14 @@ func (l *LexerCustomAction) hash() int { return murmurFinish(h, 3) } -func (l *LexerCustomAction) equals(other LexerAction) bool { +func (l *LexerCustomAction) Equals(other LexerAction) bool { if l == other { return true } else if _, ok := other.(*LexerCustomAction); !ok { return false } else { - return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex + return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && + l.actionIndex == other.(*LexerCustomAction).actionIndex } } @@ -344,14 +345,14 @@ func (l *LexerChannelAction) execute(lexer Lexer) { lexer.SetChannel(l.channel) } -func (l *LexerChannelAction) hash() int { +func (l *LexerChannelAction) Hash() int { h := murmurInit(0) h = murmurUpdate(h, l.actionType) h = murmurUpdate(h, l.channel) return murmurFinish(h, 2) } -func (l *LexerChannelAction) equals(other LexerAction) bool { +func (l *LexerChannelAction) Equals(other LexerAction) bool { if l == other { return true } else if _, ok := other.(*LexerChannelAction); !ok { @@ -412,10 +413,10 @@ func (l *LexerIndexedCustomAction) execute(lexer Lexer) { l.lexerAction.execute(lexer) } -func (l *LexerIndexedCustomAction) hash() int { +func (l *LexerIndexedCustomAction) Hash() int { h := murmurInit(0) h = murmurUpdate(h, l.offset) - h = murmurUpdate(h, l.lexerAction.hash()) + h = murmurUpdate(h, l.lexerAction.Hash()) return murmurFinish(h, 2) } @@ -425,6 +426,7 @@ func (l *LexerIndexedCustomAction) equals(other LexerAction) bool { } else if _, ok := other.(*LexerIndexedCustomAction); !ok { return false } else { - return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction == 
other.(*LexerIndexedCustomAction).lexerAction + return l.offset == other.(*LexerIndexedCustomAction).offset && + l.lexerAction.Equals(other.(*LexerIndexedCustomAction).lexerAction) } } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go similarity index 88% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go index 056941dd6e7..be1ba7a7e30 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go @@ -1,9 +1,11 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. package antlr +import "golang.org/x/exp/slices" + // Represents an executor for a sequence of lexer actions which traversed during // the Matching operation of a lexer rule (token). // @@ -12,8 +14,8 @@ package antlr // not cause bloating of the {@link DFA} created for the lexer.
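// Illustrative note and sketch, not part of the vendored file: in the hunks below the
// executor's hash/equals methods are exported as Hash/Equals, and Equals now compares the
// two lexer-action slices element-wise rather than comparing the addresses of the slice
// headers, roughly:
//
//	slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool {
//		return i.Equals(j)
//	})
//
// which is why golang.org/x/exp/slices is now imported at the top of this file.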
type LexerActionExecutor struct { - lexerActions []LexerAction - cachedHash int + lexerActions []LexerAction + cachedHash int } func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { @@ -30,7 +32,7 @@ func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { // of the performance-critical {@link LexerATNConfig//hashCode} operation. l.cachedHash = murmurInit(57) for _, a := range lexerActions { - l.cachedHash = murmurUpdate(l.cachedHash, a.hash()) + l.cachedHash = murmurUpdate(l.cachedHash, a.Hash()) } return l @@ -151,14 +153,17 @@ func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex } } -func (l *LexerActionExecutor) hash() int { +func (l *LexerActionExecutor) Hash() int { if l == nil { + // TODO: Why is this here? l should not be nil return 61 } + + // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode return l.cachedHash } -func (l *LexerActionExecutor) equals(other interface{}) bool { +func (l *LexerActionExecutor) Equals(other interface{}) bool { if l == other { return true } @@ -169,5 +174,13 @@ func (l *LexerActionExecutor) equals(other interface{}) bool { if othert == nil { return false } - return l.cachedHash == othert.cachedHash && &l.lexerActions == &othert.lexerActions + if l.cachedHash != othert.cachedHash { + return false + } + if len(l.lexerActions) != len(othert.lexerActions) { + return false + } + return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool { + return i.Equals(j) + }) } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go similarity index 98% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go index dc05153ea44..c573b752100 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -591,19 +591,24 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()]) } - hash := proposed.hash() dfa := l.decisionToDFA[l.mode] l.atn.stateMu.Lock() defer l.atn.stateMu.Unlock() - existing, ok := dfa.getState(hash) - if ok { + existing, present := dfa.states.Get(proposed) + if present { + + // This state was already present, so just return it. 
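// (Descriptive note, not part of the vendored hunk.) The Get/Put calls in this hunk replace
// the old scheme of caching DFA states keyed by the raw proposed.hash() value: dfa.states
// appears to be the JStore introduced in jcollect.go, so on a hash collision the lookup
// falls back to the state's own Equals method, and a genuinely new state is numbered from
// dfa.states.Len() before it is stored.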
+ // proposed = existing } else { - proposed.stateNumber = dfa.numStates() + + // We need to add the new state + // + proposed.stateNumber = dfa.states.Len() configs.SetReadOnly(true) proposed.configs = configs - dfa.setState(hash, proposed) + dfa.states.Put(proposed) } if !suppressEdge { dfa.setS0(proposed) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go similarity index 87% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go index 6ffb37de694..76689615a6d 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -14,14 +14,15 @@ func NewLL1Analyzer(atn *ATN) *LL1Analyzer { return la } -//* Special value added to the lookahead sets to indicate that we hit -// a predicate during analysis if {@code seeThruPreds==false}. -/// +// - Special value added to the lookahead sets to indicate that we hit +// a predicate during analysis if {@code seeThruPreds==false}. +// +// / const ( LL1AnalyzerHitPred = TokenInvalidType ) -//* +// * // Calculates the SLL(1) expected lookahead set for each outgoing transition // of an {@link ATNState}. The returned array has one element for each // outgoing transition in {@code s}. If the closure from transition @@ -38,7 +39,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { look := make([]*IntervalSet, count) for alt := 0; alt < count; alt++ { look[alt] = NewIntervalSet() - lookBusy := newArray2DHashSet(nil, nil) + lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) seeThruPreds := false // fail to get lookahead upon pred la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false) // Wipe out lookahead for la alternative if we found nothing @@ -50,7 +51,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { return look } -//* +// * // Compute set of tokens that can follow {@code s} in the ATN in the // specified {@code ctx}. // @@ -67,7 +68,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { // // @return The set of tokens that can follow {@code s} in the ATN in the // specified {@code ctx}. -/// +// / func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { r := NewIntervalSet() seeThruPreds := true // ignore preds get all lookahead @@ -75,7 +76,7 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet if ctx != nil { lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) } - la.look1(s, stopState, lookContext, r, newArray2DHashSet(nil, nil), NewBitSet(), seeThruPreds, true) + la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true) return r } @@ -109,14 +110,14 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet // outermost context is reached. This parameter has no effect if {@code ctx} // is {@code nil}. 
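// Illustrative sketch, not part of the vendored file: the signature changes below swap the
// untyped Set previously used for lookBusy for the generic JStore added in jcollect.go,
// typed over ATNConfig with the aConfEqInst comparator. Membership testing and insertion
// collapse into a single call, because Put reports whether an equal config was already
// stored:
//
//	lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
//	if _, present := lookBusy.Put(c); present {
//		return // this ATNConfig has already been explored
//	}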
-func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { +func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { returnState := la.atn.states[ctx.getReturnState(i)] la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) } -func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) { +func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { c := NewBaseATNConfig6(s, 0, ctx) @@ -124,8 +125,11 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look return } - lookBusy.Add(c) + _, present := lookBusy.Put(c) + if present { + return + } if s == stopState { if ctx == nil { look.addOne(TokenEpsilon) @@ -198,7 +202,7 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look } } -func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { +func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go new file mode 100644 index 00000000000..d26bf063920 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go @@ -0,0 +1,708 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +type Parser interface { + Recognizer + + GetInterpreter() *ParserATNSimulator + + GetTokenStream() TokenStream + GetTokenFactory() TokenFactory + GetParserRuleContext() ParserRuleContext + SetParserRuleContext(ParserRuleContext) + Consume() Token + GetParseListeners() []ParseTreeListener + + GetErrorHandler() ErrorStrategy + SetErrorHandler(ErrorStrategy) + GetInputStream() IntStream + GetCurrentToken() Token + GetExpectedTokens() *IntervalSet + NotifyErrorListeners(string, Token, RecognitionException) + IsExpectedToken(int) bool + GetPrecedence() int + GetRuleInvocationStack(ParserRuleContext) []string +} + +type BaseParser struct { + *BaseRecognizer + + Interpreter *ParserATNSimulator + BuildParseTrees bool + + input TokenStream + errHandler ErrorStrategy + precedenceStack IntStack + ctx ParserRuleContext + + tracer *TraceListener + parseListeners []ParseTreeListener + _SyntaxErrors int +} + +// p.is all the parsing support code essentially most of it is error +// recovery stuff.// +func NewBaseParser(input TokenStream) *BaseParser { + + p := new(BaseParser) + + p.BaseRecognizer = NewBaseRecognizer() + + // The input stream. + p.input = nil + // The error handling strategy for the parser. 
The default value is a new + // instance of {@link DefaultErrorStrategy}. + p.errHandler = NewDefaultErrorStrategy() + p.precedenceStack = make([]int, 0) + p.precedenceStack.Push(0) + // The {@link ParserRuleContext} object for the currently executing rule. + // p.is always non-nil during the parsing process. + p.ctx = nil + // Specifies whether or not the parser should construct a parse tree during + // the parsing process. The default value is {@code true}. + p.BuildParseTrees = true + // When {@link //setTrace}{@code (true)} is called, a reference to the + // {@link TraceListener} is stored here so it can be easily removed in a + // later call to {@link //setTrace}{@code (false)}. The listener itself is + // implemented as a parser listener so p.field is not directly used by + // other parser methods. + p.tracer = nil + // The list of {@link ParseTreeListener} listeners registered to receive + // events during the parse. + p.parseListeners = nil + // The number of syntax errors Reported during parsing. p.value is + // incremented each time {@link //NotifyErrorListeners} is called. + p._SyntaxErrors = 0 + p.SetInputStream(input) + + return p +} + +// p.field maps from the serialized ATN string to the deserialized {@link +// ATN} with +// bypass alternatives. +// +// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() +var bypassAltsAtnCache = make(map[string]int) + +// reset the parser's state// +func (p *BaseParser) reset() { + if p.input != nil { + p.input.Seek(0) + } + p.errHandler.reset(p) + p.ctx = nil + p._SyntaxErrors = 0 + p.SetTrace(nil) + p.precedenceStack = make([]int, 0) + p.precedenceStack.Push(0) + if p.Interpreter != nil { + p.Interpreter.reset() + } +} + +func (p *BaseParser) GetErrorHandler() ErrorStrategy { + return p.errHandler +} + +func (p *BaseParser) SetErrorHandler(e ErrorStrategy) { + p.errHandler = e +} + +// Match current input symbol against {@code ttype}. If the symbol type +// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are +// called to complete the Match process. +// +//If the symbol type does not Match, +// {@link ANTLRErrorStrategy//recoverInline} is called on the current error +// strategy to attempt recovery. If {@link //getBuildParseTree} is +// {@code true} and the token index of the symbol returned by +// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to +// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+// +// @param ttype the token type to Match +// @return the Matched symbol +// @panics RecognitionException if the current input symbol did not Match +// {@code ttype} and the error strategy could not recover from the +// mismatched symbol + +func (p *BaseParser) Match(ttype int) Token { + + t := p.GetCurrentToken() + + if t.GetTokenType() == ttype { + p.errHandler.ReportMatch(p) + p.Consume() + } else { + t = p.errHandler.RecoverInline(p) + if p.BuildParseTrees && t.GetTokenIndex() == -1 { + // we must have conjured up a Newtoken during single token + // insertion + // if it's not the current symbol + p.ctx.AddErrorNode(t) + } + } + + return t +} + +// Match current input symbol as a wildcard. If the symbol type Matches +// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch} +// and {@link //consume} are called to complete the Match process. +// +//If the symbol type does not Match, +// {@link ANTLRErrorStrategy//recoverInline} is called on the current error +// strategy to attempt recovery. If {@link //getBuildParseTree} is +// {@code true} and the token index of the symbol returned by +// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to +// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+// +// @return the Matched symbol +// @panics RecognitionException if the current input symbol did not Match +// a wildcard and the error strategy could not recover from the mismatched +// symbol + +func (p *BaseParser) MatchWildcard() Token { + t := p.GetCurrentToken() + if t.GetTokenType() > 0 { + p.errHandler.ReportMatch(p) + p.Consume() + } else { + t = p.errHandler.RecoverInline(p) + if p.BuildParseTrees && t.GetTokenIndex() == -1 { + // we must have conjured up a Newtoken during single token + // insertion + // if it's not the current symbol + p.ctx.AddErrorNode(t) + } + } + return t +} + +func (p *BaseParser) GetParserRuleContext() ParserRuleContext { + return p.ctx +} + +func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) { + p.ctx = v +} + +func (p *BaseParser) GetParseListeners() []ParseTreeListener { + if p.parseListeners == nil { + return make([]ParseTreeListener, 0) + } + return p.parseListeners +} + +// Registers {@code listener} to receive events during the parsing process. +// +//To support output-preserving grammar transformations (including but not +// limited to left-recursion removal, automated left-factoring, and +// optimized code generation), calls to listener methods during the parse +// may differ substantially from calls made by +// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In +// particular, rule entry and exit events may occur in a different order +// during the parse than after the parser. In addition, calls to certain +// rule entry methods may be omitted.
+// +//With the following specific exceptions, calls to listener events are +// deterministic, i.e. for identical input the calls to listener +// methods will be the same.
+// +//If {@code listener} is {@code nil} or has not been added as a parse +// listener, p.method does nothing.
+// @param listener the listener to remove +func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { + + if p.parseListeners != nil { + + idx := -1 + for i, v := range p.parseListeners { + if v == listener { + idx = i + break + } + } + + if idx == -1 { + return + } + + // remove the listener from the slice + p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...) + + if len(p.parseListeners) == 0 { + p.parseListeners = nil + } + } +} + +// Remove all parse listeners. +func (p *BaseParser) removeParseListeners() { + p.parseListeners = nil +} + +// Notify any parse listeners of an enter rule event. +func (p *BaseParser) TriggerEnterRuleEvent() { + if p.parseListeners != nil { + ctx := p.ctx + for _, listener := range p.parseListeners { + listener.EnterEveryRule(ctx) + ctx.EnterRule(listener) + } + } +} + +// Notify any parse listeners of an exit rule event. +// +// @see //addParseListener +func (p *BaseParser) TriggerExitRuleEvent() { + if p.parseListeners != nil { + // reverse order walk of listeners + ctx := p.ctx + l := len(p.parseListeners) - 1 + + for i := range p.parseListeners { + listener := p.parseListeners[l-i] + ctx.ExitRule(listener) + listener.ExitEveryRule(ctx) + } + } +} + +func (p *BaseParser) GetInterpreter() *ParserATNSimulator { + return p.Interpreter +} + +func (p *BaseParser) GetATN() *ATN { + return p.Interpreter.atn +} + +func (p *BaseParser) GetTokenFactory() TokenFactory { + return p.input.GetTokenSource().GetTokenFactory() +} + +// Tell our token source and error strategy about a Newway to create tokens.// +func (p *BaseParser) setTokenFactory(factory TokenFactory) { + p.input.GetTokenSource().setTokenFactory(factory) +} + +// The ATN with bypass alternatives is expensive to create so we create it +// lazily. +// +// @panics UnsupportedOperationException if the current parser does not +// implement the {@link //getSerializedATN()} method. +func (p *BaseParser) GetATNWithBypassAlts() { + + // TODO + panic("Not implemented!") + + // serializedAtn := p.getSerializedATN() + // if (serializedAtn == nil) { + // panic("The current parser does not support an ATN with bypass alternatives.") + // } + // result := p.bypassAltsAtnCache[serializedAtn] + // if (result == nil) { + // deserializationOptions := NewATNDeserializationOptions(nil) + // deserializationOptions.generateRuleBypassTransitions = true + // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn) + // p.bypassAltsAtnCache[serializedAtn] = result + // } + // return result +} + +// The preferred method of getting a tree pattern. 
For example, here's a +// sample use: +// +//+// ParseTree t = parser.expr() +// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", +// MyParser.RULE_expr) +// ParseTreeMatch m = p.Match(t) +// String id = m.Get("ID") +//+ +func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { + + panic("NewParseTreePatternMatcher not implemented!") + // + // if (lexer == nil) { + // if (p.GetTokenStream() != nil) { + // tokenSource := p.GetTokenStream().GetTokenSource() + // if _, ok := tokenSource.(ILexer); ok { + // lexer = tokenSource + // } + // } + // } + // if (lexer == nil) { + // panic("Parser can't discover a lexer to use") + // } + + // m := NewParseTreePatternMatcher(lexer, p) + // return m.compile(pattern, patternRuleIndex) +} + +func (p *BaseParser) GetInputStream() IntStream { + return p.GetTokenStream() +} + +func (p *BaseParser) SetInputStream(input TokenStream) { + p.SetTokenStream(input) +} + +func (p *BaseParser) GetTokenStream() TokenStream { + return p.input +} + +// Set the token stream and reset the parser.// +func (p *BaseParser) SetTokenStream(input TokenStream) { + p.input = nil + p.reset() + p.input = input +} + +// Match needs to return the current input symbol, which gets put +// into the label for the associated token ref e.g., x=ID. +func (p *BaseParser) GetCurrentToken() Token { + return p.input.LT(1) +} + +func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) { + if offendingToken == nil { + offendingToken = p.GetCurrentToken() + } + p._SyntaxErrors++ + line := offendingToken.GetLine() + column := offendingToken.GetColumn() + listener := p.GetErrorListenerDispatch() + listener.SyntaxError(p, offendingToken, line, column, msg, err) +} + +func (p *BaseParser) Consume() Token { + o := p.GetCurrentToken() + if o.GetTokenType() != TokenEOF { + p.GetInputStream().Consume() + } + hasListener := p.parseListeners != nil && len(p.parseListeners) > 0 + if p.BuildParseTrees || hasListener { + if p.errHandler.InErrorRecoveryMode(p) { + node := p.ctx.AddErrorNode(o) + if p.parseListeners != nil { + for _, l := range p.parseListeners { + l.VisitErrorNode(node) + } + } + + } else { + node := p.ctx.AddTokenNode(o) + if p.parseListeners != nil { + for _, l := range p.parseListeners { + l.VisitTerminal(node) + } + } + } + // node.invokingState = p.state + } + + return o +} + +func (p *BaseParser) addContextToParseTree() { + // add current context to parent if we have a parent + if p.ctx.GetParent() != nil { + p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx) + } +} + +func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) { + p.SetState(state) + p.ctx = localctx + p.ctx.SetStart(p.input.LT(1)) + if p.BuildParseTrees { + p.addContextToParseTree() + } + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() + } +} + +func (p *BaseParser) ExitRule() { + p.ctx.SetStop(p.input.LT(-1)) + // trigger event on ctx, before it reverts to parent + if p.parseListeners != nil { + p.TriggerExitRuleEvent() + } + p.SetState(p.ctx.GetInvokingState()) + if p.ctx.GetParent() != nil { + p.ctx = p.ctx.GetParent().(ParserRuleContext) + } else { + p.ctx = nil + } +} + +func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { + localctx.SetAltNumber(altNum) + // if we have Newlocalctx, make sure we replace existing ctx + // that is previous child of parse tree + if p.BuildParseTrees && p.ctx != localctx { + if p.ctx.GetParent() != nil { + 
p.ctx.GetParent().(ParserRuleContext).RemoveLastChild() + p.ctx.GetParent().(ParserRuleContext).AddChild(localctx) + } + } + p.ctx = localctx +} + +// Get the precedence level for the top-most precedence rule. +// +// @return The precedence level for the top-most precedence rule, or -1 if +// the parser context is not nested within a precedence rule. + +func (p *BaseParser) GetPrecedence() int { + if len(p.precedenceStack) == 0 { + return -1 + } + + return p.precedenceStack[len(p.precedenceStack)-1] +} + +func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) { + p.SetState(state) + p.precedenceStack.Push(precedence) + p.ctx = localctx + p.ctx.SetStart(p.input.LT(1)) + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() // simulates rule entry for + // left-recursive rules + } +} + +// +// Like {@link //EnterRule} but for recursive rules. + +func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) { + previous := p.ctx + previous.SetParent(localctx) + previous.SetInvokingState(state) + previous.SetStop(p.input.LT(-1)) + + p.ctx = localctx + p.ctx.SetStart(previous.GetStart()) + if p.BuildParseTrees { + p.ctx.AddChild(previous) + } + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() // simulates rule entry for + // left-recursive rules + } +} + +func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { + p.precedenceStack.Pop() + p.ctx.SetStop(p.input.LT(-1)) + retCtx := p.ctx // save current ctx (return value) + // unroll so ctx is as it was before call to recursive method + if p.parseListeners != nil { + for p.ctx != parentCtx { + p.TriggerExitRuleEvent() + p.ctx = p.ctx.GetParent().(ParserRuleContext) + } + } else { + p.ctx = parentCtx + } + // hook into tree + retCtx.SetParent(parentCtx) + if p.BuildParseTrees && parentCtx != nil { + // add return ctx into invoking rule's tree + parentCtx.AddChild(retCtx) + } +} + +func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { + ctx := p.ctx + for ctx != nil { + if ctx.GetRuleIndex() == ruleIndex { + return ctx + } + ctx = ctx.GetParent().(ParserRuleContext) + } + return nil +} + +func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool { + return precedence >= p.precedenceStack[len(p.precedenceStack)-1] +} + +func (p *BaseParser) inContext(context ParserRuleContext) bool { + // TODO: useful in parser? + return false +} + +// +// Checks whether or not {@code symbol} can follow the current state in the +// ATN. The behavior of p.method is equivalent to the following, but is +// implemented such that the complete context-sensitive follow set does not +// need to be explicitly constructed. +// +//
+// return getExpectedTokens().contains(symbol) +//+// +// @param symbol the symbol type to check +// @return {@code true} if {@code symbol} can follow the current state in +// the ATN, otherwise {@code false}. + +func (p *BaseParser) IsExpectedToken(symbol int) bool { + atn := p.Interpreter.atn + ctx := p.ctx + s := atn.states[p.state] + following := atn.NextTokens(s, nil) + if following.contains(symbol) { + return true + } + if !following.contains(TokenEpsilon) { + return false + } + for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { + invokingState := atn.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + following = atn.NextTokens(rt.(*RuleTransition).followState, nil) + if following.contains(symbol) { + return true + } + ctx = ctx.GetParent().(ParserRuleContext) + } + if following.contains(TokenEpsilon) && symbol == TokenEOF { + return true + } + + return false +} + +// Computes the set of input symbols which could follow the current parser +// state and context, as given by {@link //GetState} and {@link //GetContext}, +// respectively. +// +// @see ATN//getExpectedTokens(int, RuleContext) +func (p *BaseParser) GetExpectedTokens() *IntervalSet { + return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) +} + +func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { + atn := p.Interpreter.atn + s := atn.states[p.state] + return atn.NextTokens(s, nil) +} + +// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// +func (p *BaseParser) GetRuleIndex(ruleName string) int { + var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] + if ok { + return ruleIndex + } + + return -1 +} + +// Return List<String> of the rule names in your parser instance +// leading up to a call to the current rule. You could override if +// you want more details such as the file/line info of where +// in the ATN a rule is invoked. +// +// this very useful for error messages. + +func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { + if c == nil { + c = p.ctx + } + stack := make([]string, 0) + for c != nil { + // compute what follows who invoked us + ruleIndex := c.GetRuleIndex() + if ruleIndex < 0 { + stack = append(stack, "n/a") + } else { + stack = append(stack, p.GetRuleNames()[ruleIndex]) + } + + vp := c.GetParent() + + if vp == nil { + break + } + + c = vp.(ParserRuleContext) + } + return stack +} + +// For debugging and other purposes.// +func (p *BaseParser) GetDFAStrings() string { + return fmt.Sprint(p.Interpreter.decisionToDFA) +} + +// For debugging and other purposes.// +func (p *BaseParser) DumpDFA() { + seenOne := false + for _, dfa := range p.Interpreter.decisionToDFA { + if dfa.states.Len() > 0 { + if seenOne { + fmt.Println() + } + fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":") + fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames)) + seenOne = true + } + } +} + +func (p *BaseParser) GetSourceName() string { + return p.GrammarFileName +} + +// During a parse is sometimes useful to listen in on the rule entry and exit +// events as well as token Matches. p.is for quick and dirty debugging. 
+func (p *BaseParser) SetTrace(trace *TraceListener) { + if trace == nil { + p.RemoveParseListener(p.tracer) + p.tracer = nil + } else { + if p.tracer != nil { + p.RemoveParseListener(p.tracer) + } + p.tracer = NewTraceListener(p) + p.AddParseListener(p.tracer) + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go similarity index 94% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go index 888d512975a..8bcc46a0d99 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -11,11 +11,11 @@ import ( ) var ( - ParserATNSimulatorDebug = false - ParserATNSimulatorListATNDecisions = false - ParserATNSimulatorDFADebug = false - ParserATNSimulatorRetryDebug = false - TurnOffLRLoopEntryBranchOpt = false + ParserATNSimulatorDebug = false + ParserATNSimulatorTraceATNSim = false + ParserATNSimulatorDFADebug = false + ParserATNSimulatorRetryDebug = false + TurnOffLRLoopEntryBranchOpt = false ) type ParserATNSimulator struct { @@ -70,8 +70,8 @@ func (p *ParserATNSimulator) reset() { } func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int { - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { - fmt.Println("AdaptivePredict decision " + strconv.Itoa(decision) + + if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { + fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) + " exec LA(1)==" + p.getLookaheadName(input) + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) @@ -111,15 +111,15 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou if s0 == nil { if outerContext == nil { - outerContext = RuleContextEmpty + outerContext = ParserRuleContextEmpty } - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { + if ParserATNSimulatorDebug { fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + " exec LA(1)==" + p.getLookaheadName(input) + ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) } fullCtx := false - s0Closure := p.computeStartState(dfa.atnStartState, RuleContextEmpty, fullCtx) + s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx) p.atn.stateMu.Lock() if dfa.getPrecedenceDfa() { @@ -174,17 +174,18 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou // Reporting insufficient predicates // cover these cases: -// dead end -// single alt -// single alt + preds -// conflict -// conflict + preds // +// dead end +// single alt +// single alt + preds +// conflict +// conflict + preds func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { + if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + - " exec 
LA(1)==" + p.getLookaheadName(input) + + ", DFA state " + s0.String() + + ", LA(1)==" + p.getLookaheadName(input) + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) } @@ -277,8 +278,6 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, t = input.LA(1) } } - - panic("Should not have reached p state") } // Get an existing target state for an edge in the DFA. If the target state @@ -384,7 +383,7 @@ func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState // comes back with reach.uniqueAlt set to a valid alt func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { + if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { fmt.Println("execATNWithFullContext " + s0.String()) } @@ -492,9 +491,6 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 AT } func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet { - if ParserATNSimulatorDebug { - fmt.Println("in computeReachSet, starting closure: " + closure.String()) - } if p.mergeCache == nil { p.mergeCache = NewDoubleDict() } @@ -570,7 +566,7 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt // if reach == nil { reach = NewBaseATNConfigSet(fullCtx) - closureBusy := newArray2DHashSet(nil, nil) + closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) treatEOFAsEpsilon := t == TokenEOF amount := len(intermediate.configs) for k := 0; k < amount; k++ { @@ -610,6 +606,11 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt reach.Add(skippedStopStates[l], p.mergeCache) } } + + if ParserATNSimulatorTraceATNSim { + fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String()) + } + if len(reach.GetItems()) == 0 { return nil } @@ -617,7 +618,6 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt return reach } -// // Return a configuration set containing only the configurations from // {@code configs} which are in a {@link RuleStopState}. 
If all // configurations in {@code configs} are already in a rule stop state, p @@ -636,7 +636,6 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt // @return {@code configs} if all configurations in {@code configs} are in a // rule stop state, otherwise return a Newconfiguration set containing only // the configurations from {@code configs} which are in a rule stop state -// func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet { if PredictionModeallConfigsInRuleStopStates(configs) { return configs @@ -662,16 +661,20 @@ func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, full // always at least the implicit call to start rule initialContext := predictionContextFromRuleContext(p.atn, ctx) configs := NewBaseATNConfigSet(fullCtx) + if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { + fmt.Println("computeStartState from ATN state " + a.String() + + " initialContext=" + initialContext.String()) + } + for i := 0; i < len(a.GetTransitions()); i++ { target := a.GetTransitions()[i].getTarget() c := NewBaseATNConfig6(target, i+1, initialContext) - closureBusy := newArray2DHashSet(nil, nil) + closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst) p.closure(c, configs, closureBusy, true, fullCtx, false) } return configs } -// // This method transforms the start state computed by // {@link //computeStartState} to the special start state used by a // precedence DFA for a particular precedence value. The transformation @@ -726,7 +729,6 @@ func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, full // @return The transformed configuration set representing the start state // for a precedence DFA at a particular precedence level (determined by // calling {@link Parser//getPrecedence}). -// func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet { statesFromAlt1 := make(map[int]PredictionContext) @@ -760,7 +762,7 @@ func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConf // (basically a graph subtraction algorithm). if !config.getPrecedenceFilterSuppressed() { context := statesFromAlt1[config.GetState().GetStateNumber()] - if context != nil && context.equals(config.GetContext()) { + if context != nil && context.Equals(config.GetContext()) { // eliminated continue } @@ -824,7 +826,6 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre return pairs } -// // This method is used to improve the localization of error messages by // choosing an alternative rather than panicing a // {@link NoViableAltException} in particular prediction scenarios where the @@ -869,7 +870,6 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre // @return The value to return from {@link //AdaptivePredict}, or // {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not // identified and {@link //AdaptivePredict} should Report an error instead. -// func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int { cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) semValidConfigs := cfgs[0] @@ -938,11 +938,11 @@ func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigS } // Look through a list of predicate/alt pairs, returning alts for the -// pairs that win. 
A {@code NONE} predicate indicates an alt containing an -// unpredicated config which behaves as "always true." If !complete -// then we stop at the first predicate that evaluates to true. This -// includes pairs with nil predicates. // +// pairs that win. A {@code NONE} predicate indicates an alt containing an +// unpredicated config which behaves as "always true." If !complete +// then we stop at the first predicate that evaluates to true. This +// includes pairs with nil predicates. func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { predictions := NewBitSet() for i := 0; i < len(predPredictions); i++ { @@ -972,16 +972,16 @@ func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPredicti return predictions } -func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx, treatEOFAsEpsilon bool) { +func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) { initialDepth := 0 p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, fullCtx, initialDepth, treatEOFAsEpsilon) } -func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - if ParserATNSimulatorDebug { +func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + if ParserATNSimulatorTraceATNSim { fmt.Println("closure(" + config.String() + ")") - fmt.Println("configs(" + configs.String() + ")") + //fmt.Println("configs(" + configs.String() + ")") if config.GetReachesIntoOuterContext() > 50 { panic("problem") } @@ -1031,7 +1031,7 @@ func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs } // Do the actual work of walking epsilon edges// -func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { +func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { state := config.GetState() // optimization if !state.GetEpsilonOnlyTransitions() { @@ -1066,7 +1066,8 @@ func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1) - if closureBusy.Add(c) != c { + _, present := closureBusy.Put(c) + if present { // avoid infinite recursion for right-recursive rules continue } @@ -1077,9 +1078,13 @@ func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, fmt.Println("dips into outer ctx: " + c.String()) } } else { - if !t.getIsEpsilon() && closureBusy.Add(c) != c { - // avoid infinite recursion for EOF* and EOF+ - continue + + if !t.getIsEpsilon() { + _, present := closureBusy.Put(c) + if present { + // avoid infinite recursion for EOF* and EOF+ + continue + } } if _, ok := t.(*RuleTransition); ok { // latch when newDepth goes negative - once we step out of the entry context we can't return @@ -1104,7 +1109,16 @@ func (p *ParserATNSimulator) 
canDropLoopEntryEdgeInLeftRecursiveRule(config ATNC // left-recursion elimination. For efficiency, also check if // the context has an empty stack case. If so, it would mean // global FOLLOW so we can't perform optimization - if startLoop, ok := _p.(StarLoopEntryState); !ok || !startLoop.precedenceRuleDecision || config.GetContext().isEmpty() || config.GetContext().hasEmptyPath() { + if _p.GetStateType() != ATNStateStarLoopEntry { + return false + } + startLoop, ok := _p.(*StarLoopEntryState) + if !ok { + return false + } + if !startLoop.precedenceRuleDecision || + config.GetContext().isEmpty() || + config.GetContext().hasEmptyPath() { return false } @@ -1117,8 +1131,8 @@ func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNC return false } } - - decisionStartState := _p.(BlockStartState).GetTransitions()[0].getTarget().(BlockStartState) + x := _p.GetTransitions()[0].getTarget() + decisionStartState := x.(BlockStartState) blockEndStateNum := decisionStartState.getEndState().stateNumber blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState) @@ -1355,13 +1369,12 @@ func (p *ParserATNSimulator) GetTokenName(t int) string { return "EOF" } - if p.parser != nil && p.parser.GetLiteralNames() != nil { - if t >= len(p.parser.GetLiteralNames()) { - fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(p.parser.GetLiteralNames(), ",")) - // fmt.Println(p.parser.GetInputStream().(TokenStream).GetAllText()) // p seems incorrect - } else { - return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">" - } + if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) { + return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">" + } + + if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetSymbolicNames()) { + return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">" } return strconv.Itoa(t) @@ -1372,9 +1385,9 @@ func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string { } // Used for debugging in AdaptivePredict around execATN but I cut -// it out for clarity now that alg. works well. We can leave p -// "dead" code for a bit. // +// it out for clarity now that alg. works well. We can leave p +// "dead" code for a bit. func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) { panic("Not implemented") @@ -1421,7 +1434,6 @@ func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { return alt } -// // Add an edge to the DFA, if possible. This method calls // {@link //addDFAState} to ensure the {@code to} state is present in the // DFA. If {@code from} is {@code nil}, or if {@code t} is outside the @@ -1440,7 +1452,6 @@ func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { // @return If {@code to} is {@code nil}, p method returns {@code nil} // otherwise p method returns the result of calling {@link //addDFAState} // on {@code to} -// func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { if ParserATNSimulatorDebug { fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t)) @@ -1472,7 +1483,6 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA return to } -// // Add state {@code D} to the DFA if it is not already present, and return // the actual instance stored in the DFA. If a state equivalent to {@code D} // is already in the DFA, the existing state is returned. 
Otherwise p @@ -1486,25 +1496,30 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA // @return The state stored in the DFA. This will be either the existing // state if {@code D} is already in the DFA, or {@code D} itself if the // state was not already present. -// func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { if d == ATNSimulatorError { return d } - hash := d.hash() - existing, ok := dfa.getState(hash) - if ok { + existing, present := dfa.states.Get(d) + if present { + if ParserATNSimulatorTraceATNSim { + fmt.Print("addDFAState " + d.String() + " exists") + } return existing } - d.stateNumber = dfa.numStates() + + // The state was not present, so update it with configs + // + d.stateNumber = dfa.states.Len() if !d.configs.ReadOnly() { d.configs.OptimizeConfigs(p.BaseATNSimulator) d.configs.SetReadOnly(true) } - dfa.setState(hash, d) - if ParserATNSimulatorDebug { - fmt.Println("adding NewDFA state: " + d.String()) + dfa.states.Put(d) + if ParserATNSimulatorTraceATNSim { + fmt.Println("addDFAState new " + d.String()) } + return d } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go similarity index 98% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go index 49cd10c5ffc..1c8cee74795 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -340,7 +340,7 @@ func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) s return s } -var RuleContextEmpty = NewBaseParserRuleContext(nil, -1) +var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1) type InterpreterRuleContext interface { ParserRuleContext diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go similarity index 81% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go index 9fdfd52b26c..ba62af36108 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go @@ -1,10 +1,12 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. 
package antlr import ( + "fmt" + "golang.org/x/exp/slices" "strconv" ) @@ -26,10 +28,10 @@ var ( ) type PredictionContext interface { - hash() int + Hash() int + Equals(interface{}) bool GetParent(int) PredictionContext getReturnState(int) int - equals(PredictionContext) bool length() int isEmpty() bool hasEmptyPath() bool @@ -53,7 +55,7 @@ func (b *BasePredictionContext) isEmpty() bool { func calculateHash(parent PredictionContext, returnState int) int { h := murmurInit(1) - h = murmurUpdate(h, parent.hash()) + h = murmurUpdate(h, parent.Hash()) h = murmurUpdate(h, returnState) return murmurFinish(h, 2) } @@ -86,7 +88,6 @@ func NewPredictionContextCache() *PredictionContextCache { // Add a context to the cache and return it. If the context already exists, // return that one instead and do not add a Newcontext to the cache. // Protect shared cache from unsafe thread access. -// func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { if ctx == BasePredictionContextEMPTY { return BasePredictionContextEMPTY @@ -160,28 +161,28 @@ func (b *BaseSingletonPredictionContext) hasEmptyPath() bool { return b.returnState == BasePredictionContextEmptyReturnState } -func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool { +func (b *BaseSingletonPredictionContext) Hash() int { + return b.cachedHash +} + +func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool { if b == other { return true - } else if _, ok := other.(*BaseSingletonPredictionContext); !ok { + } + if _, ok := other.(*BaseSingletonPredictionContext); !ok { return false - } else if b.hash() != other.hash() { - return false // can't be same if hash is different } otherP := other.(*BaseSingletonPredictionContext) - if b.returnState != other.getReturnState(0) { + if b.returnState != otherP.getReturnState(0) { return false - } else if b.parentCtx == nil { + } + if b.parentCtx == nil { return otherP.parentCtx == nil } - return b.parentCtx.equals(otherP.parentCtx) -} - -func (b *BaseSingletonPredictionContext) hash() int { - return b.cachedHash + return b.parentCtx.Equals(otherP.parentCtx) } func (b *BaseSingletonPredictionContext) String() string { @@ -215,7 +216,7 @@ func NewEmptyPredictionContext() *EmptyPredictionContext { p := new(EmptyPredictionContext) p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState) - + p.cachedHash = calculateEmptyHash() return p } @@ -231,7 +232,11 @@ func (e *EmptyPredictionContext) getReturnState(index int) int { return e.returnState } -func (e *EmptyPredictionContext) equals(other PredictionContext) bool { +func (e *EmptyPredictionContext) Hash() int { + return e.cachedHash +} + +func (e *EmptyPredictionContext) Equals(other interface{}) bool { return e == other } @@ -254,7 +259,7 @@ func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) hash := murmurInit(1) for _, parent := range parents { - hash = murmurUpdate(hash, parent.hash()) + hash = murmurUpdate(hash, parent.Hash()) } for _, returnState := range returnStates { @@ -298,18 +303,31 @@ func (a *ArrayPredictionContext) getReturnState(index int) int { return a.returnStates[index] } -func (a *ArrayPredictionContext) equals(other PredictionContext) bool { - if _, ok := other.(*ArrayPredictionContext); !ok { +// Equals is the default comparison function for ArrayPredictionContext when no specialized +// implementation is needed for a collection +func (a *ArrayPredictionContext) Equals(o interface{}) bool { + if a == o { 
+ return true + } + other, ok := o.(*ArrayPredictionContext) + if !ok { return false - } else if a.cachedHash != other.hash() { + } + if a.cachedHash != other.Hash() { return false // can't be same if hash is different - } else { - otherP := other.(*ArrayPredictionContext) - return &a.returnStates == &otherP.returnStates && &a.parents == &otherP.parents } + + // Must compare the actual array elements and not just the array address + // + return slices.Equal(a.returnStates, other.returnStates) && + slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool { + return x.Equals(y) + }) } -func (a *ArrayPredictionContext) hash() int { +// Hash is the default hash function for ArrayPredictionContext when no specialized +// implementation is needed for a collection +func (a *ArrayPredictionContext) Hash() int { return a.BasePredictionContext.cachedHash } @@ -343,11 +361,11 @@ func (a *ArrayPredictionContext) String() string { // / func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { if outerContext == nil { - outerContext = RuleContextEmpty + outerContext = ParserRuleContextEmpty } // if we are in RuleContext of start rule, s, then BasePredictionContext // is EMPTY. Nobody called us. (if we are empty, return empty) - if outerContext.GetParent() == nil || outerContext == RuleContextEmpty { + if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty { return BasePredictionContextEMPTY } // If we have a parent, convert it to a BasePredictionContext graph @@ -359,11 +377,20 @@ func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) Predicti } func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - // share same graph if both same - if a == b { + + // Share same graph if both same + // + if a == b || a.Equals(b) { return a } + // In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test + // in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created + // from it. + // In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion + // will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from + // either of them. + ac, ok1 := a.(*BaseSingletonPredictionContext) bc, ok2 := b.(*BaseSingletonPredictionContext) @@ -380,17 +407,32 @@ func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) return b } } - // convert singleton so both are arrays to normalize - if _, ok := a.(*BaseSingletonPredictionContext); ok { - a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) + + // Convert Singleton or Empty so both are arrays to normalize - We should not use the existing parameters + // here. 
+ // + // TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here + + var arp, arb *ArrayPredictionContext + var ok bool + if arp, ok = a.(*ArrayPredictionContext); ok { + } else if _, ok = a.(*BaseSingletonPredictionContext); ok { + arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) + } else if _, ok = a.(*EmptyPredictionContext); ok { + arp = NewArrayPredictionContext([]PredictionContext{}, []int{}) } - if _, ok := b.(*BaseSingletonPredictionContext); ok { - b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) + + if arb, ok = b.(*ArrayPredictionContext); ok { + } else if _, ok = b.(*BaseSingletonPredictionContext); ok { + arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) + } else if _, ok = b.(*EmptyPredictionContext); ok { + arb = NewArrayPredictionContext([]PredictionContext{}, []int{}) } - return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache) + + // Both arp and arb + return mergeArrays(arp, arb, rootIsWildcard, mergeCache) } -// // Merge two {@link SingletonBasePredictionContext} instances. // //
Stack tops equal, parents merge is same return left graph.
@@ -423,11 +465,11 @@ func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict)
// /
func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
if mergeCache != nil {
- previous := mergeCache.Get(a.hash(), b.hash())
+ previous := mergeCache.Get(a.Hash(), b.Hash())
if previous != nil {
return previous.(PredictionContext)
}
- previous = mergeCache.Get(b.hash(), a.hash())
+ previous = mergeCache.Get(b.Hash(), a.Hash())
if previous != nil {
return previous.(PredictionContext)
}
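
Editor's note: the hunk above re-keys the merge cache on the exported Hash() values of both contexts, checked in both orders. The runtime's DoubleDict is not shown in this diff, so the sketch below is only a hypothetical two-level map illustrating the same pair-keyed lookup pattern, not the vendored implementation.

package main

import "fmt"

// pairCache is a hypothetical stand-in for the runtime's DoubleDict:
// a cache keyed by an ordered pair of int hashes.
type pairCache struct {
	data map[int]map[int]any
}

func newPairCache() *pairCache {
	return &pairCache{data: map[int]map[int]any{}}
}

// Get returns the cached value for (k1, k2), or nil if absent.
func (c *pairCache) Get(k1, k2 int) any {
	if inner, ok := c.data[k1]; ok {
		return inner[k2]
	}
	return nil
}

// Set stores v under (k1, k2).
func (c *pairCache) Set(k1, k2 int, v any) {
	inner, ok := c.data[k1]
	if !ok {
		inner = map[int]any{}
		c.data[k1] = inner
	}
	inner[k2] = v
}

func main() {
	cache := newPairCache()
	aHash, bHash := 17, 42 // stand-ins for a.Hash() and b.Hash()

	// Look up the pair in both orders, as mergeSingletons does above.
	if v := cache.Get(aHash, bHash); v != nil {
		fmt.Println("hit:", v)
	} else if v := cache.Get(bHash, aHash); v != nil {
		fmt.Println("hit (reversed):", v)
	} else {
		cache.Set(aHash, bHash, "merged-context")
		fmt.Println("miss: cached merge result")
	}
}
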
@@ -436,7 +478,7 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool,
rootMerge := mergeRoot(a, b, rootIsWildcard)
if rootMerge != nil {
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), rootMerge)
+ mergeCache.set(a.Hash(), b.Hash(), rootMerge)
}
return rootMerge
}
@@ -456,7 +498,7 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool,
// Newjoined parent so create Newsingleton pointing to it, a'
spc := SingletonBasePredictionContextCreate(parent, a.returnState)
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), spc)
+ mergeCache.set(a.Hash(), b.Hash(), spc)
}
return spc
}
@@ -478,7 +520,7 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool,
parents := []PredictionContext{singleParent, singleParent}
apc := NewArrayPredictionContext(parents, payloads)
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), apc)
+ mergeCache.set(a.Hash(), b.Hash(), apc)
}
return apc
}
@@ -494,12 +536,11 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool,
}
apc := NewArrayPredictionContext(parents, payloads)
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), apc)
+ mergeCache.set(a.Hash(), b.Hash(), apc)
}
return apc
}
-//
// Handle case where at least one of {@code a} or {@code b} is
// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
// to represent {@link //EMPTY}.
@@ -561,7 +602,6 @@ func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionC
return nil
}
-//
// Merge two {@link ArrayBasePredictionContext} instances.
//
//
Different tops, different parents.
@@ -583,12 +623,18 @@ func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionC
// /
func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
if mergeCache != nil {
- previous := mergeCache.Get(a.hash(), b.hash())
+ previous := mergeCache.Get(a.Hash(), b.Hash())
if previous != nil {
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
return previous.(PredictionContext)
}
- previous = mergeCache.Get(b.hash(), a.hash())
+ previous = mergeCache.Get(b.Hash(), a.Hash())
if previous != nil {
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
return previous.(PredictionContext)
}
}
@@ -608,7 +654,7 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
payload := a.returnStates[i]
// $+$ = $
bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
- axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax
+ axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax
// ->
// ax
if bothDollars || axAX {
@@ -651,7 +697,7 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
if k == 1 { // for just one merged element, return singleton top
pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), pc)
+ mergeCache.set(a.Hash(), b.Hash(), pc)
}
return pc
}
@@ -663,27 +709,36 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
// if we created same array as a or b, return that instead
// TODO: track whether this is possible above during merge sort for speed
+ // TODO: In go, I do not think we can just do M == xx as M is a brand new allocation. This could be causing allocation problems
if M == a {
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), a)
+ mergeCache.set(a.Hash(), b.Hash(), a)
+ }
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
}
return a
}
if M == b {
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), b)
+ mergeCache.set(a.Hash(), b.Hash(), b)
+ }
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
}
return b
}
combineCommonParents(mergedParents)
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), M)
+ mergeCache.set(a.Hash(), b.Hash(), M)
+ }
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
}
return M
}
-//
// Make pass over all M {@code parents} merge any {@code equals()}
// ones.
// /
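
Editor's note: a recurring theme in the hunks above is that the old runtime compared slice-backed values by address (for example &l.lexerActions == &othert.lexerActions, or the M == a check that the new TODO flags), which can never hold for a freshly allocated result; the new code compares element-wise via Equals and golang.org/x/exp/slices. The sketch below is a minimal, self-contained illustration of that difference; the Ctx type and its field are hypothetical, not the vendored PredictionContext types.

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

// Ctx is a hypothetical slice-backed value, standing in for types such as
// ArrayPredictionContext or LexerActionExecutor.
type Ctx struct {
	returnStates []int
}

// Equals compares element-wise, the way the updated runtime does.
func (c *Ctx) Equals(o *Ctx) bool {
	return slices.Equal(c.returnStates, o.returnStates)
}

func main() {
	a := &Ctx{returnStates: []int{1, 2, 3}}

	// Simulate a merge that rebuilds an equivalent context in a new allocation.
	m := &Ctx{returnStates: append([]int(nil), a.returnStates...)}

	fmt.Println(m == a)                             // false: pointer identity never matches a new allocation
	fmt.Println(&m.returnStates == &a.returnStates) // false: comparing field addresses, the old bug
	fmt.Println(m.Equals(a))                        // true: element-wise comparison
}
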
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
similarity index 95%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
index 15718f912bc..7b9b72fab1e 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -70,7 +70,6 @@ const (
PredictionModeLLExactAmbigDetection = 2
)
-//
// Computes the SLL prediction termination condition.
//
//
@@ -108,9 +107,9 @@ const ( // The single-alt-state thing lets prediction continue upon rules like // (otherwise, it would admit defeat too soon):
// -//{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) '' }
+//{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ” }
// -//When the ATN simulation reaches the state before {@code ''}, it has a +//
When the ATN simulation reaches the state before {@code ”}, it has a // DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally // {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop // processing this node because alternative to has another way to continue, @@ -152,16 +151,15 @@ const ( // //
Before testing these configurations against others, we have to merge // {@code x} and {@code x'} (without modifying the existing configurations). -// For example, we test {@code (x+x')==x''} when looking for conflicts in +// For example, we test {@code (x+x')==x”} when looking for conflicts in // the following configurations.
// -//{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
+//{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {})}
// //If the configuration set has predicates (as indicated by // {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of // the configurations to strip out all of the predicates so that a standard // {@link ATNConfigSet} will merge everything ignoring predicates.
-// func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool { // Configs in rule stop states indicate reaching the end of the decision // rule (local context) or end of start rule (full context). If all @@ -229,7 +227,6 @@ func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { return true } -// // Full LL prediction termination. // //Can we stop looking ahead during ATN simulation or is there some @@ -334,7 +331,7 @@ func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { // // //
// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not // alt and not pred //-// func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet { - configToAlts := make(map[int]*BitSet) + configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst) for _, c := range configs.GetItems() { - key := 31 * c.GetState().GetStateNumber() + c.GetContext().hash() - alts, ok := configToAlts[key] + alts, ok := configToAlts.Get(c) if !ok { alts = NewBitSet() - configToAlts[key] = alts + configToAlts.Put(c, alts) } alts.add(c.GetAlt()) } - values := make([]*BitSet, 0, 10) - for _, v := range configToAlts { - values = append(values, v) - } - return values + return configToAlts.Values() } -// -// Get a map from state to alt subset from a configuration set. For each +// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each // configuration {@code c} in {@code configs}: // //
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
similarity index 92%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
index 93efcf355d8..bfe542d0914 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
 // Use of this file is governed by the BSD 3-clause license that
 // can be found in the LICENSE.txt file in the project root.
@@ -49,7 +49,7 @@ var tokenTypeMapCache = make(map[string]int)
 var ruleIndexMapCache = make(map[string]int)
 
 func (b *BaseRecognizer) checkVersion(toolVersion string) {
-	runtimeVersion := "4.10.1"
+	runtimeVersion := "4.12.0"
 	if runtimeVersion != toolVersion {
 		fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
 	}
@@ -108,7 +106,6 @@ func (b *BaseRecognizer) SetState(v int) {
 
 // Get a map from rule names to rule indexes.
 //
 //Used for XPath and tree pattern compilation.
-//
 func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
 	panic("Method not defined!")
@@ -171,18 +170,18 @@ func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
 }
 
 // How should a token be displayed in an error message? The default
-// is to display just the text, but during development you might
-// want to have a lot of information spit out. Override in that case
-// to use t.String() (which, for CommonToken, dumps everything about
-// the token). This is better than forcing you to override a method in
-// your token objects because you don't have to go modify your lexer
-// so that it creates a NewJava type.
+//
+// is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a NewJava type.
 //
 // @deprecated This method is not called by the ANTLR 4 Runtime. Specific
 // implementations of {@link ANTLRErrorStrategy} may provide a similar
 // feature when necessary. For example, see
 // {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
-//
 func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
 	if t == nil {
 		return "
 // The evaluation of predicates by a context is short-circuiting, but
 // unordered.
-//
 func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool {
 	for i := 0; i < len(a.opnds); i++ {
 		if !a.opnds[i].evaluate(parser, outerContext) {
@@ -304,18 +308,18 @@ func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) Semant
 	return result
 }
 
-func (a *AND) hash() int {
+func (a *AND) Hash() int {
 	h := murmurInit(37) // Init with a value different from OR
 	for _, op := range a.opnds {
-		h = murmurUpdate(h, op.hash())
+		h = murmurUpdate(h, op.Hash())
 	}
 	return murmurFinish(h, len(a.opnds))
 }
 
-func (a *OR) hash() int {
+func (a *OR) Hash() int {
 	h := murmurInit(41) // Init with a value different from AND
 	for _, op := range a.opnds {
-		h = murmurUpdate(h, op.hash())
+		h = murmurUpdate(h, op.Hash())
 	}
 	return murmurFinish(h, len(a.opnds))
 }
@@ -345,21 +349,21 @@ type OR struct {
 func NewOR(a, b SemanticContext) *OR {
-	operands := newArray2DHashSet(nil, nil)
+	operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
 	if aa, ok := a.(*OR); ok {
 		for _, o := range aa.opnds {
-			operands.Add(o)
+			operands.Put(o)
 		}
 	} else {
-		operands.Add(a)
+		operands.Put(a)
 	}
 
 	if ba, ok := b.(*OR); ok {
 		for _, o := range ba.opnds {
-			operands.Add(o)
+			operands.Put(o)
 		}
 	} else {
-		operands.Add(b)
+		operands.Put(b)
 	}
 	precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
 	if len(precedencePredicates) > 0 {
@@ -372,7 +376,7 @@ func NewOR(a, b SemanticContext) *OR {
 			}
 		}
-		operands.Add(reduced)
+		operands.Put(reduced)
 	}
 
 	vs := operands.Values()
@@ -388,14 +392,14 @@ func NewOR(a, b SemanticContext) *OR {
 	return o
 }
 
-func (o *OR) equals(other interface{}) bool {
+func (o *OR) Equals(other Collectable[SemanticContext]) bool {
 	if o == other {
 		return true
 	} else if _, ok := other.(*OR); !ok {
 		return false
 	} else {
 		for i, v := range other.(*OR).opnds {
-			if !o.opnds[i].equals(v) {
+			if !o.opnds[i].Equals(v) {
 				return false
 			}
 		}
@@ -406,7 +410,6 @@ func (o *OR) equals(other interface{}) bool {
 //
 // The evaluation of predicates by o context is short-circuiting, but
 // unordered.
-//
 func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool {
 	for i := 0; i < len(o.opnds); i++ {
 		if o.opnds[i].evaluate(parser, outerContext) {
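The semantic-context hunks follow the same runtime-wide pattern as the prediction-mode change: unexported `hash()`/`equals()` become exported `Hash()`/`Equals(Collectable[T])`, and the untyped `newArray2DHashSet` is replaced by the generic `JStore` driven by a `Comparator`. A minimal standalone sketch of that comparator-keyed collection idea, under illustrative names (not the vendored `JStore` API):

```go
package main

import "fmt"

// comparator mirrors the shape of a Comparator[T]: the collection asks it for
// hashes and equality instead of relying on Go map keys.
type comparator[T any] interface {
	Hash(t T) int
	Equals(a, b T) bool
}

// store dedupes values using the comparator, the way a JStore-style container
// replaces a hand-rolled hash set.
type store[T any] struct {
	cmp     comparator[T]
	buckets map[int][]T
}

func newStore[T any](cmp comparator[T]) *store[T] {
	return &store[T]{cmp: cmp, buckets: map[int][]T{}}
}

// Put returns the existing equal element if present, otherwise stores v.
func (s *store[T]) Put(v T) T {
	h := s.cmp.Hash(v)
	for _, e := range s.buckets[h] {
		if s.cmp.Equals(e, v) {
			return e
		}
	}
	s.buckets[h] = append(s.buckets[h], v)
	return v
}

func (s *store[T]) Values() []T {
	var out []T
	for _, b := range s.buckets {
		out = append(out, b...)
	}
	return out
}

// byLen treats strings of equal length as duplicates, just to exercise the API.
type byLen struct{}

func (byLen) Hash(s string) int       { return len(s) }
func (byLen) Equals(a, b string) bool { return len(a) == len(b) }

func main() {
	st := newStore[string](byLen{})
	for _, v := range []string{"p1", "p2", "pred"} {
		st.Put(v)
	}
	fmt.Println(st.Values()) // two entries survive: one of length 2, one of length 4
}
```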
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
similarity index 98%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
index 2d8e99095d3..f73b06bc6a0 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
 // Use of this file is governed by the BSD 3-clause license that
 // can be found in the LICENSE.txt file in the project root.
@@ -158,7 +158,6 @@ func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start
 // {@link Token//GetInputStream}.
 //
 // @param oldToken The token to copy.
-//
 func (c *CommonToken) clone() *CommonToken {
 	t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
 	t.tokenIndex = c.GetTokenIndex()
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go
similarity index 85%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go
index e023978fef4..a3f36eaa67f 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
 // Use of this file is governed by the BSD 3-clause license that
 // can be found in the LICENSE.txt file in the project root.
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
similarity index 87%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
index df92c814789..1527d43f608 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
 // Use of this file is governed by the BSD 3-clause license that
 // can be found in the LICENSE.txt file in the project root.
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
new file mode 100644
index 00000000000..b3e38af3445
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
@@ -0,0 +1,659 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"bytes"
+	"fmt"
+)
+
+//
+// Useful for rewriting out a buffered input token stream after doing some
+// augmentation or other manipulations on it.
+
+//
+// You can insert stuff, replace, and delete chunks. Note that the operations
+// are done lazily--only if you convert the buffer to a {@link String} with
+// {@link TokenStream#getText()}. This is very efficient because you are not
+// moving data around all the time. As the buffer of tokens is converted to
+// strings, the {@link #getText()} method(s) scan the input token stream and
+// check to see if there is an operation at the current index. If so, the
+// operation is done and then normal {@link String} rendering continues on the
+// buffer. This is like having multiple Turing machine instruction streams
+// (programs) operating on a single input tape. :)
+//
+// This rewriter makes no modifications to the token stream. It does not ask the
+// stream to fill itself up nor does it advance the input cursor. The token
+// stream {@link TokenStream#index()} will return the same value before and
+// after any {@link #getText()} call.
+
+//
+// The rewriter only works on tokens that you have in the buffer and ignores the
+// current input cursor. If you are buffering tokens on-demand, calling
+// {@link #getText()} halfway through the input will only do rewrites for those
+// tokens in the first half of the file.
+
+//
+// Since the operations are done lazily at {@link #getText}-time, operations do
+// not screw up the token index values. That is, an insert operation at token
+// index {@code i} does not change the index values for tokens
+// {@code i}+1..n-1.
+
+//
+// Because operations never actually alter the buffer, you may always get the
+// original token stream back without undoing anything. Since the instructions
+// are queued up, you can easily simulate transactions and roll back any changes
+// if there is an error just by removing instructions. For example,
+
+//
+// CharStream input = new ANTLRFileStream("input");
+// TLexer lex = new TLexer(input);
+// CommonTokenStream tokens = new CommonTokenStream(lex);
+// T parser = new T(tokens);
+// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+// parser.startRule();
+//
+//
+// Then in the rules, you can execute (assuming rewriter is visible):
+
+//
+// Token t,u;
+// ...
+// rewriter.insertAfter(t, "text to put after t");}
+// rewriter.insertAfter(u, "text after u");}
+// System.out.println(rewriter.getText());
+//
+//
+// You can also have multiple "instruction streams" and get multiple rewrites
+// from a single pass over the input. Just name the instruction streams and use
+// that name again when printing the buffer. This could be useful for generating
+// a C file and also its header file--all from the same buffer:
+
+//
+// rewriter.insertAfter("pass1", t, "text to put after t");}
+// rewriter.insertAfter("pass2", u, "text after u");}
+// System.out.println(rewriter.getText("pass1"));
+// System.out.println(rewriter.getText("pass2"));
+//
+//
+// If you don't use named rewrite streams, a "default" stream is used as the
+// first example shows.
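The comment block above describes the lazy "instruction stream" model that the rest of this new file implements. A simplified standalone sketch of that model (illustrative names, not the TokenStreamRewriter API itself): operations are queued against token indexes and only consulted when the text is rendered, so the underlying token buffer never changes.

```go
package main

import (
	"bytes"
	"fmt"
	"strings"
)

// op mirrors the Execute contract used below: it may write to buf and returns
// the index of the next token to emit.
type op interface {
	execute(buf *bytes.Buffer, tokens []string) int
}

type insertBefore struct {
	index int
	text  string
}

func (o insertBefore) execute(buf *bytes.Buffer, tokens []string) int {
	buf.WriteString(o.text)
	buf.WriteString(tokens[o.index])
	return o.index + 1
}

type replaceRange struct {
	from, to int
	text     string
}

func (o replaceRange) execute(buf *bytes.Buffer, tokens []string) int {
	buf.WriteString(o.text)
	return o.to + 1 // skip the replaced tokens
}

// render walks the token buffer, consulting the (index -> op) program lazily,
// much like getText scanning for an operation at the current index.
func render(tokens []string, program map[int]op) string {
	var buf bytes.Buffer
	for i := 0; i < len(tokens); {
		if o, ok := program[i]; ok {
			i = o.execute(&buf, tokens)
		} else {
			buf.WriteString(tokens[i])
			i++
		}
	}
	return buf.String()
}

func main() {
	tokens := strings.Split("int x = 1 ;", " ")
	program := map[int]op{
		0: insertBefore{index: 0, text: "const "},
		3: replaceRange{from: 3, to: 3, text: "42"},
	}
	fmt.Println(render(tokens, program)) // "const intx=42;" (tokens concatenated verbatim)
	fmt.Println(tokens)                  // the original token buffer is unchanged
}
```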
+
+const (
+	Default_Program_Name = "default"
+	Program_Init_Size    = 100
+	Min_Token_Index      = 0
+)
+
+// Define the rewrite operation hierarchy
+
+type RewriteOperation interface {
+	// Execute the rewrite operation by possibly adding to the buffer.
+	// Return the index of the next token to operate on.
+	Execute(buffer *bytes.Buffer) int
+	String() string
+	GetInstructionIndex() int
+	GetIndex() int
+	GetText() string
+	GetOpName() string
+	GetTokens() TokenStream
+	SetInstructionIndex(val int)
+	SetIndex(int)
+	SetText(string)
+	SetOpName(string)
+	SetTokens(TokenStream)
+}
+
+type BaseRewriteOperation struct {
+	//Current index of rewrites list
+	instruction_index int
+	//Token buffer index
+	index int
+	//Substitution text
+	text string
+	//Actual operation name
+	op_name string
+	//Pointer to token steam
+	tokens TokenStream
+}
+
+func (op *BaseRewriteOperation) GetInstructionIndex() int {
+	return op.instruction_index
+}
+
+func (op *BaseRewriteOperation) GetIndex() int {
+	return op.index
+}
+
+func (op *BaseRewriteOperation) GetText() string {
+	return op.text
+}
+
+func (op *BaseRewriteOperation) GetOpName() string {
+	return op.op_name
+}
+
+func (op *BaseRewriteOperation) GetTokens() TokenStream {
+	return op.tokens
+}
+
+func (op *BaseRewriteOperation) SetInstructionIndex(val int) {
+	op.instruction_index = val
+}
+
+func (op *BaseRewriteOperation) SetIndex(val int) {
+	op.index = val
+}
+
+func (op *BaseRewriteOperation) SetText(val string) {
+	op.text = val
+}
+
+func (op *BaseRewriteOperation) SetOpName(val string) {
+	op.op_name = val
+}
+
+func (op *BaseRewriteOperation) SetTokens(val TokenStream) {
+	op.tokens = val
+}
+
+func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int {
+	return op.index
+}
+
+func (op *BaseRewriteOperation) String() string {
+	return fmt.Sprintf("<%s@%d:\"%s\">",
+		op.op_name,
+		op.tokens.Get(op.GetIndex()),
+		op.text,
+	)
+
+}
+
+type InsertBeforeOp struct {
+	BaseRewriteOperation
+}
+
+func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp {
+	return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{
+		index:   index,
+		text:    text,
+		op_name: "InsertBeforeOp",
+		tokens:  stream,
+	}}
+}
+
+func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int {
+	buffer.WriteString(op.text)
+	if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
+		buffer.WriteString(op.tokens.Get(op.index).GetText())
+	}
+	return op.index + 1
+}
+
+func (op *InsertBeforeOp) String() string {
+	return op.BaseRewriteOperation.String()
+}
+
+// Distinguish between insert after/before to do the "insert afters"
+// first and then the "insert befores" at same index. Implementation
+// of "insert after" is "insert before index+1".
+
+type InsertAfterOp struct {
+	BaseRewriteOperation
+}
+
+func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp {
+	return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{
+		index:  index + 1,
+		text:   text,
+		tokens: stream,
+	}}
+}
+
+func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
+	buffer.WriteString(op.text)
+	if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
+		buffer.WriteString(op.tokens.Get(op.index).GetText())
+	}
+	return op.index + 1
+}
+
+func (op *InsertAfterOp) String() string {
+	return op.BaseRewriteOperation.String()
+}
+
+// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
+// instructions.
+type ReplaceOp struct {
+	BaseRewriteOperation
+	LastIndex int
+}
+
+func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
+	return &ReplaceOp{
+		BaseRewriteOperation: BaseRewriteOperation{
+			index:   from,
+			text:    text,
+			op_name: "ReplaceOp",
+			tokens:  stream,
+		},
+		LastIndex: to,
+	}
+}
+
+func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int {
+	if op.text != "" {
+		buffer.WriteString(op.text)
+	}
+	return op.LastIndex + 1
+}
+
+func (op *ReplaceOp) String() string {
+	if op.text == "" {
+		return fmt.Sprintf("