diff --git a/.chloggen/46383-tlscheck-enable-reaggregation.yaml b/.chloggen/46383-tlscheck-enable-reaggregation.yaml deleted file mode 100644 index d8ed29d382080..0000000000000 --- a/.chloggen/46383-tlscheck-enable-reaggregation.yaml +++ /dev/null @@ -1,11 +0,0 @@ -change_type: enhancement - -component: receiver/tls_check - -note: Enables dynamic metric reaggregation in the TLS Check receiver. This does not break existing configuration files. - -issues: [46383] - -subtext: - -change_logs: [user] diff --git a/.chloggen/otl-3444-signalfx-physical-cpu-cores-fix.yaml b/.chloggen/47740-httpcheck-dedup-tls-cert-remaining.yaml similarity index 74% rename from .chloggen/otl-3444-signalfx-physical-cpu-cores-fix.yaml rename to .chloggen/47740-httpcheck-dedup-tls-cert-remaining.yaml index 4c01a7ce7ff33..64c482f54ba21 100644 --- a/.chloggen/otl-3444-signalfx-physical-cpu-cores-fix.yaml +++ b/.chloggen/47740-httpcheck-dedup-tls-cert-remaining.yaml @@ -4,24 +4,21 @@ change_type: bug_fix # The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) -component: exporter/signalfx +component: receiver/http_check # A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: Fix incorrect `host_physical_cpus` and `host_cpu_cores` values reported on Linux +note: Stop emitting duplicate `httpcheck.tls.cert_remaining` data points by removing the redundant pre-timing TLS block; the post-timing block (a few lines below) is the single source that retains endpoint/issuer/commonName/sans labels. # Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. -issues: [47550] +issues: [47740] # (Optional) One or more lines of additional information to render under the primary note. # These lines will be padded with 2 spaces and then inserted directly into the document. # Use pipe (|) for multiline entries. subtext: -# If your change doesn't affect end users or the exported elements of any package, -# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. # Optional: The change log or logs in which this entry should be included. # e.g. '[user]' or '[user, api]' # Include 'user' if the change is relevant to end users. # Include 'api' if there is a change to a library API. -# Default: '[user]' change_logs: [user] diff --git a/.chloggen/alibabacloudlogservice-update-sdk.yaml b/.chloggen/alibabacloudlogservice-update-sdk.yaml deleted file mode 100644 index 88527cae9c06e..0000000000000 --- a/.chloggen/alibabacloudlogservice-update-sdk.yaml +++ /dev/null @@ -1,5 +0,0 @@ -change_type: bug_fix -component: exporter/alibabacloud_logservice -note: Bump aliyun-log-go-sdk to v0.1.100, fixing SA1019 lint warning from deprecated producer.InitProducer -issues: [44363] -subtext: Migrates to producer.NewProducer which returns an error, allowing initialization failures to propagate correctly. 
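For context on the alibabacloud_logservice changelog entry deleted above: the corresponding code change, reverted in the exporter/alibabacloudlogserviceexporter hunks later in this diff, swaps the SDK producer constructor back from producer.NewProducer (v0.1.100, returns an error) to producer.InitProducer (v0.1.83, returns the producer directly). A minimal Go sketch of the two paths, using the calls exactly as they appear in the uploader.go hunk below; the import path and the producer.ProducerConfig type name are assumed from the aliyun-log-go-sdk package layout, not taken from this diff:

    // Sketch only: contrasts the two constructor styles this revert toggles between.
    package sketch

    import (
        "fmt"

        "github.com/aliyun/aliyun-log-go-sdk/producer" // assumed import path
    )

    // newProducerV0_1_100 mirrors the code being reverted: the constructor
    // returns an error, so initialization failures propagate to the caller.
    func newProducerV0_1_100(cfg *producer.ProducerConfig) (*producer.Producer, error) {
        p, err := producer.NewProducer(cfg)
        if err != nil {
            return nil, fmt.Errorf("failed to create Log Service producer: %w", err)
        }
        return p, nil
    }

    // newProducerV0_1_83 mirrors the restored code: InitProducer returns the
    // producer directly, so a bad config cannot surface as an error here.
    func newProducerV0_1_83(cfg *producer.ProducerConfig) *producer.Producer {
        return producer.InitProducer(cfg)
    }

Per the deleted changelog entry, NewProducer is the newer API, which is why the uploader.go change and the go.mod downgrade to v0.1.83 travel together in this revert.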
diff --git a/.chloggen/config.yaml b/.chloggen/config.yaml index 2eafec554448e..484a5f0adf22a 100644 --- a/.chloggen/config.yaml +++ b/.chloggen/config.yaml @@ -325,7 +325,7 @@ components: - receiver/splunkenterprise - receiver/sqlquery - receiver/sqlserver - - receiver/ssh_check + - receiver/sshcheck - receiver/statsd - receiver/stef - receiver/syslog diff --git a/.chloggen/disable_pod_association_metric.yaml b/.chloggen/disable_pod_association_metric.yaml deleted file mode 100644 index b754ae8baaeab..0000000000000 --- a/.chloggen/disable_pod_association_metric.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Use this changelog template to create an entry for release notes. - -# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' -change_type: breaking - -# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) -component: processor/k8s_attributes - -# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: Disable otelcol.k8s.pod.association metric until pod_identifier attribute is properly calculated - -# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. -issues: [47669] - -# (Optional) One or more lines of additional information to render under the primary note. -# These lines will be padded with 2 spaces and then inserted directly into the document. -# Use pipe (|) for multiline entries. -subtext: - -# If your change doesn't affect end users or the exported elements of any package, -# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. -# Optional: The change log or logs in which this entry should be included. -# e.g. '[user]' or '[user, api]' -# Include 'user' if the change is relevant to end users. -# Include 'api' if there is a change to a library API. -# Default: '[user]' -change_logs: [] diff --git a/.chloggen/githubreceiver-enforce-required-headers.yaml b/.chloggen/githubreceiver-enforce-required-headers.yaml deleted file mode 100644 index d4ed8389e6e1c..0000000000000 --- a/.chloggen/githubreceiver-enforce-required-headers.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Use this changelog template to create an entry for release notes. - -# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' -change_type: bug_fix - -# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) -component: receiver/github - -# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: Fix the enforcement of configured `required_headers` on incoming webhook requests. - -# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. -issues: [47854] - -# (Optional) One or more lines of additional information to render under the primary note. -# These lines will be padded with 2 spaces and then inserted directly into the document. -# Use pipe (|) for multiline entries. -subtext: - -# If your change doesn't affect end users or the exported elements of any package, -# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. -# Optional: The change log or logs in which this entry should be included. -# e.g. '[user]' or '[user, api]' -# Include 'user' if the change is relevant to end users. -# Include 'api' if there is a change to a library API. 
-# Default: '[user]' -change_logs: [user] diff --git a/.chloggen/k8sattributesprocessor-sharedcomponent.yaml b/.chloggen/k8sattributesprocessor-sharedcomponent.yaml deleted file mode 100644 index 22258259f4bb8..0000000000000 --- a/.chloggen/k8sattributesprocessor-sharedcomponent.yaml +++ /dev/null @@ -1,30 +0,0 @@ -# Use this changelog template to create an entry for release notes. - -# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' -change_type: enhancement - -# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) -component: processor/k8s_attributes - -# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: Allow k8sattributes processors to be shared between pipelines - -# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. -issues: [36234] - -# (Optional) One or more lines of additional information to render under the primary note. -# These lines will be padded with 2 spaces and then inserted directly into the document. -# Use pipe (|) for multiline entries. -subtext: | - When the processor.k8sattributes.ShareProcessorBetweenPipelines feature flag is enabled, k8sattributes processors - using the same configuration are shared between pipelines. This reduces the local cache size and the number of - connections to the K8s API Server. - -# If your change doesn't affect end users or the exported elements of any package, -# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. -# Optional: The change log or logs in which this entry should be included. -# e.g. '[user]' or '[user, api]' -# Include 'user' if the change is relevant to end users. -# Include 'api' if there is a change to a library API. -# Default: '[user]' -change_logs: [] diff --git a/.chloggen/prw-receiver-otel-scope-handling.yaml b/.chloggen/prw-receiver-otel-scope-handling.yaml deleted file mode 100644 index 24d2ca96f7672..0000000000000 --- a/.chloggen/prw-receiver-otel-scope-handling.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Use this changelog template to create an entry for release notes. - -# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' -change_type: enhancement - -# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) -component: receiver/prometheus_remote_write - -# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: "Handle all `otel_scope_*` prefixed labels per the Prometheus/OTLP compatibility spec." - -# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. -issues: [47726] - -# (Optional) One or more lines of additional information to render under the primary note. -# These lines will be padded with 2 spaces and then inserted directly into the document. -# Use pipe (|) for multiline entries. -subtext: | - `otel_scope_schema_url` is now set as the instrumentation scope schema URL, and other `otel_scope_` labels become scope attributes (with the `otel_scope_` prefix stripped), instead of being incorrectly added as metric data point attributes. - -# If your change doesn't affect end users or the exported elements of any package, -# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. 
-# Optional: The change log or logs in which this entry should be included. -# e.g. '[user]' or '[user, api]' -# Include 'user' if the change is relevant to end users. -# Include 'api' if there is a change to a library API. -# Default: '[user]' -change_logs: [user] diff --git a/.chloggen/remove-promreceiver-featuregates.yaml b/.chloggen/remove-promreceiver-featuregates.yaml deleted file mode 100644 index 24b1b4f0cffee..0000000000000 --- a/.chloggen/remove-promreceiver-featuregates.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Use this changelog template to create an entry for release notes. - -# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' -change_type: breaking - -# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) -component: receiver/prometheus - -# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: Remove `receiver.prometheusreceiver.EnableNativeHistograms`, `receiver.prometheusreceiver.RemoveStartTimeAdjustment` and `receiver.prometheusreceiver.UseCreatedMetric` feature gates. - -# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. -issues: [40606] - -# (Optional) One or more lines of additional information to render under the primary note. -# These lines will be padded with 2 spaces and then inserted directly into the document. -# Use pipe (|) for multiline entries. -subtext: - -# If your change doesn't affect end users or the exported elements of any package, -# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. -# Optional: The change log or logs in which this entry should be included. -# e.g. '[user]' or '[user, api]' -# Include 'user' if the change is relevant to end users. -# Include 'api' if there is a change to a library API. -# Default: '[user]' -change_logs: [user, api] diff --git a/.chloggen/rename-ssh-check-receiver.yaml b/.chloggen/rename-ssh-check-receiver.yaml deleted file mode 100644 index 55e5740e9aedd..0000000000000 --- a/.chloggen/rename-ssh-check-receiver.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# Use this changelog template to create an entry for release notes. - -change_type: deprecation - -component: receiver/ssh_check - -note: Rename `sshcheck` receiver to `ssh_check` with deprecated alias `sshcheck` - -issues: [45339] - -subtext: - -change_logs: [user] diff --git a/.chloggen/signalfxexporter-apm-deployment-environment-name.yaml b/.chloggen/signalfxexporter-apm-deployment-environment-name.yaml deleted file mode 100644 index dcc206d136d94..0000000000000 --- a/.chloggen/signalfxexporter-apm-deployment-environment-name.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Use this changelog template to create an entry for release notes. - -# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' -change_type: enhancement - -# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) -component: exporter/signalfx - -# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: APM correlation now recognizes `deployment.environment.name` in addition to the deprecated `deployment.environment` attribute - -# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. 
-issues: [47862] - -# If your change doesn't affect end users or the exported elements of any package, -# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. -# Optional: The change log or logs in which this entry should be included. -# e.g. '[user]' or '[user, api]' -# Include 'user' if the change is relevant to end users. -# Include 'api' if there is a change to a library API. -# Default: '[user]' -change_logs: [user] diff --git a/.chloggen/sumoexporter-default-batch.yaml b/.chloggen/sumoexporter-default-batch.yaml deleted file mode 100644 index f6b1dfd551c7e..0000000000000 --- a/.chloggen/sumoexporter-default-batch.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Use this changelog template to create an entry for release notes. - -# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' -change_type: enhancement - -# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) -component: exporter/sumologic - -# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: Make the batching logic default for sumologic exporter - -# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. -issues: [47820] - -# (Optional) One or more lines of additional information to render under the primary note. -# These lines will be padded with 2 spaces and then inserted directly into the document. -# Use pipe (|) for multiline entries. -subtext: - -# If your change doesn't affect end users or the exported elements of any package, -# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. -# Optional: The change log or logs in which this entry should be included. -# e.g. '[user]' or '[user, api]' -# Include 'user' if the change is relevant to end users. -# Include 'api' if there is a change to a library API. 
-# Default: '[user]' -change_logs: [user] diff --git a/exporter/alibabacloudlogserviceexporter/go.mod b/exporter/alibabacloudlogserviceexporter/go.mod index dd4d9b0dc5b6c..95c2c51bb7589 100644 --- a/exporter/alibabacloudlogserviceexporter/go.mod +++ b/exporter/alibabacloudlogserviceexporter/go.mod @@ -3,7 +3,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/exporter/alibab go 1.25.0 require ( - github.com/aliyun/aliyun-log-go-sdk v0.1.100 + github.com/aliyun/aliyun-log-go-sdk v0.1.83 github.com/gogo/protobuf v1.3.2 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.150.0 github.com/stretchr/testify v1.11.1 @@ -46,7 +46,7 @@ require ( github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect - github.com/pierrec/lz4/v4 v4.1.22 // indirect + github.com/pierrec/lz4 v2.6.0+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect @@ -81,7 +81,6 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect google.golang.org/grpc v1.80.0 // indirect google.golang.org/protobuf v1.36.11 // indirect - gopkg.in/ini.v1 v1.66.6 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporter/alibabacloudlogserviceexporter/go.sum b/exporter/alibabacloudlogserviceexporter/go.sum index 6feafab86d458..b77b70311a3f9 100644 --- a/exporter/alibabacloudlogserviceexporter/go.sum +++ b/exporter/alibabacloudlogserviceexporter/go.sum @@ -20,8 +20,8 @@ github.com/alibabacloud-go/tea-utils/v2 v2.0.1 h1:K6kwgo+UiYx+/kr6CO0PN5ACZDzE3n github.com/alibabacloud-go/tea-utils/v2 v2.0.1/go.mod h1:U5MTY10WwlquGPS34DOeomUGBB0gXbLueiq5Trwu0C4= github.com/alibabacloud-go/tea-xml v1.1.2 h1:oLxa7JUXm2EDFzMg+7oRsYc+kutgCVwm+bZlhhmvW5M= github.com/alibabacloud-go/tea-xml v1.1.2/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8= -github.com/aliyun/aliyun-log-go-sdk v0.1.100 h1:XXzm/92AGow1ZqVTUZXPLAykg59bWd8oUYZ4MGiGeIo= -github.com/aliyun/aliyun-log-go-sdk v0.1.100/go.mod h1:1NZbf//4a26kGXem8pb3/vc71M+XqRYQgekEZv89y4U= +github.com/aliyun/aliyun-log-go-sdk v0.1.83 h1:xdFXXsvhO5BedlO9EUSf/HJDHSCp6kQrwL4EKDnT/Zg= +github.com/aliyun/aliyun-log-go-sdk v0.1.83/go.mod h1:qNjBnTjQl8UeHhGmoZ7iredr2xyVBD1Ueu3JgOALR5U= github.com/aliyun/credentials-go v1.1.2 h1:qU1vwGIBb3UJ8BwunHDRFtAhS6jnQLnde/yk0+Ih2GY= github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= @@ -36,6 +36,8 @@ github.com/clbanning/mxj/v2 v2.5.5/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d844Tk= +github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= github.com/go-kit/log v0.2.1 
h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= @@ -90,8 +92,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= -github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= +github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -236,8 +238,8 @@ google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= -gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.56.0 h1:DPMeDvGTM54DXbPkVIZsp19fp/I2K7zwA/itHYHKo8Y= +gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/exporter/alibabacloudlogserviceexporter/uploader.go b/exporter/alibabacloudlogserviceexporter/uploader.go index eaef8a42c1bca..5c1b9b9b2521b 100644 --- a/exporter/alibabacloudlogserviceexporter/uploader.go +++ b/exporter/alibabacloudlogserviceexporter/uploader.go @@ -5,7 +5,6 @@ package alibabacloudlogserviceexporter // import "github.com/open-telemetry/open import ( "errors" - "fmt" "net" "os" @@ -60,15 +59,10 @@ func newLogServiceClient(config *Config, logger *zap.Logger) (logServiceClient, producerConfig.StsTokenShutDown = make(chan struct{}) } - producer, err := producer.NewProducer(producerConfig) - if err != nil { - return nil, fmt.Errorf("failed to create Log Service producer: %w", err) - } - c := &logServiceClientImpl{ project: config.Project, logstore: config.Logstore, - clientInstance: producer, + clientInstance: producer.InitProducer(producerConfig), logger: logger, } c.clientInstance.Start() diff --git a/exporter/kafkaexporter/kafka_exporter.go b/exporter/kafkaexporter/kafka_exporter.go index e7d9bce5802f2..cb058c1d72676 100644 --- a/exporter/kafkaexporter/kafka_exporter.go +++ b/exporter/kafkaexporter/kafka_exporter.go @@ -152,7 +152,16 @@ func (e *kafkaExporter[T]) exportData(ctx context.Context, data T) error { }) } err := e.producer.ExportData(ctx, m) - if err != nil { + if err == nil { + if e.logger.Core().Enabled(zap.DebugLevel) { + for _, mi := range m.TopicMessages { + e.logger.Debug("kafka records exported", + zap.Int("records", 
len(mi.Messages)), + zap.String("topic", mi.Topic), + ) + } + } + } else { for _, mi := range m.TopicMessages { e.logger.Error("kafka records export failed", zap.Int("records", len(mi.Messages)), @@ -167,17 +176,8 @@ func (e *kafkaExporter[T]) exportData(ctx context.Context, data T) error { zap.Int("max_message_bytes", msgTooLarge.MaxMessageBytes), ) } - return err - } - if e.logger.Core().Enabled(zap.DebugLevel) { - for _, mi := range m.TopicMessages { - e.logger.Debug("kafka records exported", - zap.Int("records", len(mi.Messages)), - zap.String("topic", mi.Topic), - ) - } } - return nil + return err } func newTracesExporter(config Config, set exporter.Settings) *kafkaExporter[ptrace.Traces] { diff --git a/exporter/prometheusexporter/README.md b/exporter/prometheusexporter/README.md index 198a7f5e5b60a..659b239c83ad5 100644 --- a/exporter/prometheusexporter/README.md +++ b/exporter/prometheusexporter/README.md @@ -64,14 +64,6 @@ exporters: Given the example, metrics will be available at `https://1.2.3.4:1234/metrics`. -### Native Histograms - -The exporter supports [Prometheus native histograms](https://prometheus.io/docs/concepts/native_histograms/). OpenTelemetry exponential histograms are automatically converted to the Prometheus native histogram format. - -To scrape native histograms, configure your Prometheus server to [scrape using protobuf format](https://prometheus.io/docs/prometheus/latest/getting_started/#configure-native-histograms) and to accept native histograms. - -Note that this exporter does not currently support exemplars for native histograms. - ### Feature gates There are also flags that control translation behavior. [See the documentation for the Prometheus translator module](../../pkg/translator/prometheus/) for more information. diff --git a/exporter/signalfxexporter/internal/apm/tracetracker/tracker.go b/exporter/signalfxexporter/internal/apm/tracetracker/tracker.go index 4435f3504cc57..1094567f9943a 100644 --- a/exporter/signalfxexporter/internal/apm/tracetracker/tracker.go +++ b/exporter/signalfxexporter/internal/apm/tracetracker/tracker.go @@ -136,14 +136,11 @@ func (a *ActiveServiceTracker) processEnvironment(res pcommon.Resource, now time } // Determine the environment value from the incoming spans. - // First check "deployment.environment.name" attribute (new OTel standard). - // Then check "deployment.environment" attribute (deprecated). + // First check "deployment.environment" attribute. // Then, try "environment" attribute (SignalFx schema). // Otherwise, use the same fallback value as set on the backend. 
var environment string - if env, ok := attrs.Get("deployment.environment.name"); ok { - environment = env.Str() - } else if env, ok = attrs.Get("deployment.environment"); ok { + if env, ok := attrs.Get("deployment.environment"); ok { environment = env.Str() } else if env, ok = attrs.Get("environment"); ok { environment = env.Str() diff --git a/exporter/signalfxexporter/internal/apm/tracetracker/tracker_test.go b/exporter/signalfxexporter/internal/apm/tracetracker/tracker_test.go index c5e0c56595455..7f1a09ece2d76 100644 --- a/exporter/signalfxexporter/internal/apm/tracetracker/tracker_test.go +++ b/exporter/signalfxexporter/internal/apm/tracetracker/tracker_test.go @@ -119,56 +119,6 @@ func (c *correlationTestClient) getCorrelations() []*correlations.Correlation { var _ correlations.CorrelationClient = &correlationTestClient{} -func TestEnvironmentAttributePriority(t *testing.T) { - hostIDDims := map[string]string{"host": "test"} - - tests := []struct { - name string - attrs map[string]string - wantEnv string - }{ - { - name: "deployment.environment.name takes precedence", - attrs: map[string]string{"deployment.environment.name": "new-env", "deployment.environment": "old-env", "environment": "sfx-env"}, - wantEnv: "new-env", - }, - { - name: "deployment.environment used when name absent", - attrs: map[string]string{"deployment.environment": "old-env", "environment": "sfx-env"}, - wantEnv: "old-env", - }, - { - name: "environment used when both deployment attrs absent", - attrs: map[string]string{"environment": "sfx-env"}, - wantEnv: "sfx-env", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - correlationClient := &correlationTestClient{} - a := New(log.Nil, 5*time.Minute, correlationClient, hostIDDims, DefaultDimsToSyncSource) - - fakeTraces := ptrace.NewTraces() - newResourceWithAttrs(hostIDDims, tt.attrs). - CopyTo(fakeTraces.ResourceSpans().AppendEmpty().Resource()) - a.ProcessTraces(t.Context(), fakeTraces) - - cors := correlationClient.getCorrelations() - var envCors []*correlations.Correlation - for _, c := range cors { - if c.Type == correlations.Environment { - envCors = append(envCors, c) - } - } - assert.NotEmpty(t, envCors, "expected environment correlations") - for _, c := range envCors { - assert.Equal(t, tt.wantEnv, c.Value) - } - }) - } -} - func TestCorrelationEmptyEnvironment(t *testing.T) { var wg sync.WaitGroup correlationClient := &correlationTestClient{ diff --git a/exporter/signalfxexporter/internal/hostmetadata/host.go b/exporter/signalfxexporter/internal/hostmetadata/host.go index e9fafc220d820..96ccc77e9f89c 100644 --- a/exporter/signalfxexporter/internal/hostmetadata/host.go +++ b/exporter/signalfxexporter/internal/hostmetadata/host.go @@ -8,7 +8,6 @@ package hostmetadata // import "github.com/open-telemetry/opentelemetry-collecto import ( "context" - "fmt" "strconv" "time" @@ -53,23 +52,19 @@ func (c *hostCPU) toStringMap() map[string]string { func getCPU(ctx context.Context) (info *hostCPU, err error) { info = &hostCPU{} + // get physical cpu stats var cpus []cpu.InfoStat // On Windows this can sometimes take longer than the default timeout (10 seconds). 
ctx, cancel := context.WithTimeout(ctx, cpuStatsTimeout) defer cancel() - // get cpu infoStats cpus, err = cpuInfo(ctx) if err != nil { return info, err } - // get physical cpu stats - info.HostPhysicalCPUs, err = cpuCounts(ctx, false) - if err != nil { - return info, err - } + info.HostPhysicalCPUs = len(cpus) // get logical cpu stats info.HostLogicalCPUs, err = cpuCounts(ctx, true) @@ -77,14 +72,9 @@ func getCPU(ctx context.Context) (info *hostCPU, err error) { return info, err } - // Count physical CPU cores by tracking unique {PhysicalID, CoreID} pairs. - physicalCores := make(map[string]bool) + // total number of cpu cores for i := range cpus { - k := fmt.Sprintf("%s,%s", cpus[i].PhysicalID, cpus[i].CoreID) - if !physicalCores[k] { - physicalCores[k] = true - info.HostCPUCores++ - } + info.HostCPUCores += int64(cpus[i].Cores) // TODO: This is not ideal... if there are different processors // we will only report one of the models... This is unlikely to happen, // but it could diff --git a/exporter/signalfxexporter/internal/hostmetadata/host_test.go b/exporter/signalfxexporter/internal/hostmetadata/host_test.go index c0c0169331cf0..4485ca7445237 100644 --- a/exporter/signalfxexporter/internal/hostmetadata/host_test.go +++ b/exporter/signalfxexporter/internal/hostmetadata/host_test.go @@ -32,33 +32,27 @@ func TestGetCPU(t *testing.T) { { name: "successful host cpu info", fixtures: testfixture{ - // Simulate a Linux host: 2 sockets, 2 cores each, 2 threads per core = 8 logical CPUs. - // On Linux, cpu.InfoWithContext returns one InfoStat per logical CPU with Cores=1, - // so PhysicalID and CoreID must be set to identify unique physical cores. cpuInfo: func(context.Context) ([]cpu.InfoStat, error) { return []cpu.InfoStat{ - {ModelName: "testmodelname", Cores: 1, PhysicalID: "0", CoreID: "0"}, - {ModelName: "testmodelname", Cores: 1, PhysicalID: "0", CoreID: "0"}, // hyperthreading sibling - {ModelName: "testmodelname", Cores: 1, PhysicalID: "0", CoreID: "1"}, - {ModelName: "testmodelname", Cores: 1, PhysicalID: "0", CoreID: "1"}, // hyperthreading sibling - {ModelName: "testmodelname2", Cores: 1, PhysicalID: "1", CoreID: "0"}, - {ModelName: "testmodelname2", Cores: 1, PhysicalID: "1", CoreID: "0"}, // hyperthreading sibling - {ModelName: "testmodelname2", Cores: 1, PhysicalID: "1", CoreID: "1"}, - {ModelName: "testmodelname2", Cores: 1, PhysicalID: "1", CoreID: "1"}, // hyperthreading sibling + { + ModelName: "testmodelname", + Cores: 4, + }, + { + ModelName: "testmodelname2", + Cores: 4, + }, }, nil }, - cpuCounts: func(_ context.Context, logical bool) (int, error) { - if logical { - return 8, nil // 2 sockets * 2 cores * 2 threads - } - return 2, nil // 2 physical sockets + cpuCounts: func(context.Context, bool) (int, error) { + return 2, nil }, }, wantInfo: map[string]string{ "host_physical_cpus": "2", - "host_cpu_cores": "4", + "host_cpu_cores": "8", "host_cpu_model": "testmodelname2", - "host_logical_cpus": "8", + "host_logical_cpus": "2", }, }, { @@ -96,7 +90,7 @@ func TestGetCPU(t *testing.T) { }, }, wantInfo: map[string]string{ - "host_physical_cpus": "0", // cpuCounts(false) fails, HostPhysicalCPUs stays at zero + "host_physical_cpus": "2", "host_cpu_cores": "0", "host_cpu_model": "", "host_logical_cpus": "0", diff --git a/exporter/signalfxexporter/internal/hostmetadata/metadata_test.go b/exporter/signalfxexporter/internal/hostmetadata/metadata_test.go index c7f30908c89c7..dc27f637e457c 100644 --- a/exporter/signalfxexporter/internal/hostmetadata/metadata_test.go +++ 
b/exporter/signalfxexporter/internal/hostmetadata/metadata_test.go @@ -41,10 +41,8 @@ func TestSyncMetadata(t *testing.T) { { name: "all_stats_available", cpuStat: cpu.InfoStat{ - Cores: 1, - ModelName: "testprocessor", - PhysicalID: "0", - CoreID: "0", + Cores: 4, + ModelName: "testprocessor", }, cpuStatErr: nil, memStat: mem.VirtualMemoryStat{ @@ -65,7 +63,7 @@ func TestSyncMetadata(t *testing.T) { ResourceID: "host1", MetadataDelta: metadata.MetadataDelta{ MetadataToUpdate: map[string]string{ - "host_cpu_cores": "1", + "host_cpu_cores": "4", "host_cpu_model": "testprocessor", "host_logical_cpus": "1", "host_physical_cpus": "1", @@ -86,10 +84,8 @@ func TestSyncMetadata(t *testing.T) { { name: "no_host_stats", cpuStat: cpu.InfoStat{ - Cores: 1, - ModelName: "testprocessor", - PhysicalID: "0", - CoreID: "0", + Cores: 4, + ModelName: "testprocessor", }, cpuStatErr: nil, memStat: mem.VirtualMemoryStat{ @@ -106,7 +102,7 @@ func TestSyncMetadata(t *testing.T) { ResourceID: "host1", MetadataDelta: metadata.MetadataDelta{ MetadataToUpdate: map[string]string{ - "host_cpu_cores": "1", + "host_cpu_cores": "4", "host_cpu_model": "testprocessor", "host_logical_cpus": "1", "host_physical_cpus": "1", diff --git a/exporter/sumologicexporter/README.md b/exporter/sumologicexporter/README.md index bd9fac94571f7..1c80fca31c94c 100644 --- a/exporter/sumologicexporter/README.md +++ b/exporter/sumologicexporter/README.md @@ -134,13 +134,8 @@ exporters: max_elapsed_time: sending_queue: - # default = true + # default = false enabled: {true, false} - - # Sizer determines the type of size measurement used by this component. - # It accepts "requests", "items", or "bytes". - # default = requests - sizer: <> # number of consumers that dequeue batches; ignored if enabled is false, # default = 10 num_consumers: @@ -155,25 +150,6 @@ exporters: # num_seconds is the number of seconds to buffer in case of a backend outage, # requests_per_second is the average number of requests per seconds. queue_size: - - # BlockOnOverflow determines the behavior when the component's TotalSize limit is reached. - # If true, the component will wait for space; otherwise, operations will immediately return a retryable error. - # default = false - block_on_overflow: - - # BatchConfig it configures how the requests are consumed from the queue and batch together during consumption. - batch: - # FlushTimeout sets the time after which a batch will be sent regardless of its size. - # default = 200ms - flush_timeout: <> - # Sizer determines the type of size measurement used by the batch. - # If not configured, use the same configuration as the queue. - # It accepts "requests", "items", or "bytes". - # default = items - sizer: <> - # MinSize defines the configuration for the minimum size of a batch. 
- # default = 8192 - min_size: <> ``` ## Source Templates diff --git a/exporter/sumologicexporter/factory.go b/exporter/sumologicexporter/factory.go index 7aea38d849659..558bdaddf4173 100644 --- a/exporter/sumologicexporter/factory.go +++ b/exporter/sumologicexporter/factory.go @@ -30,10 +30,7 @@ func NewFactory() exporter.Factory { } func createDefaultConfig() component.Config { - qConfig := exporterhelper.NewDefaultQueueConfig() - qConfig.Batch.GetOrInsertDefault() - qs := configoptional.Some(qConfig) - + qs := configoptional.Default(exporterhelper.NewDefaultQueueConfig()) retryConfig := configretry.NewDefaultBackOffConfig() retryConfig.Multiplier = DefaultRetryOnFailureMultiplier retryConfig.MaxInterval = DefaultRetryOnFailureMaxInterval diff --git a/exporter/sumologicexporter/factory_test.go b/exporter/sumologicexporter/factory_test.go index 1a3d7c2f340b4..e3e995e23b7e0 100644 --- a/exporter/sumologicexporter/factory_test.go +++ b/exporter/sumologicexporter/factory_test.go @@ -28,9 +28,7 @@ func TestType(t *testing.T) { func TestCreateDefaultConfig(t *testing.T) { factory := NewFactory() cfg := factory.CreateDefaultConfig() - qConfig := exporterhelper.NewDefaultQueueConfig() - qConfig.Batch.GetOrInsertDefault() - qs := configoptional.Some(qConfig) + qs := configoptional.Default(exporterhelper.NewDefaultQueueConfig()) retryConfig := configretry.NewDefaultBackOffConfig() retryConfig.Enabled = true retryConfig.InitialInterval = 5 * time.Second diff --git a/exporter/tencentcloudlogserviceexporter/go.mod b/exporter/tencentcloudlogserviceexporter/go.mod index 78877e425faf1..606f01521b57a 100644 --- a/exporter/tencentcloudlogserviceexporter/go.mod +++ b/exporter/tencentcloudlogserviceexporter/go.mod @@ -6,7 +6,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.150.0 github.com/pierrec/lz4 v2.6.1+incompatible github.com/stretchr/testify v1.11.1 - github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.3.84 + github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.3.83 go.opentelemetry.io/collector/component v1.56.1-0.20260415114935-307e3abdbae9 go.opentelemetry.io/collector/component/componenttest v0.150.1-0.20260415114935-307e3abdbae9 go.opentelemetry.io/collector/config/configopaque v1.56.1-0.20260415114935-307e3abdbae9 diff --git a/exporter/tencentcloudlogserviceexporter/go.sum b/exporter/tencentcloudlogserviceexporter/go.sum index da8a913d651f5..ef4b28ea0deb7 100644 --- a/exporter/tencentcloudlogserviceexporter/go.sum +++ b/exporter/tencentcloudlogserviceexporter/go.sum @@ -63,8 +63,8 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.3.84 h1:XWdq0+Gry5rfaWJXNIEgnxoABE4OWvwtPv+DdE3+LBU= -github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.3.84/go.mod h1:r5r4xbfxSaeR04b166HGsBa/R4U3SueirEUpXGuw+Q0= +github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.3.83 h1:C8ro7XQVV17O+A7zUTe28VK02NuyazuaY0CB2CH5Scw= +github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.3.83/go.mod h1:r5r4xbfxSaeR04b166HGsBa/R4U3SueirEUpXGuw+Q0= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= 
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/collector/client v1.56.1-0.20260415114935-307e3abdbae9 h1:qaxVLASxLeP5bOIqCnRNRSG9oEWLqTMFYo3DML42leA= diff --git a/internal/datadog/e2e/go.mod b/internal/datadog/e2e/go.mod index 9ea26084193ee..ac0c0159602a1 100644 --- a/internal/datadog/e2e/go.mod +++ b/internal/datadog/e2e/go.mod @@ -336,7 +336,6 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.150.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.150.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.150.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.150.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.150.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.150.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.150.0 // indirect @@ -537,8 +536,6 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sa replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil => ../../pdatautil -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent => ../../sharedcomponent - replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/core/xidutils => ../../../pkg/core/xidutils replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor => ../../../processor/deltatocumulativeprocessor diff --git a/pkg/xk8stest/k8s_collector.go b/pkg/xk8stest/k8s_collector.go index c416f4ba22a54..025c0db02eb5a 100644 --- a/pkg/xk8stest/k8s_collector.go +++ b/pkg/xk8stest/k8s_collector.go @@ -232,24 +232,3 @@ func fetchContainerLogs(ctx context.Context, coreClient corev1client.CoreV1Inter } return strings.TrimRight(string(logs), "\n") } - -// FetchPodLogs returns the full log output for the first running pod matching -// the given labels in the specified namespace. It is intended for e2e test -// assertions that inspect collector behavior via its log output. 
-func FetchPodLogs(t *testing.T, client *K8sClient, namespace string, podLabels map[string]any) string { - t.Helper() - coreClient, err := corev1client.NewForConfig(client.restConfig) - require.NoError(t, err, "failed to create core client for pod logs") - - podGVR := schema.GroupVersionResource{Version: "v1", Resource: "pods"} - listOptions := metav1.ListOptions{LabelSelector: SelectorFromMap(podLabels).String()} - list, err := client.DynamicClient.Resource(podGVR).Namespace(namespace).List(t.Context(), listOptions) - require.NoError(t, err, "failed to list pods") - require.NotEmpty(t, list.Items, "no pods found matching labels") - - podName := list.Items[0].GetName() - ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) - defer cancel() - - return fetchContainerLogs(ctx, coreClient, namespace, podName, "", false, nil) -} diff --git a/processor/k8sattributesprocessor/documentation.md b/processor/k8sattributesprocessor/documentation.md index 230b002f191f5..403d60bd220de 100644 --- a/processor/k8sattributesprocessor/documentation.md +++ b/processor/k8sattributesprocessor/documentation.md @@ -320,7 +320,6 @@ This component has the following feature gates: | `k8sattr.labelsAnnotationsSingular.allow` | deprecated | When enabled, default k8s label and annotation resource attribute keys will be singular, instead of plural | v0.125.0 | v0.145.0 | [Link](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/39774) | | `processor.k8sattributes.DontEmitV0K8sConventions` | alpha | When enabled, semconv legacy attributes are disabled. | v0.145.0 | N/A | [Link](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/44589) | | `processor.k8sattributes.EmitV1K8sConventions` | alpha | When enabled, semconv stable attributes are enabled. | v0.145.0 | N/A | [Link](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/44589) | -| `processor.k8sattributes.ShareProcessorBetweenPipelines` | alpha | When enabled, processor instances with identical configuration are shared across different signal type pipelines, reducing duplicate Kubernetes API watchers. | v0.150.0 | N/A | [Link](https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/2450) | | `processor.k8sattributes.telemetry.disableOldFormatMetrics` | alpha | When enabled, old formatted internal telemetry metrics are disabled. | v0.146.0 | N/A | [Link](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/45871) | | `processor.k8sattributes.telemetry.enableNewFormatMetrics` | alpha | When enabled, new formatted internal telemetry metrics are enabled. | v0.146.0 | N/A | [Link](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/45871) | diff --git a/processor/k8sattributesprocessor/e2e_test.go b/processor/k8sattributesprocessor/e2e_test.go index 8cfa83a6c2a7f..f2606efbafb30 100644 --- a/processor/k8sattributesprocessor/e2e_test.go +++ b/processor/k8sattributesprocessor/e2e_test.go @@ -12,7 +12,6 @@ import ( "os/exec" "path/filepath" "regexp" - "strings" "testing" "time" @@ -1969,146 +1968,6 @@ func waitForData(t *testing.T, entriesNum int, mc *consumertest.MetricsSink, tc len(mc.AllMetrics()), len(tc.AllTraces()), len(lc.AllLogs()), len(pc.AllProfiles()), timeoutMinutes) } -// TestE2E_SharedProcessor verifies that the k8s_attributes processor is shared across signal types -// when using the same processor configuration, and that different configurations produce separate -// processor instances. 
The test deploys a collector with two k8s_attributes processor configs: -// - k8s_attributes/full: extracts pod name, uid, namespace, deployment, node name, and pod labels. -// Used by both the traces and metrics pipelines. -// - k8s_attributes/minimal: extracts only the pod name. -// Used by the logs pipeline. -// -// The test then asserts that traces and metrics both receive the full set of attributes (proving the -// shared processor works for both signal types), while logs receive only the minimal set (proving a -// different config creates a separate processor instance). -func TestE2E_SharedProcessor(t *testing.T) { - testDir := filepath.Join("testdata", "e2e", "sharedprocessor") - - k8sClient, err := k8stest.NewK8sClient(testKubeConfig) - require.NoError(t, err) - - nsFile := filepath.Join(testDir, "namespace.yaml") - buf, err := os.ReadFile(nsFile) - require.NoErrorf(t, err, "failed to read namespace object file %s", nsFile) - nsObj, err := k8stest.CreateObject(k8sClient, buf) - require.NoErrorf(t, err, "failed to create k8s namespace from file %s", nsFile) - - testNs := nsObj.GetName() - defer func() { - require.NoErrorf(t, k8stest.DeleteObject(k8sClient, nsObj), "failed to delete namespace %s", testNs) - }() - - metricsConsumer := new(consumertest.MetricsSink) - tracesConsumer := new(consumertest.TracesSink) - logsConsumer := new(consumertest.LogsSink) - profilesConsumer := new(consumertest.ProfilesSink) - shutdownSinks := startUpSinks(t, metricsConsumer, tracesConsumer, logsConsumer, profilesConsumer) - defer shutdownSinks() - - testID := uuid.NewString()[:8] - collectorObjs := k8stest.CreateCollectorObjects(t, k8sClient, testID, filepath.Join(testDir, "collector"), map[string]string{}, "") - createTeleOpts := &k8stest.TelemetrygenCreateOpts{ - ManifestsDir: filepath.Join(testDir, "telemetrygen"), - TestID: testID, - OtlpEndpoint: fmt.Sprintf("otelcol-%s.%s:4317", testID, testNs), - DataTypes: []string{"metrics", "logs", "traces"}, - } - telemetryGenObjs, telemetryGenObjInfos := k8stest.CreateTelemetryGenObjects(t, k8sClient, createTeleOpts) - defer func() { - for _, obj := range append(collectorObjs, telemetryGenObjs...) { - require.NoErrorf(t, k8stest.DeleteObject(k8sClient, obj), "failed to delete object %s", obj.GetName()) - } - }() - - for _, info := range telemetryGenObjInfos { - k8stest.WaitForTelemetryGenToStart(t, k8sClient, info.Namespace, info.PodLabelSelectors, info.Workload, info.DataType) - } - - wantEntries := 128 - waitForData(t, wantEntries, metricsConsumer, tracesConsumer, logsConsumer, profilesConsumer) - - // Verify sharing by inspecting the collector's log output. - // The kube client logs "k8s filtering" once per Start() call. With sharedcomponent, - // Start() is called once per unique processor config. We have two configs - // (k8s_attributes/full and k8s_attributes/minimal), so we expect exactly 2 occurrences. - // Without sharing, each pipeline would get its own processor, producing 3 occurrences - // (traces + metrics + logs). 
- collectorPodLabels := map[string]any{ - "app.kubernetes.io/name": "opentelemetry-collector", - "app.kubernetes.io/instance": "otelcol-" + testID, - } - podLogs := k8stest.FetchPodLogs(t, k8sClient, testNs, collectorPodLabels) - initCount := strings.Count(podLogs, "k8s filtering") - assert.Equal(t, 2, initCount, - "expected 2 kube client initializations (one per unique processor config), got %d; "+ - "this suggests processors are not being shared across signal types", initCount) - - // Attributes that the "full" processor (traces + metrics) should add. - fullAttrs := map[string]*expectedValue{ - "k8s.pod.name": newExpectedValue(regex, "telemetrygen-"+testID+"-.*-deployment-[a-z0-9]*-[a-z0-9]*"), - "k8s.pod.uid": newExpectedValue(regex, uidRe), - "k8s.namespace.name": newExpectedValue(equal, testNs), - "k8s.deployment.name": newExpectedValue(regex, "telemetrygen-"+testID+"-.*-deployment"), - "k8s.node.name": newExpectedValue(exist, ""), - "k8s.labels.app": newExpectedValue(regex, "telemetrygen-"+testID+"-.*-deployment"), - } - - // Attributes that the "minimal" processor (logs) should add. - // It only extracts k8s.pod.name; the other attributes must NOT be present. - minimalAttrs := map[string]*expectedValue{ - "k8s.pod.name": newExpectedValue(regex, "telemetrygen-"+testID+"-.*-deployment-[a-z0-9]*-[a-z0-9]*"), - "k8s.pod.uid": newExpectedValue(shouldnotexist, ""), - "k8s.deployment.name": newExpectedValue(shouldnotexist, ""), - "k8s.node.name": newExpectedValue(shouldnotexist, ""), - "k8s.labels.app": newExpectedValue(shouldnotexist, ""), - } - - tcs := []struct { - name string - dataType pipeline.Signal - service string - attrs map[string]*expectedValue - }{ - { - // Traces use k8s_attributes/full – expect all attributes. - name: "traces-deployment-full", - dataType: pipeline.SignalTraces, - service: "test-traces-deployment", - attrs: fullAttrs, - }, - { - // Metrics also use k8s_attributes/full – same shared processor, - // so they must produce the same set of attributes. - name: "metrics-deployment-full", - dataType: pipeline.SignalMetrics, - service: "test-metrics-deployment", - attrs: fullAttrs, - }, - { - // Logs use k8s_attributes/minimal – different processor instance, - // so only k8s.pod.name should be present. 
- name: "logs-deployment-minimal", - dataType: pipeline.SignalLogs, - service: "test-logs-deployment", - attrs: minimalAttrs, - }, - } - - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - switch tc.dataType { - case pipeline.SignalTraces: - scanTracesForAttributes(t, tracesConsumer, tc.service, tc.attrs) - case pipeline.SignalMetrics: - scanMetricsForAttributes(t, metricsConsumer, tc.service, tc.attrs) - case pipeline.SignalLogs: - scanLogsForAttributes(t, logsConsumer, tc.service, tc.attrs) - default: - t.Fatalf("unknown data type %s", tc.dataType) - } - }) - } -} - func TestE2E_ContainerIDAssociation(t *testing.T) { testDir := filepath.Join("testdata", "e2e", "container_id_association_only") diff --git a/processor/k8sattributesprocessor/factory.go b/processor/k8sattributesprocessor/factory.go index 0e14d783583db..8f5be41b238b2 100644 --- a/processor/k8sattributesprocessor/factory.go +++ b/processor/k8sattributesprocessor/factory.go @@ -17,7 +17,6 @@ import ( "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/kube" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/metadata" ) @@ -26,7 +25,6 @@ var ( kubeClientProvider = kube.ClientProvider(nil) consumerCapabilities = consumer.Capabilities{MutatesData: true} defaultExcludes = ExcludeConfig{Pods: []ExcludePodConfig{{Name: "jaeger-agent"}, {Name: "jaeger-collector"}}} - processors = sharedcomponent.NewSharedComponents() ) // NewFactory returns a new factory for the k8s processor. @@ -59,23 +57,7 @@ func createTracesProcessor( cfg component.Config, next consumer.Traces, ) (processor.Traces, error) { - if !metadata.ProcessorK8sattributesShareProcessorBetweenPipelinesFeatureGate.IsEnabled() { - return createTracesProcessorWithOptions(ctx, params, cfg, next) - } - sc := processors.GetOrAdd(cfg, func() component.Component { - return createKubernetesProcessor(params, cfg) - }) - kp := sc.Unwrap().(*kubernetesprocessor) - - return processorhelper.NewTraces( - ctx, - params, - cfg, - next, - kp.processTraces, - processorhelper.WithCapabilities(consumerCapabilities), - processorhelper.WithStart(sc.Start), - processorhelper.WithShutdown(sc.Shutdown)) + return createTracesProcessorWithOptions(ctx, params, cfg, next) } func createLogsProcessor( @@ -84,23 +66,7 @@ func createLogsProcessor( cfg component.Config, nextLogsConsumer consumer.Logs, ) (processor.Logs, error) { - if !metadata.ProcessorK8sattributesShareProcessorBetweenPipelinesFeatureGate.IsEnabled() { - return createLogsProcessorWithOptions(ctx, params, cfg, nextLogsConsumer) - } - sc := processors.GetOrAdd(cfg, func() component.Component { - return createKubernetesProcessor(params, cfg) - }) - kp := sc.Unwrap().(*kubernetesprocessor) - - return processorhelper.NewLogs( - ctx, - params, - cfg, - nextLogsConsumer, - kp.processLogs, - processorhelper.WithCapabilities(consumerCapabilities), - processorhelper.WithStart(sc.Start), - processorhelper.WithShutdown(sc.Shutdown)) + return createLogsProcessorWithOptions(ctx, params, cfg, nextLogsConsumer) } func createMetricsProcessor( @@ -109,23 +75,7 @@ func createMetricsProcessor( cfg component.Config, nextMetricsConsumer consumer.Metrics, ) (processor.Metrics, error) { - if 
!metadata.ProcessorK8sattributesShareProcessorBetweenPipelinesFeatureGate.IsEnabled() { - return createMetricsProcessorWithOptions(ctx, params, cfg, nextMetricsConsumer) - } - sc := processors.GetOrAdd(cfg, func() component.Component { - return createKubernetesProcessor(params, cfg) - }) - kp := sc.Unwrap().(*kubernetesprocessor) - - return processorhelper.NewMetrics( - ctx, - params, - cfg, - nextMetricsConsumer, - kp.processMetrics, - processorhelper.WithCapabilities(consumerCapabilities), - processorhelper.WithStart(sc.Start), - processorhelper.WithShutdown(sc.Shutdown)) + return createMetricsProcessorWithOptions(ctx, params, cfg, nextMetricsConsumer) } func createProfilesProcessor( @@ -134,24 +84,7 @@ func createProfilesProcessor( cfg component.Config, nextProfilesConsumer xconsumer.Profiles, ) (xprocessor.Profiles, error) { - if !metadata.ProcessorK8sattributesShareProcessorBetweenPipelinesFeatureGate.IsEnabled() { - return createProfilesProcessorWithOptions(ctx, params, cfg, nextProfilesConsumer) - } - sc := processors.GetOrAdd(cfg, func() component.Component { - return createKubernetesProcessor(params, cfg) - }) - kp := sc.Unwrap().(*kubernetesprocessor) - - return xprocessorhelper.NewProfiles( - ctx, - params, - cfg, - nextProfilesConsumer, - kp.processProfiles, - xprocessorhelper.WithCapabilities(consumerCapabilities), - xprocessorhelper.WithStart(sc.Start), - xprocessorhelper.WithShutdown(sc.Shutdown), - ) + return createProfilesProcessorWithOptions(ctx, params, cfg, nextProfilesConsumer) } func createTracesProcessorWithOptions( diff --git a/processor/k8sattributesprocessor/go.mod b/processor/k8sattributesprocessor/go.mod index 502d745cebcbd..bafad1befe8d5 100644 --- a/processor/k8sattributesprocessor/go.mod +++ b/processor/k8sattributesprocessor/go.mod @@ -9,7 +9,6 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.150.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.150.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.150.0 - github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.150.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/xk8stest v0.150.0 github.com/stretchr/testify v1.11.1 go.opentelemetry.io/collector/client v1.56.1-0.20260415114935-307e3abdbae9 @@ -171,5 +170,3 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest => ../../pkg/pdatatest replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden => ../../pkg/golden - -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent => ../../internal/sharedcomponent diff --git a/processor/k8sattributesprocessor/internal/metadata/generated_feature_gates.go b/processor/k8sattributesprocessor/internal/metadata/generated_feature_gates.go index 84c5b84dd4e56..ea6e207192ed4 100644 --- a/processor/k8sattributesprocessor/internal/metadata/generated_feature_gates.go +++ b/processor/k8sattributesprocessor/internal/metadata/generated_feature_gates.go @@ -31,14 +31,6 @@ var ProcessorK8sattributesEmitV1K8sConventionsFeatureGate = featuregate.GlobalRe featuregate.WithRegisterFromVersion("v0.145.0"), ) -var ProcessorK8sattributesShareProcessorBetweenPipelinesFeatureGate = featuregate.GlobalRegistry().MustRegister( - "processor.k8sattributes.ShareProcessorBetweenPipelines", - featuregate.StageAlpha, - 
featuregate.WithRegisterDescription("When enabled, processor instances with identical configuration are shared across different signal type pipelines, reducing duplicate Kubernetes API watchers."), - featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/2450"), - featuregate.WithRegisterFromVersion("v0.150.0"), -) - var ProcessorK8sattributesTelemetryDisableOldFormatMetricsFeatureGate = featuregate.GlobalRegistry().MustRegister( "processor.k8sattributes.telemetry.disableOldFormatMetrics", featuregate.StageAlpha, diff --git a/processor/k8sattributesprocessor/metadata.yaml b/processor/k8sattributesprocessor/metadata.yaml index 83c36932ccaae..7bf8495a38d75 100644 --- a/processor/k8sattributesprocessor/metadata.yaml +++ b/processor/k8sattributesprocessor/metadata.yaml @@ -163,13 +163,6 @@ feature_gates: When enabled, semconv stable attributes are enabled. from_version: v0.145.0 reference_url: https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/44589 - - id: processor.k8sattributes.ShareProcessorBetweenPipelines - stage: alpha - description: >- - When enabled, processor instances with identical configuration are shared - across different signal type pipelines, reducing duplicate Kubernetes API watchers. - from_version: v0.150.0 - reference_url: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/2450 - id: processor.k8sattributes.telemetry.disableOldFormatMetrics stage: alpha description: >- @@ -210,13 +203,6 @@ attributes: telemetry: metrics: - # The metric is not in use currently. - # It can be used once pod_identifier attribute is properly calculated - # ensuring that it doesn't cause high cardinality issues. - # If each unique value of pod_identifier creates a new metric time series, - # memory grows proportionally to the number of distinct pods seen over the collector's lifetime - # impacting the component's performance. - # See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/47669 k8s.pod.association: prefix: otelcol. 
enabled: false diff --git a/processor/k8sattributesprocessor/processor.go b/processor/k8sattributesprocessor/processor.go index 40052dc75c4ae..5fef7f0ddc269 100644 --- a/processor/k8sattributesprocessor/processor.go +++ b/processor/k8sattributesprocessor/processor.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "strconv" + "strings" "time" "go.opentelemetry.io/collector/component" @@ -17,6 +18,8 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" conventions "go.opentelemetry.io/otel/semconv/v1.40.0" "go.uber.org/zap" @@ -139,7 +142,7 @@ func (kp *kubernetesprocessor) Shutdown(context.Context) error { func (kp *kubernetesprocessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) { rss := td.ResourceSpans() for i := 0; i < rss.Len(); i++ { - kp.processResource(ctx, rss.At(i).Resource()) + kp.processResource(ctx, rss.At(i).Resource(), "traces") } return td, nil @@ -149,7 +152,7 @@ func (kp *kubernetesprocessor) processTraces(ctx context.Context, td ptrace.Trac func (kp *kubernetesprocessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { rm := md.ResourceMetrics() for i := 0; i < rm.Len(); i++ { - kp.processResource(ctx, rm.At(i).Resource()) + kp.processResource(ctx, rm.At(i).Resource(), "metrics") } return md, nil @@ -159,7 +162,7 @@ func (kp *kubernetesprocessor) processMetrics(ctx context.Context, md pmetric.Me func (kp *kubernetesprocessor) processLogs(ctx context.Context, ld plog.Logs) (plog.Logs, error) { rl := ld.ResourceLogs() for i := 0; i < rl.Len(); i++ { - kp.processResource(ctx, rl.At(i).Resource()) + kp.processResource(ctx, rl.At(i).Resource(), "logs") } return ld, nil @@ -169,14 +172,14 @@ func (kp *kubernetesprocessor) processLogs(ctx context.Context, ld plog.Logs) (p func (kp *kubernetesprocessor) processProfiles(ctx context.Context, pd pprofile.Profiles) (pprofile.Profiles, error) { rp := pd.ResourceProfiles() for i := 0; i < rp.Len(); i++ { - kp.processResource(ctx, rp.At(i).Resource()) + kp.processResource(ctx, rp.At(i).Resource(), "profiles") } return pd, nil } // processResource adds Pod metadata tags to resource based on pod association configuration -func (kp *kubernetesprocessor) processResource(ctx context.Context, resource pcommon.Resource) { +func (kp *kubernetesprocessor) processResource(ctx context.Context, resource pcommon.Resource, signalType string) { podIdentifierValue := extractPodID(ctx, resource.Attributes(), kp.podAssociations) kp.logger.Debug("evaluating pod identifier", zap.Any("value", podIdentifierValue)) @@ -194,9 +197,21 @@ func (kp *kubernetesprocessor) processResource(ctx context.Context, resource pco var pod *kube.Pod var podFound bool + podIdentifierStr := buildPodIdentifierString(podIdentifierValue) if podIdentifierValue.IsNotEmpty() { if pod, podFound = kp.kc.GetPod(podIdentifierValue); podFound { kp.logger.Debug("getting the pod", zap.Any("pod", pod)) + + // Record successful pod association + if kp.telemetry != nil { + successAttr := metric.WithAttributes( + attribute.String("status", "success"), + attribute.String("pod_identifier", podIdentifierStr), + attribute.String("otelcol.signal", signalType), + ) + kp.telemetry.K8sPodAssociation.Add(ctx, 1, successAttr) + } + for key, val := range pod.Attributes { setResourceAttribute(resource.Attributes(), key, val) } @@ -204,10 +219,26 @@ func (kp *kubernetesprocessor) 
processResource(ctx context.Context, resource pco } else { // Record failed pod association kp.logger.Debug("pod not found", zap.Any("podIdentifier", podIdentifierValue)) + if kp.telemetry != nil { + errorAttr := metric.WithAttributes( + attribute.String("status", "error"), + attribute.String("pod_identifier", podIdentifierStr), + attribute.String("otelcol.signal", signalType), + ) + kp.telemetry.K8sPodAssociation.Add(ctx, 1, errorAttr) + } } } else { // Record failed pod association when no identifier found kp.logger.Debug("no pod identifier found") + if kp.telemetry != nil { + errorAttr := metric.WithAttributes( + attribute.String("status", "error"), + attribute.String("pod_identifier", podIdentifierStr), + attribute.String("otelcol.signal", signalType), + ) + kp.telemetry.K8sPodAssociation.Add(ctx, 1, errorAttr) + } } namespace := getNamespace(pod, resource.Attributes()) @@ -456,6 +487,20 @@ func (kp *kubernetesprocessor) getUIDForPodsNode(nodeName string) string { return node.NodeUID } +// buildPodIdentifierString combines all identifier values into a comma-separated string +func buildPodIdentifierString(podIdentifierValue kube.PodIdentifier) string { + var identifiers []string + for i := range podIdentifierValue { + if podIdentifierValue[i].Value != "" { + identifiers = append(identifiers, podIdentifierValue[i].Value) + } + } + if len(identifiers) > 0 { + return strings.Join(identifiers, ",") + } + return "unknown" +} + // intFromAttribute extracts int value from an attribute stored as string or int func intFromAttribute(val pcommon.Value) (int, error) { switch val.Type() { diff --git a/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/collector/clusterrole.yaml b/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/collector/clusterrole.yaml deleted file mode 100644 index df4396bb45b4f..0000000000000 --- a/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/collector/clusterrole.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ .Name }} -rules: - - apiGroups: [""] - resources: ["pods", "nodes"] - verbs: ["get", "watch", "list"] - - apiGroups: ["apps"] - resources: ["replicasets", "deployments"] - verbs: ["get", "watch", "list"] - - apiGroups: [ "" ] - resources: ["namespaces"] - verbs: [ "get", "watch", "list" ] diff --git a/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/collector/clusterrolebinding.yaml b/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/collector/clusterrolebinding.yaml deleted file mode 100644 index 3ee89881caa8b..0000000000000 --- a/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/collector/clusterrolebinding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ .Name }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ .Name }} -subjects: - - kind: ServiceAccount - name: {{ .Name }} - namespace: e2ek8sattribute-sharedprocessor diff --git a/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/collector/configmap.yaml b/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/collector/configmap.yaml deleted file mode 100644 index 7985828bde309..0000000000000 --- a/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/collector/configmap.yaml +++ /dev/null @@ -1,74 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ .Name }}-config - namespace: e2ek8sattribute-sharedprocessor -data: - relay: | - 
exporters: - otlp_grpc: - endpoint: {{ .HostEndpoint }}:4317 - tls: - insecure: true - extensions: - health_check: - endpoint: 0.0.0.0:13133 - processors: - # "full" config: extracts many metadata fields and labels. - # Used by both traces and metrics pipelines to verify that the same - # processor configuration is shared across signal types. - k8s_attributes/full: - extract: - labels: - - from: pod - key: app - tag_name: k8s.labels.app - metadata: - - k8s.pod.name - - k8s.pod.uid - - k8s.namespace.name - - k8s.deployment.name - - k8s.node.name - pod_association: - - sources: - - from: connection - # "minimal" config: extracts only the pod name. - # Used by the logs pipeline to verify that a different config - # produces a separate processor instance with different behavior. - k8s_attributes/minimal: - extract: - metadata: - - k8s.pod.name - pod_association: - - sources: - - from: connection - receivers: - otlp: - protocols: - grpc: - endpoint: ${env:MY_POD_IP}:4317 - service: - extensions: - - health_check - pipelines: - traces: - exporters: - - otlp_grpc - processors: - - k8s_attributes/full - receivers: - - otlp - metrics: - exporters: - - otlp_grpc - processors: - - k8s_attributes/full - receivers: - - otlp - logs: - exporters: - - otlp_grpc - processors: - - k8s_attributes/minimal - receivers: - - otlp diff --git a/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/collector/deployment.yaml b/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/collector/deployment.yaml deleted file mode 100644 index 379f66348a281..0000000000000 --- a/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/collector/deployment.yaml +++ /dev/null @@ -1,60 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .Name }} - namespace: e2ek8sattribute-sharedprocessor -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: opentelemetry-collector - app.kubernetes.io/instance: {{ .Name }} - template: - metadata: - labels: - app.kubernetes.io/name: opentelemetry-collector - app.kubernetes.io/instance: {{ .Name }} - spec: - serviceAccountName: {{ .Name }} - containers: - - name: opentelemetry-collector - command: - - /otelcontribcol - - --config=/conf/relay.yaml - - --feature-gates=processor.k8sattributes.ShareProcessorBetweenPipelines - image: "otelcontribcol:latest" - imagePullPolicy: Never - ports: - - name: otlp - containerPort: 4317 - protocol: TCP - env: - - name: MY_POD_IP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.podIP - livenessProbe: - httpGet: - path: / - port: 13133 - initialDelaySeconds: 3 - readinessProbe: - httpGet: - path: / - port: 13133 - initialDelaySeconds: 3 - resources: - limits: - cpu: 128m - memory: 256Mi - volumeMounts: - - mountPath: /conf - name: opentelemetry-collector-configmap - volumes: - - name: opentelemetry-collector-configmap - configMap: - name: {{ .Name }}-config - items: - - key: relay - path: relay.yaml diff --git a/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/collector/service.yaml b/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/collector/service.yaml deleted file mode 100644 index 8d5fc61f4406c..0000000000000 --- a/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/collector/service.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ .Name }} - namespace: e2ek8sattribute-sharedprocessor -spec: - type: ClusterIP - ports: - - name: otlp - port: 4317 - targetPort: 4317 - protocol: TCP - appProtocol: grpc - selector: - 
app.kubernetes.io/name: opentelemetry-collector - app.kubernetes.io/instance: {{ .Name }} diff --git a/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/collector/serviceaccount.yaml b/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/collector/serviceaccount.yaml deleted file mode 100644 index e04944a7dcde1..0000000000000 --- a/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/collector/serviceaccount.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Name }} - namespace: e2ek8sattribute-sharedprocessor diff --git a/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/namespace.yaml b/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/namespace.yaml deleted file mode 100644 index 95a4bc6d97f20..0000000000000 --- a/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/namespace.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: e2ek8sattribute-sharedprocessor - labels: - foons: barns diff --git a/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/telemetrygen/deployment.yaml b/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/telemetrygen/deployment.yaml deleted file mode 100644 index 84246b1eebe75..0000000000000 --- a/processor/k8sattributesprocessor/testdata/e2e/sharedprocessor/telemetrygen/deployment.yaml +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .Name }}-{{ .DataType }}-deployment - namespace: e2ek8sattribute-sharedprocessor -spec: - replicas: 1 - selector: - matchLabels: - app: {{ .Name }}-{{ .DataType }}-deployment - template: - metadata: - labels: - app: {{ .Name }}-{{ .DataType }}-deployment - spec: - containers: - - command: - - /telemetrygen - - {{ .DataType }} - - --otlp-insecure - - --otlp-endpoint={{ .OTLPEndpoint }} - - --duration=36000s - - --rate=1 - - --batch=false - - --otlp-attributes=service.name="test-{{ .DataType }}-deployment" - - --otlp-attributes=k8s.container.name="telemetrygen" - - --allow-export-failures -{{- if eq .DataType "traces" }} - - --status-code= -{{- end }} - image: ghcr.io/open-telemetry/opentelemetry-collector-contrib/telemetrygen:latest - imagePullPolicy: IfNotPresent - name: telemetrygen - restartPolicy: Always diff --git a/receiver/githubreceiver/README.md b/receiver/githubreceiver/README.md index e9df78669943d..e399423c2a4e5 100644 --- a/receiver/githubreceiver/README.md +++ b/receiver/githubreceiver/README.md @@ -160,7 +160,7 @@ The WebHook configuration exposes the following settings: * `path`: (default = `/events`) - The path for Action events to be sent to. * `health_path`: (default = `/health`) - The path for health checks. * `secret`: (optional) - The secret used to [validate the payload][valid]. -* `required_headers`: (optional) - One or more header key/value pairs that every webhook request must carry (the health check endpoint is exempt). Requests missing any configured header, or carrying the wrong value, are rejected before the payload is parsed. GitHub itself does not send custom request headers, so this setting is intended for deployments where a front-end WAF or proxy injects the agreed header. +* `required_header`: (optional) - The required header key and value for incoming requests. * `service_name`: (optional) - The service name for the traces. See the [Configuring Service Name](#configuring-service-name) section for more information.
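For context on the setting documented above and the enforcement code removed in the next hunk: a minimal, hypothetical sketch of required-header gating in front of a webhook handler. The `requireHeaders` middleware and the `X-Proxy-Auth` header are illustrative assumptions, not the receiver's actual API.

```go
package main

import (
	"fmt"
	"net/http"
)

// requireHeaders rejects any request that is missing one of the configured
// headers or carries the wrong value, before the body is ever read.
func requireHeaders(required map[string]string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		for k, v := range required {
			if r.Header.Get(k) != v {
				http.Error(w, "invalid request", http.StatusBadRequest)
				return
			}
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	events := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintln(w, "ok") // payload validation would happen here
	})
	// A front-end proxy or WAF would inject the agreed header value.
	http.Handle("/events", requireHeaders(map[string]string{"X-Proxy-Auth": "abc"}, events))
	_ = http.ListenAndServe("localhost:8080", nil)
}
```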
diff --git a/receiver/githubreceiver/trace_receiver.go b/receiver/githubreceiver/trace_receiver.go index 505865e05e34f..136b0db89e988 100644 --- a/receiver/githubreceiver/trace_receiver.go +++ b/receiver/githubreceiver/trace_receiver.go @@ -131,14 +131,6 @@ func (gtr *githubTracesReceiver) Shutdown(_ context.Context) error { func (gtr *githubTracesReceiver) handleReq(w http.ResponseWriter, req *http.Request) { ctx := gtr.obsrecv.StartTracesOp(req.Context()) - for k, v := range gtr.cfg.WebHook.RequiredHeaders { - if req.Header.Get(k) != string(v) { - gtr.logger.Sugar().Debugf("required header check failed", zap.String("header", k)) - http.Error(w, "invalid request", http.StatusBadRequest) - return - } - } - p, err := github.ValidatePayload(req, []byte(gtr.cfg.WebHook.Secret)) if err != nil { gtr.logger.Sugar().Debugf("unable to validate payload", zap.Error(err)) diff --git a/receiver/githubreceiver/trace_receiver_test.go b/receiver/githubreceiver/trace_receiver_test.go index 9e8506451845b..9e3063563e0c1 100644 --- a/receiver/githubreceiver/trace_receiver_test.go +++ b/receiver/githubreceiver/trace_receiver_test.go @@ -6,14 +6,12 @@ package githubreceiver // import "github.com/open-telemetry/opentelemetry-collec import ( "net/http" "net/http/httptest" - "strings" "testing" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/confignet" - "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/receiver/receivertest" @@ -86,78 +84,3 @@ func TestHealthCheck(t *testing.T) { response := w.Result() require.Equal(t, http.StatusOK, response.StatusCode) } - -func TestHandleReq_RequiredHeaders(t *testing.T) { - tests := []struct { - name string - requiredHeaders map[string]configopaque.String - reqHeaders map[string]string - wantStatus int - }{ - { - name: "valid_request_with_required_headers", - requiredHeaders: map[string]configopaque.String{"X-Proxy-Auth": "abc"}, - reqHeaders: map[string]string{ - "X-Proxy-Auth": "abc", - "X-GitHub-Event": "ping", - "Content-Type": "application/json", - }, - wantStatus: http.StatusOK, - }, - { - name: "missing_required_header", - requiredHeaders: map[string]configopaque.String{"X-Proxy-Auth": "abc"}, - reqHeaders: map[string]string{ - "X-GitHub-Event": "ping", - "Content-Type": "application/json", - }, - wantStatus: http.StatusBadRequest, - }, - { - name: "wrong_required_header_value", - requiredHeaders: map[string]configopaque.String{"X-Proxy-Auth": "abc"}, - reqHeaders: map[string]string{ - "X-Proxy-Auth": "wrong", - "X-GitHub-Event": "ping", - "Content-Type": "application/json", - }, - wantStatus: http.StatusBadRequest, - }, - { - name: "no_required_headers_configured", - requiredHeaders: nil, - reqHeaders: map[string]string{ - "X-GitHub-Event": "ping", - "Content-Type": "application/json", - }, - wantStatus: http.StatusOK, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cfg := createDefaultConfig().(*Config) - cfg.WebHook.NetAddr.Endpoint = "localhost:0" - if tt.requiredHeaders != nil { - cfg.WebHook.RequiredHeaders = tt.requiredHeaders - } - - sink := new(consumertest.TracesSink) - r, err := newTracesReceiver(receivertest.NewNopSettings(metadata.Type), cfg, sink) - require.NoError(t, err) - - req := httptest.NewRequest(http.MethodPost, "http://localhost/events", - 
strings.NewReader(`{}`)) - for k, v := range tt.reqHeaders { - req.Header.Set(k, v) - } - w := httptest.NewRecorder() - r.handleReq(w, req) - - require.Equal(t, tt.wantStatus, w.Result().StatusCode) - if tt.wantStatus == http.StatusBadRequest { - require.Equal(t, 0, sink.SpanCount(), "consumer must not be called when header check fails") - } - }) - } -} diff --git a/receiver/googlecloudpubsubreceiver/receiver_test.go b/receiver/googlecloudpubsubreceiver/receiver_test.go index dd8e5f63527c3..9cb0c7aedeb93 100644 --- a/receiver/googlecloudpubsubreceiver/receiver_test.go +++ b/receiver/googlecloudpubsubreceiver/receiver_test.go @@ -79,10 +79,6 @@ func createBaseReceiver() (*pstest.Server, *pubsubReceiver) { Timeout: 12 * time.Second, }, Subscription: "projects/my-project/subscriptions/otlp", - FlowControlConfig: FlowControlConfig{ - StreamAckDeadline: 60 * time.Second, - TriggerAckBatchDuration: 10 * time.Second, - }, }, } } @@ -169,10 +165,6 @@ func TestReceiver(t *testing.T) { Timeout: 1 * time.Second, }, Subscription: "projects/my-project/subscriptions/otlp", - FlowControlConfig: FlowControlConfig{ - StreamAckDeadline: 60 * time.Second, - TriggerAckBatchDuration: 10 * time.Second, - }, }, tracesConsumer: traceSink, metricsConsumer: metricSink, diff --git a/receiver/httpcheckreceiver/scraper.go b/receiver/httpcheckreceiver/scraper.go index 17da7cb28ed2a..7376c41e7aec2 100644 --- a/receiver/httpcheckreceiver/scraper.go +++ b/receiver/httpcheckreceiver/scraper.go @@ -349,21 +349,10 @@ func (h *httpcheckScraper) scrape(ctx context.Context) (pmetric.Metrics, error) mux.Lock() - // Check if TLS metric is enabled and this is an HTTPS endpoint - if h.cfg.Metrics.HttpcheckTLSCertRemaining.Enabled && resp != nil && resp.TLS != nil { - // Extract TLS info directly from the HTTP response - issuer, commonName, sans, timeLeft := extractTLSInfo(resp.TLS) - if issuer != "" || commonName != "" || len(sans) > 0 { - h.mb.RecordHttpcheckTLSCertRemainingDataPoint( - now, - timeLeft, - h.cfg.Targets[targetIndex].Endpoint, - issuer, - commonName, - sans, - ) - } - } + // TLS cert-remaining is recorded once per scrape a few lines + // below (after the timing metrics). The duplicate block that + // used to live here double-emitted the data point for every + // HTTPS endpoint (#47740). 
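For reference, the retained post-timing block in this scraper derives its values from the TLS handshake state carried on the HTTP response. A standalone sketch of that derivation, assuming only the standard library; `tlsCertRemaining` is a hypothetical helper, not the receiver's `extractTLSInfo`:

```go
package main

import (
	"crypto/tls"
	"errors"
	"fmt"
	"net/http"
	"time"
)

// tlsCertRemaining reads the leaf certificate presented by the server from
// the response's TLS handshake state and returns its remaining lifetime.
func tlsCertRemaining(state *tls.ConnectionState) (time.Duration, error) {
	if state == nil || len(state.PeerCertificates) == 0 {
		return 0, errors.New("no TLS state or peer certificates")
	}
	leaf := state.PeerCertificates[0] // PeerCertificates[0] is the leaf certificate
	return time.Until(leaf.NotAfter), nil
}

func main() {
	resp, err := http.Get("https://example.com")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	if remaining, err := tlsCertRemaining(resp.TLS); err == nil {
		fmt.Printf("certificate expires in %s\n", remaining.Round(time.Hour))
	}
}
```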
// Record timing breakdown metrics dnsMs, tcpMs, tlsMs, requestMs, responseMs := timing.getDurations() endpoint := h.cfg.Targets[targetIndex].Endpoint diff --git a/receiver/k8sclusterreceiver/e2e_test.go b/receiver/k8sclusterreceiver/e2e_test.go index 246aa9789903f..ba93065706b81 100644 --- a/receiver/k8sclusterreceiver/e2e_test.go +++ b/receiver/k8sclusterreceiver/e2e_test.go @@ -129,7 +129,7 @@ func TestE2EClusterScoped(t *testing.T) { pmetrictest.ChangeResourceAttributeValue("k8s.node.uid", replaceWithStar), pmetrictest.ChangeResourceAttributeValue("k8s.persistentvolume.name", replaceWithStar), pmetrictest.ChangeResourceAttributeValue("k8s.persistentvolume.uid", replaceWithStar), - pmetrictest.ChangeResourceAttributeValue("k8s.persistentvolumeclaim.name", shortenNames), + pmetrictest.ChangeResourceAttributeValue("k8s.persistentvolumeclaim.name", replaceWithStar), pmetrictest.ChangeResourceAttributeValue("k8s.persistentvolumeclaim.uid", replaceWithStar), pmetrictest.ChangeResourceAttributeValue("k8s.pod.name", shortenNames), pmetrictest.ChangeResourceAttributeValue("k8s.pod.uid", replaceWithStar), @@ -229,7 +229,7 @@ func TestE2ENamespaceScoped(t *testing.T) { pmetrictest.ChangeResourceAttributeValue("k8s.job.uid", replaceWithStar), pmetrictest.ChangeResourceAttributeValue("k8s.namespace.uid", replaceWithStar), pmetrictest.ChangeResourceAttributeValue("k8s.node.uid", replaceWithStar), - pmetrictest.ChangeResourceAttributeValue("k8s.persistentvolumeclaim.name", shortenNames), + pmetrictest.ChangeResourceAttributeValue("k8s.persistentvolumeclaim.name", replaceWithStar), pmetrictest.ChangeResourceAttributeValue("k8s.persistentvolumeclaim.uid", replaceWithStar), pmetrictest.ChangeResourceAttributeValue("k8s.pod.name", shortenNames), pmetrictest.ChangeResourceAttributeValue("k8s.pod.uid", replaceWithStar), @@ -383,9 +383,6 @@ func shortenNames(value string) string { if strings.HasPrefix(value, "test-k8scluster-receiver-job") { return "test-k8scluster-receiver-job" } - if strings.HasPrefix(value, "test-k8scluster-receiver-statefulset-pvc") { - return "test-k8scluster-receiver-statefulset-pvc" - } return value } diff --git a/receiver/prometheusreceiver/documentation.md b/receiver/prometheusreceiver/documentation.md index 9f4d3ef517d02..2ef78ec8b3ca1 100644 --- a/receiver/prometheusreceiver/documentation.md +++ b/receiver/prometheusreceiver/documentation.md @@ -9,6 +9,9 @@ This component has the following feature gates: | Feature Gate | Stage | Description | From Version | To Version | Reference | | ------------ | ----- | ----------- | ------------ | ---------- | --------- | | `receiver.prometheusreceiver.EnableCreatedTimestampZeroIngestion` | alpha | Enables the Prometheus created-timestamps-zero-injection feature. Created timestamps are injected as 0-valued samples when appropriate. This is disabled by default due to worse CPU performance with higher metric volumes. | v0.113.0 | N/A | [Link](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/40355) | +| `receiver.prometheusreceiver.EnableNativeHistograms` | stable | Converts scraped Prometheus native histograms into OpenTelemetry exponential histograms. You still need to configure 'scrape_native_histograms: true' in your Prometheus scrape config to actually scrape native histograms. For mixed histograms (both classic and native), only the native histogram buckets are used. 
| v0.142.0 | v0.145.0 | [Link](https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/34473) | | `receiver.prometheusreceiver.IgnoreScopeInfoMetric` | alpha | When enabled, the `otel_scope_info` metric is ignored for scope attribute extraction. | v0.148.0 | N/A | [Link](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/41502) | +| `receiver.prometheusreceiver.RemoveStartTimeAdjustment` | stable | When enabled, the Prometheus receiver will leave the start time unset. Use the metric_start_time processor instead if you need this functionality. | v0.121.0 | v0.142.0 | [Link](https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/36364) | +| `receiver.prometheusreceiver.UseCreatedMetric` | deprecated | When enabled, the Prometheus receiver will retrieve the start time for Summary, Histogram and Sum metrics from _created metric. | v0.89.0 | v0.141.0 | [Link](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/21909) | For more information about feature gates, see the [Feature Gates](https://github.com/open-telemetry/opentelemetry-collector/blob/main/featuregate/README.md) documentation. diff --git a/receiver/prometheusreceiver/internal/metadata/generated_feature_gates.go b/receiver/prometheusreceiver/internal/metadata/generated_feature_gates.go index c737196cd1fe7..ee463bb95af41 100644 --- a/receiver/prometheusreceiver/internal/metadata/generated_feature_gates.go +++ b/receiver/prometheusreceiver/internal/metadata/generated_feature_gates.go @@ -14,6 +14,15 @@ var ReceiverPrometheusreceiverEnableCreatedTimestampZeroIngestionFeatureGate = f featuregate.WithRegisterFromVersion("v0.113.0"), ) +var ReceiverPrometheusreceiverEnableNativeHistogramsFeatureGate = featuregate.GlobalRegistry().MustRegister( + "receiver.prometheusreceiver.EnableNativeHistograms", + featuregate.StageStable, + featuregate.WithRegisterDescription("Converts scraped Prometheus native histograms into OpenTelemetry exponential histograms. You still need to configure 'scrape_native_histograms: true' in your Prometheus scrape config to actually scrape native histograms. For mixed histograms (both classic and native), only the native histogram buckets are used."), + featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/34473"), + featuregate.WithRegisterFromVersion("v0.142.0"), + featuregate.WithRegisterToVersion("v0.145.0"), +) + var ReceiverPrometheusreceiverIgnoreScopeInfoMetricFeatureGate = featuregate.GlobalRegistry().MustRegister( "receiver.prometheusreceiver.IgnoreScopeInfoMetric", featuregate.StageAlpha, @@ -21,3 +30,21 @@ var ReceiverPrometheusreceiverIgnoreScopeInfoMetricFeatureGate = featuregate.Glo featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/41502"), featuregate.WithRegisterFromVersion("v0.148.0"), ) + +var ReceiverPrometheusreceiverRemoveStartTimeAdjustmentFeatureGate = featuregate.GlobalRegistry().MustRegister( + "receiver.prometheusreceiver.RemoveStartTimeAdjustment", + featuregate.StageStable, + featuregate.WithRegisterDescription("When enabled, the Prometheus receiver will leave the start time unset. 
Use the metric_start_time processor instead if you need this functionality."), + featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/36364"), + featuregate.WithRegisterFromVersion("v0.121.0"), + featuregate.WithRegisterToVersion("v0.142.0"), +) + +var ReceiverPrometheusreceiverUseCreatedMetricFeatureGate = featuregate.GlobalRegistry().MustRegister( + "receiver.prometheusreceiver.UseCreatedMetric", + featuregate.StageDeprecated, + featuregate.WithRegisterDescription("When enabled, the Prometheus receiver will retrieve the start time for Summary, Histogram and Sum metrics from _created metric."), + featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/21909"), + featuregate.WithRegisterFromVersion("v0.89.0"), + featuregate.WithRegisterToVersion("v0.141.0"), +) diff --git a/receiver/prometheusreceiver/metadata.yaml b/receiver/prometheusreceiver/metadata.yaml index 610352edcf153..d1e8bd4ab0f8e 100644 --- a/receiver/prometheusreceiver/metadata.yaml +++ b/receiver/prometheusreceiver/metadata.yaml @@ -32,9 +32,38 @@ feature_gates: from_version: v0.113.0 reference_url: https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/40355 + - id: receiver.prometheusreceiver.EnableNativeHistograms + stage: stable + description: >- + Converts scraped Prometheus native histograms into OpenTelemetry exponential histograms. + You still need to configure 'scrape_native_histograms: true' in your Prometheus scrape + config to actually scrape native histograms. For mixed histograms (both classic and native), + only the native histogram buckets are used. + from_version: v0.142.0 + to_version: v0.145.0 + reference_url: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/34473 + - id: receiver.prometheusreceiver.IgnoreScopeInfoMetric stage: alpha description: >- When enabled, the `otel_scope_info` metric is ignored for scope attribute extraction. from_version: v0.148.0 reference_url: https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/41502 + + - id: receiver.prometheusreceiver.RemoveStartTimeAdjustment + stage: stable + description: >- + When enabled, the Prometheus receiver will leave the start time unset. + Use the metric_start_time processor instead if you need this functionality. + from_version: v0.121.0 + to_version: v0.142.0 + reference_url: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/36364 + + - id: receiver.prometheusreceiver.UseCreatedMetric + stage: deprecated + description: >- + When enabled, the Prometheus receiver will retrieve the start time for + Summary, Histogram and Sum metrics from _created metric. + from_version: v0.89.0 + to_version: v0.141.0 + reference_url: https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/21909 \ No newline at end of file diff --git a/receiver/prometheusremotewritereceiver/receiver.go b/receiver/prometheusremotewritereceiver/receiver.go index db78c7f685f80..da6a46c47de28 100644 --- a/receiver/prometheusremotewritereceiver/receiver.go +++ b/receiver/prometheusremotewritereceiver/receiver.go @@ -69,77 +69,44 @@ type prometheusRemoteWriteReceiver struct { bodyBufferPool *sync.Pool } -// labelSeparator is used as a separator when building hashes from label values. -// 0xff is chosen because it cannot appear in valid UTF-8, avoiding accidental collisions. 
-const labelSeparator = "\xff" - -// scopeInfo holds instrumentation scope fields extracted from otel_scope_* labels. -type scopeInfo struct { - Name string - Version string - SchemaURL string - scopeAttrs []attribute // scope attributes with the "otel_scope_" prefix stripped -} - -// attribute is a simple key-value pair for scope attributes. -type attribute struct { - Key string - Value string -} - -func (si scopeInfo) key() string { - const fixedFields = 3 // Name, Version, SchemaURL - parts := make([]string, 0, fixedFields+len(si.scopeAttrs)) - parts = append(parts, si.Name, si.Version, si.SchemaURL) - for _, kv := range si.scopeAttrs { - parts = append(parts, kv.Key+labelSeparator+kv.Value) - } - return strings.Join(parts, labelSeparator) -} - // metricIdentity contains all the components that uniquely identify a metric // according to the OpenTelemetry Protocol data model. // The definition of the metric uniqueness is based on the following document. Ref: https://opentelemetry.io/docs/specs/otel/metrics/data-model/#opentelemetry-protocol-data-model type metricIdentity struct { - ResourceID string - ScopeName string - ScopeVersion string - ScopeSchemaURL string - ScopeAttrs []attribute - MetricName string - Unit string - Type writev2.Metadata_MetricType + ResourceID string + ScopeName string + ScopeVersion string + MetricName string + Unit string + Type writev2.Metadata_MetricType } // createMetricIdentity creates a metricIdentity struct from the required components -func createMetricIdentity(resourceID, metricName, unit string, si scopeInfo, metricType writev2.Metadata_MetricType) metricIdentity { +func createMetricIdentity(resourceID, scopeName, scopeVersion, metricName, unit string, metricType writev2.Metadata_MetricType) metricIdentity { return metricIdentity{ - ResourceID: resourceID, - ScopeName: si.Name, - ScopeVersion: si.Version, - ScopeSchemaURL: si.SchemaURL, - ScopeAttrs: si.scopeAttrs, - MetricName: metricName, - Unit: unit, - Type: metricType, + ResourceID: resourceID, + ScopeName: scopeName, + ScopeVersion: scopeVersion, + MetricName: metricName, + Unit: unit, + Type: metricType, } } // Hash generates a unique hash for the metric identity func (mi metricIdentity) Hash() uint64 { - parts := []string{ + const separator = "\xff" + + combined := strings.Join([]string{ mi.ResourceID, mi.ScopeName, mi.ScopeVersion, - mi.ScopeSchemaURL, mi.MetricName, mi.Unit, fmt.Sprintf("%d", mi.Type), - } - for _, kv := range mi.ScopeAttrs { - parts = append(parts, kv.Key+labelSeparator+kv.Value) - } - return xxhash.Sum64String(strings.Join(parts, labelSeparator)) + }, separator) + + return xxhash.Sum64String(combined) } func (prw *prometheusRemoteWriteReceiver) Start(ctx context.Context, host component.Host) error { @@ -296,7 +263,7 @@ func (*prometheusRemoteWriteReceiver) parseProto(contentType string) (remoteapi. // from the LRU cache when available. Never returns cached objects to avoid shared // mutation across concurrent requests. 
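As a standalone illustration of the hashing scheme in `Hash` above: every identity component is joined with a 0xff separator byte, which cannot occur in valid UTF-8, so adjacent components cannot be recombined into a colliding string. This sketch assumes the `github.com/cespare/xxhash/v2` module the receiver already depends on.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/cespare/xxhash/v2"
)

// identityHash joins the identity components with 0xff and hashes once,
// so ("ab", "c") and ("a", "bc") produce different digests.
func identityHash(parts ...string) uint64 {
	const sep = "\xff"
	return xxhash.Sum64String(strings.Join(parts, sep))
}

func main() {
	a := identityHash("resource-1", "scope", "v1", "http_requests_total", "1", "1")
	b := identityHash("resource-1", "scope", "v1", "http_requests_total", "1", "2")
	fmt.Println(a == b) // false: a different metric type is a different series identity
}
```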
func (prw *prometheusRemoteWriteReceiver) getOrCreateRM(ls labels.Labels, otelMetrics pmetric.Metrics, reqRM map[uint64]pmetric.ResourceMetrics) (pmetric.ResourceMetrics, uint64) { - hashedLabels := xxhash.Sum64String(ls.Get("job") + labelSeparator + ls.Get("instance")) + hashedLabels := xxhash.Sum64String(ls.Get("job") + string([]byte{'\xff'}) + ls.Get("instance")) if rm, ok := reqRM[hashedLabels]; ok { return rm, hashedLabels @@ -381,7 +348,7 @@ func (prw *prometheusRemoteWriteReceiver) translateV2(_ context.Context, req *wr continue } - si := prw.extractScopeInfo(ls) + scopeName, scopeVersion := prw.extractScopeInfo(ls) metricName := metadata.Name if ts.Metadata.UnitRef >= uint32(len(req.Symbols)) { badRequestErrors = errors.Join(badRequestErrors, fmt.Errorf("unit ref %d is out of bounds of symbolsTable", ts.Metadata.UnitRef)) @@ -399,7 +366,7 @@ func (prw *prometheusRemoteWriteReceiver) translateV2(_ context.Context, req *wr // Handle histograms separately due to their complex mixed-schema processing if ts.Metadata.Type == writev2.Metadata_METRIC_TYPE_HISTOGRAM || ts.Metadata.Type == writev2.Metadata_METRIC_TYPE_UNSPECIFIED && len(ts.Histograms) > 0 { - prw.processHistogramTimeSeries(otelMetrics, ls, ts, si, metricName, unit, description, metricCache, &stats, modifiedResourceMetric, exemplarMap) + prw.processHistogramTimeSeries(otelMetrics, ls, ts, scopeName, scopeVersion, metricName, unit, description, metricCache, &stats, modifiedResourceMetric, exemplarMap) continue } @@ -407,22 +374,23 @@ func (prw *prometheusRemoteWriteReceiver) translateV2(_ context.Context, req *wr rm, _ := prw.getOrCreateRM(ls, otelMetrics, modifiedResourceMetric) resourceID := identity.OfResource(rm.Resource()) - metricID := createMetricIdentity( + metricIdentity := createMetricIdentity( resourceID.String(), // Resource identity + scopeName, // Scope name + scopeVersion, // Scope version metricName, // Metric name unit, // Unit - si, // Scope info ts.Metadata.Type, // Metric type ) - metricKey := metricID.Hash() + metricKey := metricIdentity.Hash() // Find or create scope var scope pmetric.ScopeMetrics var foundScope bool for i := 0; i < rm.ScopeMetrics().Len(); i++ { s := rm.ScopeMetrics().At(i) - if scopeMatchesInfo(s, si) { + if s.Scope().Name() == scopeName && s.Scope().Version() == scopeVersion { scope = s foundScope = true break @@ -430,7 +398,8 @@ func (prw *prometheusRemoteWriteReceiver) translateV2(_ context.Context, req *wr } if !foundScope { scope = rm.ScopeMetrics().AppendEmpty() - applyScopeInfo(scope, si) + scope.Scope().SetName(scopeName) + scope.Scope().SetVersion(scopeVersion) } // Get or create metric @@ -471,8 +440,8 @@ func (prw *prometheusRemoteWriteReceiver) translateV2(_ context.Context, req *wr case writev2.Metadata_METRIC_TYPE_COUNTER: addNumberDatapoints(metric.Sum().DataPoints(), ls, ts, &stats) key := exemplarKey{ - ScopeName: si.Name, - ScopeVersion: si.Version, + ScopeName: scopeName, + ScopeVersion: scopeVersion, MetricName: metricName, MetricType: ts.Metadata.Type, } @@ -497,8 +466,7 @@ func (prw *prometheusRemoteWriteReceiver) processHistogramTimeSeries( otelMetrics pmetric.Metrics, ls labels.Labels, ts *writev2.TimeSeries, - si scopeInfo, - metricName, unit, description string, + scopeName, scopeVersion, metricName, unit, description string, metricCache map[uint64]pmetric.Metric, stats *promremote.WriteResponseStats, modifiedRM map[uint64]pmetric.ResourceMetrics, @@ -548,7 +516,7 @@ func (prw *prometheusRemoteWriteReceiver) processHistogramTimeSeries( var foundScope bool for i 
:= 0; i < rm.ScopeMetrics().Len(); i++ { s := rm.ScopeMetrics().At(i) - if scopeMatchesInfo(s, si) { + if s.Scope().Name() == scopeName && s.Scope().Version() == scopeVersion { scope = s foundScope = true break @@ -556,12 +524,14 @@ func (prw *prometheusRemoteWriteReceiver) processHistogramTimeSeries( } if !foundScope { scope = rm.ScopeMetrics().AppendEmpty() - applyScopeInfo(scope, si) + scope.Scope().SetName(scopeName) + scope.Scope().SetVersion(scopeVersion) } - metricID := fmt.Sprintf("%s:%s:%s:%s:%s:%s", + metricID := fmt.Sprintf("%s:%s:%s:%s:%s:%s:%s", resourceID.String(), - si.key(), + scopeName, + scopeVersion, metricName, unit, fmt.Sprintf("%d", ts.Metadata.Type), @@ -611,8 +581,8 @@ } key := exemplarKey{ - ScopeName: si.Name, - ScopeVersion: si.Version, + ScopeName: scopeName, + ScopeVersion: scopeVersion, MetricName: metricName, MetricType: ts.Metadata.Type, } @@ -816,7 +786,7 @@ func convertAbsoluteBuckets(spans []writev2.BucketSpan, counts []float64, bucket } } -// extractAttributes returns metric data point attributes, excluding job, instance, metric name, and all otel_scope_* labels. +// extractAttributes returns all attributes other than job, instance, the metric name, and the scope name/version func extractAttributes(ls labels.Labels) pcommon.Map { attrs := pcommon.NewMap() // job, instance and metric name will always become labels @@ -824,66 +794,27 @@ func extractAttributes(ls labels.Labels) pcommon.Map { ls.Range(func(l labels.Label) { if l.Name != "instance" && l.Name != "job" && // Become resource attributes l.Name != model.MetricNameLabel && // Becomes metric name - !strings.HasPrefix(l.Name, "otel_scope_") { // Become instrumentation scope fields + l.Name != "otel_scope_name" && l.Name != "otel_scope_version" { // Becomes scope name and version attrs.PutStr(l.Name, l.Value) } }) return attrs } -// extractScopeInfo extracts all otel_scope_* labels into a scopeInfo per the Prometheus/OTLP compatibility spec. -// Falls back to receiver build info when otel_scope_name is absent. -func (prw *prometheusRemoteWriteReceiver) extractScopeInfo(ls labels.Labels) scopeInfo { - si := scopeInfo{ - Name: prw.settings.BuildInfo.Description, - Version: prw.settings.BuildInfo.Version, - } - - ls.Range(func(l labels.Label) { - switch l.Name { - case "otel_scope_name": - if l.Value != "" { - si.Name = l.Value - } - case "otel_scope_version": - if l.Value != "" { - si.Version = l.Value - } - case "otel_scope_schema_url": - si.SchemaURL = l.Value - default: - if attrKey, ok := strings.CutPrefix(l.Name, "otel_scope_"); ok { - si.scopeAttrs = append(si.scopeAttrs, attribute{Key: attrKey, Value: l.Value}) - } - } - }) +// extractScopeInfo extracts the scope name and version from the labels. If the labels do not contain the scope name/version, +// it will use the default values from the settings.
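Taken together, the restored `extractAttributes` and `extractScopeInfo` route each incoming label to exactly one destination. A self-contained sketch of that routing under simplified assumptions (`splitLabels` is illustrative, not the receiver's code, and resource-attribute mapping is elided):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

// splitLabels sends otel_scope_name/otel_scope_version to the scope,
// reserves job/instance/__name__, and keeps the rest as point attributes.
func splitLabels(ls labels.Labels) (scopeName, scopeVersion string, attrs map[string]string) {
	attrs = map[string]string{}
	ls.Range(func(l labels.Label) {
		switch l.Name {
		case "otel_scope_name":
			scopeName = l.Value
		case "otel_scope_version":
			scopeVersion = l.Value
		case "job", "instance", "__name__":
			// job/instance become resource attributes; __name__ is the metric name.
		default:
			attrs[l.Name] = l.Value
		}
	})
	return scopeName, scopeVersion, attrs
}

func main() {
	ls := labels.FromStrings(
		"__name__", "test_metric",
		"job", "service-x/test",
		"instance", "107cn001",
		"otel_scope_name", "myscope",
		"otel_scope_version", "v1",
		"extra_attr", "extra_value",
	)
	name, version, attrs := splitLabels(ls)
	fmt.Println(name, version, attrs) // myscope v1 map[extra_attr:extra_value]
}
```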
+func (prw *prometheusRemoteWriteReceiver) extractScopeInfo(ls labels.Labels) (string, string) { + scopeName := prw.settings.BuildInfo.Description + scopeVersion := prw.settings.BuildInfo.Version - return si -} - -func scopeMatchesInfo(sm pmetric.ScopeMetrics, si scopeInfo) bool { - if sm.Scope().Name() != si.Name || sm.Scope().Version() != si.Version || sm.SchemaUrl() != si.SchemaURL { - return false - } - if sm.Scope().Attributes().Len() != len(si.scopeAttrs) { - return false + if sName := ls.Get("otel_scope_name"); sName != "" { + scopeName = sName } - for _, kv := range si.scopeAttrs { - v, ok := sm.Scope().Attributes().Get(kv.Key) - if !ok || v.Str() != kv.Value { - return false - } - } - return true -} -func applyScopeInfo(sm pmetric.ScopeMetrics, si scopeInfo) { - sm.Scope().SetName(si.Name) - sm.Scope().SetVersion(si.Version) - sm.SetSchemaUrl(si.SchemaURL) - for _, kv := range si.scopeAttrs { - sm.Scope().Attributes().PutStr(kv.Key, kv.Value) + if sVersion := ls.Get("otel_scope_version"); sVersion != "" { + scopeVersion = sVersion } + return scopeName, scopeVersion } // addNHCBDatapoint converts a single Native Histogram Custom Buckets (NHCB) to OpenTelemetry histogram datapoints diff --git a/receiver/prometheusremotewritereceiver/receiver_test.go b/receiver/prometheusremotewritereceiver/receiver_test.go index 5ced92de435c6..a9eff2419d9fa 100644 --- a/receiver/prometheusremotewritereceiver/receiver_test.go +++ b/receiver/prometheusremotewritereceiver/receiver_test.go @@ -2524,160 +2524,6 @@ func TestTranslateV2(t *testing.T) { Exemplars: 0, }, }, - { - // otel_scope_schema_url must be set as the schema URL on the ScopeMetrics, - // not appear as a metric data point attribute. - name: "otel_scope_schema_url sets scope schema URL", - request: &writev2.Request{ - Symbols: []string{ - "", - "__name__", "test_metric", // 1, 2 - "job", "service-x/test", // 3, 4 - "instance", "107cn001", // 5, 6 - "otel_scope_name", "myscope", // 7, 8 - "otel_scope_version", "v1", // 9, 10 - "otel_scope_schema_url", "https://example.com/schema/v1", // 11, 12 - "extra_attr", "extra_value", // 13, 14 - }, - Timeseries: []writev2.TimeSeries{ - { - Metadata: writev2.Metadata{Type: writev2.Metadata_METRIC_TYPE_GAUGE}, - LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, - Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}, - }, - }, - }, - expectedMetrics: func() pmetric.Metrics { - expected := pmetric.NewMetrics() - rm := expected.ResourceMetrics().AppendEmpty() - rm.Resource().Attributes().PutStr("service.namespace", "service-x") - rm.Resource().Attributes().PutStr("service.name", "test") - rm.Resource().Attributes().PutStr("service.instance.id", "107cn001") - - sm := rm.ScopeMetrics().AppendEmpty() - sm.Scope().SetName("myscope") - sm.Scope().SetVersion("v1") - sm.SetSchemaUrl("https://example.com/schema/v1") - - metric := sm.Metrics().AppendEmpty() - metric.SetName("test_metric") - metric.Metadata().PutStr(prometheus.MetricMetadataTypeKey, "gauge") - dp := metric.SetEmptyGauge().DataPoints().AppendEmpty() - dp.SetTimestamp(pcommon.Timestamp(1 * int64(time.Millisecond))) - dp.SetDoubleValue(1.0) - dp.Attributes().PutStr("extra_attr", "extra_value") - return expected - }(), - expectedStats: remote.WriteResponseStats{Confirmed: true, Samples: 1}, - }, - { - // Labels with otel_scope_ prefix other than name/version/schema_url must become - // scope attributes (with the prefix stripped) and must NOT appear as metric point attributes. 
- name: "otel_scope_* extra labels become scope attributes", - request: &writev2.Request{ - Symbols: []string{ - "", - "__name__", "test_metric", // 1, 2 - "job", "service-x/test", // 3, 4 - "instance", "107cn001", // 5, 6 - "otel_scope_name", "myscope", // 7, 8 - "otel_scope_version", "v1", // 9, 10 - "otel_scope_foo", "bar", // 11, 12 → scope attr "foo"="bar" - "regular_attr", "regular_value", // 13, 14 - }, - Timeseries: []writev2.TimeSeries{ - { - Metadata: writev2.Metadata{Type: writev2.Metadata_METRIC_TYPE_GAUGE}, - LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, - Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}, - }, - }, - }, - expectedMetrics: func() pmetric.Metrics { - expected := pmetric.NewMetrics() - rm := expected.ResourceMetrics().AppendEmpty() - rm.Resource().Attributes().PutStr("service.namespace", "service-x") - rm.Resource().Attributes().PutStr("service.name", "test") - rm.Resource().Attributes().PutStr("service.instance.id", "107cn001") - - sm := rm.ScopeMetrics().AppendEmpty() - sm.Scope().SetName("myscope") - sm.Scope().SetVersion("v1") - sm.Scope().Attributes().PutStr("foo", "bar") - - metric := sm.Metrics().AppendEmpty() - metric.SetName("test_metric") - metric.Metadata().PutStr(prometheus.MetricMetadataTypeKey, "gauge") - dp := metric.SetEmptyGauge().DataPoints().AppendEmpty() - dp.SetTimestamp(pcommon.Timestamp(1 * int64(time.Millisecond))) - dp.SetDoubleValue(1.0) - dp.Attributes().PutStr("regular_attr", "regular_value") - return expected - }(), - expectedStats: remote.WriteResponseStats{Confirmed: true, Samples: 1}, - }, - { - // All otel_scope_* label variants together (name, version, schema_url, extra attr). - name: "all otel_scope_* labels handled correctly for NHCB histogram", - request: &writev2.Request{ - Symbols: []string{ - "", - "__name__", "test_hist", // 1, 2 - "job", "service-x/test", // 3, 4 - "instance", "107cn001", // 5, 6 - "otel_scope_name", "histscope", // 7, 8 - "otel_scope_version", "v2", // 9, 10 - "otel_scope_schema_url", "https://example.com/schema/v2", // 11, 12 - "otel_scope_lib", "mylib", // 13, 14 → scope attr "lib"="mylib" - "dp_attr", "dp_value", // 15, 16 - }, - Timeseries: []writev2.TimeSeries{ - { - Metadata: writev2.Metadata{Type: writev2.Metadata_METRIC_TYPE_HISTOGRAM}, - LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, - Histograms: []writev2.Histogram{ - { - Schema: -53, // NHCB - Count: &writev2.Histogram_CountInt{CountInt: 2}, - Sum: 3.0, - Timestamp: 1000, - CustomValues: []float64{1.0}, - PositiveSpans: []writev2.BucketSpan{{Offset: 0, Length: 2}}, - PositiveDeltas: []int64{1, 1}, - }, - }, - }, - }, - }, - expectedMetrics: func() pmetric.Metrics { - expected := pmetric.NewMetrics() - rm := expected.ResourceMetrics().AppendEmpty() - rm.Resource().Attributes().PutStr("service.namespace", "service-x") - rm.Resource().Attributes().PutStr("service.name", "test") - rm.Resource().Attributes().PutStr("service.instance.id", "107cn001") - - sm := rm.ScopeMetrics().AppendEmpty() - sm.Scope().SetName("histscope") - sm.Scope().SetVersion("v2") - sm.SetSchemaUrl("https://example.com/schema/v2") - sm.Scope().Attributes().PutStr("lib", "mylib") - - metric := sm.Metrics().AppendEmpty() - metric.SetName("test_hist") - metric.Metadata().PutStr(prometheus.MetricMetadataTypeKey, "histogram") - hist := metric.SetEmptyHistogram() - hist.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - dp := hist.DataPoints().AppendEmpty() - dp.SetTimestamp(pcommon.Timestamp(1000 * 
int64(time.Millisecond))) - dp.SetSum(3.0) - dp.SetCount(2) - dp.ExplicitBounds().FromRaw([]float64{1.0}) - dp.BucketCounts().FromRaw([]uint64{1, 2}) - dp.Attributes().PutStr("dp_attr", "dp_value") - return expected - }(), - expectedStats: remote.WriteResponseStats{Confirmed: true, Histograms: 1}, - }, } { t.Run(tc.name, func(t *testing.T) { // since we are using the rmCache to store values across requests, we need to clear it after each test, otherwise it will affect the next test diff --git a/receiver/sshcheckreceiver/README.md b/receiver/sshcheckreceiver/README.md index 9357e34215c17..226cac0d5e3ad 100644 --- a/receiver/sshcheckreceiver/README.md +++ b/receiver/sshcheckreceiver/README.md @@ -22,9 +22,6 @@ If `ignore_host_key` is not set then host key validation requires the agent eith ## Configuration -> **Note:** This receiver was renamed from `sshcheck` to `ssh_check` to match the snake_case naming convention. -> The deprecated component type `sshcheck` is still accepted as an alias and will log a deprecation warning. - The following settings are required: - `endpoint` - `username` @@ -44,7 +41,7 @@ The following settings are optional: ```yaml receivers: - ssh_check: + sshcheck: endpoint: localhost:2222 username: otelu password: $OTELP @@ -55,7 +52,7 @@ receivers: ```yaml receivers: - ssh_check: + sshcheck: endpoint: sftp.example.com:22 username: monitoring key_file: /path/to/private_key @@ -66,7 +63,7 @@ receivers: ```yaml receivers: - ssh_check: + sshcheck: endpoint: sftp.example.com:22 username: monitoring key_file: /path/to/private_key @@ -78,7 +75,7 @@ receivers: ```yaml receivers: - ssh_check: + sshcheck: endpoint: production-server.example.com:22 username: monitoring key_file: /etc/otel/ssh_monitoring_key @@ -98,7 +95,7 @@ exporters: service: pipelines: metrics: - receivers: [ssh_check] + receivers: [sshcheck] processors: [batch] exporters: [otlp_grpc] ``` @@ -113,7 +110,7 @@ The `timeout` option controls how long the receiver waits for an SSH connection ```yaml receivers: - ssh_check: + sshcheck: endpoint: slow-server.example.com:22 username: user password: pass @@ -127,14 +124,14 @@ SFTP checks can be enabled in two ways: 1. **Using the `check_sftp` option** (enables SFTP status and duration metrics): ```yaml receivers: - ssh_check: + sshcheck: check_sftp: true ``` 2. **By enabling SFTP metrics individually**: ```yaml receivers: - ssh_check: + sshcheck: metrics: sshcheck.sftp_duration: enabled: true @@ -150,7 +147,7 @@ Individual metrics can be enabled or disabled using the `metrics` configuration ```yaml receivers: - ssh_check: + sshcheck: endpoint: localhost:2222 username: user password: pass @@ -179,7 +176,7 @@ The `known_hosts` option specifies the path to the SSH known_hosts file for host ```yaml receivers: - ssh_check: + sshcheck: endpoint: server.example.com:22 username: user key_file: /path/to/key @@ -192,7 +189,7 @@ The `ignore_host_key` option disables host key validation. **This should only be ```yaml receivers: - ssh_check: + sshcheck: ignore_host_key: true # ⚠️ Only for testing! 
``` diff --git a/receiver/sshcheckreceiver/config_test.go b/receiver/sshcheckreceiver/config_test.go index f9876e2a5d4e9..87dd31d3716f8 100644 --- a/receiver/sshcheckreceiver/config_test.go +++ b/receiver/sshcheckreceiver/config_test.go @@ -15,7 +15,6 @@ import ( "go.uber.org/multierr" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sshcheckreceiver/internal/configssh" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sshcheckreceiver/internal/metadata" ) // check that OTel Collector patterns are implemented @@ -115,7 +114,7 @@ func TestLoadConfig(t *testing.T) { require.NoError(t, err) rcvrs, err := cm.Sub("receivers") require.NoError(t, err) - sshconf, err := rcvrs.Sub(metadata.Type.String()) + sshconf, err := rcvrs.Sub("sshcheck") require.NoError(t, err) // unmarshal to receiver config actualConfig, ok := NewFactory().CreateDefaultConfig().(*Config) diff --git a/receiver/sshcheckreceiver/documentation.md b/receiver/sshcheckreceiver/documentation.md index 3091e2a0e23dc..df9e386341c78 100644 --- a/receiver/sshcheckreceiver/documentation.md +++ b/receiver/sshcheckreceiver/documentation.md @@ -1,6 +1,6 @@ [comment]: <> (Code generated by mdatagen. DO NOT EDIT.) -# ssh_check +# sshcheck ## Default Metrics diff --git a/receiver/sshcheckreceiver/factory.go b/receiver/sshcheckreceiver/factory.go index 39a3775bf18f9..4569aa37939ef 100644 --- a/receiver/sshcheckreceiver/factory.go +++ b/receiver/sshcheckreceiver/factory.go @@ -10,7 +10,6 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" - "go.opentelemetry.io/collector/receiver/xreceiver" "go.opentelemetry.io/collector/scraper" "go.opentelemetry.io/collector/scraper/scraperhelper" @@ -20,12 +19,10 @@ import ( // NewFactory creates a new receiver factory func NewFactory() receiver.Factory { - return xreceiver.NewFactory( + return receiver.NewFactory( metadata.Type, createDefaultConfig, - xreceiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability), - xreceiver.WithDeprecatedTypeAlias(metadata.DeprecatedType), - ) + receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability)) } func createDefaultConfig() component.Config { diff --git a/receiver/sshcheckreceiver/generated_component_test.go b/receiver/sshcheckreceiver/generated_component_test.go index 3a5e5767e6b36..74cfebfc6e50d 100644 --- a/receiver/sshcheckreceiver/generated_component_test.go +++ b/receiver/sshcheckreceiver/generated_component_test.go @@ -15,7 +15,7 @@ import ( "go.opentelemetry.io/collector/receiver/receivertest" ) -var typ = component.MustNewType("ssh_check") +var typ = component.MustNewType("sshcheck") func TestComponentFactoryType(t *testing.T) { require.Equal(t, typ, NewFactory().Type()) diff --git a/receiver/sshcheckreceiver/go.mod b/receiver/sshcheckreceiver/go.mod index 22a8bec6818d5..3a2b7e72385d3 100644 --- a/receiver/sshcheckreceiver/go.mod +++ b/receiver/sshcheckreceiver/go.mod @@ -19,7 +19,6 @@ require ( go.opentelemetry.io/collector/pdata v1.56.1-0.20260415114935-307e3abdbae9 go.opentelemetry.io/collector/receiver v1.56.1-0.20260415114935-307e3abdbae9 go.opentelemetry.io/collector/receiver/receivertest v0.150.1-0.20260415114935-307e3abdbae9 - go.opentelemetry.io/collector/receiver/xreceiver v0.150.1-0.20260415114935-307e3abdbae9 go.opentelemetry.io/collector/scraper v0.150.1-0.20260415114935-307e3abdbae9 go.opentelemetry.io/collector/scraper/scraperhelper v0.150.1-0.20260415114935-307e3abdbae9 go.uber.org/goleak v1.3.0 @@ 
-54,6 +53,7 @@ require ( go.opentelemetry.io/collector/pipeline v1.56.1-0.20260415114935-307e3abdbae9 // indirect go.opentelemetry.io/collector/pipeline/xpipeline v0.150.1-0.20260415114935-307e3abdbae9 // indirect go.opentelemetry.io/collector/receiver/receiverhelper v0.150.1-0.20260415114935-307e3abdbae9 // indirect + go.opentelemetry.io/collector/receiver/xreceiver v0.150.1-0.20260415114935-307e3abdbae9 // indirect go.opentelemetry.io/otel/sdk v1.43.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.43.0 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect diff --git a/receiver/sshcheckreceiver/internal/metadata/config.schema.yaml b/receiver/sshcheckreceiver/internal/metadata/config.schema.yaml index d5ddbc376539c..afd00218b4599 100644 --- a/receiver/sshcheckreceiver/internal/metadata/config.schema.yaml +++ b/receiver/sshcheckreceiver/internal/metadata/config.schema.yaml @@ -1,7 +1,7 @@ # Code generated by mdatagen. DO NOT EDIT. $defs: metrics_config: - description: MetricsConfig provides config for ssh_check metrics. + description: MetricsConfig provides config for sshcheck metrics. type: object properties: sshcheck.duration: @@ -47,7 +47,7 @@ $defs: type: boolean default: true resource_attributes_config: - description: ResourceAttributesConfig provides config for ssh_check resource attributes. + description: ResourceAttributesConfig provides config for sshcheck resource attributes. type: object properties: ssh.endpoint: @@ -68,7 +68,7 @@ $defs: items: $ref: go.opentelemetry.io/collector/filter.config metrics_builder_config: - description: MetricsBuilderConfig is a configuration for ssh_check metrics builder. + description: MetricsBuilderConfig is a configuration for sshcheck metrics builder. type: object properties: metrics: diff --git a/receiver/sshcheckreceiver/internal/metadata/generated_config.go b/receiver/sshcheckreceiver/internal/metadata/generated_config.go index 39d9ce2c66b8f..47f2280a8d820 100644 --- a/receiver/sshcheckreceiver/internal/metadata/generated_config.go +++ b/receiver/sshcheckreceiver/internal/metadata/generated_config.go @@ -27,7 +27,7 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { return nil } -// MetricsConfig provides config for ssh_check metrics. +// MetricsConfig provides config for sshcheck metrics. type MetricsConfig struct { SshcheckDuration MetricConfig `mapstructure:"sshcheck.duration"` SshcheckError MetricConfig `mapstructure:"sshcheck.error"` @@ -86,7 +86,7 @@ func (rac *ResourceAttributeConfig) Unmarshal(parser *confmap.Conf) error { return nil } -// ResourceAttributesConfig provides config for ssh_check resource attributes. +// ResourceAttributesConfig provides config for sshcheck resource attributes. type ResourceAttributesConfig struct { SSHEndpoint ResourceAttributeConfig `mapstructure:"ssh.endpoint"` } @@ -99,7 +99,7 @@ func DefaultResourceAttributesConfig() ResourceAttributesConfig { } } -// MetricsBuilderConfig is a configuration for ssh_check metrics builder. +// MetricsBuilderConfig is a configuration for sshcheck metrics builder. 
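For readers unfamiliar with the generated types: `MetricConfig` is a plain `enabled` flag decoded from the receiver's `metrics:` section via `confmap`. A minimal sketch with simplified stand-in structs, not the generated mdatagen code itself:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/confmap"
)

// MetricConfig mirrors the shape above: one enabled knob per metric.
type MetricConfig struct {
	Enabled bool `mapstructure:"enabled"`
}

// MetricsConfig keys each metric's config by its full metric name.
type MetricsConfig struct {
	SshcheckDuration MetricConfig `mapstructure:"sshcheck.duration"`
}

func main() {
	conf := confmap.NewFromStringMap(map[string]any{
		"sshcheck.duration": map[string]any{"enabled": false},
	})
	var cfg MetricsConfig
	if err := conf.Unmarshal(&cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.SshcheckDuration.Enabled) // false
}
```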
type MetricsBuilderConfig struct { Metrics MetricsConfig `mapstructure:"metrics"` ResourceAttributes ResourceAttributesConfig `mapstructure:"resource_attributes"` diff --git a/receiver/sshcheckreceiver/internal/metadata/generated_status.go b/receiver/sshcheckreceiver/internal/metadata/generated_status.go index ce159fbc5e7a3..187add33c8c7a 100644 --- a/receiver/sshcheckreceiver/internal/metadata/generated_status.go +++ b/receiver/sshcheckreceiver/internal/metadata/generated_status.go @@ -7,9 +7,8 @@ import ( ) var ( - Type = component.MustNewType("ssh_check") - DeprecatedType = component.MustNewType("sshcheck") - ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sshcheckreceiver" + Type = component.MustNewType("sshcheck") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sshcheckreceiver" ) const ( diff --git a/receiver/sshcheckreceiver/metadata.yaml b/receiver/sshcheckreceiver/metadata.yaml index 50394a415640d..602ccc0cfb7a3 100644 --- a/receiver/sshcheckreceiver/metadata.yaml +++ b/receiver/sshcheckreceiver/metadata.yaml @@ -1,6 +1,5 @@ display_name: SSH Check Receiver -type: ssh_check -deprecated_type: sshcheck +type: sshcheck description: This receiver creates stats by connecting to an SSH server which may be an SFTP server. diff --git a/receiver/sshcheckreceiver/testdata/config.yaml b/receiver/sshcheckreceiver/testdata/config.yaml index f851374a462f0..61b906205951e 100644 --- a/receiver/sshcheckreceiver/testdata/config.yaml +++ b/receiver/sshcheckreceiver/testdata/config.yaml @@ -1,5 +1,5 @@ receivers: - ssh_check: + sshcheck: endpoint: notdefault:1313 username: notdefault_username password: notdefault_password @@ -17,6 +17,6 @@ exporters: service: pipelines: metrics: - receivers: [ssh_check] + receivers: [sshcheck] processors: [nop] exporters: [nop] diff --git a/receiver/tlscheckreceiver/documentation.md b/receiver/tlscheckreceiver/documentation.md index 58e90abe99e5d..285ba3b46c7b8 100644 --- a/receiver/tlscheckreceiver/documentation.md +++ b/receiver/tlscheckreceiver/documentation.md @@ -26,7 +26,7 @@ Time in seconds until certificate expiry, as specified by `NotAfter` field in th | ---- | ----------- | ------ | ----------------- | ------------------- | | tlscheck.x509.issuer | The entity that issued the certificate. | Any Str | Recommended | - | | tlscheck.x509.cn | The commonName in the subject of the certificate. | Any Str | Recommended | - | -| tlscheck.x509.san | The Subject Alternative Name of the certificate. | Any Slice | Opt-In | - | +| tlscheck.x509.san | The Subject Alternative Name of the certificate. | Any Slice | Recommended | - | ## Resource Attributes diff --git a/receiver/tlscheckreceiver/internal/metadata/config.schema.yaml b/receiver/tlscheckreceiver/internal/metadata/config.schema.yaml index 1bf0993e6fc59..9af44a561c823 100644 --- a/receiver/tlscheckreceiver/internal/metadata/config.schema.yaml +++ b/receiver/tlscheckreceiver/internal/metadata/config.schema.yaml @@ -11,25 +11,6 @@ $defs: enabled: type: boolean default: true - aggregation_strategy: - type: string - enum: - - "sum" - - "avg" - - "min" - - "max" - default: "avg" - attributes: - type: array - items: - type: string - enum: - - "tlscheck.x509.issuer" - - "tlscheck.x509.cn" - - "tlscheck.x509.san" - default: - - "tlscheck.x509.issuer" - - "tlscheck.x509.cn" resource_attributes_config: description: ResourceAttributesConfig provides config for tls_check resource attributes. 
type: object diff --git a/receiver/tlscheckreceiver/internal/metadata/generated_config.go b/receiver/tlscheckreceiver/internal/metadata/generated_config.go index d56f49092d850..1acb885c0fdac 100644 --- a/receiver/tlscheckreceiver/internal/metadata/generated_config.go +++ b/receiver/tlscheckreceiver/internal/metadata/generated_config.go @@ -3,31 +3,17 @@ package metadata import ( - "fmt" - "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/filter" ) -// TlscheckTimeLeftMetricAttributeKey specifies the key of an attribute for the tlscheck.time_left metric. -type TlscheckTimeLeftMetricAttributeKey string - -const ( - TlscheckTimeLeftMetricAttributeKeyTlscheckX509Issuer TlscheckTimeLeftMetricAttributeKey = "tlscheck.x509.issuer" - TlscheckTimeLeftMetricAttributeKeyTlscheckX509Cn TlscheckTimeLeftMetricAttributeKey = "tlscheck.x509.cn" - TlscheckTimeLeftMetricAttributeKeyTlscheckX509San TlscheckTimeLeftMetricAttributeKey = "tlscheck.x509.san" -) - -// TlscheckTimeLeftMetricConfig provides config for the tlscheck.time_left metric. -type TlscheckTimeLeftMetricConfig struct { +// MetricConfig provides common config for a particular metric. +type MetricConfig struct { Enabled bool `mapstructure:"enabled"` enabledSetByUser bool - - AggregationStrategy string `mapstructure:"aggregation_strategy"` - EnabledAttributes []TlscheckTimeLeftMetricAttributeKey `mapstructure:"attributes"` } -func (ms *TlscheckTimeLeftMetricConfig) Unmarshal(parser *confmap.Conf) error { +func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { if parser == nil { return nil } @@ -41,35 +27,15 @@ func (ms *TlscheckTimeLeftMetricConfig) Unmarshal(parser *confmap.Conf) error { return nil } -func (ms *TlscheckTimeLeftMetricConfig) Validate() error { - for _, val := range ms.EnabledAttributes { - switch val { - case TlscheckTimeLeftMetricAttributeKeyTlscheckX509Issuer, TlscheckTimeLeftMetricAttributeKeyTlscheckX509Cn, TlscheckTimeLeftMetricAttributeKeyTlscheckX509San: - default: - return fmt.Errorf("metric tlscheck.time_left doesn't have an attribute %v, valid attributes: [tlscheck.x509.issuer, tlscheck.x509.cn, tlscheck.x509.san]", val) - } - } - - switch ms.AggregationStrategy { - case AggregationStrategySum, AggregationStrategyAvg, AggregationStrategyMin, AggregationStrategyMax: - default: - return fmt.Errorf("invalid aggregation strategy %q, valid strategies: [%s, %s, %s, %s]", ms.AggregationStrategy, AggregationStrategySum, AggregationStrategyAvg, AggregationStrategyMin, AggregationStrategyMax) - } - - return nil -} - // MetricsConfig provides config for tls_check metrics. 
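The hunk above introduces the plain `MetricConfig` in place of the bespoke `TlscheckTimeLeftMetricConfig`, but elides the middle of `Unmarshal`. A sketch of the body, assuming it follows the standard mdatagen-generated pattern (the `IsSet` tracking is an assumption based on that pattern, not copied from this diff):

```go
package metadata

import "go.opentelemetry.io/collector/confmap"

// MetricConfig mirrors the struct introduced in the hunk above.
type MetricConfig struct {
	Enabled          bool `mapstructure:"enabled"`
	enabledSetByUser bool
}

// Unmarshal fills the config and remembers whether the user set
// `enabled` explicitly, so defaults can be told apart from user choices.
func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error {
	if parser == nil {
		return nil
	}
	if err := parser.Unmarshal(ms); err != nil {
		return err
	}
	ms.enabledSetByUser = parser.IsSet("enabled")
	return nil
}
```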
type MetricsConfig struct { - TlscheckTimeLeft TlscheckTimeLeftMetricConfig `mapstructure:"tlscheck.time_left"` + TlscheckTimeLeft MetricConfig `mapstructure:"tlscheck.time_left"` } func DefaultMetricsConfig() MetricsConfig { return MetricsConfig{ - TlscheckTimeLeft: TlscheckTimeLeftMetricConfig{ - Enabled: true, - AggregationStrategy: AggregationStrategyAvg, - EnabledAttributes: []TlscheckTimeLeftMetricAttributeKey{TlscheckTimeLeftMetricAttributeKeyTlscheckX509Issuer, TlscheckTimeLeftMetricAttributeKeyTlscheckX509Cn}, + TlscheckTimeLeft: MetricConfig{ + Enabled: true, }, } } diff --git a/receiver/tlscheckreceiver/internal/metadata/generated_config_test.go b/receiver/tlscheckreceiver/internal/metadata/generated_config_test.go index 7f0aebba74615..e8f483919487b 100644 --- a/receiver/tlscheckreceiver/internal/metadata/generated_config_test.go +++ b/receiver/tlscheckreceiver/internal/metadata/generated_config_test.go @@ -26,10 +26,8 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "all_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ - TlscheckTimeLeft: TlscheckTimeLeftMetricConfig{ - Enabled: true, - AggregationStrategy: AggregationStrategyAvg, - EnabledAttributes: []TlscheckTimeLeftMetricAttributeKey{TlscheckTimeLeftMetricAttributeKeyTlscheckX509Issuer, TlscheckTimeLeftMetricAttributeKeyTlscheckX509Cn, TlscheckTimeLeftMetricAttributeKeyTlscheckX509San}, + TlscheckTimeLeft: MetricConfig{ + Enabled: true, }, }, ResourceAttributes: ResourceAttributesConfig{ @@ -41,10 +39,8 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "none_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ - TlscheckTimeLeft: TlscheckTimeLeftMetricConfig{ - Enabled: false, - AggregationStrategy: AggregationStrategyAvg, - EnabledAttributes: []TlscheckTimeLeftMetricAttributeKey{TlscheckTimeLeftMetricAttributeKeyTlscheckX509Issuer, TlscheckTimeLeftMetricAttributeKeyTlscheckX509Cn, TlscheckTimeLeftMetricAttributeKeyTlscheckX509San}, + TlscheckTimeLeft: MetricConfig{ + Enabled: false, }, }, ResourceAttributes: ResourceAttributesConfig{ @@ -56,7 +52,7 @@ func TestMetricsBuilderConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadMetricsBuilderConfig(t, tt.name) - diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(TlscheckTimeLeftMetricConfig{}, ResourceAttributeConfig{})) + diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})) require.Emptyf(t, diff, "Config mismatch (-expected +actual):\n%s", diff) }) } diff --git a/receiver/tlscheckreceiver/internal/metadata/generated_metrics.go b/receiver/tlscheckreceiver/internal/metadata/generated_metrics.go index 5eb02053bf4f3..402514c0ef165 100644 --- a/receiver/tlscheckreceiver/internal/metadata/generated_metrics.go +++ b/receiver/tlscheckreceiver/internal/metadata/generated_metrics.go @@ -3,7 +3,6 @@ package metadata import ( - "slices" "time" "go.opentelemetry.io/collector/component" @@ -13,13 +12,6 @@ import ( "go.opentelemetry.io/collector/receiver" ) -const ( - AggregationStrategySum = "sum" - AggregationStrategyAvg = "avg" - AggregationStrategyMin = "min" - AggregationStrategyMax = "max" -) - var MetricsInfo = metricsInfo{ TlscheckTimeLeft: metricInfo{ Name: "tlscheck.time_left", @@ -35,10 +27,9 @@ type metricInfo struct { } type metricTlscheckTimeLeft struct { - data pmetric.Metric // data buffer for generated metric. - config TlscheckTimeLeftMetricConfig // metric config provided by user. 
- capacity int // max observed number of data points added to the metric. - aggDataPoints []int64 // slice containing number of aggregated datapoints at each index + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. } // init fills tlscheck.time_left metric with initial data. @@ -48,54 +39,19 @@ func (m *metricTlscheckTimeLeft) init() { m.data.SetUnit("s") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) - m.aggDataPoints = m.aggDataPoints[:0] } func (m *metricTlscheckTimeLeft) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, tlscheckX509IssuerAttributeValue string, tlscheckX509CnAttributeValue string, tlscheckX509SanAttributeValue []any) { if !m.config.Enabled { return } - - dp := pmetric.NewNumberDataPoint() + dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - if slices.Contains(m.config.EnabledAttributes, TlscheckTimeLeftMetricAttributeKeyTlscheckX509Issuer) { - dp.Attributes().PutStr("tlscheck.x509.issuer", tlscheckX509IssuerAttributeValue) - } - if slices.Contains(m.config.EnabledAttributes, TlscheckTimeLeftMetricAttributeKeyTlscheckX509Cn) { - dp.Attributes().PutStr("tlscheck.x509.cn", tlscheckX509CnAttributeValue) - } - if slices.Contains(m.config.EnabledAttributes, TlscheckTimeLeftMetricAttributeKeyTlscheckX509San) { - dp.Attributes().PutEmptySlice("tlscheck.x509.san").FromRaw(tlscheckX509SanAttributeValue) - } - - var s string - dps := m.data.Gauge().DataPoints() - for i := 0; i < dps.Len(); i++ { - dpi := dps.At(i) - if dp.Attributes().Equal(dpi.Attributes()) && dp.StartTimestamp() == dpi.StartTimestamp() && dp.Timestamp() == dpi.Timestamp() { - switch s = m.config.AggregationStrategy; s { - case AggregationStrategySum, AggregationStrategyAvg: - dpi.SetIntValue(dpi.IntValue() + val) - m.aggDataPoints[i] += 1 - return - case AggregationStrategyMin: - if dpi.IntValue() > val { - dpi.SetIntValue(val) - } - return - case AggregationStrategyMax: - if dpi.IntValue() < val { - dpi.SetIntValue(val) - } - return - } - } - } - dp.SetIntValue(val) - m.aggDataPoints = append(m.aggDataPoints, 1) - dp.MoveTo(dps.AppendEmpty()) + dp.Attributes().PutStr("tlscheck.x509.issuer", tlscheckX509IssuerAttributeValue) + dp.Attributes().PutStr("tlscheck.x509.cn", tlscheckX509CnAttributeValue) + dp.Attributes().PutEmptySlice("tlscheck.x509.san").FromRaw(tlscheckX509SanAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -108,18 +64,13 @@ func (m *metricTlscheckTimeLeft) updateCapacity() { // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
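With the reaggregation path removed, `recordDataPoint` above appends one gauge point per call and always attaches the issuer/cn/san attributes. A runnable sketch of the resulting behavior ("Example CA" and "example.com" are made-up illustrative values):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	m := pmetric.NewMetric()
	m.SetName("tlscheck.time_left")
	m.SetEmptyGauge()
	// Two records with identical attributes: previously these were merged
	// according to aggregation_strategy; now each becomes its own data point.
	for _, v := range []int64{100, 200} {
		dp := m.Gauge().DataPoints().AppendEmpty()
		dp.SetIntValue(v)
		dp.Attributes().PutStr("tlscheck.x509.issuer", "Example CA")
		dp.Attributes().PutStr("tlscheck.x509.cn", "example.com")
		dp.Attributes().PutEmptySlice("tlscheck.x509.san").FromRaw([]any{"example.com"})
	}
	fmt.Println(m.Gauge().DataPoints().Len()) // 2
}
```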
func (m *metricTlscheckTimeLeft) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - if m.config.AggregationStrategy == AggregationStrategyAvg { - for i, aggCount := range m.aggDataPoints { - m.data.Gauge().DataPoints().At(i).SetIntValue(m.data.Gauge().DataPoints().At(i).IntValue() / aggCount) - } - } m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } -func newMetricTlscheckTimeLeft(cfg TlscheckTimeLeftMetricConfig) metricTlscheckTimeLeft { +func newMetricTlscheckTimeLeft(cfg MetricConfig) metricTlscheckTimeLeft { m := metricTlscheckTimeLeft{config: cfg} if cfg.Enabled { diff --git a/receiver/tlscheckreceiver/internal/metadata/generated_metrics_test.go b/receiver/tlscheckreceiver/internal/metadata/generated_metrics_test.go index a170e131c23ba..25d2d4ee9ff76 100644 --- a/receiver/tlscheckreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/tlscheckreceiver/internal/metadata/generated_metrics_test.go @@ -19,7 +19,6 @@ const ( testDataSetDefault testDataSet = iota testDataSetAll testDataSetNone - testDataSetReag ) func TestMetricsBuilder(t *testing.T) { @@ -37,11 +36,6 @@ func TestMetricsBuilder(t *testing.T) { metricsSet: testDataSetAll, resAttrsSet: testDataSetAll, }, - { - name: "reaggregate_set", - metricsSet: testDataSetReag, - resAttrsSet: testDataSetReag, - }, { name: "none_set", metricsSet: testDataSetNone, @@ -66,13 +60,9 @@ func TestMetricsBuilder(t *testing.T) { settings := receivertest.NewNopSettings(receivertest.NopType) settings.Logger = zap.New(observedZapCore) mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, tt.name), settings, WithStartTime(start)) - aggMap := make(map[string]string) // contains the aggregation strategies for each metric name - aggMap["TlscheckTimeLeft"] = mb.metricTlscheckTimeLeft.config.AggregationStrategy expectedWarnings := 0 - if tt.metricsSet != testDataSetReag { - assert.Equal(t, expectedWarnings, observedLogs.Len()) - } + assert.Equal(t, expectedWarnings, observedLogs.Len()) defaultMetricsCount := 0 allMetricsCount := 0 @@ -80,17 +70,11 @@ func TestMetricsBuilder(t *testing.T) { defaultMetricsCount++ allMetricsCount++ mb.RecordTlscheckTimeLeftDataPoint(ts, 1, "tlscheck.x509.issuer-val", "tlscheck.x509.cn-val", []any{"tlscheck.x509.san-item1", "tlscheck.x509.san-item2"}) - if tt.name == "reaggregate_set" { - mb.RecordTlscheckTimeLeftDataPoint(ts, 3, "tlscheck.x509.issuer-val-2", "tlscheck.x509.cn-val-2", []any{"tlscheck.x509.san-item3", "tlscheck.x509.san-item4"}) - } rb := mb.NewResourceBuilder() rb.SetTlscheckTarget("tlscheck.target-val") res := rb.Emit() metrics := mb.Emit(WithResource(res)) - if tt.name == "reaggregate_set" { - assert.Empty(t, mb.metricTlscheckTimeLeft.aggDataPoints) - } if tt.expectEmpty { assert.Equal(t, 0, metrics.ResourceMetrics().Len()) @@ -118,52 +102,26 @@ func TestMetricsBuilder(t *testing.T) { for _, mi := range allMetricsList { switch mi.Name() { case "tlscheck.time_left": - if tt.name != "reaggregate_set" { - assert.False(t, validatedMetrics["tlscheck.time_left"], "Found a duplicate in the metrics slice: tlscheck.time_left") - validatedMetrics["tlscheck.time_left"] = true - assert.Equal(t, pmetric.MetricTypeGauge, mi.Type()) - assert.Equal(t, 1, mi.Gauge().DataPoints().Len()) - assert.Equal(t, "Time in seconds until certificate expiry, as specified by `NotAfter` field in the x.509 certificate. 
Negative values represent time in seconds since expiration.", mi.Description()) - assert.Equal(t, "s", mi.Unit()) - dp := mi.Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - tlscheckX509IssuerAttrVal, ok := dp.Attributes().Get("tlscheck.x509.issuer") - assert.True(t, ok) - assert.Equal(t, "tlscheck.x509.issuer-val", tlscheckX509IssuerAttrVal.Str()) - tlscheckX509CnAttrVal, ok := dp.Attributes().Get("tlscheck.x509.cn") - assert.True(t, ok) - assert.Equal(t, "tlscheck.x509.cn-val", tlscheckX509CnAttrVal.Str()) - } else { - assert.False(t, validatedMetrics["tlscheck.time_left"], "Found a duplicate in the metrics slice: tlscheck.time_left") - validatedMetrics["tlscheck.time_left"] = true - assert.Equal(t, pmetric.MetricTypeGauge, mi.Type()) - assert.Equal(t, 1, mi.Gauge().DataPoints().Len()) - assert.Equal(t, "Time in seconds until certificate expiry, as specified by `NotAfter` field in the x.509 certificate. Negative values represent time in seconds since expiration.", mi.Description()) - assert.Equal(t, "s", mi.Unit()) - dp := mi.Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - switch aggMap["tlscheck.time_left"] { - case "sum": - assert.Equal(t, int64(4), dp.IntValue()) - case "avg": - assert.Equal(t, int64(2), dp.IntValue()) - case "min": - assert.Equal(t, int64(1), dp.IntValue()) - case "max": - assert.Equal(t, int64(3), dp.IntValue()) - } - _, ok := dp.Attributes().Get("tlscheck.x509.issuer") - assert.False(t, ok) - _, ok = dp.Attributes().Get("tlscheck.x509.cn") - assert.False(t, ok) - _, ok = dp.Attributes().Get("tlscheck.x509.san") - assert.False(t, ok) - } + assert.False(t, validatedMetrics["tlscheck.time_left"], "Found a duplicate in the metrics slice: tlscheck.time_left") + validatedMetrics["tlscheck.time_left"] = true + assert.Equal(t, pmetric.MetricTypeGauge, mi.Type()) + assert.Equal(t, 1, mi.Gauge().DataPoints().Len()) + assert.Equal(t, "Time in seconds until certificate expiry, as specified by `NotAfter` field in the x.509 certificate. 
Negative values represent time in seconds since expiration.", mi.Description()) + assert.Equal(t, "s", mi.Unit()) + dp := mi.Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + tlscheckX509IssuerAttrVal, ok := dp.Attributes().Get("tlscheck.x509.issuer") + assert.True(t, ok) + assert.Equal(t, "tlscheck.x509.issuer-val", tlscheckX509IssuerAttrVal.Str()) + tlscheckX509CnAttrVal, ok := dp.Attributes().Get("tlscheck.x509.cn") + assert.True(t, ok) + assert.Equal(t, "tlscheck.x509.cn-val", tlscheckX509CnAttrVal.Str()) + tlscheckX509SanAttrVal, ok := dp.Attributes().Get("tlscheck.x509.san") + assert.True(t, ok) + assert.Equal(t, []any{"tlscheck.x509.san-item1", "tlscheck.x509.san-item2"}, tlscheckX509SanAttrVal.Slice().AsRaw()) } } }) diff --git a/receiver/tlscheckreceiver/internal/metadata/testdata/config.yaml b/receiver/tlscheckreceiver/internal/metadata/testdata/config.yaml index eb157ff18b0fc..99cbe50481b15 100644 --- a/receiver/tlscheckreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/tlscheckreceiver/internal/metadata/testdata/config.yaml @@ -3,15 +3,6 @@ all_set: metrics: tlscheck.time_left: enabled: true - attributes: ["tlscheck.x509.issuer","tlscheck.x509.cn","tlscheck.x509.san"] - resource_attributes: - tlscheck.target: - enabled: true -reaggregate_set: - metrics: - tlscheck.time_left: - enabled: true - attributes: [] resource_attributes: tlscheck.target: enabled: true @@ -19,7 +10,6 @@ none_set: metrics: tlscheck.time_left: enabled: false - attributes: ["tlscheck.x509.issuer","tlscheck.x509.cn","tlscheck.x509.san"] resource_attributes: tlscheck.target: enabled: false diff --git a/receiver/tlscheckreceiver/metadata.yaml b/receiver/tlscheckreceiver/metadata.yaml index 576fbd4f9c56b..97a88c934b41c 100644 --- a/receiver/tlscheckreceiver/metadata.yaml +++ b/receiver/tlscheckreceiver/metadata.yaml @@ -1,7 +1,6 @@ display_name: TLS Check Receiver type: tls_check deprecated_type: tlscheck -reaggregation_enabled: true description: This receiver emits metrics about x.509 certificates. @@ -22,15 +21,12 @@ resource_attributes: attributes: tlscheck.x509.cn: description: The commonName in the subject of the certificate. - requirement_level: recommended type: string tlscheck.x509.issuer: description: The entity that issued the certificate. - requirement_level: recommended type: string tlscheck.x509.san: description: The Subject Alternative Name of the certificate. 
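The `tlscheck.time_left` description asserted above defines the metric's sign convention. A small sketch of the arithmetic it implies; the helper name is hypothetical and the receiver's actual implementation is not part of this diff:

```go
package main

import (
	"crypto/x509"
	"fmt"
	"time"
)

// secondsUntilExpiry mirrors the documented semantics: positive before
// NotAfter, negative once the certificate has expired.
func secondsUntilExpiry(cert *x509.Certificate, now time.Time) int64 {
	return int64(cert.NotAfter.Sub(now).Seconds())
}

func main() {
	expired := &x509.Certificate{NotAfter: time.Now().Add(-time.Hour)}
	fmt.Println(secondsUntilExpiry(expired, time.Now())) // ≈ -3600
}
```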
- requirement_level: opt_in type: slice metrics: diff --git a/reports/distributions/contrib.yaml b/reports/distributions/contrib.yaml index 6d6b744807a8c..4f9cb379266b0 100644 --- a/reports/distributions/contrib.yaml +++ b/reports/distributions/contrib.yaml @@ -221,7 +221,7 @@ components: - splunkenterprise - sqlquery - sqlserver - - ssh_check + - sshcheck - statsd - stef - syslog diff --git a/testbed/tests/k8sattributes_processor_test.go b/testbed/tests/k8sattributes_processor_test.go index 8b51f200776c8..85a90d480a007 100644 --- a/testbed/tests/k8sattributes_processor_test.go +++ b/testbed/tests/k8sattributes_processor_test.go @@ -25,42 +25,14 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed" ) -// ── Test coverage reference ─────────────────────────────────────────────────── -// -// # TestMetricK8sAttributesProcessor (basic) -// -// Cluster: 100 nodes, N namespaces -// Each Deployment owns exactly 1 ReplicaSet which owns exactly 1 Pod. -// -// Scale | Nodes | Namespaces | Deployments | ReplicaSets | Pods -// -------|-------|------------|-------------|-------------|------ -// 110 | 100 | 110 | 110 | 110 | 110 -// 1 000 | 100 | 1 000 | 1 000 | 1 000 | 1 000 -// 5 000 | 100 | 5 000 | 5 000 | 5 000 | 5 000 -// -// # TestMetricK8sAttributesProcessorExtended (extended) -// -// Cluster: 100 nodes, N namespaces in total with 4 namespaces being targeted for the workloads(1 per workload type). -// DaemonSet pods are pinned to a single node via nodeSelector (1 pod per DaemonSet). -// -// Namespace | Workload type | Ownership chain -// -----------------|---------------|-------------------------------------- -// namespace-000000 | Deployment | Deployment → ReplicaSet → Pod -// namespace-000001 | StatefulSet | StatefulSet → Pod -// namespace-000002 | DaemonSet | DaemonSet (nodeSelector) → Pod -// namespace-000003 | CronJob | CronJob → Job → Pod -// -// Scale | Nodes | NS | Deployments | RSes | StatefulSets | DaemonSets | CronJobs | Jobs | Total Pods -// -------|-------|----|-------------|-------|--------------|------------|----------|-------|------------ -// 110 | 100 | 110 | 110 | 110 | 110 | 110 | 110 | 110 | 440 -// 1 000 | 100 | 110 | 1 000 | 1 000 | 1 000 | 1 000 | 1 000 | 1 000 | 4 000 -// 5 000 | 100 | 110 | 5 000 | 5 000 | 5 000 | 5 000 | 5 000 | 5 000 | 20 000 - -// Create 100 fake nodes for extra cluster load using kwokctl scale -const numNodes = 100 - -// numMetricBatches is how many metric batches to send and assert on in the k8sattributes test. -const numMetricBatches = 10 +func skipIfKwokUnavailable(t *testing.T) { + if os.Getenv("SKIP_KWOK_TESTS") == "1" { + t.Skip("Skipping KWOK test: SKIP_KWOK_TESTS=1") + } + if _, err := exec.LookPath("kwokctl"); err != nil { + t.Skipf("Skipping KWOK test: kwokctl not found in PATH (install from https://kwok.sigs.k8s.io/)") + } +} // kwokNamespaceResourceYAML is a KwokctlResource for "kwokctl scale namespace" const kwokNamespaceResourceYAML = ` @@ -120,117 +92,6 @@ template: |- nodeName: {{ .nodeName }} ` -// kwokStatefulSetResourceYAML is a KwokctlResource for "kwokctl scale statefulset". -// Pods land in namespace-000001 and are created by the kube-controller-manager StatefulSet -// controller, giving each pod a direct OwnerReference to its StatefulSet. 
-const kwokStatefulSetResourceYAML = ` -apiVersion: config.kwok.x-k8s.io/v1alpha1 -kind: KwokctlResource -metadata: - name: statefulset -parameters: - replicas: 1 - containers: - - name: container-0 - image: registry.k8s.io/pause:3.9 -template: |- - apiVersion: apps/v1 - kind: StatefulSet - metadata: - name: {{ Name }} - namespace: namespace-000001 - spec: - replicas: {{ .replicas }} - selector: - matchLabels: - app: {{ Name }} - serviceName: "{{ Name }}-svc" - template: - metadata: - labels: - app: {{ Name }} - spec: - containers: - {{ range $index, $container := .containers }} - - name: {{ $container.name }} - image: {{ $container.image }} - {{ end }} -` - -// kwokDaemonSetResourceYAML is a KwokctlResource for "kwokctl scale daemonset". -// A nodeSelector pins each DaemonSet pod to kwok-node-000000, so that even with 100 -// nodes the DaemonSet controller creates exactly one pod per DaemonSet (1:1 ratio), -// giving that pod a direct OwnerReference to its DaemonSet. -// Pods land in namespace-000002. -const kwokDaemonSetResourceYAML = ` -apiVersion: config.kwok.x-k8s.io/v1alpha1 -kind: KwokctlResource -metadata: - name: daemonset -parameters: - containers: - - name: container-0 - image: registry.k8s.io/pause:3.9 -template: |- - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: {{ Name }} - namespace: namespace-000002 - spec: - selector: - matchLabels: - app: {{ Name }} - template: - metadata: - labels: - app: {{ Name }} - spec: - nodeSelector: - kubernetes.io/hostname: node-000000 - containers: - {{ range $index, $container := .containers }} - - name: {{ $container.name }} - image: {{ $container.image }} - {{ end }} -` - -// kwokCronJobResourceYAML is a KwokctlResource for "kwokctl scale cronjob". -// Each CronJob fires every minute; the kube-controller-manager CronJob controller -// creates a Job with an OwnerReference to the CronJob, and the Job controller then -// creates a Pod with an OwnerReference to the Job. This establishes the full -// Pod → Job → CronJob ownership chain that the k8sattributes processor traverses. -// Pods land in namespace-000003. -const kwokCronJobResourceYAML = ` -apiVersion: config.kwok.x-k8s.io/v1alpha1 -kind: KwokctlResource -metadata: - name: cronjob -parameters: - containers: - - name: container-0 - image: registry.k8s.io/pause:3.9 -template: |- - apiVersion: batch/v1 - kind: CronJob - metadata: - name: {{ Name }} - namespace: namespace-000003 - spec: - schedule: "*/1 * * * *" - concurrencyPolicy: Forbid - jobTemplate: - spec: - template: - spec: - restartPolicy: Never - containers: - {{ range $index, $container := .containers }} - - name: {{ $container.name }} - image: {{ $container.image }} - {{ end }} -` - // sample metrics consumed by the test pipeline // k8s.pod.uid is added at test time from the KWOK-created pod so the processor can associate by pod UID. 
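The comment above notes that `k8s.pod.uid` is stamped onto the test metrics at send time so the processor's `pod_association` rule can match them to a KWOK-created pod. A self-contained sketch of that stamping step; the helper name `stampPodUID` is hypothetical, and the test code below does the equivalent inline:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

// stampPodUID copies the template metrics and sets the pod UID the
// k8sattributes processor's pod_association rule matches on.
func stampPodUID(template pmetric.Metrics, podUID string) pmetric.Metrics {
	out := pmetric.NewMetrics()
	template.CopyTo(out)
	out.ResourceMetrics().At(0).Resource().Attributes().PutStr("k8s.pod.uid", podUID)
	return out
}

func main() {
	template := pmetric.NewMetrics()
	template.ResourceMetrics().AppendEmpty()
	m := stampPodUID(template, "11111111-2222-3333-4444-555555555555")
	uid, _ := m.ResourceMetrics().At(0).Resource().Attributes().Get("k8s.pod.uid")
	fmt.Println(uid.Str())
}
```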
var mockedConsumedMetricsForK8s = func() pmetric.Metrics { @@ -248,11 +109,12 @@ var mockedConsumedMetricsForK8s = func() pmetric.Metrics { type k8sAttributesProcessorTestCase struct { name string k8sAttributesConfig string + mockedConsumedMetrics pmetric.Metrics expectedResourceAttrs map[string]any // assert these resource attributes are present (nil = presence only) - numWorkloads int // workload instances to create (basic: one pod per deployment; extended: instances per ownership type) + numPods int // pods and namespaces to create; each pod in its own deployment (1 replica) } -func getK8sAttributesProcessorBasicTestCases() []k8sAttributesProcessorTestCase { +func getK8sAttributesProcessorTestCases() []k8sAttributesProcessorTestCase { // __CONTEXT__ is replaced at runtime with the current context from the kubeconfig. kwokConfig := ` k8s_attributes: @@ -281,102 +143,29 @@ func getK8sAttributesProcessorBasicTestCases() []k8sAttributesProcessorTestCase { name: "110_workload_cluster", k8sAttributesConfig: kwokConfig, + mockedConsumedMetrics: mockedConsumedMetricsForK8s, expectedResourceAttrs: expectedAttrs, - numWorkloads: 110, + numPods: 110, }, { name: "1K_workload_cluster", k8sAttributesConfig: kwokConfig, + mockedConsumedMetrics: mockedConsumedMetricsForK8s, expectedResourceAttrs: expectedAttrs, - numWorkloads: 1000, + numPods: 1000, }, { name: "5K_workload_cluster", k8sAttributesConfig: kwokConfig, + mockedConsumedMetrics: mockedConsumedMetricsForK8s, expectedResourceAttrs: expectedAttrs, - numWorkloads: 5000, + numPods: 5000, }, } } -func getK8sAttributesProcessorExtendedTestCases() []k8sAttributesProcessorTestCase { - // __CONTEXT__ is replaced at runtime with the current context from the kubeconfig. - kwokConfig := ` - k8s_attributes: - auth_type: "kubeConfig" - context: "__CONTEXT__" - extract: - metadata: - - k8s.pod.name - - k8s.pod.start_time - - k8s.pod.uid - - k8s.namespace.name - - k8s.deployment.name - - k8s.deployment.uid - - k8s.replicaset.name - - k8s.replicaset.uid - - k8s.statefulset.name - - k8s.statefulset.uid - - k8s.daemonset.name - - k8s.daemonset.uid - - k8s.cronjob.name - - k8s.cronjob.uid - - k8s.job.name - - k8s.job.uid - - k8s.node.name - - k8s.cluster.uid - - container.image.name - - container.image.tag - pod_association: - - sources: - - from: resource_attribute - name: k8s.pod.uid -` - // Attributes that must appear on every enriched batch regardless of ownership chain. - // Chain-specific attrs (deployment, statefulset, daemonset, cronjob/job) are validated - // separately in runTestbedWithK8sConfigExtended via the podTypes chainAttrs. - // container.id and container.image.repo_digests are omitted: KWOK runs no real container - // runtime, so those fields are never populated in the pod status. 
-	expectedAttrs := map[string]any{
-		"k8s.pod.name":         nil,
-		"k8s.pod.start_time":   nil,
-		"k8s.pod.uid":          nil,
-		"k8s.namespace.name":   nil,
-		"k8s.node.name":        nil,
-		"k8s.cluster.uid":      nil,
-		"container.image.name": nil,
-		"container.image.tag":  nil,
-	}
-	return []k8sAttributesProcessorTestCase{
-		{
-			name:                  "110_workload_cluster_extended",
-			k8sAttributesConfig:   kwokConfig,
-			expectedResourceAttrs: expectedAttrs,
-			numWorkloads:          110,
-		},
-		{
-			name:                  "1K_workload_cluster_extended",
-			k8sAttributesConfig:   kwokConfig,
-			expectedResourceAttrs: expectedAttrs,
-			numWorkloads:          1000,
-		},
-		{
-			name:                  "5K_workload_cluster_extended",
-			k8sAttributesConfig:   kwokConfig,
-			expectedResourceAttrs: expectedAttrs,
-			numWorkloads:          5000,
-		},
-	}
-}
-
-func skipIfKwokUnavailable(t *testing.T) {
-	if os.Getenv("SKIP_KWOK_TESTS") == "1" {
-		t.Skip("Skipping KWOK test: SKIP_KWOK_TESTS=1")
-	}
-	if _, err := exec.LookPath("kwokctl"); err != nil {
-		t.Skipf("Skipping KWOK test: kwokctl not found in PATH (install from https://kwok.sigs.k8s.io/)")
-	}
-}
+// numMetricBatches is how many metric batches to send and assert on in the k8sattributes test.
+const numMetricBatches = 10

 // logKWOKClusterState logs namespaces, deployments, pods, and control-plane component logs in the cluster for debugging when pod count is 0.
 func logKWOKClusterState(ctx context.Context, t *testing.T, clientset *kubernetes.Clientset, targetNS, clusterName string) {
@@ -471,55 +260,40 @@ func logKWOKClusterState(ctx context.Context, t *testing.T, clientset *kubernete
 	}
 }

-// kwokCluster holds the outputs of createKWOKCluster that both setup helpers need.
-type kwokCluster struct {
-	Name           string
-	KubeconfigPath string
-	Clientset      *kubernetes.Clientset
-	Cleanup        func()
-}
-
-// createKWOKCluster starts a kwokctl binary-runtime cluster, scales it to numNodes nodes
-// and numNamespaces namespaces, writes the kubeconfig to a temp file, constructs a
-// Kubernetes client, and returns the assembled kwokCluster.
-// The sanitized t.Name() is used to form the cluster name (capped at 50
-// characters). The returned Cleanup must be called when the cluster is no longer needed.
-func createKWOKCluster(t *testing.T, numNamespaces int) kwokCluster {
-	t.Helper()
-
+// setupKWOKCluster creates a KWOK cluster with numPods pods, each owned by its own Deployment (1 replica).
+// See https://kwok.sigs.k8s.io/
+func setupKWOKCluster(t *testing.T, numPods int) (kubeconfigPath, podUID string, cleanup func()) {
+	// Log kwokctl version for debugging CI/runner issues.
if verOut, err := exec.Command("kwokctl", "--version").CombinedOutput(); err != nil { t.Logf("[kwok] kwokctl version: (failed to get: %v)", err) } else { t.Logf("[kwok] kwokctl version:\n%s", strings.TrimSpace(string(verOut))) } - clusterName := strings.ReplaceAll(t.Name(), "/", "-") + clusterName := "otelcol-k8s-" + strings.ReplaceAll(t.Name(), "/", "-") clusterName = strings.ReplaceAll(clusterName, " ", "-") if len(clusterName) > 50 { clusterName = clusterName[:50] } - // #nosec G204 -- clusterName is test-controlled create := exec.Command("kwokctl", "create", "cluster", "--disable-qps-limits", "--runtime", "binary", "--name", clusterName) create.Dir = t.TempDir() out, err := create.CombinedOutput() require.NoError(t, err, "kwokctl create cluster: %s", out) var cleanupOnce sync.Once - cleanup := func() { + cleanup = func() { cleanupOnce.Do(func() { - // #nosec G204 del := exec.Command("kwokctl", "delete", "cluster", "--name", clusterName) _ = del.Run() assert.Eventually(t, func() bool { getClusters := exec.Command("kwokctl", "get", "clusters") - clusterOut, clusterErr := getClusters.Output() - return clusterErr != nil || !strings.Contains(string(clusterOut), clusterName) + out, err = getClusters.Output() + return err != nil || !strings.Contains(string(out), clusterName) }, 30*time.Second, 500*time.Millisecond, "cluster %s should be removed", clusterName) }) } - // #nosec G204 getConfig := exec.Command("kwokctl", "get", "kubeconfig", "--name", clusterName) getConfig.Dir = t.TempDir() out, err = getConfig.CombinedOutput() @@ -530,74 +304,48 @@ func createKWOKCluster(t *testing.T, numNamespaces int) kwokCluster { _, err = tmpFile.Write(out) require.NoError(t, err) require.NoError(t, tmpFile.Close()) + kubeconfigPath = tmpFile.Name() - kubeconfigPath := tmpFile.Name() - k8sCfg, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) + ctx := t.Context() + config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) require.NoError(t, err) - clientset, err := kubernetes.NewForConfig(k8sCfg) + clientset, err := kubernetes.NewForConfig(config) require.NoError(t, err) - // Scale to numNodes fake nodes. - // #nosec G204 -- clusterName and numNodes are test-controlled + // Create 100 fake nodes for extra cluster load using kwokctl scale + const numNodes = 100 + // #nosec G204 -- clusterName and numNodes are test-controlled, not user input scaleNodes := exec.Command("kwokctl", "scale", "node", "--replicas", fmt.Sprintf("%d", numNodes), "--name", clusterName) scaleNodes.Dir = t.TempDir() - if nodeOut, nodeErr := scaleNodes.CombinedOutput(); nodeErr != nil { - t.Logf("[kwok] kwokctl scale node (optional): %v\n%s", nodeErr, nodeOut) + if out, runErr := scaleNodes.CombinedOutput(); runErr != nil { + t.Logf("kwokctl scale node (optional): %v\n%s", runErr, out) } - // Scale to numNamespaces namespaces. 
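The cleanup closure above wraps cluster deletion in a `sync.Once`, which is why the tests can safely call `cleanup()` both via `defer` and explicitly at the end of a passing run. A minimal demonstration of that idiom:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var once sync.Once
	cleanup := func() {
		once.Do(func() { fmt.Println("kwokctl delete cluster") })
	}
	defer cleanup() // the deferred call becomes a no-op...
	cleanup()       // ...because this explicit call already ran the body
}
```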
- kwokNSConfigPath := filepath.Join(t.TempDir(), "kwok-ns-resource.yaml") - require.NoError(t, os.WriteFile(kwokNSConfigPath, []byte(strings.TrimSpace(kwokNamespaceResourceYAML)), 0o600)) - // #nosec G204 + kwokConfigPath := filepath.Join(t.TempDir(), "kwok-resources.yaml") + kwokConfigContent := strings.TrimSpace(kwokNamespaceResourceYAML) + "\n---\n" + strings.TrimSpace(kwokDeploymentResourceYAML) + require.NoError(t, os.WriteFile(kwokConfigPath, []byte(kwokConfigContent), 0o600)) + + // #nosec G204 -- clusterName, numPods, kwokConfigPath are test-controlled, not user input scaleNS := exec.Command("kwokctl", "scale", "namespace", - "--replicas", fmt.Sprintf("%d", numNamespaces), + "--replicas", fmt.Sprintf("%d", numPods), "--serial-length", "6", "--name", clusterName, - "--config", kwokNSConfigPath) + "--config", kwokConfigPath) scaleNS.Dir = t.TempDir() - if nsOut, nsErr := scaleNS.CombinedOutput(); nsErr != nil { - t.Fatalf("kwokctl scale namespace: %v\n%s", nsErr, nsOut) - } - - // Wait for all 4 namespaces to exist before returning so callers can immediately - // issue workload scale commands without racing the namespace controller. - ctx := t.Context() - for i := range 4 { - ns := fmt.Sprintf("namespace-%06d", i) - assert.Eventually(t, func() bool { - _, nsErr := clientset.CoreV1().Namespaces().Get(ctx, ns, metav1.GetOptions{}) - return nsErr == nil - }, 2*time.Minute, 2*time.Second, "namespace %s never created", ns) + if out, runErr := scaleNS.CombinedOutput(); runErr != nil { + t.Fatalf("kwokctl scale namespace: %v\n%s", runErr, out) } - return kwokCluster{ - Name: clusterName, - KubeconfigPath: kubeconfigPath, - Clientset: clientset, - Cleanup: cleanup, - } -} - -// setupKWOKCluster creates a KWOK cluster with numWorkloads pods as part of their own Deployment (1 replica). -// See https://kwok.sigs.k8s.io/ -func setupKWOKCluster(t *testing.T, numWorkloads int) (kubeconfigPath, podUID string, cleanup func()) { - // createKWOKCluster scales to numNodes nodes and numWorkloads namespaces (one per deployment). - kc := createKWOKCluster(t, numWorkloads) - cleanup = kc.Cleanup - kubeconfigPath = kc.KubeconfigPath - clusterName := kc.Name - clientset := kc.Clientset - ctx := t.Context() - - // All deployments are placed in namespace-000000 by the deployment template. targetNS := "namespace-000000" + // Wait for target namespace to exist before creating deployments (avoids races in CI). 
+	assert.Eventually(t, func() bool {
+		_, getErr := clientset.CoreV1().Namespaces().Get(ctx, targetNS, metav1.GetOptions{})
+		return getErr == nil
+	}, 2*time.Minute, 2*time.Second, "namespace %s never created", targetNS)

-	kwokConfigPath := filepath.Join(t.TempDir(), "kwok-resources.yaml")
-	require.NoError(t, os.WriteFile(kwokConfigPath, []byte(strings.TrimSpace(kwokDeploymentResourceYAML)), 0o600))
-
-	// #nosec G204 -- clusterName, numWorkloads, kwokConfigPath are test-controlled, not user input
+	// #nosec G204 -- clusterName, numPods, kwokConfigPath are test-controlled, not user input
 	scaleDeploy := exec.Command("kwokctl", "scale", "deployment",
-		"--replicas", fmt.Sprintf("%d", numWorkloads),
+		"--replicas", fmt.Sprintf("%d", numPods),
 		"--serial-length", "6",
 		"--param", ".replicas=1",
 		"--name", clusterName,
@@ -607,8 +355,8 @@ func setupKWOKCluster(t *testing.T, numPods int) (kubeconfigPath, podUID st
 		t.Fatalf("kwokctl scale deployment: %v\n%s", runErr, out)
 	}

-	// Wait until we have numWorkloads pods in namespace-000000 (all deployments go there)
-	podWaitTimeout := min(3*time.Minute+time.Duration(numWorkloads/5)*time.Second, 15*time.Minute)
+	// Wait until we have numPods pods in namespace-000000 (all deployments go there)
+	podWaitTimeout := min(3*time.Minute+time.Duration(numPods/5)*time.Second, 15*time.Minute)
 	var podCount int
 	var debugLogged bool
 	var attempt int
@@ -631,8 +379,8 @@ func setupKWOKCluster(t *testing.T, numPods int) (kubeconfigPath, podUID st
 			t.Logf("[kwok debug] got 0 pods in %q after %d attempts, logging cluster state", targetNS, attempt)
 			logKWOKClusterState(ctx, t, clientset, targetNS, clusterName)
 		}
-		return podCount >= numWorkloads
-	}, podWaitTimeout, 1*time.Second, "timed out waiting for %d pods in namespace-000000 (got %d)", numWorkloads, podCount)
+		return podCount >= numPods
+	}, podWaitTimeout, 1*time.Second, "timed out waiting for %d pods in namespace-000000 (got %d)", numPods, podCount)

 	// Get first pod UID in namespace-000000 for metric association
 	list, listErr := clientset.CoreV1().Pods(targetNS).List(ctx, metav1.ListOptions{})
@@ -643,6 +391,25 @@ func setupKWOKCluster(t *testing.T, numPods int) (kubeconfigPath, podUID st
 	return kubeconfigPath, podUID, cleanup
 }

+// TestMetricK8sAttributesProcessor measures the k8sattributes processor's
+// performance and resource utilization when the component collects k8s
+// metadata from a test k8s cluster with 100 nodes and N Pods, each controlled
+// by its own Deployment/ReplicaSet, while there are also N Namespaces.
+func TestMetricK8sAttributesProcessor(t *testing.T) {
+	tests := getK8sAttributesProcessorTestCases()
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			skipIfKwokUnavailable(t)
+			kubeconfigPath, podUID, cleanup := setupKWOKCluster(t, test.numPods)
+			defer cleanup()
+			runTestbedWithK8sConfig(t, &test, kubeconfigPath, podUID)
+			cleanup()
+		})
+	}
+}
+
 // getKubeconfigCurrentContext returns the current context name from the kubeconfig at path, or "" on error.
 func getKubeconfigCurrentContext(kubeconfigPath string) string {
 	cmd := exec.Command("kubectl", "config", "current-context")
@@ -704,7 +471,7 @@ func runTestbedWithK8sConfig(t *testing.T, test *k8sAttributesProcessorTestCase,
 	defer tc.StopAgent()

 	// Allow the k8sattributes processor's informer to sync before sending metrics.
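The informer sync wait computed just below scales with pod count and is capped at one minute. A worked sketch of the formula at the three tested scales; the values follow directly from the expression in the diff:

```go
package main

import (
	"fmt"
	"time"
)

// syncWait reproduces the expression used just below in the diff.
func syncWait(numPods int) time.Duration {
	return min(15*time.Second+time.Duration(numPods/100)*100*time.Millisecond, 60*time.Second)
}

func main() {
	for _, n := range []int{110, 1000, 5000} {
		fmt.Println(n, syncWait(n)) // 110 -> 15.1s, 1000 -> 16s, 5000 -> 20s
	}
}
```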
- syncWait := min(15*time.Second+time.Duration(test.numWorkloads/100)*100*time.Millisecond, 60*time.Second) + syncWait := min(15*time.Second+time.Duration(test.numPods/100)*100*time.Millisecond, 60*time.Second) time.Sleep(syncWait) tc.EnableRecording() @@ -719,7 +486,7 @@ func runTestbedWithK8sConfig(t *testing.T, test *k8sAttributesProcessorTestCase, for i := range numMetricBatches { metricsToSend := pmetric.NewMetrics() - mockedConsumedMetricsForK8s.CopyTo(metricsToSend) + test.mockedConsumedMetrics.CopyTo(metricsToSend) if podUID != "" { metricsToSend.ResourceMetrics().At(0).Resource().Attributes().PutStr("k8s.pod.uid", podUID) } @@ -750,320 +517,3 @@ func runTestbedWithK8sConfig(t *testing.T, test *k8sAttributesProcessorTestCase, } } } - -// extendedClusterPodUIDs holds one representative pod UID from each of the four -// ownership types created by setupKWOKClusterExtended. -type extendedClusterPodUIDs struct { - Deployment string - StatefulSet string - DaemonSet string - CronJob string -} - -// setupKWOKClusterExtended creates a KWOK cluster populated with N workloads of each of -// four types, driven entirely by kwokctl scale commands — the same mechanism used by -// setupKWOKCluster for Deployments: -// -// - namespace-000000: N Deployments → ReplicaSets → Pods (deployment controller) -// - namespace-000001: N StatefulSets → Pods (statefulset controller) -// - namespace-000002: N DaemonSets → Pods (daemonset controller; nodeSelector → 1:1) -// - namespace-000003: N CronJobs → Jobs → Pods (cronjob+job controllers; */1 * * * *) -// -// createKWOKCluster handles node scaling (numNodes nodes) and namespace scaling -// The four workload scale commands run in parallel, as do their pod-readiness waits. -// CronJob pods receive extra timeout headroom because the schedule must first fire (≤ 60 s). -func setupKWOKClusterExtended(t *testing.T, n int) (kubeconfigPath string, podUIDs extendedClusterPodUIDs, cleanup func()) { - t.Helper() - // createKWOKCluster scales to numNodes nodes and n namespaces - kc := createKWOKCluster(t, n) - cleanup = kc.Cleanup - kubeconfigPath = kc.KubeconfigPath - clusterName := kc.Name - clientset := kc.Clientset - ctx := t.Context() - - // Single config file containing the four workload KwokctlResource templates. - kwokConfigPath := filepath.Join(t.TempDir(), "kwok-ext-resources.yaml") - kwokConfigContent := strings.Join([]string{ - strings.TrimSpace(kwokDeploymentResourceYAML), - strings.TrimSpace(kwokStatefulSetResourceYAML), - strings.TrimSpace(kwokDaemonSetResourceYAML), - strings.TrimSpace(kwokCronJobResourceYAML), - }, "\n---\n") - require.NoError(t, os.WriteFile(kwokConfigPath, []byte(kwokConfigContent), 0o600)) - - // Scale all four workload types in parallel. - type scaleSpec struct { - kind string - extraParams []string - } - workloads := []scaleSpec{ - {kind: "deployment", extraParams: []string{"--param", ".replicas=1"}}, - {kind: "statefulset", extraParams: []string{"--param", ".replicas=1"}}, - {kind: "daemonset"}, - {kind: "cronjob"}, - } - - scaleErrs := make([]error, len(workloads)) - var scaleWg sync.WaitGroup - for wi, ws := range workloads { - scaleWg.Go(func() { - args := []string{ - "scale", ws.kind, - "--replicas", fmt.Sprintf("%d", n), - "--serial-length", "6", - "--name", clusterName, - "--config", kwokConfigPath, - } - args = append(args, ws.extraParams...) - // #nosec G204 - cmd := exec.Command("kwokctl", args...) 
- cmd.Dir = t.TempDir() - if cmdOut, cmdErr := cmd.CombinedOutput(); cmdErr != nil { - scaleErrs[wi] = fmt.Errorf("kwokctl scale %s: %w\n%s", ws.kind, cmdErr, cmdOut) - } - }) - } - scaleWg.Wait() - for wi, e := range scaleErrs { - require.NoError(t, e, "scale command for workload index %d failed", wi) - } - - // Wait for N pods in each namespace in parallel. - // CronJob pods get extra headroom: the schedule must fire (≤ 60 s) before pods appear. - baseWait := min(3*time.Minute+time.Duration(n/5)*time.Second, 15*time.Minute) - nsWaits := []struct { - ns string - timeout time.Duration - }{ - {"namespace-000000", baseWait}, - {"namespace-000001", baseWait}, - {"namespace-000002", baseWait}, - {"namespace-000003", baseWait + 2*time.Minute}, - } - - podCounts := make([]int, len(nsWaits)) - var waitWg sync.WaitGroup - for wi, target := range nsWaits { - waitWg.Go(func() { - assert.Eventually(t, func() bool { - list, listErr := clientset.CoreV1().Pods(target.ns).List(ctx, metav1.ListOptions{}) - if listErr != nil { - return false - } - podCounts[wi] = len(list.Items) - return podCounts[wi] >= n - }, target.timeout, 1*time.Second, - "timed out waiting for %d pods in %s (got %d)", n, target.ns, podCounts[wi]) - }) - } - waitWg.Wait() - - // Collect one representative pod UID per namespace for use in metric association. - nsToPodUID := make([]string, len(nsWaits)) - for wi, target := range nsWaits { - list, listErr := clientset.CoreV1().Pods(target.ns).List(ctx, metav1.ListOptions{Limit: 1}) - if listErr == nil && len(list.Items) > 0 { - nsToPodUID[wi] = string(list.Items[0].UID) - } - } - - podUIDs = extendedClusterPodUIDs{ - Deployment: nsToPodUID[0], - StatefulSet: nsToPodUID[1], - DaemonSet: nsToPodUID[2], - CronJob: nsToPodUID[3], - } - require.NotEmpty(t, podUIDs.Deployment, "deploy pod UID must not be empty") - require.NotEmpty(t, podUIDs.StatefulSet, "statefulset pod UID must not be empty") - require.NotEmpty(t, podUIDs.DaemonSet, "daemonset pod UID must not be empty") - require.NotEmpty(t, podUIDs.CronJob, "cronjob pod UID must not be empty") - - t.Logf("[kwok-ext] %d of each type (4×%d=%d pods total); UIDs: deploy=%s sts=%s ds=%s cj=%s", - n, n, 4*n, podUIDs.Deployment, podUIDs.StatefulSet, podUIDs.DaemonSet, podUIDs.CronJob) - - return kubeconfigPath, podUIDs, cleanup -} - -// runTestbedWithK8sConfigExtended runs the collector testbed with the extended metadata -// configuration. It sends numMetricBatches metric batches per ownership type -// (4 types × numMetricBatches total), then validates that the processor enriches each -// batch with the correct ownership-chain attributes. 
-func runTestbedWithK8sConfigExtended(t *testing.T, test *k8sAttributesProcessorTestCase, kubeconfigPath string, podUIDs extendedClusterPodUIDs) { - t.Helper() - - sender := testbed.NewOTLPMetricDataSender(testbed.DefaultHost, testutil.GetAvailablePort(t)) - receiver := testbed.NewOTLPDataReceiver(testutil.GetAvailablePort(t)) - - resultDir, err := filepath.Abs(filepath.Join("results", t.Name())) - require.NoError(t, err) - - agentProc := testbed.NewChildProcessCollector( - testbed.WithEnvVar("GOMAXPROCS", "2"), - testbed.WithEnvVar("KUBECONFIG", kubeconfigPath), - ) - k8sBody := strings.Replace(test.k8sAttributesConfig, "__CONTEXT__", getKubeconfigCurrentContext(kubeconfigPath), 1) - configStr := createConfigYaml(t, sender, receiver, resultDir, - []ProcessorNameAndConfigBody{{Name: "k8s_attributes", Body: k8sBody}}, nil) - cfgCleanup, err := agentProc.PrepareConfig(t, configStr) - require.NoError(t, err) - defer cfgCleanup() - - dataProvider := testbed.NewPerfTestDataProvider(testbed.LoadOptions{DataItemsPerSecond: 10000, ItemsPerBatch: 10}) - tc := testbed.NewTestCase( - t, - dataProvider, - sender, - receiver, - agentProc, - &testbed.PerfTestValidator{}, - performanceResultsSummary, - testbed.WithResourceLimits(testbed.ResourceSpec{ - ExpectedMaxCPU: 200, - ExpectedMaxRAM: 3000, // extended metadata set requires more RAM at 20k-pod scale - }), - ) - defer tc.Stop() - - tc.StartBackend() - tc.StartAgent() - defer tc.StopAgent() - - // Extended sync wait: 4×N pods means the informer cache needs proportionally more time. - syncWait := min(15*time.Second+time.Duration(4*test.numWorkloads/100)*100*time.Millisecond, 180*time.Second) - time.Sleep(syncWait) - - tc.EnableRecording() - require.NoError(t, sender.Start()) - tc.MockBackend.ClearReceivedItems() - startCounter := tc.MockBackend.DataItemsReceived() - - metricSender, ok := tc.LoadGenerator.(*testbed.ProviderSender).Sender.(testbed.MetricDataSender) - require.True(t, ok, "unsupported metric sender") - - // podTypes defines the ownership chain to exercise for each workload type, - // the pod UID to embed in the metric for association, and the resource attributes - // that must appear on enriched metrics for that chain. - type podType struct { - uid string - description string - chainAttrs []string - } - podTypes := []podType{ - { - uid: podUIDs.Deployment, - description: "deployment", - chainAttrs: []string{"k8s.deployment.name", "k8s.deployment.uid", "k8s.replicaset.name", "k8s.replicaset.uid"}, - }, - { - uid: podUIDs.StatefulSet, - description: "statefulset", - chainAttrs: []string{"k8s.statefulset.name", "k8s.statefulset.uid"}, - }, - { - uid: podUIDs.DaemonSet, - description: "daemonset", - chainAttrs: []string{"k8s.daemonset.name", "k8s.daemonset.uid"}, - }, - { - uid: podUIDs.CronJob, - description: "cronjob", - // Both the Job and the CronJob attributes must be present for the two-level chain. 
- chainAttrs: []string{"k8s.cronjob.name", "k8s.cronjob.uid", "k8s.job.name", "k8s.job.uid"}, - }, - } - - totalBatches := len(podTypes) * numMetricBatches - for _, pt := range podTypes { - for i := range numMetricBatches { - m := pmetric.NewMetrics() - mockedConsumedMetricsForK8s.CopyTo(m) - m.ResourceMetrics().At(0).Resource().Attributes().PutStr("k8s.pod.uid", pt.uid) - require.NoError(t, metricSender.ConsumeMetrics(t.Context(), m)) - tc.LoadGenerator.IncDataItemsSent() - if i < numMetricBatches-1 { - time.Sleep(100 * time.Millisecond) - } - } - time.Sleep(250 * time.Millisecond) // give the pipeline time to flush between types - } - - tc.WaitFor( - func() bool { return tc.MockBackend.DataItemsReceived() == startCounter+uint64(totalBatches) }, - "all extended metric batches received", - ) - - received := tc.MockBackend.ReceivedMetrics - require.Len(t, received, totalBatches, "expected %d metric batches", totalBatches) - - // Group received batch indices by k8s.pod.uid for per-chain validation. - uidToBatches := make(map[string][]int, len(podTypes)) - for i, m := range received { - rm := m.ResourceMetrics() - require.Equal(t, 1, rm.Len(), "batch %d: expected 1 ResourceMetrics", i) - gotAttrs := rm.At(0).Resource().Attributes().AsRaw() - if test.expectedResourceAttrs != nil { - for k, v := range test.expectedResourceAttrs { - require.Contains(t, gotAttrs, k, "batch %d: missing resource attribute %q", i, k) - if v != nil { - require.Equal(t, v, gotAttrs[k], "batch %d: resource attribute %q", i, k) - } - } - } - if uid, ok := gotAttrs["k8s.pod.uid"].(string); ok { - uidToBatches[uid] = append(uidToBatches[uid], i) - } - } - - for _, pt := range podTypes { - batches, found := uidToBatches[pt.uid] - require.True(t, found && len(batches) > 0, - "%s: no batches received for pod UID %s", pt.description, pt.uid) - - // Spot-check the first received batch for this chain. - attrs := received[batches[0]].ResourceMetrics().At(0).Resource().Attributes().AsRaw() - for _, k := range pt.chainAttrs { - require.Contains(t, attrs, k, - "%s batch %d: missing chain attribute %q", pt.description, batches[0], k) - } - } -} - -// TestMetricK8sAttributesProcessor tests the k8sattributes processor's -// performance and resource utilization when the component -// is used to collect k8s metadata from a test k8s cluster -// with 100 number of nodes, N number of Pods each controlled by -// its own Deployment/Replicaset, while there are also N number of Namespaces -func TestMetricK8sAttributesProcessor(t *testing.T) { - for _, test := range getK8sAttributesProcessorBasicTestCases() { - t.Run(test.name, func(t *testing.T) { - skipIfKwokUnavailable(t) - kubeconfigPath, podUID, cleanup := setupKWOKCluster(t, test.numWorkloads) - defer cleanup() - runTestbedWithK8sConfig(t, &test, kubeconfigPath, podUID) - cleanup() - }) - } -} - -// TestMetricK8sAttributesProcessorExtended validates the k8sattributesprocessor at high -// scale with an extended metadata configuration, exercising four ownership chains: -// -// - Deployment → ReplicaSet → Pod -// - StatefulSet → Pod -// - DaemonSet → Pod (single-node cluster; 1:1 ratio) -// - CronJob → Job → Pod -// -// Three scales are tested: 110, 1 000, and 5 000 instances of each type -// (up to 20 000 pods for the 5 K scenario). 
-func TestMetricK8sAttributesProcessorExtended(t *testing.T) { - for _, test := range getK8sAttributesProcessorExtendedTestCases() { - t.Run(test.name, func(t *testing.T) { - skipIfKwokUnavailable(t) - kubeconfigPath, podUIDs, cleanup := setupKWOKClusterExtended(t, test.numWorkloads) - defer cleanup() - runTestbedWithK8sConfigExtended(t, &test, kubeconfigPath, podUIDs) - cleanup() - }) - } -}