diff --git a/charts/nr-k8s-otel-collector/Chart.yaml b/charts/nr-k8s-otel-collector/Chart.yaml
index d44ed9da2..58714facb 100644
--- a/charts/nr-k8s-otel-collector/Chart.yaml
+++ b/charts/nr-k8s-otel-collector/Chart.yaml
@@ -17,7 +17,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.10.1
+version: 0.10.2

 dependencies:
diff --git a/charts/nr-k8s-otel-collector/examples/k8s/rendered/clusterrole.yaml b/charts/nr-k8s-otel-collector/examples/k8s/rendered/clusterrole.yaml
index 865d96498..58adb7559 100644
--- a/charts/nr-k8s-otel-collector/examples/k8s/rendered/clusterrole.yaml
+++ b/charts/nr-k8s-otel-collector/examples/k8s/rendered/clusterrole.yaml
@@ -9,7 +9,7 @@ metadata:
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: nr-k8s-otel-collector
     app.kubernetes.io/version: 1.2.0
-    helm.sh/chart: nr-k8s-otel-collector-0.10.1
+    helm.sh/chart: nr-k8s-otel-collector-0.10.2
 rules:
   - apiGroups:
       - ""
diff --git a/charts/nr-k8s-otel-collector/examples/k8s/rendered/clusterrolebinding.yaml b/charts/nr-k8s-otel-collector/examples/k8s/rendered/clusterrolebinding.yaml
index 914eb4694..4bb2f3d08 100644
--- a/charts/nr-k8s-otel-collector/examples/k8s/rendered/clusterrolebinding.yaml
+++ b/charts/nr-k8s-otel-collector/examples/k8s/rendered/clusterrolebinding.yaml
@@ -9,7 +9,7 @@ metadata:
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: nr-k8s-otel-collector
     app.kubernetes.io/version: 1.2.0
-    helm.sh/chart: nr-k8s-otel-collector-0.10.1
+    helm.sh/chart: nr-k8s-otel-collector-0.10.2
 subjects:
   - kind: ServiceAccount
     name: nr-k8s-otel-collector
diff --git a/charts/nr-k8s-otel-collector/examples/k8s/rendered/daemonset-configmap.yaml b/charts/nr-k8s-otel-collector/examples/k8s/rendered/daemonset-configmap.yaml
index 1b4c8283e..d96c6ba31 100644
--- a/charts/nr-k8s-otel-collector/examples/k8s/rendered/daemonset-configmap.yaml
+++ b/charts/nr-k8s-otel-collector/examples/k8s/rendered/daemonset-configmap.yaml
@@ -10,7 +10,7 @@ metadata:
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: nr-k8s-otel-collector
     app.kubernetes.io/version: 1.2.0
-    helm.sh/chart: nr-k8s-otel-collector-0.10.1
+    helm.sh/chart: nr-k8s-otel-collector-0.10.2
 data:
   daemonset-config.yaml: |
     receivers:
@@ -514,25 +514,6 @@ data:
                 aggregation_type: sum

       # following system.% metrics reduce metrics reported by hostmetrics receiver
-      filter/exclude_cpu_utilization:
-        metrics:
-          datapoint:
-            - 'metric.name == "system.cpu.utilization" and attributes["state"] == "interrupt"'
-            - 'metric.name == "system.cpu.utilization" and attributes["state"] == "nice"'
-            - 'metric.name == "system.cpu.utilization" and attributes["state"] == "softirq"'
-      filter/exclude_memory_utilization:
-        metrics:
-          datapoint:
-            - 'metric.name == "system.memory.utilization" and attributes["state"] == "slab_unreclaimable"'
-            - 'metric.name == "system.memory.utilization" and attributes["state"] == "inactive"'
-            - 'metric.name == "system.memory.utilization" and attributes["state"] == "cached"'
-            - 'metric.name == "system.memory.utilization" and attributes["state"] == "buffered"'
-            - 'metric.name == "system.memory.utilization" and attributes["state"] == "slab_reclaimable"'
-      filter/exclude_memory_usage:
-        metrics:
-          datapoint:
-            - 'metric.name == "system.memory.usage" and attributes["state"] == "slab_unreclaimable"'
-            - 'metric.name == "system.memory.usage" and attributes["state"] == "inactive"'
       filter/exclude_filesystem_utilization:
         metrics:
           datapoint:
@@ -607,7 +588,7 @@ data:
             value:
           - key: "newrelic.chart.version"
             action: upsert
-            value: 0.10.1
+            value: 0.10.2
           - key: newrelic.entity.type
             action: upsert
             value: "k8s"
@@ -775,9 +756,6 @@ data:
             - filter/exclude_metrics_low_data_mode
             - metricstransform/hostmetrics_cpu
             - transform/truncate
-            - filter/exclude_cpu_utilization
-            - filter/exclude_memory_utilization
-            - filter/exclude_memory_usage
             - filter/exclude_filesystem_utilization
             - filter/exclude_filesystem_usage
             - filter/exclude_filesystem_inodes_usage
diff --git a/charts/nr-k8s-otel-collector/examples/k8s/rendered/daemonset.yaml b/charts/nr-k8s-otel-collector/examples/k8s/rendered/daemonset.yaml
index adcae195d..db5eafef7 100644
--- a/charts/nr-k8s-otel-collector/examples/k8s/rendered/daemonset.yaml
+++ b/charts/nr-k8s-otel-collector/examples/k8s/rendered/daemonset.yaml
@@ -10,7 +10,7 @@ metadata:
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: nr-k8s-otel-collector
     app.kubernetes.io/version: 1.2.0
-    helm.sh/chart: nr-k8s-otel-collector-0.10.1
+    helm.sh/chart: nr-k8s-otel-collector-0.10.2
 spec:
   selector:
     matchLabels:
@@ -24,7 +24,7 @@ spec:
         app.kubernetes.io/name: nr-k8s-otel-collector
         component: daemonset
       annotations:
-        checksum/config: 2fc676bab1a4717b68d104ea4926bbd8ee75c4697be5afb59a9e1421975a09e6
+        checksum/config: 504fbfbe00f77ebec6de42ca8bdc9d2d3879a112da328c8549c0b167316755b8
     spec:
       serviceAccountName: nr-k8s-otel-collector
       initContainers:
diff --git a/charts/nr-k8s-otel-collector/examples/k8s/rendered/deployment-configmap.yaml b/charts/nr-k8s-otel-collector/examples/k8s/rendered/deployment-configmap.yaml
index 61e3045ba..bf82359fd 100644
--- a/charts/nr-k8s-otel-collector/examples/k8s/rendered/deployment-configmap.yaml
+++ b/charts/nr-k8s-otel-collector/examples/k8s/rendered/deployment-configmap.yaml
@@ -10,7 +10,7 @@ metadata:
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: nr-k8s-otel-collector
     app.kubernetes.io/version: 1.2.0
-    helm.sh/chart: nr-k8s-otel-collector-0.10.1
+    helm.sh/chart: nr-k8s-otel-collector-0.10.2
 data:
   deployment-config.yaml: |
     receivers:
@@ -477,37 +477,6 @@ data:
           metric:
             - 'HasAttrOnDatapoint("low.data.mode", "false")'

-      filter/exclude_zero_value_kube_node_status_condition:
-        metrics:
-          datapoint:
-            - metric.name == "kube_node_status_condition" and value_double == 0.0
-
-      filter/exclude_zero_value_kube_persistentvolumeclaim_status_phase:
-        metrics:
-          datapoint:
-            - metric.name == "kube_persistentvolumeclaim_status_phase" and value_double == 0.0
-
-      filter/nr_exclude_zero_value_kube_pod_container_deployment_statuses:
-        metrics:
-          datapoint:
-            - metric.name == "kube_pod_status_phase" and value_double < 0.5
-            - metric.name == "kube_pod_status_ready" and value_double < 0.5
-            - metric.name == "kube_pod_status_scheduled" and value_double < 0.5
-            - metric.name == "kube_pod_container_status_ready" and value_double < 0.5
-            - metric.name == "kube_pod_container_status_phase" and value_double < 0.5
-            - metric.name == "kube_pod_container_status_restarts_total" and value_double < 0.5
-            - metric.name == "kube_deployment_status_condition" and value_double < 0.5
-            - metric.name == "kube_pod_container_status_waiting_reason" and value_double < 0.5
-
-      filter/nr_exclude_zero_value_kube_jobs:
-        metrics:
-          datapoint:
-            - metric.name == "kube_job_complete" and value_double < 0.5
-            - metric.name == "kube_job_spec_parallelism" and value_double < 0.5
-            - metric.name == "kube_job_status_failed" and value_double < 0.5
-            - metric.name == "kube_job_status_active" and value_double < 0.5
-            - metric.name == "kube_job_status_succeeded" and value_double < 0.5
-
       resource/newrelic:
         attributes:
           # We set the cluster name to what the customer specified in the helm chart
@@ -516,7 +485,7 @@ data:
             value:
           - key: "newrelic.chart.version"
             action: upsert
-            value: 0.10.1
+            value: 0.10.2
           - key: newrelic.entity.type
             action: upsert
             value: "k8s"
@@ -534,7 +503,7 @@ data:
             value:
          - key: "newrelic.chart.version"
             action: upsert
-            value: 0.10.1
+            value: 0.10.2

       transform/events:
         log_statements:
@@ -759,15 +728,11 @@ data:
           processors:
             - memory_limiter
             - metricstransform/kube_pod_container_status_phase
-            - filter/exclude_zero_value_kube_node_status_condition
-            - filter/exclude_zero_value_kube_persistentvolumeclaim_status_phase
-            - filter/nr_exclude_zero_value_kube_pod_container_deployment_statuses
             - transform/convert_timestamp
             - metricstransform/ldm
             - metricstransform/k8s_cluster_info_ldm
             - metricstransform/ksm
             - filter/exclude_metrics_low_data_mode
-            - filter/nr_exclude_zero_value_kube_jobs
             - transform/low_data_mode_inator
             - resource/low_data_mode_inator
             - resource/newrelic
diff --git a/charts/nr-k8s-otel-collector/examples/k8s/rendered/deployment.yaml b/charts/nr-k8s-otel-collector/examples/k8s/rendered/deployment.yaml
index 86d9c482c..bc5dfbc71 100644
--- a/charts/nr-k8s-otel-collector/examples/k8s/rendered/deployment.yaml
+++ b/charts/nr-k8s-otel-collector/examples/k8s/rendered/deployment.yaml
@@ -10,7 +10,7 @@ metadata:
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: nr-k8s-otel-collector
     app.kubernetes.io/version: 1.2.0
-    helm.sh/chart: nr-k8s-otel-collector-0.10.1
+    helm.sh/chart: nr-k8s-otel-collector-0.10.2
 spec:
   replicas: 1
   minReadySeconds: 5
@@ -26,7 +26,7 @@ spec:
         app.kubernetes.io/name: nr-k8s-otel-collector
         component: deployment
       annotations:
-        checksum/config: 1c56911b2943644694f4358b8ff53a07746aa92f019544bf2e3996a0e1097e28
+        checksum/config: 6e1c518452cac5c5f56b2e6f16c1238fb18e6b3c0d5d6ba977dbe539e365ea47
     spec:
       serviceAccountName: nr-k8s-otel-collector
       containers:
diff --git a/charts/nr-k8s-otel-collector/examples/k8s/rendered/secret.yaml b/charts/nr-k8s-otel-collector/examples/k8s/rendered/secret.yaml
index 2b8c92958..27f9961b7 100644
--- a/charts/nr-k8s-otel-collector/examples/k8s/rendered/secret.yaml
+++ b/charts/nr-k8s-otel-collector/examples/k8s/rendered/secret.yaml
@@ -10,6 +10,6 @@ metadata:
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: nr-k8s-otel-collector
     app.kubernetes.io/version: 1.2.0
-    helm.sh/chart: nr-k8s-otel-collector-0.10.1
+    helm.sh/chart: nr-k8s-otel-collector-0.10.2
 data:
   licenseKey: PE5SX2xpY2Vuc2VLZXk+
diff --git a/charts/nr-k8s-otel-collector/examples/k8s/rendered/service.yaml b/charts/nr-k8s-otel-collector/examples/k8s/rendered/service.yaml
index bbe753613..e1b8b5040 100644
--- a/charts/nr-k8s-otel-collector/examples/k8s/rendered/service.yaml
+++ b/charts/nr-k8s-otel-collector/examples/k8s/rendered/service.yaml
@@ -10,7 +10,7 @@ metadata:
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: nr-k8s-otel-collector
     app.kubernetes.io/version: 1.2.0
-    helm.sh/chart: nr-k8s-otel-collector-0.10.1
+    helm.sh/chart: nr-k8s-otel-collector-0.10.2
 spec:
   type: ClusterIP
   ports:
diff --git a/charts/nr-k8s-otel-collector/examples/k8s/rendered/serviceaccount.yaml b/charts/nr-k8s-otel-collector/examples/k8s/rendered/serviceaccount.yaml
index b7cad3bc5..737c7fd47 100644
--- a/charts/nr-k8s-otel-collector/examples/k8s/rendered/serviceaccount.yaml
+++ b/charts/nr-k8s-otel-collector/examples/k8s/rendered/serviceaccount.yaml
@@ -10,5 +10,5 @@ metadata:
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: nr-k8s-otel-collector
     app.kubernetes.io/version: 1.2.0
-    helm.sh/chart: nr-k8s-otel-collector-0.10.1
+    helm.sh/chart: nr-k8s-otel-collector-0.10.2
   annotations:
diff --git a/charts/nr-k8s-otel-collector/templates/daemonset-configmap.yaml b/charts/nr-k8s-otel-collector/templates/daemonset-configmap.yaml
index 7d7db1ad6..907656567 100644
--- a/charts/nr-k8s-otel-collector/templates/daemonset-configmap.yaml
+++ b/charts/nr-k8s-otel-collector/templates/daemonset-configmap.yaml
@@ -531,25 +531,6 @@ data:
                 aggregation_type: sum

       # following system.% metrics reduce metrics reported by hostmetrics receiver
-      filter/exclude_cpu_utilization:
-        metrics:
-          datapoint:
-            - 'metric.name == "system.cpu.utilization" and attributes["state"] == "interrupt"'
-            - 'metric.name == "system.cpu.utilization" and attributes["state"] == "nice"'
-            - 'metric.name == "system.cpu.utilization" and attributes["state"] == "softirq"'
-      filter/exclude_memory_utilization:
-        metrics:
-          datapoint:
-            - 'metric.name == "system.memory.utilization" and attributes["state"] == "slab_unreclaimable"'
-            - 'metric.name == "system.memory.utilization" and attributes["state"] == "inactive"'
-            - 'metric.name == "system.memory.utilization" and attributes["state"] == "cached"'
-            - 'metric.name == "system.memory.utilization" and attributes["state"] == "buffered"'
-            - 'metric.name == "system.memory.utilization" and attributes["state"] == "slab_reclaimable"'
-      filter/exclude_memory_usage:
-        metrics:
-          datapoint:
-            - 'metric.name == "system.memory.usage" and attributes["state"] == "slab_unreclaimable"'
-            - 'metric.name == "system.memory.usage" and attributes["state"] == "inactive"'
       filter/exclude_filesystem_utilization:
         metrics:
           datapoint:
@@ -829,9 +810,6 @@ data:
             {{- end }}
             - metricstransform/hostmetrics_cpu
             - transform/truncate
-            - filter/exclude_cpu_utilization
-            - filter/exclude_memory_utilization
-            - filter/exclude_memory_usage
             - filter/exclude_filesystem_utilization
             - filter/exclude_filesystem_usage
             - filter/exclude_filesystem_inodes_usage
diff --git a/charts/nr-k8s-otel-collector/templates/deployment-configmap.yaml b/charts/nr-k8s-otel-collector/templates/deployment-configmap.yaml
index 502b57779..4b8e0eabd 100644
--- a/charts/nr-k8s-otel-collector/templates/deployment-configmap.yaml
+++ b/charts/nr-k8s-otel-collector/templates/deployment-configmap.yaml
@@ -522,37 +522,6 @@ data:
           metric:
             - 'HasAttrOnDatapoint("low.data.mode", "false")'

-      filter/exclude_zero_value_kube_node_status_condition:
-        metrics:
-          datapoint:
-            - metric.name == "kube_node_status_condition" and value_double == 0.0
-
-      filter/exclude_zero_value_kube_persistentvolumeclaim_status_phase:
-        metrics:
-          datapoint:
-            - metric.name == "kube_persistentvolumeclaim_status_phase" and value_double == 0.0
-
-      filter/nr_exclude_zero_value_kube_pod_container_deployment_statuses:
-        metrics:
-          datapoint:
-            - metric.name == "kube_pod_status_phase" and value_double < 0.5
-            - metric.name == "kube_pod_status_ready" and value_double < 0.5
-            - metric.name == "kube_pod_status_scheduled" and value_double < 0.5
-            - metric.name == "kube_pod_container_status_ready" and value_double < 0.5
-            - metric.name == "kube_pod_container_status_phase" and value_double < 0.5
-            - metric.name == "kube_pod_container_status_restarts_total" and value_double < 0.5
-            - metric.name == "kube_deployment_status_condition" and value_double < 0.5
-            - metric.name == "kube_pod_container_status_waiting_reason" and value_double < 0.5
-
-      filter/nr_exclude_zero_value_kube_jobs:
-        metrics:
-          datapoint:
-            - metric.name == "kube_job_complete" and value_double < 0.5
-            - metric.name == "kube_job_spec_parallelism" and value_double < 0.5
-            - metric.name == "kube_job_status_failed" and value_double < 0.5
-            - metric.name == "kube_job_status_active" and value_double < 0.5
-            - metric.name == "kube_job_status_succeeded" and value_double < 0.5
-
       {{- if include "newrelic.common.openShift" . }}
       resourcedetection/openshift:
         detectors: ["openshift"]
@@ -836,16 +805,12 @@ data:
           processors:
             - memory_limiter
             - metricstransform/kube_pod_container_status_phase
-            - filter/exclude_zero_value_kube_node_status_condition
-            - filter/exclude_zero_value_kube_persistentvolumeclaim_status_phase
-            - filter/nr_exclude_zero_value_kube_pod_container_deployment_statuses
             - transform/convert_timestamp
             {{- if include "nrKubernetesOtel.lowDataMode" . }}
             - metricstransform/ldm
             - metricstransform/k8s_cluster_info_ldm
             - metricstransform/ksm
             - filter/exclude_metrics_low_data_mode
-            - filter/nr_exclude_zero_value_kube_jobs
             - transform/low_data_mode_inator
             - resource/low_data_mode_inator
             {{- end }}