diff --git a/docs/design/logs.md b/docs/design/logs.md index f6b8cd7d80..25682aebd0 100644 --- a/docs/design/logs.md +++ b/docs/design/logs.md @@ -345,7 +345,25 @@ only meant for OTel components itself and anyone writing extensions like custom Exporters etc. // TODO: Document the principles followed when selecting severity for internal -logs // TODO: Document how this can cause circular loop and plans to address it. +logs + +When OpenTelemetry components generate logs that could potentially feed back +into OpenTelemetry, this can result in what is known as "telemetry-induced +telemetry." To address this, OpenTelemetry provides a mechanism to suppress such +telemetry using the `Context`. Components are expected to mark telemetry as +suppressed within a specific `Context` by invoking +`Context::enter_telemetry_suppressed_scope()`. The Logs SDK implementation +checks this flag in the current `Context` and ignores logs if suppression is +enabled. + +This mechanism relies on proper in-process propagation of the `Context`. +However, external libraries like `hyper` and `tonic`, which are used by +OpenTelemetry in its OTLP Exporters, do not propagate OpenTelemetry's `Context`. +As a result, the suppression mechanism does not work out-of-the-box to suppress +logs originating from these libraries. + +// TODO: Document how OTLP can solve this issue without asking external +crates to respect and propagate OTel Context. ## Summary diff --git a/examples/logs-basic/src/main.rs b/examples/logs-basic/src/main.rs index fb75b8cb00..f681331a0b 100644 --- a/examples/logs-basic/src/main.rs +++ b/examples/logs-basic/src/main.rs @@ -15,19 +15,21 @@ fn main() { .with_simple_exporter(exporter) .build(); - // For the OpenTelemetry layer, add a tracing filter to filter events from - // OpenTelemetry and its dependent crates (opentelemetry-otlp uses crates - // like reqwest/tonic etc.) from being sent back to OTel itself, thus - // preventing infinite telemetry generation. 
The filter levels are set as - // follows: + // To prevent a telemetry-induced-telemetry loop, OpenTelemetry's own internal + // logging is properly suppressed. However, logs emitted by external components + // (such as reqwest, tonic, etc.) are not suppressed as they do not propagate + // OpenTelemetry context. Until this issue is addressed + // (https://github.com/open-telemetry/opentelemetry-rust/issues/2877), + // filtering like this is the best way to suppress such logs. + // + // The filter levels are set as follows: // - Allow `info` level and above by default. - // - Restrict `opentelemetry`, `hyper`, `tonic`, and `reqwest` completely. - // Note: This will also drop events from crates like `tonic` etc. even when - // they are used outside the OTLP Exporter. For more details, see: - // https://github.com/open-telemetry/opentelemetry-rust/issues/761 + // - Completely restrict logs from `hyper`, `tonic`, `h2`, and `reqwest`. + // + // Note: This filtering will also drop logs from these components even when + // they are used outside of the OTLP Exporter. let filter_otel = EnvFilter::new("info") .add_directive("hyper=off".parse().unwrap()) - .add_directive("opentelemetry=off".parse().unwrap()) .add_directive("tonic=off".parse().unwrap()) .add_directive("h2=off".parse().unwrap()) .add_directive("reqwest=off".parse().unwrap()); diff --git a/opentelemetry-appender-tracing/examples/basic.rs b/opentelemetry-appender-tracing/examples/basic.rs index b50f575cde..b1c5427058 100644 --- a/opentelemetry-appender-tracing/examples/basic.rs +++ b/opentelemetry-appender-tracing/examples/basic.rs @@ -16,16 +16,19 @@ fn main() { .with_simple_exporter(exporter) .build(); - // For the OpenTelemetry layer, add a tracing filter to filter events from - // OpenTelemetry and its dependent crates (opentelemetry-otlp uses crates - // like reqwest/tonic etc.) from being sent back to OTel itself, thus - // preventing infinite telemetry generation. 
The filter levels are set as - // follows: + // To prevent a telemetry-induced-telemetry loop, OpenTelemetry's own internal + // logging is properly suppressed. However, logs emitted by external components + // (such as reqwest, tonic, etc.) are not suppressed as they do not propagate + // OpenTelemetry context. Until this issue is addressed + // (https://github.com/open-telemetry/opentelemetry-rust/issues/2877), + // filtering like this is the best way to suppress such logs. + // + // The filter levels are set as follows: // - Allow `info` level and above by default. - // - Restrict `opentelemetry`, `hyper`, `tonic`, and `reqwest` completely. - // Note: This will also drop events from crates like `tonic` etc. even when - // they are used outside the OTLP Exporter. For more details, see: - // https://github.com/open-telemetry/opentelemetry-rust/issues/761 + // - Completely restrict logs from `hyper`, `tonic`, `h2`, and `reqwest`. + // + // Note: This filtering will also drop logs from these components even when + // they are used outside of the OTLP Exporter. 
let filter_otel = EnvFilter::new("info") .add_directive("hyper=off".parse().unwrap()) .add_directive("opentelemetry=off".parse().unwrap()) diff --git a/opentelemetry-appender-tracing/src/layer.rs b/opentelemetry-appender-tracing/src/layer.rs index fe0bc645ed..3958f9f4de 100644 --- a/opentelemetry-appender-tracing/src/layer.rs +++ b/opentelemetry-appender-tracing/src/layer.rs @@ -289,13 +289,11 @@ mod tests { use opentelemetry::{logs::AnyValue, Key}; use opentelemetry_sdk::error::{OTelSdkError, OTelSdkResult}; use opentelemetry_sdk::logs::{InMemoryLogExporter, LogProcessor}; - use opentelemetry_sdk::logs::{LogBatch, LogExporter}; use opentelemetry_sdk::logs::{SdkLogRecord, SdkLoggerProvider}; use opentelemetry_sdk::trace::{Sampler, SdkTracerProvider}; - use tracing::{error, warn}; + use tracing::error; use tracing_subscriber::prelude::__tracing_subscriber_SubscriberExt; - use tracing_subscriber::util::SubscriberInitExt; - use tracing_subscriber::{EnvFilter, Layer}; + use tracing_subscriber::Layer; pub fn attributes_contains(log_record: &SdkLogRecord, key: &Key, value: &AnyValue) -> bool { log_record @@ -313,69 +311,6 @@ mod tests { } // cargo test --features=testing - - #[derive(Clone, Debug, Default)] - struct ReentrantLogExporter; - - impl LogExporter for ReentrantLogExporter { - async fn export(&self, _batch: LogBatch<'_>) -> OTelSdkResult { - // This will cause a deadlock as the export itself creates a log - // while still within the lock of the SimpleLogProcessor. 
- warn!(name: "my-event-name", target: "reentrant", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io"); - Ok(()) - } - } - - #[test] - #[ignore = "See issue: https://github.com/open-telemetry/opentelemetry-rust/issues/1745"] - fn simple_processor_deadlock() { - let exporter: ReentrantLogExporter = ReentrantLogExporter; - let logger_provider = SdkLoggerProvider::builder() - .with_simple_exporter(exporter.clone()) - .build(); - - let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider); - - // Setting subscriber as global as that is the only way to test this scenario. - tracing_subscriber::registry().with(layer).init(); - warn!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io"); - } - - #[test] - #[ignore = "While this test runs fine, this uses global subscriber and does not play well with other tests."] - fn simple_processor_no_deadlock() { - let exporter: ReentrantLogExporter = ReentrantLogExporter; - let logger_provider = SdkLoggerProvider::builder() - .with_simple_exporter(exporter.clone()) - .build(); - - let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider); - - // This filter will prevent the deadlock as the reentrant log will be - // ignored. - let filter = EnvFilter::new("debug").add_directive("reentrant=error".parse().unwrap()); - // Setting subscriber as global as that is the only way to test this scenario. 
- tracing_subscriber::registry() - .with(filter) - .with(layer) - .init(); - warn!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io"); - } - - #[tokio::test(flavor = "multi_thread", worker_threads = 1)] - #[ignore = "While this test runs fine, this uses global subscriber and does not play well with other tests."] - async fn batch_processor_no_deadlock() { - let exporter: ReentrantLogExporter = ReentrantLogExporter; - let logger_provider = SdkLoggerProvider::builder() - .with_batch_exporter(exporter.clone()) - .build(); - - let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider); - - tracing_subscriber::registry().with(layer).init(); - warn!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io"); - } - #[test] fn tracing_appender_standalone() { // Arrange diff --git a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs index 1ee4f096f2..2077e72e91 100644 --- a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs +++ b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs @@ -72,19 +72,21 @@ async fn main() -> Result<(), Box> { // Create a new OpenTelemetryTracingBridge using the above LoggerProvider. let otel_layer = OpenTelemetryTracingBridge::new(&logger_provider); - // For the OpenTelemetry layer, add a tracing filter to filter events from - // OpenTelemetry and its dependent crates (opentelemetry-otlp uses crates - // like reqwest/tonic etc.) from being sent back to OTel itself, thus - // preventing infinite telemetry generation. The filter levels are set as - // follows: + // To prevent a telemetry-induced-telemetry loop, OpenTelemetry's own internal + // logging is properly suppressed. However, logs emitted by external components + // (such as reqwest, tonic, etc.) are not suppressed as they do not propagate + // OpenTelemetry context. 
Until this issue is addressed + // (https://github.com/open-telemetry/opentelemetry-rust/issues/2877), + // filtering like this is the best way to suppress such logs. + // + // The filter levels are set as follows: // - Allow `info` level and above by default. - // - Restrict `opentelemetry`, `hyper`, `tonic`, and `reqwest` completely. - // Note: This will also drop events from crates like `tonic` etc. even when - // they are used outside the OTLP Exporter. For more details, see: - // https://github.com/open-telemetry/opentelemetry-rust/issues/761 + // - Completely restrict logs from `hyper`, `tonic`, `h2`, and `reqwest`. + // + // Note: This filtering will also drop logs from these components even when + // they are used outside of the OTLP Exporter. let filter_otel = EnvFilter::new("info") .add_directive("hyper=off".parse().unwrap()) - .add_directive("opentelemetry=off".parse().unwrap()) .add_directive("tonic=off".parse().unwrap()) .add_directive("h2=off".parse().unwrap()) .add_directive("reqwest=off".parse().unwrap()); diff --git a/opentelemetry-otlp/examples/basic-otlp/src/main.rs b/opentelemetry-otlp/examples/basic-otlp/src/main.rs index af92451a0a..60dde360c8 100644 --- a/opentelemetry-otlp/examples/basic-otlp/src/main.rs +++ b/opentelemetry-otlp/examples/basic-otlp/src/main.rs @@ -66,19 +66,21 @@ async fn main() -> Result<(), Box> { // Create a new OpenTelemetryTracingBridge using the above LoggerProvider. let otel_layer = OpenTelemetryTracingBridge::new(&logger_provider); - // For the OpenTelemetry layer, add a tracing filter to filter events from - // OpenTelemetry and its dependent crates (opentelemetry-otlp uses crates - // like reqwest/tonic etc.) from being sent back to OTel itself, thus - // preventing infinite telemetry generation. The filter levels are set as - // follows: + // To prevent a telemetry-induced-telemetry loop, OpenTelemetry's own internal + // logging is properly suppressed. 
However, logs emitted by external components + // (such as reqwest, tonic, etc.) are not suppressed as they do not propagate + // OpenTelemetry context. Until this issue is addressed + // (https://github.com/open-telemetry/opentelemetry-rust/issues/2877), + // filtering like this is the best way to suppress such logs. + // + // The filter levels are set as follows: // - Allow `info` level and above by default. - // - Restrict `opentelemetry`, `hyper`, `tonic`, and `reqwest` completely. - // Note: This will also drop events from crates like `tonic` etc. even when - // they are used outside the OTLP Exporter. For more details, see: - // https://github.com/open-telemetry/opentelemetry-rust/issues/761 + // - Completely restrict logs from `hyper`, `tonic`, `h2`, and `reqwest`. + // + // Note: This filtering will also drop logs from these components even when + // they are used outside of the OTLP Exporter. let filter_otel = EnvFilter::new("info") .add_directive("hyper=off".parse().unwrap()) - .add_directive("opentelemetry=off".parse().unwrap()) .add_directive("tonic=off".parse().unwrap()) .add_directive("h2=off".parse().unwrap()) .add_directive("reqwest=off".parse().unwrap()); diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index dff2dd365a..fbf20c13e5 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -2,6 +2,16 @@ ## vNext +[#2868](https://github.com/open-telemetry/opentelemetry-rust/pull/2868) +`SdkLogger`, `SdkTracer` modified to respect telemetry suppression based on +`Context`. In other words, if the current context has telemetry suppression +enabled, then logs/spans will be ignored. The flag is typically set by OTel +components to prevent telemetry from itself being fed back into OTel. +`BatchLogProcessor`, `BatchSpanProcessor`, and `PeriodicReader` modified to set +the suppression flag in their dedicated thread, so that telemetry generated from +those threads will not be fed back into OTel. 
Similarly, `SimpleLogProcessor` +also modified to suppress telemetry before invoking exporters. + ## 0.29.0 Released 2025-Mar-21 diff --git a/opentelemetry-sdk/benches/log_enabled.rs b/opentelemetry-sdk/benches/log_enabled.rs index 3e40ea23f4..c5298cf928 100644 --- a/opentelemetry-sdk/benches/log_enabled.rs +++ b/opentelemetry-sdk/benches/log_enabled.rs @@ -5,8 +5,8 @@ Total Number of Cores:   14 (10 performance and 4 efficiency) | Test | Average time| |---------------------------------------------|-------------| - | exporter_disabled_concurrent_processor | 1.1 ns | - | exporter_disabled_simple_processor | 4.3 ns | + | exporter_disabled_concurrent_processor | 2.5 ns | + | exporter_disabled_simple_processor | 5.3 ns | */ // cargo bench --bench log_enabled --features="spec_unstable_logs_enabled,experimental_logs_concurrent_log_processor" diff --git a/opentelemetry-sdk/src/logs/batch_log_processor.rs b/opentelemetry-sdk/src/logs/batch_log_processor.rs index bdfe312aaf..7f37914f28 100644 --- a/opentelemetry-sdk/src/logs/batch_log_processor.rs +++ b/opentelemetry-sdk/src/logs/batch_log_processor.rs @@ -23,7 +23,7 @@ use crate::{ }; use std::sync::mpsc::{self, RecvTimeoutError, SyncSender}; -use opentelemetry::{otel_debug, otel_error, otel_warn, InstrumentationScope}; +use opentelemetry::{otel_debug, otel_error, otel_warn, Context, InstrumentationScope}; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::{cmp::min, env, sync::Mutex}; @@ -342,6 +342,7 @@ impl BatchLogProcessor { let handle = thread::Builder::new() .name("OpenTelemetry.Logs.BatchProcessor".to_string()) .spawn(move || { + let _suppress_guard = Context::enter_telemetry_suppressed_scope(); otel_debug!( name: "BatchLogProcessor.ThreadStarted", interval_in_millisecs = config.scheduled_delay.as_millis(), diff --git a/opentelemetry-sdk/src/logs/logger.rs b/opentelemetry-sdk/src/logs/logger.rs index e42d00b0ff..f76a43792f 100644 --- a/opentelemetry-sdk/src/logs/logger.rs +++ 
b/opentelemetry-sdk/src/logs/logger.rs @@ -29,6 +29,9 @@ impl opentelemetry::logs::Logger for SdkLogger { /// Emit a `LogRecord`. fn emit(&self, mut record: Self::LogRecord) { + if Context::is_current_telemetry_suppressed() { + return; + } let provider = &self.provider; let processors = provider.log_processors(); @@ -52,6 +55,9 @@ impl opentelemetry::logs::Logger for SdkLogger { #[cfg(feature = "spec_unstable_logs_enabled")] #[inline] fn event_enabled(&self, level: Severity, target: &str, name: Option<&str>) -> bool { + if Context::is_current_telemetry_suppressed() { + return false; + } self.provider .log_processors() .iter() diff --git a/opentelemetry-sdk/src/logs/mod.rs b/opentelemetry-sdk/src/logs/mod.rs index 0be40e760c..d7535bfa89 100644 --- a/opentelemetry-sdk/src/logs/mod.rs +++ b/opentelemetry-sdk/src/logs/mod.rs @@ -36,6 +36,7 @@ pub mod log_processor_with_async_runtime; #[cfg(all(test, feature = "testing"))] mod tests { use super::*; + use crate::error::OTelSdkResult; use crate::Resource; use opentelemetry::baggage::BaggageExt; use opentelemetry::logs::LogRecord; @@ -44,6 +45,7 @@ mod tests { use opentelemetry::{Context, InstrumentationScope}; use std::borrow::Borrow; use std::collections::HashMap; + use std::sync::{Arc, Mutex}; #[test] fn logging_sdk_test() { @@ -212,4 +214,85 @@ mod tests { &AnyValue::String("value-from-bag".into()) )); } + + #[test] + fn log_suppression() { + // Arrange + let exporter: InMemoryLogExporter = InMemoryLogExporter::default(); + let logger_provider = SdkLoggerProvider::builder() + .with_simple_exporter(exporter.clone()) + .build(); + + // Act + let logger = logger_provider.logger("test-logger"); + let log_record = logger.create_log_record(); + { + let _suppressed_context = Context::enter_telemetry_suppressed_scope(); + // This log emission should be suppressed and not exported. 
+            logger.emit(log_record);
+        }
+
+        // Assert
+        let exported_logs = exporter.get_emitted_logs().expect("this should not fail.");
+        assert_eq!(
+            exported_logs.len(),
+            0,
+            "There should be no logs as log emission is done inside a suppressed context"
+        );
+    }
+
+    #[derive(Debug, Clone)]
+    struct ReentrantLogProcessor {
+        logger: Arc<Mutex<Option<SdkLogger>>>,
+    }
+
+    impl ReentrantLogProcessor {
+        fn new() -> Self {
+            Self {
+                logger: Arc::new(Mutex::new(None)),
+            }
+        }
+
+        fn set_logger(&self, logger: SdkLogger) {
+            let mut guard = self.logger.lock().unwrap();
+            *guard = Some(logger);
+        }
+    }
+
+    impl LogProcessor for ReentrantLogProcessor {
+        fn emit(&self, _data: &mut SdkLogRecord, _instrumentation: &InstrumentationScope) {
+            let _suppress = Context::enter_telemetry_suppressed_scope();
+            // Without the suppression above, the logger.emit(log_record) below will cause a deadlock,
+            // as it emits another log, which will attempt to acquire the same lock that is
+            // already held by itself!
+            let logger = self.logger.lock().unwrap();
+            if let Some(logger) = logger.as_ref() {
+                let mut log_record = logger.create_log_record();
+                log_record.set_severity_number(Severity::Error);
+                logger.emit(log_record);
+            }
+        }
+
+        fn force_flush(&self) -> OTelSdkResult {
+            Ok(())
+        }
+
+        fn shutdown(&self) -> OTelSdkResult {
+            Ok(())
+        }
+    }
+
+    #[test]
+    fn processor_internal_log_does_not_deadlock_with_suppression_enabled() {
+        let processor: ReentrantLogProcessor = ReentrantLogProcessor::new();
+        let logger_provider = SdkLoggerProvider::builder()
+            .with_log_processor(processor.clone())
+            .build();
+        processor.set_logger(logger_provider.logger("processor-logger"));
+
+        let logger = logger_provider.logger("test-logger");
+        let mut log_record = logger.create_log_record();
+        log_record.set_severity_number(Severity::Error);
+        logger.emit(log_record);
+    }
 }
diff --git a/opentelemetry-sdk/src/logs/simple_log_processor.rs b/opentelemetry-sdk/src/logs/simple_log_processor.rs
index 604d00e25c..5cbda696fc 100644
--- a/opentelemetry-sdk/src/logs/simple_log_processor.rs
+++ b/opentelemetry-sdk/src/logs/simple_log_processor.rs
@@ -23,7 +23,7 @@ use crate::{
     Resource,
 };
 
-use opentelemetry::{otel_debug, otel_error, otel_warn, InstrumentationScope};
+use opentelemetry::{otel_debug, otel_error, otel_warn, Context, InstrumentationScope};
 
 use std::fmt::Debug;
 use std::sync::atomic::AtomicBool;
@@ -76,6 +76,7 @@ impl SimpleLogProcessor {
 
 impl LogProcessor for SimpleLogProcessor {
     fn emit(&self, record: &mut SdkLogRecord, instrumentation: &InstrumentationScope) {
+        let _suppress_guard = Context::enter_telemetry_suppressed_scope();
         // noop after shutdown
         if self.is_shutdown.load(std::sync::atomic::Ordering::Relaxed) {
             // this is a warning, as the user is trying to log after the processor has been shutdown
@@ -152,12 +153,13 @@ impl LogProcessor for SimpleLogProcessor {
 #[cfg(all(test, feature = "testing", feature = "logs"))]
 mod tests {
     use crate::logs::log_processor::tests::MockLogExporter;
-    use crate::logs::{LogBatch, LogExporter, SdkLogRecord};
+    use crate::logs::{LogBatch, LogExporter, SdkLogRecord, SdkLogger};
     use crate::{
         error::OTelSdkResult,
         logs::{InMemoryLogExporterBuilder, LogProcessor, SdkLoggerProvider, SimpleLogProcessor},
         Resource,
     };
+    use opentelemetry::logs::{LogRecord, Logger, LoggerProvider};
     use opentelemetry::InstrumentationScope;
     use opentelemetry::KeyValue;
     use std::sync::atomic::{AtomicUsize, Ordering};
@@ -438,4 +440,56 @@ mod tests {
 
         assert_eq!(exporter.len(), 1);
     }
+
+    #[derive(Debug, Clone)]
+    struct ReentrantLogExporter {
+        logger: Arc<Mutex<Option<SdkLogger>>>,
+    }
+
+    impl ReentrantLogExporter {
+        fn new() -> Self {
+            Self {
+                logger: Arc::new(Mutex::new(None)),
+            }
+        }
+
+        fn set_logger(&self, logger: SdkLogger) {
+            let mut guard = self.logger.lock().unwrap();
+            *guard = Some(logger);
+        }
+    }
+
+    impl LogExporter for ReentrantLogExporter {
+        fn shutdown(&self) -> OTelSdkResult {
+            Ok(())
+        }
+
+        async fn export(&self, _batch: LogBatch<'_>) -> OTelSdkResult {
+            let logger = self.logger.lock().unwrap();
+            if let Some(logger) = logger.as_ref() {
+                let mut log_record = logger.create_log_record();
+                log_record.set_severity_number(opentelemetry::logs::Severity::Error);
+                logger.emit(log_record);
+            }
+
+            Ok(())
+        }
+    }
+
+    #[test]
+    fn exporter_internal_log_does_not_deadlock_with_simple_processor() {
+        // This tests that even when exporter produces logs while
+        // exporting, it does not deadlock, as SimpleLogProcessor
+        // activates SuppressGuard before calling the exporter.
+        let exporter: ReentrantLogExporter = ReentrantLogExporter::new();
+        let logger_provider = SdkLoggerProvider::builder()
+            .with_simple_exporter(exporter.clone())
+            .build();
+        exporter.set_logger(logger_provider.logger("processor-logger"));
+
+        let logger = logger_provider.logger("test-logger");
+        let mut log_record = logger.create_log_record();
+        log_record.set_severity_number(opentelemetry::logs::Severity::Error);
+        logger.emit(log_record);
+    }
 }
diff --git a/opentelemetry-sdk/src/metrics/periodic_reader.rs b/opentelemetry-sdk/src/metrics/periodic_reader.rs
index 1e9f5bd16f..00af7052ae 100644
--- a/opentelemetry-sdk/src/metrics/periodic_reader.rs
+++ b/opentelemetry-sdk/src/metrics/periodic_reader.rs
@@ -8,7 +8,7 @@ use std::{
     time::{Duration, Instant},
 };
 
-use opentelemetry::{otel_debug, otel_error, otel_info, otel_warn};
+use opentelemetry::{otel_debug, otel_error, otel_info, otel_warn, Context};
 
 use crate::{
     error::{OTelSdkError, OTelSdkResult},
@@ -158,6 +158,7 @@ impl PeriodicReader {
         let result_thread_creation = thread::Builder::new()
             .name("OpenTelemetry.Metrics.PeriodicReader".to_string())
             .spawn(move || {
+                let _suppress_guard = Context::enter_telemetry_suppressed_scope();
                 let mut interval_start = Instant::now();
                 let mut remaining_interval = interval;
                 otel_debug!(
diff --git a/opentelemetry-sdk/src/trace/mod.rs b/opentelemetry-sdk/src/trace/mod.rs
index ad3a18543f..8bd0968618 100644
--- a/opentelemetry-sdk/src/trace/mod.rs
+++ 
b/opentelemetry-sdk/src/trace/mod.rs
@@ -533,4 +533,30 @@
         let tracer2 = tracer_provider.tracer_with_scope(tracer_scope);
         tracer_name_retained_helper(tracer2, tracer_provider, exporter).await;
     }
+
+    #[test]
+    fn trace_suppression() {
+        // Arrange
+        let exporter = InMemorySpanExporter::default();
+        let span_processor = SimpleSpanProcessor::new(exporter.clone());
+        let tracer_provider = SdkTracerProvider::builder()
+            .with_span_processor(span_processor)
+            .build();
+
+        // Act
+        let tracer = tracer_provider.tracer("test");
+        {
+            let _suppressed_context = Context::enter_telemetry_suppressed_scope();
+            // This span should not be emitted as it is created in a suppressed context
+            let _span = tracer.span_builder("span_name").start(&tracer);
+        }
+
+        // Assert
+        let finished_spans = exporter.get_finished_spans().expect("this should not fail");
+        assert_eq!(
+            finished_spans.len(),
+            0,
+            "There should be no spans as span emission is done inside a suppressed context"
+        );
+    }
 }
diff --git a/opentelemetry-sdk/src/trace/span_processor.rs b/opentelemetry-sdk/src/trace/span_processor.rs
index d2253c1cb9..595099ef7f 100644
--- a/opentelemetry-sdk/src/trace/span_processor.rs
+++ b/opentelemetry-sdk/src/trace/span_processor.rs
@@ -316,6 +316,7 @@ impl BatchSpanProcessor {
         let handle = thread::Builder::new()
             .name("OpenTelemetry.Traces.BatchProcessor".to_string())
             .spawn(move || {
+                let _suppress_guard = Context::enter_telemetry_suppressed_scope();
                 otel_debug!(
                     name: "BatchSpanProcessor.ThreadStarted",
                     interval_in_millisecs = config.scheduled_delay.as_millis(),
diff --git a/opentelemetry-sdk/src/trace/tracer.rs b/opentelemetry-sdk/src/trace/tracer.rs
index 2dfc09e86d..40e55fdc52 100644
--- a/opentelemetry-sdk/src/trace/tracer.rs
+++ b/opentelemetry-sdk/src/trace/tracer.rs
@@ -178,6 +178,15 @@ impl opentelemetry::trace::Tracer for SdkTracer {
     /// trace includes a single root span, which is the shared ancestor of all other
     /// spans in the trace.
fn build_with_context(&self, mut builder: SpanBuilder, parent_cx: &Context) -> Self::Span { + if parent_cx.is_telemetry_suppressed() { + return Span::new( + SpanContext::empty_context(), + None, + self.clone(), + SpanLimits::default(), + ); + } + let provider = self.provider(); // no point start a span if the tracer provider has already being shutdown if provider.is_shutdown() { diff --git a/opentelemetry/CHANGELOG.md b/opentelemetry/CHANGELOG.md index 82dfb51cd3..c366d2cf39 100644 --- a/opentelemetry/CHANGELOG.md +++ b/opentelemetry/CHANGELOG.md @@ -2,10 +2,11 @@ ## vNext -Added the ability to prevent recursive telemetry generation through new -context-based suppression mechanisms. This feature helps prevent feedback loops -and excessive telemetry when OpenTelemetry components perform their own -operations. +[#2821](https://github.com/open-telemetry/opentelemetry-rust/pull/2821) Context +based suppression capabilities added: Added the ability to prevent recursive +telemetry generation through new context-based suppression mechanisms. This +feature helps prevent feedback loops and excessive telemetry when OpenTelemetry +components perform their own operations. New methods added to `Context`: