feat: add-shutdown-with-timeout-for-log-provider-and-processor #2941

Status: Open · wants to merge 3 commits into main
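
This PR replaces the fixed, construction-time shutdown timeout with a caller-supplied one: `LogProcessor` and `SdkLoggerProvider` gain a `shutdown_with_timeout(Duration)` method, and the existing `shutdown()` becomes a convenience wrapper that forwards a 5-second default. A minimal usage sketch (the surrounding provider setup is assumed, not part of this diff):

```rust
use std::time::Duration;

use opentelemetry_sdk::logs::SdkLoggerProvider;

// Hypothetical helper: `provider` is an already-configured SdkLoggerProvider.
fn shutdown_logging(provider: &SdkLoggerProvider) {
    // New in this PR: pass an explicit deadline for shutdown.
    if let Err(err) = provider.shutdown_with_timeout(Duration::from_secs(2)) {
        eprintln!("logger provider shutdown failed: {err:?}");
    }
    // provider.shutdown() still works and is now equivalent to
    // provider.shutdown_with_timeout(Duration::from_secs(5)).
}
```
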
4 changes: 0 additions & 4 deletions examples/tracing-http-propagator/src/server.rs
@@ -120,10 +120,6 @@ impl LogProcessor for EnrichWithBaggageLogProcessor {
     fn force_flush(&self) -> OTelSdkResult {
         Ok(())
     }
-
-    fn shutdown(&self) -> OTelSdkResult {
-        Ok(())
-    }
 }

 /// A custom span processor that enriches spans with baggage attributes. Baggage

4 changes: 0 additions & 4 deletions opentelemetry-appender-tracing/benches/log-attributes.rs
@@ -43,10 +43,6 @@ impl LogProcessor for NoopProcessor {
     fn force_flush(&self) -> OTelSdkResult {
         Ok(())
     }
-
-    fn shutdown(&self) -> OTelSdkResult {
-        Ok(())
-    }
 }

 /// Creates a single benchmark for a specific number of attributes

4 changes: 0 additions & 4 deletions opentelemetry-appender-tracing/benches/logs.rs
@@ -54,10 +54,6 @@ impl LogProcessor for NoopProcessor {
         Ok(())
     }

-    fn shutdown(&self) -> OTelSdkResult {
-        Ok(())
-    }
-
     fn event_enabled(
         &self,
         _level: opentelemetry::logs::Severity,

4 changes: 0 additions & 4 deletions opentelemetry-appender-tracing/src/layer.rs
@@ -877,10 +877,6 @@ mod tests {
        fn force_flush(&self) -> OTelSdkResult {
            Ok(())
        }
-
-        fn shutdown(&self) -> OTelSdkResult {
-            Ok(())
-        }
    }

    #[cfg(feature = "spec_unstable_logs_enabled")]

4 changes: 0 additions & 4 deletions opentelemetry-proto/src/transform/logs.rs
@@ -239,10 +239,6 @@ mod tests {
        fn force_flush(&self) -> OTelSdkResult {
            Ok(())
        }
-
-        fn shutdown(&self) -> OTelSdkResult {
-            Ok(())
-        }
    }

    fn create_test_log_data(

8 changes: 3 additions & 5 deletions opentelemetry-sdk/src/logs/batch_log_processor.rs
@@ -132,7 +132,6 @@
     message_sender: SyncSender<BatchMessage>, // Control channel to store control messages for the worker thread
     handle: Mutex<Option<thread::JoinHandle<()>>>,
     forceflush_timeout: Duration,
-    shutdown_timeout: Duration,
     export_log_message_sent: Arc<AtomicBool>,
     current_batch_size: Arc<AtomicUsize>,
     max_export_batch_size: usize,
@@ -256,7 +255,7 @@
         }
     }

-    fn shutdown(&self) -> OTelSdkResult {
+    fn shutdown_with_timeout(&self, timeout: Duration) -> OTelSdkResult {
         let dropped_logs = self.dropped_logs_count.load(Ordering::Relaxed);
         let max_queue_size = self.max_queue_size;
         if dropped_logs > 0 {
@@ -272,7 +271,7 @@
         match self.message_sender.try_send(BatchMessage::Shutdown(sender)) {
             Ok(_) => {
                 receiver
-                    .recv_timeout(self.shutdown_timeout)
+                    .recv_timeout(timeout)
                     .map(|_| {
                         // join the background thread after receiving back the
                         // shutdown signal
@@ -287,7 +286,7 @@
                     name: "BatchLogProcessor.Shutdown.Timeout",
                     message = "BatchLogProcessor shutdown timing out."
                 );
-                OTelSdkError::Timeout(self.shutdown_timeout)
+                OTelSdkError::Timeout(timeout)
             }
             _ => {
                 otel_error!(
@@ -489,7 +488,6 @@
             message_sender,
             handle: Mutex::new(Some(handle)),
             forceflush_timeout: Duration::from_secs(5), // TODO: make this configurable
-            shutdown_timeout: Duration::from_secs(5), // TODO: make this configurable
             dropped_logs_count: AtomicUsize::new(0),
             max_queue_size,
             export_log_message_sent: Arc::new(AtomicBool::new(false)),

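
The `BatchLogProcessor` change threads the caller's timeout straight into the `recv_timeout` wait on the shutdown acknowledgment channel, replacing the `shutdown_timeout` field that was fixed at construction. A standalone sketch of that signal-and-wait pattern, with hypothetical names rather than the SDK's actual types:

```rust
use std::sync::mpsc;
use std::thread;
use std::time::Duration;

// Worker thread that acknowledges a shutdown request back to the caller.
fn spawn_worker() -> (mpsc::SyncSender<mpsc::Sender<()>>, thread::JoinHandle<()>) {
    let (tx, rx) = mpsc::sync_channel::<mpsc::Sender<()>>(8);
    let handle = thread::spawn(move || {
        // Block until a shutdown request (carrying an ack sender) arrives.
        if let Ok(ack) = rx.recv() {
            // ... a real processor would drain and export the pending batch here ...
            let _ = ack.send(()); // acknowledge that shutdown finished
        }
    });
    (tx, handle)
}

fn shutdown_with_timeout(
    tx: &mpsc::SyncSender<mpsc::Sender<()>>,
    handle: thread::JoinHandle<()>,
    timeout: Duration, // caller-supplied, no longer a field fixed at construction
) -> Result<(), String> {
    let (ack_tx, ack_rx) = mpsc::channel();
    // Non-blocking send, mirroring the processor's try_send: fail fast if the
    // control channel is full or the worker is already gone.
    tx.try_send(ack_tx).map_err(|e| e.to_string())?;
    // Wait for the acknowledgment, but only as long as the caller allows.
    ack_rx
        .recv_timeout(timeout)
        .map_err(|_| format!("shutdown timed out after {timeout:?}"))?;
    // Join the worker only after the ack, as the batch processor does.
    handle.join().map_err(|_| "worker panicked".into())
}

fn main() {
    let (tx, handle) = spawn_worker();
    shutdown_with_timeout(&tx, handle, Duration::from_millis(500)).expect("clean shutdown");
}
```
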
5 changes: 3 additions & 2 deletions opentelemetry-sdk/src/logs/concurrent_log_processor.rs
@@ -1,4 +1,5 @@
 use opentelemetry::{otel_info, InstrumentationScope};
+use std::time::Duration;

 use crate::{error::OTelSdkResult, Resource};

@@ -43,8 +44,8 @@
         Ok(())
     }

-    fn shutdown(&self) -> OTelSdkResult {
-        self.exporter.shutdown()
+    fn shutdown_with_timeout(&self, timeout: Duration) -> OTelSdkResult {
+        self.exporter.shutdown_with_timeout(timeout)
     }

     #[cfg(feature = "spec_unstable_logs_enabled")]

17 changes: 8 additions & 9 deletions opentelemetry-sdk/src/logs/log_processor.rs
@@ -34,6 +34,7 @@ use opentelemetry::logs::Severity;
 use opentelemetry::InstrumentationScope;

 use std::fmt::Debug;
+use std::time::Duration;

 /// The interface for plugging into a [`SdkLogger`].
 ///
@@ -56,7 +57,13 @@ pub trait LogProcessor: Send + Sync + Debug {
     /// Shuts down the processor.
     /// After shutdown returns the log processor should stop processing any logs.
     /// It's up to the implementation on when to drop the LogProcessor.
-    fn shutdown(&self) -> OTelSdkResult;
+    fn shutdown_with_timeout(&self, _timeout: Duration) -> OTelSdkResult {
+        Ok(())
+    }
+    /// Shuts down the processor with default timeout.
+    fn shutdown(&self) -> OTelSdkResult {
+        self.shutdown_with_timeout(Duration::from_secs(5))
+    }
     #[cfg(feature = "spec_unstable_logs_enabled")]
     /// Check if logging is enabled
     fn event_enabled(&self, _level: Severity, _target: &str, _name: Option<&str>) -> bool {
@@ -133,10 +140,6 @@ pub(crate) mod tests {
        fn force_flush(&self) -> OTelSdkResult {
            Ok(())
        }
-
-        fn shutdown(&self) -> OTelSdkResult {
-            Ok(())
-        }
    }

    #[derive(Debug)]
@@ -163,10 +166,6 @@ pub(crate) mod tests {
        fn force_flush(&self) -> OTelSdkResult {
            Ok(())
        }
-
-        fn shutdown(&self) -> OTelSdkResult {
-            Ok(())
-        }
    }

    #[test]

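
Because both trait methods now have default bodies, existing `LogProcessor` implementations keep compiling: `shutdown_with_timeout` is the hook to override, and `shutdown()` forwards a 5-second default. A toy sketch of this default-method pattern (a made-up `Processor` trait, not the real `LogProcessor` definition):

```rust
use std::time::Duration;

type SdkResult = Result<(), String>; // stand-in for OTelSdkResult

trait Processor: Send + Sync {
    // Implementors override this and honor the deadline.
    fn shutdown_with_timeout(&self, _timeout: Duration) -> SdkResult {
        Ok(())
    }
    // Existing callers keep using shutdown(); it forwards a default deadline.
    fn shutdown(&self) -> SdkResult {
        self.shutdown_with_timeout(Duration::from_secs(5))
    }
}

struct Flushing;

impl Processor for Flushing {
    fn shutdown_with_timeout(&self, timeout: Duration) -> SdkResult {
        // e.g. forward to an exporter: self.exporter.shutdown_with_timeout(timeout)
        println!("shutting down within {timeout:?}");
        Ok(())
    }
}

fn main() {
    let p = Flushing;
    p.shutdown().unwrap(); // uses the 5 s default deadline
    p.shutdown_with_timeout(Duration::from_millis(100)).unwrap();
}
```

Note that the new default `shutdown_with_timeout` body returns `Ok(())`, so an implementor that overrides only `shutdown()` gets its custom logic only when callers enter through `shutdown()`; callers invoking `shutdown_with_timeout` directly would hit the no-op default.
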
@@ -628,10 +628,6 @@ mod tests {
        fn force_flush(&self) -> OTelSdkResult {
            Ok(())
        }
-
-        fn shutdown(&self) -> OTelSdkResult {
-            Ok(())
-        }
    }

    #[derive(Debug)]
@@ -658,10 +654,6 @@ mod tests {
        fn force_flush(&self) -> OTelSdkResult {
            Ok(())
        }
-
-        fn shutdown(&self) -> OTelSdkResult {
-            Ok(())
-        }
    }
    #[test]
    fn test_log_data_modification_by_multiple_processors() {

29 changes: 18 additions & 11 deletions opentelemetry-sdk/src/logs/logger_provider.rs
@@ -3,6 +3,7 @@ use crate::error::{OTelSdkError, OTelSdkResult};
 use crate::logs::LogExporter;
 use crate::Resource;
 use opentelemetry::{otel_debug, otel_info, InstrumentationScope};
+use std::time::Duration;
 use std::{
     borrow::Cow,
     sync::{
@@ -96,7 +97,7 @@ impl SdkLoggerProvider {
     }

     /// Shuts down this `LoggerProvider`
-    pub fn shutdown(&self) -> OTelSdkResult {
+    pub fn shutdown_with_timeout(&self, timeout: Duration) -> OTelSdkResult {
         otel_debug!(
             name: "LoggerProvider.ShutdownInvokedByUser",
         );
@@ -107,7 +108,7 @@ impl SdkLoggerProvider {
             .is_ok()
         {
             // propagate the shutdown signal to processors
-            let result = self.inner.shutdown();
+            let result = self.inner.shutdown_with_timeout(timeout);
             if result.iter().all(|res| res.is_ok()) {
                 Ok(())
             } else {
@@ -123,6 +124,11 @@ impl SdkLoggerProvider {
             Err(OTelSdkError::AlreadyShutdown)
         }
     }
+
+    /// Shuts down this `LoggerProvider` with default timeout
+    pub fn shutdown(&self) -> OTelSdkResult {
+        self.shutdown_with_timeout(Duration::from_secs(5))
+    }
 }

 #[derive(Debug)]
@@ -133,10 +139,10 @@ struct LoggerProviderInner {

 impl LoggerProviderInner {
     /// Shuts down the `LoggerProviderInner` and returns any errors.
-    pub(crate) fn shutdown(&self) -> Vec<OTelSdkResult> {
+    pub(crate) fn shutdown_with_timeout(&self, timeout: Duration) -> Vec<OTelSdkResult> {
         let mut results = vec![];
         for processor in &self.processors {
-            let result = processor.shutdown();
+            let result = processor.shutdown_with_timeout(timeout);
             if let Err(err) = &result {
                 // Log at debug level because:
                 // - The error is also returned to the user for handling (if applicable)
@@ -149,6 +155,11 @@ impl LoggerProviderInner {
         }
         results
     }
+
+    /// Shuts down the `LoggerProviderInner` with default timeout and returns any errors.
+    pub(crate) fn shutdown(&self) -> Vec<OTelSdkResult> {
+        self.shutdown_with_timeout(Duration::from_secs(5))
+    }
 }

 impl Drop for LoggerProviderInner {
@@ -330,7 +341,7 @@ mod tests {
            Ok(())
        }

-        fn shutdown(&self) -> OTelSdkResult {
+        fn shutdown_with_timeout(&self, _timeout: Duration) -> OTelSdkResult {
            self.is_shutdown
                .lock()
                .map(|mut is_shutdown| *is_shutdown = true)
@@ -383,10 +394,6 @@ mod tests {
            Ok(())
        }

-        fn shutdown(&self) -> OTelSdkResult {
-            Ok(())
-        }
-
        fn set_resource(&mut self, resource: &Resource) {
            let mut res = self.resource.lock().unwrap();
            *res = resource.clone();
@@ -903,7 +910,7 @@ mod tests {
            Ok(())
        }

-        fn shutdown(&self) -> OTelSdkResult {
+        fn shutdown_with_timeout(&self, _timeout: Duration) -> OTelSdkResult {
            *self.shutdown_called.lock().unwrap() = true;
            Ok(())
        }
@@ -934,7 +941,7 @@ mod tests {
            Ok(())
        }

-        fn shutdown(&self) -> OTelSdkResult {
+        fn shutdown_with_timeout(&self, _timeout: Duration) -> OTelSdkResult {
            let mut count = self.shutdown_count.lock().unwrap();
            *count += 1;
            Ok(())
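
At the provider level, the same `timeout` is handed to every processor in turn and the per-processor results are collected before deciding overall success, so worst-case shutdown can take close to `timeout` multiplied by the number of processors. A minimal sketch of that aggregation (stand-in types, not the SDK's):

```rust
use std::time::Duration;

type SdkResult = Result<(), String>; // stand-in for OTelSdkResult

// Each processor gets the full deadline; all results are collected before
// deciding overall success, mirroring LoggerProviderInner.
fn shutdown_all(shutdowns: &[fn(Duration) -> SdkResult], timeout: Duration) -> SdkResult {
    let results: Vec<SdkResult> = shutdowns.iter().map(|s| s(timeout)).collect();
    if results.iter().all(|r| r.is_ok()) {
        Ok(())
    } else {
        Err(format!("shutdown failed: {results:?}"))
    }
}
```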