Add log file rotation #2468

Open
wants to merge 7 commits into base: develop
12 changes: 11 additions & 1 deletion crates/common/src/ffi/logging.rs
@@ -92,6 +92,8 @@ pub unsafe extern "C" fn logging_init(
is_colored: u8,
is_bypassed: u8,
print_config: u8,
max_file_size: u64,
max_backup_count: u32,
) -> LogGuard_API {
let level_stdout = map_log_level_to_filter(level_stdout);
let level_file = map_log_level_to_filter(level_file);
@@ -107,13 +109,21 @@ pub unsafe extern "C" fn logging_init(
u8_as_bool(print_config),
);

// Configure file rotation if max_file_size > 0
let file_rotate = if max_file_size > 0 {
Some((max_file_size, max_backup_count))
} else {
None
};

let directory =
unsafe { optional_cstr_to_str(directory_ptr).map(std::string::ToString::to_string) };
let file_name =
unsafe { optional_cstr_to_str(file_name_ptr).map(std::string::ToString::to_string) };
let file_format =
unsafe { optional_cstr_to_str(file_format_ptr).map(std::string::ToString::to_string) };
let file_config = FileWriterConfig::new(directory, file_name, file_format);

let file_config = FileWriterConfig::new(directory, file_name, file_format, file_rotate);

if u8_as_bool(is_bypassed) {
logging_set_bypass();
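
For context, the FFI layer treats max_file_size == 0 as "rotation disabled". A minimal standalone sketch of that gating (the helper name is hypothetical and not part of this diff):

// Hypothetical helper mirroring the gating added to `logging_init`:
// a `max_file_size` of 0 leaves rotation disabled.
fn rotation_params(max_file_size: u64, max_backup_count: u32) -> Option<(u64, u32)> {
    if max_file_size > 0 {
        Some((max_file_size, max_backup_count))
    } else {
        None
    }
}

#[test]
fn zero_max_file_size_disables_rotation() {
    assert_eq!(rotation_params(0, 5), None);
    assert_eq!(rotation_params(10 * 1024 * 1024, 5), Some((10 * 1024 * 1024, 5)));
}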
90 changes: 90 additions & 0 deletions crates/common/src/logging/logger.rs
@@ -538,6 +538,7 @@ mod tests {
use nautilus_model::identifiers::TraderId;
use rstest::*;
use serde_json::Value;
use std::thread::sleep;
use tempfile::tempdir;
use ustr::Ustr;

@@ -774,4 +775,93 @@ mod tests {
"{\"timestamp\":\"1970-01-20T02:20:00.000000000Z\",\"trader_id\":\"TRADER-001\",\"level\":\"INFO\",\"color\":\"NORMAL\",\"component\":\"RiskEngine\",\"message\":\"This is a test.\"}\n"
);
}

#[test]
fn test_file_rotation_and_backup_limits() {
// Create a temporary directory for log files
let temp_dir = tempdir().expect("Failed to create temporary directory");
let dir_path = temp_dir.path().to_str().unwrap().to_string();

// Configure a small max file size to trigger rotation quickly
let max_backups = 3;
let max_file_size = 100;
let file_config = FileWriterConfig {
directory: Some(dir_path.clone()),
file_name: None,
file_format: Some("log".to_string()),
file_rotate: Some((max_file_size, max_backups).into()), // 100 bytes max size, 3 max backups
};

// Create the file writer
let config = LoggerConfig::from_spec("fileout=Info;Test=Info").unwrap();
let log_guard = Logger::init_with_config(
TraderId::from("TRADER-001"),
UUID4::new(),
config,
file_config,
);

log::info!(
component = "Test";
"Test log message with enough content to exceed our small max file size limit"
);

sleep(Duration::from_millis(100));

// Count the number of log files in the directory
let files: Vec<_> = std::fs::read_dir(&dir_path)
.expect("Failed to read directory")
.filter_map(Result::ok)
.filter(|entry| entry.path().extension().map_or(false, |ext| ext == "log"))
.collect();

        // Only the initial log file should exist after the first write
        assert_eq!(files.len(), 1);

log::info!(
component = "Test";
"Test log message with enough content to exceed our small max file size limit"
);

sleep(Duration::from_millis(100));

// Count the number of log files in the directory
let files: Vec<_> = std::fs::read_dir(&dir_path)
.expect("Failed to read directory")
.filter_map(Result::ok)
.filter(|entry| entry.path().extension().map_or(false, |ext| ext == "log"))
.collect();

        // The second write should have triggered a rotation, giving two files
        assert_eq!(files.len(), 2);

for _ in 0..5 {
// Write enough data to trigger a few rotations
log::info!(
component = "Test";
"Test log message with enough content to exceed our small max file size limit"
);

sleep(Duration::from_millis(100));
}

// Count the number of log files in the directory
let files: Vec<_> = std::fs::read_dir(&dir_path)
.expect("Failed to read directory")
.filter_map(Result::ok)
.filter(|entry| entry.path().extension().map_or(false, |ext| ext == "log"))
.collect();

        // We should end up with exactly max_backups + 1 files (current file + backups)
        assert_eq!(
            files.len(),
            max_backups as usize + 1,
            "Expected {} log files, found {}",
            max_backups as usize + 1,
            files.len()
        );

// Clean up
drop(log_guard);
drop(temp_dir);
}
}
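
The struct literal used in the test above can also be produced through the updated constructor. A minimal sketch, assuming the crate is imported as nautilus_common and using a placeholder directory path:

use nautilus_common::logging::writer::FileWriterConfig;

fn main() {
    // Rotate once the file reaches 100 bytes and keep at most 3 backups,
    // matching the configuration exercised by the test above.
    let file_config = FileWriterConfig::new(
        Some("/tmp/nautilus_logs".to_string()), // directory (placeholder path)
        None,                                   // file_name: fall back to default naming
        Some("log".to_string()),                // file_format
        Some((100, 3)),                         // (max_file_size in bytes, max_backup_count)
    );
    assert!(file_config.file_rotate.is_some());
}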
154 changes: 115 additions & 39 deletions crates/common/src/logging/writer.rs
@@ -14,13 +14,14 @@
// -------------------------------------------------------------------------------------------------

use std::{
collections::VecDeque,
fs::{File, create_dir_all},
io::{self, BufWriter, Stderr, Stdout, Write},
path::PathBuf,
sync::OnceLock,
};

use chrono::{DateTime, Utc};
use chrono::Utc;
use log::LevelFilter;
use regex::Regex;

@@ -114,6 +115,42 @@ impl LogWriter for StderrWriter {
}
}

/// Configuration for log file rotation.
#[derive(Debug, Clone)]
pub struct FileRotateConfig {
    /// Maximum file size in bytes before the file is rotated.
    pub max_file_size: u64,
    /// Maximum number of backup files to keep.
    pub max_backup_count: u32,
    /// Size in bytes of the current log file.
    cur_file_size: u64,
    /// Queue of backup file paths (oldest first).
    backup_files: VecDeque<PathBuf>,
}

impl Default for FileRotateConfig {
fn default() -> Self {
Self {
max_file_size: 10 * 1024 * 1024, // 10MB default
max_backup_count: 5,
cur_file_size: 0,
backup_files: VecDeque::new(),
}
}
}

impl From<(u64, u32)> for FileRotateConfig {
fn from(value: (u64, u32)) -> Self {
let (max_file_size, max_backup_count) = value;
Self {
max_file_size,
max_backup_count,
cur_file_size: 0,
backup_files: VecDeque::new(),
}
}
}
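
// Sketch (not part of this diff): how the two constructors above behave.
// `Default` gives a 10 MiB limit with 5 backups; `From<(u64, u32)>` takes the
// supplied limits and starts size tracking at zero.
#[cfg(test)]
mod file_rotate_config_sketch {
    use super::FileRotateConfig;

    #[test]
    fn default_and_from_tuple() {
        let default_config = FileRotateConfig::default();
        assert_eq!(default_config.max_file_size, 10 * 1024 * 1024);
        assert_eq!(default_config.max_backup_count, 5);

        let config = FileRotateConfig::from((1_000_000, 3));
        assert_eq!(config.max_file_size, 1_000_000);
        assert_eq!(config.max_backup_count, 3);
    }
}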

#[cfg_attr(
feature = "python",
pyo3::pyclass(module = "nautilus_trader.core.nautilus_pyo3.common")
@@ -123,20 +160,24 @@ pub struct FileWriterConfig {
pub directory: Option<String>,
pub file_name: Option<String>,
pub file_format: Option<String>,
pub file_rotate: Option<FileRotateConfig>,
}

impl FileWriterConfig {
/// Creates a new [`FileWriterConfig`] instance.
#[must_use]
pub const fn new(
pub fn new(
directory: Option<String>,
file_name: Option<String>,
file_format: Option<String>,
file_rotate: Option<(u64, u32)>,
) -> Self {
let file_rotate = file_rotate.map(FileRotateConfig::from);
Self {
directory,
file_name,
file_format,
file_rotate,
}
}
}
@@ -203,10 +244,15 @@ impl FileWriter {
is_json_format: bool,
) -> PathBuf {
let basename = if let Some(file_name) = file_config.file_name.as_ref() {
file_name.clone()
if file_config.file_rotate.is_some() {
                let current_date_utc = Utc::now().format("%Y-%m-%d_%H%M%S-%3f");
format!("{file_name}_{current_date_utc}")
} else {
file_name.clone()
}
} else {
// default base name
let current_date_utc = Utc::now().format("%Y-%m-%d");
            let current_date_utc = Utc::now().format("%Y-%m-%d_%H%M%S-%3f");
format!("{trader_id}_{current_date_utc}_{instance_id}")
};

@@ -224,52 +270,82 @@
}

#[must_use]
pub fn should_rotate_file(&self) -> bool {
let current_date_utc = Utc::now().date_naive();
let metadata = self
.path
.metadata()
.expect("Failed to read log file metadata");
let creation_time = metadata
.created()
.expect("Failed to get log file creation time");

let creation_time_utc: DateTime<Utc> = creation_time.into();
let creation_date_utc = creation_time_utc.date_naive();

current_date_utc != creation_date_utc
pub fn should_rotate_file(&mut self) -> bool {
self.file_config
.file_rotate
.as_ref()
.map(|config| config.cur_file_size >= config.max_file_size)
.unwrap_or(false)
}

fn rotate_file(&mut self) {
// Flush current file
self.flush();

// Create new file
let new_path = Self::create_log_file_path(
&self.file_config,
&self.trader_id,
&self.instance_id,
self.json_format,
);
        match File::options().create(true).append(true).open(&new_path) {
            Ok(new_file) => {
                // Move the current file into the backup queue and reset the size counter
                if let Some(rotate_config) = &mut self.file_config.file_rotate {
                    rotate_config.backup_files.push_back(self.path.clone());
                    rotate_config.cur_file_size = 0;
                    cleanup_backups(rotate_config);
                }

                self.buf = BufWriter::new(new_file);
                self.path = new_path;

                tracing::info!("Rotated log file, now logging to: {}", self.path.display());
            }
            Err(e) => tracing::error!("Error creating log file: {e}"),
        }
    }
}

/// Removes the oldest backup files when the queue exceeds the max backup count.
///
/// TODO: Consider popping a single file instead, since normal execution never
/// creates more than one excess backup.
fn cleanup_backups(rotate_config: &mut FileRotateConfig) {
let files_to_remove = rotate_config
.backup_files
.len()
.saturating_sub(rotate_config.max_backup_count as usize);

// Remove oldest files if we exceed the max backup count
rotate_config
.backup_files
.drain(..files_to_remove)
.filter(|path| path.exists())
.for_each(|path| match std::fs::remove_file(&path) {
Ok(_) => tracing::debug!("Removed old log file: {}", path.display()),
Err(e) => tracing::error!("Failed to remove old log file {}: {}", path.display(), e),
});
}

impl LogWriter for FileWriter {
fn write(&mut self, line: &str) {
// Check if we need to rotate the file
if self.should_rotate_file() {
self.flush();

let file_path = Self::create_log_file_path(
&self.file_config,
&self.trader_id,
&self.instance_id,
self.json_format,
);

match File::options()
.create(true)
.append(true)
.open(file_path.clone())
{
Ok(file) => {
self.buf = BufWriter::new(file);
self.path = file_path;
}
Err(e) => tracing::error!("Error creating log file: {e}"),
}
self.rotate_file();
}

let line = strip_ansi_codes(line);

match self.buf.write_all(line.as_bytes()) {
Ok(()) => {}
Ok(()) => {
// Update current file size
if let Some(rotate_config) = &mut self.file_config.file_rotate {
rotate_config.cur_file_size += line.len() as u64;
}
}
Err(e) => tracing::error!("Error writing to file: {e:?}"),
}
}
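
To make the backup bookkeeping above easier to follow, here is a small self-contained model of the queue logic (a sketch, not the PR's code): each rotation pushes the path of the file just closed onto the queue, and cleanup drops the oldest entries whenever the queue grows past max_backup_count.

use std::collections::VecDeque;
use std::path::PathBuf;

// Standalone model of the backup queue maintained by `FileWriter` and `cleanup_backups`
fn trim_backups(backups: &mut VecDeque<PathBuf>, max_backup_count: usize) -> Vec<PathBuf> {
    let excess = backups.len().saturating_sub(max_backup_count);
    backups.drain(..excess).collect() // oldest paths first; these would be deleted from disk
}

fn main() {
    let mut backups = VecDeque::new();
    for i in 0..5 {
        // Each rotation pushes the path of the file that was just closed
        backups.push_back(PathBuf::from(format!("trader_{i}.log")));
        let removed = trim_backups(&mut backups, 3);
        println!("rotation {i}: kept {}, removed {removed:?}", backups.len());
    }
    // Only the three most recent backups remain
    assert_eq!(backups.len(), 3);
}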
12 changes: 7 additions & 5 deletions crates/common/src/python/logging.rs
@@ -45,14 +45,15 @@ impl LoggerConfig {
#[pymethods]
impl FileWriterConfig {
#[new]
#[pyo3(signature = (directory=None, file_name=None, file_format=None))]
#[pyo3(signature = (directory=None, file_name=None, file_format=None, file_rotate=None))]
#[must_use]
pub const fn py_new(
pub fn py_new(
directory: Option<String>,
file_name: Option<String>,
file_format: Option<String>,
file_rotate: Option<(u64, u32)>,
) -> Self {
Self::new(directory, file_name, file_format)
Self::new(directory, file_name, file_format, file_rotate)
}
}

@@ -90,7 +91,7 @@ pub fn py_init_tracing() -> PyResult<()> {
#[pyfunction]
#[pyo3(name = "init_logging")]
#[allow(clippy::too_many_arguments)]
#[pyo3(signature = (trader_id, instance_id, level_stdout, level_file=None, component_levels=None, directory=None, file_name=None, file_format=None, is_colored=None, is_bypassed=None, print_config=None))]
#[pyo3(signature = (trader_id, instance_id, level_stdout, level_file=None, component_levels=None, directory=None, file_name=None, file_format=None, file_rotate=None, is_colored=None, is_bypassed=None, print_config=None))]
pub fn py_init_logging(
trader_id: TraderId,
instance_id: UUID4,
@@ -100,6 +101,7 @@ pub fn py_init_logging(
directory: Option<String>,
file_name: Option<String>,
file_format: Option<String>,
file_rotate: Option<(u64, u32)>,
is_colored: Option<bool>,
is_bypassed: Option<bool>,
print_config: Option<bool>,
@@ -114,7 +116,7 @@ pub fn py_init_logging(
print_config.unwrap_or(false),
);

let file_config = FileWriterConfig::new(directory, file_name, file_format);
let file_config = FileWriterConfig::new(directory, file_name, file_format, file_rotate);

if is_bypassed.unwrap_or(false) {
logging_set_bypass();
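
Because py_new simply forwards to FileWriterConfig::new, passing file_rotate=(5_000_000, 2) from Python is equivalent to the Rust call below (a sketch; the sizes are arbitrary and the crate path is assumed). Omitting file_rotate leaves it as None, so rotation stays disabled.

use nautilus_common::logging::writer::FileWriterConfig;

fn main() {
    // Same effect as FileWriterConfig(file_rotate=(5_000_000, 2)) on the Python side:
    // rotate at ~5 MB and keep at most 2 backups.
    let with_rotation = FileWriterConfig::new(None, None, None, Some((5_000_000, 2)));
    assert!(with_rotation.file_rotate.is_some());

    // Same effect as FileWriterConfig() with no file_rotate argument:
    let without_rotation = FileWriterConfig::new(None, None, None, None);
    assert!(without_rotation.file_rotate.is_none());
}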