AirLibrary/Logging/
mod.rs

//! # Structured Logging Module
//!
//! Provides comprehensive structured logging with JSON output, request ID
//! propagation, context-aware logging, log rotation, sensitive data filtering,
//! and validation.
//!
//! ## Responsibilities
//!
//! ### Structured Logging
//! - JSON output format for machine parsing and analysis
//! - Request ID and trace ID propagation across log entries
//! - Context-aware logging with operation tracking
//! - Log level filtering (TRACE, DEBUG, INFO, WARN, ERROR)
//!
//! ### Log Rotation
//! - Size-based log rotation to prevent disk exhaustion
//! - Time-based rotation (daily) for archival
//! - Automatic cleanup of old log files
//! - Compressed log file storage for space efficiency
//!
//! ### Context Management
//! - Thread-local context storage for async operations
//! - Automatic context propagation across await points
//! - Correlation ID linking distributed requests
//! - User and session tracking
//!
//! ### Sensitive Data Handling
//! - Automatic redaction of sensitive fields
//! - Configurable sensitive patterns
//! - Sanitization of error messages
//! - Audit logging for security events
//!
//! ### Log Validation
//! - Structured log data validation before output
//! - Schema enforcement for consistent format
//! - Size limits on log messages
//! - Malformed log rejection
//!
//! ## Integration with Mountain
//!
//! Logs flow to Mountain's debugging infrastructure:
//! - Real-time log streaming to the debug console
//! - Historical log search and filtering
//! - Error aggregation and alerting
//! - Performance profiling logs
//!
//! ## VSCode Debugging References
//!
//! Similar logging patterns are used in VSCode for:
//! - Exception and error tracking
//! - Debug output for extension development
//! - Performance profiling traces
//! - Cross-process communication logging
//!
//! Reference:
//! vs/base/common/errors
//!
//! ## TODOs
//!
//! - [DISTRIBUTED TRACING] Tighter integration with the Tracing module
//! - [ELASTICSEARCH] Direct log export to Elasticsearch/Logstash
//! - [LOG ANALYSIS] Automatic anomaly detection in logs
//! - [KIBANA] Pre-built Kibana dashboards
//! - [LOG PARSING] Support for custom log formats
//!
//! ## Sensitive Data Handling
//!
//! All logs are automatically sanitized:
//! - Passwords, tokens, and secrets are redacted
//! - User-identifiable information is masked
//! - API keys and credentials are removed
//! - Error messages are scanned for sensitive patterns
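//!
//! ## Example
//!
//! A minimal usage sketch (illustrative only; it assumes the items below are reachable
//! from the caller's scope and that the caller returns the crate's `Result` type):
//!
//! ```ignore
//! // Initialize the global logger once at startup (text output, file under ./Log)
//! InitializeLogger(false, Some("./Log/Air.log".to_string()))?;
//!
//! // Attach a per-request context to the current thread
//! SetLogContext(LogContext::New("ImportLibrary").WithUserId("User-42".to_string()));
//!
//! // Context fields (RequestId, TraceId, SpanId, Operation) are attached automatically
//! GetLogger().Info("Import started");
//!
//! // Clear the context once the operation completes
//! ClearLogContext();
//! ```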

use std::{
	collections::HashMap,
	path::{Path, PathBuf},
	sync::{Arc, Mutex},
	time::{SystemTime, UNIX_EPOCH},
};

use serde::{Deserialize, Serialize};
use tracing::{debug, error, info, warn};
use tracing_subscriber::{fmt::format::FmtSpan, prelude::*};
use tracing_appender::rolling::Rotation;

use crate::Result;

/// Configuration for log rotation and management
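///
/// A construction sketch (values are illustrative; `Validate` enforces the documented limits):
///
/// ```ignore
/// let Config = LogRotationConfig {
/// 	MaxFileSizeBytes:50 * 1024 * 1024, // rotate after 50 MB
/// 	MaxFiles:14,                       // keep two weeks of logs
/// 	Rotation:LogRotation::Hourly,
/// 	Compress:true,
/// 	LogDirectory:"./Log".to_string(),
/// 	LogFilePrefix:"Air".to_string(),
/// };
/// Config.Validate()?;
/// ```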
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LogRotationConfig {
	/// Maximum size of a single log file in bytes before rotation
	pub MaxFileSizeBytes:u64,
	/// Maximum number of rotated log files to retain
	pub MaxFiles:usize,
	/// Rotation strategy (daily, hourly, minutely, never)
	pub Rotation:LogRotation,
	/// Whether to compress rotated log files
	pub Compress:bool,
	/// Log directory path
	pub LogDirectory:String,
	/// Log file name prefix
	pub LogFilePrefix:String,
}

/// Log rotation strategies
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub enum LogRotation {
	/// Rotate daily
	Daily,
	/// Rotate every hour
	Hourly,
	/// Rotate every minute (for debugging)
	Minutely,
	/// Never rotate automatically
	Never,
}

impl Default for LogRotation {
	fn default() -> Self { Self::Daily }
}

impl Default for LogRotationConfig {
	fn default() -> Self {
		Self {
			MaxFileSizeBytes:100 * 1024 * 1024, // 100 MB
			MaxFiles:30,                        // Keep 30 days of logs
			Rotation:LogRotation::Daily,
			Compress:true,
			LogDirectory:"./Log".to_string(),
			LogFilePrefix:"Air".to_string(),
		}
	}
}

impl LogRotationConfig {
	/// Validate log rotation configuration
	pub fn Validate(&self) -> Result<()> {
		if self.MaxFileSizeBytes == 0 {
			return Err("MaxFileSizeBytes must be greater than 0".into());
		}
		if self.MaxFileSizeBytes > 10 * 1024 * 1024 * 1024 {
			// Max 10 GB
			return Err("MaxFileSizeBytes cannot exceed 10 GB".into());
		}
		if self.MaxFiles == 0 {
			return Err("MaxFiles must be greater than 0".into());
		}
		if self.MaxFiles > 365 {
			// Max 1 year retention
			return Err("MaxFiles cannot exceed 365".into());
		}
		Ok(())
	}

	/// Convert to tracing_appender Rotation
	pub fn ToTracingRotation(&self) -> Rotation {
		match self.Rotation {
			LogRotation::Daily => Rotation::DAILY,
			LogRotation::Hourly => Rotation::HOURLY,
			LogRotation::Minutely => Rotation::MINUTELY,
			LogRotation::Never => Rotation::NEVER,
		}
	}
}

/// Sensitive data patterns for redaction
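///
/// A configuration sketch (the custom pattern below is illustrative; invalid regexes
/// are skipped with a warning when the filter is built):
///
/// ```ignore
/// let Config = SensitiveDataConfig {
/// 	Enabled:true,
/// 	CustomPatterns:vec![r"(?i)session[=[:space:]]+\S+".to_string()],
/// 	IncludeStandardPatterns:true,
/// };
/// ```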
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SensitiveDataConfig {
	/// Enable automatic sensitive data redaction
	pub Enabled:bool,
	/// Custom patterns to redact (regex)
	pub CustomPatterns:Vec<String>,
	/// Standard patterns to include (password, token, secret, etc.)
	pub IncludeStandardPatterns:bool,
}

impl Default for SensitiveDataConfig {
	fn default() -> Self { Self { Enabled:true, CustomPatterns:Vec::new(), IncludeStandardPatterns:true } }
}

/// Context for structured logging with request IDs and metadata
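///
/// A builder-style sketch (identifiers are illustrative):
///
/// ```ignore
/// let Context = LogContext::New("SyncLibrary")
/// 	.WithUserId("User-42".to_string())
/// 	.WithSessionId("Session-7".to_string())
/// 	.WithMetadata("Shelf".to_string(), "Fiction".to_string());
/// Context.Validate()?;
/// ```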
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LogContext {
	pub RequestId:String,
	pub TraceId:String,
	pub SpanId:String,
	pub UserId:Option<String>,
	pub SessionId:Option<String>,
	pub Operation:String,
	pub Metadata:HashMap<String, String>,
}

impl LogContext {
	/// Create a new log context
	pub fn New(Operation:impl Into<String>) -> Self {
		let RequestId = crate::Utility::GenerateRequestId();
		let TraceId = crate::Utility::GenerateRequestId();
		let SpanId = uuid::Uuid::new_v4().to_string();

		Self {
			RequestId,
			TraceId,
			SpanId,
			UserId:None,
			SessionId:None,
			Operation:Operation.into(),
			Metadata:HashMap::new(),
		}
	}

	/// Validate log context for required fields
	pub fn Validate(&self) -> Result<()> {
		if self.RequestId.is_empty() {
			return Err("RequestId cannot be empty".into());
		}
		if self.TraceId.is_empty() {
			return Err("TraceId cannot be empty".into());
		}
		if self.Operation.is_empty() {
			return Err("Operation cannot be empty".into());
		}
		Ok(())
	}

	/// Set user ID in context
	pub fn WithUserId(mut self, UserId:String) -> Self {
		self.UserId = Some(UserId);
		self
	}

	/// Set session ID in context
	pub fn WithSessionId(mut self, SessionId:String) -> Self {
		self.SessionId = Some(SessionId);
		self
	}

	/// Add metadata to context
	pub fn WithMetadata(mut self, Key:String, Value:String) -> Self {
		self.Metadata.insert(Key, Value);
		self
	}

	/// Add multiple metadata entries
	pub fn WithMetadataMap(mut self, Metadata:HashMap<String, String>) -> Self {
		self.Metadata.extend(Metadata);
		self
	}
}

thread_local! {
	static LOG_CONTEXT: std::cell::RefCell<Option<LogContext>> = std::cell::RefCell::new(None);
}

/// Set the log context for the current thread
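///
/// Invalid contexts (see `LogContext::Validate`) are rejected and logged rather than stored.
///
/// A typical set/log/clear sequence (sketch):
///
/// ```ignore
/// SetLogContext(LogContext::New("ExportCatalog"));
/// GetLogger().Info("Export started"); // carries RequestId/TraceId/SpanId/Operation
/// ClearLogContext();                  // avoid leaking context into unrelated work on this thread
/// ```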
pub fn SetLogContext(Context:LogContext) {
	if let Err(e) = Context.Validate() {
		error!("[Logging] Invalid log context provided: {:?}", e);
		return;
	}
	LOG_CONTEXT.with(|ctx| {
		*ctx.borrow_mut() = Some(Context);
	});
}

/// Get the current log context
pub fn GetLogContext() -> Option<LogContext> { LOG_CONTEXT.with(|Context| Context.borrow().clone()) }

/// Clear the log context for the current thread
pub fn ClearLogContext() {
	LOG_CONTEXT.with(|Context| {
		*Context.borrow_mut() = None;
	});
}

/// Log file manager for rotation and cleanup
pub struct LogManager {
	Config:LogRotationConfig,
	CurrentFile:Arc<Mutex<Option<PathBuf>>>,
	CurrentSize:Arc<Mutex<u64>>,
}

impl LogManager {
	/// Create a log manager and ensure the log directory exists
	pub fn New(Config:LogRotationConfig) -> Result<Self> {
		Config.Validate()?;

		// Ensure log directory exists
		std::fs::create_dir_all(&Config.LogDirectory)?;

		Ok(Self {
			Config,
			CurrentFile:Arc::new(Mutex::new(None)),
			CurrentSize:Arc::new(Mutex::new(0)),
		})
	}

	/// Check if log rotation is needed
	fn ShouldRotate(&self) -> bool {
		let size = *self.CurrentSize.lock().unwrap();
		size >= self.Config.MaxFileSizeBytes
	}

	/// Perform log rotation
	fn Rotate(&self) -> Result<()> {
		let CurrentFile = self.CurrentFile.lock().unwrap();

		if let Some(ref FilePath) = *CurrentFile {
			// Rename current file with timestamp
			let Timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();

			let RotatedPath = format!("{}.{}.log", FilePath.display(), Timestamp);

			std::fs::rename(FilePath, &RotatedPath)?;

			// Compress if enabled
			if self.Config.Compress {
				self.CompressFile(&RotatedPath)?;
			}

			// Cleanup old log files
			self.CleanupOldLogs()?;
		}

		*self.CurrentSize.lock().unwrap() = 0;

		Ok(())
	}

	/// Compress a log file
	fn CompressFile(&self, path:&str) -> Result<()> {
		// Compression is not yet implemented; rotated files are currently left uncompressed.
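		// A possible gzip implementation sketch (assumes the `flate2` crate is added as a
		// dependency; illustrative only, not wired in here):
		//
		//     let Input = std::fs::File::open(path)?;
		//     let Output = std::fs::File::create(format!("{}.gz", path))?;
		//     let mut Encoder = flate2::write::GzEncoder::new(Output, flate2::Compression::default());
		//     std::io::copy(&mut std::io::BufReader::new(Input), &mut Encoder)?;
		//     Encoder.finish()?;
		//     std::fs::remove_file(path)?;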
		let _ = path;
		Ok(())
	}

	/// Cleanup old log files
	fn CleanupOldLogs(&self) -> Result<()> {
		let log_dir = Path::new(&self.Config.LogDirectory);

		if !log_dir.exists() {
			return Ok(());
		}

		let mut log_files:Vec<_> = std::fs::read_dir(log_dir)?
			.filter_map(|e| e.ok())
			.filter(|e| {
				e.path()
					.extension()
					.and_then(|s| s.to_str())
					.map(|ext| ext == "log")
					.unwrap_or(false)
			})
			.collect();

		// Sort by modification time (newest first)
		log_files.sort_by(|a, b| {
			let a_time = a.metadata().and_then(|m| m.modified()).unwrap_or(UNIX_EPOCH);
			let b_time = b.metadata().and_then(|m| m.modified()).unwrap_or(UNIX_EPOCH);
			b_time.cmp(&a_time)
		});

		// Keep only the newest MaxFiles entries
		for file in log_files.into_iter().skip(self.Config.MaxFiles) {
			let _ = std::fs::remove_file(file.path());
		}

		Ok(())
	}
}

/// Sensitive data filter for log sanitization
#[derive(Debug, Clone)]
pub struct SensitiveDataFilter {
	enabled:bool,
	patterns:Vec<regex::Regex>,
}

impl Default for SensitiveDataFilter {
	fn default() -> Self {
		let mut patterns = Vec::new();

		// Standard sensitive patterns - simplified to avoid escaping issues
		patterns.push(regex::Regex::new(r"(?i)password[=[:space:]]+\S+").unwrap());
		patterns.push(regex::Regex::new(r"(?i)token[=[:space:]]+\S+").unwrap());
		patterns.push(regex::Regex::new(r"(?i)secret[=[:space:]]+\S+").unwrap());
		patterns.push(regex::Regex::new(r"(?i)(api|private)[_-]?key[=[:space:]]+\S+").unwrap());
		patterns.push(regex::Regex::new(r"(?i)authorization[=[:space:]]+Bearer[[:space:]]+\S+").unwrap());
		patterns.push(regex::Regex::new(r"(?i)credential[=[:space:]]+\S+").unwrap());

		Self { enabled:true, patterns }
	}
}

impl SensitiveDataFilter {
	fn new(Config:SensitiveDataConfig) -> Result<Self> {
		let mut filter = Self::default();
		filter.enabled = Config.Enabled;

		if !Config.IncludeStandardPatterns {
			filter.patterns.clear();
		}

		// Add custom patterns
		for pattern in &Config.CustomPatterns {
			match regex::Regex::new(pattern) {
				Ok(re) => filter.patterns.push(re),
				Err(e) => warn!("[Logging] Failed to compile custom regex '{}': {}", pattern, e),
			}
		}

		Ok(filter)
	}

	/// Filter sensitive data from a string
	fn Filter(&self, input:&str) -> String {
		if !self.enabled {
			return input.to_string();
		}

		let mut filtered = input.to_string();

		for pattern in &self.patterns {
			filtered = pattern.replace_all(&filtered, "[REDACTED]").to_string();
		}

		filtered
	}
}

/// Structured log entry for validation
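///
/// A validation sketch (field values are illustrative):
///
/// ```ignore
/// let Entry = StructuredLogEntry {
/// 	Timestamp:1_700_000_000,
/// 	Level:"INFO".to_string(),
/// 	Message:"Book checked out".to_string(),
/// 	RequestId:Some("Req-1".to_string()),
/// 	TraceId:None,
/// 	SpanId:None,
/// 	Operation:Some("Checkout".to_string()),
/// 	UserId:None,
/// 	Metadata:HashMap::new(),
/// };
/// Entry.Validate()?; // rejects empty or oversized messages and unknown levels
/// ```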
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StructuredLogEntry {
	pub Timestamp:u64,
	pub Level:String,
	pub Message:String,
	pub RequestId:Option<String>,
	pub TraceId:Option<String>,
	pub SpanId:Option<String>,
	pub Operation:Option<String>,
	pub UserId:Option<String>,
	pub Metadata:HashMap<String, String>,
}

impl StructuredLogEntry {
	/// Validate log entry structure
	pub fn Validate(&self) -> Result<()> {
		if self.Level.is_empty() {
			return Err("log level cannot be empty".into());
		}
		if self.Message.is_empty() {
			return Err("log message cannot be empty".into());
		}
		if !["TRACE", "DEBUG", "INFO", "WARN", "ERROR"].contains(&self.Level.as_str()) {
			return Err(format!("invalid log level: {}", self.Level).into());
		}
		if self.Message.len() > 10000 {
			// Max 10 KB message
			return Err("log message too large".into());
		}
		Ok(())
	}
}

/// Context-aware logger for structured logging
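///
/// A setup sketch (paths and settings are illustrative; see also `InitializeLogger`
/// and `InitializeLoggerWithRotation` for the global convenience wrappers):
///
/// ```ignore
/// let Logger = ContextLogger::WithRotation(
/// 	true,                              // JSON output
/// 	Some("./Log/Air.log".to_string()),
/// 	LogRotationConfig::default(),
/// )?
/// .WithSensitiveFilter(SensitiveDataConfig::default())?;
/// Logger.Initialize()?;
/// Logger.Info("Logger ready");
/// ```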
#[derive(Debug, Clone)]
pub struct ContextLogger {
	json_output:bool,
	log_file_path:Option<String>,
	rotation_config:Option<LogRotationConfig>,
	sensitive_filter:Arc<SensitiveDataFilter>,
	initialized:Arc<Mutex<bool>>,
}

impl ContextLogger {
	/// Create a new context logger
	pub fn New(json_output:bool, log_file_path:Option<String>) -> Self {
		Self {
			json_output,
			log_file_path,
			rotation_config:None,
			sensitive_filter:Arc::new(SensitiveDataFilter::default()),
			initialized:Arc::new(Mutex::new(false)),
		}
	}

	/// Create with log rotation configuration
	pub fn WithRotation(
		json_output:bool,
		log_file_path:Option<String>,
		rotation_config:LogRotationConfig,
	) -> Result<Self> {
		rotation_config.Validate()?;

		Ok(Self {
			json_output,
			log_file_path,
			rotation_config:Some(rotation_config),
			sensitive_filter:Arc::new(SensitiveDataFilter::default()),
			initialized:Arc::new(Mutex::new(false)),
		})
	}

	/// Set sensitive data filter configuration
	pub fn WithSensitiveFilter(mut self, Config:SensitiveDataConfig) -> Result<Self> {
		self.sensitive_filter = Arc::new(SensitiveDataFilter::new(Config)?);
		Ok(self)
	}

	/// Initialize the logging system with tracing
	pub fn Initialize(&self) -> Result<()> {
		// Check if already initialized
		let mut initialized = self.initialized.lock().unwrap();
		if *initialized {
			return Ok(());
		}

		let filter = tracing_subscriber::EnvFilter::from_default_env()
			.add_directive(tracing_subscriber::filter::LevelFilter::INFO.into());

		if self.json_output {
			// JSON output format
			let fmt_layer = tracing_subscriber::fmt::layer()
				.json()
				.with_current_span(true)
				.with_span_list(false)
				.with_target(true)
				.with_file(true)
				.with_line_number(true)
				.with_writer(std::io::stderr)
				.with_ansi(false)
				.with_span_events(FmtSpan::FULL);

			let registry = tracing_subscriber::registry().with(filter).with(fmt_layer);

			// Set up log file if specified
			if let Some(ref log_path) = self.log_file_path {
				let log_dir = std::path::Path::new(log_path).parent().unwrap_or(std::path::Path::new("."));
				let log_file = std::path::Path::new(log_path)
					.file_name()
					.unwrap_or(std::ffi::OsStr::new("Air.log"));

				// Honor the configured rotation strategy, falling back to daily rotation
				let rotation = self.rotation_config.as_ref().map(|c| c.ToTracingRotation()).unwrap_or(Rotation::DAILY);
				let file_appender = tracing_appender::rolling::RollingFileAppender::new(rotation, log_dir, log_file);
				let (non_blocking, guard) = tracing_appender::non_blocking(file_appender);
				// Keep the worker guard alive for the process lifetime so buffered lines are flushed
				FILE_WRITER_GUARDS.lock().unwrap().push(guard);

				let file_layer = tracing_subscriber::fmt::layer()
					.json()
					.with_current_span(true)
					.with_span_list(false)
					.with_target(true)
					.with_file(true)
					.with_line_number(true)
					.with_writer(non_blocking)
					.with_ansi(false)
					.with_span_events(FmtSpan::FULL);

				registry.with(file_layer).init();
			} else {
				registry.init();
			}
		} else {
			// Standard text output format
			let fmt_layer = tracing_subscriber::fmt::layer()
				.with_target(true)
				.with_file(true)
				.with_line_number(true)
				.with_writer(std::io::stderr)
				.with_ansi(true)
				.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE);

			let registry = tracing_subscriber::registry().with(filter).with(fmt_layer);

			// Set up log file if specified
			if let Some(ref log_path) = self.log_file_path {
				let log_dir = std::path::Path::new(log_path).parent().unwrap_or(std::path::Path::new("."));
				let log_file = std::path::Path::new(log_path)
					.file_name()
					.unwrap_or(std::ffi::OsStr::new("Air.log"));

				// Honor the configured rotation strategy, falling back to daily rotation
				let rotation = self.rotation_config.as_ref().map(|c| c.ToTracingRotation()).unwrap_or(Rotation::DAILY);
				let file_appender = tracing_appender::rolling::RollingFileAppender::new(rotation, log_dir, log_file);
				let (non_blocking, guard) = tracing_appender::non_blocking(file_appender);
				// Keep the worker guard alive for the process lifetime so buffered lines are flushed
				FILE_WRITER_GUARDS.lock().unwrap().push(guard);

				let file_layer = tracing_subscriber::fmt::layer()
					.with_target(true)
					.with_file(true)
					.with_line_number(true)
					.with_writer(non_blocking)
					.with_ansi(false)
					.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE);

				registry.with(file_layer).init();
			} else {
				registry.init();
			}
		}

		*initialized = true;
		info!("[Logging] ContextLogger initialized - JSON output: {}", self.json_output);
		Ok(())
	}

	/// Log with context at info level
	pub fn Info(&self, message:impl Into<String>) {
		let msg = self.sensitive_filter.Filter(&message.into());
		if let Some(Context) = GetLogContext() {
			info!(
				RequestId = Context.RequestId,
				TraceId = Context.TraceId,
				SpanId = Context.SpanId,
				Operation = Context.Operation,
				"{}",
				msg
			);
		} else {
			info!("{}", msg);
		}
	}

	/// Log with context at debug level
	pub fn Debug(&self, message:impl Into<String>) {
		let msg = self.sensitive_filter.Filter(&message.into());
		if let Some(Context) = GetLogContext() {
			debug!(
				RequestId = Context.RequestId,
				TraceId = Context.TraceId,
				SpanId = Context.SpanId,
				Operation = Context.Operation,
				"{}",
				msg
			);
		} else {
			debug!("{}", msg);
		}
	}

	/// Log with context at warn level
	pub fn Warn(&self, message:impl Into<String>) {
		let msg = self.sensitive_filter.Filter(&message.into());
		if let Some(Context) = GetLogContext() {
			warn!(
				RequestId = Context.RequestId,
				TraceId = Context.TraceId,
				SpanId = Context.SpanId,
				Operation = Context.Operation,
				"{}",
				msg
			);
		} else {
			warn!("{}", msg);
		}
	}

	/// Log with context at error level
	pub fn Error(&self, message:impl Into<String>) {
		let msg = self.sensitive_filter.Filter(&message.into());
		if let Some(Context) = GetLogContext() {
			error!(
				RequestId = Context.RequestId,
				TraceId = Context.TraceId,
				SpanId = Context.SpanId,
				Operation = Context.Operation,
				"{}",
				msg
			);
		} else {
			error!("{}", msg);
		}
	}
}

/// Global context logger instance
static LOGGER_INSTANCE:std::sync::OnceLock<ContextLogger> = std::sync::OnceLock::new();

/// Worker guards for non-blocking file writers; kept alive for the process lifetime
/// so buffered log lines are flushed instead of being dropped after initialization
static FILE_WRITER_GUARDS:Mutex<Vec<tracing_appender::non_blocking::WorkerGuard>> = Mutex::new(Vec::new());

/// Get the global context logger
pub fn GetLogger() -> &'static ContextLogger { LOGGER_INSTANCE.get_or_init(|| ContextLogger::New(false, None)) }

/// Initialize the global context logger
pub fn InitializeLogger(json_output:bool, log_file_path:Option<String>) -> Result<()> {
	let logger = ContextLogger::New(json_output, log_file_path);
	logger.Initialize()?;
	let _old = LOGGER_INSTANCE.set(logger);
	Ok(())
}

/// Initialize the global context logger with rotation
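///
/// A startup sketch using the default rotation and redaction settings (path is illustrative):
///
/// ```ignore
/// InitializeLoggerWithRotation(
/// 	true,
/// 	Some("./Log/Air.log".to_string()),
/// 	LogRotationConfig::default(),
/// 	Some(SensitiveDataConfig::default()),
/// )?;
/// ```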
pub fn InitializeLoggerWithRotation(
	json_output:bool,
	log_file_path:Option<String>,
	rotation_config:LogRotationConfig,
	sensitive_config:Option<SensitiveDataConfig>,
) -> Result<()> {
	let mut logger = ContextLogger::WithRotation(json_output, log_file_path, rotation_config)?;

	if let Some(sensitive_config) = sensitive_config {
		logger = logger.WithSensitiveFilter(sensitive_config)?;
	}

	logger.Initialize()?;
	let _old = LOGGER_INSTANCE.set(logger);
	Ok(())
}
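
// Smoke tests for the pure pieces above that need no subscriber; a minimal sketch of
// expected behaviour rather than exhaustive coverage.
#[cfg(test)]
mod Tests {
	use super::*;

	#[test]
	fn SensitiveFilterRedactsPasswords() {
		let Filter = SensitiveDataFilter::default();
		let Output = Filter.Filter("login with password=hunter2 today");
		assert!(!Output.contains("hunter2"));
		assert!(Output.contains("[REDACTED]"));
	}

	#[test]
	fn RotationConfigRejectsZeroFiles() {
		let Config = LogRotationConfig { MaxFiles:0, ..Default::default() };
		assert!(Config.Validate().is_err());
	}

	#[test]
	fn LogEntryRejectsUnknownLevel() {
		let Entry = StructuredLogEntry {
			Timestamp:0,
			Level:"VERBOSE".to_string(),
			Message:"test".to_string(),
			RequestId:None,
			TraceId:None,
			SpanId:None,
			Operation:None,
			UserId:None,
			Metadata:HashMap::new(),
		};
		assert!(Entry.Validate().is_err());
	}
}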