diff --git a/server_manager/Cargo.lock b/server_manager/Cargo.lock index 9d73bac..b9a528a 100644 --- a/server_manager/Cargo.lock +++ b/server_manager/Cargo.lock @@ -347,6 +347,7 @@ dependencies = [ "ciborium", "clap", "criterion-plot", + "futures", "is-terminal", "itertools", "num-traits", @@ -359,6 +360,7 @@ dependencies = [ "serde_derive", "serde_json", "tinytemplate", + "tokio", "walkdir", ] diff --git a/server_manager/Cargo.toml b/server_manager/Cargo.toml index 475f80e..64dd121 100644 --- a/server_manager/Cargo.toml +++ b/server_manager/Cargo.toml @@ -26,12 +26,16 @@ time = "0.3" rand = "0.10.0" [dev-dependencies] -criterion = "0.5" +criterion = { version = "0.5", features = ["async_tokio"] } [[bench]] name = "service_benchmark" harness = false +[[bench]] +name = "config_benchmark" +harness = false + [workspace] members = ["."] diff --git a/server_manager/benches/config_benchmark.rs b/server_manager/benches/config_benchmark.rs new file mode 100644 index 0000000..69f56e4 --- /dev/null +++ b/server_manager/benches/config_benchmark.rs @@ -0,0 +1,18 @@ +use criterion::{criterion_group, criterion_main, Criterion}; +use server_manager::core::config::Config; +use tokio::runtime::Runtime; + +fn benchmark_config_load_async(c: &mut Criterion) { + let rt = Runtime::new().unwrap(); + // Ensure config.yaml exists + std::fs::write("config.yaml", "disabled_services: []").unwrap(); + + c.bench_function("config_load_async", |b| { + b.to_async(&rt).iter(|| async { + let _ = Config::load_async().await; + }) + }); +} + +criterion_group!(benches, benchmark_config_load_async); +criterion_main!(benches); diff --git a/server_manager/src/core/config.rs b/server_manager/src/core/config.rs index c57c2a7..8129160 100644 --- a/server_manager/src/core/config.rs +++ b/server_manager/src/core/config.rs @@ -5,13 +5,14 @@ use std::collections::HashSet; use std::fs; use std::path::Path; use std::sync::OnceLock; -use std::time::SystemTime; +use std::time::{Duration, SystemTime}; use 
tokio::sync::RwLock; #[derive(Debug, Clone)] struct CachedConfig { config: Config, last_mtime: Option<SystemTime>, + last_check: SystemTime, } static CONFIG_CACHE: OnceLock<RwLock<CachedConfig>> = OnceLock::new(); @@ -42,64 +43,77 @@ impl Config { RwLock::new(CachedConfig { config: Config::default(), last_mtime: None, + last_check: SystemTime::UNIX_EPOCH, }) }); - // Fast path: Optimistic read + let now = SystemTime::now(); + + // 1. Highly optimistic path: if we checked recently, return cache + // We throttle even if the file was missing (last_mtime is None) { let guard = cache.read().await; - if let Some(cached_mtime) = guard.last_mtime { - // Check if file still matches - if let Ok(metadata) = tokio::fs::metadata("config.yaml").await { - if let Ok(modified) = metadata.modified() { - if modified == cached_mtime { - return Ok(guard.config.clone()); - } - } - } + if now.duration_since(guard.last_check).unwrap_or_default() < Duration::from_millis(500) { + return Ok(guard.config.clone()); } } - // Slow path: Update cache + // 2. Perform metadata check (outside of lock to minimize contention) + let metadata_res = tokio::fs::metadata("config.yaml").await; + let mtime = match &metadata_res { + Ok(m) => Some(m.modified().unwrap_or(SystemTime::now())), + Err(_) => None, + }; + + // 3. 
Update cache if needed let mut guard = cache.write().await; - // Check metadata again (double-checked locking pattern) - let metadata_res = tokio::fs::metadata("config.yaml").await; + // If file hasn't changed, just update last_check and return + if guard.last_mtime == mtime { + guard.last_check = now; + return Ok(guard.config.clone()); + } + // Actually reload match metadata_res { - Ok(metadata) => { - let modified = metadata.modified().unwrap_or(SystemTime::now()); - - if let Some(cached_mtime) = guard.last_mtime { - if modified == cached_mtime { - return Ok(guard.config.clone()); + Ok(_) => match tokio::fs::read_to_string("config.yaml").await { + Ok(content) => { + let config_res: Result<Config, anyhow::Error> = if content.trim().is_empty() { + Ok(Config::default()) + } else { + serde_yaml_ng::from_str(&content).map_err(|e| anyhow::anyhow!(e)) + }; + + match config_res { + Ok(config) => { + guard.config = config.clone(); + guard.last_mtime = mtime; + guard.last_check = now; + Ok(config) + } + Err(e) => { + // On parse error, keep the old config but update last_check to prevent spamming + guard.last_check = now; + Err(e.context("Failed to parse config.yaml")) + } + } } } - - // Load file - match tokio::fs::read_to_string("config.yaml").await { - Ok(content) => { - let config = if content.trim().is_empty() { - Config::default() - } else { - serde_yaml_ng::from_str(&content) - .context("Failed to parse config.yaml")? 
- }; - - guard.config = config.clone(); - guard.last_mtime = Some(modified); - Ok(config) - } - Err(e) => Err(anyhow::Error::new(e).context("Failed to read config.yaml")), + Err(e) => { + guard.last_check = now; + Err(anyhow::Error::new(e).context("Failed to read config.yaml")) } - } + }, Err(e) if e.kind() == std::io::ErrorKind::NotFound => { // File not found -> Default guard.config = Config::default(); guard.last_mtime = None; + guard.last_check = now; Ok(guard.config.clone()) } - Err(e) => Err(anyhow::Error::new(e).context("Failed to read config metadata")), + Err(e) => { + guard.last_check = now; + Err(anyhow::Error::new(e).context("Failed to read config metadata")) + } } } diff --git a/server_manager/src/interface/web.rs b/server_manager/src/interface/web.rs index 1f677c3..020162b 100644 --- a/server_manager/src/interface/web.rs +++ b/server_manager/src/interface/web.rs @@ -28,11 +28,6 @@ struct SessionUser { const SESSION_KEY: &str = "user"; -struct CachedConfig { - config: Config, - last_modified: Option<SystemTime>, -} - struct CachedUsers { manager: UserManager, last_modified: Option<SystemTime>, @@ -41,7 +36,6 @@ struct AppState { system: Mutex<System>, last_system_refresh: Mutex<SystemTime>, - config_cache: RwLock<CachedConfig>, users_cache: RwLock<CachedUsers>, } @@ -49,38 +43,7 @@ type SharedState = Arc<AppState>; impl AppState { async fn get_config(&self) -> Config { - // Fast path: check metadata - let current_mtime = tokio::fs::metadata("config.yaml") - .await - .and_then(|m| m.modified()) - .ok(); - - { - let cache = self.config_cache.read().await; - if cache.last_modified == current_mtime { - return cache.config.clone(); - } - } - - // Slow path: reload - let mut cache = self.config_cache.write().await; - - // Re-check mtime under write lock to avoid race - let current_mtime_2 = tokio::fs::metadata("config.yaml") - .await - .and_then(|m| m.modified()) - .ok(); - - if cache.last_modified == current_mtime_2 { - return cache.config.clone(); - } - - if let Ok(cfg) = Config::load_async().await { - cache.config 
= cfg; - cache.last_modified = current_mtime_2; - } - - cache.config.clone() + Config::load_async().await.unwrap_or_default() } async fn get_users(&self) -> UserManager { @@ -134,11 +97,6 @@ pub async fn start_server(port: u16) -> anyhow::Result<()> { let mut sys = System::new_all(); sys.refresh_all(); - let initial_config = Config::load().unwrap_or_default(); - let initial_config_mtime = std::fs::metadata("config.yaml") - .ok() - .and_then(|m| m.modified().ok()); - let initial_users = UserManager::load().unwrap_or_default(); let initial_users_mtime = std::fs::metadata("users.yaml") .ok() @@ -152,10 +110,6 @@ pub async fn start_server(port: u16) -> anyhow::Result<()> { let app_state = Arc::new(AppState { system: Mutex::new(sys), last_system_refresh: Mutex::new(SystemTime::now()), - config_cache: RwLock::new(CachedConfig { - config: initial_config, - last_modified: initial_config_mtime, - }), users_cache: RwLock::new(CachedUsers { manager: initial_users, last_modified: initial_users_mtime,