Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions server_manager/Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 5 additions & 1 deletion server_manager/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -26,12 +26,16 @@ time = "0.3"
rand = "0.10.0"

[dev-dependencies]
criterion = "0.5"
criterion = { version = "0.5", features = ["async_tokio"] }

[[bench]]
name = "service_benchmark"
harness = false

[[bench]]
name = "config_benchmark"
harness = false

[workspace]
members = ["."]

Expand Down
18 changes: 18 additions & 0 deletions server_manager/benches/config_benchmark.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
use criterion::{criterion_group, criterion_main, Criterion};
use server_manager::core::config::Config;
use tokio::runtime::Runtime;

/// Benchmarks `Config::load_async` on a Tokio runtime.
///
/// The config loader reads the hard-coded relative path `config.yaml`,
/// so this benchmark switches into a process-unique scratch directory
/// under the OS temp dir before writing the fixture. This avoids
/// clobbering a real `config.yaml` in the developer's working tree and
/// leaving test artifacts behind.
fn benchmark_config_load_async(c: &mut Criterion) {
    let rt = Runtime::new().unwrap();

    // Scratch directory keyed by PID so concurrent runs don't collide.
    let scratch = std::env::temp_dir().join(format!("config_benchmark_{}", std::process::id()));
    std::fs::create_dir_all(&scratch).unwrap();
    std::env::set_current_dir(&scratch).unwrap();

    // Minimal valid fixture for the loader.
    std::fs::write("config.yaml", "disabled_services: []").unwrap();

    c.bench_function("config_load_async", |b| {
        b.to_async(&rt).iter(|| async {
            // black_box keeps the optimizer from discarding the loaded
            // value; errors are intentionally ignored (we measure the
            // load path, not correctness).
            let _ = std::hint::black_box(Config::load_async().await);
        })
    });

    // Best-effort cleanup; may fail on platforms that refuse to remove
    // the current working directory, which is harmless in a temp dir.
    let _ = std::fs::remove_dir_all(&scratch);
}

criterion_group!(benches, benchmark_config_load_async);
criterion_main!(benches);
90 changes: 52 additions & 38 deletions server_manager/src/core/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,14 @@ use std::collections::HashSet;
use std::fs;
use std::path::Path;
use std::sync::OnceLock;
use std::time::SystemTime;
use std::time::{Duration, SystemTime};
use tokio::sync::RwLock;

#[derive(Debug, Clone)]
struct CachedConfig {
config: Config,
last_mtime: Option<SystemTime>,
last_check: SystemTime,
}

static CONFIG_CACHE: OnceLock<RwLock<CachedConfig>> = OnceLock::new();
Expand Down Expand Up @@ -42,64 +43,77 @@ impl Config {
RwLock::new(CachedConfig {
config: Config::default(),
last_mtime: None,
last_check: SystemTime::UNIX_EPOCH,
})
});

// Fast path: Optimistic read
let now = SystemTime::now();

// 1. Highly optimistic path: if we checked recently, return cache
// We throttle even if the file was missing (last_mtime is None)
{
let guard = cache.read().await;
if let Some(cached_mtime) = guard.last_mtime {
// Check if file still matches
if let Ok(metadata) = tokio::fs::metadata("config.yaml").await {
if let Ok(modified) = metadata.modified() {
if modified == cached_mtime {
return Ok(guard.config.clone());
}
}
}
if now.duration_since(guard.last_check).unwrap_or_default() < Duration::from_millis(500) {
return Ok(guard.config.clone());
}
}

// Slow path: Update cache
// 2. Perform metadata check (outside of lock to minimize contention)
let metadata_res = tokio::fs::metadata("config.yaml").await;
let mtime = match &metadata_res {
Ok(m) => Some(m.modified().unwrap_or(SystemTime::now())),
Err(_) => None,
};

// 3. Update cache if needed
let mut guard = cache.write().await;

// Check metadata again (double-checked locking pattern)
let metadata_res = tokio::fs::metadata("config.yaml").await;
// If file hasn't changed, just update last_check and return
if guard.last_mtime == mtime {
guard.last_check = now;
return Ok(guard.config.clone());
}

// Actually reload
match metadata_res {
Ok(metadata) => {
let modified = metadata.modified().unwrap_or(SystemTime::now());

if let Some(cached_mtime) = guard.last_mtime {
if modified == cached_mtime {
return Ok(guard.config.clone());
Ok(_) => match tokio::fs::read_to_string("config.yaml").await {
Ok(content) => {
let config_res: Result<Config> = if content.trim().is_empty() {
Ok(Config::default())
} else {
serde_yaml_ng::from_str(&content).map_err(|e| anyhow::anyhow!(e))
};

match config_res {
Ok(config) => {
guard.config = config.clone();
guard.last_mtime = mtime;
guard.last_check = now;
Ok(config)
}
Err(e) => {
// On parse error, keep the old config but update last_check to prevent spamming
guard.last_check = now;
Err(e.context("Failed to parse config.yaml"))
}
}
}

// Load file
match tokio::fs::read_to_string("config.yaml").await {
Ok(content) => {
let config = if content.trim().is_empty() {
Config::default()
} else {
serde_yaml_ng::from_str(&content)
.context("Failed to parse config.yaml")?
};

guard.config = config.clone();
guard.last_mtime = Some(modified);
Ok(config)
}
Err(e) => Err(anyhow::Error::new(e).context("Failed to read config.yaml")),
Err(e) => {
guard.last_check = now;
Err(anyhow::Error::new(e).context("Failed to read config.yaml"))
}
}
},
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
// File not found -> Default
guard.config = Config::default();
guard.last_mtime = None;
guard.last_check = now;
Ok(guard.config.clone())
}
Err(e) => Err(anyhow::Error::new(e).context("Failed to read config metadata")),
Err(e) => {
guard.last_check = now;
Err(anyhow::Error::new(e).context("Failed to read config metadata"))
}
}
}

Expand Down
48 changes: 1 addition & 47 deletions server_manager/src/interface/web.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,11 +28,6 @@ struct SessionUser {

const SESSION_KEY: &str = "user";

struct CachedConfig {
config: Config,
last_modified: Option<SystemTime>,
}

struct CachedUsers {
manager: UserManager,
last_modified: Option<SystemTime>,
Expand All @@ -41,46 +36,14 @@ struct CachedUsers {
struct AppState {
system: Mutex<System>,
last_system_refresh: Mutex<SystemTime>,
config_cache: RwLock<CachedConfig>,
users_cache: RwLock<CachedUsers>,
}

type SharedState = Arc<AppState>;

impl AppState {
async fn get_config(&self) -> Config {
// Fast path: check metadata
let current_mtime = tokio::fs::metadata("config.yaml")
.await
.and_then(|m| m.modified())
.ok();

{
let cache = self.config_cache.read().await;
if cache.last_modified == current_mtime {
return cache.config.clone();
}
}

// Slow path: reload
let mut cache = self.config_cache.write().await;

// Re-check mtime under write lock to avoid race
let current_mtime_2 = tokio::fs::metadata("config.yaml")
.await
.and_then(|m| m.modified())
.ok();

if cache.last_modified == current_mtime_2 {
return cache.config.clone();
}

if let Ok(cfg) = Config::load_async().await {
cache.config = cfg;
cache.last_modified = current_mtime_2;
}

cache.config.clone()
Config::load_async().await.unwrap_or_default()
}

async fn get_users(&self) -> UserManager {
Expand Down Expand Up @@ -134,11 +97,6 @@ pub async fn start_server(port: u16) -> anyhow::Result<()> {
let mut sys = System::new_all();
sys.refresh_all();

let initial_config = Config::load().unwrap_or_default();
let initial_config_mtime = std::fs::metadata("config.yaml")
.ok()
.and_then(|m| m.modified().ok());

let initial_users = UserManager::load().unwrap_or_default();
let initial_users_mtime = std::fs::metadata("users.yaml")
.ok()
Expand All @@ -152,10 +110,6 @@ pub async fn start_server(port: u16) -> anyhow::Result<()> {
let app_state = Arc::new(AppState {
system: Mutex::new(sys),
last_system_refresh: Mutex::new(SystemTime::now()),
config_cache: RwLock::new(CachedConfig {
config: initial_config,
last_modified: initial_config_mtime,
}),
users_cache: RwLock::new(CachedUsers {
manager: initial_users,
last_modified: initial_users_mtime,
Expand Down