Files
GopherGate/src/dashboard/system.rs
hobokenchicken cc5eba1957
Some checks failed
CI / Check (push) Has been cancelled
CI / Clippy (push) Has been cancelled
CI / Formatting (push) Has been cancelled
CI / Test (push) Has been cancelled
CI / Release Build (push) Has been cancelled
feat: implement reasoning_tokens tracking and enhanced usage logging
2026-03-11 17:14:49 +00:00

406 lines
14 KiB
Rust

use axum::{extract::State, response::Json};
use chrono;
use serde_json;
use sqlx::Row;
use std::collections::HashMap;
use tracing::warn;
use super::{ApiResponse, DashboardState};
/// Best-effort read of a /proc pseudo-file (or any path); any I/O failure —
/// missing file, permission error, non-Linux host — yields `None`.
fn read_proc_file(path: &str) -> Option<String> {
    match std::fs::read_to_string(path) {
        Ok(contents) => Some(contents),
        Err(_) => None,
    }
}
/// Dashboard health-check endpoint (admin only).
///
/// Reports component status (API server, database, each provider), process
/// memory usage, and database pool counters. All probes are best-effort.
pub(super) async fn handle_system_health(
    State(state): State<DashboardState>,
    headers: axum::http::HeaderMap,
) -> Json<ApiResponse<serde_json::Value>> {
    // Bail out early with the auth error response if the request does not
    // carry a valid admin session.
    let (_session, _) = match super::auth::require_admin(&state, &headers).await {
        Ok((session, new_token)) => (session, new_token),
        Err(e) => return e,
    };

    // If we got this far, the API server and database are necessarily up.
    let mut components = HashMap::new();
    components.insert("api_server".to_string(), "online".to_string());
    components.insert("database".to_string(), "online".to_string());

    // Check provider health via circuit breakers
    let provider_ids: Vec<String> = state
        .app_state
        .provider_manager
        .get_all_providers()
        .await
        .iter()
        .map(|p| p.name().to_string())
        .collect();
    for p_id in provider_ids {
        // A provider currently rejected by the rate limiter is reported as
        // "degraded". Errors from the check itself are treated as healthy
        // (unwrap_or(true)) so a broken limiter doesn't mask provider status.
        let healthy = state
            .app_state
            .rate_limit_manager
            .check_provider_request(&p_id)
            .await
            .unwrap_or(true);
        let status = if healthy { "online" } else { "degraded" };
        components.insert(p_id, status.to_string());
    }

    // Read real memory usage (resident set size, reported in kB) from the
    // "VmRSS:" line of /proc/self/status; 0.0 on non-Linux or parse failure.
    let memory_mb = read_proc_file("/proc/self/status")
        .and_then(|s| {
            s.lines()
                .find(|l| l.starts_with("VmRSS:"))
                .and_then(|l| l.split_whitespace().nth(1))
                .and_then(|v| v.parse::<f64>().ok())
        })
        .map(|kb| kb / 1024.0)
        .unwrap_or(0.0);

    // Get real database pool stats
    let db_pool_size = state.app_state.db_pool.size();
    let db_pool_idle = state.app_state.db_pool.num_idle();

    Json(ApiResponse::success(serde_json::json!({
        "status": "healthy",
        "timestamp": chrono::Utc::now().to_rfc3339(),
        "components": components,
        "metrics": {
            // Rounded to one decimal place.
            "memory_usage_mb": (memory_mb * 10.0).round() / 10.0,
            // saturating_sub: the pool's size/idle counters are read
            // independently, so idle can transiently exceed size; a plain
            // subtraction would underflow u32 (panic in debug builds).
            "db_connections_active": db_pool_size.saturating_sub(db_pool_idle as u32),
            "db_connections_idle": db_pool_idle,
        }
    })))
}
/// Real system metrics from /proc (Linux only).
///
/// Every probe is best-effort: on non-Linux hosts or on parse failure the
/// corresponding metric degrades to zero instead of failing the request.
pub(super) async fn handle_system_metrics(
    State(state): State<DashboardState>,
    headers: axum::http::HeaderMap,
) -> Json<ApiResponse<serde_json::Value>> {
    // Admin-only endpoint.
    let (_session, _) = match super::auth::require_admin(&state, &headers).await {
        Ok((session, new_token)) => (session, new_token),
        Err(e) => return e,
    };

    // --- CPU usage (aggregate across all cores) ---
    // /proc/stat first line: cpu user nice system idle iowait irq softirq steal guest guest_nice
    // NOTE(review): these counters are cumulative since boot, so this value is
    // the average utilization since boot rather than instantaneous load; a
    // point-in-time figure would require two samples spaced apart.
    let cpu_percent = read_proc_file("/proc/stat")
        .and_then(|s| {
            let line = s.lines().find(|l| l.starts_with("cpu "))?.to_string();
            let fields: Vec<u64> = line
                .split_whitespace()
                .skip(1)
                .filter_map(|v| v.parse().ok())
                .collect();
            if fields.len() >= 4 {
                let idle = fields[3];
                let total: u64 = fields.iter().sum();
                if total > 0 {
                    // Busy fraction as a percentage, rounded to one decimal.
                    Some(((total - idle) as f64 / total as f64 * 100.0 * 10.0).round() / 10.0)
                } else {
                    None
                }
            } else {
                None
            }
        })
        .unwrap_or(0.0);

    // --- Memory (system-wide from /proc/meminfo) ---
    let meminfo = read_proc_file("/proc/meminfo").unwrap_or_default();
    // Extract the numeric kB value from a "Key:    value kB" line; 0 if absent.
    let parse_meminfo = |key: &str| -> u64 {
        meminfo
            .lines()
            .find(|l| l.starts_with(key))
            .and_then(|l| l.split_whitespace().nth(1))
            .and_then(|v| v.parse::<u64>().ok())
            .unwrap_or(0)
    };
    let mem_total_kb = parse_meminfo("MemTotal:");
    let mem_available_kb = parse_meminfo("MemAvailable:");
    // "Used" = total minus the kernel's availability estimate, clamped at 0.
    let mem_used_kb = mem_total_kb.saturating_sub(mem_available_kb);
    let mem_total_mb = mem_total_kb as f64 / 1024.0;
    let mem_used_mb = mem_used_kb as f64 / 1024.0;
    let mem_percent = if mem_total_kb > 0 {
        (mem_used_kb as f64 / mem_total_kb as f64 * 100.0 * 10.0).round() / 10.0
    } else {
        0.0
    };

    // --- Process-specific memory (VmRSS, reported in kB) ---
    let process_rss_mb = read_proc_file("/proc/self/status")
        .and_then(|s| {
            s.lines()
                .find(|l| l.starts_with("VmRSS:"))
                .and_then(|l| l.split_whitespace().nth(1))
                .and_then(|v| v.parse::<f64>().ok())
        })
        .map(|kb| (kb / 1024.0 * 10.0).round() / 10.0)
        .unwrap_or(0.0);

    // --- Disk usage ---
    // statvfs via libc would be ideal; use df as a simple fallback.
    // NOTE(review): this measures the filesystem of the current working
    // directory ("."), which may differ from the data directory if that lives
    // on another mount — confirm against deployment layout.
    let (disk_total_gb, disk_used_gb, disk_percent) = {
        std::process::Command::new("df")
            .args(["-BM", "--output=size,used,pcent", "."])
            .output()
            .ok()
            .and_then(|o| {
                let out = String::from_utf8_lossy(&o.stdout);
                // Skip the header row; data row looks like "1024M 512M 50%".
                let line = out.lines().nth(1)?.to_string();
                let parts: Vec<&str> = line.split_whitespace().collect();
                if parts.len() >= 3 {
                    let total = parts[0].trim_end_matches('M').parse::<f64>().unwrap_or(0.0) / 1024.0;
                    let used = parts[1].trim_end_matches('M').parse::<f64>().unwrap_or(0.0) / 1024.0;
                    let pct = parts[2].trim_end_matches('%').parse::<f64>().unwrap_or(0.0);
                    Some(((total * 10.0).round() / 10.0, (used * 10.0).round() / 10.0, pct))
                } else {
                    None
                }
            })
            .unwrap_or((0.0, 0.0, 0.0))
    };

    // --- Uptime (first field of /proc/uptime, seconds since boot) ---
    let uptime_seconds = read_proc_file("/proc/uptime")
        .and_then(|s| s.split_whitespace().next().and_then(|v| v.parse::<f64>().ok()))
        .unwrap_or(0.0) as u64;

    // --- Load average (1/5/15-minute fields of /proc/loadavg) ---
    let (load_1, load_5, load_15) = read_proc_file("/proc/loadavg")
        .and_then(|s| {
            let parts: Vec<&str> = s.split_whitespace().collect();
            if parts.len() >= 3 {
                Some((
                    parts[0].parse::<f64>().unwrap_or(0.0),
                    parts[1].parse::<f64>().unwrap_or(0.0),
                    parts[2].parse::<f64>().unwrap_or(0.0),
                ))
            } else {
                None
            }
        })
        .unwrap_or((0.0, 0.0, 0.0));

    // --- Network (from /proc/net/dev, aggregate non-lo interfaces) ---
    // Each data line is "iface: rx_bytes rx_packets ... tx_bytes ...";
    // after whitespace-splitting, rx_bytes is parts[1] and tx_bytes parts[9].
    let (net_rx_bytes, net_tx_bytes) = read_proc_file("/proc/net/dev")
        .map(|s| {
            s.lines()
                .skip(2) // skip header lines
                .filter(|l| !l.trim().starts_with("lo:"))
                .fold((0u64, 0u64), |(rx, tx), line| {
                    let parts: Vec<&str> = line.split_whitespace().collect();
                    if parts.len() >= 10 {
                        let r = parts[1].parse::<u64>().unwrap_or(0);
                        let t = parts[9].parse::<u64>().unwrap_or(0);
                        (rx + r, tx + t)
                    } else {
                        (rx, tx)
                    }
                })
        })
        .unwrap_or((0, 0));

    // --- Database pool ---
    let db_pool_size = state.app_state.db_pool.size();
    let db_pool_idle = state.app_state.db_pool.num_idle();

    // --- Active WebSocket listeners (broadcast subscriber count) ---
    let ws_listeners = state.app_state.dashboard_tx.receiver_count();

    Json(ApiResponse::success(serde_json::json!({
        "cpu": {
            "usage_percent": cpu_percent,
            "load_average": [load_1, load_5, load_15],
        },
        "memory": {
            "total_mb": (mem_total_mb * 10.0).round() / 10.0,
            "used_mb": (mem_used_mb * 10.0).round() / 10.0,
            "usage_percent": mem_percent,
            "process_rss_mb": process_rss_mb,
        },
        "disk": {
            "total_gb": disk_total_gb,
            "used_gb": disk_used_gb,
            "usage_percent": disk_percent,
        },
        "network": {
            "rx_bytes": net_rx_bytes,
            "tx_bytes": net_tx_bytes,
        },
        "uptime_seconds": uptime_seconds,
        "connections": {
            // saturating_sub: size/idle are read independently, so idle can
            // transiently exceed size; plain subtraction would underflow u32.
            "db_active": db_pool_size.saturating_sub(db_pool_idle as u32),
            "db_idle": db_pool_idle,
            "websocket_listeners": ws_listeners,
        },
        "timestamp": chrono::Utc::now().to_rfc3339(),
    })))
}
pub(super) async fn handle_system_logs(
State(state): State<DashboardState>,
headers: axum::http::HeaderMap,
) -> Json<ApiResponse<serde_json::Value>> {
let (_session, _) = match super::auth::require_admin(&state, &headers).await {
Ok((session, new_token)) => (session, new_token),
Err(e) => return e,
};
let pool = &state.app_state.db_pool;
let result = sqlx::query(
r#"
SELECT
id,
timestamp,
client_id,
provider,
model,
prompt_tokens,
completion_tokens,
reasoning_tokens,
total_tokens,
cache_read_tokens,
cache_write_tokens,
cost,
status,
error_message,
duration_ms
FROM llm_requests
ORDER BY timestamp DESC
LIMIT 100
"#,
)
.fetch_all(pool)
.await;
match result {
Ok(rows) => {
let logs: Vec<serde_json::Value> = rows
.into_iter()
.map(|row| {
serde_json::json!({
"id": row.get::<i64, _>("id"),
"timestamp": row.get::<chrono::DateTime<chrono::Utc>, _>("timestamp"),
"client_id": row.get::<String, _>("client_id"),
"provider": row.get::<String, _>("provider"),
"model": row.get::<String, _>("model"),
"prompt_tokens": row.get::<i64, _>("prompt_tokens"),
"completion_tokens": row.get::<i64, _>("completion_tokens"),
"reasoning_tokens": row.get::<i64, _>("reasoning_tokens"),
"cache_read_tokens": row.get::<i64, _>("cache_read_tokens"),
"cache_write_tokens": row.get::<i64, _>("cache_write_tokens"),
"tokens": row.get::<i64, _>("total_tokens"),
"cost": row.get::<f64, _>("cost"),
"status": row.get::<String, _>("status"),
"error": row.get::<Option<String>, _>("error_message"),
"duration": row.get::<i64, _>("duration_ms"),
})
})
.collect();
Json(ApiResponse::success(serde_json::json!(logs)))
}
Err(e) => {
warn!("Failed to fetch system logs: {}", e);
Json(ApiResponse::error("Failed to fetch system logs".to_string()))
}
}
}
/// Creates a timestamped SQLite backup under `data/` (admin only).
pub(super) async fn handle_system_backup(
    State(state): State<DashboardState>,
    headers: axum::http::HeaderMap,
) -> Json<ApiResponse<serde_json::Value>> {
    // Require an admin session; propagate the auth error response otherwise.
    let (_session, _) = match super::auth::require_admin(&state, &headers).await {
        Ok((session, new_token)) => (session, new_token),
        Err(e) => return e,
    };

    let pool = &state.app_state.db_pool;

    // Backup file named after the current Unix timestamp.
    let backup_id = format!("backup-{}", chrono::Utc::now().timestamp());
    let backup_path = format!("data/{}.db", backup_id);

    // Make sure the target directory exists before SQLite tries to write there.
    if let Err(e) = std::fs::create_dir_all("data") {
        return Json(ApiResponse::error(format!("Failed to create backup directory: {}", e)));
    }

    // Use SQLite VACUUM INTO for a consistent backup. The path is interpolated
    // into the SQL, but it is generated locally from a timestamp (no user
    // input), so there is no injection surface here.
    let vacuum = sqlx::query(&format!("VACUUM INTO '{}'", backup_path))
        .execute(pool)
        .await;

    match vacuum {
        Err(e) => {
            warn!("Database backup failed: {}", e);
            Json(ApiResponse::error(format!("Backup failed: {}", e)))
        }
        Ok(_) => {
            // Report the size of the file we just wrote; 0 if it can't be read.
            let size_bytes = std::fs::metadata(&backup_path).map(|m| m.len()).unwrap_or(0);
            Json(ApiResponse::success(serde_json::json!({
                "success": true,
                "message": "Backup completed successfully",
                "backup_id": backup_id,
                "backup_path": backup_path,
                "size_bytes": size_bytes,
            })))
        }
    }
}
pub(super) async fn handle_get_settings(
State(state): State<DashboardState>,
headers: axum::http::HeaderMap,
) -> Json<ApiResponse<serde_json::Value>> {
let (_session, _) = match super::auth::require_admin(&state, &headers).await {
Ok((session, new_token)) => (session, new_token),
Err(e) => return e,
};
let registry = &state.app_state.model_registry;
let provider_count = registry.providers.len();
let model_count: usize = registry.providers.values().map(|p| p.models.len()).sum();
Json(ApiResponse::success(serde_json::json!({
"server": {
"auth_tokens": state.app_state.auth_tokens.iter().map(|t| mask_token(t)).collect::<Vec<_>>(),
"version": env!("CARGO_PKG_VERSION"),
},
"registry": {
"provider_count": provider_count,
"model_count": model_count,
},
"database": {
"type": "SQLite",
}
})))
}
pub(super) async fn handle_update_settings(
State(state): State<DashboardState>,
headers: axum::http::HeaderMap,
) -> Json<ApiResponse<serde_json::Value>> {
let (_session, _) = match super::auth::require_admin(&state, &headers).await {
Ok((session, new_token)) => (session, new_token),
Err(e) => return e,
};
Json(ApiResponse::error(
"Changing settings at runtime is not yet supported. Please update your config file and restart the server."
.to_string(),
))
}
// Helper functions
/// Masks an auth token for display: tokens of 8 characters or fewer become
/// "*****"; longer tokens show asterisks followed by the last 4 characters,
/// with total output length capped at 12.
///
/// Operates on characters (not bytes) so multi-byte UTF-8 tokens cannot
/// trigger a slice panic at a non-char boundary, which the previous
/// byte-index implementation could (`&token[token.len() - 4..]`).
fn mask_token(token: &str) -> String {
    const VISIBLE_LEN: usize = 4;
    const MAX_MASKED_LEN: usize = 12;

    let char_count = token.chars().count();
    if char_count <= 8 {
        // Too short to reveal a suffix without leaking most of the token.
        return "*****".to_string();
    }

    let mask_len = char_count.min(MAX_MASKED_LEN) - VISIBLE_LEN;
    let suffix: String = token.chars().skip(char_count - VISIBLE_LEN).collect();
    format!("{}{}", "*".repeat(mask_len), suffix)
}