feat: add cache token tracking and cache-aware cost calculation

Track cache_read_tokens and cache_write_tokens end-to-end: parse from
provider responses (OpenAI, DeepSeek, Grok, Gemini), persist to SQLite,
apply cache-aware pricing from the model registry, and surface in API
responses and the dashboard.

- Add cache fields to ProviderResponse, StreamUsage, RequestLog structs
- Parse cached_tokens (OpenAI/Grok), prompt_cache_hit/miss (DeepSeek), and
  cachedContentTokenCount (Gemini) from provider responses (see the
  parse_cache_tokens sketch below)
- Send stream_options.include_usage for streaming; capture real usage
  from the final SSE chunk in AggregatingStream (see the stream_options
  sketch below)
- ALTER TABLE migration for cache_read_tokens/cache_write_tokens columns
  (see the migration sketch below)
- Cache-aware cost formula using registry cache_read/cache_write rates
  (see the calculate_cost sketch after the diff)
- Update Provider trait calculate_cost signature across all providers
- Add cache_read_tokens/cache_write_tokens to Usage API response
- Dashboard: cache hit rate card, cache columns in pricing and usage
  tables, cache token aggregation in SQL queries
- Remove API debug panel and verbose console logging from api.js
- Bump static asset cache-bust to v5
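
A minimal sketch of the provider-side parsing, assuming the usage object
arrives as a serde_json::Value. Field paths follow the public provider
docs; the struct and helper names are illustrative, not this repo's
actual types, and none of the fields below carries a write count, so
write stays 0 in the sketch:

use serde_json::Value;

struct CacheTokens {
    read: u32,
    write: u32,
}

fn parse_cache_tokens(provider: &str, usage: &Value) -> CacheTokens {
    // Indexing a missing key yields Value::Null, so absent fields read as 0.
    let field = |v: &Value| v.as_u64().unwrap_or(0) as u32;
    match provider {
        // OpenAI and Grok report cached prompt tokens under prompt_tokens_details
        "openai" | "grok" => CacheTokens {
            read: field(&usage["prompt_tokens_details"]["cached_tokens"]),
            write: 0,
        },
        // DeepSeek splits the prompt into cache hits (discounted) and misses (full price)
        "deepseek" => CacheTokens {
            read: field(&usage["prompt_cache_hit_tokens"]),
            write: 0,
        },
        // Gemini: caller passes usageMetadata, which holds cachedContentTokenCount
        "gemini" => CacheTokens {
            read: field(&usage["cachedContentTokenCount"]),
            write: 0,
        },
        _ => CacheTokens { read: 0, write: 0 },
    }
}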
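
For the stream_options bullet: OpenAI-compatible APIs only report usage
on a stream when asked. A request-side sketch; the model name and
message are placeholders, not values from this commit:

// With include_usage set, the provider emits one extra terminal SSE
// chunk whose `choices` array is empty and whose `usage` object is
// populated; AggregatingStream captures that chunk as the real usage.
let mut body = serde_json::json!({
    "model": "gpt-4o",
    "messages": [{ "role": "user", "content": "hello" }],
    "stream": true,
});
body["stream_options"] = serde_json::json!({ "include_usage": true });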
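
The migration bullet, sketched with rusqlite. SQLite has no ADD COLUMN
IF NOT EXISTS, so one idempotent pattern is to swallow the
duplicate-column error on re-run; the request_logs table name is an
assumption, not necessarily this repo's schema:

use rusqlite::Connection;

fn migrate_cache_columns(conn: &Connection) -> rusqlite::Result<()> {
    // Table and column names assumed from the commit message.
    for col in ["cache_read_tokens", "cache_write_tokens"] {
        let sql = format!(
            "ALTER TABLE request_logs ADD COLUMN {col} INTEGER NOT NULL DEFAULT 0"
        );
        match conn.execute(&sql, []) {
            Ok(_) => {}
            // Re-running the migration trips a duplicate-column error; skip it.
            Err(e) if e.to_string().contains("duplicate column") => {}
            Err(e) => return Err(e),
        }
    }
    Ok(())
}
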
2026-03-02 14:45:21 -05:00
parent 232f092f27
commit db5824f0fb
19 changed files with 352 additions and 109 deletions

@@ -2,7 +2,7 @@ use crate::client::ClientManager;
 use crate::errors::AppError;
 use crate::logging::{RequestLog, RequestLogger};
 use crate::models::ToolCall;
-use crate::providers::{Provider, ProviderStreamChunk};
+use crate::providers::{Provider, ProviderStreamChunk, StreamUsage};
 use crate::state::ModelConfigCache;
 use crate::utils::tokens::estimate_completion_tokens;
 use futures::stream::Stream;
@@ -33,6 +33,8 @@ pub struct AggregatingStream<S> {
     accumulated_content: String,
     accumulated_reasoning: String,
     accumulated_tool_calls: Vec<ToolCall>,
+    /// Real usage data from the provider's final stream chunk (when available).
+    real_usage: Option<StreamUsage>,
     logger: Arc<RequestLogger>,
     client_manager: Arc<ClientManager>,
     model_registry: Arc<crate::models::registry::ModelRegistry>,
@@ -56,6 +58,7 @@ where
             accumulated_content: String::new(),
             accumulated_reasoning: String::new(),
             accumulated_tool_calls: Vec::new(),
+            real_usage: None,
             logger: config.logger,
             client_manager: config.client_manager,
             model_registry: config.model_registry,
@@ -78,33 +81,68 @@ where
         let logger = self.logger.clone();
         let client_manager = self.client_manager.clone();
         let provider = self.provider.clone();
-        let prompt_tokens = self.prompt_tokens;
+        let estimated_prompt_tokens = self.prompt_tokens;
         let has_images = self.has_images;
         let registry = self.model_registry.clone();
         let config_cache = self.model_config_cache.clone();
+        let real_usage = self.real_usage.take();

         // Estimate completion tokens (including reasoning if present)
-        let content_tokens = estimate_completion_tokens(&self.accumulated_content, &model);
-        let reasoning_tokens = if !self.accumulated_reasoning.is_empty() {
+        let estimated_content_tokens = estimate_completion_tokens(&self.accumulated_content, &model);
+        let estimated_reasoning_tokens = if !self.accumulated_reasoning.is_empty() {
             estimate_completion_tokens(&self.accumulated_reasoning, &model)
         } else {
             0
         };
-        let completion_tokens = content_tokens + reasoning_tokens;
-        let total_tokens = prompt_tokens + completion_tokens;
+        let estimated_completion = estimated_content_tokens + estimated_reasoning_tokens;

         // Spawn a background task to log the completion
         tokio::spawn(async move {
+            // Use real usage from the provider when available, otherwise fall back to estimates
+            let (prompt_tokens, completion_tokens, total_tokens, cache_read_tokens, cache_write_tokens) =
+                if let Some(usage) = &real_usage {
+                    (
+                        usage.prompt_tokens,
+                        usage.completion_tokens,
+                        usage.total_tokens,
+                        usage.cache_read_tokens,
+                        usage.cache_write_tokens,
+                    )
+                } else {
+                    (
+                        estimated_prompt_tokens,
+                        estimated_completion,
+                        estimated_prompt_tokens + estimated_completion,
+                        0u32,
+                        0u32,
+                    )
+                };
+
             // Check in-memory cache for cost overrides (no SQLite hit)
             let cost = if let Some(cached) = config_cache.get(&model).await {
                 if let (Some(p), Some(c)) = (cached.prompt_cost_per_m, cached.completion_cost_per_m) {
                     // Cost override doesn't have cache-aware pricing, use simple formula
                     (prompt_tokens as f64 * p / 1_000_000.0) + (completion_tokens as f64 * c / 1_000_000.0)
                 } else {
-                    provider.calculate_cost(&model, prompt_tokens, completion_tokens, &registry)
+                    provider.calculate_cost(
+                        &model,
+                        prompt_tokens,
+                        completion_tokens,
+                        cache_read_tokens,
+                        cache_write_tokens,
+                        &registry,
+                    )
                 }
             } else {
-                provider.calculate_cost(&model, prompt_tokens, completion_tokens, &registry)
+                provider.calculate_cost(
+                    &model,
+                    prompt_tokens,
+                    completion_tokens,
+                    cache_read_tokens,
+                    cache_write_tokens,
+                    &registry,
+                )
             };

             // Log to database
@@ -116,6 +154,8 @@ where
                 prompt_tokens,
                 completion_tokens,
                 total_tokens,
+                cache_read_tokens,
+                cache_write_tokens,
                 cost,
                 has_images,
                 status: "success".to_string(),
@@ -146,6 +186,10 @@ where
                 if let Some(reasoning) = &chunk.reasoning_content {
                     self.accumulated_reasoning.push_str(reasoning);
                 }
+                // Capture real usage from the provider when present (typically on the final chunk)
+                if let Some(usage) = &chunk.usage {
+                    self.real_usage = Some(usage.clone());
+                }
                 // Accumulate tool call deltas into complete tool calls
                 if let Some(deltas) = &chunk.tool_calls {
                     for delta in deltas {
@@ -230,7 +274,7 @@ mod tests {
         fn estimate_tokens(&self, _req: &crate::models::UnifiedRequest) -> Result<u32> {
             Ok(10)
         }

-        fn calculate_cost(&self, _model: &str, _p: u32, _c: u32, _r: &crate::models::registry::ModelRegistry) -> f64 {
+        fn calculate_cost(&self, _model: &str, _p: u32, _c: u32, _cr: u32, _cw: u32, _r: &crate::models::registry::ModelRegistry) -> f64 {
             0.05
         }
@@ -244,6 +288,7 @@ mod tests {
                 finish_reason: None,
                 tool_calls: None,
                 model: "test".to_string(),
+                usage: None,
             }),
             Ok(ProviderStreamChunk {
                 content: " World".to_string(),
@@ -251,6 +296,7 @@ mod tests {
                 finish_reason: Some("stop".to_string()),
                 tool_calls: None,
                 model: "test".to_string(),
+                usage: None,
             }),
         ];
         let inner_stream = stream::iter(chunks);
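
For reference, a sketch of what one provider's calculate_cost could look
like under the new six-argument signature above. The registry lookup and
the per-million rate field names are assumptions, and billing semantics
vary by provider; this sketch treats cached reads as a subset of the
prompt and bills cache writes on top:

fn calculate_cost(
    &self,
    model: &str,
    prompt_tokens: u32,
    completion_tokens: u32,
    cache_read_tokens: u32,
    cache_write_tokens: u32,
    registry: &ModelRegistry,
) -> f64 {
    // Hypothetical lookup; all rates are cost per million tokens.
    let Some(p) = registry.pricing(model) else { return 0.0 };
    // Cached reads are billed at the discounted rate instead of the
    // normal prompt rate, so subtract them from the prompt first.
    let uncached = prompt_tokens.saturating_sub(cache_read_tokens) as f64;
    (uncached * p.prompt_cost_per_m
        + cache_read_tokens as f64 * p.cache_read_cost_per_m
        + cache_write_tokens as f64 * p.cache_write_cost_per_m
        + completion_tokens as f64 * p.completion_cost_per_m)
        / 1_000_000.0
}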