feat: add cache token tracking and cache-aware cost calculation
Track cache_read_tokens and cache_write_tokens end-to-end: parse from provider responses (OpenAI, DeepSeek, Grok, Gemini), persist to SQLite, apply cache-aware pricing from the model registry, and surface in API responses and the dashboard.

- Add cache fields to ProviderResponse, StreamUsage, RequestLog structs
- Parse cached_tokens (OpenAI/Grok), prompt_cache_hit/miss (DeepSeek), cachedContentTokenCount (Gemini) from provider responses
- Send stream_options.include_usage for streaming; capture real usage from final SSE chunk in AggregatingStream
- ALTER TABLE migration for cache_read_tokens/cache_write_tokens columns
- Cache-aware cost formula using registry cache_read/cache_write rates
- Update Provider trait calculate_cost signature across all providers
- Add cache_read_tokens/cache_write_tokens to Usage API response
- Dashboard: cache hit rate card, cache columns in pricing and usage tables, cache token aggregation in SQL queries
- Remove API debug panel and verbose console logging from api.js
- Bump static asset cache-bust to v5
This commit is contained in:
@@ -37,12 +37,16 @@ pub trait Provider: Send + Sync {
|
||||
/// Estimate token count for a request (for cost calculation).
///
/// Returns the estimated number of tokens the given request will consume,
/// or an error if the request cannot be analyzed.
fn estimate_tokens(&self, request: &UnifiedRequest) -> Result<u32>;
|
||||
/// Calculate cost based on token usage and model using the registry.
///
/// `cache_read_tokens` / `cache_write_tokens` allow cache-aware pricing
/// when the registry provides `cache_read` / `cache_write` rates.
///
/// * `model` - model identifier used to look up pricing in the registry
/// * `prompt_tokens` / `completion_tokens` - token counts reported by the provider
/// * `cache_read_tokens` / `cache_write_tokens` - prompt-cache token counts;
///   pass 0 when the provider reported none
/// * `registry` - pricing source for per-model rates
fn calculate_cost(
    &self,
    model: &str,
    prompt_tokens: u32,
    completion_tokens: u32,
    cache_read_tokens: u32,
    cache_write_tokens: u32,
    registry: &crate::models::registry::ModelRegistry,
) -> f64;
}
|
||||
@@ -54,9 +58,21 @@ pub struct ProviderResponse {
|
||||
    // Token counts reported by the provider for this request.
    pub prompt_tokens: u32,
    pub completion_tokens: u32,
    pub total_tokens: u32,
    // Tokens served from the provider's prompt cache; priced with the
    // registry's `cache_read` rate when available.
    pub cache_read_tokens: u32,
    // Tokens written to the provider's prompt cache; priced with the
    // registry's `cache_write` rate when available.
    pub cache_write_tokens: u32,
    // Model identifier as reported by the provider.
    pub model: String,
}
|
||||
|
||||
/// Usage data from the final streaming chunk (when providers report real token counts).
#[derive(Debug, Clone, Default)]
pub struct StreamUsage {
    // Token counts from the provider's final usage payload
    // (e.g. the last SSE chunk when stream_options.include_usage is set).
    pub prompt_tokens: u32,
    pub completion_tokens: u32,
    pub total_tokens: u32,
    // Prompt-cache token counts, used for cache-aware cost calculation;
    // remain 0 (via Default) when the provider reports none.
    pub cache_read_tokens: u32,
    pub cache_write_tokens: u32,
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ProviderStreamChunk {
|
||||
pub content: String,
|
||||
@@ -64,6 +80,8 @@ pub struct ProviderStreamChunk {
|
||||
    // Reason the stream finished, if the provider sent one for this chunk.
    pub finish_reason: Option<String>,
    // Incremental tool-call deltas carried by this chunk, if any.
    pub tool_calls: Option<Vec<crate::models::ToolCallDelta>>,
    // Model identifier as reported by the provider.
    pub model: String,
    /// Populated only on the final chunk when providers report usage (e.g. stream_options.include_usage).
    pub usage: Option<StreamUsage>,
}
|
||||
|
||||
use tokio::sync::RwLock;
|
||||
@@ -299,6 +317,8 @@ pub mod placeholder {
|
||||
_model: &str,
|
||||
_prompt_tokens: u32,
|
||||
_completion_tokens: u32,
|
||||
_cache_read_tokens: u32,
|
||||
_cache_write_tokens: u32,
|
||||
_registry: &crate::models::registry::ModelRegistry,
|
||||
) -> f64 {
|
||||
0.0
|
||||
|
||||
Reference in New Issue
Block a user