Files
GopherGate/src/providers/ollama.rs
hobokenchicken db5824f0fb
Some checks failed
CI / Check (push) Has been cancelled
CI / Clippy (push) Has been cancelled
CI / Formatting (push) Has been cancelled
CI / Test (push) Has been cancelled
CI / Release Build (push) Has been cancelled
feat: add cache token tracking and cache-aware cost calculation
Track cache_read_tokens and cache_write_tokens end-to-end: parse from
provider responses (OpenAI, DeepSeek, Grok, Gemini), persist to SQLite,
apply cache-aware pricing from the model registry, and surface in API
responses and the dashboard.

- Add cache fields to ProviderResponse, StreamUsage, RequestLog structs
- Parse cached_tokens (OpenAI/Grok), prompt_cache_hit/miss (DeepSeek),
  cachedContentTokenCount (Gemini) from provider responses
- Send stream_options.include_usage for streaming; capture real usage
  from final SSE chunk in AggregatingStream
- ALTER TABLE migration for cache_read_tokens/cache_write_tokens columns
- Cache-aware cost formula using registry cache_read/cache_write rates
- Update Provider trait calculate_cost signature across all providers
- Add cache_read_tokens/cache_write_tokens to Usage API response
- Dashboard: cache hit rate card, cache columns in pricing and usage
  tables, cache token aggregation in SQL queries
- Remove API debug panel and verbose console logging from api.js
- Bump static asset cache-bust to v5
2026-03-02 14:45:21 -05:00

141 lines
4.6 KiB
Rust

use anyhow::Result;
use async_trait::async_trait;
use futures::stream::BoxStream;
use super::helpers;
use super::{ProviderResponse, ProviderStreamChunk};
use crate::{config::AppConfig, errors::AppError, models::UnifiedRequest};
/// Provider adapter for a local Ollama server, accessed through its
/// OpenAI-compatible `/chat/completions` endpoint.
pub struct OllamaProvider {
// Shared HTTP client reused across requests (connection pooling, keep-alive).
client: reqwest::Client,
// Ollama-specific settings; base_url and the configured model list are read below.
config: crate::config::OllamaConfig,
// Per-model pricing entries copied out of the app-level config at construction.
pricing: Vec<crate::config::ModelPricing>,
}
impl OllamaProvider {
    /// Builds a provider backed by a pooled HTTP client.
    ///
    /// The client is tuned for talking to a (typically local) Ollama daemon:
    /// a short connect timeout fails fast when the daemon is unreachable,
    /// while the generous overall timeout leaves room for slow generation.
    /// Pricing entries are copied from the app-level config.
    pub fn new(config: &crate::config::OllamaConfig, app_config: &AppConfig) -> Result<Self> {
        let connect_timeout = std::time::Duration::from_secs(5);
        let request_timeout = std::time::Duration::from_secs(300);
        let idle_timeout = std::time::Duration::from_secs(90);
        let keepalive_interval = std::time::Duration::from_secs(30);

        let http_client = reqwest::Client::builder()
            .connect_timeout(connect_timeout)
            .timeout(request_timeout)
            .pool_idle_timeout(idle_timeout)
            .pool_max_idle_per_host(4)
            .tcp_keepalive(keepalive_interval)
            .build()?;

        let pricing = app_config.pricing.ollama.clone();
        Ok(Self {
            client: http_client,
            config: config.clone(),
            pricing,
        })
    }
}
/// Strips the optional "ollama/" routing prefix so the upstream API receives
/// the bare model name (e.g. "ollama/llama3" -> "llama3").
fn strip_ollama_prefix(model: &str) -> String {
    model.strip_prefix("ollama/").unwrap_or(model).to_string()
}

#[async_trait]
impl super::Provider for OllamaProvider {
    /// Provider identifier used for routing, logging, and usage records.
    fn name(&self) -> &str {
        "ollama"
    }

    /// A model is supported if it appears in the configured model list or
    /// carries the explicit "ollama/" routing prefix.
    fn supports_model(&self, model: &str) -> bool {
        self.config.models.iter().any(|m| m == model) || model.starts_with("ollama/")
    }

    fn supports_multimodal(&self) -> bool {
        true
    }

    /// Sends a non-streaming chat completion to Ollama's OpenAI-compatible
    /// endpoint and normalizes the response.
    ///
    /// The "ollama/" prefix is stripped before the API call, but the original
    /// model name is restored on the parsed response so downstream accounting
    /// sees the name the caller used.
    ///
    /// # Errors
    /// Returns `AppError::ProviderError` on transport failures, non-success
    /// HTTP statuses (status code included in the message), or unparseable
    /// response bodies.
    async fn chat_completion(&self, mut request: UnifiedRequest) -> Result<ProviderResponse, AppError> {
        let original_model = request.model.clone();
        request.model = strip_ollama_prefix(&request.model);

        let messages_json = helpers::messages_to_openai_json(&request.messages).await?;
        let body = helpers::build_openai_body(&request, messages_json, false);
        let response = self
            .client
            .post(format!("{}/chat/completions", self.config.base_url))
            .json(&body)
            .send()
            .await
            .map_err(|e| AppError::ProviderError(e.to_string()))?;

        // Capture the status before consuming the body so failures remain
        // diagnosable even when the error body is empty or unreadable.
        let status = response.status();
        if !status.is_success() {
            let error_text = response.text().await.unwrap_or_default();
            return Err(AppError::ProviderError(format!(
                "Ollama API error ({}): {}",
                status, error_text
            )));
        }

        let resp_json: serde_json::Value = response
            .json()
            .await
            .map_err(|e| AppError::ProviderError(e.to_string()))?;

        // Ollama also emits "thought" as an alias for reasoning_content, which
        // the generic OpenAI parser does not know about — backfill it here.
        let mut result = helpers::parse_openai_response(&resp_json, original_model)?;
        if result.reasoning_content.is_none() {
            result.reasoning_content = resp_json["choices"]
                .get(0)
                .and_then(|c| c["message"]["thought"].as_str())
                .map(|s| s.to_string());
        }
        Ok(result)
    }

    /// Rough token estimate used for budgeting before the request is sent.
    fn estimate_tokens(&self, request: &UnifiedRequest) -> Result<u32> {
        Ok(crate::utils::tokens::estimate_request_tokens(&request.model, request))
    }

    /// Computes request cost via the shared registry-aware helper. The two
    /// trailing 0.0 arguments are the fallback cache-read/cache-write rates;
    /// local Ollama models carry no cache surcharge.
    fn calculate_cost(
        &self,
        model: &str,
        prompt_tokens: u32,
        completion_tokens: u32,
        cache_read_tokens: u32,
        cache_write_tokens: u32,
        registry: &crate::models::registry::ModelRegistry,
    ) -> f64 {
        helpers::calculate_cost_with_registry(
            model,
            prompt_tokens,
            completion_tokens,
            cache_read_tokens,
            cache_write_tokens,
            registry,
            &self.pricing,
            0.0,
            0.0,
        )
    }

    /// Streaming chat completion over SSE.
    ///
    /// NOTE(review): this path serializes messages text-only
    /// (`messages_to_openai_json_text_only`) while the non-streaming path is
    /// fully multimodal and `supports_multimodal()` returns true — presumably
    /// a streaming limitation; confirm images are intentionally dropped here.
    ///
    /// # Errors
    /// Returns `AppError::ProviderError` if the EventSource cannot be created.
    async fn chat_completion_stream(
        &self,
        mut request: UnifiedRequest,
    ) -> Result<BoxStream<'static, Result<ProviderStreamChunk, AppError>>, AppError> {
        let original_model = request.model.clone();
        request.model = strip_ollama_prefix(&request.model);

        let messages_json = helpers::messages_to_openai_json_text_only(&request.messages).await?;
        let body = helpers::build_openai_body(&request, messages_json, true);
        let es = reqwest_eventsource::EventSource::new(
            self.client
                .post(format!("{}/chat/completions", self.config.base_url))
                .json(&body),
        )
        .map_err(|e| AppError::ProviderError(format!("Failed to create EventSource: {}", e)))?;

        // Ollama uses "thought" as an alternative field for reasoning content.
        Ok(helpers::create_openai_stream(es, original_model, Some("thought")))
    }
}