Phase 1: Fix compilation (config_path Option<PathBuf>, streaming test, stale test cleanup) Phase 2: Fix critical bugs (remove block_on deadlocks in 4 providers, fix broken SQL query builder) Phase 3: Security hardening (session manager, real auth, token masking, Gemini key to header, password policy) Phase 4: Implement stubs (real provider test, /proc health metrics, client/provider/backup endpoints, has_images) Phase 5: Code quality (shared provider helpers, explicit re-exports, all Clippy warnings fixed, unwrap removal, 6 unused deps removed, dashboard split into 7 sub-modules) Phase 6: Infrastructure (GitHub Actions CI, multi-stage Dockerfile, rustfmt.toml, clippy.toml, script fixes)
129 lines · 4.2 KiB · Rust
use anyhow::Result;
|
|
use async_trait::async_trait;
|
|
use futures::stream::BoxStream;
|
|
|
|
use super::helpers;
|
|
use super::{ProviderResponse, ProviderStreamChunk};
|
|
use crate::{config::AppConfig, errors::AppError, models::UnifiedRequest};
|
|
|
|
/// Provider backend for an Ollama server exposed through its
/// OpenAI-compatible `/chat/completions` endpoint.
pub struct OllamaProvider {
    // Shared HTTP client, reused across all requests.
    client: reqwest::Client,
    // Ollama-specific settings (base URL, configured model list).
    config: crate::config::OllamaConfig,
    // Per-model pricing table copied from `AppConfig.pricing.ollama`.
    pricing: Vec<crate::config::ModelPricing>,
}
impl OllamaProvider {
|
|
pub fn new(config: &crate::config::OllamaConfig, app_config: &AppConfig) -> Result<Self> {
|
|
Ok(Self {
|
|
client: reqwest::Client::new(),
|
|
config: config.clone(),
|
|
pricing: app_config.pricing.ollama.clone(),
|
|
})
|
|
}
|
|
}
|
|
|
|
#[async_trait]
|
|
impl super::Provider for OllamaProvider {
|
|
fn name(&self) -> &str {
|
|
"ollama"
|
|
}
|
|
|
|
fn supports_model(&self, model: &str) -> bool {
|
|
self.config.models.iter().any(|m| m == model) || model.starts_with("ollama/")
|
|
}
|
|
|
|
fn supports_multimodal(&self) -> bool {
|
|
true
|
|
}
|
|
|
|
async fn chat_completion(&self, mut request: UnifiedRequest) -> Result<ProviderResponse, AppError> {
|
|
// Strip "ollama/" prefix if present for the API call
|
|
let api_model = request
|
|
.model
|
|
.strip_prefix("ollama/")
|
|
.unwrap_or(&request.model)
|
|
.to_string();
|
|
let original_model = request.model.clone();
|
|
request.model = api_model;
|
|
|
|
let messages_json = helpers::messages_to_openai_json(&request.messages).await?;
|
|
let body = helpers::build_openai_body(&request, messages_json, false);
|
|
|
|
let response = self
|
|
.client
|
|
.post(format!("{}/chat/completions", self.config.base_url))
|
|
.json(&body)
|
|
.send()
|
|
.await
|
|
.map_err(|e| AppError::ProviderError(e.to_string()))?;
|
|
|
|
if !response.status().is_success() {
|
|
let error_text = response.text().await.unwrap_or_default();
|
|
return Err(AppError::ProviderError(format!("Ollama API error: {}", error_text)));
|
|
}
|
|
|
|
let resp_json: serde_json::Value = response
|
|
.json()
|
|
.await
|
|
.map_err(|e| AppError::ProviderError(e.to_string()))?;
|
|
|
|
// Ollama also supports "thought" as an alias for reasoning_content
|
|
let mut result = helpers::parse_openai_response(&resp_json, original_model)?;
|
|
if result.reasoning_content.is_none() {
|
|
result.reasoning_content = resp_json["choices"]
|
|
.get(0)
|
|
.and_then(|c| c["message"]["thought"].as_str())
|
|
.map(|s| s.to_string());
|
|
}
|
|
Ok(result)
|
|
}
|
|
|
|
fn estimate_tokens(&self, request: &UnifiedRequest) -> Result<u32> {
|
|
Ok(crate::utils::tokens::estimate_request_tokens(&request.model, request))
|
|
}
|
|
|
|
fn calculate_cost(
|
|
&self,
|
|
model: &str,
|
|
prompt_tokens: u32,
|
|
completion_tokens: u32,
|
|
registry: &crate::models::registry::ModelRegistry,
|
|
) -> f64 {
|
|
helpers::calculate_cost_with_registry(
|
|
model,
|
|
prompt_tokens,
|
|
completion_tokens,
|
|
registry,
|
|
&self.pricing,
|
|
0.0,
|
|
0.0,
|
|
)
|
|
}
|
|
|
|
async fn chat_completion_stream(
|
|
&self,
|
|
mut request: UnifiedRequest,
|
|
) -> Result<BoxStream<'static, Result<ProviderStreamChunk, AppError>>, AppError> {
|
|
let api_model = request
|
|
.model
|
|
.strip_prefix("ollama/")
|
|
.unwrap_or(&request.model)
|
|
.to_string();
|
|
let original_model = request.model.clone();
|
|
request.model = api_model;
|
|
|
|
let messages_json = helpers::messages_to_openai_json_text_only(&request.messages).await?;
|
|
let body = helpers::build_openai_body(&request, messages_json, true);
|
|
|
|
let es = reqwest_eventsource::EventSource::new(
|
|
self.client
|
|
.post(format!("{}/chat/completions", self.config.base_url))
|
|
.json(&body),
|
|
)
|
|
.map_err(|e| AppError::ProviderError(format!("Failed to create EventSource: {}", e)))?;
|
|
|
|
// Ollama uses "thought" as an alternative field for reasoning content
|
|
Ok(helpers::create_openai_stream(es, original_model, Some("thought")))
|
|
}
|
|
}
|