feat: add tool-calling passthrough for all providers
Some checks failed
CI / Check (push) Has been cancelled
CI / Clippy (push) Has been cancelled
CI / Formatting (push) Has been cancelled
CI / Test (push) Has been cancelled
CI / Release Build (push) Has been cancelled

Implement full OpenAI-compatible tool-calling support across the proxy,
enabling OpenCode to use llm-proxy as its sole LLM backend.

- Add 9 tool-calling types (Tool, FunctionDef, ToolChoice, ToolCall, etc.)
- Update ChatCompletionRequest/ChatMessage/ChatStreamDelta with tool fields
- Update UnifiedRequest/UnifiedMessage to carry tool data through the pipeline
- Shared helpers: messages_to_openai_json handles tool messages, build_openai_body
  includes tools/tool_choice, parse/stream extract tool_calls from responses
- Gemini: full OpenAI<->Gemini format translation (functionDeclarations,
  functionCall/functionResponse, synthetic call IDs, tool_config mapping)
- Gemini: extract duplicated message-conversion into shared convert_messages()
- Server: SSE streams include tool_calls deltas, finish_reason='tool_calls'
- AggregatingStream: accumulate tool call deltas across stream chunks
- OpenAI provider: add o4- prefix to supports_model()

This commit is contained in:
2026-03-02 09:40:57 -05:00
parent 942aa23f88
commit 9318336f62
8 changed files with 543 additions and 113 deletions

View File

@@ -1,6 +1,7 @@
use crate::client::ClientManager;
use crate::errors::AppError;
use crate::logging::{RequestLog, RequestLogger};
use crate::models::ToolCall;
use crate::providers::{Provider, ProviderStreamChunk};
use crate::utils::tokens::estimate_completion_tokens;
use futures::stream::Stream;
@@ -31,6 +32,7 @@ pub struct AggregatingStream<S> {
has_images: bool,
accumulated_content: String,
accumulated_reasoning: String,
accumulated_tool_calls: Vec<ToolCall>,
logger: Arc<RequestLogger>,
client_manager: Arc<ClientManager>,
model_registry: Arc<crate::models::registry::ModelRegistry>,
@@ -53,6 +55,7 @@ where
has_images: config.has_images,
accumulated_content: String::new(),
accumulated_reasoning: String::new(),
accumulated_tool_calls: Vec::new(),
logger: config.logger,
client_manager: config.client_manager,
model_registry: config.model_registry,
@@ -153,6 +156,38 @@ where
if let Some(reasoning) = &chunk.reasoning_content {
self.accumulated_reasoning.push_str(reasoning);
}
// Accumulate tool call deltas into complete tool calls
if let Some(deltas) = &chunk.tool_calls {
for delta in deltas {
let idx = delta.index as usize;
// Grow the accumulated_tool_calls vec if needed
while self.accumulated_tool_calls.len() <= idx {
self.accumulated_tool_calls.push(ToolCall {
id: String::new(),
call_type: "function".to_string(),
function: crate::models::FunctionCall {
name: String::new(),
arguments: String::new(),
},
});
}
let tc = &mut self.accumulated_tool_calls[idx];
if let Some(id) = &delta.id {
tc.id.clone_from(id);
}
if let Some(ct) = &delta.call_type {
tc.call_type.clone_from(ct);
}
if let Some(f) = &delta.function {
if let Some(name) = &f.name {
tc.function.name.push_str(name);
}
if let Some(args) = &f.arguments {
tc.function.arguments.push_str(args);
}
}
}
}
}
Poll::Ready(Some(Err(_))) => {
// If there's an error, we might still want to log what we got so far?
@@ -217,12 +252,14 @@ mod tests {
content: "Hello".to_string(),
reasoning_content: None,
finish_reason: None,
tool_calls: None,
model: "test".to_string(),
}),
Ok(ProviderStreamChunk {
content: " World".to_string(),
reasoning_content: None,
finish_reason: Some("stop".to_string()),
tool_calls: None,
model: "test".to_string(),
}),
];