feat: add tool-calling passthrough for all providers
Implement full OpenAI-compatible tool-calling support across the proxy, enabling OpenCode to use llm-proxy as its sole LLM backend.

- Add 9 tool-calling types (Tool, FunctionDef, ToolChoice, ToolCall, etc.)
- Update ChatCompletionRequest/ChatMessage/ChatStreamDelta with tool fields
- Update UnifiedRequest/UnifiedMessage to carry tool data through the pipeline
- Shared helpers: messages_to_openai_json handles tool messages, build_openai_body includes tools/tool_choice, parse/stream extract tool_calls from responses
- Gemini: full OpenAI<->Gemini format translation (functionDeclarations, functionCall/functionResponse, synthetic call IDs, tool_config mapping)
- Gemini: extract duplicated message-conversion into shared convert_messages()
- Server: SSE streams include tool_calls deltas, finish_reason='tool_calls'
- AggregatingStream: accumulate tool call deltas across stream chunks
- OpenAI provider: add o4- prefix to supports_model()
This commit is contained in:
@@ -50,6 +50,7 @@ pub trait Provider: Send + Sync {
 pub struct ProviderResponse {
     pub content: String,
     pub reasoning_content: Option<String>,
+    pub tool_calls: Option<Vec<crate::models::ToolCall>>,
     pub prompt_tokens: u32,
     pub completion_tokens: u32,
     pub total_tokens: u32,
@@ -61,6 +62,7 @@ pub struct ProviderStreamChunk {
     pub content: String,
     pub reasoning_content: Option<String>,
     pub finish_reason: Option<String>,
+    pub tool_calls: Option<Vec<crate::models::ToolCallDelta>>,
     pub model: String,
 }
Reference in New Issue
Block a user