feat: add tool-calling passthrough for all providers
Implement full OpenAI-compatible tool-calling support across the proxy, enabling OpenCode to use llm-proxy as its sole LLM backend.

- Add 9 tool-calling types (Tool, FunctionDef, ToolChoice, ToolCall, etc.)
- Update ChatCompletionRequest/ChatMessage/ChatStreamDelta with tool fields
- Update UnifiedRequest/UnifiedMessage to carry tool data through the pipeline
- Shared helpers: messages_to_openai_json handles tool messages, build_openai_body includes tools/tool_choice, parse/stream extract tool_calls from responses
- Gemini: full OpenAI<->Gemini format translation (functionDeclarations, functionCall/functionResponse, synthetic call IDs, tool_config mapping)
- Gemini: extract duplicated message-conversion into shared convert_messages()
- Server: SSE streams include tool_calls deltas, finish_reason='tool_calls'
- AggregatingStream: accumulate tool call deltas across stream chunks
- OpenAI provider: add o4- prefix to supports_model()
This commit is contained in:
@@ -325,11 +325,16 @@ pub(super) async fn handle_test_provider(
|
||||
messages: vec![crate::models::UnifiedMessage {
|
||||
role: "user".to_string(),
|
||||
content: vec![crate::models::ContentPart::Text { text: "Hi".to_string() }],
|
||||
tool_calls: None,
|
||||
name: None,
|
||||
tool_call_id: None,
|
||||
}],
|
||||
temperature: None,
|
||||
max_tokens: Some(5),
|
||||
stream: false,
|
||||
has_images: false,
|
||||
tools: None,
|
||||
tool_choice: None,
|
||||
};
|
||||
|
||||
match provider.chat_completion(test_request).await {
|
||||
|
||||
Reference in New Issue
Block a user