diff --git a/src/models/mod.rs b/src/models/mod.rs
index a56fa8da..0a3f8671 100644
--- a/src/models/mod.rs
+++ b/src/models/mod.rs
@@ -38,7 +38,7 @@ pub struct ChatMessage {
     pub role: String, // "system", "user", "assistant", "tool"
     #[serde(flatten)]
     pub content: MessageContent,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(alias = "reasoning", alias = "thought", skip_serializing_if = "Option::is_none")]
     pub reasoning_content: Option<String>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub tool_calls: Option<Vec<ToolCall>>,
@@ -192,7 +192,7 @@ pub struct ChatStreamChoice {
 pub struct ChatStreamDelta {
     pub role: Option<String>,
     pub content: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(alias = "reasoning", alias = "thought", skip_serializing_if = "Option::is_none")]
     pub reasoning_content: Option<String>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub tool_calls: Option<Vec<ToolCallDelta>>,
diff --git a/src/providers/deepseek.rs b/src/providers/deepseek.rs
index c964ee55..207f3f53 100644
--- a/src/providers/deepseek.rs
+++ b/src/providers/deepseek.rs
@@ -58,7 +58,34 @@ impl super::Provider for DeepSeekProvider {
     async fn chat_completion(&self, request: UnifiedRequest) -> Result {
         let messages_json = helpers::messages_to_openai_json(&request.messages).await?;
-        let body = helpers::build_openai_body(&request, messages_json, false);
+        let mut body = helpers::build_openai_body(&request, messages_json, false);
+
+        // Sanitize and fix for deepseek-reasoner (R1)
+        if request.model == "deepseek-reasoner" {
+            if let Some(obj) = body.as_object_mut() {
+                // Remove unsupported parameters
+                obj.remove("temperature");
+                obj.remove("top_p");
+                obj.remove("presence_penalty");
+                obj.remove("frequency_penalty");
+                obj.remove("logit_bias");
+                obj.remove("logprobs");
+                obj.remove("top_logprobs");
+
+                // ENSURE: assistant messages with tool_calls must have reasoning_content
+                if let Some(messages) = obj.get_mut("messages").and_then(|m| m.as_array_mut()) {
+                    for m in messages {
+                        if m["role"] == "assistant" && m.get("tool_calls").is_some() {
+                            if m.get("reasoning_content").is_none() || m["reasoning_content"].is_null() {
+                                // DeepSeek R1 requires reasoning_content for tool calls in history.
+                                // If missing (e.g. from client or other model), inject a placeholder.
+                                m["reasoning_content"] = serde_json::json!("Thinking...");
+                            }
+                        }
+                    }
+                }
+            }
+        }
 
         let response = self
             .client
@@ -118,9 +145,34 @@ impl super::Provider for DeepSeekProvider {
         let messages_json = helpers::messages_to_openai_json_text_only(&request.messages).await?;
         let mut body = helpers::build_openai_body(&request, messages_json, true);
 
-        // Standard OpenAI cleanup
-        if let Some(obj) = body.as_object_mut() {
-            obj.remove("stream_options");
+        // Sanitize and fix for deepseek-reasoner (R1)
+        if request.model == "deepseek-reasoner" {
+            if let Some(obj) = body.as_object_mut() {
+                obj.remove("stream_options");
+                obj.remove("temperature");
+                obj.remove("top_p");
+                obj.remove("presence_penalty");
+                obj.remove("frequency_penalty");
+                obj.remove("logit_bias");
+                obj.remove("logprobs");
+                obj.remove("top_logprobs");
+
+                // ENSURE: assistant messages with tool_calls must have reasoning_content
+                if let Some(messages) = obj.get_mut("messages").and_then(|m| m.as_array_mut()) {
+                    for m in messages {
+                        if m["role"] == "assistant" && m.get("tool_calls").is_some() {
+                            if m.get("reasoning_content").is_none() || m["reasoning_content"].is_null() {
+                                m["reasoning_content"] = serde_json::json!("Thinking...");
+                            }
+                        }
+                    }
+                }
+            }
+        } else {
+            // For standard deepseek-chat, keep it clean
+            if let Some(obj) = body.as_object_mut() {
+                obj.remove("stream_options");
+            }
         }
 
         let url = format!("{}/chat/completions", self.config.base_url);