From f5677afba01cc17bb78a904121d60e7300507d64 Mon Sep 17 00:00:00 2001
From: hobokenchicken
Date: Thu, 5 Mar 2026 19:32:56 +0000
Subject: [PATCH] fix(openai): transition to max_completion_tokens for newer models

Newer OpenAI models (o1, o3, gpt-5) have deprecated 'max_tokens' in favor
of 'max_completion_tokens'. The provider now automatically maps this
parameter to ensure compatibility and avoid 400 errors.
---
 src/providers/openai.rs | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/src/providers/openai.rs b/src/providers/openai.rs
index dfbfa54d..7a63d31e 100644
--- a/src/providers/openai.rs
+++ b/src/providers/openai.rs
@@ -54,7 +54,15 @@ impl super::Provider for OpenAIProvider {
     async fn chat_completion(&self, request: UnifiedRequest) -> Result {
         let messages_json = helpers::messages_to_openai_json(&request.messages).await?;
 
-        let body = helpers::build_openai_body(&request, messages_json, false);
+        let mut body = helpers::build_openai_body(&request, messages_json, false);
+
+        // Transition: Newer OpenAI models (o1, o3, gpt-5) require max_completion_tokens
+        // instead of the legacy max_tokens parameter.
+        if request.model.starts_with("o1-") || request.model.starts_with("o3-") || request.model.contains("gpt-5") {
+            if let Some(max_tokens) = body.as_object_mut().and_then(|obj| obj.remove("max_tokens")) {
+                body["max_completion_tokens"] = max_tokens;
+            }
+        }
 
         let response = self
             .client
@@ -301,6 +309,13 @@ impl super::Provider for OpenAIProvider {
         // Standard OpenAI cleanup
         if let Some(obj) = body.as_object_mut() {
             obj.remove("stream_options");
+
+            // Transition: Newer OpenAI models (o1, o3, gpt-5) require max_completion_tokens
+            if request.model.starts_with("o1-") || request.model.starts_with("o3-") || request.model.contains("gpt-5") {
+                if let Some(max_tokens) = obj.remove("max_tokens") {
+                    obj.insert("max_completion_tokens".to_string(), max_tokens);
+                }
+            }
         }
 
         let url = format!("{}/chat/completions", self.config.base_url);