fix(openai): transition to max_completion_tokens for newer models
Some checks failed
CI / Check (push) Has been cancelled
CI / Clippy (push) Has been cancelled
CI / Formatting (push) Has been cancelled
CI / Test (push) Has been cancelled
CI / Release Build (push) Has been cancelled

Newer OpenAI models (o1, o3, gpt-5) have deprecated 'max_tokens' in favor of
'max_completion_tokens'. The provider now automatically maps this parameter
to ensure compatibility and avoid 400 errors.
This commit is contained in:
2026-03-05 19:32:56 +00:00
parent 4ffc6452e0
commit f5677afba0

View File

@@ -54,7 +54,15 @@ impl super::Provider for OpenAIProvider {
 async fn chat_completion(&self, request: UnifiedRequest) -> Result<ProviderResponse, AppError> {
     let messages_json = helpers::messages_to_openai_json(&request.messages).await?;
-    let body = helpers::build_openai_body(&request, messages_json, false);
+    let mut body = helpers::build_openai_body(&request, messages_json, false);
+    // Transition: Newer OpenAI models (o1, o3, gpt-5) require max_completion_tokens
+    // instead of the legacy max_tokens parameter.
+    if request.model.starts_with("o1-") || request.model.starts_with("o3-") || request.model.contains("gpt-5") {
+        if let Some(max_tokens) = body.as_object_mut().and_then(|obj| obj.remove("max_tokens")) {
+            body["max_completion_tokens"] = max_tokens;
+        }
+    }
     let response = self
         .client
@@ -301,6 +309,13 @@ impl super::Provider for OpenAIProvider {
     // Standard OpenAI cleanup
     if let Some(obj) = body.as_object_mut() {
         obj.remove("stream_options");
+        // Transition: Newer OpenAI models (o1, o3, gpt-5) require max_completion_tokens
+        if request.model.starts_with("o1-") || request.model.starts_with("o3-") || request.model.contains("gpt-5") {
+            if let Some(max_tokens) = obj.remove("max_tokens") {
+                obj.insert("max_completion_tokens".to_string(), max_tokens);
+            }
+        }
     }
let url = format!("{}/chat/completions", self.config.base_url);