fix(openai): use max_output_tokens for Responses API
Some checks failed
CI / Check (push) Has been cancelled
CI / Clippy (push) Has been cancelled
CI / Formatting (push) Has been cancelled
CI / Test (push) Has been cancelled
CI / Release Build (push) Has been cancelled

This commit is contained in:
2026-03-17 18:36:05 +00:00
parent 0d28241e39
commit e123f542f1

View File

@@ -159,10 +159,10 @@ impl super::Provider for OpenAIProvider {
             body["temperature"] = serde_json::json!(temp);
         }
-        // Newer models (gpt-5, o1) prefer max_completion_tokens
+        // Newer models (gpt-5, o1) in Responses API use max_output_tokens
         if let Some(max_tokens) = request.max_tokens {
             if request.model.contains("gpt-5") || request.model.starts_with("o1-") || request.model.starts_with("o3-") {
-                body["max_completion_tokens"] = serde_json::json!(max_tokens);
+                body["max_output_tokens"] = serde_json::json!(max_tokens);
             } else {
                 body["max_tokens"] = serde_json::json!(max_tokens);
             }
@@ -423,10 +423,10 @@ impl super::Provider for OpenAIProvider {
             body["temperature"] = serde_json::json!(temp);
         }
-        // Newer models (gpt-5, o1) prefer max_completion_tokens
+        // Newer models (gpt-5, o1) in Responses API use max_output_tokens
         if let Some(max_tokens) = request.max_tokens {
            if request.model.contains("gpt-5") || request.model.starts_with("o1-") || request.model.starts_with("o3-") {
-                body["max_completion_tokens"] = serde_json::json!(max_tokens);
+                body["max_output_tokens"] = serde_json::json!(max_tokens);
            } else {
                body["max_tokens"] = serde_json::json!(max_tokens);
            }