diff --git a/src/providers/openai.rs b/src/providers/openai.rs
index 08d26053..ad9b74cf 100644
--- a/src/providers/openai.rs
+++ b/src/providers/openai.rs
@@ -159,10 +159,10 @@ impl super::Provider for OpenAIProvider {
             body["temperature"] = serde_json::json!(temp);
         }
 
-        // Newer models (gpt-5, o1) prefer max_completion_tokens
+        // Newer models (gpt-5, o1, o3) in the Responses API use max_output_tokens
         if let Some(max_tokens) = request.max_tokens {
             if request.model.contains("gpt-5") || request.model.starts_with("o1-") || request.model.starts_with("o3-") {
-                body["max_completion_tokens"] = serde_json::json!(max_tokens);
+                body["max_output_tokens"] = serde_json::json!(max_tokens);
            } else {
                body["max_tokens"] = serde_json::json!(max_tokens);
            }
@@ -423,10 +423,10 @@ impl super::Provider for OpenAIProvider {
             body["temperature"] = serde_json::json!(temp);
         }
 
-        // Newer models (gpt-5, o1) prefer max_completion_tokens
+        // Newer models (gpt-5, o1, o3) in the Responses API use max_output_tokens
         if let Some(max_tokens) = request.max_tokens {
             if request.model.contains("gpt-5") || request.model.starts_with("o1-") || request.model.starts_with("o3-") {
-                body["max_completion_tokens"] = serde_json::json!(max_tokens);
+                body["max_output_tokens"] = serde_json::json!(max_tokens);
            } else {
                body["max_tokens"] = serde_json::json!(max_tokens);
            }