From 78fff616602dfe6697e2958c53b9936747311337 Mon Sep 17 00:00:00 2001
From: hobokenchicken
Date: Tue, 17 Mar 2026 18:50:55 +0000
Subject: [PATCH] fix(openai): map system to developer role and enhance stream
 diagnostics for Responses API

---
 src/providers/openai.rs | 33 ++++++++++++++++++++++-----------
 1 file changed, 22 insertions(+), 11 deletions(-)

diff --git a/src/providers/openai.rs b/src/providers/openai.rs
index 74055a56..305f0114 100644
--- a/src/providers/openai.rs
+++ b/src/providers/openai.rs
@@ -112,7 +112,12 @@ impl super::Provider for OpenAIProvider {
         let messages_json = helpers::messages_to_openai_json(&request.messages).await?;
         let mut input_parts = Vec::new();
         for m in &messages_json {
-            let role = m["role"].as_str().unwrap_or("user");
+            let mut role = m["role"].as_str().unwrap_or("user").to_string();
+            // Newer models (gpt-5, o1) prefer "developer" over "system"
+            if role == "system" {
+                role = "developer".to_string();
+            }
+
             let mut content = m.get("content").cloned().unwrap_or(serde_json::json!([]));
 
             // Map content types based on role for Responses API
@@ -374,7 +379,12 @@ impl super::Provider for OpenAIProvider {
         let messages_json = helpers::messages_to_openai_json(&request.messages).await?;
         let mut input_parts = Vec::new();
         for m in &messages_json {
-            let role = m["role"].as_str().unwrap_or("user");
+            let mut role = m["role"].as_str().unwrap_or("user").to_string();
+            // Newer models (gpt-5, o1) prefer "developer" over "system"
+            if role == "system" {
+                role = "developer".to_string();
+            }
+
             let mut content = m.get("content").cloned().unwrap_or(serde_json::json!([]));
 
             // Map content types based on role for Responses API
@@ -515,21 +525,22 @@ impl super::Provider for OpenAIProvider {
                     let probe_resp = probe_client
                         .post(&url)
                         .header("Authorization", format!("Bearer {}", api_key))
+                        .header("Accept", "application/json") // Ask for JSON during probe
                         .json(&probe_body)
                         .send()
                         .await;
 
                     match probe_resp {
-                        Ok(r) if !r.status().is_success() => {
+                        Ok(r) => {
                             let status = r.status();
-                            let error_body = r.text().await.unwrap_or_default();
-                            tracing::error!("OpenAI Responses Stream Error Probe ({}): {}", status, error_body);
-                            Err(AppError::ProviderError(format!("OpenAI Responses API error ({}): {}", status, error_body)))?;
-                        }
-                        Ok(_) => {
-                            // If the probe returned 200, but the stream ended, it might be a silent failure or timeout.
-                            tracing::warn!("Responses stream ended prematurely (probe returned 200). Error: {}", e);
-                            Err(AppError::ProviderError(format!("Responses stream error (connection closed by server after 200 OK): {}", e)))?;
+                            let body = r.text().await.unwrap_or_default();
+                            if status.is_success() {
+                                tracing::warn!("Responses stream ended prematurely but probe returned 200 OK. Body: {}", body);
+                                Err(AppError::ProviderError(format!("Responses stream ended (server sent 200 OK with body: {})", body)))?;
+                            } else {
+                                tracing::error!("OpenAI Responses Stream Error Probe ({}): {}", status, body);
+                                Err(AppError::ProviderError(format!("OpenAI Responses API error ({}): {}", status, body)))?;
+                            }
                         }
                         Err(probe_err) => {
                             tracing::error!("OpenAI Responses Stream Error Probe failed: {}", probe_err);
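
Note (not part of the patch): below is a minimal sketch of the role remapping that the first two hunks introduce, assuming the message objects produced by helpers::messages_to_openai_json each carry a string "role" field. The function name remap_role_for_responses_api and the main() harness are illustrative only, not code from this repository.

// Hypothetical standalone sketch of the system -> developer remapping,
// assuming serde_json is available as in the patched file.
fn remap_role_for_responses_api(message: &serde_json::Value) -> String {
    // Default to "user" when the role field is absent or not a string,
    // mirroring the unwrap_or("user") in the patch.
    let role = message["role"].as_str().unwrap_or("user");
    // Newer models (gpt-5, o1) expect "developer" instead of "system".
    if role == "system" {
        "developer".to_string()
    } else {
        role.to_string()
    }
}

fn main() {
    let system_msg = serde_json::json!({ "role": "system", "content": "You are a helpful assistant." });
    assert_eq!(remap_role_for_responses_api(&system_msg), "developer");

    let user_msg = serde_json::json!({ "role": "user", "content": "Hello" });
    assert_eq!(remap_role_for_responses_api(&user_msg), "user");
}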