feat(providers): model-registry routing + Responses API support and streaming fallbacks for OpenAI/Gemini
Some checks failed
CI / Check (push) Has been cancelled
CI / Clippy (push) Has been cancelled
CI / Formatting (push) Has been cancelled
CI / Test (push) Has been cancelled
CI / Release Build (push) Has been cancelled

This commit is contained in:
2026-03-04 13:36:03 -05:00
parent 1453e64d4b
commit 5a8510bf1e
5 changed files with 328 additions and 8 deletions

View File

@@ -304,7 +304,17 @@ async fn chat_completions(
}
} else {
// Handle non-streaming response
let result = provider.chat_completion(unified_request).await;
// Provider-specific routing: certain OpenAI models are better served by the
// Responses API (/v1/responses) than by Chat Completions. Consult the model
// registry heuristic to choose chat_responses vs chat_completion automatically.
let use_responses = provider.name() == "openai"
&& crate::utils::registry::model_prefers_responses(&state.model_registry, &unified_request.model);
let result = if use_responses {
provider.chat_responses(unified_request).await
} else {
provider.chat_completion(unified_request).await
};
match result {
Ok(response) => {