feat(providers): model-registry routing + Responses API support and streaming fallbacks for OpenAI/Gemini
Some checks failed
CI / Check (push) Has been cancelled
CI / Clippy (push) Has been cancelled
CI / Formatting (push) Has been cancelled
CI / Test (push) Has been cancelled
CI / Release Build (push) Has been cancelled

This commit is contained in:
2026-03-04 13:36:03 -05:00
parent 1453e64d4b
commit 5a8510bf1e
5 changed files with 328 additions and 8 deletions

View File

@@ -22,3 +22,28 @@ pub async fn fetch_registry() -> Result<ModelRegistry> {
Ok(registry)
}
/// Heuristic: decide whether a model should be routed to OpenAI Responses API
/// instead of the legacy chat/completions endpoint.
///
/// Returns `true` when:
/// - the model id contains "codex" (case-insensitive), or
/// - the model id is in the gpt-5 series — either a bare id ("gpt-5",
///   "gpt-5.1") or a provider-prefixed id ("openai/gpt-5"), or
/// - the registry metadata for the model has a name containing "codex".
///
/// This is a best-effort hint only; unknown models fall back to `false`
/// (legacy chat/completions).
pub fn model_prefers_responses(registry: &ModelRegistry, model: &str) -> bool {
    let model_lc = model.to_lowercase();

    // Codex-family models always prefer the Responses API.
    if model_lc.contains("codex") {
        return true;
    }

    // gpt-5 series. Check the last path segment so provider-prefixed ids
    // like "openai/gpt-5" match too; keep the original whole-string checks
    // so no previously-matching id stops matching.
    let base = model_lc.rsplit('/').next().unwrap_or(model_lc.as_str());
    if model_lc.starts_with("gpt-5")
        || base.starts_with("gpt-5")
        || model_lc.contains("gpt-5.")
    {
        return true;
    }

    // Fall back to registry metadata: a "codex" hint in the display name
    // also routes to the Responses API.
    registry
        .find_model(model)
        .is_some_and(|meta| meta.name.to_lowercase().contains("codex"))
}