fix(gemini): add model mapping fallback and token clamping
- Map unknown Gemini model names to the configured default model to prevent 400 errors.
- Clamp max_tokens to a safe limit of 8192 for Gemini models.
- Clean up message filtering and role injection for better client compatibility.
@@ -454,7 +454,16 @@ impl super::Provider for GeminiProvider {
     }
 
     async fn chat_completion(&self, request: UnifiedRequest) -> Result<ProviderResponse, AppError> {
-        let model = request.model.clone();
+        let mut model = request.model.clone();
+
+        // Normalize model name: If it's a known Gemini model, use it;
+        // otherwise, if it starts with gemini- but is unknown (e.g. gemini-3-flash-preview),
+        // fall back to the default model to avoid 400 errors.
+        if !model.starts_with("gemini-1.5") && !model.starts_with("gemini-2.0") && model.starts_with("gemini-") {
+            tracing::info!("Mapping unknown Gemini model {} to default {}", model, self.config.default_model);
+            model = self.config.default_model.clone();
+        }
+
         let tools = Self::convert_tools(&request);
         let tool_config = Self::convert_tool_config(&request);
         let (contents, system_instruction) = Self::convert_messages(request.messages.clone()).await?;
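For reference, the new fallback rule behaves like the sketch below. This is a hypothetical standalone extraction of the inline logic (the real code mutates `model` in place and reads the default from `self.config.default_model`); it is shown only to make the predicate's edge cases explicit.

// Sketch only: hypothetical extraction of the inline fallback above.
fn normalize_model(requested: &str, default_model: &str) -> String {
    let known = requested.starts_with("gemini-1.5") || requested.starts_with("gemini-2.0");
    if requested.starts_with("gemini-") && !known {
        default_model.to_string() // unknown gemini-* name: fall back to the configured default
    } else {
        requested.to_string() // known Gemini model, or a non-Gemini name handled elsewhere
    }
}

// Expected behavior:
// normalize_model("gemini-3-flash-preview", "gemini-2.0-flash") == "gemini-2.0-flash"
// normalize_model("gemini-1.5-pro", "gemini-2.0-flash")         == "gemini-1.5-pro"
// normalize_model("gpt-4o", "gemini-2.0-flash")                 == "gpt-4o"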
@@ -464,9 +473,13 @@ impl super::Provider for GeminiProvider {
         }
 
         let generation_config = if request.temperature.is_some() || request.max_tokens.is_some() {
+            // Some Gemini models (especially 1.5) have lower max_output_tokens limits (e.g. 8192)
+            // than what clients like opencode might request. Clamp to a safe maximum.
+            let max_tokens = request.max_tokens.map(|t| t.min(8192));
+
             Some(GeminiGenerationConfig {
                 temperature: request.temperature,
-                max_output_tokens: request.max_tokens,
+                max_output_tokens: max_tokens,
             })
         } else {
             None
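The clamp itself is a plain `Option::map` with `min`. Assuming `max_tokens` is an `Option<u32>` (the field's exact type is not shown in this hunk), it behaves like:

// Sketch of the clamping behavior, assuming max_tokens: Option<u32>.
let clamp = |t: Option<u32>| t.map(|t| t.min(8192));
assert_eq!(clamp(Some(32_000)), Some(8192)); // oversized request is capped
assert_eq!(clamp(Some(4_096)), Some(4_096)); // values within the limit pass through
assert_eq!(clamp(None), None);               // absent stays absent, so the API default applies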
@@ -579,7 +592,14 @@ impl super::Provider for GeminiProvider {
         &self,
         request: UnifiedRequest,
     ) -> Result<BoxStream<'static, Result<ProviderStreamChunk, AppError>>, AppError> {
-        let model = request.model.clone();
+        let mut model = request.model.clone();
+
+        // Normalize model name: fall back to the default if an unknown Gemini model is requested
+        if !model.starts_with("gemini-1.5") && !model.starts_with("gemini-2.0") && model.starts_with("gemini-") {
+            tracing::info!("Mapping unknown Gemini model {} to default {}", model, self.config.default_model);
+            model = self.config.default_model.clone();
+        }
+
         let tools = Self::convert_tools(&request);
         let tool_config = Self::convert_tool_config(&request);
         let (contents, system_instruction) = Self::convert_messages(request.messages.clone()).await?;
@@ -589,9 +609,13 @@ impl super::Provider for GeminiProvider {
         }
 
         let generation_config = if request.temperature.is_some() || request.max_tokens.is_some() {
+            // Some Gemini models (especially 1.5) have lower max_output_tokens limits (e.g. 8192)
+            // than what clients like opencode might request. Clamp to a safe maximum.
+            let max_tokens = request.max_tokens.map(|t| t.min(8192));
+
             Some(GeminiGenerationConfig {
                 temperature: request.temperature,
-                max_output_tokens: request.max_tokens,
+                max_output_tokens: max_tokens,
             })
         } else {
             None
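Note that the normalization block is now duplicated between `chat_completion` and the streaming path. A shared helper could express the rule once; a sketch under that assumption (not part of this commit, method name hypothetical):

impl GeminiProvider {
    // Sketch only: mirrors the two duplicated inline blocks above.
    fn resolve_model(&self, requested: &str) -> String {
        if requested.starts_with("gemini-")
            && !requested.starts_with("gemini-1.5")
            && !requested.starts_with("gemini-2.0")
        {
            tracing::info!("Mapping unknown Gemini model {} to default {}", requested, self.config.default_model);
            self.config.default_model.clone()
        } else {
            requested.to_string()
        }
    }
}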