Compare commits
2 Commits
a022bd1272
...
be9fdd9a52
| Author | SHA1 | Date |
|---|---|---|
|  | be9fdd9a52 |  |
|  | e89658fd87 |  |
@@ -439,6 +439,17 @@ impl GeminiProvider {
|
||||
}
|
||||
}
|
||||
|
||||
/// Determine the appropriate base URL for the model.
|
||||
/// "preview" models often require the v1beta endpoint.
|
||||
fn get_base_url(&self, model: &str) -> String {
|
||||
if model.contains("preview") || model.contains("thinking") {
|
||||
self.config.base_url.replace("/v1", "/v1beta")
|
||||
} else {
|
||||
self.config.base_url.clone()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl super::Provider for GeminiProvider {
|
||||
fn name(&self) -> &str {
|
||||
@@ -454,7 +465,22 @@ impl super::Provider for GeminiProvider {
|
||||
}
|
||||
|
||||
async fn chat_completion(&self, request: UnifiedRequest) -> Result<ProviderResponse, AppError> {
|
||||
let model = request.model.clone();
|
||||
let mut model = request.model.clone();
|
||||
|
||||
// Normalize model name: If it's a known Gemini model version, use it;
|
||||
// otherwise, if it starts with gemini- but is an unknown legacy version,
|
||||
// fallback to the default model to avoid 400 errors.
|
||||
// We now allow gemini-3+ as valid versions.
|
||||
let is_known_version = model.starts_with("gemini-1.5") ||
|
||||
model.starts_with("gemini-2.0") ||
|
||||
model.starts_with("gemini-2.5") ||
|
||||
model.starts_with("gemini-3");
|
||||
|
||||
if !is_known_version && model.starts_with("gemini-") {
|
||||
tracing::info!("Mapping unknown Gemini model {} to default {}", model, self.config.default_model);
|
||||
model = self.config.default_model.clone();
|
||||
}
|
||||
|
||||
let tools = Self::convert_tools(&request);
|
||||
let tool_config = Self::convert_tool_config(&request);
|
||||
let (contents, system_instruction) = Self::convert_messages(request.messages.clone()).await?;
|
||||
@@ -464,9 +490,14 @@ impl super::Provider for GeminiProvider {
|
||||
}
|
||||
|
||||
let generation_config = if request.temperature.is_some() || request.max_tokens.is_some() {
|
||||
// Some Gemini models (especially 1.5) have lower max_output_tokens limits (e.g. 8192)
|
||||
// than what clients like opencode might request. Clamp to a safe maximum.
|
||||
// Note: Gemini 2.0+ supports much higher limits, but 8192 is a safe universal floor.
|
||||
let max_tokens = request.max_tokens.map(|t| t.min(8192));
|
||||
|
||||
Some(GeminiGenerationConfig {
|
||||
temperature: request.temperature,
|
||||
max_output_tokens: request.max_tokens,
|
||||
max_output_tokens: max_tokens,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
@@ -480,7 +511,8 @@ impl super::Provider for GeminiProvider {
|
||||
tool_config,
|
||||
};
|
||||
|
||||
let url = format!("{}/models/{}:generateContent", self.config.base_url, model);
|
||||
let base_url = self.get_base_url(&model);
|
||||
let url = format!("{}/models/{}:generateContent", base_url, model);
|
||||
|
||||
let response = self
|
||||
.client
|
||||
@@ -579,7 +611,19 @@ impl super::Provider for GeminiProvider {
|
||||
&self,
|
||||
request: UnifiedRequest,
|
||||
) -> Result<BoxStream<'static, Result<ProviderStreamChunk, AppError>>, AppError> {
|
||||
let model = request.model.clone();
|
||||
let mut model = request.model.clone();
|
||||
|
||||
// Normalize model name: fallback to default if unknown Gemini model is requested
|
||||
let is_known_version = model.starts_with("gemini-1.5") ||
|
||||
model.starts_with("gemini-2.0") ||
|
||||
model.starts_with("gemini-2.5") ||
|
||||
model.starts_with("gemini-3");
|
||||
|
||||
if !is_known_version && model.starts_with("gemini-") {
|
||||
tracing::info!("Mapping unknown Gemini model {} to default {}", model, self.config.default_model);
|
||||
model = self.config.default_model.clone();
|
||||
}
|
||||
|
||||
let tools = Self::convert_tools(&request);
|
||||
let tool_config = Self::convert_tool_config(&request);
|
||||
let (contents, system_instruction) = Self::convert_messages(request.messages.clone()).await?;
|
||||
@@ -589,9 +633,13 @@ impl super::Provider for GeminiProvider {
|
||||
}
|
||||
|
||||
let generation_config = if request.temperature.is_some() || request.max_tokens.is_some() {
|
||||
// Some Gemini models (especially 1.5) have lower max_output_tokens limits (e.g. 8192)
|
||||
// than what clients like opencode might request. Clamp to a safe maximum.
|
||||
let max_tokens = request.max_tokens.map(|t| t.min(8192));
|
||||
|
||||
Some(GeminiGenerationConfig {
|
||||
temperature: request.temperature,
|
||||
max_output_tokens: request.max_tokens,
|
||||
max_output_tokens: max_tokens,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
@@ -605,9 +653,10 @@ impl super::Provider for GeminiProvider {
|
||||
tool_config,
|
||||
};
|
||||
|
||||
let base_url = self.get_base_url(&model);
|
||||
let url = format!(
|
||||
"{}/models/{}:streamGenerateContent?alt=sse",
|
||||
self.config.base_url, model,
|
||||
base_url, model,
|
||||
);
|
||||
|
||||
// (no fallback_request needed here)
|
||||
@@ -622,7 +671,7 @@ impl super::Provider for GeminiProvider {
|
||||
// Prepare clones for HTTP fallback usage inside non-streaming paths.
|
||||
let http_client = self.client.clone();
|
||||
let http_api_key = self.api_key.clone();
|
||||
let http_base = self.config.base_url.clone();
|
||||
let http_base = base_url.clone();
|
||||
let gemini_request_clone = gemini_request.clone();
|
||||
|
||||
let es_result = reqwest_eventsource::EventSource::new(
|
||||
|
||||
Reference in New Issue
Block a user