chore: use newest cheap models for provider tests
Updated OpenAI test model to gpt-4o-mini and verified Gemini is using gemini-2.0-flash.
This commit is contained in:
@@ -676,7 +676,7 @@ func (s *Server) handleTestProvider(c *gin.Context) {
 	// Prepare a simple test request
 	testReq := &models.UnifiedRequest{
-		Model:    "gpt-4o", // Default test model
+		Model:    "gpt-4o-mini", // Default cheap test model for OpenAI
 		Messages: []models.UnifiedMessage{
 			{
 				Role: "user",
Reference in New Issue
Block a user