Init repo
This commit is contained in:
75
test_server.sh
Executable file
75
test_server.sh
Executable file
@@ -0,0 +1,75 @@
|
||||
#!/bin/bash
#
# Test script for LLM Proxy Gateway.
# Builds the project in release mode, then prints a summary of the project
# structure, implemented features, next steps, and deployment notes.

# Fail fast: exit on unhandled errors, unset variables, and pipeline failures.
set -euo pipefail

echo "Building LLM Proxy Gateway..."

# Test the command's result directly instead of inspecting $? afterwards
# (ShellCheck SC2181); the `if !` guard also keeps `set -e` from aborting
# before we can report the failure. Diagnostics go to stderr.
if ! cargo build --release; then
  echo "Build failed!" >&2
  exit 1
fi
|
||||
|
||||
# Report success and describe the source-tree layout for the reader.
# A single quoted heredoc replaces the long run of echo calls; the quoted
# delimiter ('EOF') suppresses expansion, so the output bytes are unchanged.
cat <<'EOF'
Build successful!

Project Structure Summary:
==========================
Core Components:
  - main.rs: Application entry point with server setup
  - config/: Configuration management
  - server/: API route handlers
  - auth/: Bearer token authentication
  - database/: SQLite database setup
  - models/: Data structures (OpenAI-compatible)
  - providers/: LLM provider implementations (OpenAI, Gemini, DeepSeek, Grok)
  - errors/: Custom error types
  - dashboard/: Admin dashboard with WebSocket support
  - logging/: Request logging middleware
  - state/: Shared application state
  - multimodal/: Image processing support (basic structure)
EOF
|
||||
# List the features the gateway currently implements. Output is identical
# to the original echo sequence (leading blank line included); the quoted
# heredoc delimiter prevents any parameter or command expansion.
cat <<'EOF'

Key Features Implemented:
==========================
✓ OpenAI-compatible API endpoint (/v1/chat/completions)
✓ Bearer token authentication
✓ SQLite database for request tracking
✓ Request logging with token/cost calculation
✓ Provider abstraction layer
✓ Admin dashboard with real-time monitoring
✓ WebSocket support for live updates
✓ Configuration management (config.toml, .env, env vars)
✓ Multimodal support structure (images)
✓ Error handling with proper HTTP status codes
EOF
||||
|
||||
# Walk the user through configuration and a first test request.
# Quoted heredoc: the backslashes and quotes in the curl example below are
# printed verbatim, replacing the original `\\` / `\"` escape sequences
# inside double-quoted echo arguments — same output, easier to read.
cat <<'EOF'

Next Steps Needed:
==================
1. Add API keys to .env file:
   OPENAI_API_KEY=your_key_here
   GEMINI_API_KEY=your_key_here
   DEEPSEEK_API_KEY=your_key_here
   GROK_API_KEY=your_key_here (optional)

2. Create config.toml for custom configuration (optional)

3. Run the server:
   cargo run

4. Access dashboard at: http://localhost:8080

5. Test API with curl:
   curl -X POST http://localhost:8080/v1/chat/completions \
     -H 'Authorization: Bearer your_token' \
     -H 'Content-Type: application/json' \
     -d '{"model": "gpt-4", "messages": [{"role": "user", "content": "Hello"}]}'
EOF
|
||||
# Operational notes for deploying the gateway (memory budget, storage,
# port, auth, providers). Quoted heredoc keeps the output byte-identical
# to the original echo sequence, leading blank line included.
cat <<'EOF'

Deployment Notes:
=================
Memory: Designed for 512MB RAM (LXC container)
Database: SQLite (./data/llm_proxy.db)
Port: 8080 (configurable)
Authentication: Single Bearer token (configurable)
Providers: OpenAI, Gemini, DeepSeek, Grok (disabled by default)
EOF
Reference in New Issue
Block a user