# LLM Proxy Gateway Configuration Example
# Copy this file to .env and fill in your values

# ==============================================================================
# MANDATORY: Encryption & Security
# ==============================================================================
# A 32-byte hex or base64 encoded string used for session signing and
# database encryption.
# Generate one with: openssl rand -hex 32
LLM_PROXY__ENCRYPTION_KEY=your_secure_32_byte_key_here

# ==============================================================================
# LLM Provider API Keys
# ==============================================================================
OPENAI_API_KEY=sk-...
GEMINI_API_KEY=AIza...
DEEPSEEK_API_KEY=sk-...
GROK_API_KEY=xai-...

# ==============================================================================
# Server Configuration
# ==============================================================================
LLM_PROXY__SERVER__PORT=8080
LLM_PROXY__SERVER__HOST=0.0.0.0

# Optional: Bearer tokens for client authentication (comma-separated)
# If not set, the proxy will look up tokens in the database.
# LLM_PROXY__SERVER__AUTH_TOKENS=token1,token2

# ==============================================================================
# Database Configuration
# ==============================================================================
LLM_PROXY__DATABASE__PATH=./data/llm_proxy.db
LLM_PROXY__DATABASE__MAX_CONNECTIONS=10

# ==============================================================================
# Provider Overrides (Optional)
# ==============================================================================
# LLM_PROXY__PROVIDERS__OPENAI__BASE_URL=https://api.openai.com/v1
# LLM_PROXY__PROVIDERS__GEMINI__ENABLED=true
# LLM_PROXY__PROVIDERS__OLLAMA__BASE_URL=http://localhost:11434/v1
# LLM_PROXY__PROVIDERS__OLLAMA__ENABLED=true
# LLM_PROXY__PROVIDERS__OLLAMA__MODELS=llama3,mistral,llava