env.example
# OpenAI-compatible endpoint URL
# If the endpoint is on the same Docker network (llm_internal), use the container name.
# Example: http://vllm-server:8000/v1/chat/completions
# If it runs on the host machine, use host.docker.internal.
# Example: http://host.docker.internal:8000/v1/chat/completions
OPENAI_ENDPOINT=http://host.docker.internal:8000/v1/chat/completions

# OpenAI API key (if required by your endpoint)
OPENAI_API_KEY=none

# Model name to use (check your endpoint's /v1/models for available models)
# For InternVL3-8B-AWQ, use the exact model name from your deployment.
OPENAI_MODEL=OpenGVLab/InternVL3-8B-AWQ
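
The /v1/models check mentioned above can be scripted. A minimal Python sketch, assuming the standard OpenAI-compatible route layout (the models route is derived here by trimming the /chat/completions suffix, which is an assumption about your deployment):

import os
import requests

# Derive the models route from OPENAI_ENDPOINT by trimming the
# /chat/completions suffix (assumes the usual OpenAI-compatible layout).
endpoint = os.environ["OPENAI_ENDPOINT"]
base = endpoint.rsplit("/chat/completions", 1)[0]

resp = requests.get(
    f"{base}/models",
    headers={"Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY', 'none')}"},
    timeout=10,
)
resp.raise_for_status()
for model in resp.json().get("data", []):
    print(model["id"])  # copy the exact id into OPENAI_MODEL

With vLLM, the served name typically matches the --model argument unless --served-model-name overrides it.
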
# Video format - how to send video to the endpoint
# 'openai' - standard format (works with OpenAI gpt-4o, vLLM, Ollama with vision models)
# 'vllm'   - experimental vLLM-specific format
# 'skip'   - don't send video (for endpoints without video support, such as SGLang)
# 'error'  - fail if video is present
VIDEO_FORMAT=openai
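
For orientation, here is a hedged sketch of a request using these settings with VIDEO_FORMAT=openai. One common convention for the 'openai' format is to send sampled video frames as base64 image_url parts in a standard chat-completions payload; the frame file and payload shape below are illustrative assumptions, not necessarily this project's exact wire format:

import base64
import os
import requests

# Hypothetical pre-extracted video frame; frame sampling may differ here.
with open("frame_000.jpg", "rb") as f:
    frame_b64 = base64.b64encode(f.read()).decode()

payload = {
    "model": os.environ["OPENAI_MODEL"],
    "messages": [{
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe what happens in this clip."},
            # OpenAI vision convention: frame embedded as a data URL
            {"type": "image_url",
             "image_url": {"url": f"data:image/jpeg;base64,{frame_b64}"}},
        ],
    }],
}
resp = requests.post(
    os.environ["OPENAI_ENDPOINT"],
    headers={"Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY', 'none')}"},
    json=payload,
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])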