```env
# OpenAI-compatible endpoint URL
# If on the same Docker network (llm_internal): use the container name,
#   e.g. http://vllm-server:8000/v1/chat/completions
# If on the host machine: use host.docker.internal,
#   e.g. http://host.docker.internal:8000/v1/chat/completions
OPENAI_ENDPOINT=http://host.docker.internal:8000/v1/chat/completions

# OpenAI API key (if required by your endpoint)
OPENAI_API_KEY=none

# Model name (check your endpoint's /v1/models for the models it serves)
# For InternVL3-8B-AWQ, use the exact model name from your deployment
OPENAI_MODEL=OpenGVLab/InternVL3-8B-AWQ

# Video format: how video is sent to the endpoint
#   'openai' - standard format (works with OpenAI gpt-4o, vLLM, and Ollama vision models)
#   'vllm'   - experimental vLLM-specific format
#   'skip'   - don't send video (for endpoints without video support, e.g. SGLang)
#   'error'  - fail if video is present
VIDEO_FORMAT=openai
```
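
For reference, here is a minimal Python sketch of how a client might consume these variables against an OpenAI-compatible server. It assumes the `requests` library and the standard `/v1/models` and `/v1/chat/completions` routes; the function names, the `frame_000.jpg` path, and the frames-as-base64-images encoding for `VIDEO_FORMAT=openai` are illustrative assumptions, not this project's exact wire format.

```python
import base64
import os

import requests

# Read the same variables the .env example above defines.
ENDPOINT = os.environ.get(
    "OPENAI_ENDPOINT", "http://host.docker.internal:8000/v1/chat/completions"
)
API_KEY = os.environ.get("OPENAI_API_KEY", "none")
MODEL = os.environ.get("OPENAI_MODEL", "OpenGVLab/InternVL3-8B-AWQ")

HEADERS = {"Authorization": f"Bearer {API_KEY}"}


def list_models():
    """Query /v1/models to confirm the exact model name your server exposes."""
    base = ENDPOINT.rsplit("/v1/", 1)[0]
    data = requests.get(f"{base}/v1/models", headers=HEADERS, timeout=10).json()
    return [m["id"] for m in data["data"]]


def frame_to_image_part(jpeg_path):
    """Encode one sampled video frame as a base64 image_url content part.

    This mirrors the common gpt-4o convention (frames sent as images);
    it is an assumption about what VIDEO_FORMAT=openai means here.
    """
    with open(jpeg_path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    return {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{b64}"}}


def ask(prompt, frame_paths=()):
    """Send a chat completion; optionally attach video frames as images."""
    content = [{"type": "text", "text": prompt}]
    content += [frame_to_image_part(p) for p in frame_paths]
    resp = requests.post(
        ENDPOINT,
        headers=HEADERS,
        json={"model": MODEL, "messages": [{"role": "user", "content": content}]},
        timeout=120,
    )
    resp.raise_for_status()
    return resp.json()["choices"][0]["message"]["content"]


if __name__ == "__main__":
    print("Models:", list_models())
    print(ask("Summarize what happens in these frames.", ["frame_000.jpg"]))
```

The frames-as-images payload follows the widely used gpt-4o convention; the `'vllm'` setting presumably maps to a different, vLLM-specific payload, so treat this sketch only as a way to verify that your endpoint, API key, and model name line up.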