# Docker Compose stack for the POST-request monitor service.
# NOTE(review): the top-level `version` key is informational only and is
# ignored (with a warning) by Compose v2+; kept for backward compatibility.
version: '3.8'

services:
  post-monitor:
    build: .
    container_name: post-request-monitor
    restart: unless-stopped
    ports:
      # Host 5005 -> container 5000. Quoted so YAML never misreads the
      # colon-separated pair (sexagesimal trap for low port numbers).
      - "5005:5000"
    environment:
      # `${VAR:-default}` interpolation: the default applies when the host
      # variable is unset or empty.
      - OPENAI_ENDPOINT=${OPENAI_ENDPOINT:-http://host.docker.internal:8000/v1/chat/completions}
      - OPENAI_API_KEY=${OPENAI_API_KEY:-none}
      - OPENAI_MODEL=${OPENAI_MODEL:-gpt-4-vision-preview}
      - VIDEO_FORMAT=${VIDEO_FORMAT:-openai}
    networks:
      - llm_internal

networks:
  llm_internal:
    # Pre-existing network managed outside this file; Compose will not
    # create it and fails if it does not already exist.
    external: true