---
# Docker Compose stack for the DeepSeek-OCR vLLM inference API.
# Builds the local Dockerfile and serves the API on host port 11635,
# attached to an externally managed gateway network.
services:
  deepseek_ocr_vllm:
    build:
      context: .
      dockerfile: Dockerfile
    image: deepseek-ocr-api:torch2.6.0-cuda12.6-cudnn9-vllm0.8.5
    container_name: deepseek_ocr_vllm
    working_dir: /workspace
    volumes:
      # Mount the project root so code/models are editable without rebuilds.
      - ./:/workspace
    ports:
      # host:container — quoted to avoid YAML sexagesimal/number parsing.
      - "11635:11635"
    # Requires Docker Compose v2.30+ for the service-level `gpus` attribute;
    # on older versions use deploy.resources.reservations.devices instead.
    gpus: all
    # Large shared memory + host IPC: vLLM uses shm for tensor transfer
    # between worker processes.
    shm_size: "16g"
    ipc: "host"
    environment:
      # Keep the HF model cache inside the mounted workspace so downloads
      # persist across container recreations.
      - HF_HOME=/workspace/.cache/huggingface
      - CUDA_HOME=/usr/local/cuda
      # `:-` default avoids a Compose warning (and empty interpolation noise)
      # when LD_LIBRARY_PATH is unset on the host; rendered value is identical.
      - LD_LIBRARY_PATH=/usr/local/cuda/lib64:${LD_LIBRARY_PATH:-}
      - PIP_DISABLE_PIP_VERSION_CHECK=1
      # Unbuffered stdout/stderr so logs stream immediately to `docker logs`.
      - PYTHONUNBUFFERED=1
    tty: true
    restart: always
    networks:
      - llm_gateway_local_net

networks:
  # Pre-existing gateway network; must be created outside this file
  # (e.g. `docker network create llm_gateway_local_net`).
  llm_gateway_local_net:
    external: true