VLM-OCR/model_services/deepseek_ocr/Dockerfile

# PyTorch 2.6.0 + CUDA 12.6 + cuDNN9
FROM pytorch/pytorch:2.6.0-cuda12.6-cudnn9-devel
# Base environment variables (TORCH_CUDA_ARCH_LIST="8.0" limits CUDA builds to compute capability 8.0, e.g. A100)
ENV DEBIAN_FRONTEND=noninteractive \
    HF_HOME=/workspace/.cache/huggingface \
    CUDA_HOME=/usr/local/cuda \
    LD_LIBRARY_PATH=/usr/local/cuda/lib64:${LD_LIBRARY_PATH} \
    PIP_DISABLE_PIP_VERSION_CHECK=1 \
    PYTHONUNBUFFERED=1 \
    TRITON_PTXAS_PATH=/usr/local/cuda/bin/ptxas \
    TORCH_CUDA_ARCH_LIST="8.0"
WORKDIR /workspace
# Install required build tools
RUN apt-get update && apt-get install -y --no-install-recommends \
    git build-essential ninja-build \
    && rm -rf /var/lib/apt/lists/*
# Upgrade pip, setuptools, and wheel
RUN python -m pip install -U pip setuptools wheel
# Remove preinstalled libraries, then reinstall pinned versions
RUN pip uninstall -y vllm torch torchvision torchaudio triton flash-attn || true
RUN pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0
# Install project dependencies
COPY requirements.txt /tmp/requirements.txt
RUN pip install -r /tmp/requirements.txt
# Install the pinned vLLM version
RUN pip install vllm==0.8.5
# Build FlashAttention from source and install it (no build isolation, so it compiles against the torch pinned above)
RUN pip cache purge && \
    pip install --no-cache-dir --no-build-isolation --no-binary=flash-attn flash-attn==2.7.3
WORKDIR /workspace
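
As a minimal usage sketch (the image tag, build-context path, and cache mount below are illustrative assumptions, not defined in the repository): the image is built from the directory containing this Dockerfile and requirements.txt, and mounting a host Hugging Face cache into HF_HOME avoids re-downloading model weights on every container start.

# Build; the tag "deepseek-ocr" is hypothetical.
docker build -t deepseek-ocr VLM-OCR/model_services/deepseek_ocr

# Run with GPU access (requires the NVIDIA Container Toolkit); the host cache path is an assumption.
docker run --rm -it --gpus all \
    -v "$HOME/.cache/huggingface:/workspace/.cache/huggingface" \
    deepseek-ocr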