# VLM-OCR/Dockerfile.hf
# PyTorch 2.6.0 + CUDA 12.6 + cuDNN9
FROM pytorch/pytorch:2.6.0-cuda12.6-cudnn9-devel
ENV DEBIAN_FRONTEND=noninteractive \
    HF_HOME=/workspace/.cache/huggingface \
    CUDA_HOME=/usr/local/cuda \
    LD_LIBRARY_PATH=/usr/local/cuda/lib64:${LD_LIBRARY_PATH} \
    PIP_DISABLE_PIP_VERSION_CHECK=1 \
    PYTHONUNBUFFERED=1 \
    HF_HUB_DISABLE_TELEMETRY=1
# Target GPU architecture for source builds (Ampere, compute capability 8.0 by default);
# PyTorch's extension builder expects the dotted form ("8.0"), not "80".
ARG TORCH_CUDA_ARCH_LIST=8.0
ENV TORCH_CUDA_ARCH_LIST=${TORCH_CUDA_ARCH_LIST}
ENV TRITON_PTXAS_PATH=/usr/local/cuda/bin/ptxas
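# Note: TORCH_CUDA_ARCH_LIST controls which SM targets source builds below (e.g. flash-attn)
# compile for. To target other GPUs, override the ARG at build time, for example
# (illustrative command, not part of this repo's tooling):
#   docker build --build-arg TORCH_CUDA_ARCH_LIST="8.0;8.6;9.0" -f Dockerfile.hf .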
WORKDIR /workspace
# Build tools
RUN apt-get update && apt-get install -y --no-install-recommends \
    git build-essential ninja-build cmake \
    && rm -rf /var/lib/apt/lists/*
RUN python -m pip install -U pip setuptools wheel packaging ninja
# Explicitly reinstall pinned versions (keeps the stack consistent with the base image)
RUN pip install --no-cache-dir \
    torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0
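# Optional sanity check (commented out; illustrative only): confirm the reinstall kept the
# CUDA 12.6 build of torch rather than pulling in a CPU-only wheel.
# RUN python -c "import torch; print(torch.__version__, torch.version.cuda)"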
# DeepSeek-OCR (HF) compatible stack
RUN pip install --no-cache-dir \
    "transformers==4.43.3" \
    "accelerate==0.33.0" \
    "tokenizers==0.19.1" \
    "numpy==1.26.4" \
    "safetensors>=0.4.2" \
    "einops" "timm>=0.9"
# flash-attn 2.7.3 (built from source against CUDA 12.6)
RUN pip install --no-cache-dir --no-build-isolation --no-binary=flash-attn flash-attn==2.7.3
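# --no-build-isolation compiles flash-attn inside this environment, so it links against the
# torch and CUDA toolkit installed above; ninja (installed earlier) parallelizes the build.
# Optional import check (commented out; illustrative only):
# RUN python -c "import flash_attn; print(flash_attn.__version__)"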
# Remove vLLM (avoid conflicts if it ever gets pulled in as a dependency)
RUN pip uninstall -y vllm || true
# Pin OpenCV to a build compatible with numpy 1.26
RUN pip install --no-cache-dir "opencv-python-headless==4.8.1.78"
# Application source
COPY DeepSeek-OCR-master/DeepSeek-OCR-hf/ /workspace/DeepSeek-OCR-hf/
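# Example usage (image tag, mounts, and run flags are illustrative, not defined by this file):
#   docker build -f Dockerfile.hf -t vlm-ocr:hf .
#   docker run --gpus all -it \
#     -v "$PWD/.cache/huggingface:/workspace/.cache/huggingface" \
#     vlm-ocr:hf bash
# Mounting the host cache at HF_HOME (/workspace/.cache/huggingface) lets model downloads
# persist across container runs.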