Files
llm-gateway-sub-backup/docker-compose_ollama.yml
2025-08-11 18:56:38 +09:00

78 lines
1.7 KiB
YAML

services:
  # Gemma model server. Each service in this file runs the same Ollama image
  # on its own host port with its own model cache volume and start script.
  ollama_gemma:
    image: ollama/ollama:0.11.2
    container_name: pgn_ollama_gemma
    ports:
      # Non-default Ollama port (default is 11434); assumes OLLAMA_HOST is set
      # to 11534 in .env.ollama or start_ollama_gemma.sh — TODO confirm.
      - "11534:11534"
    volumes:
      - ollama_data_gemma:/root/.ollama  # persist downloaded models across restarts
      - ./start_ollama_gemma.sh:/start_ollama_gemma.sh
    environment:
      - OLLAMA_NUM_PARALLEL=4  # NOTE(review): only this service sets parallelism
    env_file:
      - .env.ollama
    restart: always
    entrypoint: ["/bin/sh", "/start_ollama_gemma.sh"]
    networks:
      - pgn_net
    deploy:
      resources:
        reservations:
          devices:
            # Reserve all host NVIDIA GPUs for this container.
            - driver: nvidia
              count: all
              capabilities: [gpu]
ollama_gpt_oss:
image: ollama/ollama:0.11.2
container_name: pgn_ollama_gpt_oss
ports:
- "11634:11634"
volumes:
- ollama_data_gpt_oss:/root/.ollama
- ./start_ollama_gpt_oss.sh:/start_ollama_gpt_oss.sh
env_file:
- .env.ollama
restart: always
entrypoint: ["/bin/sh", "/start_ollama_gpt_oss.sh"]
networks:
- pgn_net
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities: [gpu]
ollama_qwen:
image: ollama/ollama:0.11.2
container_name: pgn_ollama_qwen
ports:
- "11734:11734"
volumes:
- ollama_data_qwen:/root/.ollama
- ./start_ollama_qwen.sh:/start_ollama_qwen.sh
env_file:
- .env.ollama
restart: always
entrypoint: ["/bin/sh", "/start_ollama_qwen.sh"]
networks:
- pgn_net
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities: [gpu]
# Named volumes — one model cache per service, managed by Docker.
volumes:
  ollama_data_gemma:
  ollama_data_gpt_oss:
  ollama_data_qwen:

networks:
  # Pre-existing network shared with other stacks; must be created beforehand
  # (e.g. `docker network create pgn_net`) or `up` will fail.
  pgn_net:
    external: true