# File: llm-gateway-sub-backup/docker-compose_monitoring.yml
# Last modified: 2025-08-11 18:56:38 +09:00
# 76 lines, 1.7 KiB, YAML
# NOTE(review): the filename says "monitoring" but this file defines Ollama
# model-serving services — confirm the file is named correctly.
---
services:
  # Ollama instance serving the Gemma model.
  ollama_gemma:
    image: ollama/ollama:0.10.0
    container_name: pgn_ollama_gemma
    ports:
      # Non-default Ollama port (default is 11434) — presumably the startup
      # script / .env.ollama sets OLLAMA_HOST to listen on 11534; verify.
      - "11534:11534"
    volumes:
      # Persist downloaded model weights across container restarts.
      - ollama_data_gemma:/root/.ollama
      - ./start_ollama_gemma.sh:/start_ollama_gemma.sh
    env_file:
      - .env.ollama
    restart: always
    # Custom entrypoint wraps `ollama serve` (script contents not visible here).
    entrypoint: ["/bin/sh", "/start_ollama_gemma.sh"]
    networks:
      - pgn_net
    deploy:
      resources:
        reservations:
          # Reserve all available NVIDIA GPUs for this container.
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
ollama_qwen_v1:
image: ollama/ollama:0.10.0
container_name: pgn_ollama_qwen_v1
ports:
- "11634:11634"
volumes:
- ollama_data_qwen_30b_v1:/root/.ollama
- ./start_ollama_qwen_v1.sh:/start_ollama_qwen_v1.sh
env_file:
- .env.ollama
restart: always
entrypoint: ["/bin/sh", "/start_ollama_qwen_v1.sh"]
networks:
- pgn_net
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities: [gpu]
ollama_qwen_v2:
image: ollama/ollama:0.10.0
container_name: pgn_ollama_qwen_v2
ports:
- "11734:11734"
volumes:
- ollama_data_qwen_30b_v2:/root/.ollama
- ./start_ollama_qwen_v2.sh:/start_ollama_qwen_v2.sh
env_file:
- .env.ollama
restart: always
entrypoint: ["/bin/sh", "/start_ollama_qwen_v2.sh"]
networks:
- pgn_net
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities: [gpu]
# Named volumes for model-weight persistence (one per service; bare keys
# request default local-driver volumes, which is idiomatic Compose).
volumes:
  ollama_data_gemma:
  ollama_data_qwen_30b_v1:
  ollama_data_qwen_30b_v2:
# Shared network created outside this Compose file; `docker compose up` fails
# if `pgn_net` does not already exist.
networks:
  pgn_net:
    external: true