refactor: move Ollama services to docker-compose.standalone.yml

Ollama profiles (ollama-gpu, ollama-cpu) are only for Linux standalone
deployment. Mac devs never use them. A separate file keeps the main
compose file clean and provides a natural home for future standalone
services (MinIO, etc.).

Linux: docker compose -f docker-compose.yml -f docker-compose.standalone.yml --profile ollama-gpu up -d
Mac: docker compose up -d (native Ollama, no standalone file needed)
This commit is contained in:
Igor Loskutov
2026-02-10 16:02:28 -05:00
parent 663345ece6
commit 33a93db802
5 changed files with 65 additions and 78 deletions

View File

@@ -69,8 +69,10 @@ case "$OS" in
LLM_URL="http://ollama-cpu:$OLLAMA_PORT/v1"
fi
COMPOSE="docker compose -f docker-compose.yml -f docker-compose.standalone.yml"
echo "Starting Ollama container..."
docker compose --profile "$PROFILE" up -d
$COMPOSE --profile "$PROFILE" up -d
# Determine container name
if [ "$PROFILE" = "ollama-gpu" ]; then
@@ -82,7 +84,7 @@ case "$OS" in
wait_for_ollama "http://localhost:$OLLAMA_PORT"
echo "Pulling model $MODEL..."
docker compose exec "$SVC" ollama pull "$MODEL"
$COMPOSE exec "$SVC" ollama pull "$MODEL"
echo ""
echo "Done. Add to server/.env:"
@@ -90,7 +92,7 @@ case "$OS" in
echo " LLM_MODEL=$MODEL"
echo " LLM_API_KEY=not-needed"
echo ""
echo "Then: docker compose --profile $PROFILE up -d"
echo "Then: $COMPOSE --profile $PROFILE up -d"
;;
*)