The 100-line combined log dump was entirely filled by litestream R2 errors, hiding the actual blue-app crash output. Now dumps blue-app (60 lines), router (10 lines), and litestream (10 lines) separately. Revert litestream image tag to latest — the R2 errors were caused by misconfigured endpoint/bucket CI variables, not a litestream version bug. The v0.5.8 tag may not exist on Docker Hub (tags omit 'v' prefix). Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
104 lines
3.8 KiB
Bash
Executable File
104 lines
3.8 KiB
Bash
Executable File
#!/usr/bin/env bash
#
# Blue/green deploy: build the idle slot, back up and migrate the DB,
# health-check the new slot, flip the nginx router to it, then stop the
# old slot. The currently-live slot name is persisted in $LIVE_FILE.
#
set -euo pipefail

# Compose invocation, kept as a plain string: every call site expands it
# unquoted ($COMPOSE ...) and relies on word-splitting into
# "docker compose -f docker-compose.prod.yml".
readonly COMPOSE="docker compose -f docker-compose.prod.yml"

readonly LIVE_FILE=".live-slot"            # records which slot serves traffic
readonly ROUTER_CONF="router/default.conf" # generated nginx upstream config
# ── Determine slots ─────────────────────────────────────────

# Read the live slot marker; a missing/unreadable file means this is the
# first deploy ("none"), which targets blue.
CURRENT=$(cat "$LIVE_FILE" 2>/dev/null || echo "none")

case "$CURRENT" in
  blue) TARGET="green" ;;
  *)    TARGET="blue"  ;;
esac

echo "==> Current: $CURRENT → Deploying: $TARGET"
# ── Build ───────────────────────────────────────────────────

# Build images for the target slot's profile only; the live slot keeps
# running its current images untouched.
echo "==> Building $TARGET..."
$COMPOSE --profile "$TARGET" build
# ── Backup DB before migration ────────────────────────────────

# Timestamped copy of the SQLite file, made inside a throwaway container
# of the target slot (entrypoint bypassed so only `sh` runs). The copy is
# best-effort: a missing DB on first deploy must not fail the script.
stamp=$(date +%Y%m%d-%H%M%S)
BACKUP_TAG="pre-deploy-${stamp}"
printf '==> Backing up database (%s)...\n' "$BACKUP_TAG"
$COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
  sh -c "cp /app/data/app.db /app/data/app.db.${BACKUP_TAG} 2>/dev/null || true"
# ── Migrate ─────────────────────────────────────────────────

# Run DB migrations in a one-off container of the target slot BEFORE any
# traffic is switched to it.
# NOTE(review): both slots appear to share a single SQLite file
# (/app/data/app.db), so migrations must stay backward-compatible with
# the still-live slot until the router flips — confirm with the schema
# migration policy.
echo "==> Running migrations..."
$COMPOSE --profile "$TARGET" run --rm "${TARGET}-app" \
  python -m padelnomics.migrations.migrate
# ── Start & health check ───────────────────────────────────
# Router config is NOT written yet — router keeps old config so it stays
# healthy while we wait for the new slot to pass its own health check.

echo "==> Starting $TARGET (waiting for health check)..."
if ! $COMPOSE --profile "$TARGET" up -d --wait; then
  echo "!!! Health check failed — dumping logs"
  # Per-service dumps: the app gets most of the line budget so its crash
  # output is not drowned out by router/litestream noise.
  echo "--- ${TARGET}-app logs ---"
  $COMPOSE logs --tail=60 "${TARGET}-app" 2>&1 || true
  echo "--- router logs ---"
  $COMPOSE logs --tail=10 router 2>&1 || true
  echo "--- litestream logs ---"
  $COMPOSE logs --tail=10 litestream 2>&1 || true

  echo "!!! Rolling back"
  # Best-effort teardown: under `set -e` a failure here (e.g. a container
  # that never started, so `stop` errors) would otherwise abort the
  # script BEFORE the DB restore below gets a chance to run.
  $COMPOSE stop "${TARGET}-app" "${TARGET}-worker" "${TARGET}-scheduler" || true

  # Newest pre-deploy backup, if any; guarded for the same reason — a
  # failed one-off container must not kill the rollback.
  LATEST=$($COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
    sh -c "ls -t /app/data/app.db.pre-deploy-* 2>/dev/null | head -1") || LATEST=""
  if [ -n "$LATEST" ]; then
    echo "==> Restoring database from ${LATEST}..."
    # If the restore itself fails, let `set -e` propagate: the deploy is
    # failing either way and an operator must intervene.
    $COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
      sh -c "cp '${LATEST}' /app/data/app.db"
  fi
  exit 1
fi
# ── Write router config and reload (new slot is healthy) ────

echo "==> Writing router config for $TARGET..."
mkdir -p "$(dirname "$ROUTER_CONF")"
# Unquoted heredoc delimiter: ${TARGET} expands NOW, while nginx runtime
# variables ($host, $remote_addr, ...) are backslash-escaped so they land
# in the config file literally.
cat > "$ROUTER_CONF" <<NGINX
upstream app {
    server ${TARGET}-app:5000;
}

server {
    listen 80;

    location / {
        proxy_pass http://app;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }
}
NGINX

# Reload (not restart) nginx in the already-running router container so
# in-flight connections drain gracefully during the flip.
echo "==> Reloading router..."
$COMPOSE exec router nginx -s reload
# ── Cleanup old pre-deploy backups (keep last 3) ─────────────

# List backups newest-first, skip the 3 most recent, delete the rest.
# Blanket `|| true` is intentional: cleanup is best-effort and must never
# fail a deploy that has already succeeded.
# NOTE(review): the `ls | xargs` pipeline is fragile in general but safe
# here because backup names are pre-deploy-<timestamp> (no whitespace).
$COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
  sh -c "ls -t /app/data/app.db.pre-deploy-* 2>/dev/null | tail -n +4 | xargs rm -f" || true
# ── Stop old slot ───────────────────────────────────────────

# A first-ever deploy has no previous slot to tear down.
if [[ "$CURRENT" != "none" ]]; then
  printf '==> Stopping %s...\n' "$CURRENT"
  $COMPOSE stop "${CURRENT}-app" "${CURRENT}-worker" "${CURRENT}-scheduler"
fi
# ── Record live slot ────────────────────────────────────────

# Persist the now-live slot so the next deploy flips the other way.
printf '%s\n' "$TARGET" > "$LIVE_FILE"
printf '==> Deployed %s successfully!\n' "$TARGET"