refactor: move deployment files from web/ to repo root
Moves Dockerfile, docker-compose.yml, docker-compose.prod.yml, deploy.sh, litestream.yml, and router/ from web/ to the monorepo root so copier update can manage them from the template. Dockerfile updated for monorepo layout: - CSS build stage: COPY web/src/ ./web/src/ (Tailwind input path updated) - Python build stage: copies root uv.lock + web/pyproject.toml separately, runs `uv sync --package beanflows` (not full workspace sync) - Runtime CSS copy path updated to web/src/beanflows/static/css/output.css deploy.sh: fixed sops path ($APP_DIR/.env.prod.sops, was $APP_DIR/../) supervisor.py: - web_code_changed(): Dockerfile path is now root-level (was web/Dockerfile) - tick(): deploy script is now ./deploy.sh (was ./web/deploy.sh) Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
34
Dockerfile
Normal file
34
Dockerfile
Normal file
@@ -0,0 +1,34 @@
# ── CSS build stage (Tailwind standalone CLI, no Node.js) ─────────────────────
FROM debian:bookworm-slim AS css-build
# NOTE(review): this fetches the *latest* release with no version pin and no
# checksum, so builds are not reproducible — consider pinning a release tag
# (releases/download/<tag>/...) and/or ADD --checksum. TODO confirm which
# Tailwind major version input.css targets before pinning.
ADD https://github.com/tailwindlabs/tailwindcss/releases/latest/download/tailwindcss-linux-x64 /usr/local/bin/tailwindcss
RUN chmod +x /usr/local/bin/tailwindcss
WORKDIR /app
# Monorepo layout: web app sources live under web/src/.
COPY web/src/ ./web/src/
# Compile the minified stylesheet; the runtime stage copies only output.css.
RUN tailwindcss -i ./web/src/beanflows/static/css/input.css \
    -o ./web/src/beanflows/static/css/output.css --minify

# ── Python build stage ────────────────────────────────────────────────────────
FROM python:3.12-slim AS build
# uv binary pulled from the official distroless image (pinned minor: 0.8).
COPY --from=ghcr.io/astral-sh/uv:0.8 /uv /uvx /bin/
WORKDIR /app
ENV UV_COMPILE_BYTECODE=1 UV_LINK_MODE=copy
# Manifests are copied before source so the dependency-install layer stays
# cached until a lockfile/pyproject changes.
COPY uv.lock pyproject.toml README.md ./
COPY web/pyproject.toml web/README.md ./web/
COPY web/src/ ./web/src/
# Install only the beanflows workspace package (not the full workspace),
# reusing uv's download cache across builds via a BuildKit cache mount.
RUN --mount=type=cache,target=/root/.cache/uv \
    uv sync --no-dev --frozen --package beanflows

# ── Runtime stage ─────────────────────────────────────────────────────────────
FROM python:3.12-slim AS runtime
# Put the project venv first on PATH so `hypercorn` below resolves to it.
ENV PATH="/app/.venv/bin:$PATH"
RUN useradd -m -u 1000 appuser
WORKDIR /app
# /app/data is the SQLite home (see DATABASE_PATH); owned by the app user.
RUN mkdir -p /app/data && chown -R appuser:appuser /app
COPY --from=build --chown=appuser:appuser /app .
# Overlay the compiled stylesheet produced by the css-build stage.
COPY --from=css-build /app/web/src/beanflows/static/css/output.css ./web/src/beanflows/static/css/output.css
USER appuser
ENV PYTHONUNBUFFERED=1
ENV DATABASE_PATH=/app/data/app.db
# Documentation only — port publishing happens in the compose files.
EXPOSE 5000
CMD ["hypercorn", "beanflows.app:app", "--bind", "0.0.0.0:5000", "--workers", "1"]
125
deploy.sh
Normal file
125
deploy.sh
Normal file
@@ -0,0 +1,125 @@
#!/usr/bin/env bash
# Blue-green deployment: build the idle slot, run migrations, health-check the
# new slot, switch the nginx router over, then stop the previously live slot.
# A timestamped SQLite backup is taken before migration and restored if the
# health check fails.
set -euo pipefail

APP_DIR="$(cd "$(dirname "$0")" && pwd)"
# FIX: everything below uses paths relative to the repo root (compose file,
# .live-slot, router/). Previously the script only worked when invoked from
# that directory; cd there explicitly so any caller location works.
cd "$APP_DIR"

# ── Verify sops is installed (setup_server.sh installs it to /usr/local/bin) ──
if ! command -v sops &>/dev/null; then
  echo "ERROR: sops not found — run infra/setup_server.sh first"
  exit 1
fi

# ── Decrypt secrets (SOPS auto-discovers age key from ~/.config/sops/age/) ────
echo "==> Decrypting secrets from .env.prod.sops..."
sops --input-type dotenv --output-type dotenv -d "$APP_DIR/.env.prod.sops" > "$APP_DIR/.env"
chmod 600 "$APP_DIR/.env"

COMPOSE="docker compose -f docker-compose.prod.yml"
LIVE_FILE=".live-slot"
ROUTER_CONF="router/default.conf"

# ── Determine slots ─────────────────────────────────────────
# .live-slot records which slot currently serves traffic; absent file means
# first deploy ("none"), which targets blue.
CURRENT=$(cat "$LIVE_FILE" 2>/dev/null || echo "none")

if [ "$CURRENT" = "blue" ]; then
  TARGET="green"
else
  TARGET="blue"
fi

echo "==> Current: $CURRENT → Deploying: $TARGET"

# ── Build ───────────────────────────────────────────────────
echo "==> Building $TARGET..."
$COMPOSE --profile "$TARGET" build

# ── Backup DB before migration ────────────────────────────────
# Best-effort copy inside the app-data volume; `|| true` keeps a missing DB
# (first deploy) from aborting the run.
BACKUP_TAG="pre-deploy-$(date +%Y%m%d-%H%M%S)"
echo "==> Backing up database (${BACKUP_TAG})..."
$COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
  sh -c "cp /app/data/app.db /app/data/app.db.${BACKUP_TAG} 2>/dev/null || true"

# ── Migrate ─────────────────────────────────────────────────
echo "==> Running migrations..."
$COMPOSE --profile "$TARGET" run --rm "${TARGET}-app" \
  python -m beanflows.migrations.migrate

# ── Ensure router points to current live slot before --wait ──
# nginx resolves upstream hostnames — if config points to a stopped slot,
# the health check fails. Reset router to current slot while target starts.

# Write an nginx config routing the `app` upstream to slot $1.
# Dollar signs for nginx variables are escaped so the heredoc expands only
# ${SLOT}.
_write_router_conf() {
  local SLOT="$1"
  mkdir -p "$(dirname "$ROUTER_CONF")"
  cat > "$ROUTER_CONF" <<NGINX
upstream app {
    server ${SLOT}-app:5000;
}

server {
    listen 80;

    location / {
        proxy_pass http://app;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }
}
NGINX
}

if [ "$CURRENT" != "none" ]; then
  echo "==> Resetting router to current slot ($CURRENT)..."
  _write_router_conf "$CURRENT"
  $COMPOSE restart router
fi

# ── Start & health check ───────────────────────────────────
echo "==> Starting $TARGET (waiting for health check)..."
if ! $COMPOSE --profile "$TARGET" up -d --wait; then
  echo "!!! Health check failed — dumping logs"
  echo "--- ${TARGET}-app logs ---"
  $COMPOSE --profile "$TARGET" logs --tail=60 "${TARGET}-app" 2>&1 || true
  echo "--- router logs ---"
  $COMPOSE logs --tail=10 router 2>&1 || true
  echo "!!! Rolling back"
  $COMPOSE stop "${TARGET}-app" "${TARGET}-worker" "${TARGET}-scheduler"
  # Restore the most recent pre-deploy backup, if one exists.
  LATEST=$($COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
    sh -c "ls -t /app/data/app.db.pre-deploy-* 2>/dev/null | head -1")
  if [ -n "$LATEST" ]; then
    echo "==> Restoring database from ${LATEST}..."
    $COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
      sh -c "cp '${LATEST}' /app/data/app.db"
  fi
  exit 1
fi

# ── Write router config and reload (new slot is healthy) ────
echo "==> Switching router to $TARGET..."
_write_router_conf "$TARGET"
$COMPOSE exec router nginx -s reload

# ── Cleanup old pre-deploy backups (keep last 3) ─────────────
$COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
  sh -c "ls -t /app/data/app.db.pre-deploy-* 2>/dev/null | tail -n +4 | xargs rm -f" || true

# ── Stop old slot ───────────────────────────────────────────
if [ "$CURRENT" != "none" ]; then
  echo "==> Stopping $CURRENT..."
  $COMPOSE stop "${CURRENT}-app" "${CURRENT}-worker" "${CURRENT}-scheduler"
fi

# ── Record live slot ────────────────────────────────────────
echo "$TARGET" > "$LIVE_FILE"
echo "==> Deployed $TARGET successfully!"
132
docker-compose.prod.yml
Normal file
132
docker-compose.prod.yml
Normal file
@@ -0,0 +1,132 @@
# Production compose: blue/green app slots behind an nginx router, plus
# Litestream SQLite replication. Slots are enabled via compose profiles
# (deploy.sh passes --profile blue|green).

# Shared settings, expressed once via extension fields + YAML anchors.
# Semantically identical to spelling them out on every service.
x-slot-common: &slot-common
  build:
    context: .
  restart: unless-stopped
  env_file: ./.env
  networks:
    - net

x-app-environment: &app-environment
  - DATABASE_PATH=/app/data/app.db
  - SERVING_DUCKDB_PATH=/data/materia/analytics.duckdb

x-worker-environment: &worker-environment
  - DATABASE_PATH=/app/data/app.db

x-app-volumes: &app-volumes
  - app-data:/app/data
  - /data/materia/analytics.duckdb:/data/materia/analytics.duckdb:ro

# Probe /health with the stdlib — the slim Python image ships no curl/wget.
x-app-healthcheck: &app-healthcheck
  test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:5000/health')"]
  interval: 10s
  timeout: 5s
  retries: 3
  start_period: 15s

services:
  # ── Always-on infrastructure ──────────────────────────────
  router:
    image: nginx:alpine
    restart: unless-stopped
    ports:
      - "5000:80"
    volumes:
      # deploy.sh rewrites this file to point at the live slot.
      - ./router/default.conf:/etc/nginx/conf.d/default.conf:ro
    networks:
      - net
    healthcheck:
      test: ["CMD", "nginx", "-t"]
      interval: 30s
      timeout: 5s

  litestream:
    image: litestream/litestream:latest
    restart: unless-stopped
    command: replicate -config /etc/litestream.yml
    volumes:
      - app-data:/app/data
      - ./litestream.yml:/etc/litestream.yml:ro

  # ── Blue slot ─────────────────────────────────────────────
  blue-app:
    <<: *slot-common
    profiles: ["blue"]
    environment: *app-environment
    volumes: *app-volumes
    healthcheck: *app-healthcheck

  blue-worker:
    <<: *slot-common
    profiles: ["blue"]
    command: python -m beanflows.worker
    environment: *worker-environment
    volumes:
      - app-data:/app/data

  blue-scheduler:
    <<: *slot-common
    profiles: ["blue"]
    command: python -m beanflows.worker scheduler
    environment: *worker-environment
    volumes:
      - app-data:/app/data

  # ── Green slot (mirror of blue) ───────────────────────────
  green-app:
    <<: *slot-common
    profiles: ["green"]
    environment: *app-environment
    volumes: *app-volumes
    healthcheck: *app-healthcheck

  green-worker:
    <<: *slot-common
    profiles: ["green"]
    command: python -m beanflows.worker
    environment: *worker-environment
    volumes:
      - app-data:/app/data

  green-scheduler:
    <<: *slot-common
    profiles: ["green"]
    command: python -m beanflows.worker scheduler
    environment: *worker-environment
    volumes:
      - app-data:/app/data

volumes:
  app-data:

networks:
  net:
56
docker-compose.yml
Normal file
56
docker-compose.yml
Normal file
@@ -0,0 +1,56 @@
# Development / single-host compose file (no blue-green slots).
services:
  app:
    build: .
    restart: unless-stopped
    ports:
      - "5000:5000"
    volumes:
      - ./data:/app/data
      - ./duckdb:/app/duckdb:ro
    env_file: .env
    environment:
      - DATABASE_PATH=/app/data/app.db
      - SERVING_DUCKDB_PATH=/app/duckdb/analytics.duckdb
    healthcheck:
      # FIX: the runtime image is python:3.12-slim, which has no curl, so the
      # previous curl-based check could never succeed. Probe /health with the
      # stdlib instead (same check as docker-compose.prod.yml).
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:5000/health')"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s

  worker:
    build: .
    restart: unless-stopped
    command: python -m beanflows.worker
    volumes:
      - ./data:/app/data
    env_file: .env
    environment:
      - DATABASE_PATH=/app/data/app.db
    depends_on:
      - app

  scheduler:
    build: .
    restart: unless-stopped
    command: python -m beanflows.worker scheduler
    volumes:
      - ./data:/app/data
    env_file: .env
    environment:
      - DATABASE_PATH=/app/data/app.db
    depends_on:
      - app

  # Optional: Litestream for backups
  litestream:
    image: litestream/litestream:latest
    restart: unless-stopped
    command: replicate -config /etc/litestream.yml
    volumes:
      - ./data:/app/data
      - ./litestream.yml:/etc/litestream.yml:ro
    depends_on:
      - app

# FIX: removed the dangling empty top-level `volumes:` key — every mount above
# is a bind mount, and a null volumes section is rejected by compose.
22
litestream.yml
Normal file
22
litestream.yml
Normal file
@@ -0,0 +1,22 @@
# Litestream configuration — continuous SQLite replication.
# Exactly one replica target should be active; the remote options (AWS S3,
# Cloudflare R2, or any S3-compatible store) are left commented out for
# production use, and the local file replica below is the default.

dbs:
  - path: /app/data/app.db
    replicas:
      # Option 1: AWS S3
      # - url: s3://your-bucket/beanflows/app.db
      #   access-key-id: ${AWS_ACCESS_KEY_ID}
      #   secret-access-key: ${AWS_SECRET_ACCESS_KEY}
      #   region: us-east-1

      # Option 2: Cloudflare R2
      # - url: s3://your-bucket/beanflows/app.db
      #   access-key-id: ${R2_ACCESS_KEY_ID}
      #   secret-access-key: ${R2_SECRET_ACCESS_KEY}
      #   endpoint: https://${R2_ACCOUNT_ID}.r2.cloudflarestorage.com

      # Option 3: Local file backup (for development)
      - path: /app/data/backups
        retention: 24h
        snapshot-interval: 1h
15
router/default.conf
Normal file
15
router/default.conf
Normal file
@@ -0,0 +1,15 @@
# Router config for the blue-green setup. This checked-in copy is only the
# initial default (blue slot); deploy.sh (_write_router_conf) overwrites it
# with the live slot on every deploy, so do not hand-edit expecting the change
# to persist.
upstream app {
    server blue-app:5000;
}

server {
    listen 80;

    location / {
        proxy_pass http://app;
        # Forward client/request metadata to the app behind the proxy.
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}
@@ -259,7 +259,7 @@ def run_export() -> None:
def web_code_changed() -> bool:
    """Check if web app code changed since last deploy.

    Diffs HEAD against its first parent and reports whether any path under
    ``web/`` or the root-level ``Dockerfile`` changed. (The Dockerfile moved
    from ``web/Dockerfile`` to the repo root in the monorepo layout — this
    reconstructs the post-move pathspec; the diff view contained both the old
    and new argument lists.)

    Returns:
        True if ``git diff`` lists at least one matching path, False otherwise
        (including when the command produces no output).
    """
    result = subprocess.run(
        ["git", "diff", "--name-only", "HEAD~1", "HEAD", "--", "web/", "Dockerfile"],
        capture_output=True, text=True, timeout=30,
    )
    return bool(result.stdout.strip())
@@ -353,7 +353,7 @@ def tick() -> None:
|
||||
# Deploy web app if code changed
|
||||
if os.getenv("SUPERVISOR_GIT_PULL") and web_code_changed():
|
||||
logger.info("Web code changed — deploying")
|
||||
ok = run_shell("./web/deploy.sh")
|
||||
ok = run_shell("./deploy.sh")
|
||||
if ok:
|
||||
send_alert("Deploy succeeded")
|
||||
else:
|
||||
|
||||
Reference in New Issue
Block a user