chore: delete stale web/ deployment files (now at repo root)

Removes: web/Dockerfile, web/docker-compose.yml, web/docker-compose.prod.yml,
web/deploy.sh, web/litestream.yml, web/router/, web/.copier-answers.yml,
web/.env.example — all superseded by root-level counterparts.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Deeman
2026-02-27 10:26:26 +01:00
parent 3a8dd6ba00
commit dee0600ee8
9 changed files with 13 additions and 437 deletions

View File

@@ -4,6 +4,19 @@ All notable changes to BeanFlows are documented here.
## [Unreleased]
### Changed
- **Monorepo copier migration**: moved all deployment files from `web/` to repo root so
`copier update` can manage them from the template
- `Dockerfile` at root: updated for monorepo layout (`web/src/` paths, `--package beanflows`)
- `docker-compose.yml`, `docker-compose.prod.yml`, `deploy.sh`, `litestream.yml`, `router/`
all moved to root
- `deploy.sh`: fixed sops path (`$APP_DIR/.env.prod.sops`, was `$APP_DIR/../.env.prod.sops`)
- `.copier-answers.yml` at root: points to local template, `_commit: v0.19.0`
- `.env.example` at root: updated paths for root-relative DuckDB locations
- `web/src/beanflows/core.py` (`Config`): added `ENABLE_CMS`, `ENABLE_DAAS`, `ENABLE_DIRECTORY`,
`ENABLE_LEADS`, `BUSINESS_MODEL` feature flags (mirrors copier.yml questions)
- `supervisor.py`: `web_code_changed()` now checks root `Dockerfile`; deploy script is `./deploy.sh`
### Added
- **ICE certified stock aging report** — Monthly age-bucket × port breakdown extracted via ICE API, stored as gzip CSV, modelled through raw→foundation→serving, exposed at `GET /api/v1/commodities/<code>/stocks/aging`
- **ICE historical warehouse stocks by port** — End-of-month data from Nov 1996 to present, downloaded from static ICE URL, full SQLMesh pipeline, exposed at `GET /api/v1/commodities/<code>/stocks/by-port`

View File

@@ -1,16 +0,0 @@
# Changes here will be overwritten by Copier; NEVER EDIT MANUALLY
# Copier answers file: records the template commit and the answers given at
# generation time so `copier update` can re-apply them against the template.
_commit: v0.17.0
_src_path: git@gitlab.com:deemanone/materia_saas_boilerplate.master.git
# Project identity
author_email: hendrik@beanflows.coffee
author_name: Hendrik Deeman
base_url: https://beanflows.coffee
business_model: saas
description: Commodity analytics for coffee traders
# Feature flags answered during templating
enable_cms: true
enable_daas: true
enable_directory: false
enable_i18n: false
enable_leads: false
payment_provider: paddle
project_name: BeanFlows
project_slug: beanflows

View File

@@ -1,38 +0,0 @@
# Example environment file — copy to .env and fill in real values.
# App
APP_NAME=BeanFlows
# Generate a real secret, e.g. `python -c "import secrets; print(secrets.token_hex(32))"`
SECRET_KEY=change-me-generate-a-real-secret
BASE_URL=http://localhost:5001
DEBUG=true
# Comma-separated list of emails granted admin access
ADMIN_EMAILS=admin@beanflows.coffee
# Database
# SQLite application database (users, sessions, jobs)
DATABASE_PATH=data/app.db
# DUCKDB_PATH points to the full pipeline DB (lakehouse.duckdb) — used by SQLMesh and export_serving.
# SERVING_DUCKDB_PATH points to the serving-only export (analytics.duckdb) — used by the web app.
# Run `uv run materia pipeline run export_serving` after each SQLMesh transform to populate it.
DUCKDB_PATH=../local.duckdb
SERVING_DUCKDB_PATH=../analytics.duckdb
# Auth
MAGIC_LINK_EXPIRY_MINUTES=15
SESSION_LIFETIME_DAYS=30
# Email (Resend)
RESEND_API_KEY=
EMAIL_FROM=hello@example.com
# Paddle
PADDLE_API_KEY=
PADDLE_WEBHOOK_SECRET=
PADDLE_PRICE_STARTER=
PADDLE_PRICE_PRO=
# Rate limiting
RATE_LIMIT_REQUESTS=100
RATE_LIMIT_WINDOW=60
# Waitlist (set to true to enable waitlist gate on /auth/signup)
WAITLIST_MODE=false
RESEND_AUDIENCE_WAITLIST=

View File

@@ -1,33 +0,0 @@
# CSS build stage (Tailwind standalone CLI, no Node.js)
# NOTE(review): "latest/download" is unpinned — builds are not reproducible;
# consider pinning a specific Tailwind release tag.
FROM debian:bookworm-slim AS css-build
ADD https://github.com/tailwindlabs/tailwindcss/releases/latest/download/tailwindcss-linux-x64 /usr/local/bin/tailwindcss
RUN chmod +x /usr/local/bin/tailwindcss
WORKDIR /app
COPY src/ ./src/
RUN tailwindcss -i ./src/beanflows/static/css/input.css \
-o ./src/beanflows/static/css/output.css --minify
# Build stage
# Installs the uv binary from the official image, then syncs locked non-dev
# dependencies into /app/.venv (bytecode precompiled for faster cold starts).
FROM python:3.12-slim AS build
COPY --from=ghcr.io/astral-sh/uv:0.8 /uv /uvx /bin/
WORKDIR /app
ENV UV_COMPILE_BYTECODE=1 UV_LINK_MODE=copy
COPY uv.lock pyproject.toml README.md ./
COPY src/ ./src/
RUN --mount=type=cache,target=/root/.cache/uv \
uv sync --no-dev --frozen
# Runtime stage
# Runs as non-root "appuser"; /app/data holds the SQLite DB (DATABASE_PATH).
FROM python:3.12-slim AS runtime
ENV PATH="/app/.venv/bin:$PATH"
RUN useradd -m -u 1000 appuser
WORKDIR /app
RUN mkdir -p /app/data && chown -R appuser:appuser /app
COPY --from=build --chown=appuser:appuser /app .
# Minified CSS from the css-build stage overwrites the source css path.
COPY --from=css-build /app/src/beanflows/static/css/output.css ./src/beanflows/static/css/output.css
USER appuser
ENV PYTHONUNBUFFERED=1
ENV DATABASE_PATH=/app/data/app.db
EXPOSE 5000
CMD ["hypercorn", "beanflows.app:app", "--bind", "0.0.0.0:5000", "--workers", "1"]

View File

@@ -1,125 +0,0 @@
#!/usr/bin/env bash
# Blue/green deployment for the BeanFlows web app.
#
# Flow: decrypt secrets → build the idle slot → back up the SQLite DB →
# run migrations → start the idle slot behind a health check (restoring
# the DB backup and exiting non-zero on failure) → rewrite the nginx
# router config to point at the new slot → stop the old slot → record
# the live slot name in .live-slot.
set -euo pipefail
APP_DIR="$(cd "$(dirname "$0")" && pwd)"
# ── Verify sops is installed (setup_server.sh installs it to /usr/local/bin) ──
if ! command -v sops &>/dev/null; then
echo "ERROR: sops not found — run infra/setup_server.sh first"
exit 1
fi
# ── Decrypt secrets (SOPS auto-discovers age key from ~/.config/sops/age/) ────
# The encrypted dotenv lives one directory above APP_DIR; the decrypted
# .env is written next to the compose files and locked down to 600.
echo "==> Decrypting secrets from .env.prod.sops..."
sops --input-type dotenv --output-type dotenv -d "$APP_DIR/../.env.prod.sops" > "$APP_DIR/.env"
chmod 600 "$APP_DIR/.env"
COMPOSE="docker compose -f docker-compose.prod.yml"
LIVE_FILE=".live-slot"
ROUTER_CONF="router/default.conf"
# ── Determine slots ─────────────────────────────────────────
# .live-slot holds "blue" or "green"; absent on first deploy → "none",
# in which case blue is the first target.
CURRENT=$(cat "$LIVE_FILE" 2>/dev/null || echo "none")
if [ "$CURRENT" = "blue" ]; then
TARGET="green"
else
TARGET="blue"
fi
echo "==> Current: $CURRENT → Deploying: $TARGET"
# ── Build ───────────────────────────────────────────────────
echo "==> Building $TARGET..."
$COMPOSE --profile "$TARGET" build
# ── Backup DB before migration ────────────────────────────────
# --entrypoint "" so the image runs a plain sh -c instead of the app;
# "|| true" keeps a missing DB (first deploy) from aborting the script.
BACKUP_TAG="pre-deploy-$(date +%Y%m%d-%H%M%S)"
echo "==> Backing up database (${BACKUP_TAG})..."
$COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
sh -c "cp /app/data/app.db /app/data/app.db.${BACKUP_TAG} 2>/dev/null || true"
# ── Migrate ─────────────────────────────────────────────────
echo "==> Running migrations..."
$COMPOSE --profile "$TARGET" run --rm "${TARGET}-app" \
python -m beanflows.migrations.migrate
# ── Ensure router points to current live slot before --wait ──
# nginx resolves upstream hostnames — if config points to a stopped slot,
# the health check fails. Reset router to current slot while target starts.
# Writes the nginx config for slot $1. The heredoc delimiter is unquoted
# so ${SLOT} expands here, while nginx's own $host/$remote_addr/... are
# backslash-escaped to reach the file literally.
_write_router_conf() {
local SLOT="$1"
mkdir -p "$(dirname "$ROUTER_CONF")"
cat > "$ROUTER_CONF" <<NGINX
upstream app {
server ${SLOT}-app:5000;
}
server {
listen 80;
location / {
proxy_pass http://app;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
}
}
NGINX
}
if [ "$CURRENT" != "none" ]; then
echo "==> Resetting router to current slot ($CURRENT)..."
_write_router_conf "$CURRENT"
$COMPOSE restart router
fi
# ── Start & health check ───────────────────────────────────
# On failure: dump logs, stop the target slot, restore the newest
# pre-deploy DB backup (migrations may have altered the schema), exit 1.
echo "==> Starting $TARGET (waiting for health check)..."
if ! $COMPOSE --profile "$TARGET" up -d --wait; then
echo "!!! Health check failed — dumping logs"
echo "--- ${TARGET}-app logs ---"
$COMPOSE --profile "$TARGET" logs --tail=60 "${TARGET}-app" 2>&1 || true
echo "--- router logs ---"
$COMPOSE logs --tail=10 router 2>&1 || true
echo "!!! Rolling back"
$COMPOSE stop "${TARGET}-app" "${TARGET}-worker" "${TARGET}-scheduler"
LATEST=$($COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
sh -c "ls -t /app/data/app.db.pre-deploy-* 2>/dev/null | head -1")
if [ -n "$LATEST" ]; then
echo "==> Restoring database from ${LATEST}..."
$COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
sh -c "cp '${LATEST}' /app/data/app.db"
fi
exit 1
fi
# ── Write router config and reload (new slot is healthy) ────
# The conf is bind-mounted into the router container, so writing the host
# file and sending nginx a reload signal switches traffic without downtime.
echo "==> Switching router to $TARGET..."
_write_router_conf "$TARGET"
$COMPOSE exec router nginx -s reload
# ── Cleanup old pre-deploy backups (keep last 3) ─────────────
$COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
sh -c "ls -t /app/data/app.db.pre-deploy-* 2>/dev/null | tail -n +4 | xargs rm -f" || true
# ── Stop old slot ───────────────────────────────────────────
if [ "$CURRENT" != "none" ]; then
echo "==> Stopping $CURRENT..."
$COMPOSE stop "${CURRENT}-app" "${CURRENT}-worker" "${CURRENT}-scheduler"
fi
# ── Record live slot ────────────────────────────────────────
echo "$TARGET" > "$LIVE_FILE"
echo "==> Deployed $TARGET successfully!"

View File

@@ -1,132 +0,0 @@
# Production compose: blue/green deployment driven by deploy.sh.
# Each slot (blue/green) is gated behind a compose profile; the nginx
# router fronts whichever slot deploy.sh last wrote into router/default.conf.
services:
  # ── Always-on infrastructure ──────────────────────────────
  router:
    image: nginx:alpine
    restart: unless-stopped
    ports:
      - "5000:80"
    volumes:
      # deploy.sh rewrites this host file and reloads nginx on slot switch.
      - ./router/default.conf:/etc/nginx/conf.d/default.conf:ro
    networks:
      - net
    healthcheck:
      test: ["CMD", "nginx", "-t"]
      interval: 30s
      timeout: 5s

  litestream:
    image: litestream/litestream:latest
    restart: unless-stopped
    command: replicate -config /etc/litestream.yml
    volumes:
      - app-data:/app/data
      - ./litestream.yml:/etc/litestream.yml:ro

  # ── Blue slot ─────────────────────────────────────────────
  blue-app:
    profiles: ["blue"]
    build:
      context: .
    restart: unless-stopped
    env_file: ./.env
    environment:
      - DATABASE_PATH=/app/data/app.db
      - SERVING_DUCKDB_PATH=/data/materia/analytics.duckdb
    volumes:
      - app-data:/app/data
      # Read-only serving DuckDB exported by the data pipeline on the host.
      - /data/materia/analytics.duckdb:/data/materia/analytics.duckdb:ro
    networks:
      - net
    healthcheck:
      # urllib instead of curl: the runtime image is python:3.12-slim.
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:5000/health')"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 15s

  blue-worker:
    profiles: ["blue"]
    build:
      context: .
    restart: unless-stopped
    command: python -m beanflows.worker
    env_file: ./.env
    environment:
      - DATABASE_PATH=/app/data/app.db
    volumes:
      - app-data:/app/data
    networks:
      - net

  blue-scheduler:
    profiles: ["blue"]
    build:
      context: .
    restart: unless-stopped
    command: python -m beanflows.worker scheduler
    env_file: ./.env
    environment:
      - DATABASE_PATH=/app/data/app.db
    volumes:
      - app-data:/app/data
    networks:
      - net

  # ── Green slot ────────────────────────────────────────────
  green-app:
    profiles: ["green"]
    build:
      context: .
    restart: unless-stopped
    env_file: ./.env
    environment:
      - DATABASE_PATH=/app/data/app.db
      - SERVING_DUCKDB_PATH=/data/materia/analytics.duckdb
    volumes:
      - app-data:/app/data
      - /data/materia/analytics.duckdb:/data/materia/analytics.duckdb:ro
    networks:
      - net
    healthcheck:
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:5000/health')"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 15s

  green-worker:
    profiles: ["green"]
    build:
      context: .
    restart: unless-stopped
    command: python -m beanflows.worker
    env_file: ./.env
    environment:
      - DATABASE_PATH=/app/data/app.db
    volumes:
      - app-data:/app/data
    networks:
      - net

  green-scheduler:
    profiles: ["green"]
    build:
      context: .
    restart: unless-stopped
    command: python -m beanflows.worker scheduler
    env_file: ./.env
    environment:
      - DATABASE_PATH=/app/data/app.db
    volumes:
      - app-data:/app/data
    networks:
      - net

volumes:
  # Shared named volume: app + workers + litestream all see the same SQLite DB.
  app-data:

networks:
  net:

View File

@@ -1,56 +0,0 @@
# Development/single-host compose: one app + worker + scheduler, host bind
# mounts for data. Blue/green production lives in docker-compose.prod.yml.
services:
  app:
    build: .
    restart: unless-stopped
    ports:
      - "5000:5000"
    volumes:
      - ./data:/app/data
      - ./duckdb:/app/duckdb:ro
    env_file: .env
    environment:
      - DATABASE_PATH=/app/data/app.db
      - SERVING_DUCKDB_PATH=/app/duckdb/analytics.duckdb
    healthcheck:
      # Fixed: the runtime image (python:3.12-slim) ships no curl, so the
      # previous curl-based check always failed; use urllib like prod.
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:5000/health')"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s

  worker:
    build: .
    restart: unless-stopped
    command: python -m beanflows.worker
    volumes:
      - ./data:/app/data
    env_file: .env
    environment:
      - DATABASE_PATH=/app/data/app.db
    depends_on:
      - app

  scheduler:
    build: .
    restart: unless-stopped
    command: python -m beanflows.worker scheduler
    volumes:
      - ./data:/app/data
    env_file: .env
    environment:
      - DATABASE_PATH=/app/data/app.db
    depends_on:
      - app

  # Optional: Litestream for backups
  litestream:
    image: litestream/litestream:latest
    restart: unless-stopped
    command: replicate -config /etc/litestream.yml
    volumes:
      - ./data:/app/data
      - ./litestream.yml:/etc/litestream.yml:ro
    depends_on:
      - app

# NOTE(review): the dangling empty top-level `volumes:` key was removed —
# all mounts here are host binds, and a bare `volumes:` parses as null,
# which Compose rejects ("volumes must be a mapping").

View File

@@ -1,22 +0,0 @@
# Litestream configuration for SQLite replication
# Supports S3, Cloudflare R2, MinIO, etc.
dbs:
  - path: /app/data/app.db
    replicas:
      # Option 1: AWS S3
      # - url: s3://your-bucket/beanflows/app.db
      #   access-key-id: ${AWS_ACCESS_KEY_ID}
      #   secret-access-key: ${AWS_SECRET_ACCESS_KEY}
      #   region: us-east-1
      # Option 2: Cloudflare R2
      # - url: s3://your-bucket/beanflows/app.db
      #   access-key-id: ${R2_ACCESS_KEY_ID}
      #   secret-access-key: ${R2_SECRET_ACCESS_KEY}
      #   endpoint: https://${R2_ACCOUNT_ID}.r2.cloudflarestorage.com
      # Option 3: Local file backup (for development)
      - path: /app/data/backups
        retention: 24h
        snapshot-interval: 1h

View File

@@ -1,15 +0,0 @@
# Router config for the blue/green setup. deploy.sh regenerates this file
# on every slot switch, pointing the upstream at the live slot's app
# container; "blue-app" here is only the initial/default target.
upstream app {
    server blue-app:5000;
}

server {
    listen 80;

    location / {
        # Forward everything to the live app slot, preserving the original
        # host and client-address headers for the application.
        proxy_pass http://app;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}