Compare commits

..

10 Commits

Author SHA1 Message Date
Deeman
2e928de156 chore: migrate from GitLab to self-hosted Gitea
Some checks failed
CI / test-cli (push) Has been cancelled
CI / test-sqlmesh (push) Has been cancelled
CI / test-web (push) Has been cancelled
CI / tag (push) Has been cancelled
Update bootstrap_supervisor.sh and setup_server.sh to use
git.padelnomics.io:2222 instead of gitlab.com.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-02-27 18:05:54 +01:00
Deeman
1de5d69906 fix(supervisor): use sqlmesh plan prod --auto-apply instead of run
'run' requires the prod environment to already exist and defaults to
dev_<username> on first run. 'plan --auto-apply' initializes prod if
missing and applies pending changes — fully self-healing.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-02-27 15:49:38 +01:00
Deeman
dd07b0218b fix(infra): chown -R APP_DIR so service user owns full tree
Without -R, a manual uv sync or git operation run as root would create
files under the app dir owned by root, breaking uv for the service user
(Permission denied on .venv/bin/python3).

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-02-27 15:23:20 +01:00
Deeman
a5d2a61cfb fix(billing): add missing helper functions and fix upsert_subscription signature
- Add upsert_billing_customer / get_billing_customer (billing_customers table)
- Add record_transaction (idempotent on provider_transaction_id)
- Fix upsert_subscription: remove provider_customer_id param, key by
  provider_subscription_id instead of user_id (allows multi-sub)
- Update webhook handler to call upsert_billing_customer separately

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-02-27 14:43:14 +01:00
Deeman
3faa29d8e5 fix(ci): move .gitlab-ci.yml to repo root so GitLab picks it up
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-02-27 14:26:37 +01:00
Deeman
2d79627ca9 fix(infra): change host port to 5001 to avoid conflict with padelnomics
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-02-27 14:12:45 +01:00
Deeman
33c8b4edbd update kyes 2026-02-27 13:51:56 +01:00
Deeman
5e22f2e1ae update secrets 2026-02-27 13:30:53 +01:00
Deeman
37b48d8f1c chore: use git remote for copier _src_path
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-02-27 11:03:18 +01:00
Deeman
dee0600ee8 chore: delete stale web/ deployment files (now at repo root)
Removes: web/Dockerfile, web/docker-compose.yml, web/docker-compose.prod.yml,
web/deploy.sh, web/litestream.yml, web/router/, web/.copier-answers.yml,
web/.env.example — all superseded by root-level counterparts.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-02-27 10:26:26 +01:00
22 changed files with 224 additions and 496 deletions

View File

@@ -1,6 +1,6 @@
# Changes here will be overwritten by Copier; NEVER EDIT MANUALLY
_commit: v0.19.0
_src_path: /home/Deeman/Projects/quart_saas_boilerplate
_src_path: git@gitlab.com:deemanone/materia_saas_boilerplate.master.git
author_email: hendrik@beanflows.coffee
author_name: Hendrik Deeman
base_url: https://beanflows.coffee

View File

@@ -36,3 +36,10 @@ RATE_LIMIT_WINDOW=60
# Waitlist (set to true to enable waitlist gate on /auth/signup)
WAITLIST_MODE=false
RESEND_AUDIENCE_WAITLIST=
# R2 Backup (optional — enables materia-backup.timer when all three are set)
# Get from: Cloudflare Dashboard → R2 → Manage R2 API Tokens
# R2_ENDPOINT format: https://<account_id>.r2.cloudflarestorage.com
R2_ACCESS_KEY_ID=
R2_SECRET_ACCESS_KEY=
R2_ENDPOINT=

View File

@@ -8,12 +8,16 @@ ADMIN_EMAILS=ENC[AES256_GCM,data:W7kmtrgck47tGpiHy4bIoF7TZouqjNGPHK+zQoZvxT9iz1r
DATABASE_PATH=ENC[AES256_GCM,data:Rzif9KAhrVn/F3U=,iv:VgXwn8b38/dFkiTYHDiKe660eWtGPdbeMPC4Xc2RPHk=,tag:OSlbuCeQHcVigj0zxnH+5Q==,type:str]
DUCKDB_PATH=ENC[AES256_GCM,data:UWMI9RTAHBNgb9EOxnmKUZovyGedu/xz5/yoOFpd,iv:oWVAoDtboVAC+SCTf+b/mQ+zzCGSRTrf3fjt1femqng=,tag:B46K6jTM0iVWQvL1FJlbyg==,type:str]
SERVING_DUCKDB_PATH=ENC[AES256_GCM,data:Y3bouhWcgp3d9v1KGuXuPZIFiIe/WKnVwEVs799T,iv:uTpVqvRYOhUKM2JNiFsX/YK/sfmajWI899vtmuWuozA=,tag:z8ASJTKzG6lSUBLuvzciwQ==,type:str]
#ENC[AES256_GCM,data:BUei2Df8W9g=,iv:NpOYrAUKS8tRwkGrh9SfyCowHaLSCaB9WrUEleVKp+Q=,tag:W5mebu7kkxC/ZAY+7NDfcA==,type:comment]
R2_ACCESS_KEY_ID=ENC[AES256_GCM,data:pd3cnES/P1/m7ydbes4QLD6lgLY1GVIVYman+oGGv4Y=,iv:UTlG2vehbaaI9Hs72Pgr84963EILdyZNDiV14n7HPtM=,tag:fXKpaVzlZLeiQeQaYS7uzg==,type:str]
R2_SECRET_ACCESS_KEY=ENC[AES256_GCM,data:KAY/y2sWeEudSOoljyDCNVaxqTJf31qtOOlZCPdgg3jJRYhRzXDfVJQ1LhNc0lRu4aGuXW295NNTtfASNCYg0A==,iv:O3DkVlD1r25Nb/jYeg4/lyzDnzux2XyYBPgd6+OmeW0=,tag:E4Onwxo7Vbocd30o8gKwmA==,type:str]
R2_ENDPOINT=ENC[AES256_GCM,data:PsiOwvRyxqaGtGoCkWp+xvWk3q/FJB68PanNbDFWD1f/B2/ZNP2P8Opy1jkQ13Eql1oXRyMtYVZ4LG1PdHtpon4=,iv:+5ZjakxyB6JUmMK/ayQW4XJffyTPr+f7kb2BXUq6Ics=,tag:+ohog6SewrSh/SLoM3t35A==,type:str]
#ENC[AES256_GCM,data:E3cNcRc=,iv:GR/I/NNyv/Ha6ZMH8nd0GZstJLI9MNLCutEKefuBDpk=,tag:dHOwKaKKPoWSt2TiVJVXJA==,type:comment]
MAGIC_LINK_EXPIRY_MINUTES=ENC[AES256_GCM,data:w1I=,iv:CGm9QV5OeVaDVBbRXJL/qO7RnOeSemG+zh3QCgww688=,tag:lfv4wxdx4hzFRC8vPu0Txg==,type:str]
SESSION_LIFETIME_DAYS=ENC[AES256_GCM,data:9fA=,iv:uBe1LugrsipQpOQX3wLFf4Er+v1SIQKNEcdglsmDwKM=,tag:g5lyQgBUCpWNWb2bkCmS3Q==,type:str]
#ENC[AES256_GCM,data:Rd7HVrAHuomB78FCbYDB,iv:kxl7/gArMFCkWuQiv+hXWxCzgNkwDbe2WMs7p9/rlXQ=,tag:+IOGQO/HziVl32CDjiI9Pg==,type:comment]
RESEND_API_KEY=ENC[AES256_GCM,data:srgytZ80mgTWF9DePH8QUR6TqrxI,iv:fCttiplfgdso2lKT2wPaS57SZ3npu0r2GIMnZLcAi7Q=,tag:k7OrEr2J5ikDWeDdZ6raRg==,type:str]
EMAIL_FROM=ENC[AES256_GCM,data:oI1SUEpq5lbRT1FmHQ7QecDSj222kQ==,iv:ou981i5Ksx89IzDmudYFVuKWnHqXFXfcMI1jLwBAtPQ=,tag:QYmUIsgcqccmgrOJX+1Kvg==,type:str]
RESEND_API_KEY=ENC[AES256_GCM,data:rbnmeF4TqhG6Z8FOgtTu1A8y6aMWQH7cu04eye88utZeLwag,iv:hg5zYYzeygee13QutIY2uXAAp3msVMDf6XoPSqtsMKE=,tag:aR0wYZ636VEpbvCN1lad3w==,type:str]
EMAIL_FROM=ENC[AES256_GCM,data:STVZgvdgAuX1keZZ6KXrFhLz2h0KA0yRQRl+FIPMNT459SsY,iv:J/gG8kgJzqvI80UiGWKV7g0rrW4NI3KQTsyYEnVf0Uk=,tag:8uL7vS3f2PCPDbeH+DBRLQ==,type:str]
#ENC[AES256_GCM,data:BLQ9NzKrxA==,iv:7Lc0e7NxwMWZ3T405KAdaNXWtGnnHHWcp6oI8m2GJio=,tag:/NMk8DWNjxrRoDcYjDjvPQ==,type:comment]
PADDLE_API_KEY=ENC[AES256_GCM,data:fS/C0Iygf+S1xjss49D2w8/LlcfI,iv:wLNuuqpBGnClizMRTIRtMdsu8SytU5p13zpkLbXEnNI=,tag:4//Cj5GQ/EolpKxOyEMkNg==,type:str]
PADDLE_WEBHOOK_SECRET=ENC[AES256_GCM,data:8Z/ODGntXsms8i+p+enaBVZjJuUa9ZIe,iv:NBr4IlxG60eQf7E43oDCCKKKDYeQSB1zMXL/z4YckP8=,tag:M4bF4y74bdLZgQ5dWkHFnQ==,type:str]
@@ -24,21 +28,21 @@ PADDLE_PRICE_PRO=ENC[AES256_GCM,data:qk74BtToWDvY32eaYKyB1G3q+znH,iv:TLwWA7erfJP
RATE_LIMIT_REQUESTS=ENC[AES256_GCM,data:c78c,iv:f7ZIb5n/f4DeMg5WKzVE/lbgfT7RfftnB3amrvuviE8=,tag:nPAI9P9oTV84cHWXOmYacw==,type:str]
RATE_LIMIT_WINDOW=ENC[AES256_GCM,data:rTs=,iv:s4ns8X4FPtOdmNtZ35xwgMk5F+kdiAnz0BKdhf6qN3k=,tag:6RSI4kp9ENb5iNj7jXY86Q==,type:str]
#ENC[AES256_GCM,data:IiDU8DxK2LgK,iv:n0zJ+UixDFs2u1rLSxJ/VnWXYJZ8Vda/BQdyS+RujEE=,tag:GfVtYNoHmy9GX5+ZW7QjPg==,type:comment]
WAITLIST_MODE=ENC[AES256_GCM,data:e0tSBHY=,iv:L83mH2xgqLakaq9wb4RymKeXb7l67MNo38zGmSbhi48=,tag:i0z/OalFlgvj/lP4ipzfYQ==,type:str]
RESEND_AUDIENCE_WAITLIST=ENC[AES256_GCM,data:FcQEW8NGrdY7naM1LZuqaAEllNpMjIV9,iv:v0XxXCsjmk1rigORy8vrf1NNzYfn093x2sNb1JAPXuY=,tag:XjLmhewcV3M+Lk4zUhIWbg==,type:str]
WAITLIST_MODE=ENC[AES256_GCM,data:PL6dKA==,iv:1447ZD6aAO33qcVV+LHAlpNbLznJmzm2MLf2pAgHsIA=,tag:J/WmINlDCGlHW6xSSMRDZg==,type:str]
#ENC[AES256_GCM,data:LgHFs0MBe0NfkE0DMJNYUkZh,iv:/C+IKpNQgSbOcwW9+1wN2gfwtY/OT5InkFDyJdPNw/M=,tag:jqEcXMfhowRVNSnrSs3ENg==,type:comment]
UMAMI_SCRIPT_URL=ENC[AES256_GCM,data:85Nyjy8Rho38dyerGD5Mmw==,iv:+MXncm4quelDuV4QTI2Qqgt9G9ZffIkVDYpIdfOVI5Y=,tag:6LVNGEipfo+XWfdA6g7O5w==,type:str]
UMAMI_WEBSITE_ID=ENC[AES256_GCM,data:ArK+fRNSVlXQBnbCOl6+,iv:1nhATMUcBq9m+GLGlkVXaJhFOH9yVfngux7ZPi1bzLM=,tag:SJSSl8G9rztaCbf49e54eQ==,type:str]
UMAMI_SCRIPT_URL=ENC[AES256_GCM,data:93ElrUmwstsR5gTx2AxFQ7vS14qZDYU3Yb7yXkeg28sA,iv:GzlnuNA8O6aTVYQYIsGhFJPewJQ3eIXm4Tiob/Yg/Ek=,tag:hh/LO6nlZx7xwohZhD/bcg==,type:str]
UMAMI_WEBSITE_ID=ENC[AES256_GCM,data:ZM3JMBncWZ0qYyGwxSbePkDNs9/Tw4+LfAWssB/nazL9t6h4,iv:x7HlnTqRwE4skZJv1t+K3uXHvyY9/ENa10R4QWaKWiQ=,tag:nXfGGZekAAHk1zLzNzvgpg==,type:str]
UMAMI_API_URL=ENC[AES256_GCM,data:ErcfZNoB9ESrRqSG/laQMvRJjx2ieeSRlFQ=,iv:WAvEGFsVS2y07dnGZI2KcSpJQ93WjUqW93en1bS03kk=,tag:F1a/yZihEfH/PpUfZHGKCw==,type:str]
UMAMI_API_TOKEN=ENC[AES256_GCM,data:N7xqH1we7SvCkPLxI7olyV3/bKaiDhUoVYfj/OgVja39+NyeZKeWwkpKy5Mg8Hs/KXBwDJgN8NHA5T4gJgvoZ4MC+n1FMMaAOcmZkTGH412Uu8SEiTlfj4ZLUC5v1+izzAPC6BgBngDEz5SOsH+sdh2Tb/eImIgGw/CxLjyRBk2LsCrKsG8KtYAJ71peH8FF8s+najkhYaZ7zUH2Kl7xcPtyBnMfF+0PfnUSaoEatucAXX0LEf+pCEPp80wbKb1sWafXmfAEXgRX+Lv+rjxSJxmr1xoJ5eG5loNs3gHUoj9WOwqwmws61pnhRtoIKi8sc0wu4D09m/RWuDm3dVqtWQ8AxuS/dmqx5VICVaBoDM6t9ePIpirVct/Hxge+68XKKKI0VaW6uWXECJn8T1vLyChNc0x5Z1mqi1E6tmcdvm6O/EBL/Qgw/MaHQk4UwgmEvRIBnqxxUcjYllS4CGADdcxmbEfNknMWFrCS/x0EIoI0dLzjst9xT/peUQk=,iv:OdJqyCreI9i8cYi+58cU8ZboCPWOPTCqo/iFO9zkh4M=,tag:8giR+gN/1pjWj+tbZOlilA==,type:str]
#ENC[AES256_GCM,data:zx6ieYt6brZX6IrIgGkfGCqDlf0FOw==,iv:3dBgRYc9eI/Dhx109NUMh2yW2Fmqegg0n3rsjcbzJEw=,tag:4lbfJT/n1T53D0peeI4IhQ==,type:comment]
LANDING_DIR=ENC[AES256_GCM,data:3YAGFB10q6g6ZLIHdDuvzMaD59+E,iv:S9NVxU/w+cwU1OPWjOEjnG8ocMdWrqR9VG4rFa4h4uA=,tag:0vq5Cn0Di1cUmbLrv1C1Uw==,type:str]
ALERT_WEBHOOK_URL=ENC[AES256_GCM,data:ARYR45VFPLX37u5UNn9fJeBNXDj8,iv:rWDphUHYX/nLD46fDNfx3ZyFEbYK1hMksHCGqWTI66o=,tag:qE1FR6Sj+k07Yb+SlV3Vgw==,type:str]
#ENC[AES256_GCM,data:ySDq589xP4ZwGD5JTQxh1Lr89h8zoz7RDLYfSl2Up/TSFF1tqA==,iv:oBQMgWLlT+r4TbtdLPSs7q7stg/qnEEbsu65+HjGBqQ=,tag:JiySwKWJIuZbEsY0sWJnQA==,type:comment]
GITLAB_READ_TOKEN=ENC[AES256_GCM,data:JRxX3H9mj3DCa0kyi7aGqvop,iv:W/oqCW7sDv791VclZteW0M+jkab3unGVWJoB//w4FJ4=,tag:3FJbkKPxH/obs67Hcd80+A==,type:str]
ALERT_WEBHOOK_URL=ENC[AES256_GCM,data:ArkJg/hBzLN8P/Q+jmbmWOM2iQVLybBCaoCGMJgaYQM=,iv:zroifzAQ4rGn+QLF/SZUPeWmIOFkLWq8QVtVWUeiYOk=,tag:8oVeHuXStKxCLaP77TMxDA==,type:str]
NTFY_TOKEN=ENC[AES256_GCM,data:63Y734rhyTCHt4hdw4S/LPOZ/eEktk6X7SMFsFidwps=,iv:3OJEXCN5sqGyOJwE6fOniBXXslT/rOMASOotE1s+quk=,tag:NTc3YJp6dtNRpLGxIDLO8Q==,type:str]
sops_age__list_0__map_enc=-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBrUWNTNnlLMWgzU21RY3p4\nUjRaVnJzK3lBS2hUTVZCcXI0Y0Q1K2QrOERJCi9qeVc2UVlleWZldUpUckF3WWVM\nMVhWanpGdGlGdXhGV2FnQytQYnZCSncKLS0tIER6RlNqMDhXMlFObkhOVmtVOXZw\nSVRHZTVzYkl5aytSWmNEblhTOVJCOGcKjWhIRS+pjFCMNp52Nt5GyLMhG9Xich7O\n8AlIkVaNN96Q7bVa52norLUQNQOprIGwEu5JXdUFU5Y3ULnoCTQimQ==\n-----END AGE ENCRYPTED FILE-----\n
sops_age__list_0__map_recipient=age1f5002gj4s78jju45jd28kuejtcfhn5cdujz885fl7z2p9ym68pnsgky87a
sops_age__list_1__map_enc=-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBQTllrSWJnU2I4WitWYVY0\nL1RRdFZudzdoSU1Ycmorb3ZRS0p0YnNBR2gwCmQ1T0M0YlpuNFo4SzVMUC9iTlM0\ndHhpd0NOZldJcVBsU0d6M3BwVVJaWjQKLS0tICtVVUVTRm1QSFZCNncvb0RqdC8r\nTmpMM3NmMFBKMDN6QjlIdko3NmFubEkK94oIMrcOYcBy69NjWn6NyWqhvKcP/0Az\nbOuqR0tgSs5xb8s9UUFHRpegZ3uJhQz4VMvOBN8fYaQjO5+4X22D9A==\n-----END AGE ENCRYPTED FILE-----\n
sops_age__list_1__map_recipient=age1frqzxxwumn0zfkl3mp647t3zgx7uudksevsqrnxvycfhkl84yvmqskdzq5
sops_lastmodified=2026-02-26T19:25:04Z
sops_mac=ENC[AES256_GCM,data:LnKzPbxRoxtzw54ZqYuuZxq458Q8Mpo5edT7GvuLrw19NsYPmWMBcFmyXZH6WorEdVyy0YYYJLhiBHCm4J1rnYDCa/331xMtg+qG9N++u1OcpOGZI5QSMbEEFArSLWfOPHqdbYYZ4a5KiRd9L05bkW9kXsfLztbBzHtnxgzoQxQ=,iv:q2eMBkAv9M/liBlm5Tj6+g1V+CdgBYxlxfng2DqFH1Y=,tag:D3MTaywCb2rE4h9CH2EhKA==,type:str]
sops_lastmodified=2026-02-27T12:28:08Z
sops_mac=ENC[AES256_GCM,data:5olw6kL/IWhL3MtQZev0s58EuKaaWpiXpsEdPJEhGHvrjEVfqrI0gCaPdzn5fuCu2hXu0iP13zP3MbInhaWlpCUc8A0LZFjzvkvPPHYKgFrAgzImDa78OJlZDeesYu4co8JoX3FjyRBL4YxFKq7UNPRzmlNJBPTtWFPIe8EtUfQ=,iv:rApUbbZNDhII9pwtfRlPHGLWljVbarusv5PVz9B9AYs=,tag:r8FBVGbubqxUdrH2sAJRgg==,type:str]
sops_unencrypted_suffix=_unencrypted
sops_version=3.12.1

64
.gitlab-ci.yml Normal file
View File

@@ -0,0 +1,64 @@
image: python:3.13
stages:
- test
- tag
variables:
UV_CACHE_DIR: "$CI_PROJECT_DIR/.uv-cache"
cache:
paths:
- .uv-cache/
.uv_setup: &uv_setup
- curl -LsSf https://astral.sh/uv/install.sh | sh
- source $HOME/.local/bin/env
workflow:
rules:
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
- if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
# ── Tests ─────────────────────────────────────────────────────────────────────
test:cli:
stage: test
before_script:
- *uv_setup
script:
- uv sync --all-packages
- uv run pytest tests/
test:sqlmesh:
stage: test
before_script:
- *uv_setup
script:
- uv sync --all-packages
- cd transform/sqlmesh_materia && uv run sqlmesh test
test:web:
stage: test
before_script:
- *uv_setup
script:
- uv sync --all-packages
- cd web && uv run pytest tests/ -x -q
- cd web && uv run ruff check src/ tests/
# ── Tag (pull-based deploy) ───────────────────────────────────────────────────
# Creates v<N> tag after all tests pass. The on-server supervisor polls for new
# tags every 60s and deploys automatically. No SSH keys or deploy credentials
# needed in CI — only the built-in CI_JOB_TOKEN.
tag:
stage: tag
image: alpine:latest
before_script:
- apk add --no-cache git
script:
- git tag "v${CI_PIPELINE_IID}"
- git push "https://gitlab-ci-token:${CI_JOB_TOKEN}@${CI_SERVER_HOST}/${CI_PROJECT_PATH}.git" "v${CI_PIPELINE_IID}"
rules:
- if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH

View File

@@ -4,6 +4,19 @@ All notable changes to BeanFlows are documented here.
## [Unreleased]
### Changed
- **Monorepo copier migration**: moved all deployment files from `web/` to repo root so
`copier update` can manage them from the template
- `Dockerfile` at root: updated for monorepo layout (`web/src/` paths, `--package beanflows`)
- `docker-compose.yml`, `docker-compose.prod.yml`, `deploy.sh`, `litestream.yml`, `router/`
all moved to root
- `deploy.sh`: fixed sops path (`$APP_DIR/.env.prod.sops`, was `$APP_DIR/../.env.prod.sops`)
- `.copier-answers.yml` at root: points to local template, `_commit: v0.19.0`
- `.env.example` at root: updated paths for root-relative DuckDB locations
- `web/src/beanflows/core.py` (`Config`): added `ENABLE_CMS`, `ENABLE_DAAS`, `ENABLE_DIRECTORY`,
`ENABLE_LEADS`, `BUSINESS_MODEL` feature flags (mirrors copier.yml questions)
- `supervisor.py`: `web_code_changed()` now checks root `Dockerfile`; deploy script is `./deploy.sh`
### Added
- **ICE certified stock aging report** — Monthly age-bucket × port breakdown extracted via ICE API, stored as gzip CSV, modelled through raw→foundation→serving, exposed at `GET /api/v1/commodities/<code>/stocks/aging`
- **ICE historical warehouse stocks by port** — End-of-month data from Nov 1996 to present, downloaded from static ICE URL, full SQLMesh pipeline, exposed at `GET /api/v1/commodities/<code>/stocks/by-port`

View File

@@ -209,3 +209,6 @@ Read `coding_philosophy.md` for the full guide. Key points:
| `SERVING_DUCKDB_PATH` | `analytics.duckdb` | Path to the serving DB (read by web app) |
| `ALERT_WEBHOOK_URL` | _(empty)_ | ntfy.sh URL for supervisor failure alerts |
| `SUPERVISOR_GIT_PULL` | _(unset)_ | Set to any value to enable tag-based git pull in supervisor |
| `R2_ACCESS_KEY_ID` | _(empty)_ | Cloudflare R2 access key — enables backup timer when all three R2 vars are set |
| `R2_SECRET_ACCESS_KEY` | _(empty)_ | Cloudflare R2 secret key |
| `R2_ENDPOINT` | _(empty)_ | Cloudflare account ID (used to construct R2 endpoint URL) |

View File

@@ -5,7 +5,7 @@ services:
image: nginx:alpine
restart: unless-stopped
ports:
- "5000:80"
- "5001:80"
volumes:
- ./router/default.conf:/etc/nginx/conf.d/default.conf:ro
networks:

View File

@@ -5,5 +5,5 @@ Wants=network-online.target
[Service]
Type=oneshot
ExecStart=/usr/bin/rclone sync /data/materia/landing/ r2:materia-raw/landing/ --log-level INFO
ExecStart=/usr/bin/rclone sync /data/materia/landing/ r2:backup/materia/landing/ --log-level INFO
TimeoutStartSec=1800

View File

@@ -3,12 +3,14 @@
#
# Get credentials from: Cloudflare Dashboard → R2 → Manage R2 API Tokens
# Or from Pulumi ESC: esc env open beanflows/prod --format shell
#
# Bucket: backup (syncs to backup/materia/landing/)
[r2]
type = s3
provider = Cloudflare
access_key_id = <R2_ACCESS_KEY_ID>
secret_access_key = <R2_SECRET_ACCESS_KEY>
endpoint = https://<CLOUDFLARE_ACCOUNT_ID>.r2.cloudflarestorage.com
endpoint = <R2_ENDPOINT>
acl = private
no_check_bucket = true

View File

@@ -15,7 +15,7 @@ set -euo pipefail
SERVICE_USER="beanflows_service"
REPO_DIR="/opt/materia"
GITLAB_PROJECT="deemanone/materia"
GITEA_REPO="ssh://git@git.padelnomics.io:2222/deemanone/materia.git"
UV="/home/${SERVICE_USER}/.local/bin/uv"
[ "$(id -u)" = "0" ] || { echo "ERROR: Run as root"; exit 1; }
@@ -35,7 +35,7 @@ if [ -d "${REPO_DIR}/.git" ]; then
sudo -u "${SERVICE_USER}" git -C "${REPO_DIR}" fetch --tags --prune-tags origin
else
sudo -u "${SERVICE_USER}" git clone \
"git@gitlab.com:${GITLAB_PROJECT}.git" "${REPO_DIR}"
"${GITEA_REPO}" "${REPO_DIR}"
fi
LATEST_TAG=$(sudo -u "${SERVICE_USER}" \
@@ -47,23 +47,74 @@ fi
# ── Decrypt secrets ───────────────────────────────────────────────────────────
sudo -u "${SERVICE_USER}" bash -c \
"sops --input-type dotenv --output-type dotenv -d ${REPO_DIR}/.env.prod.sops > ${REPO_DIR}/.env"
"SOPS_AGE_KEY_FILE=/home/${SERVICE_USER}/.config/sops/age/keys.txt \
sops --input-type dotenv --output-type dotenv -d ${REPO_DIR}/.env.prod.sops > ${REPO_DIR}/.env"
chmod 600 "${REPO_DIR}/.env"
# ── Python dependencies ───────────────────────────────────────────────────────
sudo -u "${SERVICE_USER}" bash -c "cd ${REPO_DIR} && ${UV} sync --all-packages"
# ── Systemd service ───────────────────────────────────────────────────────────
# ── Systemd supervisor service ────────────────────────────────────────────────
cp "${REPO_DIR}/infra/supervisor/materia-supervisor.service" /etc/systemd/system/
systemctl daemon-reload
systemctl enable --now materia-supervisor
# ── R2 backup timer (optional) ────────────────────────────────────────────────
# Enabled only when R2_ACCESS_KEY_ID, R2_SECRET_ACCESS_KEY, and R2_ENDPOINT
# are present in .env. Idempotent: rclone.conf is always regenerated from env
# (so credential rotations take effect on re-run); systemd units are only copied
# when they differ from what's already installed.
_env_get() { grep -E "^${1}=" "${REPO_DIR}/.env" 2>/dev/null | head -1 | cut -d= -f2- | tr -d '"'"'" || true; }
R2_KEY=$(_env_get R2_ACCESS_KEY_ID)
R2_SECRET=$(_env_get R2_SECRET_ACCESS_KEY)
R2_ENDPOINT=$(_env_get R2_ENDPOINT)
if [ -n "${R2_KEY}" ] && [ -n "${R2_SECRET}" ] && [ -n "${R2_ENDPOINT}" ]; then
echo "$(date '+%H:%M:%S') ==> Configuring R2 backup..."
RCLONE_CONF_DIR="/home/${SERVICE_USER}/.config/rclone"
RCLONE_CONF="${RCLONE_CONF_DIR}/rclone.conf"
sudo -u "${SERVICE_USER}" mkdir -p "${RCLONE_CONF_DIR}"
# Always write from env so credential rotations take effect automatically.
cat > "${RCLONE_CONF}" <<EOF
[r2]
type = s3
provider = Cloudflare
access_key_id = ${R2_KEY}
secret_access_key = ${R2_SECRET}
endpoint = ${R2_ENDPOINT}
acl = private
no_check_bucket = true
EOF
chown "${SERVICE_USER}:${SERVICE_USER}" "${RCLONE_CONF}"
chmod 600 "${RCLONE_CONF}"
UNITS_CHANGED=0
for unit in materia-backup.service materia-backup.timer; do
if ! diff -q "${REPO_DIR}/infra/backup/${unit}" "/etc/systemd/system/${unit}" >/dev/null 2>&1; then
cp "${REPO_DIR}/infra/backup/${unit}" /etc/systemd/system/
UNITS_CHANGED=1
fi
done
[ "${UNITS_CHANGED}" = "1" ] && systemctl daemon-reload
systemctl enable --now materia-backup.timer
echo "$(date '+%H:%M:%S') ==> R2 backup timer enabled."
else
echo "$(date '+%H:%M:%S') ==> R2_ACCESS_KEY_ID / R2_SECRET_ACCESS_KEY / R2_ENDPOINT not set — skipping backup timer."
fi
echo ""
echo "=== Bootstrap complete! ==="
echo ""
echo "Check status: systemctl status materia-supervisor"
echo "View logs: journalctl -u materia-supervisor -f"
echo "Workflow status: sudo -u ${SERVICE_USER} ${UV} run -p ${REPO_DIR} python src/materia/supervisor.py status"
echo "Backup timer: systemctl list-timers materia-backup.timer"
echo "Tag: $(sudo -u "${SERVICE_USER}" git -C "${REPO_DIR}" describe --tags --always)"

View File

@@ -24,7 +24,7 @@ Hetzner Server (NVMe)
1. **Extract** — Supervisor runs due extractors per `infra/supervisor/workflows.toml`
2. **Transform** — SQLMesh reads landing → writes `lakehouse.duckdb`
3. **Export**`export_serving` copies `serving.*``analytics.duckdb` (atomic rename)
4. **Backup** — rclone syncs `/data/materia/landing/` → R2 `materia-raw/landing/`
4. **Backup** — rclone syncs `/data/materia/landing/` → R2 `backup/materia/landing/`
5. **Web** — Web app reads `analytics.duckdb` read-only (per-thread connections)
## Setup (new server)
@@ -59,20 +59,7 @@ ssh root@<server_ip> 'bash -s' < infra/bootstrap_supervisor.sh
This clones the repo via SSH, decrypts secrets, installs Python dependencies, and starts the supervisor service. No access tokens required — access is via the SSH deploy key. (All tools must already be installed by setup_server.sh.)
### 4. Set up R2 backup
```bash
apt install rclone
# Configure rclone as the service user (used by the backup timer):
sudo -u beanflows_service mkdir -p /home/beanflows_service/.config/rclone
sudo -u beanflows_service cp infra/backup/rclone.conf.example \
/home/beanflows_service/.config/rclone/rclone.conf
# Fill in R2 credentials from .env.prod.sops (ACCESS_KEY_ID, SECRET_ACCESS_KEY, bucket endpoint)
cp infra/backup/materia-backup.service /etc/systemd/system/
cp infra/backup/materia-backup.timer /etc/systemd/system/
systemctl daemon-reload
systemctl enable --now materia-backup.timer
```
If `R2_ACCESS_KEY_ID`, `R2_SECRET_ACCESS_KEY`, and `R2_ENDPOINT` are present in `.env.prod.sops`, bootstrap also generates `rclone.conf` and enables `materia-backup.timer` automatically. No manual R2 setup step needed.
## Secrets management

View File

@@ -41,7 +41,7 @@ usermod -aG docker "${SERVICE_USER}"
log "Creating directories..."
mkdir -p "${APP_DIR}" "${DATA_DIR}/landing"
chown "${SERVICE_USER}:${SERVICE_USER}" "${APP_DIR}"
chown -R "${SERVICE_USER}:${SERVICE_USER}" "${APP_DIR}"
chown -R "${SERVICE_USER}:${SERVICE_USER}" "${DATA_DIR}"
# ── System tools ──────────────────────────────────────────────────────────────
@@ -68,7 +68,8 @@ fi
if [ ! -f "${SSH_DIR}/config" ]; then
cat > "${SSH_DIR}/config" <<EOF
Host gitlab.com
Host git.padelnomics.io
Port 2222
IdentityFile ${DEPLOY_KEY}
IdentitiesOnly yes
EOF
@@ -76,7 +77,7 @@ EOF
chmod 600 "${SSH_DIR}/config"
fi
ssh-keyscan -H gitlab.com >> "${SSH_DIR}/known_hosts" 2>/dev/null
ssh-keyscan -H -p 2222 git.padelnomics.io >> "${SSH_DIR}/known_hosts" 2>/dev/null
sort -u "${SSH_DIR}/known_hosts" -o "${SSH_DIR}/known_hosts"
chown "${SERVICE_USER}:${SERVICE_USER}" "${SSH_DIR}/known_hosts"
chmod 644 "${SSH_DIR}/known_hosts"

View File

@@ -240,7 +240,7 @@ def run_shell(cmd: str, timeout_seconds: int = SUBPROCESS_TIMEOUT_SECONDS) -> bo
def run_transform() -> None:
"""Run SQLMesh — evaluates model staleness internally."""
logger.info("Running SQLMesh transform")
ok = run_shell("uv run sqlmesh -p transform/sqlmesh_materia run")
ok = run_shell("uv run sqlmesh -p transform/sqlmesh_materia plan prod --auto-apply")
if not ok:
send_alert("SQLMesh transform failed")

View File

@@ -1,16 +0,0 @@
# Changes here will be overwritten by Copier; NEVER EDIT MANUALLY
_commit: v0.17.0
_src_path: git@gitlab.com:deemanone/materia_saas_boilerplate.master.git
author_email: hendrik@beanflows.coffee
author_name: Hendrik Deeman
base_url: https://beanflows.coffee
business_model: saas
description: Commodity analytics for coffee traders
enable_cms: true
enable_daas: true
enable_directory: false
enable_i18n: false
enable_leads: false
payment_provider: paddle
project_name: BeanFlows
project_slug: beanflows

View File

@@ -1,38 +0,0 @@
# App
APP_NAME=BeanFlows
SECRET_KEY=change-me-generate-a-real-secret
BASE_URL=http://localhost:5001
DEBUG=true
ADMIN_EMAILS=admin@beanflows.coffee
# Database
DATABASE_PATH=data/app.db
# DUCKDB_PATH points to the full pipeline DB (lakehouse.duckdb) — used by SQLMesh and export_serving.
# SERVING_DUCKDB_PATH points to the serving-only export (analytics.duckdb) — used by the web app.
# Run `uv run materia pipeline run export_serving` after each SQLMesh transform to populate it.
DUCKDB_PATH=../local.duckdb
SERVING_DUCKDB_PATH=../analytics.duckdb
# Auth
MAGIC_LINK_EXPIRY_MINUTES=15
SESSION_LIFETIME_DAYS=30
# Email (Resend)
RESEND_API_KEY=
EMAIL_FROM=hello@example.com
# Paddle
PADDLE_API_KEY=
PADDLE_WEBHOOK_SECRET=
PADDLE_PRICE_STARTER=
PADDLE_PRICE_PRO=
# Rate limiting
RATE_LIMIT_REQUESTS=100
RATE_LIMIT_WINDOW=60
# Waitlist (set to true to enable waitlist gate on /auth/signup)
WAITLIST_MODE=false
RESEND_AUDIENCE_WAITLIST=

View File

@@ -1,33 +0,0 @@
# CSS build stage (Tailwind standalone CLI, no Node.js)
FROM debian:bookworm-slim AS css-build
ADD https://github.com/tailwindlabs/tailwindcss/releases/latest/download/tailwindcss-linux-x64 /usr/local/bin/tailwindcss
RUN chmod +x /usr/local/bin/tailwindcss
WORKDIR /app
COPY src/ ./src/
RUN tailwindcss -i ./src/beanflows/static/css/input.css \
-o ./src/beanflows/static/css/output.css --minify
# Build stage
FROM python:3.12-slim AS build
COPY --from=ghcr.io/astral-sh/uv:0.8 /uv /uvx /bin/
WORKDIR /app
ENV UV_COMPILE_BYTECODE=1 UV_LINK_MODE=copy
COPY uv.lock pyproject.toml README.md ./
COPY src/ ./src/
RUN --mount=type=cache,target=/root/.cache/uv \
uv sync --no-dev --frozen
# Runtime stage
FROM python:3.12-slim AS runtime
ENV PATH="/app/.venv/bin:$PATH"
RUN useradd -m -u 1000 appuser
WORKDIR /app
RUN mkdir -p /app/data && chown -R appuser:appuser /app
COPY --from=build --chown=appuser:appuser /app .
COPY --from=css-build /app/src/beanflows/static/css/output.css ./src/beanflows/static/css/output.css
USER appuser
ENV PYTHONUNBUFFERED=1
ENV DATABASE_PATH=/app/data/app.db
EXPOSE 5000
CMD ["hypercorn", "beanflows.app:app", "--bind", "0.0.0.0:5000", "--workers", "1"]

View File

@@ -1,125 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
APP_DIR="$(cd "$(dirname "$0")" && pwd)"
# ── Verify sops is installed (setup_server.sh installs it to /usr/local/bin) ──
if ! command -v sops &>/dev/null; then
echo "ERROR: sops not found — run infra/setup_server.sh first"
exit 1
fi
# ── Decrypt secrets (SOPS auto-discovers age key from ~/.config/sops/age/) ────
echo "==> Decrypting secrets from .env.prod.sops..."
sops --input-type dotenv --output-type dotenv -d "$APP_DIR/../.env.prod.sops" > "$APP_DIR/.env"
chmod 600 "$APP_DIR/.env"
COMPOSE="docker compose -f docker-compose.prod.yml"
LIVE_FILE=".live-slot"
ROUTER_CONF="router/default.conf"
# ── Determine slots ─────────────────────────────────────────
CURRENT=$(cat "$LIVE_FILE" 2>/dev/null || echo "none")
if [ "$CURRENT" = "blue" ]; then
TARGET="green"
else
TARGET="blue"
fi
echo "==> Current: $CURRENT → Deploying: $TARGET"
# ── Build ───────────────────────────────────────────────────
echo "==> Building $TARGET..."
$COMPOSE --profile "$TARGET" build
# ── Backup DB before migration ────────────────────────────────
BACKUP_TAG="pre-deploy-$(date +%Y%m%d-%H%M%S)"
echo "==> Backing up database (${BACKUP_TAG})..."
$COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
sh -c "cp /app/data/app.db /app/data/app.db.${BACKUP_TAG} 2>/dev/null || true"
# ── Migrate ─────────────────────────────────────────────────
echo "==> Running migrations..."
$COMPOSE --profile "$TARGET" run --rm "${TARGET}-app" \
python -m beanflows.migrations.migrate
# ── Ensure router points to current live slot before --wait ──
# nginx resolves upstream hostnames — if config points to a stopped slot,
# the health check fails. Reset router to current slot while target starts.
_write_router_conf() {
local SLOT="$1"
mkdir -p "$(dirname "$ROUTER_CONF")"
cat > "$ROUTER_CONF" <<NGINX
upstream app {
server ${SLOT}-app:5000;
}
server {
listen 80;
location / {
proxy_pass http://app;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
}
}
NGINX
}
if [ "$CURRENT" != "none" ]; then
echo "==> Resetting router to current slot ($CURRENT)..."
_write_router_conf "$CURRENT"
$COMPOSE restart router
fi
# ── Start & health check ───────────────────────────────────
echo "==> Starting $TARGET (waiting for health check)..."
if ! $COMPOSE --profile "$TARGET" up -d --wait; then
echo "!!! Health check failed — dumping logs"
echo "--- ${TARGET}-app logs ---"
$COMPOSE --profile "$TARGET" logs --tail=60 "${TARGET}-app" 2>&1 || true
echo "--- router logs ---"
$COMPOSE logs --tail=10 router 2>&1 || true
echo "!!! Rolling back"
$COMPOSE stop "${TARGET}-app" "${TARGET}-worker" "${TARGET}-scheduler"
LATEST=$($COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
sh -c "ls -t /app/data/app.db.pre-deploy-* 2>/dev/null | head -1")
if [ -n "$LATEST" ]; then
echo "==> Restoring database from ${LATEST}..."
$COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
sh -c "cp '${LATEST}' /app/data/app.db"
fi
exit 1
fi
# ── Write router config and reload (new slot is healthy) ────
echo "==> Switching router to $TARGET..."
_write_router_conf "$TARGET"
$COMPOSE exec router nginx -s reload
# ── Cleanup old pre-deploy backups (keep last 3) ─────────────
$COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
sh -c "ls -t /app/data/app.db.pre-deploy-* 2>/dev/null | tail -n +4 | xargs rm -f" || true
# ── Stop old slot ───────────────────────────────────────────
if [ "$CURRENT" != "none" ]; then
echo "==> Stopping $CURRENT..."
$COMPOSE stop "${CURRENT}-app" "${CURRENT}-worker" "${CURRENT}-scheduler"
fi
# ── Record live slot ────────────────────────────────────────
echo "$TARGET" > "$LIVE_FILE"
echo "==> Deployed $TARGET successfully!"

View File

@@ -1,132 +0,0 @@
services:
# ── Always-on infrastructure ──────────────────────────────
router:
image: nginx:alpine
restart: unless-stopped
ports:
- "5000:80"
volumes:
- ./router/default.conf:/etc/nginx/conf.d/default.conf:ro
networks:
- net
healthcheck:
test: ["CMD", "nginx", "-t"]
interval: 30s
timeout: 5s
litestream:
image: litestream/litestream:latest
restart: unless-stopped
command: replicate -config /etc/litestream.yml
volumes:
- app-data:/app/data
- ./litestream.yml:/etc/litestream.yml:ro
# ── Blue slot ─────────────────────────────────────────────
blue-app:
profiles: ["blue"]
build:
context: .
restart: unless-stopped
env_file: ./.env
environment:
- DATABASE_PATH=/app/data/app.db
- SERVING_DUCKDB_PATH=/data/materia/analytics.duckdb
volumes:
- app-data:/app/data
- /data/materia/analytics.duckdb:/data/materia/analytics.duckdb:ro
networks:
- net
healthcheck:
test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:5000/health')"]
interval: 10s
timeout: 5s
retries: 3
start_period: 15s
blue-worker:
profiles: ["blue"]
build:
context: .
restart: unless-stopped
command: python -m beanflows.worker
env_file: ./.env
environment:
- DATABASE_PATH=/app/data/app.db
volumes:
- app-data:/app/data
networks:
- net
blue-scheduler:
profiles: ["blue"]
build:
context: .
restart: unless-stopped
command: python -m beanflows.worker scheduler
env_file: ./.env
environment:
- DATABASE_PATH=/app/data/app.db
volumes:
- app-data:/app/data
networks:
- net
# ── Green slot ────────────────────────────────────────────
green-app:
profiles: ["green"]
build:
context: .
restart: unless-stopped
env_file: ./.env
environment:
- DATABASE_PATH=/app/data/app.db
- SERVING_DUCKDB_PATH=/data/materia/analytics.duckdb
volumes:
- app-data:/app/data
- /data/materia/analytics.duckdb:/data/materia/analytics.duckdb:ro
networks:
- net
healthcheck:
test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:5000/health')"]
interval: 10s
timeout: 5s
retries: 3
start_period: 15s
green-worker:
profiles: ["green"]
build:
context: .
restart: unless-stopped
command: python -m beanflows.worker
env_file: ./.env
environment:
- DATABASE_PATH=/app/data/app.db
volumes:
- app-data:/app/data
networks:
- net
green-scheduler:
profiles: ["green"]
build:
context: .
restart: unless-stopped
command: python -m beanflows.worker scheduler
env_file: ./.env
environment:
- DATABASE_PATH=/app/data/app.db
volumes:
- app-data:/app/data
networks:
- net
volumes:
app-data:
networks:
net:

View File

@@ -1,56 +0,0 @@
services:
app:
build: .
restart: unless-stopped
ports:
- "5000:5000"
volumes:
- ./data:/app/data
- ./duckdb:/app/duckdb:ro
env_file: .env
environment:
- DATABASE_PATH=/app/data/app.db
- SERVING_DUCKDB_PATH=/app/duckdb/analytics.duckdb
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:5000/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
worker:
build: .
restart: unless-stopped
command: python -m beanflows.worker
volumes:
- ./data:/app/data
env_file: .env
environment:
- DATABASE_PATH=/app/data/app.db
depends_on:
- app
scheduler:
build: .
restart: unless-stopped
command: python -m beanflows.worker scheduler
volumes:
- ./data:/app/data
env_file: .env
environment:
- DATABASE_PATH=/app/data/app.db
depends_on:
- app
# Optional: Litestream for backups
litestream:
image: litestream/litestream:latest
restart: unless-stopped
command: replicate -config /etc/litestream.yml
volumes:
- ./data:/app/data
- ./litestream.yml:/etc/litestream.yml:ro
depends_on:
- app
volumes:

View File

@@ -1,22 +0,0 @@
# Litestream configuration for SQLite replication
# Supports S3, Cloudflare R2, MinIO, etc.
dbs:
- path: /app/data/app.db
replicas:
# Option 1: AWS S3
# - url: s3://your-bucket/beanflows/app.db
# access-key-id: ${AWS_ACCESS_KEY_ID}
# secret-access-key: ${AWS_SECRET_ACCESS_KEY}
# region: us-east-1
# Option 2: Cloudflare R2
# - url: s3://your-bucket/beanflows/app.db
# access-key-id: ${R2_ACCESS_KEY_ID}
# secret-access-key: ${R2_SECRET_ACCESS_KEY}
# endpoint: https://${R2_ACCOUNT_ID}.r2.cloudflarestorage.com
# Option 3: Local file backup (for development)
- path: /app/data/backups
retention: 24h
snapshot-interval: 1h

View File

@@ -1,15 +0,0 @@
upstream app {
server blue-app:5000;
}
server {
listen 80;
location / {
proxy_pass http://app;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}

View File

@@ -42,41 +42,71 @@ async def get_subscription(user_id: int) -> dict | None:
)
async def upsert_billing_customer(user_id: int, provider_customer_id: str) -> None:
    """Insert a billing_customers row for *user_id*, or refresh its provider id.

    Uses SQLite UPSERT (ON CONFLICT on the user_id key) so repeated webhook
    deliveries for the same user are harmless.
    """
    sql = """INSERT INTO billing_customers (user_id, provider_customer_id)
             VALUES (?, ?)
             ON CONFLICT(user_id) DO UPDATE SET provider_customer_id = excluded.provider_customer_id"""
    params = (user_id, provider_customer_id)
    await execute(sql, params)
async def get_billing_customer(user_id: int) -> dict | None:
    """Return the billing_customers row for *user_id*, or None if absent."""
    row = await fetch_one(
        "SELECT * FROM billing_customers WHERE user_id = ?",
        (user_id,),
    )
    return row
async def upsert_subscription(
    user_id: int,
    plan: str,
    status: str,
    provider_subscription_id: str,
    current_period_end: str | None = None,
) -> int:
    """Create or update a subscription, keyed by provider_subscription_id.

    Keying on the provider's subscription id (rather than user_id) allows a
    user to hold more than one subscription. Timestamps are ISO-8601 strings
    (naive UTC, matching the rest of this module).

    NOTE(review): the rendered diff interleaved the pre- and post-refactor
    bodies (duplicate lookup/UPDATE/INSERT statements and the removed
    provider_customer_id parameter); this is the reconstructed post-refactor
    version per the a5d2a61 commit — verify against the repo.

    Args:
        user_id: Owning user's id (stored on insert only).
        plan: Plan identifier, e.g. "starter".
        status: Provider subscription status, e.g. "active".
        provider_subscription_id: Provider-side unique subscription id.
        current_period_end: ISO timestamp the current billing period ends, if known.

    Returns:
        The subscription row id (existing id on update, new rowid on insert).
    """
    now = datetime.utcnow().isoformat()
    existing = await fetch_one(
        "SELECT id FROM subscriptions WHERE provider_subscription_id = ?",
        (provider_subscription_id,),
    )
    if existing:
        await execute(
            """UPDATE subscriptions
               SET plan = ?, status = ?, current_period_end = ?, updated_at = ?
               WHERE provider_subscription_id = ?""",
            (plan, status, current_period_end, now, provider_subscription_id),
        )
        return existing["id"]
    else:
        return await execute(
            """INSERT INTO subscriptions
               (user_id, plan, status, provider_subscription_id, current_period_end, created_at, updated_at)
               VALUES (?, ?, ?, ?, ?, ?, ?)""",
            (user_id, plan, status, provider_subscription_id, current_period_end, now, now),
        )
async def record_transaction(
    user_id: int,
    provider_transaction_id: str,
    type: str = "payment",  # shadows builtin `type`; kept for caller compatibility
    amount_cents: int | None = None,
    currency: str = "USD",
    status: str = "pending",
) -> int:
    """Record a billing transaction. Idempotent on provider_transaction_id.

    INSERT OR IGNORE makes replayed webhook deliveries no-ops: a row whose
    provider_transaction_id already exists is silently skipped (assumes a
    UNIQUE constraint on that column — verify schema).

    Args:
        user_id: Owning user's id.
        provider_transaction_id: Provider-side unique transaction id.
        type: Transaction kind, defaults to "payment".
        amount_cents: Amount in minor units, if known.
        currency: ISO currency code, defaults to "USD".
        status: Transaction status, defaults to "pending".

    Returns:
        The value returned by execute() for the INSERT (rowid of the new row;
        when the insert is ignored the value reflects the no-op).
    """
    # datetime.utcnow() is deprecated in 3.12+, but kept to match the naive-UTC
    # ISO format used elsewhere in this module for stored timestamps.
    now = datetime.utcnow().isoformat()
    return await execute(
        """INSERT OR IGNORE INTO transactions
           (user_id, provider_transaction_id, type, amount_cents, currency, status, created_at)
           VALUES (?, ?, ?, ?, ?, ?, ?)""",
        (user_id, provider_transaction_id, type, amount_cents, currency, status, now),
    )
@@ -251,11 +281,14 @@ async def webhook():
if event_type == "subscription.activated":
plan = custom_data.get("plan", "starter")
uid = int(user_id) if user_id else 0
customer_id = data.get("customer_id")
if uid and customer_id:
await upsert_billing_customer(uid, str(customer_id))
await upsert_subscription(
user_id=int(user_id) if user_id else 0,
user_id=uid,
plan=plan,
status="active",
provider_customer_id=str(data.get("customer_id", "")),
provider_subscription_id=data.get("id", ""),
current_period_end=data.get("current_billing_period", {}).get("ends_at"),
)