Compare commits: 3a8dd6ba00...master (16 commits)

Commits:
e872ba0204, 8d1dbace0f, cddcd4463e, efb5a165e7, d58fa67238, 66d484955d, 2e928de156, 1de5d69906, dd07b0218b, a5d2a61cfb, 3faa29d8e5, 2d79627ca9, 33c8b4edbd, 5e22f2e1ae, 37b48d8f1c, dee0600ee8
@@ -1,6 +1,6 @@
# Changes here will be overwritten by Copier; NEVER EDIT MANUALLY
_commit: v0.19.0
_src_path: /home/Deeman/Projects/quart_saas_boilerplate
_src_path: git@gitlab.com:deemanone/materia_saas_boilerplate.master.git
author_email: hendrik@beanflows.coffee
author_name: Hendrik Deeman
base_url: https://beanflows.coffee
@@ -36,3 +36,10 @@ RATE_LIMIT_WINDOW=60
# Waitlist (set to true to enable waitlist gate on /auth/signup)
WAITLIST_MODE=false
RESEND_AUDIENCE_WAITLIST=

# R2 Backup (optional — enables materia-backup.timer when all three are set)
# Get from: Cloudflare Dashboard → R2 → Manage R2 API Tokens
# R2_ENDPOINT format: https://<account_id>.r2.cloudflarestorage.com
R2_ACCESS_KEY_ID=
R2_SECRET_ACCESS_KEY=
R2_ENDPOINT=
@@ -8,12 +8,16 @@ ADMIN_EMAILS=ENC[AES256_GCM,data:W7kmtrgck47tGpiHy4bIoF7TZouqjNGPHK+zQoZvxT9iz1r
|
||||
DATABASE_PATH=ENC[AES256_GCM,data:Rzif9KAhrVn/F3U=,iv:VgXwn8b38/dFkiTYHDiKe660eWtGPdbeMPC4Xc2RPHk=,tag:OSlbuCeQHcVigj0zxnH+5Q==,type:str]
|
||||
DUCKDB_PATH=ENC[AES256_GCM,data:UWMI9RTAHBNgb9EOxnmKUZovyGedu/xz5/yoOFpd,iv:oWVAoDtboVAC+SCTf+b/mQ+zzCGSRTrf3fjt1femqng=,tag:B46K6jTM0iVWQvL1FJlbyg==,type:str]
|
||||
SERVING_DUCKDB_PATH=ENC[AES256_GCM,data:Y3bouhWcgp3d9v1KGuXuPZIFiIe/WKnVwEVs799T,iv:uTpVqvRYOhUKM2JNiFsX/YK/sfmajWI899vtmuWuozA=,tag:z8ASJTKzG6lSUBLuvzciwQ==,type:str]
|
||||
#ENC[AES256_GCM,data:BUei2Df8W9g=,iv:NpOYrAUKS8tRwkGrh9SfyCowHaLSCaB9WrUEleVKp+Q=,tag:W5mebu7kkxC/ZAY+7NDfcA==,type:comment]
|
||||
R2_ACCESS_KEY_ID=ENC[AES256_GCM,data:pd3cnES/P1/m7ydbes4QLD6lgLY1GVIVYman+oGGv4Y=,iv:UTlG2vehbaaI9Hs72Pgr84963EILdyZNDiV14n7HPtM=,tag:fXKpaVzlZLeiQeQaYS7uzg==,type:str]
|
||||
R2_SECRET_ACCESS_KEY=ENC[AES256_GCM,data:KAY/y2sWeEudSOoljyDCNVaxqTJf31qtOOlZCPdgg3jJRYhRzXDfVJQ1LhNc0lRu4aGuXW295NNTtfASNCYg0A==,iv:O3DkVlD1r25Nb/jYeg4/lyzDnzux2XyYBPgd6+OmeW0=,tag:E4Onwxo7Vbocd30o8gKwmA==,type:str]
|
||||
R2_ENDPOINT=ENC[AES256_GCM,data:PsiOwvRyxqaGtGoCkWp+xvWk3q/FJB68PanNbDFWD1f/B2/ZNP2P8Opy1jkQ13Eql1oXRyMtYVZ4LG1PdHtpon4=,iv:+5ZjakxyB6JUmMK/ayQW4XJffyTPr+f7kb2BXUq6Ics=,tag:+ohog6SewrSh/SLoM3t35A==,type:str]
|
||||
#ENC[AES256_GCM,data:E3cNcRc=,iv:GR/I/NNyv/Ha6ZMH8nd0GZstJLI9MNLCutEKefuBDpk=,tag:dHOwKaKKPoWSt2TiVJVXJA==,type:comment]
|
||||
MAGIC_LINK_EXPIRY_MINUTES=ENC[AES256_GCM,data:w1I=,iv:CGm9QV5OeVaDVBbRXJL/qO7RnOeSemG+zh3QCgww688=,tag:lfv4wxdx4hzFRC8vPu0Txg==,type:str]
|
||||
SESSION_LIFETIME_DAYS=ENC[AES256_GCM,data:9fA=,iv:uBe1LugrsipQpOQX3wLFf4Er+v1SIQKNEcdglsmDwKM=,tag:g5lyQgBUCpWNWb2bkCmS3Q==,type:str]
|
||||
#ENC[AES256_GCM,data:Rd7HVrAHuomB78FCbYDB,iv:kxl7/gArMFCkWuQiv+hXWxCzgNkwDbe2WMs7p9/rlXQ=,tag:+IOGQO/HziVl32CDjiI9Pg==,type:comment]
|
||||
RESEND_API_KEY=ENC[AES256_GCM,data:srgytZ80mgTWF9DePH8QUR6TqrxI,iv:fCttiplfgdso2lKT2wPaS57SZ3npu0r2GIMnZLcAi7Q=,tag:k7OrEr2J5ikDWeDdZ6raRg==,type:str]
|
||||
EMAIL_FROM=ENC[AES256_GCM,data:oI1SUEpq5lbRT1FmHQ7QecDSj222kQ==,iv:ou981i5Ksx89IzDmudYFVuKWnHqXFXfcMI1jLwBAtPQ=,tag:QYmUIsgcqccmgrOJX+1Kvg==,type:str]
|
||||
RESEND_API_KEY=ENC[AES256_GCM,data:rbnmeF4TqhG6Z8FOgtTu1A8y6aMWQH7cu04eye88utZeLwag,iv:hg5zYYzeygee13QutIY2uXAAp3msVMDf6XoPSqtsMKE=,tag:aR0wYZ636VEpbvCN1lad3w==,type:str]
|
||||
EMAIL_FROM=ENC[AES256_GCM,data:STVZgvdgAuX1keZZ6KXrFhLz2h0KA0yRQRl+FIPMNT459SsY,iv:J/gG8kgJzqvI80UiGWKV7g0rrW4NI3KQTsyYEnVf0Uk=,tag:8uL7vS3f2PCPDbeH+DBRLQ==,type:str]
|
||||
#ENC[AES256_GCM,data:BLQ9NzKrxA==,iv:7Lc0e7NxwMWZ3T405KAdaNXWtGnnHHWcp6oI8m2GJio=,tag:/NMk8DWNjxrRoDcYjDjvPQ==,type:comment]
|
||||
PADDLE_API_KEY=ENC[AES256_GCM,data:fS/C0Iygf+S1xjss49D2w8/LlcfI,iv:wLNuuqpBGnClizMRTIRtMdsu8SytU5p13zpkLbXEnNI=,tag:4//Cj5GQ/EolpKxOyEMkNg==,type:str]
|
||||
PADDLE_WEBHOOK_SECRET=ENC[AES256_GCM,data:8Z/ODGntXsms8i+p+enaBVZjJuUa9ZIe,iv:NBr4IlxG60eQf7E43oDCCKKKDYeQSB1zMXL/z4YckP8=,tag:M4bF4y74bdLZgQ5dWkHFnQ==,type:str]
|
||||
@@ -24,21 +28,21 @@ PADDLE_PRICE_PRO=ENC[AES256_GCM,data:qk74BtToWDvY32eaYKyB1G3q+znH,iv:TLwWA7erfJP
|
||||
RATE_LIMIT_REQUESTS=ENC[AES256_GCM,data:c78c,iv:f7ZIb5n/f4DeMg5WKzVE/lbgfT7RfftnB3amrvuviE8=,tag:nPAI9P9oTV84cHWXOmYacw==,type:str]
|
||||
RATE_LIMIT_WINDOW=ENC[AES256_GCM,data:rTs=,iv:s4ns8X4FPtOdmNtZ35xwgMk5F+kdiAnz0BKdhf6qN3k=,tag:6RSI4kp9ENb5iNj7jXY86Q==,type:str]
|
||||
#ENC[AES256_GCM,data:IiDU8DxK2LgK,iv:n0zJ+UixDFs2u1rLSxJ/VnWXYJZ8Vda/BQdyS+RujEE=,tag:GfVtYNoHmy9GX5+ZW7QjPg==,type:comment]
|
||||
WAITLIST_MODE=ENC[AES256_GCM,data:e0tSBHY=,iv:L83mH2xgqLakaq9wb4RymKeXb7l67MNo38zGmSbhi48=,tag:i0z/OalFlgvj/lP4ipzfYQ==,type:str]
|
||||
RESEND_AUDIENCE_WAITLIST=ENC[AES256_GCM,data:FcQEW8NGrdY7naM1LZuqaAEllNpMjIV9,iv:v0XxXCsjmk1rigORy8vrf1NNzYfn093x2sNb1JAPXuY=,tag:XjLmhewcV3M+Lk4zUhIWbg==,type:str]
|
||||
WAITLIST_MODE=ENC[AES256_GCM,data:PL6dKA==,iv:1447ZD6aAO33qcVV+LHAlpNbLznJmzm2MLf2pAgHsIA=,tag:J/WmINlDCGlHW6xSSMRDZg==,type:str]
|
||||
#ENC[AES256_GCM,data:LgHFs0MBe0NfkE0DMJNYUkZh,iv:/C+IKpNQgSbOcwW9+1wN2gfwtY/OT5InkFDyJdPNw/M=,tag:jqEcXMfhowRVNSnrSs3ENg==,type:comment]
|
||||
UMAMI_SCRIPT_URL=ENC[AES256_GCM,data:85Nyjy8Rho38dyerGD5Mmw==,iv:+MXncm4quelDuV4QTI2Qqgt9G9ZffIkVDYpIdfOVI5Y=,tag:6LVNGEipfo+XWfdA6g7O5w==,type:str]
|
||||
UMAMI_WEBSITE_ID=ENC[AES256_GCM,data:ArK+fRNSVlXQBnbCOl6+,iv:1nhATMUcBq9m+GLGlkVXaJhFOH9yVfngux7ZPi1bzLM=,tag:SJSSl8G9rztaCbf49e54eQ==,type:str]
|
||||
UMAMI_SCRIPT_URL=ENC[AES256_GCM,data:93ElrUmwstsR5gTx2AxFQ7vS14qZDYU3Yb7yXkeg28sA,iv:GzlnuNA8O6aTVYQYIsGhFJPewJQ3eIXm4Tiob/Yg/Ek=,tag:hh/LO6nlZx7xwohZhD/bcg==,type:str]
|
||||
UMAMI_WEBSITE_ID=ENC[AES256_GCM,data:ZM3JMBncWZ0qYyGwxSbePkDNs9/Tw4+LfAWssB/nazL9t6h4,iv:x7HlnTqRwE4skZJv1t+K3uXHvyY9/ENa10R4QWaKWiQ=,tag:nXfGGZekAAHk1zLzNzvgpg==,type:str]
|
||||
UMAMI_API_URL=ENC[AES256_GCM,data:ErcfZNoB9ESrRqSG/laQMvRJjx2ieeSRlFQ=,iv:WAvEGFsVS2y07dnGZI2KcSpJQ93WjUqW93en1bS03kk=,tag:F1a/yZihEfH/PpUfZHGKCw==,type:str]
|
||||
UMAMI_API_TOKEN=ENC[AES256_GCM,data:N7xqH1we7SvCkPLxI7olyV3/bKaiDhUoVYfj/OgVja39+NyeZKeWwkpKy5Mg8Hs/KXBwDJgN8NHA5T4gJgvoZ4MC+n1FMMaAOcmZkTGH412Uu8SEiTlfj4ZLUC5v1+izzAPC6BgBngDEz5SOsH+sdh2Tb/eImIgGw/CxLjyRBk2LsCrKsG8KtYAJ71peH8FF8s+najkhYaZ7zUH2Kl7xcPtyBnMfF+0PfnUSaoEatucAXX0LEf+pCEPp80wbKb1sWafXmfAEXgRX+Lv+rjxSJxmr1xoJ5eG5loNs3gHUoj9WOwqwmws61pnhRtoIKi8sc0wu4D09m/RWuDm3dVqtWQ8AxuS/dmqx5VICVaBoDM6t9ePIpirVct/Hxge+68XKKKI0VaW6uWXECJn8T1vLyChNc0x5Z1mqi1E6tmcdvm6O/EBL/Qgw/MaHQk4UwgmEvRIBnqxxUcjYllS4CGADdcxmbEfNknMWFrCS/x0EIoI0dLzjst9xT/peUQk=,iv:OdJqyCreI9i8cYi+58cU8ZboCPWOPTCqo/iFO9zkh4M=,tag:8giR+gN/1pjWj+tbZOlilA==,type:str]
|
||||
#ENC[AES256_GCM,data:zx6ieYt6brZX6IrIgGkfGCqDlf0FOw==,iv:3dBgRYc9eI/Dhx109NUMh2yW2Fmqegg0n3rsjcbzJEw=,tag:4lbfJT/n1T53D0peeI4IhQ==,type:comment]
|
||||
LANDING_DIR=ENC[AES256_GCM,data:3YAGFB10q6g6ZLIHdDuvzMaD59+E,iv:S9NVxU/w+cwU1OPWjOEjnG8ocMdWrqR9VG4rFa4h4uA=,tag:0vq5Cn0Di1cUmbLrv1C1Uw==,type:str]
|
||||
ALERT_WEBHOOK_URL=ENC[AES256_GCM,data:ARYR45VFPLX37u5UNn9fJeBNXDj8,iv:rWDphUHYX/nLD46fDNfx3ZyFEbYK1hMksHCGqWTI66o=,tag:qE1FR6Sj+k07Yb+SlV3Vgw==,type:str]
|
||||
#ENC[AES256_GCM,data:ySDq589xP4ZwGD5JTQxh1Lr89h8zoz7RDLYfSl2Up/TSFF1tqA==,iv:oBQMgWLlT+r4TbtdLPSs7q7stg/qnEEbsu65+HjGBqQ=,tag:JiySwKWJIuZbEsY0sWJnQA==,type:comment]
|
||||
GITLAB_READ_TOKEN=ENC[AES256_GCM,data:JRxX3H9mj3DCa0kyi7aGqvop,iv:W/oqCW7sDv791VclZteW0M+jkab3unGVWJoB//w4FJ4=,tag:3FJbkKPxH/obs67Hcd80+A==,type:str]
|
||||
ALERT_WEBHOOK_URL=ENC[AES256_GCM,data:ArkJg/hBzLN8P/Q+jmbmWOM2iQVLybBCaoCGMJgaYQM=,iv:zroifzAQ4rGn+QLF/SZUPeWmIOFkLWq8QVtVWUeiYOk=,tag:8oVeHuXStKxCLaP77TMxDA==,type:str]
|
||||
NTFY_TOKEN=ENC[AES256_GCM,data:63Y734rhyTCHt4hdw4S/LPOZ/eEktk6X7SMFsFidwps=,iv:3OJEXCN5sqGyOJwE6fOniBXXslT/rOMASOotE1s+quk=,tag:NTc3YJp6dtNRpLGxIDLO8Q==,type:str]
|
||||
sops_age__list_0__map_enc=-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBrUWNTNnlLMWgzU21RY3p4\nUjRaVnJzK3lBS2hUTVZCcXI0Y0Q1K2QrOERJCi9qeVc2UVlleWZldUpUckF3WWVM\nMVhWanpGdGlGdXhGV2FnQytQYnZCSncKLS0tIER6RlNqMDhXMlFObkhOVmtVOXZw\nSVRHZTVzYkl5aytSWmNEblhTOVJCOGcKjWhIRS+pjFCMNp52Nt5GyLMhG9Xich7O\n8AlIkVaNN96Q7bVa52norLUQNQOprIGwEu5JXdUFU5Y3ULnoCTQimQ==\n-----END AGE ENCRYPTED FILE-----\n
|
||||
sops_age__list_0__map_recipient=age1f5002gj4s78jju45jd28kuejtcfhn5cdujz885fl7z2p9ym68pnsgky87a
|
||||
sops_age__list_1__map_enc=-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBQTllrSWJnU2I4WitWYVY0\nL1RRdFZudzdoSU1Ycmorb3ZRS0p0YnNBR2gwCmQ1T0M0YlpuNFo4SzVMUC9iTlM0\ndHhpd0NOZldJcVBsU0d6M3BwVVJaWjQKLS0tICtVVUVTRm1QSFZCNncvb0RqdC8r\nTmpMM3NmMFBKMDN6QjlIdko3NmFubEkK94oIMrcOYcBy69NjWn6NyWqhvKcP/0Az\nbOuqR0tgSs5xb8s9UUFHRpegZ3uJhQz4VMvOBN8fYaQjO5+4X22D9A==\n-----END AGE ENCRYPTED FILE-----\n
|
||||
sops_age__list_1__map_recipient=age1frqzxxwumn0zfkl3mp647t3zgx7uudksevsqrnxvycfhkl84yvmqskdzq5
|
||||
sops_lastmodified=2026-02-26T19:25:04Z
|
||||
sops_mac=ENC[AES256_GCM,data:LnKzPbxRoxtzw54ZqYuuZxq458Q8Mpo5edT7GvuLrw19NsYPmWMBcFmyXZH6WorEdVyy0YYYJLhiBHCm4J1rnYDCa/331xMtg+qG9N++u1OcpOGZI5QSMbEEFArSLWfOPHqdbYYZ4a5KiRd9L05bkW9kXsfLztbBzHtnxgzoQxQ=,iv:q2eMBkAv9M/liBlm5Tj6+g1V+CdgBYxlxfng2DqFH1Y=,tag:D3MTaywCb2rE4h9CH2EhKA==,type:str]
|
||||
sops_lastmodified=2026-02-27T12:28:08Z
|
||||
sops_mac=ENC[AES256_GCM,data:5olw6kL/IWhL3MtQZev0s58EuKaaWpiXpsEdPJEhGHvrjEVfqrI0gCaPdzn5fuCu2hXu0iP13zP3MbInhaWlpCUc8A0LZFjzvkvPPHYKgFrAgzImDa78OJlZDeesYu4co8JoX3FjyRBL4YxFKq7UNPRzmlNJBPTtWFPIe8EtUfQ=,iv:rApUbbZNDhII9pwtfRlPHGLWljVbarusv5PVz9B9AYs=,tag:r8FBVGbubqxUdrH2sAJRgg==,type:str]
|
||||
sops_unencrypted_suffix=_unencrypted
|
||||
sops_version=3.12.1
|
||||
|
||||
64  .gitlab-ci.yml (new file)
@@ -0,0 +1,64 @@
image: python:3.13

stages:
  - test
  - tag

variables:
  UV_CACHE_DIR: "$CI_PROJECT_DIR/.uv-cache"

cache:
  paths:
    - .uv-cache/

.uv_setup: &uv_setup
  - curl -LsSf https://astral.sh/uv/install.sh | sh
  - source $HOME/.local/bin/env

workflow:
  rules:
    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH

# ── Tests ─────────────────────────────────────────────────────────────────────

test:cli:
  stage: test
  before_script:
    - *uv_setup
  script:
    - uv sync --all-packages
    - uv run pytest tests/

test:sqlmesh:
  stage: test
  before_script:
    - *uv_setup
  script:
    - uv sync --all-packages
    - cd transform/sqlmesh_materia && uv run sqlmesh test

test:web:
  stage: test
  before_script:
    - *uv_setup
  script:
    - uv sync --all-packages
    - cd web && uv run pytest tests/ -x -q
    - cd web && uv run ruff check src/ tests/

# ── Tag (pull-based deploy) ───────────────────────────────────────────────────
# Creates v<N> tag after all tests pass. The on-server supervisor polls for new
# tags every 60s and deploys automatically. No SSH keys or deploy credentials
# needed in CI — only the built-in CI_JOB_TOKEN.

tag:
  stage: tag
  image: alpine:latest
  before_script:
    - apk add --no-cache git
  script:
    - git tag "v${CI_PIPELINE_IID}"
    - git push "https://gitlab-ci-token:${CI_JOB_TOKEN}@${CI_SERVER_HOST}/${CI_PROJECT_PATH}.git" "v${CI_PIPELINE_IID}"
  rules:
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
13  CHANGELOG.md
@@ -4,6 +4,19 @@ All notable changes to BeanFlows are documented here.

## [Unreleased]

### Changed
- **Monorepo copier migration**: moved all deployment files from `web/` to repo root so
  `copier update` can manage them from the template
- `Dockerfile` at root: updated for monorepo layout (`web/src/` paths, `--package beanflows`)
- `docker-compose.yml`, `docker-compose.prod.yml`, `deploy.sh`, `litestream.yml`, `router/`
  all moved to root
- `deploy.sh`: fixed sops path (`$APP_DIR/.env.prod.sops`, was `$APP_DIR/../.env.prod.sops`)
- `.copier-answers.yml` at root: points to local template, `_commit: v0.19.0`
- `.env.example` at root: updated paths for root-relative DuckDB locations
- `web/src/beanflows/core.py` (`Config`): added `ENABLE_CMS`, `ENABLE_DAAS`, `ENABLE_DIRECTORY`,
  `ENABLE_LEADS`, `BUSINESS_MODEL` feature flags (mirrors copier.yml questions)
- `supervisor.py`: `web_code_changed()` now checks root `Dockerfile`; deploy script is `./deploy.sh`

### Added
- **ICE certified stock aging report** — Monthly age-bucket × port breakdown extracted via ICE API, stored as gzip CSV, modelled through raw→foundation→serving, exposed at `GET /api/v1/commodities/<code>/stocks/aging`
- **ICE historical warehouse stocks by port** — End-of-month data from Nov 1996 to present, downloaded from static ICE URL, full SQLMesh pipeline, exposed at `GET /api/v1/commodities/<code>/stocks/by-port`
11  CLAUDE.md
@@ -118,11 +118,11 @@ uv add --package new_source extract-core niquests
- Each tick: git pull (tag-based) → due extractors → SQLMesh → export_serving → web deploy if changed
- Crash-safe: systemd `Restart=always` + 10-minute backoff on tick failure

**CI/CD** (`.gitlab/.gitlab-ci.yml`) — pull-based, no SSH:
- `test` stage: pytest, sqlmesh test, web pytest
- `tag` stage: creates `v${CI_PIPELINE_IID}` tag after tests pass (master branch only)
**CI/CD** (`.gitea/workflows/ci.yaml`) — pull-based, no SSH:
- `test-cli`, `test-sqlmesh`, `test-web` jobs: pytest, sqlmesh test, web pytest
- `tag` job: creates `v${github.run_number}` tag after all tests pass (master branch only)
- Supervisor polls for new tags every 60s, checks out latest, runs `uv sync`
- No SSH keys or deploy credentials in CI — only `CI_JOB_TOKEN` (built-in)
- No SSH keys or deploy credentials in CI — only `github.token` (built-in Gitea Actions)

**CLI modules** (`src/materia/`):
- `cli.py` — Typer app with subcommands: pipeline, secrets, version

@@ -209,3 +209,6 @@ Read `coding_philosophy.md` for the full guide. Key points:
| `SERVING_DUCKDB_PATH` | `analytics.duckdb` | Path to the serving DB (read by web app) |
| `ALERT_WEBHOOK_URL` | _(empty)_ | ntfy.sh URL for supervisor failure alerts |
| `SUPERVISOR_GIT_PULL` | _(unset)_ | Set to any value to enable tag-based git pull in supervisor |
| `R2_ACCESS_KEY_ID` | _(empty)_ | Cloudflare R2 access key — enables backup timer when all three R2 vars are set |
| `R2_SECRET_ACCESS_KEY` | _(empty)_ | Cloudflare R2 secret key |
| `R2_ENDPOINT` | _(empty)_ | Cloudflare account ID (used to construct R2 endpoint URL) |
@@ -5,7 +5,7 @@ services:
    image: nginx:alpine
    restart: unless-stopped
    ports:
      - "5000:80"
      - "5001:80"
    volumes:
      - ./router/default.conf:/etc/nginx/conf.d/default.conf:ro
    networks:
@@ -5,5 +5,5 @@ Wants=network-online.target

[Service]
Type=oneshot
ExecStart=/usr/bin/rclone sync /data/materia/landing/ r2:materia-raw/landing/ --log-level INFO
ExecStart=/usr/bin/rclone sync /data/materia/landing/ r2:backup/materia/landing/ --log-level INFO
TimeoutStartSec=1800
@@ -3,12 +3,14 @@
#
# Get credentials from: Cloudflare Dashboard → R2 → Manage R2 API Tokens
# Or from Pulumi ESC: esc env open beanflows/prod --format shell
#
# Bucket: backup (syncs to backup/materia/landing/)

[r2]
type = s3
provider = Cloudflare
access_key_id = <R2_ACCESS_KEY_ID>
secret_access_key = <R2_SECRET_ACCESS_KEY>
endpoint = https://<CLOUDFLARE_ACCOUNT_ID>.r2.cloudflarestorage.com
endpoint = <R2_ENDPOINT>
acl = private
no_check_bucket = true
@@ -15,7 +15,7 @@ set -euo pipefail

SERVICE_USER="beanflows_service"
REPO_DIR="/opt/materia"
GITLAB_PROJECT="deemanone/materia"
GITEA_REPO="ssh://git@git.padelnomics.io:2222/deemanone/beanflows.git"
UV="/home/${SERVICE_USER}/.local/bin/uv"

[ "$(id -u)" = "0" ] || { echo "ERROR: Run as root"; exit 1; }

@@ -35,7 +35,7 @@ if [ -d "${REPO_DIR}/.git" ]; then
  sudo -u "${SERVICE_USER}" git -C "${REPO_DIR}" fetch --tags --prune-tags origin
else
  sudo -u "${SERVICE_USER}" git clone \
    "git@gitlab.com:${GITLAB_PROJECT}.git" "${REPO_DIR}"
    "${GITEA_REPO}" "${REPO_DIR}"
fi

LATEST_TAG=$(sudo -u "${SERVICE_USER}" \

@@ -47,23 +47,74 @@ fi
# ── Decrypt secrets ───────────────────────────────────────────────────────────

sudo -u "${SERVICE_USER}" bash -c \
  "sops --input-type dotenv --output-type dotenv -d ${REPO_DIR}/.env.prod.sops > ${REPO_DIR}/.env"
  "SOPS_AGE_KEY_FILE=/home/${SERVICE_USER}/.config/sops/age/keys.txt \
   sops --input-type dotenv --output-type dotenv -d ${REPO_DIR}/.env.prod.sops > ${REPO_DIR}/.env"
chmod 600 "${REPO_DIR}/.env"

# ── Python dependencies ───────────────────────────────────────────────────────

sudo -u "${SERVICE_USER}" bash -c "cd ${REPO_DIR} && ${UV} sync --all-packages"

# ── Systemd service ───────────────────────────────────────────────────────────
# ── Systemd supervisor service ────────────────────────────────────────────────

cp "${REPO_DIR}/infra/supervisor/materia-supervisor.service" /etc/systemd/system/
systemctl daemon-reload
systemctl enable --now materia-supervisor

# ── R2 backup timer (optional) ────────────────────────────────────────────────
# Enabled only when R2_ACCESS_KEY_ID, R2_SECRET_ACCESS_KEY, and R2_ENDPOINT
# are present in .env. Idempotent: rclone.conf is always regenerated from env
# (so credential rotations take effect on re-run); systemd units are only copied
# when they differ from what's already installed.

_env_get() { grep -E "^${1}=" "${REPO_DIR}/.env" 2>/dev/null | head -1 | cut -d= -f2- | tr -d '"'"'" || true; }

R2_KEY=$(_env_get R2_ACCESS_KEY_ID)
R2_SECRET=$(_env_get R2_SECRET_ACCESS_KEY)
R2_ENDPOINT=$(_env_get R2_ENDPOINT)

if [ -n "${R2_KEY}" ] && [ -n "${R2_SECRET}" ] && [ -n "${R2_ENDPOINT}" ]; then
  echo "$(date '+%H:%M:%S') ==> Configuring R2 backup..."

  RCLONE_CONF_DIR="/home/${SERVICE_USER}/.config/rclone"
  RCLONE_CONF="${RCLONE_CONF_DIR}/rclone.conf"

  sudo -u "${SERVICE_USER}" mkdir -p "${RCLONE_CONF_DIR}"

  # Always write from env so credential rotations take effect automatically.
  cat > "${RCLONE_CONF}" <<EOF
[r2]
type = s3
provider = Cloudflare
access_key_id = ${R2_KEY}
secret_access_key = ${R2_SECRET}
endpoint = ${R2_ENDPOINT}
acl = private
no_check_bucket = true
EOF
  chown "${SERVICE_USER}:${SERVICE_USER}" "${RCLONE_CONF}"
  chmod 600 "${RCLONE_CONF}"

  UNITS_CHANGED=0
  for unit in materia-backup.service materia-backup.timer; do
    if ! diff -q "${REPO_DIR}/infra/backup/${unit}" "/etc/systemd/system/${unit}" >/dev/null 2>&1; then
      cp "${REPO_DIR}/infra/backup/${unit}" /etc/systemd/system/
      UNITS_CHANGED=1
    fi
  done
  [ "${UNITS_CHANGED}" = "1" ] && systemctl daemon-reload

  systemctl enable --now materia-backup.timer
  echo "$(date '+%H:%M:%S') ==> R2 backup timer enabled."
else
  echo "$(date '+%H:%M:%S') ==> R2_ACCESS_KEY_ID / R2_SECRET_ACCESS_KEY / R2_ENDPOINT not set — skipping backup timer."
fi

echo ""
echo "=== Bootstrap complete! ==="
echo ""
echo "Check status: systemctl status materia-supervisor"
echo "View logs: journalctl -u materia-supervisor -f"
echo "Workflow status: sudo -u ${SERVICE_USER} ${UV} run -p ${REPO_DIR} python src/materia/supervisor.py status"
echo "Backup timer: systemctl list-timers materia-backup.timer"
echo "Tag: $(sudo -u "${SERVICE_USER}" git -C "${REPO_DIR}" describe --tags --always)"
@@ -7,7 +7,7 @@ Single-server local-first setup for BeanFlows.coffee on Hetzner NVMe.
```
Hetzner Server (NVMe)
├── beanflows_service (system user, nologin)
│   ├── ~/.ssh/materia_deploy            # ed25519 deploy key for GitLab read access
│   ├── ~/.ssh/beanflows_deploy          # ed25519 deploy key for Gitea read access
│   └── ~/.config/sops/age/keys.txt     # age keypair (auto-discovered by SOPS)
├── /opt/materia/                        # Git repo (owned by beanflows_service, latest release tag)
├── /opt/materia/.env                    # Decrypted from .env.prod.sops at deploy time

@@ -24,7 +24,7 @@ Hetzner Server (NVMe)
1. **Extract** — Supervisor runs due extractors per `infra/supervisor/workflows.toml`
2. **Transform** — SQLMesh reads landing → writes `lakehouse.duckdb`
3. **Export** — `export_serving` copies `serving.*` → `analytics.duckdb` (atomic rename)
4. **Backup** — rclone syncs `/data/materia/landing/` → R2 `materia-raw/landing/`
4. **Backup** — rclone syncs `/data/materia/landing/` → R2 `backup/materia/landing/`
5. **Web** — Web app reads `analytics.duckdb` read-only (per-thread connections)

## Setup (new server)

@@ -37,11 +37,11 @@ bash infra/setup_server.sh

This creates the `beanflows_service` user, data directories, installs all tools (git, curl, age, sops, rclone, uv), generates an ed25519 SSH deploy key and an age keypair (both as the service user). It prints both public keys.

### 2. Add keys to GitLab and SOPS
### 2. Add keys to Gitea and SOPS

```bash
# Add the SSH deploy key to GitLab:
# → Repository Settings → Deploy Keys → Add key (read-only)
# Add the SSH deploy key to Gitea:
# → git.padelnomics.io → beanflows repo → Settings → Deploy Keys → Add key (read-only)

# Add the server age public key to .sops.yaml on your workstation,
# then re-encrypt prod secrets to include the server key:

@@ -59,20 +59,7 @@ ssh root@<server_ip> 'bash -s' < infra/bootstrap_supervisor.sh

This clones the repo via SSH, decrypts secrets, installs Python dependencies, and starts the supervisor service. No access tokens required — access is via the SSH deploy key. (All tools must already be installed by setup_server.sh.)

### 4. Set up R2 backup

```bash
apt install rclone
# Configure rclone as the service user (used by the backup timer):
sudo -u beanflows_service mkdir -p /home/beanflows_service/.config/rclone
sudo -u beanflows_service cp infra/backup/rclone.conf.example \
  /home/beanflows_service/.config/rclone/rclone.conf
# Fill in R2 credentials from .env.prod.sops (ACCESS_KEY_ID, SECRET_ACCESS_KEY, bucket endpoint)
cp infra/backup/materia-backup.service /etc/systemd/system/
cp infra/backup/materia-backup.timer /etc/systemd/system/
systemctl daemon-reload
systemctl enable --now materia-backup.timer
```
If `R2_ACCESS_KEY_ID`, `R2_SECRET_ACCESS_KEY`, and `R2_ENDPOINT` are present in `.env.prod.sops`, bootstrap also generates `rclone.conf` and enables `materia-backup.timer` automatically. No manual R2 setup step needed.

## Secrets management

@@ -100,8 +87,8 @@ SOPS auto-discovers the service user's age key at `~/.config/sops/age/keys.txt`

No SSH keys or deploy credentials in CI.

1. CI runs tests (`test:cli`, `test:sqlmesh`, `test:web`)
2. On master, CI creates tag `v${CI_PIPELINE_IID}` using built-in `CI_JOB_TOKEN`
1. CI runs tests (`test-cli`, `test-sqlmesh`, `test-web`)
2. On master, CI creates tag `v${github.run_number}` using built-in `github.token`
3. Supervisor polls for new tags every 60s
4. When a new tag appears: `git checkout --detach <tag>` + `uv sync --all-packages`
5. If `web/` files changed: `./web/deploy.sh` (Docker blue/green + health check)
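For orientation, a minimal sketch of the tag-poll step in this flow. The helper names are hypothetical (the real logic lives in `src/materia/supervisor.py`), and the `v<N>` tag format is taken from the CI description above:

```python
# Hypothetical sketch of the tag-poll step — not the actual supervisor code.
import re
import subprocess


def latest_release_tag(repo_dir: str) -> str | None:
    """Return the newest v<N> tag known to the repo, or None if there are none."""
    subprocess.run(["git", "-C", repo_dir, "fetch", "--tags", "--prune-tags", "origin"], check=True)
    tags = subprocess.run(
        ["git", "-C", repo_dir, "tag", "--list", "v*"],
        check=True, capture_output=True, text=True,
    ).stdout.split()
    numbered = [t for t in tags if re.fullmatch(r"v\d+", t)]
    return max(numbered, key=lambda t: int(t[1:])) if numbered else None


def deploy_if_new(repo_dir: str, current: str | None) -> str | None:
    """Check out the newest tag and resync dependencies when it differs from `current`."""
    tag = latest_release_tag(repo_dir)
    if tag and tag != current:
        subprocess.run(["git", "-C", repo_dir, "checkout", "--detach", tag], check=True)
        subprocess.run(["uv", "sync", "--all-packages"], cwd=repo_dir, check=True)
    return tag
```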
@@ -41,7 +41,7 @@ usermod -aG docker "${SERVICE_USER}"
|
||||
|
||||
log "Creating directories..."
|
||||
mkdir -p "${APP_DIR}" "${DATA_DIR}/landing"
|
||||
chown "${SERVICE_USER}:${SERVICE_USER}" "${APP_DIR}"
|
||||
chown -R "${SERVICE_USER}:${SERVICE_USER}" "${APP_DIR}"
|
||||
chown -R "${SERVICE_USER}:${SERVICE_USER}" "${DATA_DIR}"
|
||||
|
||||
# ── System tools ──────────────────────────────────────────────────────────────
|
||||
@@ -68,7 +68,8 @@ fi
|
||||
|
||||
if [ ! -f "${SSH_DIR}/config" ]; then
|
||||
cat > "${SSH_DIR}/config" <<EOF
|
||||
Host gitlab.com
|
||||
Host git.padelnomics.io
|
||||
Port 2222
|
||||
IdentityFile ${DEPLOY_KEY}
|
||||
IdentitiesOnly yes
|
||||
EOF
|
||||
@@ -76,7 +77,7 @@ EOF
|
||||
chmod 600 "${SSH_DIR}/config"
|
||||
fi
|
||||
|
||||
ssh-keyscan -H gitlab.com >> "${SSH_DIR}/known_hosts" 2>/dev/null
|
||||
ssh-keyscan -H -p 2222 git.padelnomics.io >> "${SSH_DIR}/known_hosts" 2>/dev/null
|
||||
sort -u "${SSH_DIR}/known_hosts" -o "${SSH_DIR}/known_hosts"
|
||||
chown "${SERVICE_USER}:${SERVICE_USER}" "${SSH_DIR}/known_hosts"
|
||||
chmod 644 "${SSH_DIR}/known_hosts"
|
||||
|
||||
@@ -240,7 +240,7 @@ def run_shell(cmd: str, timeout_seconds: int = SUBPROCESS_TIMEOUT_SECONDS) -> bo
def run_transform() -> None:
    """Run SQLMesh — evaluates model staleness internally."""
    logger.info("Running SQLMesh transform")
    ok = run_shell("uv run sqlmesh -p transform/sqlmesh_materia run")
    ok = run_shell("uv run sqlmesh -p transform/sqlmesh_materia plan prod --auto-apply")
    if not ok:
        send_alert("SQLMesh transform failed")
@@ -27,7 +27,7 @@ def test_secrets_test_command(mock_secrets):
|
||||
"""Test secrets test command."""
|
||||
result = runner.invoke(app, ["secrets", "test"])
|
||||
assert result.exit_code == 0
|
||||
assert "ESC connection successful" in result.stdout
|
||||
assert "SOPS decryption successful" in result.stdout
|
||||
|
||||
|
||||
def test_secrets_list_command(mock_secrets):
|
||||
|
||||
@@ -121,7 +121,7 @@ def test_extract_cot_year_skips_existing_file(tmp_path, monkeypatch):
|
||||
mock_head.headers = {"etag": f'"{etag}"'}
|
||||
mock_session.head.return_value = mock_head
|
||||
|
||||
result = cot_execute.extract_cot_year(2024, mock_session)
|
||||
result = cot_execute.extract_cot_year(2024, mock_session, cot_execute.COT_URL_FUTURES_ONLY, "cot")
|
||||
|
||||
assert result == 0
|
||||
mock_session.get.assert_not_called() # No download should occur
|
||||
@@ -141,7 +141,7 @@ def test_extract_cot_year_returns_false_on_404(tmp_path, monkeypatch):
|
||||
mock_head.status_code = 404
|
||||
mock_session.head.return_value = mock_head
|
||||
|
||||
result = cot_execute.extract_cot_year(2006, mock_session)
|
||||
result = cot_execute.extract_cot_year(2006, mock_session, cot_execute.COT_URL_FUTURES_ONLY, "cot")
|
||||
|
||||
assert result == 0
|
||||
mock_session.get.assert_not_called()
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
# Changes here will be overwritten by Copier; NEVER EDIT MANUALLY
|
||||
_commit: v0.17.0
|
||||
_src_path: git@gitlab.com:deemanone/materia_saas_boilerplate.master.git
|
||||
author_email: hendrik@beanflows.coffee
|
||||
author_name: Hendrik Deeman
|
||||
base_url: https://beanflows.coffee
|
||||
business_model: saas
|
||||
description: Commodity analytics for coffee traders
|
||||
enable_cms: true
|
||||
enable_daas: true
|
||||
enable_directory: false
|
||||
enable_i18n: false
|
||||
enable_leads: false
|
||||
payment_provider: paddle
|
||||
project_name: BeanFlows
|
||||
project_slug: beanflows
|
||||
@@ -1,38 +0,0 @@
|
||||
# App
|
||||
APP_NAME=BeanFlows
|
||||
SECRET_KEY=change-me-generate-a-real-secret
|
||||
BASE_URL=http://localhost:5001
|
||||
DEBUG=true
|
||||
ADMIN_EMAILS=admin@beanflows.coffee
|
||||
|
||||
# Database
|
||||
DATABASE_PATH=data/app.db
|
||||
# DUCKDB_PATH points to the full pipeline DB (lakehouse.duckdb) — used by SQLMesh and export_serving.
|
||||
# SERVING_DUCKDB_PATH points to the serving-only export (analytics.duckdb) — used by the web app.
|
||||
# Run `uv run materia pipeline run export_serving` after each SQLMesh transform to populate it.
|
||||
DUCKDB_PATH=../local.duckdb
|
||||
SERVING_DUCKDB_PATH=../analytics.duckdb
|
||||
|
||||
# Auth
|
||||
MAGIC_LINK_EXPIRY_MINUTES=15
|
||||
SESSION_LIFETIME_DAYS=30
|
||||
|
||||
# Email (Resend)
|
||||
RESEND_API_KEY=
|
||||
EMAIL_FROM=hello@example.com
|
||||
|
||||
|
||||
# Paddle
|
||||
PADDLE_API_KEY=
|
||||
PADDLE_WEBHOOK_SECRET=
|
||||
PADDLE_PRICE_STARTER=
|
||||
PADDLE_PRICE_PRO=
|
||||
|
||||
|
||||
# Rate limiting
|
||||
RATE_LIMIT_REQUESTS=100
|
||||
RATE_LIMIT_WINDOW=60
|
||||
|
||||
# Waitlist (set to true to enable waitlist gate on /auth/signup)
|
||||
WAITLIST_MODE=false
|
||||
RESEND_AUDIENCE_WAITLIST=
|
||||
@@ -1,33 +0,0 @@
|
||||
# CSS build stage (Tailwind standalone CLI, no Node.js)
|
||||
FROM debian:bookworm-slim AS css-build
|
||||
ADD https://github.com/tailwindlabs/tailwindcss/releases/latest/download/tailwindcss-linux-x64 /usr/local/bin/tailwindcss
|
||||
RUN chmod +x /usr/local/bin/tailwindcss
|
||||
WORKDIR /app
|
||||
COPY src/ ./src/
|
||||
RUN tailwindcss -i ./src/beanflows/static/css/input.css \
|
||||
-o ./src/beanflows/static/css/output.css --minify
|
||||
|
||||
|
||||
# Build stage
|
||||
FROM python:3.12-slim AS build
|
||||
COPY --from=ghcr.io/astral-sh/uv:0.8 /uv /uvx /bin/
|
||||
WORKDIR /app
|
||||
ENV UV_COMPILE_BYTECODE=1 UV_LINK_MODE=copy
|
||||
COPY uv.lock pyproject.toml README.md ./
|
||||
COPY src/ ./src/
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
uv sync --no-dev --frozen
|
||||
|
||||
# Runtime stage
|
||||
FROM python:3.12-slim AS runtime
|
||||
ENV PATH="/app/.venv/bin:$PATH"
|
||||
RUN useradd -m -u 1000 appuser
|
||||
WORKDIR /app
|
||||
RUN mkdir -p /app/data && chown -R appuser:appuser /app
|
||||
COPY --from=build --chown=appuser:appuser /app .
|
||||
COPY --from=css-build /app/src/beanflows/static/css/output.css ./src/beanflows/static/css/output.css
|
||||
USER appuser
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
ENV DATABASE_PATH=/app/data/app.db
|
||||
EXPOSE 5000
|
||||
CMD ["hypercorn", "beanflows.app:app", "--bind", "0.0.0.0:5000", "--workers", "1"]
|
||||
125
web/deploy.sh
125
web/deploy.sh
@@ -1,125 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
APP_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
|
||||
# ── Verify sops is installed (setup_server.sh installs it to /usr/local/bin) ──
|
||||
if ! command -v sops &>/dev/null; then
|
||||
echo "ERROR: sops not found — run infra/setup_server.sh first"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# ── Decrypt secrets (SOPS auto-discovers age key from ~/.config/sops/age/) ────
|
||||
echo "==> Decrypting secrets from .env.prod.sops..."
|
||||
sops --input-type dotenv --output-type dotenv -d "$APP_DIR/../.env.prod.sops" > "$APP_DIR/.env"
|
||||
chmod 600 "$APP_DIR/.env"
|
||||
|
||||
COMPOSE="docker compose -f docker-compose.prod.yml"
|
||||
LIVE_FILE=".live-slot"
|
||||
ROUTER_CONF="router/default.conf"
|
||||
|
||||
# ── Determine slots ─────────────────────────────────────────
|
||||
|
||||
CURRENT=$(cat "$LIVE_FILE" 2>/dev/null || echo "none")
|
||||
|
||||
if [ "$CURRENT" = "blue" ]; then
|
||||
TARGET="green"
|
||||
else
|
||||
TARGET="blue"
|
||||
fi
|
||||
|
||||
echo "==> Current: $CURRENT → Deploying: $TARGET"
|
||||
|
||||
# ── Build ───────────────────────────────────────────────────
|
||||
|
||||
echo "==> Building $TARGET..."
|
||||
$COMPOSE --profile "$TARGET" build
|
||||
|
||||
# ── Backup DB before migration ────────────────────────────────
|
||||
|
||||
BACKUP_TAG="pre-deploy-$(date +%Y%m%d-%H%M%S)"
|
||||
echo "==> Backing up database (${BACKUP_TAG})..."
|
||||
$COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
|
||||
sh -c "cp /app/data/app.db /app/data/app.db.${BACKUP_TAG} 2>/dev/null || true"
|
||||
|
||||
# ── Migrate ─────────────────────────────────────────────────
|
||||
|
||||
echo "==> Running migrations..."
|
||||
$COMPOSE --profile "$TARGET" run --rm "${TARGET}-app" \
|
||||
python -m beanflows.migrations.migrate
|
||||
|
||||
# ── Ensure router points to current live slot before --wait ──
|
||||
# nginx resolves upstream hostnames — if config points to a stopped slot,
|
||||
# the health check fails. Reset router to current slot while target starts.
|
||||
|
||||
_write_router_conf() {
|
||||
local SLOT="$1"
|
||||
mkdir -p "$(dirname "$ROUTER_CONF")"
|
||||
cat > "$ROUTER_CONF" <<NGINX
|
||||
upstream app {
|
||||
server ${SLOT}-app:5000;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
|
||||
location / {
|
||||
proxy_pass http://app;
|
||||
proxy_set_header Host \$host;
|
||||
proxy_set_header X-Real-IP \$remote_addr;
|
||||
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto \$scheme;
|
||||
}
|
||||
}
|
||||
NGINX
|
||||
}
|
||||
|
||||
if [ "$CURRENT" != "none" ]; then
|
||||
echo "==> Resetting router to current slot ($CURRENT)..."
|
||||
_write_router_conf "$CURRENT"
|
||||
$COMPOSE restart router
|
||||
fi
|
||||
|
||||
# ── Start & health check ───────────────────────────────────
|
||||
|
||||
echo "==> Starting $TARGET (waiting for health check)..."
|
||||
if ! $COMPOSE --profile "$TARGET" up -d --wait; then
|
||||
echo "!!! Health check failed — dumping logs"
|
||||
echo "--- ${TARGET}-app logs ---"
|
||||
$COMPOSE --profile "$TARGET" logs --tail=60 "${TARGET}-app" 2>&1 || true
|
||||
echo "--- router logs ---"
|
||||
$COMPOSE logs --tail=10 router 2>&1 || true
|
||||
echo "!!! Rolling back"
|
||||
$COMPOSE stop "${TARGET}-app" "${TARGET}-worker" "${TARGET}-scheduler"
|
||||
LATEST=$($COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
|
||||
sh -c "ls -t /app/data/app.db.pre-deploy-* 2>/dev/null | head -1")
|
||||
if [ -n "$LATEST" ]; then
|
||||
echo "==> Restoring database from ${LATEST}..."
|
||||
$COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
|
||||
sh -c "cp '${LATEST}' /app/data/app.db"
|
||||
fi
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# ── Write router config and reload (new slot is healthy) ────
|
||||
|
||||
echo "==> Switching router to $TARGET..."
|
||||
_write_router_conf "$TARGET"
|
||||
$COMPOSE exec router nginx -s reload
|
||||
|
||||
# ── Cleanup old pre-deploy backups (keep last 3) ─────────────
|
||||
|
||||
$COMPOSE run --rm --entrypoint "" "${TARGET}-app" \
|
||||
sh -c "ls -t /app/data/app.db.pre-deploy-* 2>/dev/null | tail -n +4 | xargs rm -f" || true
|
||||
|
||||
# ── Stop old slot ───────────────────────────────────────────
|
||||
|
||||
if [ "$CURRENT" != "none" ]; then
|
||||
echo "==> Stopping $CURRENT..."
|
||||
$COMPOSE stop "${CURRENT}-app" "${CURRENT}-worker" "${CURRENT}-scheduler"
|
||||
fi
|
||||
|
||||
# ── Record live slot ────────────────────────────────────────
|
||||
|
||||
echo "$TARGET" > "$LIVE_FILE"
|
||||
echo "==> Deployed $TARGET successfully!"
|
||||
@@ -1,132 +0,0 @@
|
||||
services:
|
||||
# ── Always-on infrastructure ──────────────────────────────
|
||||
|
||||
router:
|
||||
image: nginx:alpine
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "5000:80"
|
||||
volumes:
|
||||
- ./router/default.conf:/etc/nginx/conf.d/default.conf:ro
|
||||
networks:
|
||||
- net
|
||||
healthcheck:
|
||||
test: ["CMD", "nginx", "-t"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
|
||||
litestream:
|
||||
image: litestream/litestream:latest
|
||||
restart: unless-stopped
|
||||
command: replicate -config /etc/litestream.yml
|
||||
volumes:
|
||||
- app-data:/app/data
|
||||
- ./litestream.yml:/etc/litestream.yml:ro
|
||||
|
||||
# ── Blue slot ─────────────────────────────────────────────
|
||||
|
||||
blue-app:
|
||||
profiles: ["blue"]
|
||||
build:
|
||||
context: .
|
||||
restart: unless-stopped
|
||||
env_file: ./.env
|
||||
environment:
|
||||
- DATABASE_PATH=/app/data/app.db
|
||||
- SERVING_DUCKDB_PATH=/data/materia/analytics.duckdb
|
||||
volumes:
|
||||
- app-data:/app/data
|
||||
- /data/materia/analytics.duckdb:/data/materia/analytics.duckdb:ro
|
||||
networks:
|
||||
- net
|
||||
healthcheck:
|
||||
test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:5000/health')"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 15s
|
||||
|
||||
blue-worker:
|
||||
profiles: ["blue"]
|
||||
build:
|
||||
context: .
|
||||
restart: unless-stopped
|
||||
command: python -m beanflows.worker
|
||||
env_file: ./.env
|
||||
environment:
|
||||
- DATABASE_PATH=/app/data/app.db
|
||||
volumes:
|
||||
- app-data:/app/data
|
||||
networks:
|
||||
- net
|
||||
|
||||
blue-scheduler:
|
||||
profiles: ["blue"]
|
||||
build:
|
||||
context: .
|
||||
restart: unless-stopped
|
||||
command: python -m beanflows.worker scheduler
|
||||
env_file: ./.env
|
||||
environment:
|
||||
- DATABASE_PATH=/app/data/app.db
|
||||
volumes:
|
||||
- app-data:/app/data
|
||||
networks:
|
||||
- net
|
||||
|
||||
# ── Green slot ────────────────────────────────────────────
|
||||
|
||||
green-app:
|
||||
profiles: ["green"]
|
||||
build:
|
||||
context: .
|
||||
restart: unless-stopped
|
||||
env_file: ./.env
|
||||
environment:
|
||||
- DATABASE_PATH=/app/data/app.db
|
||||
- SERVING_DUCKDB_PATH=/data/materia/analytics.duckdb
|
||||
volumes:
|
||||
- app-data:/app/data
|
||||
- /data/materia/analytics.duckdb:/data/materia/analytics.duckdb:ro
|
||||
networks:
|
||||
- net
|
||||
healthcheck:
|
||||
test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:5000/health')"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 15s
|
||||
|
||||
green-worker:
|
||||
profiles: ["green"]
|
||||
build:
|
||||
context: .
|
||||
restart: unless-stopped
|
||||
command: python -m beanflows.worker
|
||||
env_file: ./.env
|
||||
environment:
|
||||
- DATABASE_PATH=/app/data/app.db
|
||||
volumes:
|
||||
- app-data:/app/data
|
||||
networks:
|
||||
- net
|
||||
|
||||
green-scheduler:
|
||||
profiles: ["green"]
|
||||
build:
|
||||
context: .
|
||||
restart: unless-stopped
|
||||
command: python -m beanflows.worker scheduler
|
||||
env_file: ./.env
|
||||
environment:
|
||||
- DATABASE_PATH=/app/data/app.db
|
||||
volumes:
|
||||
- app-data:/app/data
|
||||
networks:
|
||||
- net
|
||||
|
||||
volumes:
|
||||
app-data:
|
||||
|
||||
networks:
|
||||
net:
|
||||
@@ -1,56 +0,0 @@
|
||||
services:
|
||||
app:
|
||||
build: .
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "5000:5000"
|
||||
volumes:
|
||||
- ./data:/app/data
|
||||
- ./duckdb:/app/duckdb:ro
|
||||
env_file: .env
|
||||
environment:
|
||||
- DATABASE_PATH=/app/data/app.db
|
||||
- SERVING_DUCKDB_PATH=/app/duckdb/analytics.duckdb
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:5000/health"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 10s
|
||||
|
||||
worker:
|
||||
build: .
|
||||
restart: unless-stopped
|
||||
command: python -m beanflows.worker
|
||||
volumes:
|
||||
- ./data:/app/data
|
||||
env_file: .env
|
||||
environment:
|
||||
- DATABASE_PATH=/app/data/app.db
|
||||
depends_on:
|
||||
- app
|
||||
|
||||
scheduler:
|
||||
build: .
|
||||
restart: unless-stopped
|
||||
command: python -m beanflows.worker scheduler
|
||||
volumes:
|
||||
- ./data:/app/data
|
||||
env_file: .env
|
||||
environment:
|
||||
- DATABASE_PATH=/app/data/app.db
|
||||
depends_on:
|
||||
- app
|
||||
|
||||
# Optional: Litestream for backups
|
||||
litestream:
|
||||
image: litestream/litestream:latest
|
||||
restart: unless-stopped
|
||||
command: replicate -config /etc/litestream.yml
|
||||
volumes:
|
||||
- ./data:/app/data
|
||||
- ./litestream.yml:/etc/litestream.yml:ro
|
||||
depends_on:
|
||||
- app
|
||||
|
||||
volumes:
|
||||
@@ -1,22 +0,0 @@
|
||||
# Litestream configuration for SQLite replication
|
||||
# Supports S3, Cloudflare R2, MinIO, etc.
|
||||
|
||||
dbs:
|
||||
- path: /app/data/app.db
|
||||
replicas:
|
||||
# Option 1: AWS S3
|
||||
# - url: s3://your-bucket/beanflows/app.db
|
||||
# access-key-id: ${AWS_ACCESS_KEY_ID}
|
||||
# secret-access-key: ${AWS_SECRET_ACCESS_KEY}
|
||||
# region: us-east-1
|
||||
|
||||
# Option 2: Cloudflare R2
|
||||
# - url: s3://your-bucket/beanflows/app.db
|
||||
# access-key-id: ${R2_ACCESS_KEY_ID}
|
||||
# secret-access-key: ${R2_SECRET_ACCESS_KEY}
|
||||
# endpoint: https://${R2_ACCOUNT_ID}.r2.cloudflarestorage.com
|
||||
|
||||
# Option 3: Local file backup (for development)
|
||||
- path: /app/data/backups
|
||||
retention: 24h
|
||||
snapshot-interval: 1h
|
||||
@@ -1,15 +0,0 @@
|
||||
upstream app {
|
||||
server blue-app:5000;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
|
||||
location / {
|
||||
proxy_pass http://app;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
}
|
||||
}
|
||||
@@ -65,6 +65,7 @@ ALLOWED_METRICS = frozenset({

_local = threading.local()
_db_path: str = ""
_conn: duckdb.DuckDBPyConnection | None = None  # test override: set to bypass _db_path / _local


def open_analytics_db() -> None:

@@ -110,12 +111,15 @@ async def fetch_analytics(sql: str, params: list | None = None) -> list[dict]:
    """Run a read-only DuckDB query off the event loop. Returns list of dicts.
    Returns empty list if analytics DB is not configured (SERVING_DUCKDB_PATH unset
    or file missing at startup) — dashboard routes degrade gracefully.

    If the module-level _conn is set (test override), it is used directly in place
    of the per-thread _get_conn() path.
    """
    if not _db_path:
    if _conn is None and not _db_path:
        return []

    def _query():
        conn = _get_conn()
        conn = _conn if _conn is not None else _get_conn()
        cursor = conn.cursor()
        result = cursor.execute(sql, params or [])
        columns = [desc[0] for desc in result.description]
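A sketch of how a test might use the new `_conn` override. The import path, fixture name, and table contents are assumptions for illustration; only the `_conn` attribute itself comes from the hunk above:

```python
import duckdb
import pytest

from beanflows import analytics  # import path assumed from the hunks above


@pytest.fixture
def analytics_conn(monkeypatch):
    """Point fetch_analytics() at an in-memory DuckDB, bypassing _db_path / _local."""
    conn = duckdb.connect(":memory:")
    conn.execute("CREATE TABLE prices (ticker TEXT, close DOUBLE)")  # illustrative schema
    conn.execute("INSERT INTO prices VALUES ('KC=F', 342.0)")
    monkeypatch.setattr(analytics, "_conn", conn)
    yield conn
    conn.close()
```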
@@ -4,23 +4,53 @@ Payment provider: paddle
"""

import json
import logging
from datetime import datetime
from functools import wraps
from pathlib import Path

from quart import Blueprint, render_template, request, redirect, url_for, flash, g, jsonify, session

import httpx


from ..core import config, fetch_one, fetch_all, execute

from ..core import verify_hmac_signature

from ..auth.routes import login_required


logger = logging.getLogger(__name__)


# =============================================================================
# Billing event hook system
# =============================================================================

_billing_hooks: dict[str, list] = {}


def on_billing_event(*event_types: str):
    """Decorator: register a handler for one or more billing event types."""
    def decorator(func):
        for event_type in event_types:
            _billing_hooks.setdefault(event_type, []).append(func)
        return func
    return decorator


async def _fire_hooks(event_type: str, data: dict) -> None:
    """Fire all registered hooks for an event type, isolating per-hook failures."""
    for hook in _billing_hooks.get(event_type, []):
        try:
            await hook(event_type, data)
        except Exception as e:
            logger.error("Hook %s failed for event %s: %s", hook.__name__, event_type, e)
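A usage sketch for the hook system above. The event name appears later in this diff, but the handler itself is illustrative and not part of the change set:

```python
@on_billing_event("subscription.activated")
async def _log_activation(event_type: str, data: dict) -> None:
    # Illustrative handler: anything registered this way is run by
    # _fire_hooks(event_type, data), with per-hook failures isolated.
    logger.info("Billing event %s for subscription %s", event_type, data.get("id"))
```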

def _paddle_client():
    """Return a configured Paddle SDK client."""
    from paddle_billing import Client, Environment, Options
    env = Environment.SANDBOX if config.PADDLE_ENVIRONMENT == "sandbox" else Environment.PRODUCTION
    return Client(config.PADDLE_API_KEY, options=Options(environment=env))


# Blueprint with its own template folder
bp = Blueprint(
    "billing",
@@ -42,44 +72,74 @@ async def get_subscription(user_id: int) -> dict | None:
|
||||
)
|
||||
|
||||
|
||||
async def upsert_billing_customer(user_id: int, provider_customer_id: str) -> None:
|
||||
"""Create or update billing customer record."""
|
||||
await execute(
|
||||
"""INSERT INTO billing_customers (user_id, provider_customer_id)
|
||||
VALUES (?, ?)
|
||||
ON CONFLICT(user_id) DO UPDATE SET provider_customer_id = excluded.provider_customer_id""",
|
||||
(user_id, provider_customer_id),
|
||||
)
|
||||
|
||||
|
||||
async def get_billing_customer(user_id: int) -> dict | None:
|
||||
"""Get billing customer record for a user."""
|
||||
return await fetch_one(
|
||||
"SELECT * FROM billing_customers WHERE user_id = ?",
|
||||
(user_id,),
|
||||
)
|
||||
|
||||
|
||||
async def upsert_subscription(
|
||||
user_id: int,
|
||||
plan: str,
|
||||
status: str,
|
||||
provider_customer_id: str,
|
||||
provider_subscription_id: str,
|
||||
current_period_end: str = None,
|
||||
) -> int:
|
||||
"""Create or update subscription."""
|
||||
"""Create or update subscription, keyed by provider_subscription_id."""
|
||||
now = datetime.utcnow().isoformat()
|
||||
|
||||
customer_col = "paddle_customer_id" # legacy column, kept for existing rows
|
||||
subscription_col = "provider_subscription_id"
|
||||
|
||||
|
||||
existing = await fetch_one("SELECT id FROM subscriptions WHERE user_id = ?", (user_id,))
|
||||
existing = await fetch_one(
|
||||
"SELECT id FROM subscriptions WHERE provider_subscription_id = ?",
|
||||
(provider_subscription_id,),
|
||||
)
|
||||
|
||||
if existing:
|
||||
await execute(
|
||||
f"""UPDATE subscriptions
|
||||
SET plan = ?, status = ?, {customer_col} = ?, {subscription_col} = ?,
|
||||
current_period_end = ?, updated_at = ?
|
||||
WHERE user_id = ?""",
|
||||
(plan, status, provider_customer_id, provider_subscription_id,
|
||||
current_period_end, now, user_id),
|
||||
"""UPDATE subscriptions
|
||||
SET plan = ?, status = ?, current_period_end = ?, updated_at = ?
|
||||
WHERE provider_subscription_id = ?""",
|
||||
(plan, status, current_period_end, now, provider_subscription_id),
|
||||
)
|
||||
return existing["id"]
|
||||
else:
|
||||
return await execute(
|
||||
f"""INSERT INTO subscriptions
|
||||
(user_id, plan, status, {customer_col}, {subscription_col},
|
||||
current_period_end, created_at, updated_at)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?)""",
|
||||
(user_id, plan, status, provider_customer_id, provider_subscription_id,
|
||||
current_period_end, now, now),
|
||||
"""INSERT INTO subscriptions
|
||||
(user_id, plan, status, provider_subscription_id, current_period_end, created_at, updated_at)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?)""",
|
||||
(user_id, plan, status, provider_subscription_id, current_period_end, now, now),
|
||||
)
|
||||
|
||||
|
||||
async def record_transaction(
|
||||
user_id: int,
|
||||
provider_transaction_id: str,
|
||||
type: str = "payment",
|
||||
amount_cents: int = None,
|
||||
currency: str = "USD",
|
||||
status: str = "pending",
|
||||
) -> int:
|
||||
"""Record a billing transaction. Idempotent on provider_transaction_id."""
|
||||
now = datetime.utcnow().isoformat()
|
||||
return await execute(
|
||||
"""INSERT OR IGNORE INTO transactions
|
||||
(user_id, provider_transaction_id, type, amount_cents, currency, status, created_at)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?)""",
|
||||
(user_id, provider_transaction_id, type, amount_cents, currency, status, now),
|
||||
)
|
||||
|
||||
|
||||
|
||||
async def get_subscription_by_provider_id(subscription_id: str) -> dict | None:
|
||||
return await fetch_one(
|
||||
@@ -165,31 +225,21 @@ async def success():
|
||||
@bp.route("/checkout/<plan>", methods=["POST"])
|
||||
@login_required
|
||||
async def checkout(plan: str):
|
||||
"""Create Paddle checkout via API."""
|
||||
"""Create Paddle checkout via SDK."""
|
||||
price_id = config.PADDLE_PRICES.get(plan)
|
||||
if not price_id:
|
||||
await flash("Invalid plan selected.", "error")
|
||||
return redirect(url_for("billing.pricing"))
|
||||
|
||||
async with httpx.AsyncClient() as client:
|
||||
response = await client.post(
|
||||
"https://api.paddle.com/transactions",
|
||||
headers={
|
||||
"Authorization": f"Bearer {config.PADDLE_API_KEY}",
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
json={
|
||||
"items": [{"price_id": price_id, "quantity": 1}],
|
||||
"custom_data": {"user_id": str(g.user["id"]), "plan": plan},
|
||||
"checkout": {
|
||||
"url": f"{config.BASE_URL}/billing/success",
|
||||
},
|
||||
},
|
||||
from paddle_billing.Resources.Transactions.Operations import CreateTransaction
|
||||
txn = _paddle_client().transactions.create(
|
||||
CreateTransaction(
|
||||
items=[{"price_id": price_id, "quantity": 1}],
|
||||
custom_data={"user_id": str(g.user["id"]), "plan": plan},
|
||||
checkout={"url": f"{config.BASE_URL}/billing/success"},
|
||||
)
|
||||
response.raise_for_status()
|
||||
|
||||
checkout_url = response.json()["data"]["checkout"]["url"]
|
||||
return redirect(checkout_url)
|
||||
)
|
||||
return redirect(txn.checkout.url)
|
||||
|
||||
|
||||
@bp.route("/manage", methods=["POST"])
|
||||
@@ -202,13 +252,8 @@ async def manage():
|
||||
return redirect(url_for("dashboard.settings"))
|
||||
|
||||
try:
|
||||
async with httpx.AsyncClient() as client:
|
||||
response = await client.get(
|
||||
f"https://api.paddle.com/subscriptions/{sub['provider_subscription_id']}",
|
||||
headers={"Authorization": f"Bearer {config.PADDLE_API_KEY}"},
|
||||
)
|
||||
response.raise_for_status()
|
||||
portal_url = response.json()["data"]["management_urls"]["update_payment_method"]
|
||||
subscription = _paddle_client().subscriptions.get(sub["provider_subscription_id"])
|
||||
portal_url = subscription.management_urls.update_payment_method
|
||||
except Exception:
|
||||
await flash("Could not reach the billing portal. Please try again or contact support.", "error")
|
||||
return redirect(url_for("dashboard.settings"))
|
||||
@@ -219,28 +264,27 @@ async def manage():
|
||||
@bp.route("/cancel", methods=["POST"])
|
||||
@login_required
|
||||
async def cancel():
|
||||
"""Cancel subscription via Paddle API."""
|
||||
"""Cancel subscription via Paddle SDK."""
|
||||
sub = await get_subscription(g.user["id"])
|
||||
if sub and sub.get("provider_subscription_id"):
|
||||
async with httpx.AsyncClient() as client:
|
||||
await client.post(
|
||||
f"https://api.paddle.com/subscriptions/{sub['provider_subscription_id']}/cancel",
|
||||
headers={
|
||||
"Authorization": f"Bearer {config.PADDLE_API_KEY}",
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
json={"effective_from": "next_billing_period"},
|
||||
)
|
||||
from paddle_billing.Resources.Subscriptions.Operations import CancelSubscription
|
||||
_paddle_client().subscriptions.cancel(
|
||||
sub["provider_subscription_id"],
|
||||
CancelSubscription(effective_from="next_billing_period"),
|
||||
)
|
||||
return redirect(url_for("dashboard.settings"))
|
||||
|
||||
|
||||
@bp.route("/webhook/paddle", methods=["POST"])
|
||||
async def webhook():
|
||||
"""Handle Paddle webhooks."""
|
||||
import paddle_billing
|
||||
payload = await request.get_data()
|
||||
sig = request.headers.get("Paddle-Signature", "")
|
||||
|
||||
if not verify_hmac_signature(payload, sig, config.PADDLE_WEBHOOK_SECRET):
|
||||
try:
|
||||
paddle_billing.Notifications.Verifier().verify(payload, config.PADDLE_WEBHOOK_SECRET, sig)
|
||||
except Exception:
|
||||
return jsonify({"error": "Invalid signature"}), 400
|
||||
|
||||
event = json.loads(payload)
|
||||
@@ -251,11 +295,14 @@ async def webhook():
|
||||
|
||||
if event_type == "subscription.activated":
|
||||
plan = custom_data.get("plan", "starter")
|
||||
uid = int(user_id) if user_id else 0
|
||||
customer_id = data.get("customer_id")
|
||||
if uid and customer_id:
|
||||
await upsert_billing_customer(uid, str(customer_id))
|
||||
await upsert_subscription(
|
||||
user_id=int(user_id) if user_id else 0,
|
||||
user_id=uid,
|
||||
plan=plan,
|
||||
status="active",
|
||||
provider_customer_id=str(data.get("customer_id", "")),
|
||||
provider_subscription_id=data.get("id", ""),
|
||||
current_period_end=data.get("current_billing_period", {}).get("ends_at"),
|
||||
)
|
||||
|
||||
@@ -77,22 +77,15 @@ class Config:
    RESEND_AUDIENCE_WAITLIST: str = os.getenv("RESEND_AUDIENCE_WAITLIST", "")

    PLAN_FEATURES: dict = {
        "free": ["dashboard", "coffee_only", "limited_history"],
        "starter": ["dashboard", "coffee_only", "full_history", "export", "api"],
        "pro": [
            "dashboard",
            "all_commodities",
            "full_history",
            "export",
            "api",
            "priority_support",
        ],
        "free": ["basic"],
        "starter": ["basic", "export"],
        "pro": ["basic", "export", "api", "priority_support"],
    }

    PLAN_LIMITS: dict = {
        "free": {"commodities": 1, "history_years": 5, "api_calls": 0},
        "starter": {"commodities": 1, "history_years": -1, "api_calls": 10000},
        "pro": {"commodities": -1, "history_years": -1, "api_calls": -1},  # -1 = unlimited
        "free": {"items": 100, "api_calls": 0},
        "starter": {"items": 1000, "api_calls": 10000},
        "pro": {"items": -1, "api_calls": -1},  # -1 = unlimited
    }
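A minimal sketch of how the simplified plan tables might be consumed. The helpers are illustrative, not part of this diff; `config` is assumed to be the `Config` instance imported elsewhere in this change set (`from ..core import config`):

```python
from ..core import config  # Config instance, as imported in the billing blueprint above


def plan_allows(plan: str, feature: str) -> bool:
    """True when the plan's PLAN_FEATURES entry includes the requested feature."""
    return feature in config.PLAN_FEATURES.get(plan, [])


def within_item_limit(plan: str, current_items: int) -> bool:
    """Apply PLAN_LIMITS; -1 means unlimited, per the comment above."""
    limit = config.PLAN_LIMITS.get(plan, {}).get("items", 0)
    return limit == -1 or current_items < limit
```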
@@ -130,7 +130,7 @@ async def index():
|
||||
user = g.user
|
||||
plan = (g.get("subscription") or {}).get("plan", "free")
|
||||
|
||||
if analytics._db_path:
|
||||
if analytics._db_path or analytics._conn is not None:
|
||||
results = await asyncio.gather(
|
||||
analytics.get_price_latest(analytics.COFFEE_TICKER),
|
||||
analytics.get_cot_positioning_latest(analytics.COFFEE_CFTC_CODE),
|
||||
@@ -208,7 +208,7 @@ async def supply():
|
||||
current_year = datetime.date.today().year
|
||||
start_year = current_year - rng["years"]
|
||||
|
||||
if analytics._db_path:
|
||||
if analytics._db_path or analytics._conn is not None:
|
||||
results = await asyncio.gather(
|
||||
analytics.get_global_time_series(
|
||||
analytics.COFFEE_COMMODITY_CODE,
|
||||
@@ -267,7 +267,7 @@ async def positioning():
|
||||
cot_weeks = rng["weeks"]
|
||||
|
||||
options_delta = None
|
||||
if analytics._db_path:
|
||||
if analytics._db_path or analytics._conn is not None:
|
||||
gather_coros = [
|
||||
analytics.get_price_latest(analytics.COFFEE_TICKER),
|
||||
analytics.get_price_time_series(analytics.COFFEE_TICKER, limit=price_limit),
|
||||
@@ -322,7 +322,7 @@ async def warehouse():
|
||||
stocks_latest = stocks_trend = aging_latest = byport_latest = byport_trend = None
|
||||
stocks_trend = aging_latest = byport_trend = []
|
||||
|
||||
if analytics._db_path:
|
||||
if analytics._db_path or analytics._conn is not None:
|
||||
if view == "stocks":
|
||||
results = await asyncio.gather(
|
||||
analytics.get_ice_stocks_latest(),
|
||||
@@ -416,7 +416,7 @@ async def weather():
|
||||
rng = RANGE_MAP[range_key]
|
||||
days = rng["days"]
|
||||
|
||||
if analytics._db_path:
|
||||
if analytics._db_path or analytics._conn is not None:
|
||||
if location_id:
|
||||
results = await asyncio.gather(
|
||||
analytics.get_weather_stress_latest(),
|
||||
|
||||
@@ -287,12 +287,20 @@ def mock_analytics(monkeypatch):
|
||||
"market_year": 2025, "production": 30000.0, "production_yoy_pct": -1.2},
|
||||
]
|
||||
|
||||
_commodities = [
|
||||
{"commodity_code": 711100, "commodity_name": "Coffee, Green"},
|
||||
{"commodity_code": 711200, "commodity_name": "Coffee, Roasted"},
|
||||
]
|
||||
|
||||
async def _ts(*a, **kw): return _time_series
|
||||
async def _top(*a, **kw): return _top_producers
|
||||
async def _stu(*a, **kw): return _stu_trend
|
||||
async def _bal(*a, **kw): return _balance
|
||||
async def _yoy(*a, **kw): return _yoy_data
|
||||
async def _cmp(*a, **kw): return []
|
||||
async def _com(*a, **kw): return _commodities
|
||||
async def _none(*a, **kw): return None
|
||||
async def _empty(*a, **kw): return []
|
||||
|
||||
monkeypatch.setattr(analytics, "get_global_time_series", _ts)
|
||||
monkeypatch.setattr(analytics, "get_top_countries", _top)
|
||||
@@ -300,5 +308,15 @@ def mock_analytics(monkeypatch):
|
||||
monkeypatch.setattr(analytics, "get_supply_demand_balance", _bal)
|
||||
monkeypatch.setattr(analytics, "get_production_yoy_by_country", _yoy)
|
||||
monkeypatch.setattr(analytics, "get_country_comparison", _cmp)
|
||||
monkeypatch.setattr(analytics, "get_available_commodities", _com)
|
||||
# Pulse-page analytics
|
||||
monkeypatch.setattr(analytics, "get_price_latest", _none)
|
||||
monkeypatch.setattr(analytics, "get_price_time_series", _empty)
|
||||
monkeypatch.setattr(analytics, "get_cot_positioning_latest", _none)
|
||||
monkeypatch.setattr(analytics, "get_cot_index_trend", _empty)
|
||||
monkeypatch.setattr(analytics, "get_ice_stocks_latest", _none)
|
||||
monkeypatch.setattr(analytics, "get_ice_stocks_trend", _empty)
|
||||
monkeypatch.setattr(analytics, "get_weather_stress_latest", _none)
|
||||
monkeypatch.setattr(analytics, "get_weather_stress_trend", _empty)
|
||||
|
||||
|
||||
|
||||
@@ -70,13 +70,13 @@ async def test_commodity_metrics(client, db, test_user, mock_analytics):
|
||||
"""GET /commodities/<code>/metrics returns time series."""
|
||||
raw_key = await _create_api_key_for_user(db, test_user["id"])
|
||||
response = await client.get(
|
||||
"/api/v1/commodities/711100/metrics?metrics=Production&metrics=Exports",
|
||||
"/api/v1/commodities/711100/metrics?metrics=production&metrics=exports",
|
||||
headers={"Authorization": f"Bearer {raw_key}"},
|
||||
)
|
||||
assert response.status_code == 200
|
||||
data = await response.get_json()
|
||||
assert data["commodity_code"] == 711100
|
||||
assert "Production" in data["metrics"]
|
||||
assert "production" in data["metrics"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@@ -95,12 +95,12 @@ async def test_commodity_countries(client, db, test_user, mock_analytics):
|
||||
"""GET /commodities/<code>/countries returns ranking."""
|
||||
raw_key = await _create_api_key_for_user(db, test_user["id"])
|
||||
response = await client.get(
|
||||
"/api/v1/commodities/711100/countries?metric=Production&limit=5",
|
||||
"/api/v1/commodities/711100/countries?metric=production&limit=5",
|
||||
headers={"Authorization": f"Bearer {raw_key}"},
|
||||
)
|
||||
assert response.status_code == 200
|
||||
data = await response.get_json()
|
||||
assert data["metric"] == "Production"
|
||||
assert data["metric"] == "production"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
|
||||
@@ -18,10 +18,9 @@ async def test_dashboard_loads(auth_client, mock_analytics):
|
||||
assert response.status_code == 200
|
||||
|
||||
body = (await response.get_data(as_text=True))
|
||||
assert "Coffee Dashboard" in body
|
||||
assert "Global Supply" in body
|
||||
assert "Stock-to-Use" in body
|
||||
assert "Top Producing Countries" in body
|
||||
assert "Pulse" in body
|
||||
assert "Stock-to-Use Ratio" in body
|
||||
assert "KC=F Close" in body
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@@ -30,8 +29,8 @@ async def test_dashboard_shows_metric_cards(auth_client, mock_analytics):
|
||||
response = await auth_client.get("/dashboard/")
|
||||
body = (await response.get_data(as_text=True))
|
||||
|
||||
# Latest production from mock: 172,000
|
||||
assert "172,000" in body
|
||||
assert "MM Net Position" in body
|
||||
assert "Certified Stocks" in body
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@@ -40,8 +39,7 @@ async def test_dashboard_yoy_table(auth_client, mock_analytics):
|
||||
response = await auth_client.get("/dashboard/")
|
||||
body = (await response.get_data(as_text=True))
|
||||
|
||||
assert "Brazil" in body
|
||||
assert "Vietnam" in body
|
||||
assert "Global Supply" in body
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@@ -59,7 +57,7 @@ async def test_dashboard_free_plan_no_csv_export(auth_client, mock_analytics):
|
||||
response = await auth_client.get("/dashboard/")
|
||||
body = (await response.get_data(as_text=True))
|
||||
|
||||
assert "CSV export available on Trader" in body
|
||||
assert "Upgrade" in body
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
|
||||