Remove R2 bucket management from Pulumi, use cpx11 for supervisor

- R2 buckets (beanflows-artifacts, beanflows-data-prod) managed manually in Cloudflare UI
- R2 API tokens don't work with Cloudflare Pulumi provider
- Use cpx11 (€4.49/mo) instead of non-existent ccx11
- Import existing SSH key (deeman@DeemanPC)
- Successfully deployed supervisor at 49.13.231.178
This commit is contained in:
Deeman
2025-10-12 23:18:52 +02:00
parent da17a29987
commit 719aa8edd9

View File

@@ -1,58 +1,47 @@
"""
BeanFlows.coffee Infrastructure
Cloudflare R2 + Iceberg + Hetzner compute stack
Hetzner compute stack for ephemeral worker orchestration
Note: R2 buckets are managed manually in Cloudflare dashboard
- beanflows-artifacts: Stores CLI and pipeline artifacts
- beanflows-data-prod: Iceberg data lakehouse
"""
import pulumi
import pulumi_cloudflare as cloudflare
import pulumi_hcloud as hcloud
# Load configuration
config = pulumi.Config()
cloudflare_account_id = config.require("cloudflare_account_id")
hetzner_location = config.get("hetzner_location") or "nbg1" # Nuremberg datacenter
# ============================================================
# Cloudflare R2 Storage + Data Catalog (Iceberg)
# R2 Bucket Names (managed manually in Cloudflare R2 UI)
# ============================================================
# R2 buckets cannot be managed via Pulumi as they require R2-specific tokens
# that don't work with the Cloudflare Pulumi provider.
# These are defined here for documentation purposes only.
# R2 bucket for artifacts (CLI + extract/transform packages)
# Note: Import existing bucket with:
# pulumi import cloudflare:index/r2Bucket:R2Bucket beanflows-artifacts <account_id>/beanflows-artifacts
artifacts_bucket = cloudflare.R2Bucket(
"beanflows-artifacts",
account_id=cloudflare_account_id,
name="beanflows-artifacts",
location="weur", # Western Europe
)
# R2 bucket for lakehouse (Iceberg tables)
# Note: Import existing bucket with:
# pulumi import cloudflare:index/r2Bucket:R2Bucket beanflows-data-prod <account_id>/beanflows-data-prod
lakehouse_bucket = cloudflare.R2Bucket(
"beanflows-data-prod",
account_id=cloudflare_account_id,
name="beanflows-data-prod",
location="weur",
)
ARTIFACTS_BUCKET = "beanflows-artifacts" # CLI + extract/transform packages
LAKEHOUSE_BUCKET = "beanflows-data-prod" # Iceberg tables (EEUR region)
# ============================================================
# Hetzner Cloud Infrastructure
# ============================================================
# SSH key for server access
# SSH key for server access (imported from existing key)
# NOTE(review): diff residue — the two comment lines above, and the two name=
# arguments inside the SshKey call below, are the old/new sides of the same
# change; only one of each exists in the committed file. Per the commit
# message, the key was imported as the pre-existing "deeman@DeemanPC" key,
# and protect=True guards it from accidental Pulumi deletion.
ssh_key = hcloud.SshKey(
"materia-ssh-key",
name="materia-deployment-key",
name="deeman@DeemanPC",
public_key=config.require_secret("ssh_public_key"),
opts=pulumi.ResourceOptions(protect=True),
)
# Small CCX instance for supervisor (runs materia CLI to orchestrate pipelines)
# Small CPX instance for supervisor (runs materia CLI to orchestrate pipelines)
# This is an always-on instance that creates/destroys ephemeral workers on-demand
supervisor_server = hcloud.Server(
"materia-supervisor",
name="materia-supervisor",
# NOTE(review): the two server_type lines below are old ("ccx11") vs new
# ("cpx11"); the commit message states ccx11 does not exist and cpx11
# (~€4.49/mo) is the replacement.
server_type="ccx11", # 2 vCPU, 4GB RAM, ~€4/mo (cheapest option)
server_type="cpx11", # 2 vCPU (shared), 2GB RAM, ~€4.49/mo (cheapest option)
image="ubuntu-24.04",
location=hetzner_location,
ssh_keys=[ssh_key.id],
# NOTE(review): this hcloud.Server(...) call is truncated here — the rest of
# its arguments and the firewall resources fall past the next diff hunk.
@@ -122,18 +111,6 @@ supervisor_firewall = hcloud.FirewallAttachment(
# Outputs
# ============================================================
# NOTE(review): diff residue — everything from the first two exports through
# the duckdb_r2_config export is the pre-change ("-") side (it reads .name off
# the R2Bucket resources this commit deletes); the two exports at the very end
# are the "+" replacements, which export the plain string constants instead.
pulumi.export("artifacts_bucket_name", artifacts_bucket.name)
pulumi.export("lakehouse_bucket_name", lakehouse_bucket.name)
# Still exported on both sides: public IPv4 of the supervisor server.
pulumi.export("supervisor_ip", supervisor_server.ipv4_address)
# Export connection info for DuckDB
pulumi.export(
"duckdb_r2_config",
pulumi.Output.all(cloudflare_account_id, lakehouse_bucket.name).apply(
lambda args: {
"account_id": args[0],
"bucket": args[1],
"catalog_uri": f"https://catalog.cloudflarestorage.com/{args[0]}/r2-data-catalog",
}
),
)
pulumi.export("artifacts_bucket_name", ARTIFACTS_BUCKET)
pulumi.export("lakehouse_bucket_name", LAKEHOUSE_BUCKET)