Greenfield Go multi-tenant IPFS Pinning Service wire-compatible with the
IPFS Pinning Services API spec. Paired 1:1 with Kubo over localhost RPC,
clustered via embedded NATS JetStream, Postgres source-of-truth with
RLS-enforced tenancy, Fiber + huma v2 for the HTTP surface, Authentik
OIDC for session login with kid-rotated HS256 JWT API tokens.
Feature-complete against the 22-milestone build plan, including the
ship-it v1.0 gap items:
* admin CLIs: drain/uncordon, maintenance, mint-token, rotate-key,
prune-denylist, rebalance --dry-run, cache-stats, cluster-presences
* TTL leader election via NATS KV, fence tokens, JetStream dedup
  (election sketched below)
* rebalancer (plan/apply split), reconciler, requeue sweeper
* ristretto caches with NATS-backed cross-node invalidation
  (placements live-nodes + token denylist; sketched below)
* maintenance watchdog for stuck cluster-pause flag
* Prometheus /metrics with CIDR ACL, HTTP/pin/scheduler/cache gauges
* rate limiting: session (10/min) + anonymous global (120/min),
  sketched below
* integration tests: rebalance, refcount multi-org, RLS belt
* goreleaser (tar + deb/rpm/apk + Alpine Docker) targeting Gitea
Stack: Cobra/Viper, Fiber v2 + huma v2, embedded NATS JetStream,
pgx/sqlc/golang-migrate, ristretto, TypeID, prometheus/client_golang,
testcontainers-go.
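
A few of the gap items above are load-bearing enough to merit sketches.
First, the TTL leader election. Bucket and key names here are
illustrative rather than anchorage's real identifiers, but the shape is
the standard nats.go KV pattern: Create as the atomic acquire, the
bucket TTL as the lease, and the KV revision as the fence token.

// Minimal sketch of TTL leader election over a JetStream KV bucket.
// runLeaderLoop, the "leader" bucket, and the "scheduler" key are
// hypothetical names.
package main

import (
	"log"
	"time"

	"github.com/nats-io/nats.go"
)

func runLeaderLoop(js nats.JetStreamContext, nodeID string) {
	// The bucket TTL bounds how long a dead leader's claim survives.
	kv, err := js.CreateKeyValue(&nats.KeyValueConfig{
		Bucket: "leader",
		TTL:    15 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	for {
		// Create is atomic: it succeeds only while no live entry
		// exists, so exactly one node wins each term.
		rev, err := kv.Create("scheduler", []byte(nodeID))
		if err != nil {
			time.Sleep(5 * time.Second) // someone else leads; retry later
			continue
		}
		log.Printf("%s is leader, fence token %d", nodeID, rev)
		// Renew well inside the TTL; the revision doubles as the
		// fence token stamped on leader-only writes downstream.
		for {
			time.Sleep(5 * time.Second)
			if rev, err = kv.Update("scheduler", []byte(nodeID), rev); err != nil {
				log.Printf("%s lost leadership: %v", nodeID, err)
				break // deposed; back to campaigning
			}
		}
	}
}

func main() {
	nc, err := nats.Connect(nats.DefaultURL) // embedded NATS in the real service
	if err != nil {
		log.Fatal(err)
	}
	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}
	runLeaderLoop(js, "node-1")
}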
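
Next, the cross-node cache invalidation, again as a minimal sketch with
the subject name and cache sizing assumed: each node keeps a local
ristretto cache and subscribes to an invalidation subject, and whoever
mutates the underlying row publishes the affected key. Core NATS is
enough for this shape, since a dropped invalidation only means a stale
entry until its TTL, not lost data.

// Local ristretto cache with NATS-fanned invalidation. The
// "cache.invalidate" subject is a hypothetical name.
package main

import (
	"log"

	"github.com/dgraph-io/ristretto"
	"github.com/nats-io/nats.go"
)

func newInvalidatingCache(nc *nats.Conn) (*ristretto.Cache, error) {
	cache, err := ristretto.NewCache(&ristretto.Config{
		NumCounters: 1e5,     // keys tracked for admission decisions
		MaxCost:     1 << 26, // ~64 MiB budget
		BufferItems: 64,
	})
	if err != nil {
		return nil, err
	}
	// Every node, including the publisher, drops the key locally.
	_, err = nc.Subscribe("cache.invalidate", func(m *nats.Msg) {
		cache.Del(string(m.Data))
	})
	return cache, err
}

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := newInvalidatingCache(nc); err != nil {
		log.Fatal(err)
	}
	select {} // block so the subscription stays alive
}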
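
The rate-limiting tiers also fit a short sketch. This one assumes
Fiber's stock limiter middleware and treats "session" as anything
carrying an Authorization header, which is a guess at the real rule;
the point is the shape: two stacked limiters whose Next predicates
route each request to exactly one bucket.

// Two-tier rate limiting: one shared 120/min bucket for anonymous
// traffic, 10/min per credential otherwise.
package main

import (
	"log"
	"time"

	"github.com/gofiber/fiber/v2"
	"github.com/gofiber/fiber/v2/middleware/limiter"
)

func main() {
	app := fiber.New()
	// Anonymous global: a constant key funnels all anonymous requests
	// into one bucket.
	app.Use(limiter.New(limiter.Config{
		Max:          120,
		Expiration:   time.Minute,
		Next:         func(c *fiber.Ctx) bool { return c.Get("Authorization") != "" },
		KeyGenerator: func(c *fiber.Ctx) string { return "anonymous" },
	}))
	// Session tier: each credential gets its own 10/min bucket.
	app.Use(limiter.New(limiter.Config{
		Max:          10,
		Expiration:   time.Minute,
		Next:         func(c *fiber.Ctx) bool { return c.Get("Authorization") == "" },
		KeyGenerator: func(c *fiber.Ctx) string { return c.Get("Authorization") },
	}))
	log.Fatal(app.Listen(":8080"))
}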
Nginx Configuration File

# nginx LB config for the anchorage Swarm stack. Meant to be included
# from the http{} context (e.g. /etc/nginx/conf.d/), since upstream,
# map, and log_format are http-level directives.
#
# Fronts three anchorage backends, balanced by least_conn (declared in
# the upstream block below); the named upstream makes it a one-line
# change to fall back to round-robin or, on nginx Plus, to least_time.
#
# WebSocket upgrade is handled for /v1/events only, per the plan;
# ordinary GET /v1/... requests stay HTTP/1.1 with proxy keepalive.

upstream anchorage_backends {
    # Least connections biases traffic toward the lightest-loaded node,
    # which matters once pin create calls fan into the same placement set.
    least_conn;

    server anchorage-1:8080 max_fails=3 fail_timeout=15s;
    server anchorage-2:8080 max_fails=3 fail_timeout=15s;
    server anchorage-3:8080 max_fails=3 fail_timeout=15s;

    keepalive 64;
}

map $http_upgrade $connection_upgrade {
    default upgrade;
    '' close;
}

# Hide nginx version from error pages and the Server header.
server_tokens off;

# $request_time and $upstream_response_time are in seconds (millisecond
# resolution), so the keys say so.
log_format anchorage_json escape=json
    '{'
    '"time":"$time_iso8601",'
    '"remote":"$remote_addr",'
    '"request":"$request",'
    '"status":$status,'
    '"bytes":$body_bytes_sent,'
    '"duration_s":$request_time,'
    '"upstream":"$upstream_addr",'
    '"upstream_status":"$upstream_status",'
    '"upstream_duration_s":"$upstream_response_time",'
    '"request_id":"$http_x_request_id",'
    '"user_agent":"$http_user_agent"'
    '}';

server {
    listen 8080;
    listen [::]:8080;

    access_log /var/log/nginx/access.log anchorage_json;

    client_max_body_size 8m;
    client_body_timeout 60s;
    client_header_timeout 15s;
    send_timeout 60s;

    # Pass any client-supplied X-Request-ID through to the backend so
    # Fiber reuses it rather than minting a fresh one, and the same ID
    # shows up in both the LB and anchorage logs.
    proxy_set_header X-Request-ID $http_x_request_id;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;

    # HTTP/1.1 + keepalive to the upstream pool.
    proxy_http_version 1.1;
    proxy_set_header Connection "";

    # Root: normal HTTP traffic.
    location / {
        proxy_pass http://anchorage_backends;
        proxy_connect_timeout 5s;
        proxy_read_timeout 60s;
        proxy_send_timeout 60s;
    }

    # WebSocket endpoint: read/send timeouts raised to an hour so
    # long-lived status event streams stay open. Upgrade/Connection are
    # set so the backend sees a proper WS upgrade. Declaring any
    # proxy_set_header here stops inheritance of the server-level ones,
    # so the forwarding headers are repeated.
    location = /v1/events {
        proxy_pass http://anchorage_backends;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
        proxy_set_header X-Request-ID $http_x_request_id;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 3600s;
        proxy_send_timeout 3600s;
    }

    # Fast-path probe answered at the LB itself so the k8s / swarm
    # healthcheck doesn't have to round-trip to a backend.
    location = /lbz {
        access_log off;
        # default_type (not add_header Content-Type) so the response
        # carries exactly one Content-Type header.
        default_type text/plain;
        return 200 "ok\n";
    }
}
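
Two backend-side notes round out the LB config. First, the request-ID
plumbing: the proxy_set_header above hands any client-supplied ID to
the backend, and Fiber's stock requestid middleware reuses an inbound
value, generating a fresh one only when nothing arrives. A sketch,
assuming anchorage wires the middleware roughly like this:

// Backend half of the request-ID story.
package main

import (
	"log"

	"github.com/gofiber/fiber/v2"
	"github.com/gofiber/fiber/v2/middleware/requestid"
	"github.com/gofiber/fiber/v2/utils"
)

func main() {
	app := fiber.New()
	app.Use(requestid.New(requestid.Config{
		Header:    "X-Request-ID", // the header the LB forwards
		Generator: utils.UUID,     // used only when no ID was forwarded
	}))
	log.Fatal(app.Listen(":8080"))
}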
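
Second, the /v1/events upgrade the WS location fronts. Another hedged
sketch: it assumes the gofiber websocket middleware, and registerEvents
plus the echo body are stand-ins for anchorage's real event fan-out.

// Upgrade gate + handler shape for the WS endpoint behind the LB.
package main

import (
	"log"

	"github.com/gofiber/fiber/v2"
	"github.com/gofiber/websocket/v2"
)

func registerEvents(app *fiber.App) {
	// Plain GETs are rejected; the nginx location forwards the
	// Upgrade/Connection headers, so a real WS client passes the gate.
	app.Use("/v1/events", func(c *fiber.Ctx) error {
		if websocket.IsWebSocketUpgrade(c) {
			return c.Next()
		}
		return fiber.ErrUpgradeRequired
	})
	app.Get("/v1/events", websocket.New(func(conn *websocket.Conn) {
		// Echo until the client (or the LB's 3600s read timeout)
		// ends the stream.
		for {
			mt, msg, err := conn.ReadMessage()
			if err != nil {
				return
			}
			if err := conn.WriteMessage(mt, msg); err != nil {
				return
			}
		}
	}))
}

func main() {
	app := fiber.New()
	registerEvents(app)
	log.Fatal(app.Listen(":8080"))
}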