Greenfield Go multi-tenant IPFS Pinning Service wire-compatible with the
IPFS Pinning Services API spec. Paired 1:1 with Kubo over localhost RPC,
clustered via embedded NATS JetStream, Postgres source-of-truth with
RLS-enforced tenancy, Fiber + huma v2 for the HTTP surface, Authentik
OIDC for session login with kid-rotated HS256 JWT API tokens.
Feature-complete against the 22-milestone build plan, including the
ship-it v1.0 gap items:
* admin CLIs: drain/uncordon, maintenance, mint-token, rotate-key,
prune-denylist, rebalance --dry-run, cache-stats, cluster-presences
* TTL leader election via NATS KV, fence tokens, JetStream dedup
* rebalancer (plan/apply split), reconciler, requeue sweeper
* ristretto caches with NATS-backed cross-node invalidation
(placements live-nodes + token denylist)
* maintenance watchdog for stuck cluster-pause flag
* Prometheus /metrics with CIDR ACL, HTTP/pin/scheduler/cache gauges
* rate limiting: session (10/min) + anonymous global (120/min)
* integration tests: rebalance, refcount multi-org, RLS belt
* goreleaser (tar + deb/rpm/apk + Alpine Docker) targeting Gitea
Stack: Cobra/Viper, Fiber v2 + huma v2, embedded NATS JetStream,
pgx/sqlc/golang-migrate, ristretto, TypeID, prometheus/client_golang,
testcontainers-go.
60 lines
1.6 KiB
Go
// Package postgres is anchorage's Postgres adapter: pgxpool lifecycle,
// schema migrations, and the concrete implementation of the store
// interfaces defined in internal/pkg/store.
package postgres
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"time"
|
|
|
|
"github.com/jackc/pgx/v5/pgxpool"
|
|
)
|
|
|
|
// PoolConfig is the subset of config.PostgresConfig the pool needs. Kept
// as a local struct so callers (tests, migrate subcommand) can construct
// one without importing the full config package.
type PoolConfig struct {
	// DSN is the Postgres connection string passed to pgxpool.ParseConfig.
	DSN string
	// MaxConns caps the pool size; zero or negative defers to pgxpool's
	// default.
	MaxConns int
}
|
|
|
|
// NewPool constructs a pgxpool.Pool and verifies connectivity with a ping.
|
|
//
|
|
// The caller is responsible for calling pool.Close() at shutdown. A zero
|
|
// or negative MaxConns falls back to pgxpool's default.
|
|
func NewPool(ctx context.Context, cfg PoolConfig) (*pgxpool.Pool, error) {
|
|
if cfg.DSN == "" {
|
|
return nil, fmt.Errorf("postgres dsn is required")
|
|
}
|
|
|
|
poolCfg, err := pgxpool.ParseConfig(cfg.DSN)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("parse postgres dsn: %w", err)
|
|
}
|
|
|
|
if cfg.MaxConns > 0 {
|
|
poolCfg.MaxConns = int32(cfg.MaxConns)
|
|
}
|
|
|
|
// Keep idle conns short so Postgres restarts don't wedge old tcp
|
|
// connections that will never work again.
|
|
poolCfg.MaxConnIdleTime = 5 * time.Minute
|
|
poolCfg.MaxConnLifetime = 30 * time.Minute
|
|
poolCfg.HealthCheckPeriod = 30 * time.Second
|
|
|
|
pool, err := pgxpool.NewWithConfig(ctx, poolCfg)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("open postgres pool: %w", err)
|
|
}
|
|
|
|
pingCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
|
defer cancel()
|
|
if err := pool.Ping(pingCtx); err != nil {
|
|
pool.Close()
|
|
return nil, fmt.Errorf("ping postgres: %w", err)
|
|
}
|
|
|
|
return pool, nil
|
|
}
|