Greenfield Go multi-tenant IPFS Pinning Service wire-compatible with the
IPFS Pinning Services API spec. Paired 1:1 with Kubo over localhost RPC,
clustered via embedded NATS JetStream, Postgres source-of-truth with
RLS-enforced tenancy, Fiber + huma v2 for the HTTP surface, Authentik
OIDC for session login with kid-rotated HS256 JWT API tokens.
Feature-complete against the 22-milestone build plan, including the
ship-it v1.0 gap items:
* admin CLIs: drain/uncordon, maintenance, mint-token, rotate-key,
prune-denylist, rebalance --dry-run, cache-stats, cluster-presences
* TTL leader election via NATS KV, fence tokens, JetStream dedup
* rebalancer (plan/apply split), reconciler, requeue sweeper
* ristretto caches with NATS-backed cross-node invalidation
(placements live-nodes + token denylist)
* maintenance watchdog for stuck cluster-pause flag
* Prometheus /metrics with CIDR ACL, HTTP/pin/scheduler/cache gauges
* rate limiting: session (10/min) + anonymous global (120/min)
* integration tests: rebalance, refcount multi-org, RLS belt
* goreleaser (tar + deb/rpm/apk + Alpine Docker) targeting Gitea
Stack: Cobra/Viper, Fiber v2 + huma v2, embedded NATS JetStream,
pgx/sqlc/golang-migrate, ristretto, TypeID, prometheus/client_golang,
testcontainers-go.
105 lines · 3.5 KiB · Go
// Package openapi registers anchorage's HTTP surface on a huma API instance.
|
|
//
|
|
// Handlers are grouped by domain (pins, tokens, orgs, audit) and registered
|
|
// via huma's code-first operations. The generated OpenAPI 3.1 spec lives at
|
|
// GET /openapi.json; Scalar UI is served at GET /docs.
|
|
package openapi
|
|
|
|
import (
|
|
"context"
|
|
|
|
"github.com/danielgtaylor/huma/v2"
|
|
"github.com/danielgtaylor/huma/v2/adapters/humafiber"
|
|
"github.com/gofiber/fiber/v2"
|
|
)
|
|
|
|
// New creates a huma.API configured to match the anchorage spec.
|
|
//
|
|
// The returned huma.API is not yet populated with routes — callers
|
|
// (see internal/app/anchorage) wire in domain handlers via Register*
|
|
// functions exported by their respective packages.
|
|
func New(app *fiber.App) huma.API {
|
|
cfg := huma.DefaultConfig("anchorage", "0.1.0")
|
|
cfg.OpenAPI.Info.Description = "Highly-available IPFS Pinning Service API. Wire-compatible with the IPFS Pinning Services API spec, plus org/admin extensions."
|
|
cfg.Servers = []*huma.Server{
|
|
{URL: "/v1", Description: "Default v1 endpoint"},
|
|
}
|
|
cfg.Components.SecuritySchemes = map[string]*huma.SecurityScheme{
|
|
"accessToken": {
|
|
Type: "http",
|
|
Scheme: "bearer",
|
|
BearerFormat: "JWT",
|
|
},
|
|
}
|
|
return humafiber.New(app, cfg)
|
|
}
|
|
|
|
// HealthResponse is the body of /health (liveness probe).
//
// The wrapper struct follows huma's response convention: the exported
// Body field is serialized as the HTTP response body.
type HealthResponse struct {
	Body struct {
		// Status is always "ok" when the process is alive.
		Status string `json:"status" example:"ok"`
	}
}
|
|
|
|
// ReadyResponse is the body of /ready (readiness probe). Includes the
// Reconciler drift count so operators can alert on out-of-sync nodes
// without scraping /metrics.
type ReadyResponse struct {
	Body struct {
		// Status is "ok" when ready, or "not-ready: <reason>" otherwise.
		Status string `json:"status" example:"ok"`
		// Drift is the current reconciler drift count (pins the store
		// believes this node holds vs. pins actually in Kubo's pinset).
		// 0 on a healthy node. Non-zero is not inherently "not ready" —
		// a drifted node still serves requests fine — but operators
		// alert on sustained non-zero values.
		Drift int64 `json:"drift"`
	}
}
|
|
|
|
// ReadyState is the signal the readyz callback returns to
// RegisterHealth. Ready controls the HTTP status (true → 200, false →
// 503). Drift is surfaced in the body regardless.
type ReadyState struct {
	// Ready reports whether the node should accept traffic.
	Ready bool
	// Reason is a human-readable explanation when Ready is false;
	// it is appended to the "not-ready" status string.
	Reason string
	// Drift is the reconciler drift count copied into ReadyResponse.
	Drift int64
}
|
|
|
|
// RegisterHealth registers /health and /ready.
|
|
//
|
|
// /health is a liveness probe: the process is alive and the API is
|
|
// reachable. /ready is a readiness probe: returns 503 while the node
|
|
// is drained or while mandatory dependencies (Postgres, NATS) are
|
|
// unavailable. The readyz callback receives the per-request context so
|
|
// dependency probes are scoped to the request (not the app's root
|
|
// ctx, which would stall health-checks mid-shutdown).
|
|
func RegisterHealth(api huma.API, readyz func(ctx context.Context) ReadyState) {
|
|
huma.Register(api, huma.Operation{
|
|
OperationID: "getHealth",
|
|
Method: "GET",
|
|
Path: "/health",
|
|
Summary: "Liveness probe",
|
|
}, func(_ context.Context, _ *struct{}) (*HealthResponse, error) {
|
|
out := &HealthResponse{}
|
|
out.Body.Status = "ok"
|
|
return out, nil
|
|
})
|
|
|
|
huma.Register(api, huma.Operation{
|
|
OperationID: "getReady",
|
|
Method: "GET",
|
|
Path: "/ready",
|
|
Summary: "Readiness probe (also reports reconciler drift)",
|
|
}, func(ctx context.Context, _ *struct{}) (*ReadyResponse, error) {
|
|
state := readyz(ctx)
|
|
out := &ReadyResponse{}
|
|
out.Body.Drift = state.Drift
|
|
if state.Ready {
|
|
out.Body.Status = "ok"
|
|
return out, nil
|
|
}
|
|
out.Body.Status = "not-ready: " + state.Reason
|
|
return out, huma.Error503ServiceUnavailable("not ready: " + state.Reason)
|
|
})
|
|
}
|