4 Commits
v0.1.3 ... main

48 changed files with 4611 additions and 889 deletions

View File

@@ -50,6 +50,7 @@ brews:
ids:
- credential-helper
repository:
# https://tangled.org/did:plc:e3kzdezk5gsirzh7eoqplc64/
# Tap lives inside the main repo under Formula/. DID-based URL keeps
# this stable across any future Tangled handle/domain rename.
owner: 'did:plc:pddp4xt5lgnv2qsegbzzs4xg'

View File

@@ -120,8 +120,12 @@ Three token types flow through the system:
| Token | Issued By | Used For | Lifetime |
|-------|-----------|----------|----------|
| OAuth (access+refresh) | User's PDS | AppView → PDS communication | ~2h / ~90d |
| Registry JWT | AppView | Docker client → AppView | 5 min |
| Service Token | User's PDS | AppView → Hold service | 60s (cached 50s) |
| Registry JWT | AppView | Docker client → AppView | bound to service-auth (~5 min) |
| Service Token | User's PDS | AppView → Hold service | 5 min, cached until ~10s before granted exp |
The Registry JWT and Service Token are minted together at `/auth/token` (see `pkg/appview/authgate/service_auth_fetcher.go`) and stamped with the same expiry, so they invalidate concurrently. When Docker hits 401, the next `/auth/token` call refreshes both. A long-running push that outlives the 5-minute window naturally rolls over via Docker's standard 401-retry behavior.
Reference PDSes grant up to 1h with grant-or-error semantics; non-reference PDSes may grant less than the requested 5 minutes. The service-auth cache uses whatever exp the PDS actually granted, with the 10s safety margin always applied. The JWT's exp is bound to that real value, not to the requested duration.
```
Docker Client ──Registry JWT──→ AppView ──OAuth──→ User's PDS ──Service Token──→ Hold

View File

@@ -73,6 +73,10 @@ test-verbose: ## Run tests with verbose output
@echo "→ Running tests with verbose output..."
go test -v ./...
integration-test: ## Run in-process smoke test (no docker, fake PDS + gofakes3 + hold + appview)
@echo "→ Running integration smoke test..."
go test -tags=integration -count=1 -race -timeout=120s ./test/integration/...
##@ Quality Targets
.PHONY: check-golangci-lint

View File

@@ -1,418 +0,0 @@
// Command s3-test is a diagnostic tool that tests S3 connectivity using both
// AWS SDK v1 (used by distribution's storage driver) and AWS SDK v2 (used by
// ATCR's presigned URL service). It helps diagnose signature compatibility
// issues with S3-compatible storage providers.
package main
import (
"bufio"
"context"
"flag"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strings"
"time"
awsv1 "github.com/aws/aws-sdk-go/aws"
credentialsv1 "github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
s3v1 "github.com/aws/aws-sdk-go/service/s3"
awsv2 "github.com/aws/aws-sdk-go-v2/aws"
configv2 "github.com/aws/aws-sdk-go-v2/config"
credentialsv2 "github.com/aws/aws-sdk-go-v2/credentials"
s3v2 "github.com/aws/aws-sdk-go-v2/service/s3"
)
// main wires together flag/env configuration, builds S3 clients with both AWS
// SDK generations, and runs a fixed sequence of connectivity probes, printing
// a PASS/FAIL summary at the end. It exits non-zero only on usage/config
// errors; individual probe failures are reported in the summary instead.
func main() {
	var (
		envFile   = flag.String("env-file", "", "Load environment variables from file (KEY=VALUE format)")
		accessKey = flag.String("access-key", "", "S3 access key (env: AWS_ACCESS_KEY_ID)")
		secretKey = flag.String("secret-key", "", "S3 secret key (env: AWS_SECRET_ACCESS_KEY)")
		region    = flag.String("region", "", "S3 region (env: S3_REGION)")
		bucket    = flag.String("bucket", "", "S3 bucket name (env: S3_BUCKET)")
		endpoint  = flag.String("endpoint", "", "S3 endpoint URL (env: S3_ENDPOINT)")
		pullZone  = flag.String("pull-zone", "", "CDN pull zone URL for presigned reads (env: PULL_ZONE)")
		prefix    = flag.String("prefix", "docker/registry/v2/blobs", "Key prefix for list operations")
		verbose   = flag.Bool("verbose", false, "Enable SDK debug signing logs")
	)
	flag.Parse()
	// Load env file first, then let flags and real env vars override.
	if *envFile != "" {
		if err := loadEnvFile(*envFile); err != nil {
			fmt.Fprintf(os.Stderr, "Error loading env file: %v\n", err)
			os.Exit(1)
		}
	}
	// Resolve each setting: flag > env var > default.
	if *accessKey == "" {
		*accessKey = os.Getenv("AWS_ACCESS_KEY_ID")
	}
	if *secretKey == "" {
		*secretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
	}
	if *region == "" {
		*region = envOr("S3_REGION", "us-east-1")
	}
	if *bucket == "" {
		*bucket = os.Getenv("S3_BUCKET")
	}
	if *endpoint == "" {
		*endpoint = os.Getenv("S3_ENDPOINT")
	}
	if *pullZone == "" {
		*pullZone = os.Getenv("PULL_ZONE")
	}
	if *accessKey == "" || *secretKey == "" || *bucket == "" {
		fmt.Fprintln(os.Stderr, "Usage: s3-test [--env-file FILE] [--access-key KEY] [--secret-key KEY] [--bucket BUCKET] [--endpoint URL] [--region REGION] [--prefix PREFIX] [--verbose]")
		fmt.Fprintln(os.Stderr, "Env vars: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, S3_BUCKET, S3_REGION, S3_ENDPOINT")
		os.Exit(1)
	}
	fmt.Println("S3 Connectivity Diagnostic")
	fmt.Println("==========================")
	fmt.Printf("Endpoint: %s\n", valueOr(*endpoint, "(default AWS)"))
	fmt.Printf("Pull Zone: %s\n", valueOr(*pullZone, "(none)"))
	fmt.Printf("Region: %s\n", *region)
	// maskSecret avoids the out-of-range panic that direct [:3]/[len-3:]
	// slicing hits on credentials shorter than 3 characters.
	fmt.Printf("AccessKey: %s\n", maskSecret(*accessKey))
	fmt.Printf("SecretKey: %s\n", maskSecret(*secretKey))
	fmt.Printf("Bucket: %s\n", *bucket)
	fmt.Printf("Prefix: %s\n", *prefix)
	fmt.Println()
	ctx := context.Background()
	// Up to 10 probes can run (3 SDK v1 + 2 SDK v2 bucket/list, 2 HeadObject,
	// presigned GET, pull-zone GET, presigned PUT).
	results := make([]result, 0, 10)
	// Shared HTTP client with a timeout so a hung endpoint cannot stall the
	// diagnostic forever (http.Get / http.DefaultClient have no timeout).
	httpClient := &http.Client{Timeout: 30 * time.Second}
	// Build SDK v1 client (SigV4) — matches distribution driver's New()
	v1Client := buildV1Client(*accessKey, *secretKey, *region, *endpoint, *verbose)
	// Test 1: SDK v1 SigV4 HeadBucket
	results = append(results, runTest("SDK v1 / SigV4 / HeadBucket", func() error {
		_, err := v1Client.HeadBucketWithContext(ctx, &s3v1.HeadBucketInput{
			Bucket: awsv1.String(*bucket),
		})
		return err
	}))
	// Test 2: SDK v1 SigV4 ListObjectsV2
	results = append(results, runTest("SDK v1 / SigV4 / ListObjectsV2", func() error {
		_, err := v1Client.ListObjectsV2WithContext(ctx, &s3v1.ListObjectsV2Input{
			Bucket:  awsv1.String(*bucket),
			Prefix:  awsv1.String(*prefix),
			MaxKeys: awsv1.Int64(5),
		})
		return err
	}))
	// Test 3: SDK v1 SigV4 ListObjectsV2Pages (paginated, matches doWalk)
	results = append(results, runTest("SDK v1 / SigV4 / ListObjectsV2Pages", func() error {
		return v1Client.ListObjectsV2PagesWithContext(ctx, &s3v1.ListObjectsV2Input{
			Bucket:  awsv1.String(*bucket),
			Prefix:  awsv1.String(*prefix),
			MaxKeys: awsv1.Int64(5),
		}, func(page *s3v1.ListObjectsV2Output, lastPage bool) bool {
			return false // stop after first page
		})
	}))
	// Build SDK v2 client — matches NewS3Service()
	v2Client := buildV2Client(ctx, *accessKey, *secretKey, *region, *endpoint)
	// Test 4: SDK v2 SigV4 HeadBucket
	results = append(results, runTest("SDK v2 / SigV4 / HeadBucket", func() error {
		_, err := v2Client.HeadBucket(ctx, &s3v2.HeadBucketInput{
			Bucket: awsv2.String(*bucket),
		})
		return err
	}))
	// Test 5: SDK v2 SigV4 ListObjectsV2
	results = append(results, runTest("SDK v2 / SigV4 / ListObjectsV2", func() error {
		_, err := v2Client.ListObjectsV2(ctx, &s3v2.ListObjectsV2Input{
			Bucket:  awsv2.String(*bucket),
			Prefix:  awsv2.String(*prefix),
			MaxKeys: awsv2.Int32(5),
		})
		return err
	}))
	// Find a real object key for GetObject / presigned URL tests.
	var testKey string
	listOut, err := v2Client.ListObjectsV2(ctx, &s3v2.ListObjectsV2Input{
		Bucket:  awsv2.String(*bucket),
		Prefix:  awsv2.String(*prefix),
		MaxKeys: awsv2.Int32(1),
	})
	if err == nil && len(listOut.Contents) > 0 {
		testKey = *listOut.Contents[0].Key
	}
	if testKey == "" {
		fmt.Printf("\n (Skipping GetObject/Presigned tests — no objects found under prefix %q)\n", *prefix)
	} else {
		fmt.Printf("\n Test object: %s\n\n", testKey)
		// Test 6: SDK v1 HeadObject (metadata only, no body transfer)
		results = append(results, runTest("SDK v1 / SigV4 / HeadObject", func() error {
			_, err := v1Client.HeadObjectWithContext(ctx, &s3v1.HeadObjectInput{
				Bucket: awsv1.String(*bucket),
				Key:    awsv1.String(testKey),
			})
			return err
		}))
		// Test 7: SDK v2 HeadObject (metadata only, no body transfer)
		results = append(results, runTest("SDK v2 / SigV4 / HeadObject", func() error {
			_, err := v2Client.HeadObject(ctx, &s3v2.HeadObjectInput{
				Bucket: awsv2.String(*bucket),
				Key:    awsv2.String(testKey),
			})
			return err
		}))
		// Test 8: SDK v2 Presigned GET URL (generate + fetch)
		presignClient := s3v2.NewPresignClient(v2Client)
		results = append(results, runTest("SDK v2 / Presigned GET URL", func() error {
			presigned, err := presignClient.PresignGetObject(ctx, &s3v2.GetObjectInput{
				Bucket: awsv2.String(*bucket),
				Key:    awsv2.String(testKey),
			}, func(opts *s3v2.PresignOptions) {
				opts.Expires = 5 * time.Minute
			})
			if err != nil {
				return fmt.Errorf("presign: %w", err)
			}
			if *verbose {
				// Show host + query params (no path to avoid leaking key structure);
				// parse error deliberately ignored for this best-effort debug print.
				u, _ := url.Parse(presigned.URL)
				fmt.Printf("\n Presigned host: %s\n", u.Host)
				fmt.Printf(" Signed headers: %s\n", presigned.SignedHeader)
			}
			resp, err := httpClient.Get(presigned.URL)
			if err != nil {
				return fmt.Errorf("fetch: %w", err)
			}
			body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
			resp.Body.Close()
			if resp.StatusCode != http.StatusOK {
				return fmt.Errorf("presigned URL returned %d: %s", resp.StatusCode, string(body))
			}
			return nil
		}))
		// Pull zone presigned tests — sign against real endpoint, swap host to pull zone.
		if *pullZone != "" {
			results = append(results, runTest("SDK v2 / Presigned GET via Pull Zone", func() error {
				presigned, err := presignClient.PresignGetObject(ctx, &s3v2.GetObjectInput{
					Bucket: awsv2.String(*bucket),
					Key:    awsv2.String(testKey),
				}, func(opts *s3v2.PresignOptions) {
					opts.Expires = 5 * time.Minute
				})
				if err != nil {
					return fmt.Errorf("presign: %w", err)
				}
				pzURL := swapHost(presigned.URL, *pullZone)
				if *verbose {
					fmt.Printf("\n Signed against: %s\n", presigned.URL[:40]+"...")
					fmt.Printf(" Fetching from: %s\n", pzURL[:40]+"...")
				}
				resp, err := httpClient.Get(pzURL)
				if err != nil {
					return fmt.Errorf("fetch: %w", err)
				}
				body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
				resp.Body.Close()
				if resp.StatusCode != http.StatusOK {
					return fmt.Errorf("pull zone GET returned %d: %s", resp.StatusCode, string(body))
				}
				return nil
			}))
		}
		// Test 9: SDK v2 Presigned PUT URL (generate + upload empty probe object)
		results = append(results, runTest("SDK v2 / Presigned PUT URL", func() error {
			putKey := *prefix + "/_s3-test-probe"
			presigned, err := presignClient.PresignPutObject(ctx, &s3v2.PutObjectInput{
				Bucket: awsv2.String(*bucket),
				Key:    awsv2.String(putKey),
			}, func(opts *s3v2.PresignOptions) {
				opts.Expires = 5 * time.Minute
			})
			if err != nil {
				return fmt.Errorf("presign: %w", err)
			}
			req, err := http.NewRequestWithContext(ctx, http.MethodPut, presigned.URL, strings.NewReader(""))
			if err != nil {
				return fmt.Errorf("build request: %w", err)
			}
			req.Header.Set("Content-Length", "0")
			resp, err := httpClient.Do(req)
			if err != nil {
				return fmt.Errorf("fetch: %w", err)
			}
			resp.Body.Close()
			if resp.StatusCode != http.StatusOK {
				return fmt.Errorf("presigned PUT returned %d", resp.StatusCode)
			}
			// Clean up the probe object; best-effort, failure is not a test result.
			_, _ = v2Client.DeleteObject(ctx, &s3v2.DeleteObjectInput{
				Bucket: awsv2.String(*bucket),
				Key:    awsv2.String(putKey),
			})
			return nil
		}))
	}
	// Print summary
	fmt.Println()
	fmt.Println("Summary")
	fmt.Println("=======")
	allPass := true
	for _, r := range results {
		status := "PASS"
		if !r.ok {
			status = "FAIL"
			allPass = false
		}
		fmt.Printf(" [%s] %s (%s)\n", status, r.name, r.duration.Round(time.Millisecond))
		if !r.ok {
			fmt.Printf(" Error: %s\n", r.err)
		}
	}
	fmt.Println()
	if allPass {
		fmt.Println("Diagnosis: All tests passed. S3 connectivity is working with both SDKs.")
	} else {
		fmt.Println("Diagnosis: Some tests failed. Review errors above.")
	}
}

// maskSecret renders a credential as "abc...xyz (N chars)" without ever
// slicing out of range. Values of 6 characters or fewer are fully redacted,
// since showing head and tail would reveal most or all of the secret.
func maskSecret(s string) string {
	if len(s) <= 6 {
		return fmt.Sprintf("*** (%d chars)", len(s))
	}
	return fmt.Sprintf("%s...%s (%d chars)", s[:3], s[len(s)-3:], len(s))
}
// result captures the outcome of a single named probe: whether it passed,
// the error on failure, and how long it took.
type result struct {
	name     string
	ok       bool
	err      error
	duration time.Duration
}

// runTest executes fn, timing it and echoing a live PASS/FAIL line to stdout,
// and returns the outcome as a result for the final summary.
func runTest(name string, fn func() error) result {
	fmt.Printf(" Testing: %s ... ", name)
	begin := time.Now()
	runErr := fn()
	elapsed := time.Since(begin)
	outcome := result{name: name, err: runErr, duration: elapsed, ok: runErr == nil}
	verdict := "PASS"
	if !outcome.ok {
		verdict = "FAIL"
	}
	fmt.Printf("%s (%s)\n", verdict, elapsed.Round(time.Millisecond))
	return outcome
}
func loadEnvFile(path string) error {
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
if line == "" || strings.HasPrefix(line, "#") {
continue
}
line = strings.TrimPrefix(line, "export ")
k, v, ok := strings.Cut(line, "=")
if !ok {
continue
}
v = strings.Trim(v, `"'`)
os.Setenv(strings.TrimSpace(k), strings.TrimSpace(v))
}
return scanner.Err()
}
func envOr(key, fallback string) string {
if v := os.Getenv(key); v != "" {
return v
}
return fallback
}
// swapHost rewrites presignedURL so its scheme and host come from pullZone,
// keeping the original path and query string (including the signature
// parameters) intact. If either URL fails to parse, the input presignedURL is
// returned unchanged.
func swapHost(presignedURL, pullZone string) string {
	target, errTarget := url.Parse(presignedURL)
	zone, errZone := url.Parse(pullZone)
	if errTarget != nil || errZone != nil {
		return presignedURL
	}
	target.Scheme = zone.Scheme
	target.Host = zone.Host
	return target.String()
}
// valueOr returns s unless it is empty, in which case fallback is returned.
// Used for human-readable placeholders in the diagnostic banner.
func valueOr(s, fallback string) string {
	switch s {
	case "":
		return fallback
	default:
		return s
	}
}
// buildV1Client constructs an SDK v1 S3 client identically to
// distribution/distribution's s3-aws driver New() function: static
// credentials, explicit region, and (for custom endpoints) path-style
// addressing. Exits the process if the session cannot be created.
func buildV1Client(accessKey, secretKey, region, endpoint string, verbose bool) *s3v1.S3 {
	cfg := awsv1.NewConfig().
		WithCredentials(credentialsv1.NewStaticCredentials(accessKey, secretKey, "")).
		WithRegion(region)
	if verbose {
		// Dump signing internals so signature mismatches can be diagnosed.
		cfg.WithLogLevel(awsv1.LogDebugWithSigning)
	}
	if endpoint != "" {
		// Custom (non-AWS) endpoints generally require path-style requests.
		cfg.WithEndpoint(endpoint).WithS3ForcePathStyle(true)
	}
	sess, err := session.NewSession(cfg)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to create SDK v1 session: %v\n", err)
		os.Exit(1)
	}
	return s3v1.New(sess)
}
// buildV2Client constructs an SDK v2 S3 client identically to
// ATCR's NewS3Service() in pkg/s3/types.go: static credentials, explicit
// region, and (for custom endpoints) a base endpoint with path-style
// addressing. Exits the process if the default config cannot be loaded.
func buildV2Client(ctx context.Context, accessKey, secretKey, region, endpoint string) *s3v2.Client {
	staticCreds := credentialsv2.NewStaticCredentialsProvider(accessKey, secretKey, "")
	cfg, err := configv2.LoadDefaultConfig(ctx,
		configv2.WithRegion(region),
		configv2.WithCredentialsProvider(staticCreds),
	)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to load SDK v2 config: %v\n", err)
		os.Exit(1)
	}
	applyEndpoint := func(o *s3v2.Options) {
		if endpoint == "" {
			return
		}
		// Custom (non-AWS) endpoints generally require path-style requests.
		o.BaseEndpoint = awsv2.String(endpoint)
		o.UsePathStyle = true
	}
	return s3v2.NewFromConfig(cfg, applyEndpoint)
}

View File

@@ -94,7 +94,7 @@ ai:
api_key: ""
# ATProto labeler for content moderation (DMCA takedowns).
labeler:
# DID or URL of the ATProto labeler (e.g., did:web:labeler.atcr.io). Empty disables label filtering.
# DID of the ATProto labeler (did:plc:... or did:web:...). Empty disables label filtering.
did: ""
# Stripe billing integration (requires -tags billing build).
billing:

View File

@@ -139,7 +139,7 @@ scanner:
rescan_interval: 168h0m0s
# Labeler subscription settings. When configured, the hold consumes takedown labels from the named labeler and purges affected records on receipt; GC consults the cache to gate blob cleanup. Empty subscribe_url disables.
labeler:
# DID or URL of the ATProto labeler (e.g., did:web:labeler.atcr.io). Empty disables labeler integration.
# DID of the ATProto labeler (did:plc:... or did:web:...). Empty disables labeler integration.
did: ""
# Reversibility window for takedowns. Blobs survive this long after a takedown so the action can be reversed. After this window the GC reclaims them. Default: 720h (30 days).
grace_window: 720h0m0s

20
go.mod
View File

@@ -3,7 +3,6 @@ module atcr.io
go 1.26.2
require (
github.com/aws/aws-sdk-go v1.55.8
github.com/aws/aws-sdk-go-v2 v1.41.6
github.com/aws/aws-sdk-go-v2/config v1.32.16
github.com/aws/aws-sdk-go-v2/credentials v1.19.15
@@ -13,7 +12,7 @@ require (
github.com/charmbracelet/huh/spinner v0.0.0-20260223110133-9dc45e34a40b
github.com/charmbracelet/lipgloss v1.1.0
github.com/did-method-plc/go-didplc v0.0.0-20251009212921-7b7a252b8019
github.com/distribution/distribution/v3 v3.1.0
github.com/distribution/distribution/v3 v3.1.1
github.com/distribution/reference v0.6.0
github.com/earthboundkid/versioninfo/v2 v2.24.1
github.com/go-chi/chi/v5 v5.2.5
@@ -21,6 +20,7 @@ require (
github.com/go-viper/mapstructure/v2 v2.5.0
github.com/goki/freetype v1.0.5
github.com/golang-jwt/jwt/v5 v5.3.1
github.com/google/go-containerregistry v0.21.5
github.com/google/go-querystring v1.2.0
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674
@@ -31,10 +31,13 @@ require (
github.com/ipfs/go-ipld-format v0.6.3
github.com/ipfs/go-libipfs v0.7.0
github.com/ipld/go-car v0.6.3
github.com/johannesboyne/gofakes3 v0.0.0-20260208201424-4c385a1f6a73
github.com/klauspost/compress v1.18.5
github.com/microcosm-cc/bluemonday v1.0.27
github.com/multiformats/go-multihash v0.2.3
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.1
github.com/regclient/regclient v0.11.3
github.com/spf13/cobra v1.10.2
github.com/spf13/viper v1.21.0
github.com/srwiley/oksvg v0.0.0-20221011165216-be6e8873101c
@@ -50,6 +53,7 @@ require (
golang.org/x/image v0.39.0
golang.org/x/sys v0.43.0
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da
oras.land/oras-go/v2 v2.6.0
)
require (
@@ -88,12 +92,15 @@ require (
github.com/charmbracelet/x/term v0.2.2 // indirect
github.com/clipperhouse/displaywidth v0.11.0 // indirect
github.com/clipperhouse/uax29/v2 v2.7.0 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.18.2 // indirect
github.com/coreos/go-systemd/v22 v22.7.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/docker/cli v29.4.0+incompatible // indirect
github.com/docker/docker-credential-helpers v0.9.5 // indirect
github.com/docker/go-events v0.0.0-20250808211157-605354379745 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
github.com/fatih/color v1.19.0 // indirect
@@ -131,7 +138,6 @@ require (
github.com/ipld/go-ipld-prime v0.22.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/libsql/sqlite-antlr4-parser v0.0.0-20240721121621-c0bdc870f11c // indirect
github.com/lucasb-eyer/go-colorful v1.4.0 // indirect
@@ -140,6 +146,7 @@ require (
github.com/mattn/go-localereader v0.0.2-0.20220822084749-2491eb6c1c75 // indirect
github.com/mattn/go-runewidth v0.0.23 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
github.com/mr-tron/base58 v1.3.0 // indirect
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
@@ -151,7 +158,6 @@ require (
github.com/multiformats/go-multicodec v0.10.0 // indirect
github.com/multiformats/go-varint v0.1.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pelletier/go-toml/v2 v2.3.0 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -165,6 +171,7 @@ require (
github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 // indirect
github.com/redis/go-redis/v9 v9.18.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect
github.com/sagikazarmark/locafero v0.12.0 // indirect
github.com/sirupsen/logrus v1.9.4 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
@@ -172,6 +179,8 @@ require (
github.com/spf13/cast v1.10.0 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/ulikunitz/xz v0.5.15 // indirect
github.com/vbatts/tar-split v0.12.2 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b // indirect
gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 // indirect
@@ -197,6 +206,7 @@ require (
go.opentelemetry.io/otel/sdk/metric v1.43.0 // indirect
go.opentelemetry.io/otel/trace v1.43.0 // indirect
go.opentelemetry.io/proto/otlp v1.10.0 // indirect
go.shabbyrobe.org/gocovmerge v0.0.0-20230507111327-fa4f82cfbf4d // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.1 // indirect
@@ -207,6 +217,7 @@ require (
golang.org/x/sync v0.20.0 // indirect
golang.org/x/text v0.36.0 // indirect
golang.org/x/time v0.15.0 // indirect
golang.org/x/tools v0.44.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20260414002931-afd174a4e478 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260414002931-afd174a4e478 // indirect
google.golang.org/grpc v1.80.0 // indirect
@@ -214,5 +225,6 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
gorm.io/gorm v1.31.1 // indirect
gotest.tools/v3 v3.5.2 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
)

52
go.sum
View File

@@ -60,8 +60,6 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ=
github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk=
github.com/aws/aws-sdk-go-v2 v1.41.6 h1:1AX0AthnBQzMx1vbmir3Y4WsnJgiydmnJjiLu+LvXOg=
github.com/aws/aws-sdk-go-v2 v1.41.6/go.mod h1:dy0UzBIfwSeot4grGvY1AqFWN5zgziMmWGzysDnHFcQ=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.9 h1:adBsCIIpLbLmYnkQU+nAChU5yhVTvu5PerROm+/Kq2A=
@@ -72,6 +70,8 @@ github.com/aws/aws-sdk-go-v2/credentials v1.19.15 h1:fyvgWTszojq8hEnMi8PPBTvZdTt
github.com/aws/aws-sdk-go-v2/credentials v1.19.15/go.mod h1:gJiYyMOjNg8OEdRWOf3CrFQxM2a98qmrtjx1zuiQfB8=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.22 h1:IOGsJ1xVWhsi+ZO7/NW8OuZZBtMJLZbk4P5HDjJO0jQ=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.22/go.mod h1:b+hYdbU+jGKfXE8kKM6g1+h+L/Go3vMvzlxBsiuGsxg=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.75 h1:S61/E3N01oral6B3y9hZ2E1iFDqCZPPOBoBQretCnBI=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.75/go.mod h1:bDMQbkI1vJbNjnvJYpPTSNYBkI/VIv18ngWb/K84tkk=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.22 h1:GmLa5Kw1ESqtFpXsx5MmC84QWa/ZrLZvlJGa2y+4kcQ=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.22/go.mod h1:6sW9iWm9DK9YRpRGga/qzrzNLgKpT2cIxb7Vo2eNOp0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.22 h1:dY4kWZiSaXIzxnKlj17nHnBcXXBfac6UlsAx2qL6XrU=
@@ -127,6 +127,8 @@ github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F9
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cevatbarisyilmaz/ara v0.0.4 h1:SGH10hXpBJhhTlObuZzTuFn1rrdmjQImITXnZVPSodc=
github.com/cevatbarisyilmaz/ara v0.0.4/go.mod h1:BfFOxnUd6Mj6xmcvRxHN3Sr21Z1T3U2MYkYOmoQe4Ts=
github.com/charmbracelet/bubbles v1.0.0 h1:12J8/ak/uCZEMQ6KU7pcfwceyjLlWsDLAxB5fXonfvc=
github.com/charmbracelet/bubbles v1.0.0/go.mod h1:9d/Zd5GdnauMI5ivUIVisuEm3ave1XwXtD1ckyV6r3E=
github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
@@ -168,6 +170,8 @@ github.com/clipperhouse/uax29/v2 v2.7.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJ
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/containerd/stargz-snapshotter/estargz v0.18.2 h1:yXkZFYIzz3eoLwlTUZKz2iQ4MrckBxJjkmD16ynUTrw=
github.com/containerd/stargz-snapshotter/estargz v0.18.2/go.mod h1:XyVU5tcJ3PRpkA9XS2T5us6Eg35yM0214Y+wvrZTBrY=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.7.0 h1:LAEzFkke61DFROc7zNLX/WA2i5J8gYqe0rSj9KI28KA=
@@ -189,16 +193,20 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/did-method-plc/go-didplc v0.0.0-20251009212921-7b7a252b8019 h1:MhDee1P3Zar8u72U6RtOKvzSd7dBAU3l2hhrOLQsfB0=
github.com/did-method-plc/go-didplc v0.0.0-20251009212921-7b7a252b8019/go.mod h1:dBm0+R8Diqo90As3Q6p2wXAdrGXJgPEWBKUnpV5SUzI=
github.com/distribution/distribution/v3 v3.1.0 h1:u1v788HreKTLGdNY6s7px8Exgrs9mZ9UrCDjSrpCM8g=
github.com/distribution/distribution/v3 v3.1.0/go.mod h1:73BuF5/ziMHNVt7nnL1roYpH4Eg/FgUlKZm3WryIx/o=
github.com/distribution/distribution/v3 v3.1.1 h1:KUbk7C8CfaLXy8kbf/hGq9cad/wCoLB6dbWH6DMbmX0=
github.com/distribution/distribution/v3 v3.1.1/go.mod h1:d7lXwZpph0bVcOj4Aqn0nMrWHIwRQGdiV5TLeI+/w6Y=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/cli v29.4.0+incompatible h1:+IjXULMetlvWJiuSI0Nbor36lcJ5BTcVpUmB21KBoVM=
github.com/docker/cli v29.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker-credential-helpers v0.9.5 h1:EFNN8DHvaiK8zVqFA2DT6BjXE0GzfLOZ38ggPTKePkY=
github.com/docker/docker-credential-helpers v0.9.5/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c=
github.com/docker/go-events v0.0.0-20250808211157-605354379745 h1:yOn6Ze6IbYI/KAw2lw/83ELYvZh6hvsygTVkD0dzMC4=
github.com/docker/go-events v0.0.0-20250808211157-605354379745/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/earthboundkid/versioninfo/v2 v2.24.1 h1:SJTMHaoUx3GzjjnUO1QzP3ZXK6Ee/nbWyCm58eY3oUg=
@@ -310,6 +318,8 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-containerregistry v0.21.5 h1:KTJG9Pn/jC0VdZR6ctV3/jcN+q6/Iqlx0sTVz3ywZlM=
github.com/google/go-containerregistry v0.21.5/go.mod h1:ySvMuiWg+dOsRW0Hw8GYwfMwBlNRTmpYBFJPlkco5zU=
github.com/google/go-querystring v1.2.0 h1:yhqkPbu2/OH+V9BfpCVPZkNmUXhb2gBxJArfhIxNtP0=
github.com/google/go-querystring v1.2.0/go.mod h1:8IFJqpSRITyJ8QhQ13bmbeMBDfmeEJZD5A0egEOmkqU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -451,10 +461,8 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/johannesboyne/gofakes3 v0.0.0-20260208201424-4c385a1f6a73 h1:0xkWp+RMC2ImuKacheMHEAtrbOTMOa0kYkxyzM1Z/II=
github.com/johannesboyne/gofakes3 v0.0.0-20260208201424-4c385a1f6a73/go.mod h1:S4S9jGBVlLri0OeqrSSbCGG5vsI6he06UJyuz1WT1EE=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -522,6 +530,8 @@ github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dz
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
@@ -567,6 +577,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/olareg/olareg v0.1.2 h1:75G8X6E9FUlzL/CSjgFcYfMgNzlc7CxULpUUNsZBIvI=
github.com/olareg/olareg v0.1.2/go.mod h1:TWs+N6pO1S4bdB6eerzUm/ITRQ6kw91mVf9ZYeGtw+Y=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
@@ -617,6 +629,8 @@ github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 h1:2dKdoEYBJ0CZCLPiCdvvc7lu
github.com/redis/go-redis/extra/redisotel/v9 v9.18.0/go.mod h1:WzkrVG9ro9BwCQD0eJOWn6AGL4Z1CleGflM45w1hu10=
github.com/redis/go-redis/v9 v9.18.0 h1:pMkxYPkEbMPwRdenAzUNyFNrDgHx9U+DrBabWNfSRQs=
github.com/redis/go-redis/v9 v9.18.0/go.mod h1:k3ufPphLU5YXwNTUcCRXGxUoF1fqxnhFQmscfkCoDA0=
github.com/regclient/regclient v0.11.3 h1:aTnVRsgFaOmezgKp7caL3zINrZKAXsMbzS1oCgD7/cA=
github.com/regclient/regclient v0.11.3/go.mod h1:a4PDi+VyEbBuV/5hCfMjnYH8jvB7NgD0mdggwNRECy8=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
@@ -626,6 +640,8 @@ github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4=
github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
@@ -685,7 +701,11 @@ github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFd
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/tursodatabase/go-libsql v0.0.0-20251219133454-43644db490ff h1:Hvxz9W8fWpSg9xkiq8/q+3cVJo+MmLMfkjdS/u4nWFY=
github.com/tursodatabase/go-libsql v0.0.0-20251219133454-43644db490ff/go.mod h1:TjsB2miB8RW2Sse8sdxzVTdeGlx74GloD5zJYUC38d8=
github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY=
github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4=
github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s=
github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y=
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ=
@@ -704,14 +724,16 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yuin/goldmark v1.8.2 h1:kEGpgqJXdgbkhcOgBxkC0X0PmoPG1ZyoZ117rDVp4zE=
github.com/yuin/goldmark v1.8.2/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg=
github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
github.com/yuin/gopher-lua v1.1.2 h1:yF/FjE3hD65tBbt0VXLE13HWS9h34fdzJmrWRXwobGA=
github.com/yuin/gopher-lua v1.1.2/go.mod h1:7aRmXIWl37SqRf0koeyylBEzJ+aPt8A+mmkQ4f1ntR8=
github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b h1:CzigHMRySiX3drau9C6Q5CAbNIApmLdat5jPMqChvDA=
gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b/go.mod h1:/y/V339mxv2sZmYYR64O07VuCpdNZqCTwO8ZcouTMI8=
gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 h1:qwDnMxjkyLmAFgcfgTnfJrmYKWhHnci3GjDqcZp1M3Q=
gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02/go.mod h1:JTnUj0mpYiAsuZLmKjTx/ex3AtMowcCgnE7YNyCEP0I=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
@@ -770,6 +792,8 @@ go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09
go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0=
go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g=
go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk=
go.shabbyrobe.org/gocovmerge v0.0.0-20230507111327-fa4f82cfbf4d h1:Ns9kd1Rwzw7t0BR8XMphenji4SmIoNZPn8zhYmaVKP8=
go.shabbyrobe.org/gocovmerge v0.0.0-20230507111327-fa4f82cfbf4d/go.mod h1:92Uoe3l++MlthCm+koNi0tcUCX3anayogF0Pa/sp24k=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
@@ -1047,6 +1071,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.44.0 h1:UP4ajHPIcuMjT1GqzDWRlalUEoY+uzoZKnhOjbIPD2c=
golang.org/x/tools v0.44.0/go.mod h1:KA0AfVErSdxRZIsOVipbv3rQhVXTnlU6UhKxHd1seDI=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1172,6 +1198,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -1186,6 +1214,8 @@ gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg=
gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -1195,6 +1225,8 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc=
oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=

View File

@@ -466,6 +466,8 @@ github.com/PuerkitoBio/purell v1.2.1 h1:QsZ4TjvwiMpat6gBCBxEQI0rcS9ehtkKtSpiUnd9
github.com/PuerkitoBio/purell v1.2.1/go.mod h1:ZwHcC/82TOaovDi//J/804umJFFmbOHPngi8iYYv/Eo=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY=
@@ -499,6 +501,8 @@ github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de/go.mod h1:DCaWoU
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=
github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo=
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v1.41.2/go.mod h1:IvvlAZQXvTXznUPfRVfryiG1fbzE2NGK6m9u39YQ+S4=
github.com/aws/aws-sdk-go-v2/config v1.32.10/go.mod h1:2rUIOnA2JaiqYmSKYmRJlcMWy6qTj1vuRFscppSBMcw=
github.com/aws/aws-sdk-go-v2/credentials v1.19.10/go.mod h1:RnnlFCAlxQCkN2Q379B67USkBMu1PipEEiibzYN5UTE=
@@ -659,8 +663,13 @@ github.com/containernetworking/cni v1.3.0 h1:v6EpN8RznAZj9765HhXQrtXgX+ECGebEYEm
github.com/containernetworking/cni v1.3.0/go.mod h1:Bs8glZjjFfGPHMw6hQu82RUgEPNGEaBb9KS5KtNMnJ4=
github.com/containernetworking/plugins v1.9.0 h1:Mg3SXBdRGkdXyFC4lcwr6u2ZB2SDeL6LC3U+QrEANuQ=
github.com/containernetworking/plugins v1.9.0/go.mod h1:JG3BxoJifxxHBhG3hFyxyhid7JgRVBu/wtooGEvWf1c=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM=
github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ=
github.com/containers/storage v1.59.1 h1:11Zu68MXsEQGBBd+GadPrHPpWeqjKS8hJDGiAHgIqDs=
github.com/containers/storage v1.59.1/go.mod h1:KoAYHnAjP3/cTsRS+mmWZGkufSY2GACiKQ4V3ZLQnR0=
github.com/coreos/go-oidc/v3 v3.14.1/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU=
@@ -671,6 +680,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf h1:dwGgBWn84wUS1pVikGiruW+x5XM4amhjaZO20vCjay4=
github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE=
github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q=
github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
github.com/cyphar/filepath-securejoin v0.5.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
github.com/cyphar/filepath-securejoin v0.6.0/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc=
github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ=
@@ -772,6 +783,7 @@ github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv
github.com/google/go-cmdtest v0.4.0 h1:ToXh6W5spLp3npJV92tk6d5hIpUPYEzHLkD+rncbyhI=
github.com/google/go-cmdtest v0.4.0/go.mod h1:apVn/GCasLZUVpAJ6oWAuyP7Ne7CEsQbTnc0plM3m+o=
github.com/google/go-containerregistry v0.20.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/go-pkcs11 v0.3.0 h1:PVRnTgtArZ3QQqTGtbtjtnIkzl2iY2kt24yqbrf7td8=
github.com/google/go-pkcs11 v0.3.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
@@ -868,6 +880,9 @@ github.com/jedib0t/go-pretty/v6 v6.7.8 h1:BVYrDy5DPBA3Qn9ICT+PokP9cvCv1KaHv2i+Hc
github.com/jedib0t/go-pretty/v6 v6.7.8/go.mod h1:YwC5CE4fJ1HFUDeivSV1r//AmANFHyqczZk+U6BDALU=
github.com/jessevdk/go-flags v1.6.1 h1:Cvu5U8UGrLay1rZfv/zP7iLpSHGUZ/Ou68T0iX1bBK4=
github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/josephspurrier/goversioninfo v1.5.0 h1:9TJtORoyf4YMoWSOo/cXFN9A/lB3PniJ91OxIH6e7Zg=
@@ -950,6 +965,7 @@ github.com/magefile/mage v1.14.0 h1:6QDX3g6z1YvJ4olPhT1wksUcSa/V0a1B+pJb73fBjyo=
github.com/magefile/mage v1.14.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
@@ -963,6 +979,8 @@ github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebG
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-sqlite3 v1.14.23 h1:gbShiuAP1W5j9UOksQ06aiiqPMxYecovVGwmTxWtuw0=
github.com/mattn/go-sqlite3 v1.14.23/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A=
github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mattn/goveralls v0.0.5/go.mod h1:Xg2LHi51faXLyKXwsndxiW6uxEEQT9+3sjGzzwU4xy0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -1005,6 +1023,7 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/olareg/olareg v0.1.2/go.mod h1:TWs+N6pO1S4bdB6eerzUm/ITRQ6kw91mVf9ZYeGtw+Y=
github.com/olekukonko/ts v0.0.0-20171002115256-78ecb04241c0 h1:LiZB1h0GIcudcDci2bxbqI6DXV8bF8POAnArqvRrIyw=
github.com/olekukonko/ts v0.0.0-20171002115256-78ecb04241c0/go.mod h1:F/7q8/HZz+TXjlsoZQQKVYvXTZaFH4QRa3y+j1p7MS0=
github.com/open-policy-agent/opa v0.70.0 h1:B3cqCN2iQAyKxK6+GI+N40uqkin+wzIrM7YA60t9x1U=
@@ -1083,6 +1102,7 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsK
github.com/pkg/sftp v1.10.1 h1:VasscCm72135zRysgrJDKsntdmPN+OuU3+nnHYA9wyc=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo=
github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM=
github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
@@ -1106,6 +1126,7 @@ github.com/quic-go/webtransport-go v0.10.0/go.mod h1:LeGIXr5BQKE3UsynwVBeQrU1TPr
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
@@ -1135,9 +1156,12 @@ github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL
github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI=
github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/sigstore/fulcio v1.6.6 h1:XaMYX6TNT+8n7Npe8D94nyZ7/ERjEsNGFC+REdi/wzw=
github.com/sigstore/fulcio v1.6.6/go.mod h1:BhQ22lwaebDgIxVBEYOOqLRcN5+xOV+C9bh/GUXRhOk=
github.com/sigstore/protobuf-specs v0.4.1/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc=
github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY=
github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc=
@@ -1146,6 +1170,7 @@ github.com/sigstore/sigstore v1.10.4 h1:ytOmxMgLdcUed3w1SbbZOgcxqwMG61lh1TmZLN+W
github.com/sigstore/sigstore v1.10.4/go.mod h1:tDiyrdOref3q6qJxm2G+JHghqfmvifB7hw+EReAfnbI=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
github.com/slok/go-http-metrics v0.13.0 h1:lQDyJJx9wKhmbliyUsZ2l6peGnXRHjsjoqPt5VYzcP8=
github.com/slok/go-http-metrics v0.13.0/go.mod h1:HIr7t/HbN2sJaunvnt9wKP9xoBBVZFo1/KiHU3b0w+4=
github.com/smallstep/pkcs7 v0.1.1 h1:x+rPdt2W088V9Vkjho4KtoggyktZJlMduZAtRHm68LU=
@@ -1187,6 +1212,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
github.com/vbauerster/mpb/v8 v8.10.2 h1:2uBykSHAYHekE11YvJhKxYmLATKHAGorZwFlyNw4hHM=
github.com/vbauerster/mpb/v8 v8.10.2/go.mod h1:+Ja4P92E3/CorSZgfDtK46D7AVbDqmBQRTmyTqPElo0=
github.com/veraison/go-cose v1.1.0 h1:AalPS4VGiKavpAzIlBjrn7bhqXiXi4jbMYY/2+UC+4o=
github.com/veraison/go-cose v1.1.0/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4=
github.com/vishvananda/netlink v1.3.1-0.20250303224720-0e7078ed04c8/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
@@ -1228,6 +1255,7 @@ github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342/go.mod h1:Ohn+xnUBi
github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg=
github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/gopher-lua v1.1.2/go.mod h1:7aRmXIWl37SqRf0koeyylBEzJ+aPt8A+mmkQ4f1ntR8=
github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
github.com/zyedidia/generic v1.2.2-0.20230320175451-4410d2372cb1 h1:V+UsotZpAVvfj3X/LMoEytoLzSiP6Lg0F7wdVyu9gGg=

View File

@@ -0,0 +1,502 @@
// Package testharness boots an in-process ATCR stack (fake PDS, gofakes3,
// hold, appview) for integration smoke tests. It exposes thin helpers for
// adding sailors and obtaining basic-auth credentials for an OCI registry
// client — either a library-specific authn.Authenticator (RegistryAuth) or
// a neutral Auth value (RegistryCreds) consumed by the client-agnostic
// matrix in test/integration.
package testharness
import (
"context"
"encoding/json"
"fmt"
"net"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/bluesky-social/indigo/atproto/identity"
"github.com/bluesky-social/indigo/atproto/syntax"
"github.com/distribution/distribution/v3/configuration"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/johannesboyne/gofakes3"
"github.com/johannesboyne/gofakes3/backend/s3mem"
"atcr.io/pkg/appview"
"atcr.io/pkg/atproto"
atprotodid "atcr.io/pkg/atproto/did"
"atcr.io/pkg/hold"
"atcr.io/pkg/hold/quota"
"atcr.io/pkg/testpds"
)
// Option configures Harness construction. Use New(t, WithX(...)) to set them.
type Option func(*options)

// options collects the optional knobs applied by Option values before the
// stack boots. The zero value means "no quota config" (hold defaults apply).
type options struct {
	// quota, when non-nil, is copied into the hold's Config.Quota in New.
	quota *quota.Config
}
// WithQuotaTiers configures the hold's quota manager with the given tier
// definitions. NewCrewTier names the tier applied to new crew members. Pass
// a tiny limit (e.g. "1KB") plus NewCrewTier="tiny" to make a single push
// exhaust the quota for a non-captain user, so the next /auth/token gate
// denies with "quota exceeded".
func WithQuotaTiers(tiers []quota.TierConfig, newCrewTier string) Option {
	return func(o *options) {
		// Build a fresh config per application so options structs never
		// share a pointer.
		cfg := &quota.Config{
			Tiers:    tiers,
			Defaults: quota.DefaultsConfig{NewCrewTier: newCrewTier},
		}
		o.quota = cfg
	}
}
// Harness owns all in-process servers and tears them down on test cleanup.
type Harness struct {
	t *testing.T

	// PDS is the fake personal data server that hosts all test identities.
	PDS *testpds.Server
	// S3URL is the base URL of the in-memory gofakes3 server.
	S3URL string
	// HoldDID is the hold's did:web, derived from HoldURL.
	HoldDID string
	// HoldURL is the hold's public base URL (http://127.0.0.1:port).
	HoldURL string
	// AppViewURL is the appview's base URL as bound (http://127.0.0.1:port).
	AppViewURL string
	// AppView exposes the running server, including its Database for seeding.
	AppView *appview.AppViewServer
	Hold    *hold.HoldServer
	// Captain is the hold owner; set before appview boots.
	Captain *Sailor
}
// Sailor combines the fake-PDS identity with the synthetic OAuth bits AppView
// needs to mint a registry JWT. Today the harness drives /auth/token via the
// app-password Basic auth path, so callers don't have to think about OAuth.
type Sailor struct {
	// Identity carries the DID, handle, and password issued by the fake PDS.
	Identity *testpds.Identity
}
// DID reports the sailor's DID in string form.
func (s *Sailor) DID() string {
	return s.Identity.DID.String()
}
// Handle reports the sailor's handle in string form.
func (s *Sailor) Handle() string {
	return s.Identity.Handle.String()
}
// New brings up the full stack on random localhost ports and registers
// t.Cleanup to tear everything down. The bucket name used inside gofakes3 is
// fixed; the test never cares about its value.
//
// Boot order is load-bearing: the fake PDS must exist before the hold (the
// owner identity is registered on it first), the hold must be reachable
// before the appview resolves its did:web, and the seed helpers run last
// because they write directly to the appview's database.
func New(t *testing.T, opts ...Option) *Harness {
	t.Helper()
	var o options
	for _, opt := range opts {
		opt(&o)
	}
	h := &Harness{t: t}

	// 1. Fake PDS. Installed as the process-global atproto directory so all
	// identity resolution in this process hits the fake instead of the net.
	h.PDS = testpds.New(t)
	atproto.SetDirectory(h.PDS.Directory())
	atproto.SetTestMode(true)
	t.Cleanup(func() {
		// Reset to a fresh default so a later non-test process won't see our
		// fake. SetDirectory(nil) re-arms lazy init in GetDirectory.
		atproto.SetDirectory(nil)
		atproto.SetTestMode(false)
	})

	// 2. gofakes3 (S3-compatible in-memory).
	backend := s3mem.New()
	if err := backend.CreateBucket("atcr-test"); err != nil {
		t.Fatalf("create test bucket: %v", err)
	}
	faker := gofakes3.New(backend)
	s3ts := httptest.NewServer(faker.Server())
	t.Cleanup(s3ts.Close)
	h.S3URL = s3ts.URL

	// 3. Hold. Listen first so the port is known: the hold's did:web and
	// public URL are both derived from the bound address.
	holdListener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("hold listen: %v", err)
	}
	holdAddr := holdListener.Addr().String()
	holdPublicURL := "http://" + holdAddr
	h.HoldURL = holdPublicURL
	h.HoldDID = atprotodid.GenerateDIDFromURL(holdPublicURL)
	// Owner DID for the hold. The captain identity lives on the fake PDS; we
	// register it BEFORE constructing the hold so any synchronous bootstrap
	// step that needs to resolve the owner finds it. The captain is exposed
	// as h.Captain so tests that push to their own hold can use it directly.
	captainIdent, err := h.PDS.AddIdentity("captain.test")
	if err != nil {
		t.Fatalf("add captain: %v", err)
	}
	h.Captain = &Sailor{Identity: captainIdent}
	// Pre-register hold's own did:web in the directory so AppView can resolve
	// it without hitting the network (we still serve /.well-known/did.json
	// from the hold for parity, but the in-memory directory wins).
	h.PDS.Directory().Register(&identity.Identity{
		DID:    syntax.DID(h.HoldDID),
		Handle: syntax.HandleInvalid,
		Services: map[string]identity.ServiceEndpoint{
			"atproto_pds": {Type: "AtprotoPersonalDataServer", URL: holdPublicURL},
			"atcr_hold":   {Type: "AtcrHoldService", URL: holdPublicURL},
		},
		Keys: map[string]identity.VerificationMethod{},
	})
	holdTmp := t.TempDir()
	holdCfg := &hold.Config{
		LogLevel: "warn",
		Storage: hold.StorageConfig{
			AccessKey: "test", SecretKey: "test", Region: "us-east-1",
			Bucket: "atcr-test", Endpoint: h.S3URL,
		},
		Server: hold.ServerConfig{
			Addr:         holdAddr,
			PublicURL:    holdPublicURL,
			Public:       true, // anonymous pulls allowed; pushes still need crew
			TestMode:     true,
			ReadTimeout:  60 * time.Second,
			WriteTimeout: 5 * time.Minute,
		},
		Registration: hold.RegistrationConfig{
			OwnerDID:           captainIdent.DID.String(),
			AllowAllCrew:       true, // lets the appview auto-crew flow add sailors on first push
			ProfileDisplayName: "Test Captain",
			ProfileDescription: "harness-owned hold",
		},
		Database: hold.DatabaseConfig{
			// Real directory rather than :memory: — libsql's connection pool
			// opens a fresh in-memory DB per connection, so schemas created
			// on one don't appear on others. A tempdir sidesteps the issue
			// and gets cleaned up automatically by t.TempDir().
			Path:      holdTmp,
			KeyPath:   filepath.Join(holdTmp, "signing.key"),
			DIDMethod: "web",
		},
	}
	// Quota config only when the caller opted in via WithQuotaTiers.
	if o.quota != nil {
		holdCfg.Quota = *o.quota
	}
	holdSrv, err := hold.NewHoldServer(holdCfg)
	if err != nil {
		holdListener.Close()
		t.Fatalf("new hold: %v", err)
	}
	h.Hold = holdSrv
	// Serve in the background; holdDone lets cleanup wait for the serve
	// goroutine to fully exit before the test returns.
	holdDone := make(chan struct{})
	go func() {
		_ = holdSrv.ServeWithListener(holdListener)
		close(holdDone)
	}()
	t.Cleanup(func() {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		_ = holdSrv.Shutdown(ctx)
		<-holdDone
	})
	if err := waitForHTTP(holdPublicURL+"/.well-known/did.json", 5*time.Second); err != nil {
		t.Fatalf("hold did doc not reachable: %v", err)
	}

	// 4. AppView. The DomainRoutingMiddleware compares the request's host
	// against RegistryDomains (port-stripped) to decide whether to allow /v2/
	// or redirect. RegistryDomains must therefore be a bare hostname (no
	// port), AND must differ from the UI hostname extracted from BaseURL.
	// We bind to 127.0.0.1, route /v2/ via that, and use "localhost" as the
	// UI hostname (same socket, different name) so the routing branches
	// don't collide.
	avListener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("appview listen: %v", err)
	}
	avAddr := avListener.Addr().String()
	_, avPort, err := net.SplitHostPort(avAddr)
	if err != nil {
		t.Fatalf("split appview addr: %v", err)
	}
	avBaseURL := "http://localhost:" + avPort
	h.AppViewURL = "http://" + avAddr
	avDBPath := filepath.Join(t.TempDir(), "appview.db")
	avCfg := buildAppViewConfig(avAddr, avBaseURL, h.HoldDID, avDBPath)
	avSrv, err := appview.NewAppViewServer(avCfg, nil)
	if err != nil {
		avListener.Close()
		t.Fatalf("new appview: %v", err)
	}
	h.AppView = avSrv
	avDone := make(chan struct{})
	go func() {
		_ = avSrv.ServeWithListener(avListener)
		close(avDone)
	}()
	t.Cleanup(func() {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		_ = avSrv.Shutdown(ctx)
		<-avDone
	})
	if err := waitForHTTP(avBaseURL+"/healthz", 5*time.Second); err != nil {
		// Not fatal: some appview versions don't expose /healthz at this path.
		// Fall back to a TCP probe just to be sure the listener is up.
		if err2 := waitForTCP(avAddr, 5*time.Second); err2 != nil {
			t.Fatalf("appview not reachable: %v / %v", err, err2)
		}
	}

	// Seed the appview's local tables that the /auth/token authorizer reads.
	// In production these are populated by the Jetstream worker consuming
	// hold firehose events; tests disable Jetstream and seed directly so
	// authorization decisions resolve against known state.
	h.seedCaptainRecord(captainIdent)
	h.seedUserRow(captainIdent)
	h.seedCrewMember(captainIdent, []string{"blob:read", "blob:write", "crew:admin"})
	return h
}
// AddSailor creates a new identity on the fake PDS and registers it as a
// crew member of the harness's hold with blob:write permission. The returned
// Sailor is immediately usable for push and pull: AppView mints registry
// JWTs via the app-password Basic auth path on /auth/token, and the auth
// gate finds the seeded crew row in its local table.
func (h *Harness) AddSailor(handle string) *Sailor {
	defaultPerms := []string{"blob:read", "blob:write"}
	return h.AddSailorWithPermissions(handle, defaultPerms)
}
// AddSailorWithPermissions is like AddSailor but lets the caller choose the
// permissions written to the appview's hold_crew_members table. Use
// []string{"blob:read"} for a read-only crew member who can pull but not push.
func (h *Harness) AddSailorWithPermissions(handle string, permissions []string) *Sailor {
	h.t.Helper()
	ident, err := h.PDS.AddIdentity(handle)
	if err != nil {
		h.t.Fatalf("add sailor %q: %v", handle, err)
	}
	sailor := &Sailor{Identity: ident}
	// Seed both tables the /auth/token gate consults.
	h.seedUserRow(ident)
	h.seedCrewMember(ident, permissions)
	return sailor
}
// AddStranger creates an identity on the fake PDS and seeds the appview's
// users row (so PDS resolution works for the auth-token service-auth pre-mint)
// but does NOT add a crew_members row. The returned sailor authenticates fine
// but is not a member of the hold — push token requests should be denied with
// "crew membership required", while pull token requests succeed because the
// gate's membership requirement is push-only.
func (h *Harness) AddStranger(handle string) *Sailor {
	h.t.Helper()
	ident, err := h.PDS.AddIdentity(handle)
	if err != nil {
		h.t.Fatalf("add stranger %q: %v", handle, err)
	}
	// Deliberately no crew row: only the users table is seeded.
	h.seedUserRow(ident)
	stranger := &Sailor{Identity: ident}
	return stranger
}
// AnonAuth hands back the library's anonymous authenticator. crane falls
// back to it when no credentials are configured; because the token endpoint
// requires Basic auth, the resulting /v2/* requests fail with 401
// unauthorized.
func (h *Harness) AnonAuth() authn.Authenticator {
	return authn.Anonymous
}
// seedCaptainRecord inserts a row into hold_captain_records so the auth
// gate's isCaptain check returns true for the captain.
func (h *Harness) seedCaptainRecord(captain *testpds.Identity) {
	h.t.Helper()
	const stmt = `INSERT INTO hold_captain_records (hold_did, owner_did, public, allow_all_crew) VALUES (?, ?, ?, ?)`
	if _, err := h.AppView.Database.Exec(stmt, h.HoldDID, captain.DID.String(), true, true); err != nil {
		h.t.Fatalf("seed captain record: %v", err)
	}
}
// seedUserRow upserts a row into users so the appview's lookups (PDS endpoint
// for the user, default hold, etc.) resolve locally. The hold_resolver in
// authgate reads default_hold_did from this table.
func (h *Harness) seedUserRow(ident *testpds.Identity) {
	h.t.Helper()
	const stmt = `INSERT OR REPLACE INTO users (did, handle, pds_endpoint, default_hold_did, last_seen)
		VALUES (?, ?, ?, ?, CURRENT_TIMESTAMP)`
	if _, err := h.AppView.Database.Exec(stmt, ident.DID.String(), ident.Handle.String(), h.PDS.URL(), h.HoldDID); err != nil {
		h.t.Fatalf("seed user row: %v", err)
	}
}
// seedCrewMember writes a row to hold_crew_members so checkCrewBlobWrite
// finds the member without waiting on Jetstream-fed updates.
func (h *Harness) seedCrewMember(ident *testpds.Identity, permissions []string) {
	h.t.Helper()
	encoded, err := json.Marshal(permissions)
	if err != nil {
		h.t.Fatalf("marshal permissions: %v", err)
	}
	// Synthetic rkey; uniqueness per handle is all the schema needs here.
	rkey := "test-" + ident.Handle.String()
	const q = `INSERT OR REPLACE INTO hold_crew_members
		(hold_did, member_did, rkey, role, permissions, added_at)
		VALUES (?, ?, ?, ?, ?, CURRENT_TIMESTAMP)`
	if _, err = h.AppView.Database.Exec(q,
		h.HoldDID, ident.DID.String(), rkey, "member", string(encoded),
	); err != nil {
		h.t.Fatalf("seed crew member: %v", err)
	}
}
// AppViewHostPort returns the host:port the registry was bound to, for use
// as the registry component of an OCI image reference.
func (h *Harness) AppViewHostPort() string {
	const scheme = "http://"
	return strings.TrimPrefix(h.AppViewURL, scheme)
}
// RegistryAuth returns an authn.Authenticator that performs the Docker token
// dance against this harness. Handle is used as the username because
// production's parseBasicAuthDID() reconstructs DIDs assuming the canonical
// 2-segment did:web shape; our synthesized DIDs are longer
// (did:web:host:user:alice). Handles resolve through the fake directory just
// as well as DIDs do.
func (h *Harness) RegistryAuth(s *Sailor) authn.Authenticator {
	basic := &authn.Basic{
		Username: s.Handle(),
		Password: s.Identity.Password,
	}
	return basic
}
// Auth carries neutral basic-auth credentials for the test OCI client
// abstraction. Both Username and Password empty means anonymous.
// It deliberately avoids client-library types (e.g. authn.Basic) so test
// code can stay agnostic to which OCI client is in use.
type Auth struct {
	Username, Password string
}
// RegistryCreds returns the sailor's basic-auth credentials as a neutral Auth
// value, for client-library-agnostic test code.
func (h *Harness) RegistryCreds(s *Sailor) Auth {
	return Auth{
		Username: s.Handle(),
		Password: s.Identity.Password,
	}
}
// AnonCreds returns the anonymous (empty) Auth value.
func (h *Harness) AnonCreds() Auth {
	return Auth{}
}
// --- helpers ---------------------------------------------------------------
// buildAppViewConfig assembles the in-process harness's appview config:
// warn-level logging, test mode, Jetstream fully disabled, and a
// distribution config wired to the same addr/baseURL/hold. dbPath must be
// a real file path (see the `:memory:` note below).
func buildAppViewConfig(addr, baseURL, holdDID, dbPath string) *appview.Config {
	cfg := appview.DefaultConfig()
	cfg.LogLevel = "warn"
	cfg.Server.Addr = addr
	cfg.Server.BaseURL = baseURL
	cfg.Server.DefaultHoldDID = holdDID
	cfg.Server.TestMode = true
	// Registry domain is a bare hostname (no port). DomainRoutingMiddleware
	// strips ports before matching, so "127.0.0.1" is what /v2/ requests
	// will hit (since the listener binds to 127.0.0.1). BaseURL uses
	// "localhost" for the UI hostname so the two branches differ.
	cfg.Server.RegistryDomains = []string{"127.0.0.1"}
	// Real file path under t.TempDir(). `:memory:` is rejected because
	// libsql's connection pool opens multiple distinct in-memory databases,
	// so the schema applied by InitDB doesn't reach subsequent connections.
	cfg.UI.DatabasePath = dbPath
	cfg.UI.LibsqlSyncURL = ""
	// Disable jetstream so the test doesn't open WebSockets to the public network.
	cfg.Jetstream.URLs = []string{}
	cfg.Jetstream.BackfillEnabled = false
	cfg.Jetstream.RelayEndpoints = []string{}
	cfg.Auth.TokenExpiration = 5 * time.Minute
	// ServiceName must match the registry domain: the JWT "aud"/service
	// claim and the token endpoint's `service` parameter compare equal.
	cfg.Auth.ServiceName = cfg.Server.RegistryDomains[0]
	// Unique per-run cert path so parallel test binaries don't clobber
	// each other's signing cert in the shared temp dir.
	cfg.Auth.CertPath = filepath.Join(os.TempDir(), fmt.Sprintf("atcr-test-cert-%d.pem", time.Now().UnixNano()))
	cfg.Distribution = buildDistributionConfig(addr, baseURL, holdDID, cfg.Auth.ServiceName, cfg.Auth.CertPath)
	return cfg
}
// buildDistributionConfig assembles the embedded distribution (registry)
// configuration for the harness: in-memory storage, token auth pointing at
// the appview's /auth/token realm, and the atproto-resolver middleware.
// serviceName and certPath must match what buildAppViewConfig put in
// cfg.Auth so the registry accepts the JWTs the appview signs.
func buildDistributionConfig(addr, baseURL, holdDID, serviceName, certPath string) *configuration.Configuration {
	distConfig := &configuration.Configuration{}
	distConfig.Version = configuration.MajorMinorVersion(0, 1)
	distConfig.Log = configuration.Log{
		Level:     configuration.Loglevel("warn"),
		Formatter: "text",
		Fields:    map[string]any{"service": "atcr-appview"},
	}
	distConfig.HTTP = configuration.HTTP{
		Addr: addr,
		// Static secret is fine here: tests never share upload state
		// across processes.
		Secret: "test-http-secret-do-not-use-in-prod",
		Headers: map[string][]string{
			"X-Content-Type-Options": {"nosniff"},
		},
	}
	// In-memory blob storage; upload purging disabled so no background
	// goroutine churns during short-lived tests.
	distConfig.Storage = configuration.Storage{
		"inmemory": configuration.Parameters{},
		"maintenance": configuration.Parameters{
			"uploadpurging": map[any]any{
				"enabled":  false,
				"age":      7 * 24 * time.Hour,
				"interval": 24 * time.Hour,
				"dryrun":   false,
			},
		},
	}
	distConfig.Middleware = map[string][]configuration.Middleware{
		"registry": {{
			Name: "atproto-resolver",
			Options: configuration.Parameters{
				"default_hold_did": holdDID,
				"test_mode":        true,
				"base_url":         baseURL,
			},
		}},
	}
	// Token auth: realm is the appview's own /auth/token route; issuer and
	// service both use serviceName so claim validation lines up.
	distConfig.Auth = configuration.Auth{
		"token": configuration.Parameters{
			"realm":          baseURL + "/auth/token",
			"service":        serviceName,
			"issuer":         serviceName,
			"rootcertbundle": certPath,
			"expiration":     int((5 * time.Minute).Seconds()),
		},
	}
	// Storage health checks disabled: inmemory storage can't fail and the
	// checker would only add noise.
	distConfig.Health = configuration.Health{
		StorageDriver: configuration.StorageDriver{
			Enabled:   false,
			Interval:  10 * time.Second,
			Threshold: 3,
		},
	}
	return distConfig
}
// waitForHTTP polls a URL until it returns any HTTP response (any status
// code counts as "up") or timeout elapses.
//
// Each attempt is bounded by the time remaining until the deadline: the
// original bare http.Get had no timeout, so a listener that accepted
// connections but never wrote a response could hang the poll indefinitely,
// well past the declared timeout.
func waitForHTTP(u string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		remaining := time.Until(deadline)
		if remaining <= 0 {
			return fmt.Errorf("timed out waiting for %s", u)
		}
		// Per-attempt client timeout clamps a single stalled request to
		// the overall deadline.
		client := http.Client{Timeout: remaining}
		resp, err := client.Get(u)
		if err == nil {
			resp.Body.Close()
			return nil
		}
		time.Sleep(25 * time.Millisecond)
	}
}
// waitForTCP polls a TCP address until a dial succeeds or timeout elapses.
// Used to gate test setup on harness listeners actually being bound.
func waitForTCP(addr string, timeout time.Duration) error {
	for deadline := time.Now().Add(timeout); time.Now().Before(deadline); {
		if conn, err := net.DialTimeout("tcp", addr, 250*time.Millisecond); err == nil {
			conn.Close()
			return nil
		}
		time.Sleep(25 * time.Millisecond)
	}
	return fmt.Errorf("timed out waiting for tcp %s", addr)
}

View File

@@ -0,0 +1,48 @@
package authgate
import (
"context"
"database/sql"
"errors"
"fmt"
"atcr.io/pkg/atproto"
)
// holdResolver resolves a user's default hold DID, preferring the
// Jetstream-cached `users.default_hold_did` and falling back to the
// AppView's configured default. Embedded into types in this package
// (Authorizer, ServiceAuthFetcher) so they share one implementation.
type holdResolver struct {
	db             *sql.DB // appview DB holding the Jetstream-fed users table
	defaultHoldDID string  // AppView-wide fallback; may be "" if unconfigured
}
// resolveHoldDID returns the normalized hold DID for userDID, or "" if no
// hold is configured anywhere (caller decides how to degrade).
func (h *holdResolver) resolveHoldDID(ctx context.Context, userDID string) (string, error) {
	var cached sql.NullString
	row := h.db.QueryRowContext(ctx,
		"SELECT default_hold_did FROM users WHERE did = ?", userDID,
	)
	// ErrNoRows (no user row at all) falls through to the fallback below.
	if err := row.Scan(&cached); err != nil && !errors.Is(err, sql.ErrNoRows) {
		return "", fmt.Errorf("look up default hold: %w", err)
	}
	// cached.String is "" whenever the column was NULL or the row missing,
	// so the per-user value and the empty cases collapse into one check.
	holdDID := cached.String
	if holdDID == "" {
		holdDID = h.defaultHoldDID
	}
	if holdDID == "" {
		// Nothing configured anywhere — graceful-degrade contract.
		return "", nil
	}
	resolved, err := atproto.ResolveHoldDID(ctx, holdDID)
	if err != nil {
		return "", fmt.Errorf("resolve hold DID %s: %w", holdDID, err)
	}
	return resolved, nil
}

View File

@@ -0,0 +1,95 @@
package authgate
import (
"context"
"strings"
"testing"
)
// TestResolveHoldDID_UserDefaultBeatsFallback: the Jetstream-cached per-user
// hold must win over the AppView-wide fallback.
func TestResolveHoldDID_UserDefaultBeatsFallback(t *testing.T) {
	db := newTestDB(t)
	seedUser(t, db, "did:plc:alice", "alice.test", "did:plc:hold999")
	resolver := holdResolver{db: db, defaultHoldDID: "did:plc:fallback"}
	got, err := resolver.resolveHoldDID(context.Background(), "did:plc:alice")
	if err != nil {
		t.Fatalf("resolveHoldDID: %v", err)
	}
	if got != "did:plc:hold999" {
		t.Errorf("got %q, want did:plc:hold999 (DB cache must beat fallback)", got)
	}
}
// TestResolveHoldDID_FallbackUsedWhenNullCache: a user row whose
// default_hold_did is NULL must fall back to the configured default.
func TestResolveHoldDID_FallbackUsedWhenNullCache(t *testing.T) {
	db := newTestDB(t)
	seedUser(t, db, "did:plc:alice", "alice.test", "")
	resolver := holdResolver{db: db, defaultHoldDID: "did:plc:fallback"}
	got, err := resolver.resolveHoldDID(context.Background(), "did:plc:alice")
	if err != nil {
		t.Fatalf("resolveHoldDID: %v", err)
	}
	if got != "did:plc:fallback" {
		t.Errorf("got %q, want did:plc:fallback", got)
	}
}
// TestResolveHoldDID_FallbackUsedWhenNoUserRow: a missing user row
// (sql.ErrNoRows) must fall through to the fallback rather than error.
func TestResolveHoldDID_FallbackUsedWhenNoUserRow(t *testing.T) {
	db := newTestDB(t)
	resolver := holdResolver{db: db, defaultHoldDID: "did:plc:fallback"}
	got, err := resolver.resolveHoldDID(context.Background(), "did:plc:nobody")
	if err != nil {
		t.Fatalf("resolveHoldDID: %v", err)
	}
	if got != "did:plc:fallback" {
		t.Errorf("got %q, want did:plc:fallback (ErrNoRows must fall through)", got)
	}
}
// TestResolveHoldDID_BothEmptyReturnsEmpty: with no cached value and no
// fallback, the resolver returns "" without error (graceful degrade).
func TestResolveHoldDID_BothEmptyReturnsEmpty(t *testing.T) {
	db := newTestDB(t)
	seedUser(t, db, "did:plc:alice", "alice.test", "")
	resolver := holdResolver{db: db, defaultHoldDID: ""}
	got, err := resolver.resolveHoldDID(context.Background(), "did:plc:alice")
	if err != nil {
		t.Fatalf("resolveHoldDID: %v", err)
	}
	if got != "" {
		t.Errorf("got %q, want empty (graceful-degrade contract)", got)
	}
}
// TestResolveHoldDID_PassesDIDPLCThrough: did:plc input short-circuits
// atproto.ResolveHoldDID's network call via its IsDID check. This is the
// no-network contract that keeps the rest of the suite hermetic.
func TestResolveHoldDID_PassesDIDPLCThrough(t *testing.T) {
	db := newTestDB(t)
	seedUser(t, db, "did:plc:alice", "alice.test", "did:plc:hold-abc-123")
	resolver := holdResolver{db: db}
	got, err := resolver.resolveHoldDID(context.Background(), "did:plc:alice")
	if err != nil {
		t.Fatalf("resolveHoldDID: %v", err)
	}
	if got != "did:plc:hold-abc-123" {
		t.Errorf("got %q, want did:plc:hold-abc-123", got)
	}
}
// TestResolveHoldDID_DBErrorWrapped: a real driver error (forced via a
// closed DB) must surface wrapped with the lookup context string.
func TestResolveHoldDID_DBErrorWrapped(t *testing.T) {
	db := newTestDB(t)
	_ = db.Close() // force a driver error on the next query
	resolver := holdResolver{db: db, defaultHoldDID: "did:plc:fallback"}
	_, err := resolver.resolveHoldDID(context.Background(), "did:plc:alice")
	if err == nil {
		t.Fatal("expected error from closed DB")
	}
	if !strings.Contains(err.Error(), "look up default hold") {
		t.Errorf("error %q should mention 'look up default hold'", err)
	}
}

View File

@@ -0,0 +1,289 @@
// Package authgate implements the auth-phase gate for ATCR's registry JWT
// issuance. The gate runs once per token request (push or pull): it always
// reconciles crew membership so first-time CLI users on a private hold
// can pass the hold-side read check, and additionally enforces hold
// membership (captain or crew with blob:write) plus storage quota for
// non-wildcard push scopes. Once the JWT is signed, the registry hot path
// trusts it for its short lifetime — no per-/v2/-request re-authorization.
package authgate
import (
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
"log/slog"
"net/http"
"net/url"
"time"
"atcr.io/pkg/appview/storage"
"atcr.io/pkg/atproto"
"atcr.io/pkg/auth"
"atcr.io/pkg/auth/oauth"
"atcr.io/pkg/auth/token"
)
// quotaResponse mirrors the public io.atcr.hold.getQuota response shape.
// Defined locally to avoid pulling pkg/hold/pds into the AppView binary
// (which would drag in mattn-bound carstore code).
type quotaResponse struct {
	// TotalSize is the user's current storage usage in bytes.
	TotalSize int64 `json:"totalSize"`
	// Limit is the byte quota; nil means no limit (checkQuota allows any
	// TotalSize when Limit is absent).
	Limit *int64 `json:"limit,omitempty"`
}
// Authorizer enforces hold membership and quota at /auth/token.
//
// Captain/crew checks are served from local Jetstream-fed tables
// (hold_captain_records, hold_crew_members) so they're sub-millisecond.
// Quota is a live GET to hold's public io.atcr.hold.getQuota endpoint,
// which fails open on network errors so a brief hold outage doesn't lock
// users out of pushing entirely.
type Authorizer struct {
	holdResolver // embedded: shares resolveHoldDID with ServiceAuthFetcher

	holdAuthorizer auth.HoldAuthorizer // passed to crew-membership reconciliation
	refresher      *oauth.Refresher    // nil when OAuth isn't configured; disables OAuth-flow token minting
	httpClient     *http.Client        // used only for the live quota call; overridable via WithHTTPClient
}
// Option configures an Authorizer during construction.
// Pass options to New, e.g. New(db, ha, r, hold, WithHTTPClient(c)).
type Option func(*Authorizer)
// WithHTTPClient overrides the HTTP client used for the live quota call.
// Production wiring leaves this at http.DefaultClient; tests pass an
// httptest.Server-backed client to drive checkQuota deterministically.
func WithHTTPClient(c *http.Client) Option {
	return func(a *Authorizer) {
		a.httpClient = c
	}
}
// New constructs an Authorizer. defaultHoldDID is the AppView's fallback
// hold (used when a user's sailor profile hasn't recorded one yet). The
// refresher is required for OAuth-flow service-token minting during
// EnsureCrewMembership reconciliation; pass nil if OAuth isn't configured.
func New(db *sql.DB, holdAuthorizer auth.HoldAuthorizer, refresher *oauth.Refresher, defaultHoldDID string, opts ...Option) *Authorizer {
	authorizer := &Authorizer{
		holdResolver:   holdResolver{db: db, defaultHoldDID: defaultHoldDID},
		holdAuthorizer: holdAuthorizer,
		refresher:      refresher,
		httpClient:     http.DefaultClient,
	}
	for _, apply := range opts {
		apply(authorizer)
	}
	return authorizer
}
// Authorize satisfies token.Authorizer.
//
// authMethod is one of token.AuthMethodOAuth or token.AuthMethodAppPassword
// and selects which service-token fetcher EnsureCrewMembership uses for
// reconciliation. We can't infer it from `did` alone because either flow
// can produce a valid session for the same identity.
//
// Crew reconciliation runs for both push and pull token requests so a
// first-time CLI user (especially OAuth/credential-helper) doesn't get
// 403'd on their first pull from a private hold. The remaining gates
// (membership requirement, quota) only apply to non-wildcard push.
func (a *Authorizer) Authorize(ctx context.Context, did, authMethod string, access []auth.AccessEntry) error {
	holdDID, err := a.resolveHoldDID(ctx, did)
	if err != nil {
		return err
	}
	if holdDID == "" {
		// No hold configured anywhere. The registry can't function without
		// one, but that's a deployment config problem — not something the
		// gate should turn into a token denial.
		return nil
	}

	// Captain check first — owners have all permissions implicitly, so a
	// positive result skips both reconciliation and the crew lookup below.
	// This is the common case: most users push to their own hold.
	captain, err := a.isCaptain(ctx, did, holdDID)
	if err != nil {
		return err
	}

	if !captain {
		// Reconcile crew membership for first-time CLI users (push or
		// pull). Idempotent, short-circuits via the approval cache, and
		// best-effort: errors are logged inside the helper.
		if fetcher := a.serviceTokenFetcher(ctx, authMethod, did); fetcher != nil {
			storage.EnsureCrewMembership(ctx, did, holdDID, a.holdAuthorizer, fetcher)
		}
	}

	// Pull-only or wildcard scope — the reconciliation above is all that's
	// needed; membership requirement and quota gate only concrete pushes.
	if !hasNonWildcardPushScope(access) {
		return nil
	}

	if !captain {
		if err := a.checkCrewBlobWrite(ctx, did, holdDID); err != nil {
			return err
		}
	}
	return a.checkQuota(ctx, did, holdDID)
}
// isCaptain returns true if userDID owns holdDID per the local Jetstream-
// fed hold_captain_records table. Returns (false, nil) if no captain record
// exists yet (hold not yet ingested, or doesn't exist).
func (a *Authorizer) isCaptain(ctx context.Context, userDID, holdDID string) (bool, error) {
	var owner string
	row := a.db.QueryRowContext(ctx,
		"SELECT owner_did FROM hold_captain_records WHERE hold_did = ?", holdDID,
	)
	switch err := row.Scan(&owner); {
	case errors.Is(err, sql.ErrNoRows):
		// Hold not ingested (or nonexistent): not captain, not an error.
		return false, nil
	case err != nil:
		return false, fmt.Errorf("look up hold captain: %w", err)
	}
	return owner == userDID, nil
}
// hasNonWildcardPushScope reports whether any concrete (non-"*") repository
// entry in the requested access list asks for the push action. Wildcard and
// non-repository entries are ignored: the push gate guards named repos only.
func hasNonWildcardPushScope(access []auth.AccessEntry) bool {
	for _, entry := range access {
		concreteRepo := entry.Type == "repository" && entry.Name != "*"
		if !concreteRepo {
			continue
		}
		for _, action := range entry.Actions {
			if action == "push" {
				return true
			}
		}
	}
	return false
}
// checkCrewBlobWrite returns nil if userDID is a crew member of holdDID with
// blob:write permission. Caller must have already determined that userDID is
// not the captain (captains have all permissions implicitly).
func (a *Authorizer) checkCrewBlobWrite(ctx context.Context, userDID, holdDID string) error {
	var permsJSON sql.NullString
	row := a.db.QueryRowContext(ctx,
		"SELECT permissions FROM hold_crew_members WHERE hold_did = ? AND member_did = ?",
		holdDID, userDID,
	)
	switch err := row.Scan(&permsJSON); {
	case errors.Is(err, sql.ErrNoRows):
		return fmt.Errorf("crew membership required: %s is not a member of hold %s", userDID, holdDID)
	case err != nil:
		return fmt.Errorf("look up crew membership: %w", err)
	}
	// NULL permissions scan to "" via NullString, which parses as no perms.
	if !permissionsAllowBlobWrite(permsJSON.String) {
		return fmt.Errorf("crew membership lacks blob:write on hold %s", holdDID)
	}
	return nil
}
// permissionsAllowBlobWrite reports whether the JSON-encoded permission
// list contains "blob:write". Empty, malformed, or non-array JSON all
// count as "no permission" rather than an error.
func permissionsAllowBlobWrite(permsJSON string) bool {
	if permsJSON == "" {
		return false
	}
	var perms []string
	if json.Unmarshal([]byte(permsJSON), &perms) != nil {
		return false
	}
	for _, perm := range perms {
		if perm == "blob:write" {
			return true
		}
	}
	return false
}
// checkQuota fails open on network errors. The hold's quota endpoint is
// public (no auth), but if the hold is unreachable we don't want every
// push everywhere to error out — Docker users would see denied:quota for
// reasons unrelated to their actual usage.
//
// The only denial path is a well-formed 200 response whose totalSize has
// reached the (non-nil) limit; every other failure mode logs a warning
// and allows the push.
func (a *Authorizer) checkQuota(ctx context.Context, userDID, holdDID string) error {
	holdURL, err := atproto.ResolveHoldURL(ctx, holdDID)
	if err != nil {
		slog.Warn("push gate: hold URL resolution failed; allowing push", "hold_did", holdDID, "error", err)
		return nil
	}
	// Encode userDID as a query value: did:web DIDs may legitimately contain
	// percent-encoded characters (e.g. "%3A" for port colons). Interpolating
	// such a DID raw would let the server's query parser decode "%3A" → ":",
	// mangling the DID and missing the records that were keyed by the
	// original form.
	quotaURL := fmt.Sprintf("%s%s?userDid=%s", holdURL, atproto.HoldGetQuota, url.QueryEscape(userDID))
	// Hard 5s cap so a slow hold can't stall token issuance; expiry of the
	// timeout surfaces as a Do() error below, which also fails open.
	reqCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	req, err := http.NewRequestWithContext(reqCtx, http.MethodGet, quotaURL, nil)
	if err != nil {
		slog.Warn("push gate: build quota request failed; allowing push", "did", userDID, "error", err)
		return nil
	}
	resp, err := a.httpClient.Do(req)
	if err != nil {
		slog.Warn("push gate: quota call failed; allowing push", "did", userDID, "hold_did", holdDID, "error", err)
		return nil
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		slog.Warn("push gate: quota call returned non-OK; allowing push", "did", userDID, "status", resp.StatusCode)
		return nil
	}
	var stats quotaResponse
	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
		slog.Warn("push gate: quota decode failed; allowing push", "did", userDID, "error", err)
		return nil
	}
	// nil Limit means the unlimited tier: never deny regardless of usage.
	if stats.Limit != nil && stats.TotalSize >= *stats.Limit {
		return fmt.Errorf("quota exceeded: %d / %d bytes used by %s. Delete images to free space",
			stats.TotalSize, *stats.Limit, userDID)
	}
	return nil
}
// serviceTokenFetcher returns the appropriate fetcher for the auth method,
// or nil if none is available (e.g. OAuth flow with no refresher configured).
// ctx is used for the cold-cache identity lookup inside resolvePDS.
func (a *Authorizer) serviceTokenFetcher(ctx context.Context, authMethod, userDID string) storage.ServiceTokenFetcher {
	pdsEndpoint, err := a.resolvePDS(ctx, userDID)
	if err != nil || pdsEndpoint == "" {
		// Without a PDS endpoint no token can be minted either way.
		return nil
	}
	if authMethod == token.AuthMethodAppPassword {
		return func(ctx context.Context, holdDID string) (string, error) {
			return auth.GetOrFetchServiceTokenWithAppPassword(ctx, userDID, holdDID, pdsEndpoint)
		}
	}
	if authMethod == token.AuthMethodOAuth && a.refresher != nil {
		return func(ctx context.Context, holdDID string) (string, error) {
			return auth.GetOrFetchServiceToken(ctx, a.refresher, userDID, holdDID, pdsEndpoint)
		}
	}
	// Unknown method, or OAuth without a refresher.
	return nil
}
// resolvePDS returns userDID's PDS endpoint via the cached identity
// directory (24h TTL). After the first resolution per user the call is
// <1ms; a cold miss issues live DNS/HTTPS lookups, so ctx is honored to
// let a client disconnect cancel them.
func (a *Authorizer) resolvePDS(ctx context.Context, userDID string) (string, error) {
	_, _, endpoint, err := atproto.ResolveIdentity(ctx, userDID)
	if err != nil {
		return "", err
	}
	return endpoint, nil
}

View File

@@ -0,0 +1,462 @@
package authgate
import (
"context"
"strings"
"testing"
"atcr.io/pkg/atproto"
"atcr.io/pkg/auth"
)
// TestHasNonWildcardPushScope covers the scope classifier's edge cases:
// wildcard bypass, non-repository entries, and mixed scope lists.
func TestHasNonWildcardPushScope(t *testing.T) {
	tests := []struct {
		name   string
		access []auth.AccessEntry
		want   bool
	}{
		{name: "empty", access: nil, want: false},
		{
			name: "pull only",
			access: []auth.AccessEntry{
				{Type: "repository", Name: "alice/myapp", Actions: []string{"pull"}},
			},
			want: false,
		},
		{
			name: "specific repo with push",
			access: []auth.AccessEntry{
				{Type: "repository", Name: "alice/myapp", Actions: []string{"pull", "push"}},
			},
			want: true,
		},
		{
			name: "wildcard repo with push is bypassed",
			access: []auth.AccessEntry{
				{Type: "repository", Name: "*", Actions: []string{"pull", "push"}},
			},
			want: false,
		},
		{
			name: "wildcard plus specific push",
			access: []auth.AccessEntry{
				{Type: "repository", Name: "*", Actions: []string{"pull", "push"}},
				{Type: "repository", Name: "alice/myapp", Actions: []string{"push"}},
			},
			want: true,
		},
		{
			name: "non-repository class ignored",
			access: []auth.AccessEntry{
				{Type: "registry", Name: "catalog", Actions: []string{"push"}},
			},
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := hasNonWildcardPushScope(tt.access); got != tt.want {
				t.Errorf("hasNonWildcardPushScope(%v) = %v, want %v", tt.access, got, tt.want)
			}
		})
	}
}
// TestPermissionsAllowBlobWrite exercises the permission JSON parser,
// including the degenerate inputs that must all read as "no write".
func TestPermissionsAllowBlobWrite(t *testing.T) {
	tests := []struct {
		name string
		json string
		want bool
	}{
		{name: "empty string", json: "", want: false},
		{name: "null", json: "null", want: false},
		{name: "empty array", json: "[]", want: false},
		{name: "blob:write present", json: `["blob:write"]`, want: true},
		{name: "blob:write among others", json: `["blob:read","blob:write","manifest:write"]`, want: true},
		{name: "only blob:read", json: `["blob:read"]`, want: false},
		{name: "garbage json", json: `not-json`, want: false},
		{name: "object instead of array", json: `{"x":1}`, want: false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := permissionsAllowBlobWrite(tt.json); got != tt.want {
				t.Errorf("permissionsAllowBlobWrite(%q) = %v, want %v", tt.json, got, tt.want)
			}
		})
	}
}
// --- isCaptain -------------------------------------------------------------
// TestIsCaptain_Match: the seeded owner must be recognized as captain.
func TestIsCaptain_Match(t *testing.T) {
	db := newTestDB(t)
	seedCaptain(t, db, "did:plc:hold1", "did:plc:alice")
	gate := New(db, fakeHoldAuthorizer{}, nil, "")
	got, err := gate.isCaptain(context.Background(), "did:plc:alice", "did:plc:hold1")
	if err != nil {
		t.Fatalf("isCaptain: %v", err)
	}
	if !got {
		t.Error("isCaptain(alice, hold1) = false, want true")
	}
}
// TestIsCaptain_NonOwnerNotMistakenForCaptain: a captain row exists for the
// hold but names someone else as owner. Captaincy is strictly the owner
// check — crew membership must not promote bob to captain.
func TestIsCaptain_NonOwnerNotMistakenForCaptain(t *testing.T) {
	db := newTestDB(t)
	seedCaptain(t, db, "did:plc:hold1", "did:plc:alice")
	seedCrewMember(t, db, "did:plc:hold1", "did:plc:bob", `["blob:write"]`)
	gate := New(db, fakeHoldAuthorizer{}, nil, "")
	got, err := gate.isCaptain(context.Background(), "did:plc:bob", "did:plc:hold1")
	if err != nil {
		t.Fatalf("isCaptain: %v", err)
	}
	if got {
		t.Error("isCaptain(bob, hold1) = true, want false (bob is crew, not owner)")
	}
}
// TestIsCaptain_NoCaptainRecord: a hold with no captain row yields
// (false, nil) — not-yet-ingested holds must not error the gate.
func TestIsCaptain_NoCaptainRecord(t *testing.T) {
	db := newTestDB(t)
	gate := New(db, fakeHoldAuthorizer{}, nil, "")
	got, err := gate.isCaptain(context.Background(), "did:plc:alice", "did:plc:unknownhold")
	if err != nil {
		t.Fatalf("isCaptain: %v", err)
	}
	if got {
		t.Error("isCaptain on missing hold should be false, not error")
	}
}
// TestIsCaptain_DBError: a real driver error (closed DB) must surface
// wrapped with the captain-lookup context.
func TestIsCaptain_DBError(t *testing.T) {
	db := newTestDB(t)
	_ = db.Close() // force a driver error on the next query
	gate := New(db, fakeHoldAuthorizer{}, nil, "")
	_, err := gate.isCaptain(context.Background(), "did:plc:alice", "did:plc:hold1")
	if err == nil {
		t.Fatal("expected error from closed DB")
	}
	if !strings.Contains(err.Error(), "look up hold captain") {
		t.Errorf("error %q should mention 'look up hold captain'", err)
	}
}
// --- checkCrewBlobWrite ----------------------------------------------------
// TestCheckCrewBlobWrite_HasWrite: a crew member holding blob:write passes.
func TestCheckCrewBlobWrite_HasWrite(t *testing.T) {
	db := newTestDB(t)
	seedCrewMember(t, db, "did:plc:hold1", "did:plc:alice", `["blob:write"]`)
	gate := New(db, fakeHoldAuthorizer{}, nil, "")
	err := gate.checkCrewBlobWrite(context.Background(), "did:plc:alice", "did:plc:hold1")
	if err != nil {
		t.Errorf("checkCrewBlobWrite = %v, want nil", err)
	}
}
// TestCheckCrewBlobWrite_OnlyRead: blob:read alone is insufficient to push.
func TestCheckCrewBlobWrite_OnlyRead(t *testing.T) {
	db := newTestDB(t)
	seedCrewMember(t, db, "did:plc:hold1", "did:plc:alice", `["blob:read"]`)
	gate := New(db, fakeHoldAuthorizer{}, nil, "")
	err := gate.checkCrewBlobWrite(context.Background(), "did:plc:alice", "did:plc:hold1")
	if err == nil || !strings.Contains(err.Error(), "lacks blob:write") {
		t.Errorf("expected 'lacks blob:write' error, got %v", err)
	}
}
// TestCheckCrewBlobWrite_NotAMember: the crew table holds rows only for
// someone else, so alice must be rejected as a non-member.
func TestCheckCrewBlobWrite_NotAMember(t *testing.T) {
	db := newTestDB(t)
	seedCrewMember(t, db, "did:plc:hold1", "did:plc:bob", `["blob:write"]`)
	gate := New(db, fakeHoldAuthorizer{}, nil, "")
	err := gate.checkCrewBlobWrite(context.Background(), "did:plc:alice", "did:plc:hold1")
	if err == nil || !strings.Contains(err.Error(), "crew membership required") {
		t.Errorf("expected 'crew membership required' error, got %v", err)
	}
}
// TestCheckCrewBlobWrite_NullPermissions: an empty Permissions string is
// written as NULL by BatchUpsertCrewMembers and must read as no write perm.
func TestCheckCrewBlobWrite_NullPermissions(t *testing.T) {
	db := newTestDB(t)
	seedCrewMember(t, db, "did:plc:hold1", "did:plc:alice", "")
	gate := New(db, fakeHoldAuthorizer{}, nil, "")
	err := gate.checkCrewBlobWrite(context.Background(), "did:plc:alice", "did:plc:hold1")
	if err == nil || !strings.Contains(err.Error(), "lacks blob:write") {
		t.Errorf("expected 'lacks blob:write' for NULL permissions, got %v", err)
	}
}
// --- checkQuota ------------------------------------------------------------
// TestCheckQuota_UnderLimit: usage below the limit allows the push, hits
// the quota endpoint exactly once, and passes the DID percent-encoded.
func TestCheckQuota_UnderLimit(t *testing.T) {
	atproto.SetTestMode(true)
	t.Cleanup(func() { atproto.SetTestMode(false) })
	srv := quotaServer(t, 200, `{"totalSize":100,"limit":1000}`)
	gate := New(newTestDB(t), fakeHoldAuthorizer{}, nil, "", WithHTTPClient(srv.httpClient()))
	if err := gate.checkQuota(context.Background(), "did:plc:alice", srv.holdDID); err != nil {
		t.Errorf("checkQuota under limit = %v, want nil", err)
	}
	if srv.hits != 1 {
		t.Errorf("expected 1 hit on quota endpoint, got %d", srv.hits)
	}
	// The client percent-encodes the query value (see
	// TestCheckQuota_EncodesUserDID), so plain "did:plc:alice" travels as
	// "did%3Aplc%3Aalice".
	if !strings.Contains(srv.lastURL, "userDid=did%3Aplc%3Aalice") {
		t.Errorf("expected userDid query param, got URL %q", srv.lastURL)
	}
	if !strings.Contains(srv.lastURL, atproto.HoldGetQuota) {
		t.Errorf("expected URL path to contain %q, got %q", atproto.HoldGetQuota, srv.lastURL)
	}
}
// TestCheckQuota_OverLimit: totalSize == limit denies (>= comparison) and
// the error names the usage, the limit, and the user.
func TestCheckQuota_OverLimit(t *testing.T) {
	atproto.SetTestMode(true)
	t.Cleanup(func() { atproto.SetTestMode(false) })
	srv := quotaServer(t, 200, `{"totalSize":1000,"limit":1000}`)
	gate := New(newTestDB(t), fakeHoldAuthorizer{}, nil, "", WithHTTPClient(srv.httpClient()))
	err := gate.checkQuota(context.Background(), "did:plc:alice", srv.holdDID)
	if err == nil {
		t.Fatal("checkQuota at limit should deny")
	}
	got := err.Error()
	for _, want := range []string{"quota exceeded", "1000", "did:plc:alice"} {
		if !strings.Contains(got, want) {
			t.Errorf("expected %q in error %q", want, got)
		}
	}
}
// TestCheckQuota_NilLimitAllows: the unlimited tier has limit == nil;
// even an enormous totalSize must not deny.
func TestCheckQuota_NilLimitAllows(t *testing.T) {
	atproto.SetTestMode(true)
	t.Cleanup(func() { atproto.SetTestMode(false) })
	srv := quotaServer(t, 200, `{"totalSize":99999999}`)
	gate := New(newTestDB(t), fakeHoldAuthorizer{}, nil, "", WithHTTPClient(srv.httpClient()))
	if err := gate.checkQuota(context.Background(), "did:plc:alice", srv.holdDID); err != nil {
		t.Errorf("checkQuota with nil limit = %v, want nil", err)
	}
}
// TestCheckQuota_500FailsOpen: a non-OK response from the hold allows the
// push (fail-open contract).
func TestCheckQuota_500FailsOpen(t *testing.T) {
	atproto.SetTestMode(true)
	t.Cleanup(func() { atproto.SetTestMode(false) })
	srv := quotaServer(t, 500, `oops`)
	gate := New(newTestDB(t), fakeHoldAuthorizer{}, nil, "", WithHTTPClient(srv.httpClient()))
	if err := gate.checkQuota(context.Background(), "did:plc:alice", srv.holdDID); err != nil {
		t.Errorf("checkQuota with 500 should fail open, got %v", err)
	}
}
// TestCheckQuota_BadJSONFailsOpen: an undecodable 200 body allows the push
// (fail-open contract).
func TestCheckQuota_BadJSONFailsOpen(t *testing.T) {
	atproto.SetTestMode(true)
	t.Cleanup(func() { atproto.SetTestMode(false) })
	srv := quotaServer(t, 200, `not-json`)
	gate := New(newTestDB(t), fakeHoldAuthorizer{}, nil, "", WithHTTPClient(srv.httpClient()))
	if err := gate.checkQuota(context.Background(), "did:plc:alice", srv.holdDID); err != nil {
		t.Errorf("checkQuota with malformed JSON should fail open, got %v", err)
	}
}
// TestCheckQuota_EncodesUserDID: did:web DIDs may contain percent-encoded
// characters (e.g. "%3A" for the port colon). Without query encoding the
// receiving server's parser would decode "%3A" → ":", mangling the DID and
// missing records keyed by the original form. The client therefore encodes
// the DID once so the server's single decode pass restores it exactly.
func TestCheckQuota_EncodesUserDID(t *testing.T) {
	atproto.SetTestMode(true)
	t.Cleanup(func() { atproto.SetTestMode(false) })
	srv := quotaServer(t, 200, `{"totalSize":100,"limit":1000}`)
	gate := New(newTestDB(t), fakeHoldAuthorizer{}, nil, "", WithHTTPClient(srv.httpClient()))
	encodedDID := "did:web:127.0.0.1%3A45397:user:alice.test"
	if err := gate.checkQuota(context.Background(), encodedDID, srv.holdDID); err != nil {
		t.Errorf("checkQuota with encoded DID = %v, want nil", err)
	}
	// The wire URL carries the double-encoded form; one server-side decode
	// yields the original DID back.
	if !strings.Contains(srv.lastURL, "did%3Aweb%3A127.0.0.1%253A45397%3Auser%3Aalice.test") {
		t.Errorf("expected query value to be percent-encoded; got URL %q", srv.lastURL)
	}
}
// TestCheckQuota_HoldURLResolutionFailsOpen: a "did:" prefixed but otherwise
// malformed identifier makes ResolveHoldURL → ResolveHoldDIDToURL →
// syntax.ParseDID error synchronously (no network call). Contract is
// fail-open so resolver trouble never blocks a push.
func TestCheckQuota_HoldURLResolutionFailsOpen(t *testing.T) {
	gate := New(newTestDB(t), fakeHoldAuthorizer{}, nil, "")
	err := gate.checkQuota(context.Background(), "did:plc:alice", "did:bogusmethod:no-host")
	if err != nil {
		t.Errorf("checkQuota with bad hold DID should fail open, got %v", err)
	}
}
// --- Authorize orchestration ----------------------------------------------
// pushAccess builds a pull+push scope for a single named repository.
func pushAccess(name string) []auth.AccessEntry {
	entry := auth.AccessEntry{Type: "repository", Name: name, Actions: []string{"pull", "push"}}
	return []auth.AccessEntry{entry}
}
// pullAccess builds a pull-only scope for a single named repository.
func pullAccess(name string) []auth.AccessEntry {
	entry := auth.AccessEntry{Type: "repository", Name: name, Actions: []string{"pull"}}
	return []auth.AccessEntry{entry}
}
// TestAuthorize_NoHoldAllowsAll: with no hold configured anywhere the gate
// allows everything — nil, pull, and push scopes alike.
func TestAuthorize_NoHoldAllowsAll(t *testing.T) {
	db := newTestDB(t)
	seedUser(t, db, "did:plc:alice", "alice.test", "")
	gate := New(db, fakeHoldAuthorizer{}, nil, "")
	scopes := [][]auth.AccessEntry{nil, pullAccess("alice/x"), pushAccess("alice/x")}
	for _, scope := range scopes {
		if err := gate.Authorize(context.Background(), "did:plc:alice", "", scope); err != nil {
			t.Errorf("Authorize(%v) with no hold = %v, want nil", scope, err)
		}
	}
}
// TestAuthorize_CaptainBypassesCrewCheck: the owner pushes even when a
// contradictory (read-only) crew row exists — captaincy bypasses the crew
// permission check entirely.
func TestAuthorize_CaptainBypassesCrewCheck(t *testing.T) {
	atproto.SetTestMode(true)
	t.Cleanup(func() { atproto.SetTestMode(false) })
	srv := quotaServer(t, 200, `{"totalSize":1,"limit":1000}`)
	db := newTestDB(t)
	seedUser(t, db, "did:plc:alice", "alice.test", srv.holdDID)
	seedCaptain(t, db, srv.holdDID, "did:plc:alice")
	seedCrewMember(t, db, srv.holdDID, "did:plc:alice", `["blob:read"]`)
	gate := New(db, fakeHoldAuthorizer{}, nil, "", WithHTTPClient(srv.httpClient()))
	if err := gate.Authorize(context.Background(), "did:plc:alice", "", pushAccess("alice/x")); err != nil {
		t.Errorf("Authorize(captain push) = %v, want nil", err)
	}
}
// TestAuthorize_NonCaptainPushWithoutCrewDenied: a push by someone who is
// neither captain nor crew of the hold is rejected with a
// "crew membership required" error.
func TestAuthorize_NonCaptainPushWithoutCrewDenied(t *testing.T) {
	atproto.SetTestMode(true)
	t.Cleanup(func() { atproto.SetTestMode(false) })
	srv := quotaServer(t, 200, `{"totalSize":1,"limit":1000}`)
	database := newTestDB(t)
	seedUser(t, database, "did:plc:bob", "bob.test", srv.holdDID)
	seedCaptain(t, database, srv.holdDID, "did:plc:alice") // alice owns; bob is not crew
	gate := New(database, fakeHoldAuthorizer{}, nil, "", WithHTTPClient(srv.httpClient()))
	err := gate.Authorize(context.Background(), "did:plc:bob", "", pushAccess("bob/x"))
	if err == nil || !strings.Contains(err.Error(), "crew membership required") {
		t.Errorf("expected 'crew membership required', got %v", err)
	}
}
// TestAuthorize_NonCaptainPushWithoutBlobWriteDenied: a crew member whose
// permissions lack blob:write may not push.
func TestAuthorize_NonCaptainPushWithoutBlobWriteDenied(t *testing.T) {
	atproto.SetTestMode(true)
	t.Cleanup(func() { atproto.SetTestMode(false) })
	srv := quotaServer(t, 200, `{"totalSize":1,"limit":1000}`)
	database := newTestDB(t)
	seedUser(t, database, "did:plc:bob", "bob.test", srv.holdDID)
	seedCaptain(t, database, srv.holdDID, "did:plc:alice")
	seedCrewMember(t, database, srv.holdDID, "did:plc:bob", `["blob:read"]`)
	gate := New(database, fakeHoldAuthorizer{}, nil, "", WithHTTPClient(srv.httpClient()))
	err := gate.Authorize(context.Background(), "did:plc:bob", "", pushAccess("bob/x"))
	if err == nil || !strings.Contains(err.Error(), "lacks blob:write") {
		t.Errorf("expected 'lacks blob:write', got %v", err)
	}
}
// TestAuthorize_NonCaptainPushUnderQuotaAllowed: a crew member with
// blob:write whose hold usage is below the limit may push.
func TestAuthorize_NonCaptainPushUnderQuotaAllowed(t *testing.T) {
	atproto.SetTestMode(true)
	t.Cleanup(func() { atproto.SetTestMode(false) })
	srv := quotaServer(t, 200, `{"totalSize":100,"limit":1000}`)
	database := newTestDB(t)
	seedUser(t, database, "did:plc:bob", "bob.test", srv.holdDID)
	seedCaptain(t, database, srv.holdDID, "did:plc:alice")
	seedCrewMember(t, database, srv.holdDID, "did:plc:bob", `["blob:write"]`)
	gate := New(database, fakeHoldAuthorizer{}, nil, "", WithHTTPClient(srv.httpClient()))
	err := gate.Authorize(context.Background(), "did:plc:bob", "", pushAccess("bob/x"))
	if err != nil {
		t.Errorf("Authorize(crew blob:write under quota) = %v, want nil", err)
	}
}
// TestAuthorize_NonCaptainPushOverQuotaDenied: a crew member with
// blob:write is still rejected when the hold reports usage at the limit.
func TestAuthorize_NonCaptainPushOverQuotaDenied(t *testing.T) {
	atproto.SetTestMode(true)
	t.Cleanup(func() { atproto.SetTestMode(false) })
	srv := quotaServer(t, 200, `{"totalSize":1000,"limit":1000}`)
	database := newTestDB(t)
	seedUser(t, database, "did:plc:bob", "bob.test", srv.holdDID)
	seedCaptain(t, database, srv.holdDID, "did:plc:alice")
	seedCrewMember(t, database, srv.holdDID, "did:plc:bob", `["blob:write"]`)
	gate := New(database, fakeHoldAuthorizer{}, nil, "", WithHTTPClient(srv.httpClient()))
	err := gate.Authorize(context.Background(), "did:plc:bob", "", pushAccess("bob/x"))
	if err == nil || !strings.Contains(err.Error(), "quota exceeded") {
		t.Errorf("expected 'quota exceeded', got %v", err)
	}
}
// TestAuthorize_PullOnlySkipsMembershipAndQuota: a pull-only scope must
// trigger neither the crew-membership gate nor the hold quota endpoint.
func TestAuthorize_PullOnlySkipsMembershipAndQuota(t *testing.T) {
	atproto.SetTestMode(true)
	t.Cleanup(func() { atproto.SetTestMode(false) })
	// Quota server installed but should never be hit: pull bypasses both
	// the membership requirement and the quota call.
	srv := quotaServer(t, 200, `{"totalSize":1000,"limit":1000}`)
	database := newTestDB(t)
	seedUser(t, database, "did:plc:bob", "bob.test", srv.holdDID)
	seedCaptain(t, database, srv.holdDID, "did:plc:alice") // bob is NOT captain, NOT crew
	gate := New(database, fakeHoldAuthorizer{}, nil, "", WithHTTPClient(srv.httpClient()))
	if err := gate.Authorize(context.Background(), "did:plc:bob", "", pullAccess("alice/x")); err != nil {
		t.Errorf("Authorize(pull only) = %v, want nil", err)
	}
	if got := srv.hits; got != 0 {
		t.Errorf("quota endpoint hit %d times for pull-only request, want 0", got)
	}
}
// TestAuthorize_WildcardPushTreatedAsPull: a wildcard ("*") push scope is
// treated like a pull — no membership gate and no quota call.
func TestAuthorize_WildcardPushTreatedAsPull(t *testing.T) {
	atproto.SetTestMode(true)
	t.Cleanup(func() { atproto.SetTestMode(false) })
	srv := quotaServer(t, 200, `{"totalSize":1000,"limit":1000}`)
	database := newTestDB(t)
	seedUser(t, database, "did:plc:bob", "bob.test", srv.holdDID)
	seedCaptain(t, database, srv.holdDID, "did:plc:alice")
	gate := New(database, fakeHoldAuthorizer{}, nil, "", WithHTTPClient(srv.httpClient()))
	scope := []auth.AccessEntry{{Type: "repository", Name: "*", Actions: []string{"pull", "push"}}}
	if err := gate.Authorize(context.Background(), "did:plc:bob", "", scope); err != nil {
		t.Errorf("Authorize(wildcard push) = %v, want nil (treated as pull)", err)
	}
	if got := srv.hits; got != 0 {
		t.Errorf("quota endpoint hit %d times for wildcard scope, want 0", got)
	}
}

View File

@@ -0,0 +1,99 @@
package authgate
import (
"context"
"database/sql"
"errors"
"fmt"
"time"
"atcr.io/pkg/atproto"
"atcr.io/pkg/auth"
"atcr.io/pkg/auth/oauth"
"atcr.io/pkg/auth/token"
)
// ServiceAuthFetcher pre-mints the AppView↔hold service-auth at /auth/token
// time so the registry JWT can be bound to its lifetime.
//
// Flow: token handler calls Fetch with the requester's DID and chosen auth
// method, we resolve their hold, call the appropriate service-token fetcher
// (which caches the result with the PDS-granted exp), and read the cached
// expiry back. The token handler stamps the JWT's exp from that value, so
// the JWT and service-auth expire concurrently.
type ServiceAuthFetcher struct {
	// Embedded holdResolver supplies resolveHoldDID (DB-backed lookup with
	// the AppView-level defaultHoldDID fallback).
	holdResolver
	// refresher mints OAuth-flow service tokens; may be nil when only the
	// app-password flow is needed (Fetch then errors on the OAuth path).
	refresher *oauth.Refresher
	// cache is read back after the fetch to learn the granted expiry;
	// defaults to auth.DefaultCache(), overridable via WithCache.
	cache *auth.Cache
}
// ServiceAuthOption configures a ServiceAuthFetcher.
type ServiceAuthOption func(*ServiceAuthFetcher)
// WithCache routes expiry read-back through the supplied cache instead of
// the package default. GetOrFetchServiceToken* still writes to the default
// cache today; injecting a fresh Cache is currently only useful for tests
// that pre-seed entries and want to verify the read-back path.
//
// NOTE(review): because the fetchers write to the default cache, a custom
// cache that was NOT pre-seeded will make Fetch fail with a cache-miss
// error even after a successful token fetch — confirm before using this
// outside tests.
func WithCache(c *auth.Cache) ServiceAuthOption {
	return func(f *ServiceAuthFetcher) { f.cache = c }
}
// NewServiceAuthFetcher constructs a ServiceAuthFetcher. defaultHoldDID is
// the AppView fallback used when a user has no sailor profile yet. The
// refresher is required for OAuth-flow service-token minting; pass nil if
// only the app-password flow needs to work.
func NewServiceAuthFetcher(db *sql.DB, refresher *oauth.Refresher, defaultHoldDID string, opts ...ServiceAuthOption) *ServiceAuthFetcher {
	fetcher := &ServiceAuthFetcher{
		holdResolver: holdResolver{db: db, defaultHoldDID: defaultHoldDID},
		refresher:    refresher,
		cache:        auth.DefaultCache(),
	}
	for _, apply := range opts {
		apply(fetcher)
	}
	return fetcher
}
// Fetch satisfies token.ServiceAuthFetcher. Returns the granted expiry of
// the cached service-auth, or zero time when the user has no hold (caller
// falls back to the issuer's default).
func (f *ServiceAuthFetcher) Fetch(ctx context.Context, did, authMethod string) (time.Time, error) {
	var zero time.Time
	hold, err := f.resolveHoldDID(ctx, did)
	if err != nil {
		return zero, err
	}
	if hold == "" {
		// No hold configured anywhere — graceful degradation.
		return zero, nil
	}
	_, _, pds, err := atproto.ResolveIdentity(ctx, did)
	if err != nil {
		return zero, fmt.Errorf("resolve PDS for %s: %w", did, err)
	}
	switch authMethod {
	case token.AuthMethodOAuth:
		if f.refresher == nil {
			return zero, errors.New("OAuth flow requires a refresher")
		}
		if _, fetchErr := auth.GetOrFetchServiceToken(ctx, f.refresher, did, hold, pds); fetchErr != nil {
			return zero, fmt.Errorf("oauth service-auth fetch: %w", fetchErr)
		}
	case token.AuthMethodAppPassword:
		if _, fetchErr := auth.GetOrFetchServiceTokenWithAppPassword(ctx, did, hold, pds); fetchErr != nil {
			return zero, fmt.Errorf("app-password service-auth fetch: %w", fetchErr)
		}
	default:
		return zero, fmt.Errorf("unknown auth method: %q", authMethod)
	}
	// GetOrFetchServiceToken* caches as a side effect — read the granted
	// expiry back. The cached value already has the 10s safety margin from
	// pkg/auth/cache.go applied.
	_, grantedExp := f.cache.Get(did, hold)
	if grantedExp.IsZero() {
		return zero, errors.New("service-auth fetched but cache miss")
	}
	return grantedExp, nil
}

View File

@@ -0,0 +1,26 @@
package authgate
import (
"context"
"testing"
)
// TestFetch_NoHoldReturnsZero exercises the early-return branch that fires
// before atproto.ResolveIdentity is called — the only Fetch path that
// stays hermetic. The other branches (authMethod switch, OAuth refresher
// check, success-path read-back) all traverse the directory singleton and
// were explicitly scoped out of unit testing; see the plan file's
// "Out of scope" section.
func TestFetch_NoHoldReturnsZero(t *testing.T) {
	database := newTestDB(t)
	seedUser(t, database, "did:plc:alice", "alice.test", "")
	fetcher := NewServiceAuthFetcher(database, nil, "")
	got, err := fetcher.Fetch(context.Background(), "did:plc:alice", "")
	if err != nil {
		t.Fatalf("Fetch: %v", err)
	}
	if !got.IsZero() {
		t.Errorf("Fetch with no hold returned non-zero exp %v, want zero", got)
	}
}

View File

@@ -0,0 +1,129 @@
package authgate
import (
"context"
"database/sql"
"net/http"
"net/http/httptest"
"strings"
"testing"
"atcr.io/pkg/appview/db"
"atcr.io/pkg/atproto"
"atcr.io/pkg/auth"
)
// newTestDB returns an in-memory libsql DB with the full appview schema
// applied. Tears down on test completion.
func newTestDB(t *testing.T) *sql.DB {
	t.Helper()
	handle, err := db.InitDB(":memory:", db.LibsqlConfig{})
	if err != nil {
		t.Fatalf("InitDB: %v", err)
	}
	t.Cleanup(func() { _ = handle.Close() })
	return handle
}
// seedUser upserts a users row and optionally sets default_hold_did.
// Pass "" for defaultHold to leave it NULL.
func seedUser(t *testing.T, d *sql.DB, did, handle, defaultHold string) {
	t.Helper()
	row := &db.User{DID: did, Handle: handle, PDSEndpoint: "https://pds.example/" + did}
	if err := db.UpsertUser(d, row); err != nil {
		t.Fatalf("UpsertUser(%s): %v", did, err)
	}
	if defaultHold == "" {
		return
	}
	if err := db.UpdateUserDefaultHold(d, did, defaultHold); err != nil {
		t.Fatalf("UpdateUserDefaultHold(%s, %s): %v", did, defaultHold, err)
	}
}
// seedCaptain inserts a single hold_captain_records row.
func seedCaptain(t *testing.T, d *sql.DB, holdDID, ownerDID string) {
	t.Helper()
	records := []db.HoldCaptainRecord{
		{HoldDID: holdDID, OwnerDID: ownerDID, Public: false, AllowAllCrew: false},
	}
	if err := db.BatchUpsertCaptainRecords(d, records); err != nil {
		t.Fatalf("BatchUpsertCaptainRecords(%s, %s): %v", holdDID, ownerDID, err)
	}
}
// seedCrewMember inserts a single hold_crew_members row with the given
// permissions JSON (pass "" to leave permissions NULL — note that the
// underlying schema may coerce empty strings; pass `"[]"` for an empty
// permissions array).
func seedCrewMember(t *testing.T, d *sql.DB, holdDID, memberDID, permsJSON string) {
	t.Helper()
	members := []db.CrewMember{
		{HoldDID: holdDID, MemberDID: memberDID, Rkey: "rkey-" + memberDID, Role: "crew", Permissions: permsJSON},
	}
	if err := db.BatchUpsertCrewMembers(d, members); err != nil {
		t.Fatalf("BatchUpsertCrewMembers(%s, %s): %v", holdDID, memberDID, err)
	}
}
// quotaServerResult captures HTTP traffic the server saw, for assertions.
type quotaServerResult struct {
	server *httptest.Server
	holdDID string // did:web:127.0.0.1%3APORT form
	// hits counts requests served. Not mutex-guarded: tests read it only
	// after their synchronous HTTP calls return, and reading the response
	// establishes happens-before with the handler's increment — do not
	// read it concurrently with in-flight requests.
	hits int
	// lastURL is the URL (path+query) of the most recent request served.
	lastURL string
}
// quotaServer spins up an httptest.Server that responds to every request
// with the given status + body, records hit count + last URL, and returns
// both the server URL and the did:web:HOST form that resolves to it under
// atproto.SetTestMode(true).
func quotaServer(t *testing.T, status int, body string) *quotaServerResult {
	t.Helper()
	qs := &quotaServerResult{}
	handler := func(w http.ResponseWriter, r *http.Request) {
		qs.hits++
		qs.lastURL = r.URL.String()
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(status)
		_, _ = w.Write([]byte(body))
	}
	qs.server = httptest.NewServer(http.HandlerFunc(handler))
	t.Cleanup(qs.server.Close)
	// httptest.Server.URL has the form "http://127.0.0.1:PORT". The
	// did:web equivalent percent-encodes the colon; didWebToURL reverses
	// the encoding and re-derives the http://host:port form, which is
	// what we need for atproto.SetTestMode(true) to route requests.
	host := strings.TrimPrefix(qs.server.URL, "http://")
	qs.holdDID = "did:web:" + strings.Replace(host, ":", "%3A", 1)
	return qs
}
// httpClient returns the server's preconfigured client. For a TLS server
// this client would trust the test certificate; here httptest.NewServer is
// plain HTTP, so it simply routes requests to the loopback listener.
func (r *quotaServerResult) httpClient() *http.Client {
	return r.server.Client()
}
// fakeHoldAuthorizer is a no-op auth.HoldAuthorizer stub. The Authorize
// orchestration tests don't exercise the reconciliation closure (the
// closure is nil for our purposes because we don't supply a refresher
// and don't go through ResolveIdentity), so we don't need atomic
// counters — just zero-value returns.
type fakeHoldAuthorizer struct{}
// CheckReadAccess always grants read access.
func (fakeHoldAuthorizer) CheckReadAccess(_ context.Context, _, _ string) (bool, error) {
	return true, nil
}
// CheckWriteAccess always grants write access.
func (fakeHoldAuthorizer) CheckWriteAccess(_ context.Context, _, _ string) (bool, error) {
	return true, nil
}
// GetCaptainRecord reports no captain record (nil, nil).
func (fakeHoldAuthorizer) GetCaptainRecord(_ context.Context, _ string) (*atproto.CaptainRecord, error) {
	return nil, nil
}
// IsCrewMember reports non-membership; crew state comes from the DB seeds.
func (fakeHoldAuthorizer) IsCrewMember(_ context.Context, _, _ string) (bool, error) {
	return false, nil
}
// ClearCrewDenial is a no-op.
func (fakeHoldAuthorizer) ClearCrewDenial(_ context.Context, _, _ string) error { return nil }
// IsCachedCrewMember reports non-membership.
func (fakeHoldAuthorizer) IsCachedCrewMember(_ context.Context, _, _ string) (bool, error) {
	return false, nil
}
// RecordCrewApproval is a no-op.
func (fakeHoldAuthorizer) RecordCrewApproval(_ context.Context, _, _ string) error { return nil }
// Compile-time check that the stub satisfies the interface.
var _ auth.HoldAuthorizer = fakeHoldAuthorizer{}

View File

@@ -11,6 +11,7 @@ import (
"fmt"
"net/url"
"os"
"strings"
"time"
"github.com/distribution/distribution/v3/configuration"
@@ -147,8 +148,11 @@ type AIConfig struct {
// LabelerRefConfig defines the connection to an ATProto labeler service.
type LabelerRefConfig struct {
// DID or URL of the labeler service for content moderation.
DID string `yaml:"did" comment:"DID or URL of the ATProto labeler (e.g., did:web:labeler.atcr.io). Empty disables label filtering."`
// DID of the labeler service for content moderation. The HTTP endpoint
// is resolved at runtime via the labeler's #atproto_labeler service entry
// in its DID document (plc.directory for did:plc, /.well-known/did.json
// for did:web).
DID string `yaml:"did" comment:"DID of the ATProto labeler (did:plc:... or did:web:...). Empty disables label filtering."`
}
// setDefaults registers all default values on the given Viper instance.
@@ -280,6 +284,9 @@ func LoadConfig(yamlPath string) (*Config, error) {
if cfg.Server.DefaultHoldDID == "" {
return nil, fmt.Errorf("server.default_hold_did is required (env: ATCR_SERVER_DEFAULT_HOLD_DID)")
}
if cfg.Labeler.DID != "" && !strings.HasPrefix(cfg.Labeler.DID, "did:") {
return nil, fmt.Errorf("labeler.did must be a DID (did:plc:... or did:web:...), got %q", cfg.Labeler.DID)
}
// Build distribution config (unchanged)
distConfig, err := buildDistributionConfig(cfg, v)

View File

@@ -1035,16 +1035,14 @@ func (p *Processor) ProcessAccount(ctx context.Context, did string, active bool,
return nil
}
// extractRepoFromRecord extracts the repository field from a record's JSON data.
// Returns empty string for collections that don't have a repository field
// (e.g., sailor profile, captain, crew).
// extractRepoFromRecord extracts the repository field from a record's JSON data
// for user-owned collections subject to labeler takedowns. Hold-owned collections
// (io.atcr.hold.*) are intentionally excluded — the labeler doesn't moderate holds.
func extractRepoFromRecord(collection string, data []byte) string {
switch collection {
case atproto.ManifestCollection,
atproto.TagCollection,
atproto.RepoPageCollection,
atproto.StatsCollection,
atproto.ScanCollection:
atproto.RepoPageCollection:
var rec struct {
Repository string `json:"repository"`
}

View File

@@ -46,6 +46,13 @@ type Worker struct {
lastPongTime time.Time
pongMutex sync.Mutex
// Last-event tracking — used as a liveness signal alongside pongs, since
// some upstream Jetstream endpoints don't reliably reply to client pings.
// The connection is only considered dead when BOTH pongs and events have
// been silent past the timeout.
lastEventTime time.Time
lastEventMutex sync.Mutex
// In-memory cursor tracking for reconnects
lastCursor int64
cursorMutex sync.RWMutex
@@ -131,6 +138,11 @@ func (w *Worker) Start(ctx context.Context) error {
w.lastPongTime = time.Now()
w.pongMutex.Unlock()
// Seed last-event time so the watchdog has a baseline before any frames arrive.
w.lastEventMutex.Lock()
w.lastEventTime = time.Now()
w.lastEventMutex.Unlock()
// Set up pong handler - called when server responds to our ping
conn.SetPongHandler(func(appData string) error {
w.pongMutex.Lock()
@@ -184,9 +196,21 @@ func (w *Worker) Start(ctx context.Context) error {
pongsTotal := w.pongsReceived
w.pongMutex.Unlock()
// If no pong for 60 seconds, connection is likely dead
if timeSinceLastPong > 60*time.Second {
slog.Info("Jetstream no pong received, closing connection", "time_since_last_pong", timeSinceLastPong, "pings_sent", pingsTotal, "pongs_received", pongsTotal)
// Connection is only "dead" when BOTH pongs and events have been
// silent for 60s. Some upstream endpoints don't reply to client
// pings even when the firehose is happily streaming data, so
// using pong silence alone causes spurious reconnects (and a
// rewind/replay loop on the cursor).
w.lastEventMutex.Lock()
timeSinceLastEvent := time.Since(w.lastEventTime)
w.lastEventMutex.Unlock()
if timeSinceLastPong > 60*time.Second && timeSinceLastEvent > 60*time.Second {
slog.Info("Jetstream no pong AND no events, closing connection",
"time_since_last_pong", timeSinceLastPong,
"time_since_last_event", timeSinceLastEvent,
"pings_sent", pingsTotal,
"pongs_received", pongsTotal)
conn.Close()
return
}
@@ -261,6 +285,16 @@ func (w *Worker) Start(ctx context.Context) error {
return fmt.Errorf("failed to read message: %w", err)
}
// Any frame counts as liveness — refresh the deadline and the
// last-event marker so the watchdog and the read deadline don't
// fire while data is flowing, even if the server isn't ponging.
w.lastEventMutex.Lock()
w.lastEventTime = time.Now()
w.lastEventMutex.Unlock()
if err := conn.SetReadDeadline(time.Now().Add(90 * time.Second)); err != nil {
slog.Warn("Jetstream failed to refresh read deadline", "error", err)
}
// For now, process uncompressed messages
// TODO: Re-enable compression once debugging is complete
_ = decoder // Keep decoder to avoid unused variable error
@@ -318,15 +352,21 @@ func (w *Worker) StartWithFailover(ctx context.Context) {
currentURL := w.endpoints.Current()
w.jetstreamURL = currentURL
slog.Info("Jetstream connecting", "url", currentURL)
// Resume from the most recent processed event before each (re)connect,
// otherwise we'd open the WS with the cursor from initial bootstrap
// and replay the same window every time.
if latest := w.GetLastCursor(); latest > 0 {
w.cursorMutex.Lock()
w.startCursor = latest
w.cursorMutex.Unlock()
}
slog.Info("Jetstream connecting", "url", currentURL, "cursor", w.startCursor)
err := w.Start(ctx)
if ctx.Err() != nil {
return // Context cancelled, clean shutdown
}
// Capture cursor at disconnect time for rewind calculation
disconnectCursor := w.GetLastCursor()
// Retry same endpoint with escalating delays
recovered := false
for i, delay := range retryDelays {
@@ -341,6 +381,16 @@ func (w *Worker) StartWithFailover(ctx context.Context) {
return
}
// Resume from the most recent processed event, not the cursor
// the connection was originally opened with. Without this, every
// reconnect re-uses the bootstrap cursor and replays the same
// historical window forever.
if latest := w.GetLastCursor(); latest > 0 {
w.cursorMutex.Lock()
w.startCursor = latest
w.cursorMutex.Unlock()
}
w.jetstreamURL = currentURL
err = w.Start(ctx)
if ctx.Err() != nil {
@@ -350,39 +400,21 @@ func (w *Worker) StartWithFailover(ctx context.Context) {
recovered = true
break
}
// Update disconnect cursor if we got further
if latest := w.GetLastCursor(); latest > disconnectCursor {
disconnectCursor = latest
}
}
if recovered {
continue
}
// All retries failed — failover to next endpoint
// All retries failed — failover to next endpoint. The outer loop will
// pick up startCursor from lastCursor on the next iteration, so no
// rewind is needed here: lastCursor is exactly where we stopped
// processing.
failedURL := currentURL
nextURL := w.endpoints.Next()
// Rewind cursor 30 seconds (30M microseconds) to avoid gaps
if disconnectCursor > 0 {
rewound := disconnectCursor - 30_000_000
if rewound < 0 {
rewound = 0
}
w.cursorMutex.Lock()
w.lastCursor = rewound
w.startCursor = rewound
w.cursorMutex.Unlock()
slog.Warn("Jetstream failing over to next endpoint",
"failed_url", failedURL,
"next_url", nextURL,
"cursor_rewound_by", "30s")
} else {
slog.Warn("Jetstream failing over to next endpoint",
"failed_url", failedURL,
"next_url", nextURL)
}
slog.Warn("Jetstream failing over to next endpoint",
"failed_url", failedURL,
"next_url", nextURL)
}
}

View File

@@ -4,6 +4,7 @@ package labeler
import (
"bytes"
"context"
"database/sql"
"errors"
"fmt"
@@ -13,8 +14,10 @@ import (
"time"
"atcr.io/pkg/appview/db"
"atcr.io/pkg/atproto"
comatproto "github.com/bluesky-social/indigo/api/atproto"
"github.com/bluesky-social/indigo/atproto/syntax"
"github.com/bluesky-social/indigo/events"
"github.com/gorilla/websocket"
)
@@ -25,19 +28,19 @@ const TakedownLabelValue = "!takedown"
// Subscriber connects to a labeler's subscribeLabels endpoint and mirrors
// the current set of active takedowns into the appview database.
type Subscriber struct {
labelerURL string
labelerDID string
database *sql.DB
stopCh chan struct{}
}
// NewSubscriber creates a new labeler subscriber. labelerDIDOrURL is the
// original config value (used to preserve a configured did:web identifier
// when present); labelerURL is the resolved HTTP(S) endpoint.
func NewSubscriber(labelerDIDOrURL, labelerURL string, database *sql.DB) *Subscriber {
// NewSubscriber creates a new labeler subscriber. labelerDID is a did:plc or
// did:web identifier. The websocket endpoint is resolved on each (re)connect
// via the shared identity directory's #atproto_labeler service entry, so the
// labeler can move (or fix a misconfigured endpoint) without clients
// redeploying.
func NewSubscriber(labelerDID string, database *sql.DB) *Subscriber {
return &Subscriber{
labelerURL: labelerURL,
labelerDID: deriveLabelerDID(labelerDIDOrURL, labelerURL),
labelerDID: labelerDID,
database: database,
stopCh: make(chan struct{}),
}
@@ -88,7 +91,14 @@ func (s *Subscriber) connect() error {
return fmt.Errorf("failed to get cursor: %w", err)
}
wsURL := toWebSocketURL(s.labelerURL) + "/xrpc/com.atproto.label.subscribeLabels"
resolveCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
httpURL, err := resolveLabelerURL(resolveCtx, s.labelerDID)
cancel()
if err != nil {
return fmt.Errorf("resolve labeler endpoint: %w", err)
}
wsURL := toWebSocketURL(httpURL) + "/xrpc/com.atproto.label.subscribeLabels"
if cursor > 0 {
wsURL += fmt.Sprintf("?cursor=%d", cursor)
}
@@ -101,7 +111,7 @@ func (s *Subscriber) connect() error {
}
defer conn.Close()
slog.Info("Connected to labeler", "url", s.labelerURL)
slog.Info("Connected to labeler", "url", httpURL)
for {
select {
@@ -267,24 +277,25 @@ func decodeFrame(payload []byte) (int64, []*comatproto.LabelDefs_Label, error) {
}
}
// deriveLabelerDID returns the canonical labeler DID for source filtering.
// When the operator gave us a did:... identifier directly, we use it as-is.
// When they gave us a URL, we derive a did:web from its host so dev URLs
// like http://labeler:5002 yield did:web:labeler%3A5002, matching the
// labeler's own self-served identity.
func deriveLabelerDID(labelerDIDOrURL, httpURL string) string {
if strings.HasPrefix(labelerDIDOrURL, "did:") {
return labelerDIDOrURL
}
u, err := url.Parse(httpURL)
// resolveLabelerURL resolves a labeler DID to its HTTP(S) endpoint by looking
// up the #atproto_labeler service in the shared identity directory: did:plc
// via plc.directory, did:web via /.well-known/did.json. The directory is the
// source of truth — clients don't need redeploying when the labeler moves or
// fixes a misconfigured endpoint.
func resolveLabelerURL(ctx context.Context, labelerDID string) (string, error) {
parsed, err := syntax.ParseDID(labelerDID)
if err != nil {
return labelerDIDOrURL
return "", fmt.Errorf("labeler: invalid DID %q: %w", labelerDID, err)
}
host := u.Hostname()
if port := u.Port(); port != "" {
host += "%3A" + port
ident, err := atproto.GetDirectory().LookupDID(ctx, parsed)
if err != nil {
return "", fmt.Errorf("labeler: failed to resolve %s: %w", labelerDID, err)
}
return "did:web:" + host
endpoint := ident.GetServiceEndpoint("atproto_labeler")
if endpoint == "" {
return "", fmt.Errorf("labeler: %s has no #atproto_labeler service endpoint", labelerDID)
}
return endpoint, nil
}
// toWebSocketURL converts an HTTP URL to a WebSocket URL.
@@ -302,25 +313,11 @@ func toWebSocketURL(httpURL string) string {
return u.String()
}
// ParseLabelerURL parses a labeler DID or URL into an HTTP URL.
func ParseLabelerURL(labelerDIDOrURL string) string {
if strings.HasPrefix(labelerDIDOrURL, "http://") || strings.HasPrefix(labelerDIDOrURL, "https://") {
return labelerDIDOrURL
}
if strings.HasPrefix(labelerDIDOrURL, "did:web:") {
host := strings.TrimPrefix(labelerDIDOrURL, "did:web:")
host = strings.ReplaceAll(host, "%3A", ":")
return "https://" + host
}
return labelerDIDOrURL
}
// SubscriberFromConfig creates a Subscriber from a labeler DID/URL config value.
// Returns nil if labelerDIDOrURL is empty.
func SubscriberFromConfig(labelerDIDOrURL string, database *sql.DB) *Subscriber {
if labelerDIDOrURL == "" {
// SubscriberFromConfig creates a Subscriber from a labeler DID config value.
// Returns nil if labelerDID is empty.
func SubscriberFromConfig(labelerDID string, database *sql.DB) *Subscriber {
if labelerDID == "" {
return nil
}
labelerURL := ParseLabelerURL(labelerDIDOrURL)
return NewSubscriber(labelerDIDOrURL, labelerURL, database)
return NewSubscriber(labelerDID, database)
}

View File

@@ -352,21 +352,11 @@ func (nr *NamespaceResolver) Repository(ctx context.Context, name reference.Name
return nil, fmt.Errorf("failed to resolve hold URL for %s: %w", holdDID, err)
}
// Auto-reconcile crew membership on first push/pull
// This ensures users can push immediately after docker login without web sign-in
// EnsureCrewMembership is best-effort and logs errors without failing the request
// Run synchronously to ensure crew record exists before write access checks
// (returns quickly if already a member - hold returns 200/201)
if holdDID != "" && nr.refresher != nil {
slog.Debug("Auto-reconciling crew membership", "component", "registry/middleware", "did", did, "hold_did", holdDID)
ownerDID := did
ownerPDS := pdsEndpoint
refresher := nr.refresher
storage.EnsureCrewMembership(ctx, ownerDID, holdDID, nr.authorizer,
func(ctx context.Context, holdDID string) (string, error) {
return auth.GetOrFetchServiceToken(ctx, refresher, ownerDID, holdDID, ownerPDS)
})
}
// Crew reconciliation moved to the auth-phase push gate
// (pkg/appview/authgate). The JWT carries authorization for its short
// lifetime, so /v2/* doesn't need to re-check membership per blob. The
// hold-side requireBlobWriteAccess middleware remains as the eventual
// stopgap if reconciliation hasn't propagated yet.
// Get service token for hold authentication (only if authenticated)
// Use validation cache to prevent concurrent requests from racing on OAuth/DPoP

View File

@@ -21,6 +21,7 @@ import (
"github.com/go-chi/chi/v5"
chimiddleware "github.com/go-chi/chi/v5/middleware"
"atcr.io/pkg/appview/authgate"
"atcr.io/pkg/appview/db"
"atcr.io/pkg/appview/holdhealth"
"atcr.io/pkg/appview/jetstream"
@@ -557,6 +558,17 @@ func NewAppViewServer(cfg *Config, branding *BrandingOverrides) (*AppViewServer,
tokenHandler.SetOAuthSessionValidator(s.Refresher)
// Auth-phase gate: crew reconciliation for any token request, plus
// hold membership + quota for non-wildcard push. Gating here means
// the JWT carries the authorization, so /v2/* doesn't need to
// re-check on every blob.
tokenHandler.SetAuthorizer(authgate.New(s.Database, s.HoldAuthorizer, s.Refresher, defaultHoldDID))
// Bind the registry JWT lifetime to the AppView↔hold service-auth.
// Pre-minting the service-auth here lets us stamp the JWT's exp
// from the cached expiry, so both expire concurrently.
tokenHandler.SetServiceAuthFetcher(authgate.NewServiceAuthFetcher(s.Database, s.Refresher, defaultHoldDID))
// Token post-auth callback (closure captures s for hook dispatch)
tokenHandler.SetPostAuthCallback(func(ctx context.Context, did, handle, pdsEndpoint, accessToken string) error {
slog.Debug("Token post-auth callback", "component", "appview/callback", "did", did)
@@ -569,19 +581,9 @@ func NewAppViewServer(cfg *Config, branding *BrandingOverrides) (*AppViewServer,
slog.Debug("Profile ensured with default hold", "component", "appview/callback", "did", did, "default_hold_did", defaultHoldDID)
}
// Self-register as crew of the user's defaultHold so the first docker
// pull/push doesn't 404 because the hold has no crew record for them.
// The web OAuth callback already does this for OAuth flows; this is
// the parity path for app-password logins (only callers of this hook).
if profile, err := storage.GetProfile(ctx, atprotoClient); err == nil && profile != nil && profile.DefaultHold != "" {
go func(userDID, pdsEndpoint, holdDID string, authorizer auth.HoldAuthorizer) {
bgCtx := context.Background()
storage.EnsureCrewMembership(bgCtx, userDID, holdDID, authorizer,
func(ctx context.Context, holdDID string) (string, error) {
return auth.GetOrFetchServiceTokenWithAppPassword(ctx, userDID, holdDID, pdsEndpoint)
})
}(did, pdsEndpoint, profile.DefaultHold, s.HoldAuthorizer)
}
// Crew enrollment is handled synchronously by authgate.Authorize on
// the same /auth/token request, which also warms the approval cache.
// No duplicate goroutine needed here.
// Run consumer hooks
for _, hook := range s.tokenHooks {
@@ -640,26 +642,38 @@ func NewAppViewServer(cfg *Config, branding *BrandingOverrides) (*AppViewServer,
return s, nil
}
// Serve starts the HTTP server and blocks until shutdown signal.
// Serve starts the HTTP server on the configured address and blocks until
// shutdown signal.
func (s *AppViewServer) Serve() error {
listener, err := net.Listen("tcp", s.Config.Server.Addr)
if err != nil {
return fmt.Errorf("failed to create listener: %w", err)
}
return s.ServeWithListener(listener)
}
// ServeWithListener starts the HTTP server on the provided listener and
// blocks until a shutdown signal arrives or Shutdown() is called. Tests use
// this to bind a 127.0.0.1:0 listener and learn the assigned port before
// driving requests.
func (s *AppViewServer) ServeWithListener(listener net.Listener) error {
s.httpServer = &http.Server{
Handler: s.Router,
}
stop := make(chan os.Signal, 1)
signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
defer signal.Stop(stop)
errChan := make(chan error, 1)
serveErr := make(chan error, 1)
go func() {
slog.Info("Starting registry server", "addr", s.Config.Server.Addr)
if err := s.httpServer.Serve(listener); err != nil && err != http.ErrServerClosed {
errChan <- err
slog.Info("Starting registry server", "addr", listener.Addr().String())
err := s.httpServer.Serve(listener)
if err != nil && err != http.ErrServerClosed {
serveErr <- err
return
}
serveErr <- nil
}()
select {
@@ -684,19 +698,32 @@ func (s *AppViewServer) Serve() error {
logging.Shutdown()
return fmt.Errorf("server shutdown error: %w", err)
}
case err := <-errChan:
case err := <-serveErr:
s.healthWorker.Stop()
if s.workerCancel != nil {
s.workerCancel()
}
logging.Shutdown()
return fmt.Errorf("server error: %w", err)
if err != nil {
return fmt.Errorf("server error: %w", err)
}
return nil
}
logging.Shutdown()
return nil
}
// Shutdown gracefully stops the HTTP server. Safe to call from tests that
// don't want to send SIGTERM. Production code goes through the signal handler
// inside ServeWithListener.
func (s *AppViewServer) Shutdown(ctx context.Context) error {
if s.httpServer == nil {
return nil
}
return s.httpServer.Shutdown(ctx)
}
// DomainRoutingMiddleware enforces three-tier domain routing:
//
// 1. UI domain (BaseURL hostname): serves web UI, auth, and static assets.
@@ -861,6 +888,15 @@ func (s *AppViewServer) handleDIDDocument(w http.ResponseWriter, r *http.Request
func (s *AppViewServer) initializeJetstream() {
jetstreamURLs := s.Config.Jetstream.URLs
// Explicitly empty URLs disables Jetstream. The YAML config always
// populates a default list, so the only way to reach this branch is to
// blank the slice in code (tests, embedded deployments). Without this
// check, NewWorker silently falls back to the public Bluesky endpoint.
if len(jetstreamURLs) == 0 {
slog.Info("Jetstream disabled (no URLs configured)", "component", "jetstream")
return
}
go func() {
worker := jetstream.NewWorker(s.Database, jetstreamURLs, 0)
// Set webhook dispatcher on live worker (backfill skips dispatch)

View File

@@ -94,49 +94,6 @@ func (p *ProxyBlobStore) checkReadAccess(ctx context.Context) error {
return nil
}
// checkWriteAccess validates that the user has write access to blobs in this hold.
// With no authorizer configured the check is skipped (permissive); an error from
// the authorizer itself (network, PDS) is distinct from an explicit denial.
func (p *ProxyBlobStore) checkWriteAccess(ctx context.Context) error {
	authz := p.ctx.Authorizer
	if authz == nil {
		// No authorizer wired up — treat every write as allowed.
		slog.Debug("Write access check skipped - no authorizer configured",
			"component", "proxy_blob_store")
		return nil
	}

	slog.Debug("Checking write access",
		"component", "proxy_blob_store",
		"user_did", p.ctx.DID,
		"hold_did", p.ctx.HoldDID)

	ok, err := authz.CheckWriteAccess(ctx, p.ctx.HoldDID, p.ctx.DID)
	if err != nil {
		// The authorization check itself failed (network, PDS error, etc.).
		slog.Error("Write access authorization check failed",
			"component", "proxy_blob_store",
			"user_did", p.ctx.DID,
			"hold_did", p.ctx.HoldDID,
			"denial_reason", "authorization_check_error",
			"error", err)
		return fmt.Errorf("authorization check failed: %w", err)
	}
	if !ok {
		// Explicit denial; the authorizer logs the detailed reason at DEBUG.
		slog.Warn("Write access denied",
			"component", "proxy_blob_store",
			"user_did", p.ctx.DID,
			"hold_did", p.ctx.HoldDID,
			"denial_reason", "access_denied_by_authorizer",
			"hint", "check DEBUG logs for specific denial reason (denial_reason field)")
		return errcode.ErrorCodeDenied.WithMessage(fmt.Sprintf("write access denied to hold %s", p.ctx.HoldDID))
	}

	slog.Debug("Write access allowed",
		"component", "proxy_blob_store",
		"user_did", p.ctx.DID,
		"hold_did", p.ctx.HoldDID)
	return nil
}
// Stat returns the descriptor for a blob
func (p *ProxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
// Check read access
@@ -257,12 +214,12 @@ func (p *ProxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (io.ReadS
// Put stores a blob using the multipart upload flow
// This ensures all uploads go through the same XRPC path
//
// Write authorization is gated at /auth/token (pkg/appview/authgate); the
// JWT carries the resolved authorization for its lifetime. Hold-side
// requireBlobWriteAccess is the final defense (validates the service-token
// audience). No re-check needed here.
func (p *ProxyBlobStore) Put(ctx context.Context, mediaType string, content []byte) (distribution.Descriptor, error) {
// Check write access (fast-fail before starting multipart upload)
if err := p.checkWriteAccess(ctx); err != nil {
return distribution.Descriptor{}, err
}
// Calculate digest
dgst := digest.FromBytes(content)
@@ -318,13 +275,11 @@ func (p *ProxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r
return nil
}
// Create returns a blob writer for uploading using multipart upload
// Create returns a blob writer for uploading using multipart upload.
//
// Write authorization is gated at /auth/token; see ProxyBlobStore.Put for
// the rationale on why we don't re-check here.
func (p *ProxyBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
// Check write access
if err := p.checkWriteAccess(ctx); err != nil {
return nil, err
}
// Parse options
var opts distribution.CreateOptions
for _, option := range options {

View File

@@ -12,9 +12,10 @@ import (
)
var (
// Shared identity directory instance (singleton)
// Shared identity directory instance. Lazily initialized on first GetDirectory()
// call. Tests may swap it out via SetDirectory().
sharedDirectory identity.Directory
directoryOnce sync.Once
directoryMu sync.Mutex
// testMode allows HTTP did:web resolution (IPs, non-TLS) for local development.
// Set via SetTestMode() on startup.
@@ -32,45 +33,58 @@ func IsTestMode() bool {
return testMode
}
// GetDirectory returns a shared identity.Directory instance with a 24-hour cache TTL.
// This is based on indigo's DefaultDirectory() with event-driven cache invalidation.
//
// Cache entries are invalidated via Jetstream events (identity changes, account status)
// which allows for a longer TTL while maintaining freshness. The Purge() method is called
// when identity or account events are received, ensuring the cache reflects real-time changes.
// SetDirectory replaces the shared identity.Directory used by all resolver
// helpers. Intended for tests that wire in a fake directory; production code
// should rely on the lazy default from GetDirectory() instead.
func SetDirectory(d identity.Directory) {
	directoryMu.Lock()
	sharedDirectory = d
	directoryMu.Unlock()
}
// GetDirectory returns the shared identity.Directory. On first call (and if
// SetDirectory has not been used), it constructs an indigo cached directory
// with a 24h TTL backed by Jetstream event-driven invalidation (see
// newDefaultDirectory for the configuration).
//
// Using a shared instance ensures all identity lookups across the application
// use the same cache, which is more memory-efficient and provides better cache
// hit rates.
//
// NOTE: the previous sync.Once-based initialization was replaced by this
// mutex-guarded nil check so SetDirectory can inject a directory before or
// after the first lookup; the stale Once-based body has been removed.
func GetDirectory() identity.Directory {
	directoryMu.Lock()
	defer directoryMu.Unlock()
	if sharedDirectory == nil {
		// Lazy default: only built when no test directory was injected
		// via SetDirectory() before the first lookup.
		sharedDirectory = newDefaultDirectory()
	}
	return sharedDirectory
}
// newDefaultDirectory builds the production identity directory: an indigo
// BaseDirectory wrapped in a large cache whose entries are kept fresh by
// Jetstream event-driven invalidation.
func newDefaultDirectory() identity.Directory {
	transport := &http.Transport{
		// Kept short; services doing heavy handle resolution might want
		// ~100ms. This also affects PLC connections, but not too badly.
		IdleConnTimeout: time.Millisecond * 1000,
		MaxIdleConns:    100,
	}
	baseDir := identity.BaseDirectory{
		PLCURL: identity.DefaultPLCURL,
		HTTPClient: http.Client{
			Timeout:   time.Second * 10,
			Transport: transport,
		},
		Resolver: net.Resolver{
			Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
				dialer := net.Dialer{Timeout: time.Second * 3}
				return dialer.DialContext(ctx, network, address)
			},
		},
		TryAuthoritativeDNS: true,
		// primary Bluesky PDS instance only supports HTTP resolution method
		SkipDNSDomainSuffixes: []string{".bsky.social"},
		UserAgent:             "indigo-identity/" + versioninfo.Short(),
	}
	// Cache configuration: 250,000 entries; 24h hit TTL (Jetstream events
	// provide freshness); 2m error TTL; 5m invalid-handle TTL.
	return identity.NewCacheDirectory(&baseDir, 250_000, time.Hour*24, time.Minute*2, time.Minute*5)
}

View File

@@ -14,69 +14,74 @@ import (
"time"
)
// serviceTokenEntry is one cached service token plus the instant after which
// the cache treats it as stale (the JWT's exp minus the 10s safety margin, or
// the 50s fallback TTL when the JWT could not be parsed — see Cache.Set).
type serviceTokenEntry struct {
	token     string    // raw PDS-issued service-auth JWT
	expiresAt time.Time // cache-local expiry; entry is evicted on access after this
}
// Global cache for service tokens (DID:HoldDID -> token)
// Service tokens are JWTs issued by a user's PDS to authorize AppView to act on their behalf
// when communicating with hold services. These tokens are scoped to specific holds and have
// limited lifetime (typically 60s, can request up to 5min).
var (
globalServiceTokens = make(map[string]*serviceTokenEntry)
globalServiceTokensMu sync.RWMutex
)
// Cache stores per-(DID, hold DID) service tokens with automatic expiry.
// The zero value is not usable; construct via NewCache. A default package
// instance backs the GetServiceToken/SetServiceToken/... wrappers; tests
// that need isolation can construct their own.
type Cache struct {
	mu     sync.RWMutex                  // guards tokens
	tokens map[string]*serviceTokenEntry // key is did + ":" + holdDID
}
// GetServiceToken retrieves a cached service token for the given DID and hold DID
// Returns empty string if no valid cached token exists
func GetServiceToken(did, holdDID string) (token string, expiresAt time.Time) {
// NewCache constructs an empty, ready-to-use Cache.
func NewCache() *Cache {
	return &Cache{
		tokens: make(map[string]*serviceTokenEntry),
	}
}
// defaultCache backs the package-level wrappers (GetServiceToken, etc.).
// Existing callers reach it transparently via those wrappers; tests that
// need isolation should construct a fresh Cache rather than mutate this one.
var defaultCache = NewCache()
// Get returns the cached token for (did, holdDID) and its expiry. Expired
// entries are evicted lazily on access and reported as a miss (zero values).
func (c *Cache) Get(did, holdDID string) (string, time.Time) {
	key := did + ":" + holdDID

	c.mu.RLock()
	entry, ok := c.tokens[key]
	c.mu.RUnlock()

	if !ok {
		return "", time.Time{}
	}
	if time.Now().After(entry.expiresAt) {
		// Stale entry — drop it under the write lock before reporting a miss.
		c.mu.Lock()
		delete(c.tokens, key)
		c.mu.Unlock()
		return "", time.Time{}
	}
	return entry.token, entry.expiresAt
}
// SetServiceToken stores a service token in the cache
// Automatically parses the JWT to extract the expiry time
// Applies a 10-second safety margin (cache expires 10s before actual JWT expiry)
func SetServiceToken(did, holdDID, token string) error {
// Set stores token for (did, holdDID), parsing its JWT exp claim and
// applying a 10s safety margin so the cache expires before the real
// token does. Falls back to a 50s TTL if the JWT can't be parsed.
func (c *Cache) Set(did, holdDID, token string) error {
cacheKey := did + ":" + holdDID
// Parse JWT to extract expiry (don't verify signature - we trust the PDS)
expiry, err := parseJWTExpiry(token)
if err != nil {
// If parsing fails, use default 50s TTL (conservative fallback)
slog.Warn("Failed to parse JWT expiry, using default 50s", "error", err, "cacheKey", cacheKey)
expiry = time.Now().Add(50 * time.Second)
} else {
// Apply 10s safety margin to avoid using nearly-expired tokens
expiry = expiry.Add(-10 * time.Second)
}
globalServiceTokensMu.Lock()
globalServiceTokens[cacheKey] = &serviceTokenEntry{
c.mu.Lock()
c.tokens[cacheKey] = &serviceTokenEntry{
token: token,
expiresAt: expiry,
}
globalServiceTokensMu.Unlock()
c.mu.Unlock()
slog.Debug("Cached service token",
"cacheKey", cacheKey,
@@ -85,23 +90,121 @@ func SetServiceToken(did, holdDID, token string) error {
return nil
}
// parseJWTExpiry extracts the expiry time from a JWT without verifying the signature
// We trust tokens from the user's PDS, so signature verification isn't needed here
// Manually decodes the JWT payload to avoid algorithm compatibility issues
// Invalidate removes the cached entry for (did, holdDID). No-op if absent.
func (c *Cache) Invalidate(did, holdDID string) {
	key := did + ":" + holdDID
	c.mu.Lock()
	delete(c.tokens, key)
	c.mu.Unlock()
	slog.Debug("Invalidated service token", "cacheKey", key)
}
// Stats returns total/valid/expired counts for debugging. Expired entries
// are counted but not removed (use CleanExpired for that).
func (c *Cache) Stats() map[string]any {
	c.mu.RLock()
	defer c.mu.RUnlock()

	now := time.Now()
	valid, expired := 0, 0
	for _, e := range c.tokens {
		if now.Before(e.expiresAt) {
			valid++
		} else {
			expired++
		}
	}
	return map[string]any{
		"total_entries":  len(c.tokens),
		"valid_tokens":   valid,
		"expired_tokens": expired,
	}
}
// CleanExpired removes all expired entries in one pass under the write lock.
func (c *Cache) CleanExpired() {
	c.mu.Lock()
	defer c.mu.Unlock()

	now := time.Now()
	removed := 0
	for k, e := range c.tokens {
		if now.After(e.expiresAt) {
			delete(c.tokens, k)
			removed++
		}
	}
	if removed > 0 {
		slog.Debug("Cleaned expired service tokens", "count", removed)
	}
}
// Clear removes every entry. Intended for tests that need isolation
// between subtests, not for production code.
func (c *Cache) Clear() {
	c.mu.Lock()
	defer c.mu.Unlock()
	for key := range c.tokens {
		delete(c.tokens, key)
	}
}
// GetServiceToken returns the cached service token for (did, holdDID).
// Returns ("", zero time) if absent or expired. Delegates to the default
// package cache; tests that need isolation should construct a Cache.
func GetServiceToken(did, holdDID string) (token string, expiresAt time.Time) {
	token, expiresAt = defaultCache.Get(did, holdDID)
	return token, expiresAt
}
// SetServiceToken stores token under (did, holdDID) in the default package
// cache. The standard 10s safety margin is applied against the JWT's exp
// claim (see Cache.Set).
func SetServiceToken(did, holdDID, token string) error {
	return defaultCache.Set(did, holdDID, token)
}
// InvalidateServiceToken drops (did, holdDID) from the default cache, e.g.
// when a token turns out to be invalid or the user's session has expired.
func InvalidateServiceToken(did, holdDID string) {
	defaultCache.Invalidate(did, holdDID)
}
// GetCacheStats reports statistics about the default cache for debugging.
func GetCacheStats() map[string]any {
	return defaultCache.Stats()
}
// CleanExpiredTokens removes expired entries from the default cache. Expired
// entries are also evicted lazily on access, so this is optional hygiene.
func CleanExpiredTokens() {
	defaultCache.CleanExpired()
}
// DefaultCache exposes the package-level cache behind the
// GetServiceToken/SetServiceToken/... wrappers. Callers (notably
// ServiceAuthFetcher) use it to read back a value one of those
// wrappers wrote.
func DefaultCache() *Cache {
	return defaultCache
}
// parseJWTExpiry extracts the exp claim from a JWT without verifying its
// signature. We trust tokens from the user's PDS, so signature
// verification isn't needed here.
func parseJWTExpiry(tokenString string) (time.Time, error) {
// JWT format: header.payload.signature
parts := strings.Split(tokenString, ".")
if len(parts) != 3 {
return time.Time{}, fmt.Errorf("invalid JWT format: expected 3 parts, got %d", len(parts))
}
// Decode the payload (second part)
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
if err != nil {
return time.Time{}, fmt.Errorf("failed to decode JWT payload: %w", err)
}
// Parse the JSON payload
var claims struct {
Exp int64 `json:"exp"`
}
@@ -115,61 +218,3 @@ func parseJWTExpiry(tokenString string) (time.Time, error) {
return time.Unix(claims.Exp, 0), nil
}
// InvalidateServiceToken removes a service token from the cache
// Used when we detect that a token is invalid or the user's session has expired
func InvalidateServiceToken(did, holdDID string) {
cacheKey := did + ":" + holdDID
globalServiceTokensMu.Lock()
delete(globalServiceTokens, cacheKey)
globalServiceTokensMu.Unlock()
slog.Debug("Invalidated service token", "cacheKey", cacheKey)
}
// GetCacheStats returns statistics about the service token cache for debugging
func GetCacheStats() map[string]any {
globalServiceTokensMu.RLock()
defer globalServiceTokensMu.RUnlock()
validCount := 0
expiredCount := 0
now := time.Now()
for _, entry := range globalServiceTokens {
if now.Before(entry.expiresAt) {
validCount++
} else {
expiredCount++
}
}
return map[string]any{
"total_entries": len(globalServiceTokens),
"valid_tokens": validCount,
"expired_tokens": expiredCount,
}
}
// CleanExpiredTokens removes expired tokens from the cache
// Can be called periodically to prevent unbounded growth (though expired tokens
// are also removed lazily on access)
func CleanExpiredTokens() {
globalServiceTokensMu.Lock()
defer globalServiceTokensMu.Unlock()
now := time.Now()
removed := 0
for key, entry := range globalServiceTokens {
if now.After(entry.expiresAt) {
delete(globalServiceTokens, key)
removed++
}
}
if removed > 0 {
slog.Debug("Cleaned expired service tokens", "count", removed)
}
}

View File

@@ -6,10 +6,7 @@ import (
)
func TestGetServiceToken_NotCached(t *testing.T) {
// Clear cache first
globalServiceTokensMu.Lock()
globalServiceTokens = make(map[string]*serviceTokenEntry)
globalServiceTokensMu.Unlock()
defaultCache.Clear()
did := "did:plc:test123"
holdDID := "did:web:hold.example.com"
@@ -24,10 +21,7 @@ func TestGetServiceToken_NotCached(t *testing.T) {
}
func TestSetServiceToken_ManualExpiry(t *testing.T) {
// Clear cache first
globalServiceTokensMu.Lock()
globalServiceTokens = make(map[string]*serviceTokenEntry)
globalServiceTokensMu.Unlock()
defaultCache.Clear()
did := "did:plc:test123"
holdDID := "did:web:hold.example.com"
@@ -57,17 +51,19 @@ func TestSetServiceToken_ManualExpiry(t *testing.T) {
}
func TestGetServiceToken_Expired(t *testing.T) {
// Manually insert an expired token
// Manually insert an expired token by reaching into the cache. Set()
// would apply the safety margin which is the opposite of what we
// want here — we want a guaranteed-stale entry.
did := "did:plc:test123"
holdDID := "did:web:hold.example.com"
cacheKey := did + ":" + holdDID
globalServiceTokensMu.Lock()
globalServiceTokens[cacheKey] = &serviceTokenEntry{
defaultCache.mu.Lock()
defaultCache.tokens[cacheKey] = &serviceTokenEntry{
token: "expired_token",
expiresAt: time.Now().Add(-1 * time.Hour), // 1 hour ago
expiresAt: time.Now().Add(-1 * time.Hour),
}
globalServiceTokensMu.Unlock()
defaultCache.mu.Unlock()
// Try to get - should return empty since expired
token, expiresAt := GetServiceToken(did, holdDID)
@@ -79,9 +75,9 @@ func TestGetServiceToken_Expired(t *testing.T) {
}
// Verify token was removed from cache
globalServiceTokensMu.RLock()
_, exists := globalServiceTokens[cacheKey]
globalServiceTokensMu.RUnlock()
defaultCache.mu.RLock()
_, exists := defaultCache.tokens[cacheKey]
defaultCache.mu.RUnlock()
if exists {
t.Error("Expected expired token to be removed from cache")
@@ -89,7 +85,8 @@ func TestGetServiceToken_Expired(t *testing.T) {
}
func TestInvalidateServiceToken(t *testing.T) {
// Set a token
defaultCache.Clear()
did := "did:plc:test123"
holdDID := "did:web:hold.example.com"
token := "test_token"
@@ -99,16 +96,13 @@ func TestInvalidateServiceToken(t *testing.T) {
t.Fatalf("SetServiceToken() error = %v", err)
}
// Verify it's cached
cachedToken, _ := GetServiceToken(did, holdDID)
if cachedToken != token {
t.Fatal("Token should be cached")
}
// Invalidate
InvalidateServiceToken(did, holdDID)
// Verify it's gone
cachedToken, _ = GetServiceToken(did, holdDID)
if cachedToken != "" {
t.Error("Expected token to be invalidated")
@@ -116,31 +110,25 @@ func TestInvalidateServiceToken(t *testing.T) {
}
func TestCleanExpiredTokens(t *testing.T) {
// Clear cache first
globalServiceTokensMu.Lock()
globalServiceTokens = make(map[string]*serviceTokenEntry)
globalServiceTokensMu.Unlock()
defaultCache.Clear()
// Add expired and valid tokens
globalServiceTokensMu.Lock()
globalServiceTokens["expired:hold1"] = &serviceTokenEntry{
defaultCache.mu.Lock()
defaultCache.tokens["expired:hold1"] = &serviceTokenEntry{
token: "expired1",
expiresAt: time.Now().Add(-1 * time.Hour),
}
globalServiceTokens["valid:hold2"] = &serviceTokenEntry{
defaultCache.tokens["valid:hold2"] = &serviceTokenEntry{
token: "valid1",
expiresAt: time.Now().Add(1 * time.Hour),
}
globalServiceTokensMu.Unlock()
defaultCache.mu.Unlock()
// Clean expired
CleanExpiredTokens()
// Verify only valid token remains
globalServiceTokensMu.RLock()
_, expiredExists := globalServiceTokens["expired:hold1"]
_, validExists := globalServiceTokens["valid:hold2"]
globalServiceTokensMu.RUnlock()
defaultCache.mu.RLock()
_, expiredExists := defaultCache.tokens["expired:hold1"]
_, validExists := defaultCache.tokens["valid:hold2"]
defaultCache.mu.RUnlock()
if expiredExists {
t.Error("Expected expired token to be removed")
@@ -151,29 +139,24 @@ func TestCleanExpiredTokens(t *testing.T) {
}
func TestGetCacheStats(t *testing.T) {
// Clear cache first
globalServiceTokensMu.Lock()
globalServiceTokens = make(map[string]*serviceTokenEntry)
globalServiceTokensMu.Unlock()
defaultCache.Clear()
// Add some tokens
globalServiceTokensMu.Lock()
globalServiceTokens["did1:hold1"] = &serviceTokenEntry{
defaultCache.mu.Lock()
defaultCache.tokens["did1:hold1"] = &serviceTokenEntry{
token: "token1",
expiresAt: time.Now().Add(1 * time.Hour),
}
globalServiceTokens["did2:hold2"] = &serviceTokenEntry{
defaultCache.tokens["did2:hold2"] = &serviceTokenEntry{
token: "token2",
expiresAt: time.Now().Add(1 * time.Hour),
}
globalServiceTokensMu.Unlock()
defaultCache.mu.Unlock()
stats := GetCacheStats()
if stats == nil {
t.Fatal("Expected non-nil stats")
}
// GetCacheStats returns map[string]any with "total_entries" key
totalEntries, ok := stats["total_entries"].(int)
if !ok {
t.Fatalf("Expected total_entries in stats map, got: %v", stats)
@@ -183,7 +166,6 @@ func TestGetCacheStats(t *testing.T) {
t.Errorf("Expected 2 entries, got %d", totalEntries)
}
// Also check valid_tokens
validTokens, ok := stats["valid_tokens"].(int)
if !ok {
t.Fatal("Expected valid_tokens in stats map")
@@ -193,3 +175,60 @@ func TestGetCacheStats(t *testing.T) {
t.Errorf("Expected 2 valid tokens, got %d", validTokens)
}
}
// TestCache_StructAPI_IsolatedInstances verifies that freshly constructed
// Cache values share no state with each other or with defaultCache —
// otherwise tests that expect isolation would silently fail.
func TestCache_StructAPI_IsolatedInstances(t *testing.T) {
	c1 := NewCache()
	c2 := NewCache()

	did := "did:plc:alice"
	holdDID := "did:web:hold.test"

	// Seed c1 directly (bypassing Set) so no JWT parsing or safety margin
	// is involved — the entry is simply valid for an hour.
	c1.mu.Lock()
	c1.tokens[did+":"+holdDID] = &serviceTokenEntry{
		token:     "tok-c1",
		expiresAt: time.Now().Add(1 * time.Hour),
	}
	c1.mu.Unlock()

	if tok, _ := c1.Get(did, holdDID); tok != "tok-c1" {
		t.Errorf("c1.Get() = %q, want tok-c1", tok)
	}
	// The sibling instance must not see the write...
	if tok, _ := c2.Get(did, holdDID); tok != "" {
		t.Errorf("c2.Get() = %q, want empty (instances must not share state)", tok)
	}
	// ...and neither must the package default.
	if tok, _ := GetServiceToken(did, holdDID); tok == "tok-c1" {
		t.Error("defaultCache should not see writes to c1")
	}

	// Clear empties the instance it is called on.
	c1.Clear()
	if tok, _ := c1.Get(did, holdDID); tok != "" {
		t.Errorf("c1.Get() after Clear() = %q, want empty", tok)
	}
}
// TestCache_PackageFunctionsDelegateToDefault verifies the package-level
// wrappers route to defaultCache, observable through DefaultCache().
func TestCache_PackageFunctionsDelegateToDefault(t *testing.T) {
	defaultCache.Clear()

	did := "did:plc:bob"
	holdDID := "did:web:hold.test"

	// "wrapper-tok" is not a parseable JWT, so Set falls back to the 50s TTL
	// — still a non-zero expiry, which is all this test needs.
	if err := SetServiceToken(did, holdDID, "wrapper-tok"); err != nil {
		t.Fatalf("SetServiceToken: %v", err)
	}

	// The wrapper's write must be visible through the default-cache handle.
	tok, exp := DefaultCache().Get(did, holdDID)
	if tok != "wrapper-tok" {
		t.Errorf("DefaultCache().Get() = %q, want wrapper-tok", tok)
	}
	if exp.IsZero() {
		t.Error("DefaultCache().Get() expiry is zero")
	}

	// And the invalidation wrapper must remove it again.
	InvalidateServiceToken(did, holdDID)
	if tok, _ := DefaultCache().Get(did, holdDID); tok != "" {
		t.Errorf("after InvalidateServiceToken, DefaultCache().Get() = %q, want empty", tok)
	}
}

View File

@@ -12,6 +12,7 @@ import (
"atcr.io/pkg/appview/db"
"atcr.io/pkg/atproto"
"atcr.io/pkg/auth"
"github.com/distribution/distribution/v3/registry/api/errcode"
"github.com/go-chi/render"
)
@@ -30,6 +31,36 @@ type OAuthSessionValidator interface {
ValidateSession(ctx context.Context, did string) error
}
// Authorizer gates issuance of registry JWTs at the auth phase.
// Implementations decide which sub-checks apply based on `access`:
// crew reconciliation runs for any token request (so first-time CLI users
// can pull from a private hold), while membership and quota enforcement
// only apply to non-wildcard push scopes. The Docker spec model is to
// embed authorization in the JWT and trust it for its short lifetime;
// this interface is the single point where those gates run.
type Authorizer interface {
	// Authorize is called once per token issuance, after the requester's
	// ATProto credentials have been validated. Returns nil if `access` is
	// allowed, or an error describing the denial reason. The error message
	// surfaces to the OCI client inside the distribution error JSON body
	// (the handler wraps it in a DENIED distribution error).
	//
	// authMethod is one of AuthMethodOAuth or AuthMethodAppPassword and lets
	// the implementation pick the right service-token fetcher when it needs
	// to talk to the hold (e.g. for crew reconciliation).
	Authorize(ctx context.Context, did, authMethod string, access []auth.AccessEntry) error
}
// ServiceAuthFetcher pre-mints the AppView↔hold service-auth at /auth/token
// time so the registry JWT can be bound to its lifetime. JWT and service-auth
// then expire concurrently; when Docker hits 401, the next /auth/token call
// mints both fresh in lockstep.
type ServiceAuthFetcher interface {
	// Fetch ensures a service-auth exists for (did, hold derived from did)
	// and returns its expiry. Returns (zero time, nil) when the user has no
	// hold configured — the caller then falls back to the issuer's default
	// expiration rather than capping the JWT.
	Fetch(ctx context.Context, did, authMethod string) (expiresAt time.Time, err error)
}
// Handler handles /auth/token requests
type Handler struct {
issuer *Issuer
@@ -37,6 +68,8 @@ type Handler struct {
deviceStore *db.DeviceStore // For validating device secrets
postAuthCallback PostAuthCallback
oauthSessionValidator OAuthSessionValidator
authorizer Authorizer
serviceAuthFetcher ServiceAuthFetcher
}
// NewHandler creates a new token handler
@@ -61,6 +94,20 @@ func (h *Handler) SetOAuthSessionValidator(validator OAuthSessionValidator) {
h.oauthSessionValidator = validator
}
// SetAuthorizer wires the auth-phase gate. Once set, every token request
// runs Authorize after credentials validate; a non-nil error surfaces to
// the client as a 403 (distribution error JSON) and no JWT is issued.
func (h *Handler) SetAuthorizer(authorizer Authorizer) {
	h.authorizer = authorizer
}
// SetServiceAuthFetcher binds JWT issuance to the AppView↔hold service-auth.
// Once set, the handler pre-mints the service-auth and caps the JWT's exp
// at the cached expiry, so both tokens expire concurrently.
func (h *Handler) SetServiceAuthFetcher(fetcher ServiceAuthFetcher) {
	h.serviceAuthFetcher = fetcher
}
// TokenResponse represents the response from /auth/token
type TokenResponse struct {
Token string `json:"token,omitempty"` // Legacy field
@@ -251,8 +298,75 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
// Auth-phase gate (crew reconciliation, plus membership/quota for
// non-wildcard push) and service-auth pre-mint are independent network
// paths — one talks to the hold, the other to the PDS. Run them in
// parallel so a cold /auth/token pays max(both) instead of sum(both).
// Nil-authorizer / nil-fetcher configurations short-circuit each branch
// independently. The gate runs for pull too: first-time CLI users on a
// private hold need crew reconciliation before the hold-side read check
// will accept the resulting JWT.
runGate := h.authorizer != nil
runFetch := h.serviceAuthFetcher != nil
type gateRes struct{ err error }
type fetchRes struct {
expiresAt time.Time
err error
}
var (
gateCh chan gateRes
fetchCh chan fetchRes
)
if runGate {
gateCh = make(chan gateRes, 1)
go func() {
gateCh <- gateRes{err: h.authorizer.Authorize(r.Context(), did, authMethod, access)}
}()
}
if runFetch {
fetchCh = make(chan fetchRes, 1)
go func() {
exp, err := h.serviceAuthFetcher.Fetch(r.Context(), did, authMethod)
fetchCh <- fetchRes{expiresAt: exp, err: err}
}()
}
// Drain the gate first so a denial wins over a transient fetch error.
if runGate {
if res := <-gateCh; res.err != nil {
slog.Info("Authorization denied", "did", did, "error", res.err)
_ = errcode.ServeJSON(w, errcode.ErrorCodeDenied.WithMessage(res.err.Error()))
return
}
}
// Bind JWT lifetime to the AppView↔hold service-auth: pre-mint it now and
// stamp the JWT's exp from the cached expiry. They expire concurrently;
// when Docker hits 401, the next /auth/token call mints both fresh.
issueExp := h.issuer.expiration
if runFetch {
res := <-fetchCh
if res.err != nil {
slog.Warn("service-auth pre-mint failed", "did", did, "error", res.err)
_ = errcode.ServeJSON(w, errcode.ErrorCodeUnavailable.WithMessage(fmt.Sprintf("service-auth fetch failed: %v", res.err)))
return
}
if !res.expiresAt.IsZero() {
// Cap JWT lifetime at the service-auth's expiry. The cache's
// expiresAt already includes a 10s safety margin
// (pkg/auth/cache.go:71), so this guarantees the service-auth
// is still cache-valid for any /v2/* request the JWT can
// authorize. We never extend beyond the configured default.
until := time.Until(res.expiresAt)
if until < issueExp {
issueExp = until
}
}
}
// Issue JWT token
tokenString, err := h.issuer.Issue(did, access, authMethod)
tokenString, err := h.issuer.IssueWithExpiration(did, access, authMethod, issueExp)
if err != nil {
slog.Error("Failed to issue token", "error", err, "did", did)
http.Error(w, fmt.Sprintf("failed to issue token: %v", err), http.StatusInternalServerError)
@@ -263,7 +377,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Return token response
now := time.Now()
expiresIn := int(h.issuer.expiration.Seconds())
expiresIn := int(issueExp.Seconds())
resp := TokenResponse{
Token: tokenString,

View File

@@ -0,0 +1,57 @@
//go:build !race
// +build !race
package token
import (
"net/http"
"net/http/httptest"
"testing"
"time"
)
// TestHandler_GateAndFetchRunConcurrently asserts that the authorizer and
// service-auth fetcher run as parallel goroutines rather than sequentially.
//
// Skipped under -race: the race detector inflates RSA-sign cost enough to
// dominate the wall-clock budget, drowning out the parallel/sequential
// signal being measured. Sequential vs. parallel is a static property of
// the handler, so dropping the timing assertion under race is acceptable.
func TestHandler_GateAndFetchRunConcurrently(t *testing.T) {
	const delay = 200 * time.Millisecond
	// Sequential execution would take ≥2*delay = 400ms. 350ms leaves 150ms
	// of slack over max(200ms) while still flagging a serialized handler.
	const budget = 350 * time.Millisecond

	keyPath := getSharedTestKey(t)
	issuer, err := NewIssuer(keyPath, "atcr.io", "registry", 5*time.Minute)
	if err != nil {
		t.Fatalf("NewIssuer() error = %v", err)
	}

	deviceStore, database := setupTestDeviceStore(t)
	secret := createTestDevice(t, deviceStore, database, "did:plc:alice123", "alice.bsky.social")

	h := NewHandler(issuer, deviceStore)
	h.SetAuthorizer(&stubAuthorizer{delay: delay})
	h.SetServiceAuthFetcher(&stubServiceAuthFetcher{
		expiresAt: time.Now().Add(4 * time.Minute),
		delay:     delay,
	})

	req := httptest.NewRequest(http.MethodGet, "/auth/token?service=registry&scope=repository:alice.bsky.social/myapp:pull,push", nil)
	req.SetBasicAuth("alice", secret)
	rec := httptest.NewRecorder()

	began := time.Now()
	h.ServeHTTP(rec, req)
	took := time.Since(began)

	if rec.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d. Body: %s", rec.Code, rec.Body.String())
	}
	if took > budget {
		t.Errorf("handler took %v; gate and fetch should run in parallel (max ~%v, not sum ~%v)",
			took, delay, 2*delay)
	}
}

View File

@@ -6,6 +6,7 @@ import (
"database/sql"
"encoding/base64"
"encoding/json"
"errors"
"net/http"
"net/http/httptest"
"os"
@@ -16,6 +17,7 @@ import (
"time"
"atcr.io/pkg/appview/db"
"atcr.io/pkg/auth"
)
// Shared test key to avoid generating a new RSA key for each test
@@ -722,6 +724,339 @@ func TestParseBasicAuthDID(t *testing.T) {
}
}
// stubAuthorizer is a configurable Authorizer for testing the gate.
type stubAuthorizer struct {
	called bool          // set to true when Authorize runs
	err    error         // returned by Authorize; non-nil simulates a denial
	delay  time.Duration // optional sleep before returning, for parallelism tests
}
func (s *stubAuthorizer) Authorize(_ context.Context, _, _ string, _ []auth.AccessEntry) error {
s.called = true
if s.delay > 0 {
time.Sleep(s.delay)
}
return s.err
}
// A push-scoped token request must be routed through the gate, and a gate
// denial must surface as a 403 with the distribution-spec DENIED error body.
func TestHandler_Authorizer_DeniesPushScope(t *testing.T) {
	keyPath := getSharedTestKey(t)
	issuer, err := NewIssuer(keyPath, "atcr.io", "registry", 15*time.Minute)
	if err != nil {
		t.Fatalf("NewIssuer() error = %v", err)
	}
	store, database := setupTestDeviceStore(t)
	secret := createTestDevice(t, store, database, "did:plc:alice123", "alice.bsky.social")

	gate := &stubAuthorizer{err: errors.New("quota exceeded: 6000000000 / 5368709120 bytes used")}
	h := NewHandler(issuer, store)
	h.SetAuthorizer(gate)

	req := httptest.NewRequest(http.MethodGet, "/auth/token?service=registry&scope=repository:alice.bsky.social/myapp:pull,push", nil)
	req.SetBasicAuth("alice", secret)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)

	if !gate.called {
		t.Fatal("expected Authorizer to be called for push-scoped token request")
	}
	if rec.Code != http.StatusForbidden {
		t.Errorf("expected 403, got %d. Body: %s", rec.Code, rec.Body.String())
	}
	if !strings.Contains(rec.Body.String(), "quota exceeded") {
		t.Errorf("expected denial reason in body, got: %s", rec.Body.String())
	}
	// Distribution error JSON shape: {"errors":[{"code":"DENIED","message":"..."}]}
	var errBody struct {
		Errors []struct {
			Code    string `json:"code"`
			Message string `json:"message"`
		} `json:"errors"`
	}
	if err := json.Unmarshal(rec.Body.Bytes(), &errBody); err != nil {
		t.Fatalf("decode distribution error JSON: %v. Body: %s", err, rec.Body.String())
	}
	if len(errBody.Errors) == 0 || errBody.Errors[0].Code != "DENIED" {
		t.Errorf("expected errors[0].code = DENIED, got %+v", errBody.Errors)
	}
}
func TestHandler_Authorizer_RunsForPullOnly(t *testing.T) {
	// Even pull-only scopes pass through the gate: first-time CLI users on a
	// private hold need their crew membership reconciled before the
	// hold-side read check runs.
	keyPath := getSharedTestKey(t)
	issuer, err := NewIssuer(keyPath, "atcr.io", "registry", 15*time.Minute)
	if err != nil {
		t.Fatalf("NewIssuer() error = %v", err)
	}
	store, database := setupTestDeviceStore(t)
	secret := createTestDevice(t, store, database, "did:plc:alice123", "alice.bsky.social")

	gate := &stubAuthorizer{} // succeeds
	h := NewHandler(issuer, store)
	h.SetAuthorizer(gate)

	req := httptest.NewRequest(http.MethodGet, "/auth/token?service=registry&scope=repository:alice.bsky.social/myapp:pull", nil)
	req.SetBasicAuth("alice", secret)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)

	if !gate.called {
		t.Error("Authorizer should run for pull-only scopes (drives crew reconciliation)")
	}
	if rec.Code != http.StatusOK {
		t.Errorf("expected 200 for pull-only, got %d. Body: %s", rec.Code, rec.Body.String())
	}
}
// A handler with no gate configured keeps issuing tokens as before — the
// authorizer is strictly opt-in.
func TestHandler_Authorizer_NilIsBackwardsCompatible(t *testing.T) {
	keyPath := getSharedTestKey(t)
	issuer, err := NewIssuer(keyPath, "atcr.io", "registry", 15*time.Minute)
	if err != nil {
		t.Fatalf("NewIssuer() error = %v", err)
	}
	store, database := setupTestDeviceStore(t)
	secret := createTestDevice(t, store, database, "did:plc:alice123", "alice.bsky.social")

	// Deliberately no SetAuthorizer call.
	h := NewHandler(issuer, store)

	req := httptest.NewRequest(http.MethodGet, "/auth/token?service=registry&scope=repository:alice.bsky.social/myapp:pull,push", nil)
	req.SetBasicAuth("alice", secret)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Errorf("expected 200 with no authorizer configured, got %d. Body: %s", rec.Code, rec.Body.String())
	}
}
// stubServiceAuthFetcher is a test double for ServiceAuthFetcher, used to
// exercise the JWT-to-service-auth lifetime binding. Tests configure the
// reported expiry and/or error and an optional delay; `called` records
// whether the handler actually fetched.
type stubServiceAuthFetcher struct {
	called    bool
	expiresAt time.Time
	err       error
	delay     time.Duration // optional sleep before returning, for parallelism tests
}

// Fetch records the invocation, optionally sleeps (so parallelism tests can
// measure overlap), and returns the configured expiry/error pair.
func (s *stubServiceAuthFetcher) Fetch(_ context.Context, _, _ string) (time.Time, error) {
	s.called = true
	if d := s.delay; d > 0 {
		time.Sleep(d)
	}
	return s.expiresAt, s.err
}
// The issued JWT's lifetime must be capped to the service-auth expiry the
// fetcher reports, not the issuer's (longer) configured default.
func TestHandler_ServiceAuthFetcher_BindsJWTExp(t *testing.T) {
	keyPath := getSharedTestKey(t)
	issuer, err := NewIssuer(keyPath, "atcr.io", "registry", 15*time.Minute)
	if err != nil {
		t.Fatalf("NewIssuer() error = %v", err)
	}
	store, database := setupTestDeviceStore(t)
	secret := createTestDevice(t, store, database, "did:plc:alice123", "alice.bsky.social")

	// Service-auth expires in 4 minutes; the 15-minute issuer default must
	// be capped down to that.
	fetcher := &stubServiceAuthFetcher{expiresAt: time.Now().Add(4 * time.Minute)}
	h := NewHandler(issuer, store)
	h.SetServiceAuthFetcher(fetcher)

	req := httptest.NewRequest(http.MethodGet, "/auth/token?service=registry&scope=repository:alice.bsky.social/myapp:pull,push", nil)
	req.SetBasicAuth("alice", secret)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)

	if !fetcher.called {
		t.Fatal("expected ServiceAuthFetcher.Fetch to be called")
	}
	if rec.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d. Body: %s", rec.Code, rec.Body.String())
	}
	var resp TokenResponse
	if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil {
		t.Fatalf("decode response: %v", err)
	}
	// Allow a few seconds of slack for time elapsed during the request.
	if resp.ExpiresIn > 240 || resp.ExpiresIn < 230 {
		t.Errorf("expected expires_in ≈ 240 (capped to service-auth), got %d", resp.ExpiresIn)
	}
}
// A zero expiry with a nil error means "no hold configured"; the handler
// must degrade to the issuer's default lifetime instead of failing.
func TestHandler_ServiceAuthFetcher_NoHoldUsesDefault(t *testing.T) {
	keyPath := getSharedTestKey(t)
	issuer, err := NewIssuer(keyPath, "atcr.io", "registry", 5*time.Minute)
	if err != nil {
		t.Fatalf("NewIssuer() error = %v", err)
	}
	store, database := setupTestDeviceStore(t)
	secret := createTestDevice(t, store, database, "did:plc:alice123", "alice.bsky.social")

	h := NewHandler(issuer, store)
	// Zero time + nil error = "no hold configured" — fall back to default.
	h.SetServiceAuthFetcher(&stubServiceAuthFetcher{expiresAt: time.Time{}})

	req := httptest.NewRequest(http.MethodGet, "/auth/token?service=registry&scope=repository:alice.bsky.social/myapp:pull,push", nil)
	req.SetBasicAuth("alice", secret)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Fatalf("expected 200 with no-hold degradation, got %d. Body: %s", rec.Code, rec.Body.String())
	}
	var resp TokenResponse
	if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil {
		t.Fatalf("decode response: %v", err)
	}
	if resp.ExpiresIn != 300 {
		t.Errorf("expected expires_in = 300 (issuer default), got %d", resp.ExpiresIn)
	}
}
// A fetch error (e.g. the PDS is down) must surface as 503, not as a token
// with a default lifetime the downstream hold would then reject.
func TestHandler_ServiceAuthFetcher_FailureReturns503(t *testing.T) {
	keyPath := getSharedTestKey(t)
	issuer, err := NewIssuer(keyPath, "atcr.io", "registry", 5*time.Minute)
	if err != nil {
		t.Fatalf("NewIssuer() error = %v", err)
	}
	store, database := setupTestDeviceStore(t)
	secret := createTestDevice(t, store, database, "did:plc:alice123", "alice.bsky.social")

	h := NewHandler(issuer, store)
	h.SetServiceAuthFetcher(&stubServiceAuthFetcher{err: errors.New("PDS unreachable")})

	req := httptest.NewRequest(http.MethodGet, "/auth/token?service=registry&scope=repository:alice.bsky.social/myapp:pull,push", nil)
	req.SetBasicAuth("alice", secret)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)

	if rec.Code != http.StatusServiceUnavailable {
		t.Errorf("expected 503 on fetch failure, got %d. Body: %s", rec.Code, rec.Body.String())
	}
}
// With no fetcher configured at all (backward-compat path), tokens carry the
// issuer's default lifetime.
func TestHandler_ServiceAuthFetcher_NilUsesIssuerDefault(t *testing.T) {
	keyPath := getSharedTestKey(t)
	issuer, err := NewIssuer(keyPath, "atcr.io", "registry", 5*time.Minute)
	if err != nil {
		t.Fatalf("NewIssuer() error = %v", err)
	}
	store, database := setupTestDeviceStore(t)
	secret := createTestDevice(t, store, database, "did:plc:alice123", "alice.bsky.social")

	// Deliberately no SetServiceAuthFetcher call.
	h := NewHandler(issuer, store)

	req := httptest.NewRequest(http.MethodGet, "/auth/token?service=registry&scope=repository:alice.bsky.social/myapp:pull,push", nil)
	req.SetBasicAuth("alice", secret)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Fatalf("expected 200 with nil fetcher, got %d. Body: %s", rec.Code, rec.Body.String())
	}
	var resp TokenResponse
	if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil {
		t.Fatalf("decode response: %v", err)
	}
	if resp.ExpiresIn != 300 {
		t.Errorf("expected expires_in = 300 (issuer default), got %d", resp.ExpiresIn)
	}
}
func TestHandler_GateDenialPreemptsFetchError(t *testing.T) {
	// When both branches error, the gate's denial wins: it is drained first
	// and returns 403/DENIED before the fetcher's 503/UNAVAILABLE can
	// surface. Regression guard for the drain-gate-first contract.
	keyPath := getSharedTestKey(t)
	issuer, err := NewIssuer(keyPath, "atcr.io", "registry", 5*time.Minute)
	if err != nil {
		t.Fatalf("NewIssuer() error = %v", err)
	}
	store, database := setupTestDeviceStore(t)
	secret := createTestDevice(t, store, database, "did:plc:alice123", "alice.bsky.social")

	h := NewHandler(issuer, store)
	h.SetAuthorizer(&stubAuthorizer{err: errors.New("crew membership required")})
	h.SetServiceAuthFetcher(&stubServiceAuthFetcher{err: errors.New("PDS unreachable")})

	req := httptest.NewRequest(http.MethodGet, "/auth/token?service=registry&scope=repository:alice.bsky.social/myapp:pull,push", nil)
	req.SetBasicAuth("alice", secret)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)

	if rec.Code != http.StatusForbidden {
		t.Errorf("expected 403 (gate denial wins), got %d. Body: %s", rec.Code, rec.Body.String())
	}
	if !strings.Contains(rec.Body.String(), "crew membership required") {
		t.Errorf("expected gate's error message in body, got: %s", rec.Body.String())
	}
}
func TestHandler_ExpiresInCapZeroFloor(t *testing.T) {
	// Regression guard — see follow-up if a non-negative floor is desired.
	// When the fetcher reports a service-auth that has already expired, the
	// JWT exp-cap arithmetic (`if until < issueExp { issueExp = until }`)
	// lets issueExp go negative. This locks in the current observable
	// behavior so that any future change here is intentional, not silent.
	keyPath := getSharedTestKey(t)
	issuer, err := NewIssuer(keyPath, "atcr.io", "registry", 5*time.Minute)
	if err != nil {
		t.Fatalf("NewIssuer() error = %v", err)
	}
	store, database := setupTestDeviceStore(t)
	secret := createTestDevice(t, store, database, "did:plc:alice123", "alice.bsky.social")

	h := NewHandler(issuer, store)
	h.SetServiceAuthFetcher(&stubServiceAuthFetcher{
		expiresAt: time.Now().Add(-1 * time.Minute), // already expired
	})

	req := httptest.NewRequest(http.MethodGet, "/auth/token?service=registry&scope=repository:alice.bsky.social/myapp:pull,push", nil)
	req.SetBasicAuth("alice", secret)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Fatalf("expected 200 (token still issued, just with short exp), got %d. Body: %s", rec.Code, rec.Body.String())
	}
	var resp TokenResponse
	if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil {
		t.Fatalf("decode response: %v", err)
	}
	// Current behavior: ExpiresIn is negative (≈ -60); the handler applies
	// no floor. Anyone introducing one must update this assertion together
	// with the note above.
	if resp.ExpiresIn >= 0 {
		t.Errorf("expected negative expires_in for already-expired service-auth, got %d", resp.ExpiresIn)
	}
}
func TestTokenHandler_DIDBasicAuth(t *testing.T) {
// Test that a DID passed as BasicAuth username works through the full handler
deviceStore, database := setupTestDeviceStore(t)

View File

@@ -72,15 +72,25 @@ func NewIssuerFromKey(privateKey *rsa.PrivateKey, certDER []byte, issuer, servic
}
}
// Issue creates and signs a new JWT token
// Issue creates and signs a new JWT token using the issuer's configured expiration.
func (i *Issuer) Issue(subject string, access []auth.AccessEntry, authMethod string) (string, error) {
claims := NewClaims(subject, i.issuer, i.service, i.expiration, access, authMethod)
return i.IssueWithExpiration(subject, access, authMethod, i.expiration)
}
// IssueWithExpiration creates and signs a JWT with a per-call expiration. Used
// when the JWT lifetime is bound to a downstream credential whose lifetime can
// be slightly less than the issuer's configured default — e.g. the AppView↔hold
// service-auth, where the cache applies a 10s safety margin against the
// PDS-granted exp.
func (i *Issuer) IssueWithExpiration(subject string, access []auth.AccessEntry, authMethod string, expiration time.Duration) (string, error) {
claims := NewClaims(subject, i.issuer, i.service, expiration, access, authMethod)
slog.Debug("Creating JWT token",
"issuer", i.issuer,
"service", i.service,
"subject", subject,
"access", access)
"access", access,
"expiration", expiration)
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)

View File

@@ -4,6 +4,7 @@ import (
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"os"
"path/filepath"
@@ -165,6 +166,46 @@ func TestIssuer_Issue(t *testing.T) {
}
}
// The per-call expiration passed to IssueWithExpiration must win over the
// issuer's baked-in default, observable as exp - iat in the signed claims.
func TestIssuer_IssueWithExpiration_HonorsCallerDuration(t *testing.T) {
	keyPath := getIssuerSharedTestKey(t)
	// Issuer default is 15 min; the 2-minute per-call value should override.
	issuer, err := NewIssuer(keyPath, "atcr.io", "registry", 15*time.Minute)
	if err != nil {
		t.Fatalf("NewIssuer() error = %v", err)
	}

	tokenString, err := issuer.IssueWithExpiration(
		"did:plc:user123",
		[]auth.AccessEntry{{Type: "repository", Name: "alice/myapp", Actions: []string{"pull"}}},
		AuthMethodOAuth,
		2*time.Minute,
	)
	if err != nil {
		t.Fatalf("IssueWithExpiration() error = %v", err)
	}

	segments := strings.Split(tokenString, ".")
	if len(segments) != 3 {
		t.Fatalf("expected 3 JWT parts, got %d", len(segments))
	}
	rawClaims, err := base64.RawURLEncoding.DecodeString(segments[1])
	if err != nil {
		t.Fatalf("decode payload: %v", err)
	}
	var claims struct {
		Exp int64 `json:"exp"`
		Iat int64 `json:"iat"`
	}
	if err := json.Unmarshal(rawClaims, &claims); err != nil {
		t.Fatalf("unmarshal claims: %v", err)
	}
	if got := claims.Exp - claims.Iat; got != 120 {
		t.Errorf("expected exp - iat = 120, got %d", got)
	}
}
func TestIssuer_Issue_EmptyAccess(t *testing.T) {
keyPath := getIssuerSharedTestKey(t)

View File

@@ -197,10 +197,10 @@ func (s ServerConfig) AppviewURL() string {
// survive a configurable grace window before being collected, preserving
// reversibility.
type LabelerConfig struct {
// DID or URL of the labeler service. Accepts did:web:... (resolved to
// the corresponding HTTPS host) or a raw http/https URL. Empty disables
// labeler integration.
DID string `yaml:"did" comment:"DID or URL of the ATProto labeler (e.g., did:web:labeler.atcr.io). Empty disables labeler integration."`
// DID of the labeler service (did:plc:... or did:web:...). The HTTP
// endpoint is resolved at runtime via the labeler's #atproto_labeler
// service entry in its DID document. Empty disables labeler integration.
DID string `yaml:"did" comment:"DID of the ATProto labeler (did:plc:... or did:web:...). Empty disables labeler integration."`
// Grace window for reversibility. Until a takedown is older than this,
// the GC keeps blobs referenced even though their layer records were
@@ -386,6 +386,11 @@ func LoadConfig(yamlPath string) (*Config, error) {
return nil, fmt.Errorf("database.did_method must be 'web' or 'plc', got %q", cfg.Database.DIDMethod)
}
// Validate labeler DID
if cfg.Labeler.DID != "" && !strings.HasPrefix(cfg.Labeler.DID, "did:") {
return nil, fmt.Errorf("labeler.did must be a DID (did:plc:... or did:web:...), got %q", cfg.Labeler.DID)
}
// Store config path for subsystem config loading (e.g. billing)
cfg.configPath = yamlPath

View File

@@ -10,7 +10,10 @@ import (
"strings"
"time"
"atcr.io/pkg/atproto"
comatproto "github.com/bluesky-social/indigo/api/atproto"
"github.com/bluesky-social/indigo/atproto/syntax"
"github.com/bluesky-social/indigo/events"
"github.com/gorilla/websocket"
)
@@ -37,28 +40,20 @@ type PurgeOutcome struct {
// Subscriber connects to a labeler's subscribeLabels endpoint, mirrors
// takedowns into the local cache, and triggers record purges on the hold.
type Subscriber struct {
labelerURL string
labelerDID string
cache *Cache
purger Purger
stopCh chan struct{}
}
// NewSubscriber builds a subscriber for the given labeler. labelerDIDOrURL
// may be either:
//
// - a did:web identifier (e.g. did:web:labeler.atcr.io) → resolved to https://labeler.atcr.io
// - a raw http/https URL (e.g. http://172.28.0.4:5002 for dev)
//
// The websocket URL is derived from the resolved HTTPS endpoint; the
// labeler's DID (used to filter the Src field on incoming labels) is derived
// the same way the appview's labeler subscriber derives it, so a single
// config field suffices.
func NewSubscriber(labelerDIDOrURL string, cache *Cache, purger Purger) *Subscriber {
httpURL := parseLabelerURL(labelerDIDOrURL)
// NewSubscriber builds a subscriber for the given labeler DID (did:plc or
// did:web). The websocket endpoint is resolved on each (re)connect through
// the shared identity directory's #atproto_labeler service entry, so the
// labeler can move (or fix a misconfigured endpoint) without clients
// redeploying.
func NewSubscriber(labelerDID string, cache *Cache, purger Purger) *Subscriber {
return &Subscriber{
labelerURL: httpURL,
labelerDID: deriveLabelerDID(labelerDIDOrURL, httpURL),
labelerDID: labelerDID,
cache: cache,
purger: purger,
stopCh: make(chan struct{}),
@@ -90,7 +85,7 @@ func (s *Subscriber) run() {
if err := s.connect(); err != nil {
slog.Warn("Hold labeler subscription error, reconnecting",
"labeler", s.labelerURL,
"labeler", s.labelerDID,
"error", err,
"backoff", backoff,
)
@@ -114,7 +109,14 @@ func (s *Subscriber) connect() error {
return fmt.Errorf("get cursor: %w", err)
}
wsURL := toWebSocketURL(s.labelerURL) + "/xrpc/com.atproto.label.subscribeLabels"
resolveCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
httpURL, err := resolveLabelerURL(resolveCtx, s.labelerDID)
cancel()
if err != nil {
return fmt.Errorf("resolve labeler endpoint: %w", err)
}
wsURL := toWebSocketURL(httpURL) + "/xrpc/com.atproto.label.subscribeLabels"
if cursor > 0 {
wsURL += fmt.Sprintf("?cursor=%d", cursor)
}
@@ -125,7 +127,7 @@ func (s *Subscriber) connect() error {
return fmt.Errorf("websocket dial: %w", err)
}
defer conn.Close()
slog.Info("Hold connected to labeler", "url", s.labelerURL)
slog.Info("Hold connected to labeler", "url", httpURL)
for {
select {
@@ -321,41 +323,25 @@ func (s *Subscriber) trustsSource(src string) bool {
return src == s.labelerDID
}
// parseLabelerURL accepts either a did:web:... identifier or a raw http/https
// URL and returns the HTTPS (or HTTP for did:web pointing at a hostname with
// %3A-encoded port in test mode) endpoint to talk to. did:web hosts with
// %3A-encoded ports are decoded back to colons. Mirrors the appview's
// ParseLabelerURL so a single config field works in both places.
func parseLabelerURL(labelerDIDOrURL string) string {
if strings.HasPrefix(labelerDIDOrURL, "http://") || strings.HasPrefix(labelerDIDOrURL, "https://") {
return labelerDIDOrURL
}
if strings.HasPrefix(labelerDIDOrURL, "did:web:") {
host := strings.TrimPrefix(labelerDIDOrURL, "did:web:")
host = strings.ReplaceAll(host, "%3A", ":")
return "https://" + host
}
return labelerDIDOrURL
}
// deriveLabelerDID returns the canonical labeler DID for source filtering.
// When the operator gave us a did:web identifier directly, we use it as-is.
// When they gave us a URL, we derive a did:web from its host (so dev URLs
// like http://172.28.0.4:5002 yield did:web:172.28.0.4%3A5002, matching the
// labeler's own self-served identity).
func deriveLabelerDID(labelerDIDOrURL, httpURL string) string {
if strings.HasPrefix(labelerDIDOrURL, "did:") {
return labelerDIDOrURL
}
u, err := url.Parse(httpURL)
// resolveLabelerURL resolves a labeler DID to its HTTP(S) endpoint by looking
// up the #atproto_labeler service in the shared identity directory: did:plc
// via plc.directory, did:web via /.well-known/did.json. The directory is the
// source of truth — clients don't need redeploying when the labeler moves or
// fixes a misconfigured endpoint.
func resolveLabelerURL(ctx context.Context, labelerDID string) (string, error) {
parsed, err := syntax.ParseDID(labelerDID)
if err != nil {
return labelerDIDOrURL
return "", fmt.Errorf("labeler: invalid DID %q: %w", labelerDID, err)
}
host := u.Hostname()
if port := u.Port(); port != "" {
host += "%3A" + port
ident, err := atproto.GetDirectory().LookupDID(ctx, parsed)
if err != nil {
return "", fmt.Errorf("labeler: failed to resolve %s: %w", labelerDID, err)
}
return "did:web:" + host
endpoint := ident.GetServiceEndpoint("atproto_labeler")
if endpoint == "" {
return "", fmt.Errorf("labeler: %s has no #atproto_labeler service endpoint", labelerDID)
}
return endpoint, nil
}
// toWebSocketURL converts an HTTP URL to a WebSocket URL. http→ws, https→wss.

View File

@@ -142,25 +142,17 @@ func TestApplyLabelIgnoresUntrustedSource(t *testing.T) {
}
}
func TestSubscriberDerivesDIDFromURL(t *testing.T) {
tests := []struct {
input string
wantURL string
wantDID string
}{
{"did:web:labeler.atcr.io", "https://labeler.atcr.io", "did:web:labeler.atcr.io"},
{"did:web:172.28.0.4%3A5002", "https://172.28.0.4:5002", "did:web:172.28.0.4%3A5002"},
{"http://172.28.0.4:5002", "http://172.28.0.4:5002", "did:web:172.28.0.4%3A5002"},
{"https://labeler.atcr.io", "https://labeler.atcr.io", "did:web:labeler.atcr.io"},
func TestSubscriberStoresDID(t *testing.T) {
tests := []string{
"did:web:labeler.atcr.io",
"did:web:172.28.0.4%3A5002",
"did:plc:4zul2zfigjltl24ti24xj3hy",
}
for _, tt := range tests {
t.Run(tt.input, func(t *testing.T) {
sub := NewSubscriber(tt.input, nil, nil)
if sub.labelerURL != tt.wantURL {
t.Errorf("labelerURL = %q, want %q", sub.labelerURL, tt.wantURL)
}
if sub.labelerDID != tt.wantDID {
t.Errorf("labelerDID = %q, want %q", sub.labelerDID, tt.wantDID)
for _, did := range tests {
t.Run(did, func(t *testing.T) {
sub := NewSubscriber(did, nil, nil)
if sub.labelerDID != did {
t.Errorf("labelerDID = %q, want %q", sub.labelerDID, did)
}
})
}

View File

@@ -269,24 +269,12 @@ func (h *XRPCHandler) HandleNotifyManifest(w http.ResponseWriter, r *http.Reques
// Only create layer records and Bluesky posts for pushes
if operation == "push" {
// Soft limit check: block if ALREADY over quota
// (blobs already uploaded to S3 by this point, no sense rejecting)
stats, err := h.pds.GetQuotaForUserWithTier(ctx, req.UserDID, h.quotaMgr)
if err == nil && stats.Limit != nil && stats.TotalSize > *stats.Limit {
slog.Warn("Quota exceeded for push",
"userDid", req.UserDID,
"currentUsage", stats.TotalSize,
"limit", *stats.Limit,
"repository", req.Repository,
"tag", req.Tag,
)
render.Status(r, http.StatusForbidden)
render.JSON(w, r, map[string]string{"error": fmt.Sprintf(
"quota exceeded: current=%d bytes, limit=%d bytes. Delete images to free space.",
stats.TotalSize, *stats.Limit,
)})
return
}
// Quota enforcement happens at the auth phase (AppView /auth/token
// via pkg/appview/authgate). By the time this notification fires
// the blobs are in S3 and the manifest is in the user's PDS, so a
// post-hoc check here cannot block the push. We still load stats
// because the tier name feeds scan-on-push selection below.
stats, _ := h.pds.GetQuotaForUserWithTier(ctx, req.UserDID, h.quotaMgr)
// Check if manifest posts are enabled
// Read from captain record (which is synced with HOLD_BLUESKY_POSTS_ENABLED env var)

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"log/slog"
"net"
"net/http"
"os"
"os/signal"
@@ -332,10 +333,22 @@ func NewHoldServer(cfg *Config) (*HoldServer, error) {
return s, nil
}
// Serve starts the HTTP server and blocks until shutdown signal.
// Serve starts the HTTP server on the configured address and blocks until
// shutdown signal.
func (s *HoldServer) Serve() error {
listener, err := net.Listen("tcp", s.Config.Server.Addr)
if err != nil {
return fmt.Errorf("failed to create listener: %w", err)
}
return s.ServeWithListener(listener)
}
// ServeWithListener starts the HTTP server on the provided listener and
// blocks until a shutdown signal arrives or Shutdown() is called. Tests use
// this to bind a 127.0.0.1:0 listener and learn the assigned port before
// driving requests.
func (s *HoldServer) ServeWithListener(listener net.Listener) error {
s.httpServer = &http.Server{
Addr: s.Config.Server.Addr,
Handler: s.Router,
ReadTimeout: s.Config.Server.ReadTimeout,
WriteTimeout: s.Config.Server.WriteTimeout,
@@ -344,14 +357,18 @@ func (s *HoldServer) Serve() error {
// Set up signal handling for graceful shutdown
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
defer signal.Stop(sigChan)
// Start server in goroutine
serverErr := make(chan error, 1)
go func() {
slog.Info("Starting hold service", "addr", s.Config.Server.Addr)
if err := s.httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
slog.Info("Starting hold service", "addr", listener.Addr().String())
err := s.httpServer.Serve(listener)
if err != nil && err != http.ErrServerClosed {
serverErr <- err
return
}
serverErr <- nil
}()
// Update status post to "online" after server starts
@@ -399,9 +416,13 @@ func (s *HoldServer) Serve() error {
// Wait for signal or server error
select {
case err := <-serverErr:
slog.Error("Server failed", "error", err)
logging.Shutdown()
return err
if err != nil {
slog.Error("Server failed", "error", err)
logging.Shutdown()
return err
}
// Clean exit (e.g. tests called Shutdown()). Tear down side workers.
s.shutdown()
case sig := <-sigChan:
slog.Info("Received signal, shutting down gracefully", "signal", sig)
s.shutdown()
@@ -410,6 +431,16 @@ func (s *HoldServer) Serve() error {
return nil
}
// Shutdown gracefully stops the HTTP server. Safe to call from tests instead
// of sending SIGTERM. Production code goes through the signal handler inside
// ServeWithListener.
func (s *HoldServer) Shutdown(ctx context.Context) error {
if s.httpServer == nil {
return nil
}
return s.httpServer.Shutdown(ctx)
}
// requestCrawls fans out com.atproto.sync.requestCrawl to every known relay so
// the embedded PDS becomes discoverable on the relay network. Configured
// RelayEndpoints (if any aren't already in KnownRelays) are included as well.

75
pkg/testpds/directory.go Normal file
View File

@@ -0,0 +1,75 @@
package testpds
import (
"context"
"fmt"
"sync"
"github.com/bluesky-social/indigo/atproto/identity"
"github.com/bluesky-social/indigo/atproto/syntax"
)
// Directory is an in-memory identity.Directory holding both the user
// identities served by the fake PDS and any service DIDs the test registers
// (e.g. the Hold's did:web identity). Install via atproto.SetDirectory().
type Directory struct {
	mu       sync.RWMutex
	byDID    map[string]*identity.Identity
	byHandle map[string]*identity.Identity
}

// newDirectory returns an empty directory ready for Register calls.
func newDirectory() *Directory {
	return &Directory{
		byDID:    map[string]*identity.Identity{},
		byHandle: map[string]*identity.Identity{},
	}
}
// Register adds an identity to the directory. Callers (Server.AddIdentity or
// the test harness adding the hold) construct the identity.Identity and pass
// it in; later Lookup/LookupDID/LookupHandle calls return this entry.
func (d *Directory) Register(ident *identity.Identity) {
	d.mu.Lock()
	defer d.mu.Unlock()

	d.byDID[ident.DID.String()] = ident
	// Service identities may carry no handle; only index real ones.
	if h := ident.Handle; h != syntax.HandleInvalid && h.String() != "" {
		d.byHandle[h.String()] = ident
	}
}
// LookupHandle implements identity.Directory. Unknown handles yield an error
// wrapping identity.ErrHandleNotFound.
func (d *Directory) LookupHandle(_ context.Context, handle syntax.Handle) (*identity.Identity, error) {
	d.mu.RLock()
	ident, ok := d.byHandle[handle.String()]
	d.mu.RUnlock()
	if !ok {
		return nil, fmt.Errorf("%w: %s", identity.ErrHandleNotFound, handle)
	}
	return ident, nil
}
// LookupDID implements identity.Directory. Unknown DIDs yield an error
// wrapping identity.ErrDIDNotFound.
func (d *Directory) LookupDID(_ context.Context, did syntax.DID) (*identity.Identity, error) {
	d.mu.RLock()
	ident, ok := d.byDID[did.String()]
	d.mu.RUnlock()
	if !ok {
		return nil, fmt.Errorf("%w: %s", identity.ErrDIDNotFound, did)
	}
	return ident, nil
}
// Lookup implements identity.Directory, dispatching to LookupDID or
// LookupHandle depending on which form the at-identifier takes.
func (d *Directory) Lookup(ctx context.Context, atid syntax.AtIdentifier) (*identity.Identity, error) {
	did, didErr := atid.AsDID()
	if didErr == nil {
		return d.LookupDID(ctx, did)
	}
	handle, handleErr := atid.AsHandle()
	if handleErr == nil {
		return d.LookupHandle(ctx, handle)
	}
	return nil, fmt.Errorf("%w: %s", identity.ErrHandleResolutionFailed, atid)
}
// Purge implements identity.Directory. Our directory is authoritative — entries
// don't expire — so this is a no-op and always returns nil.
func (d *Directory) Purge(_ context.Context, _ syntax.AtIdentifier) error {
	return nil
}

98
pkg/testpds/identity.go Normal file
View File

@@ -0,0 +1,98 @@
// Package testpds provides an in-process fake ATProto PDS for integration
// tests. It implements the minimal XRPC surface that AppView and Hold call
// during push/pull (createSession, getServiceAuth, repo.put/get/list/delete
// Record, identity.resolveHandle), plus an identity.Directory implementation
// so DID resolution short-circuits the network.
package testpds
import (
"crypto/rand"
"encoding/hex"
"fmt"
"strings"
"github.com/bluesky-social/indigo/atproto/atcrypto"
"github.com/bluesky-social/indigo/atproto/identity"
"github.com/bluesky-social/indigo/atproto/syntax"
)
// Identity is a single account on the fake PDS. Each identity has its own
// K-256 signing keypair used to sign service-auth JWTs the Hold service
// verifies.
type Identity struct {
	DID        syntax.DID               // did:web DID synthesized under the fake PDS host
	Handle     syntax.Handle            // the account's handle (e.g. alice.test)
	SigningKey *atcrypto.PrivateKeyK256 // signs service-auth JWTs for this account
	Password   string                   // synthetic app-password; opaque to tests
	AccessToken string                  // synthetic accessJwt returned by createSession
	pdsURL     string                   // base URL of the fake PDS serving this identity
}
// newIdentity allocates an identity with a fresh K-256 keypair. didHostEscaped
// is the percent-encoded host:port that did:web will encode in the DID, e.g.
// "127.0.0.1%3A45123". Returns an error when the handle is invalid, the
// synthesized DID fails to parse, key generation fails, or randomness is
// unavailable.
func newIdentity(pdsURL, didHostEscaped, handle string) (*Identity, error) {
	priv, err := atcrypto.GeneratePrivateKeyK256()
	if err != nil {
		return nil, fmt.Errorf("generate K-256: %w", err)
	}
	hdl, err := syntax.ParseHandle(handle)
	if err != nil {
		return nil, fmt.Errorf("invalid handle %q: %w", handle, err)
	}
	// Synthesize a did:web DID under the fake PDS host so resolution never
	// leaves the process: did:web:<host%3Aport>:user:<handle>.
	didStr := fmt.Sprintf("did:web:%s:user:%s", didHostEscaped, hdl.String())
	did, err := syntax.ParseDID(didStr)
	if err != nil {
		return nil, fmt.Errorf("invalid synthesized DID %q: %w", didStr, err)
	}
	// Random suffixes keep parallel tests from colliding on credentials.
	// Fix: wrap the error for context, consistent with every other error
	// path in this function (the original returned it bare).
	rb := make([]byte, 16)
	if _, err := rand.Read(rb); err != nil {
		return nil, fmt.Errorf("read random credential bytes: %w", err)
	}
	return &Identity{
		DID:         did,
		Handle:      hdl,
		SigningKey:  priv,
		Password:    "test-pass-" + hex.EncodeToString(rb[:4]),
		AccessToken: "test-access-" + hex.EncodeToString(rb),
		pdsURL:      pdsURL,
	}, nil
}
// toIndigoIdentity returns the indigo identity.Identity view of this account.
// It carries exactly the fields the production code reads when resolving
// DIDs: AlsoKnownAs (for handle), Services["atproto_pds"] (for the PDS URL),
// and Keys["atproto"] (for service-auth JWT verification).
func (i *Identity) toIndigoIdentity() (*identity.Identity, error) {
	pub, err := i.SigningKey.PublicKey()
	if err != nil {
		return nil, fmt.Errorf("derive public key: %w", err)
	}
	pubK256, ok := pub.(*atcrypto.PublicKeyK256)
	if !ok {
		return nil, fmt.Errorf("expected K-256 public key, got %T", pub)
	}

	ident := &identity.Identity{
		DID:         i.DID,
		Handle:      i.Handle,
		AlsoKnownAs: []string{"at://" + i.Handle.String()},
		Services: map[string]identity.ServiceEndpoint{
			"atproto_pds": {Type: "AtprotoPersonalDataServer", URL: i.pdsURL},
		},
		Keys: map[string]identity.VerificationMethod{
			"atproto": {Type: "Multikey", PublicKeyMultibase: pubK256.Multibase()},
		},
	}
	return ident, nil
}
// didWebForHost builds the percent-encoded did:web host component for the
// given listener address. did:web requires the port colon be encoded as %3A.
// Only the first colon is rewritten; url.QueryEscape would also mangle dots,
// which did:web hosts must keep literal.
// NOTE(review): an IPv6 literal host would contain earlier colons — assumed
// not to occur for httptest listeners, confirm if that changes.
func didWebForHost(addr string) string {
	host := addr
	for _, scheme := range []string{"http://", "https://"} {
		host = strings.TrimPrefix(host, scheme)
	}
	return strings.Replace(host, ":", "%3A", 1)
}

44
pkg/testpds/jwt.go Normal file
View File

@@ -0,0 +1,44 @@
package testpds
import (
"encoding/base64"
"encoding/json"
"fmt"
"time"
)
// signServiceAuthJWT mints a JWT in the shape Hold expects, signed with the
// identity's K-256 signing key. The header advertises ES256K (the algorithm
// ATProto PDSes use). Hold's ValidateServiceToken in pkg/hold/pds/auth.go
// parses the JWT manually rather than via golang-jwt, so exact byte ordering
// doesn't matter — any well-formed base64url(JSON).base64url(JSON).base64url(sig)
// triple is accepted.
func (i *Identity) signServiceAuthJWT(aud, lxm string, exp time.Time) (string, error) {
	b64 := base64.RawURLEncoding
	headerJSON, err := json.Marshal(map[string]string{"alg": "ES256K", "typ": "JWT"})
	if err != nil {
		return "", fmt.Errorf("marshal header: %w", err)
	}
	claims := map[string]any{
		"iss": i.DID.String(),
		"aud": aud,
		"exp": exp.Unix(),
		"lxm": lxm,
		"iat": time.Now().Unix(),
	}
	claimsJSON, err := json.Marshal(claims)
	if err != nil {
		return "", fmt.Errorf("marshal payload: %w", err)
	}
	input := b64.EncodeToString(headerJSON) + "." + b64.EncodeToString(claimsJSON)
	sig, err := i.SigningKey.HashAndSign([]byte(input))
	if err != nil {
		return "", fmt.Errorf("sign: %w", err)
	}
	return input + "." + b64.EncodeToString(sig), nil
}

110
pkg/testpds/records.go Normal file
View File

@@ -0,0 +1,110 @@
package testpds
import (
"crypto/sha256"
"encoding/base32"
"encoding/json"
"sync"
)
// recordStore is the in-memory backing for the fake PDS's repo.* endpoints.
// Records are keyed by (did, collection, rkey). Values are stored as raw JSON
// so the fake never has to know the lexicon shape. All methods are safe for
// concurrent use.
type recordStore struct {
	mu      sync.RWMutex
	records map[string]map[string]map[string]json.RawMessage
}

func newRecordStore() *recordStore {
	return &recordStore{records: map[string]map[string]map[string]json.RawMessage{}}
}

// bucket returns the rkey→value map for (did, collection), or nil when either
// level is absent. The caller must hold s.mu (read or write).
func (s *recordStore) bucket(did, collection string) map[string]json.RawMessage {
	byCollection := s.records[did]
	if byCollection == nil {
		return nil
	}
	return byCollection[collection]
}

// put stores or replaces a record, creating intermediate maps as needed.
func (s *recordStore) put(did, collection, rkey string, value json.RawMessage) {
	s.mu.Lock()
	defer s.mu.Unlock()
	byCollection, ok := s.records[did]
	if !ok {
		byCollection = map[string]map[string]json.RawMessage{}
		s.records[did] = byCollection
	}
	byRKey, ok := byCollection[collection]
	if !ok {
		byRKey = map[string]json.RawMessage{}
		byCollection[collection] = byRKey
	}
	byRKey[rkey] = value
}

// get returns the record value and whether it exists.
func (s *recordStore) get(did, collection, rkey string) (json.RawMessage, bool) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	byRKey := s.bucket(did, collection)
	if byRKey == nil {
		return nil, false
	}
	v, ok := byRKey[rkey]
	return v, ok
}

// delete removes the record and reports whether it existed.
func (s *recordStore) delete(did, collection, rkey string) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	byRKey := s.bucket(did, collection)
	if byRKey == nil {
		return false
	}
	if _, ok := byRKey[rkey]; !ok {
		return false
	}
	delete(byRKey, rkey)
	return true
}

// list returns a snapshot of all (rkey, value) pairs in the named collection,
// in undefined order. Callers are expected to sort if they care. A missing
// did/collection yields nil.
func (s *recordStore) list(did, collection string) []recordEntry {
	s.mu.RLock()
	defer s.mu.RUnlock()
	byRKey := s.bucket(did, collection)
	if byRKey == nil {
		return nil
	}
	out := make([]recordEntry, 0, len(byRKey))
	for rkey, v := range byRKey {
		out = append(out, recordEntry{RKey: rkey, Value: v})
	}
	return out
}

// recordEntry is one (rkey, value) pair returned by list.
type recordEntry struct {
	RKey  string
	Value json.RawMessage
}
// fakeCID synthesizes a deterministic-looking CID for a record. Production
// code rarely validates this against the actual record content; it just
// echoes it back. The value is SHA-256 truncated to 24 bytes, base32-encoded
// behind a CIDv1-lookalike "bafy" marker.
var b32 = base32.StdEncoding.WithPadding(base32.NoPadding)

func fakeCID(value []byte) string {
	sum := sha256.Sum256(value)
	return "bafy" + b32.EncodeToString(sum[:24])
}

// blobStore is a simple content-addressed store for ATProto blobs. The
// uploadBlob handler writes bytes here; sync.getBlob reads them back by CID.
// Safe for concurrent use.
type blobStore struct {
	mu    sync.RWMutex
	blobs map[string][]byte
}

func newBlobStore() *blobStore {
	return &blobStore{blobs: map[string][]byte{}}
}

// put stores data under the given CID, replacing any previous entry.
func (b *blobStore) put(cid string, data []byte) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.blobs[cid] = data
}

// get returns the stored bytes and whether the CID is known.
func (b *blobStore) get(cid string) ([]byte, bool) {
	b.mu.RLock()
	defer b.mu.RUnlock()
	data, ok := b.blobs[cid]
	return data, ok
}

365
pkg/testpds/server.go Normal file
View File

@@ -0,0 +1,365 @@
package testpds
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"sync"
"testing"
"time"
"atcr.io/pkg/atproto"
)
// Server is the fake PDS. Each test calls New(t) to spin one up, then
// AddIdentity() to register users. The Directory() must be passed to
// atproto.SetDirectory() so AppView and Hold resolve DIDs through it.
type Server struct {
	t *testing.T
	httptest *httptest.Server // underlying listener; closed via t.Cleanup in New
	dir *Directory // in-memory identity directory returned by Directory()
	records *recordStore // backing store for the repo.* XRPC endpoints
	blobs *blobStore // content-addressed store for uploadBlob / sync.getBlob
	mu sync.RWMutex // guards the three identity maps below
	byDID map[string]*Identity
	byHandle map[string]*Identity
	byBearer map[string]*Identity // accessJwt → identity
	didHostEsc string // percent-encoded host:port for synthesized DIDs
}
// New starts a fake PDS bound to a random port via httptest.NewServer. The
// server is torn down automatically via t.Cleanup. Callers should immediately
// install s.Directory() with atproto.SetDirectory() so DID resolution short-
// circuits through the in-memory store.
func New(t *testing.T) *Server {
	t.Helper()
	s := &Server{
		t:        t,
		dir:      newDirectory(),
		records:  newRecordStore(),
		blobs:    newBlobStore(),
		byDID:    map[string]*Identity{},
		byHandle: map[string]*Identity{},
		byBearer: map[string]*Identity{},
	}
	// Route table keeps the XRPC surface visible at a glance; each NSID is
	// mounted under the standard /xrpc/ prefix.
	routes := []struct {
		nsid    string
		handler http.HandlerFunc
	}{
		{"com.atproto.server.createSession", s.handleCreateSession},
		{"com.atproto.server.getServiceAuth", s.handleGetServiceAuth},
		{"com.atproto.repo.putRecord", s.handlePutRecord},
		{"com.atproto.repo.getRecord", s.handleGetRecord},
		{"com.atproto.repo.listRecords", s.handleListRecords},
		{"com.atproto.repo.deleteRecord", s.handleDeleteRecord},
		{"com.atproto.identity.resolveHandle", s.handleResolveHandle},
		{"com.atproto.repo.uploadBlob", s.handleUploadBlob},
		{"com.atproto.sync.getBlob", s.handleSyncGetBlob},
	}
	mux := http.NewServeMux()
	for _, rt := range routes {
		mux.HandleFunc("/xrpc/"+rt.nsid, rt.handler)
	}
	s.httptest = httptest.NewServer(mux)
	t.Cleanup(s.httptest.Close)
	s.didHostEsc = didWebForHost(strings.TrimPrefix(s.httptest.URL, "http://"))
	return s
}
// URL returns the fake PDS's base URL (http://127.0.0.1:NNNN).
func (s *Server) URL() string { return s.httptest.URL }
// Directory returns the identity.Directory backing this fake PDS. Install
// via atproto.SetDirectory() before any DID resolution happens, otherwise
// resolvers will try the real network.
func (s *Server) Directory() *Directory { return s.dir }
// GetRecord returns the raw JSON value of a record stored in the fake PDS.
// Useful for tests that need to assert a downstream component wrote a record
// without going through the HTTP XRPC surface. Returns (nil, false) on miss.
func (s *Server) GetRecord(did, collection, rkey string) (json.RawMessage, bool) {
	return s.records.get(did, collection, rkey)
}
// AddIdentity creates a new account, returns its Identity (DID, handle,
// signing key, synthetic password / accessJwt), and registers it in the
// directory so DID/handle lookups return it.
func (s *Server) AddIdentity(handle string) (*Identity, error) {
	ident, err := newIdentity(s.URL(), s.didHostEsc, handle)
	if err != nil {
		return nil, err
	}
	resolved, err := ident.toIndigoIdentity()
	if err != nil {
		return nil, err
	}
	s.dir.Register(resolved)
	s.mu.Lock()
	defer s.mu.Unlock()
	s.byDID[ident.DID.String()] = ident
	s.byHandle[ident.Handle.String()] = ident
	s.byBearer[ident.AccessToken] = ident
	return ident, nil
}
// --- handlers --------------------------------------------------------------
// handleCreateSession implements com.atproto.server.createSession: it checks
// the identifier (handle or DID) / password pair against registered
// identities and returns the synthetic session tokens minted in newIdentity.
func (s *Server) handleCreateSession(w http.ResponseWriter, r *http.Request) {
	var body struct {
		Identifier string `json:"identifier"`
		Password string `json:"password"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeXRPCError(w, http.StatusBadRequest, "InvalidRequest", err.Error())
		return
	}
	s.mu.RLock()
	ident := s.lookupIdentifier(body.Identifier)
	s.mu.RUnlock()
	if ident == nil || ident.Password != body.Password {
		// NOTE(review): real PDSes report bad credentials with
		// "AuthenticationRequired"; "AuthFactorTokenRequired" means 2FA is
		// needed. Confirm no caller matches on this error name before
		// relying on it.
		writeXRPCError(w, http.StatusUnauthorized, "AuthFactorTokenRequired", "invalid identifier/password")
		return
	}
	writeJSON(w, http.StatusOK, map[string]any{
		"did": ident.DID.String(),
		"handle": ident.Handle.String(),
		"accessJwt": ident.AccessToken,
		// refreshJwt is synthetic and never validated by this fake.
		"refreshJwt": ident.AccessToken + "-refresh",
		"active": true,
	})
}
// handleGetServiceAuth implements com.atproto.server.getServiceAuth: it
// authenticates the bearer token, validates the aud/lxm/exp query params,
// clamps the grant to one hour (as real PDSes do), and returns a service-auth
// JWT signed with the caller's key.
func (s *Server) handleGetServiceAuth(w http.ResponseWriter, r *http.Request) {
	ident := s.authenticate(r)
	if ident == nil {
		writeXRPCError(w, http.StatusUnauthorized, "AuthenticationRequired", "missing or invalid bearer token")
		return
	}
	q := r.URL.Query()
	aud, lxm, expStr := q.Get("aud"), q.Get("lxm"), q.Get("exp")
	if aud == "" || lxm == "" || expStr == "" {
		writeXRPCError(w, http.StatusBadRequest, "InvalidRequest",
			"aud, lxm, and exp are required; got "+q.Encode())
		return
	}
	expUnix, err := strconv.ParseInt(expStr, 10, 64)
	if err != nil {
		writeXRPCError(w, http.StatusBadRequest, "InvalidRequest", "exp must be a Unix timestamp")
		return
	}
	// Clamp to a <=1h grant, matching reference-PDS behavior.
	exp := time.Unix(expUnix, 0)
	if ceiling := time.Now().Add(time.Hour); exp.After(ceiling) {
		exp = ceiling
	}
	tok, err := ident.signServiceAuthJWT(aud, lxm, exp)
	if err != nil {
		writeXRPCError(w, http.StatusInternalServerError, "InternalServerError", err.Error())
		return
	}
	writeJSON(w, http.StatusOK, map[string]string{"token": tok})
}
// handlePutRecord implements com.atproto.repo.putRecord: authenticated upsert
// of a raw-JSON record under (repo, collection, rkey).
func (s *Server) handlePutRecord(w http.ResponseWriter, r *http.Request) {
	if !s.authenticated(r) {
		writeXRPCError(w, http.StatusUnauthorized, "AuthenticationRequired", "")
		return
	}
	var req struct {
		Repo       string          `json:"repo"`
		Collection string          `json:"collection"`
		RKey       string          `json:"rkey"`
		Record     json.RawMessage `json:"record"`
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeXRPCError(w, http.StatusBadRequest, "InvalidRequest", err.Error())
		return
	}
	if req.Repo == "" || req.Collection == "" || req.RKey == "" {
		writeXRPCError(w, http.StatusBadRequest, "InvalidRequest", "repo, collection, rkey required")
		return
	}
	s.records.put(req.Repo, req.Collection, req.RKey, req.Record)
	writeJSON(w, http.StatusOK, map[string]string{
		"uri": fmt.Sprintf("at://%s/%s/%s", req.Repo, req.Collection, req.RKey),
		"cid": fakeCID(req.Record),
	})
}
// handleGetRecord implements com.atproto.repo.getRecord. The repo parameter
// may be either a DID or a handle; handles are translated via didForRepo.
func (s *Server) handleGetRecord(w http.ResponseWriter, r *http.Request) {
	q := r.URL.Query()
	repo, collection, rkey := q.Get("repo"), q.Get("collection"), q.Get("rkey")
	if repo == "" || collection == "" || rkey == "" {
		writeXRPCError(w, http.StatusBadRequest, "InvalidRequest", "repo, collection, rkey required")
		return
	}
	did := s.didForRepo(repo)
	val, ok := s.records.get(did, collection, rkey)
	if !ok {
		writeXRPCError(w, http.StatusNotFound, "RecordNotFound", fmt.Sprintf("%s/%s/%s", did, collection, rkey))
		return
	}
	writeJSON(w, http.StatusOK, map[string]any{
		"uri":   fmt.Sprintf("at://%s/%s/%s", did, collection, rkey),
		"cid":   fakeCID(val),
		"value": val,
	})
}
// handleListRecords implements com.atproto.repo.listRecords. Order of the
// returned records is undefined (map iteration); no pagination is supported.
func (s *Server) handleListRecords(w http.ResponseWriter, r *http.Request) {
	q := r.URL.Query()
	repo, collection := q.Get("repo"), q.Get("collection")
	if repo == "" || collection == "" {
		writeXRPCError(w, http.StatusBadRequest, "InvalidRequest", "repo, collection required")
		return
	}
	did := s.didForRepo(repo)
	entries := s.records.list(did, collection)
	out := make([]map[string]any, 0, len(entries))
	for _, entry := range entries {
		out = append(out, map[string]any{
			"uri":   fmt.Sprintf("at://%s/%s/%s", did, collection, entry.RKey),
			"cid":   fakeCID(entry.Value),
			"value": entry.Value,
		})
	}
	writeJSON(w, http.StatusOK, map[string]any{"records": out})
}
// handleDeleteRecord implements com.atproto.repo.deleteRecord. Deletion is
// idempotent: deleting an absent record still returns 200. Required fields
// are now validated the same way handlePutRecord validates them — previously
// an empty repo/collection/rkey silently "succeeded".
func (s *Server) handleDeleteRecord(w http.ResponseWriter, r *http.Request) {
	if !s.authenticated(r) {
		writeXRPCError(w, http.StatusUnauthorized, "AuthenticationRequired", "")
		return
	}
	var body struct {
		Repo       string `json:"repo"`
		Collection string `json:"collection"`
		RKey       string `json:"rkey"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeXRPCError(w, http.StatusBadRequest, "InvalidRequest", err.Error())
		return
	}
	// Mirror handlePutRecord's validation so the two endpoints agree.
	if body.Repo == "" || body.Collection == "" || body.RKey == "" {
		writeXRPCError(w, http.StatusBadRequest, "InvalidRequest", "repo, collection, rkey required")
		return
	}
	// Delete result deliberately ignored: absent records are a no-op.
	s.records.delete(body.Repo, body.Collection, body.RKey)
	writeJSON(w, http.StatusOK, map[string]string{})
}
// handleUploadBlob implements com.atproto.repo.uploadBlob: it reads the raw
// request body, stores it under a synthetic CID, and echoes back an ATProto
// blob descriptor ($type/ref/mimeType/size).
func (s *Server) handleUploadBlob(w http.ResponseWriter, r *http.Request) {
	if !s.authenticated(r) {
		writeXRPCError(w, http.StatusUnauthorized, "AuthenticationRequired", "")
		return
	}
	defer r.Body.Close()
	data, err := io.ReadAll(r.Body)
	if err != nil {
		writeXRPCError(w, http.StatusBadRequest, "InvalidRequest", err.Error())
		return
	}
	cid := fakeCID(data)
	s.blobs.put(cid, data)
	writeJSON(w, http.StatusOK, map[string]any{
		"blob": map[string]any{
			"$type":    "blob",
			"ref":      map[string]string{"$link": cid},
			"mimeType": r.Header.Get("Content-Type"),
			"size":     len(data),
		},
	})
}
// handleSyncGetBlob implements com.atproto.sync.getBlob: it streams back the
// bytes previously stored by handleUploadBlob, keyed by CID.
func (s *Server) handleSyncGetBlob(w http.ResponseWriter, r *http.Request) {
	cid := r.URL.Query().Get("cid")
	if cid == "" {
		writeXRPCError(w, http.StatusBadRequest, "InvalidRequest", "cid required")
		return
	}
	blob, found := s.blobs.get(cid)
	if !found {
		writeXRPCError(w, http.StatusNotFound, "BlobNotFound", cid)
		return
	}
	w.Header().Set("Content-Type", "application/octet-stream")
	w.WriteHeader(http.StatusOK)
	// Write error intentionally ignored: the client hung up, nothing to do.
	_, _ = w.Write(blob)
}
// handleResolveHandle implements com.atproto.identity.resolveHandle against
// the in-memory handle index.
func (s *Server) handleResolveHandle(w http.ResponseWriter, r *http.Request) {
	handle := r.URL.Query().Get("handle")
	s.mu.RLock()
	ident := s.byHandle[handle]
	s.mu.RUnlock()
	if ident == nil {
		writeXRPCError(w, http.StatusNotFound, "HandleNotFound", handle)
		return
	}
	writeJSON(w, http.StatusOK, map[string]string{"did": ident.DID.String()})
}
// --- helpers ---------------------------------------------------------------
// lookupIdentifier resolves either a handle, a did:web, or a did:plc string
// to one of the registered identities, or nil if unknown. Caller must hold
// s.mu (read lock).
func (s *Server) lookupIdentifier(id string) *Identity {
	for _, index := range []map[string]*Identity{s.byDID, s.byHandle} {
		if ident, ok := index[id]; ok {
			return ident
		}
	}
	return nil
}
// didForRepo accepts either a DID or a handle and returns the DID. If the
// repo isn't registered, the input is returned unchanged so the records store
// returns a miss as expected.
func (s *Server) didForRepo(repo string) string {
	if strings.HasPrefix(repo, "did:") {
		return repo
	}
	s.mu.RLock()
	defer s.mu.RUnlock()
	ident := s.byHandle[repo]
	if ident == nil {
		return repo
	}
	return ident.DID.String()
}
// authenticate resolves the Bearer token to an identity. Returns nil if the
// Authorization header is missing, malformed, or the token isn't recognized.
func (s *Server) authenticate(r *http.Request) *Identity {
	const prefix = "Bearer "
	header := r.Header.Get("Authorization")
	if !strings.HasPrefix(header, prefix) {
		return nil
	}
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.byBearer[header[len(prefix):]]
}

// authenticated reports whether the request carries a recognized bearer token.
func (s *Server) authenticated(r *http.Request) bool { return s.authenticate(r) != nil }
func writeJSON(w http.ResponseWriter, status int, body any) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(status)
_ = json.NewEncoder(w).Encode(body)
}
func writeXRPCError(w http.ResponseWriter, status int, name, msg string) {
writeJSON(w, status, map[string]string{"error": name, "message": msg})
}
// Compile-time assertion that we implement atproto.Directory's expectations.
// atproto.SetDirectory takes an identity.Directory, which Directory satisfies.
// NOTE(review): this only asserts atproto.SetDirectory exists; it does NOT
// type-check *Directory against identity.Directory. A stronger form would be
// `var _ identity.Directory = (*Directory)(nil)` — confirm the import and
// tighten.
var _ = atproto.SetDirectory

View File

@@ -0,0 +1,211 @@
//go:build integration
package integration
import (
"context"
"fmt"
"strings"
"testing"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/random"
"atcr.io/internal/testharness"
)
// TestAuthMatrix exercises the authorization matrix that /auth/token plus the
// hold authorizer enforce together. Each row picks an actor (captain / crew
// with write / crew with read-only / stranger / anonymous) and an operation
// (push / pull) and asserts whether the registry round-trip succeeds. We
// reuse a single harness across rows: the actors and repos don't overlap so
// state mutations (e.g. layer records for a push) don't bleed between cases.
//
// The whole matrix runs once per OCI client in `Clients` so we catch
// dialect differences between ggcr (crane) and the OCI working group client
// (oras-go).
func TestAuthMatrix(t *testing.T) {
	h := testharness.New(t)
	// Identities. Each row references one of these — strangers and read-only
	// crew get separate handles so their state stays isolated.
	captain := h.Captain
	crewWriter := h.AddSailor("writer.test")
	crewReader := h.AddSailorWithPermissions("reader.test", []string{"blob:read"})
	stranger := h.AddStranger("stranger.test")
	// Seed a pull target by pushing once as the captain via crane. Both
	// clients pull from the same seed: the manifest exists in the hold
	// regardless of which client reads it.
	seedRef := mustParseRef(t, fmt.Sprintf("%s/%s/seed:tag", h.AppViewHostPort(), captain.Handle()))
	seedImage, err := random.Image(1<<18, 2) // 256KB × 2 layers — keep it small
	if err != nil {
		t.Fatalf("build seed image: %v", err)
	}
	if err := (craneClient{}).Push(t.Context(), t, seedRef.String(), seedImage, h.RegistryCreds(captain)); err != nil {
		t.Fatalf("seed push: %v", err)
	}
	cases := []struct {
		name  string
		creds testharness.Auth
		op    string // "push" or "pull"
		// repoFn builds the target ref; it takes the client name so each
		// client pushes to its own repo and runs stay independent.
		repoFn  func(client string) string
		wantErr bool
		// errContains lists substrings, any of which is acceptable in the
		// error message. Different OCI clients wrap registry responses
		// with different fidelity:
		//   - crane surfaces the registry response body verbatim
		//     ("authentication required", "blob:write", etc.)
		//   - oras-go drops the /auth/token body and surfaces only the
		//     HTTP status text ("Unauthorized")
		//   - regclient strips response bodies entirely and surfaces
		//     "unauthorized" for any 401/403
		// We accept the broader signals so the matrix can include clients
		// with coarser error wrapping. The strict assertions still apply
		// to crane and oras; regclient gets the "request was denied"
		// signal but loses the reason-string detail.
		errContains []string
	}{
		{
			name:   "captain_push",
			creds:  h.RegistryCreds(captain),
			op:     "push",
			repoFn: func(c string) string { return fmt.Sprintf("%s/%s/own-%s:tag", h.AppViewHostPort(), captain.Handle(), c) },
		},
		{
			name:   "captain_pull",
			creds:  h.RegistryCreds(captain),
			op:     "pull",
			repoFn: func(_ string) string { return seedRef.String() },
		},
		{
			name:   "crew_write_push",
			creds:  h.RegistryCreds(crewWriter),
			op:     "push",
			repoFn: func(c string) string { return fmt.Sprintf("%s/%s/own-%s:tag", h.AppViewHostPort(), crewWriter.Handle(), c) },
		},
		{
			name:   "crew_write_pull",
			creds:  h.RegistryCreds(crewWriter),
			op:     "pull",
			repoFn: func(_ string) string { return seedRef.String() },
		},
		{
			name:   "crew_read_only_push_denied",
			creds:  h.RegistryCreds(crewReader),
			op:     "push",
			repoFn: func(c string) string { return fmt.Sprintf("%s/%s/own-%s:tag", h.AppViewHostPort(), crewReader.Handle(), c) },
			// authgate's checkCrewBlobWrite surfaces "lacks blob:write" through
			// errcode.ErrorCodeDenied. The OCI client wraps it with "DENIED".
			wantErr:     true,
			errContains: []string{"blob:write", "unauthorized"},
		},
		{
			name:   "crew_read_only_pull",
			creds:  h.RegistryCreds(crewReader),
			op:     "pull",
			repoFn: func(_ string) string { return seedRef.String() },
		},
		{
			name:   "stranger_push_denied",
			creds:  h.RegistryCreds(stranger),
			op:     "push",
			repoFn: func(c string) string { return fmt.Sprintf("%s/%s/own-%s:tag", h.AppViewHostPort(), stranger.Handle(), c) },
			// hold_crew_members has no row for stranger → checkCrewBlobWrite
			// returns "crew membership required".
			wantErr:     true,
			errContains: []string{"crew membership required", "unauthorized"},
		},
		{
			name: "stranger_pull",
			// Pull bypasses the membership requirement (it's push-only), so a
			// PDS-known but non-crew identity can still pull from a public
			// hold. This is the credential-helper first-pull case.
			creds:  h.RegistryCreds(stranger),
			op:     "pull",
			repoFn: func(_ string) string { return seedRef.String() },
		},
		{
			name:   "anonymous_push_denied",
			creds:  h.AnonCreds(),
			op:     "push",
			repoFn: func(c string) string { return fmt.Sprintf("%s/anonymous/own-%s:tag", h.AppViewHostPort(), c) },
			// /auth/token requires Basic auth — no creds → 401. crane
			// surfaces the response body ("authentication required");
			// oras-go drops the body and surfaces "Unauthorized".
			wantErr:     true,
			errContains: []string{"authentication required", "Unauthorized", "unauthorized"},
		},
		{
			name:        "anonymous_pull_denied",
			creds:       h.AnonCreds(),
			op:          "pull",
			repoFn:      func(_ string) string { return seedRef.String() },
			wantErr:     true,
			errContains: []string{"authentication required", "Unauthorized", "unauthorized"},
		},
	}
	// Outer loop: one subtest tree per OCI client; inner loop: one subtest
	// per matrix row, so failures attribute to (client, row) precisely.
	for _, c := range Clients {
		t.Run(c.Name(), func(t *testing.T) {
			for _, tc := range cases {
				t.Run(tc.name, func(t *testing.T) {
					err := runOp(t.Context(), t, c, tc.op, tc.repoFn(c.Name()), tc.creds)
					if tc.wantErr {
						if err == nil {
							t.Fatalf("%s: expected error, got nil", tc.name)
						}
						if len(tc.errContains) > 0 && !containsAny(err.Error(), tc.errContains) {
							t.Errorf("%s: expected error containing any of %q, got: %v", tc.name, tc.errContains, err)
						}
						return
					}
					if err != nil {
						t.Fatalf("%s: unexpected error: %v", tc.name, err)
					}
				})
			}
		})
	}
}
// runOp performs the chosen op against the given ref using the supplied
// client and credentials. Push builds a fresh random image so concurrent or
// subsequent runs don't collide on shared blob digests at the registry; pull
// just resolves the reference, which is enough to exercise the auth path
// even without comparing digests.
func runOp(ctx context.Context, t *testing.T, c Client, op, ref string, creds testharness.Auth) error {
	t.Helper()
	if op == "pull" {
		_, err := c.Pull(ctx, ref, creds)
		return err
	}
	if op != "push" {
		return fmt.Errorf("unknown op %q", op)
	}
	img, err := random.Image(1<<17, 2) // 128KB × 2 layers
	if err != nil {
		return fmt.Errorf("build random image: %w", err)
	}
	return c.Push(ctx, t, ref, img, creds)
}
// containsAny reports whether s contains at least one of the given
// substrings. An empty substring matches everything, per strings.Contains.
func containsAny(s string, subs []string) bool {
	matched := false
	for i := 0; !matched && i < len(subs); i++ {
		matched = strings.Contains(s, subs[i])
	}
	return matched
}
// mustParseRef parses a reference with name.Insecure (http scheme for the
// local dev registry) and fails the test immediately on error.
func mustParseRef(t *testing.T, s string) name.Reference {
	t.Helper()
	ref, err := name.ParseReference(s, name.Insecure)
	if err != nil {
		t.Fatalf("parse ref %q: %v", s, err)
	}
	return ref
}

View File

@@ -0,0 +1,106 @@
//go:build integration
package integration
import (
"encoding/json"
"net/http"
"net/url"
"testing"
"time"
"atcr.io/internal/testharness"
"atcr.io/pkg/atproto"
)
// TestAuthTokenBootstrapsLexicons verifies that a user who has never pushed
// or pulled — and therefore has no sailor profile in their PDS and no crew
// record in the hold's PDS — gets both written by the time their very first
// /auth/token call returns.
//
// The two writes happen on different code paths:
//   - sailor profile: post-auth callback runs storage.EnsureProfile against
//     the user's PDS (the fake testpds in this test).
//   - hold crew: authgate.Authorize runs storage.EnsureCrewMembership, which
//     mints a service-auth via the user's PDS and POSTs requestCrew to the
//     hold, which writes io.atcr.hold.crew into its embedded PDS.
//
// Both run during the same /auth/token request (the post-auth callback is
// synchronous and the authgate goroutine is awaited before the handler
// returns), so a 200 from /auth/token is the signal that both records exist.
func TestAuthTokenBootstrapsLexicons(t *testing.T) {
	h := testharness.New(t)
	// Use AddStranger rather than AddSailor: a stranger has a PDS identity
	// and a users-table row (so PDS resolution works) but no crew_members
	// row anywhere. That's the closest thing to a brand-new user — no
	// pre-seeded crew membership in either the AppView or the hold's PDS,
	// and no sailor profile record in their PDS.
	sailor := h.AddStranger("newcomer.test")
	// Pre-condition: no sailor profile in the user's PDS.
	if _, ok := h.PDS.GetRecord(sailor.DID(), atproto.SailorProfileCollection, "self"); ok {
		t.Fatalf("expected no sailor profile before /auth/token, but found one for %s", sailor.DID())
	}
	// Pre-condition: no crew record for this user in the hold's PDS. The
	// lookup wraps repomgr's "not found" without a sentinel, so any non-nil
	// error here means "absent" — and a successful return is what we want
	// to assert is impossible at this point.
	if _, _, err := h.Hold.PDS.GetCrewMemberByDID(t.Context(), sailor.DID()); err == nil {
		t.Fatalf("expected no crew record before /auth/token, but found one for %s", sailor.DID())
	}
	// Trigger /auth/token via Basic auth. Docker's login does this same call
	// (no scope param) just to validate creds. The handler caches the access
	// token, runs the post-auth callback (EnsureProfile), and waits on the
	// authgate goroutine (EnsureCrewMembership) before returning.
	tokenURL := h.AppViewURL + "/auth/token?service=" + url.QueryEscape("127.0.0.1")
	req, err := http.NewRequestWithContext(t.Context(), http.MethodGet, tokenURL, nil)
	if err != nil {
		t.Fatalf("build token request: %v", err)
	}
	req.SetBasicAuth(sailor.Handle(), sailor.Identity.Password)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		t.Fatalf("call /auth/token: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("/auth/token: want 200, got %d", resp.StatusCode)
	}
	// Post-condition: sailor profile now exists in the user's PDS, anchored
	// to the AppView's default hold. EnsureProfile runs synchronously inside
	// the post-auth callback, so it's done by the time the handler returns.
	raw, ok := h.PDS.GetRecord(sailor.DID(), atproto.SailorProfileCollection, "self")
	if !ok {
		t.Fatalf("expected sailor profile in PDS after /auth/token, found none for %s", sailor.DID())
	}
	var profile atproto.SailorProfileRecord
	if err := json.Unmarshal(raw, &profile); err != nil {
		t.Fatalf("decode sailor profile: %v", err)
	}
	if profile.DefaultHold != h.HoldDID {
		t.Errorf("sailor profile defaultHold = %q, want %q", profile.DefaultHold, h.HoldDID)
	}
	// Post-condition: hold's embedded PDS now has a crew record for this
	// user. The authgate runs EnsureCrewMembership inside a goroutine that
	// the handler awaits, so this should be visible by the time /auth/token
	// returned. We allow a small poll window to absorb any IPC scheduling
	// jitter (the records-index update on the hold side is its own goroutine
	// behind the repomgr write).
	deadline := time.Now().Add(2 * time.Second)
	var lastErr error
	for time.Now().Before(deadline) {
		if _, _, err := h.Hold.PDS.GetCrewMemberByDID(t.Context(), sailor.DID()); err == nil {
			return // crew record observed — both bootstrap writes confirmed
		} else {
			lastErr = err
		}
		time.Sleep(25 * time.Millisecond)
	}
	t.Fatalf("expected hold crew record after /auth/token for %s, last lookup error: %v", sailor.DID(), lastErr)
}

430
test/integration/clients.go Normal file
View File

@@ -0,0 +1,430 @@
//go:build integration
package integration
import (
"context"
"errors"
"fmt"
"path/filepath"
"testing"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/crane"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/layout"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/distribution/distribution/v3/registry/api/errcode"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/regclient/regclient"
"github.com/regclient/regclient/config"
"github.com/regclient/regclient/types/ref"
"oras.land/oras-go/v2"
orasoci "oras.land/oras-go/v2/content/oci"
orasremote "oras.land/oras-go/v2/registry/remote"
"oras.land/oras-go/v2/registry/remote/auth"
"atcr.io/internal/testharness"
)
// Client is the minimum OCI client surface the matrix tests need: push a
// ggcr-built image and pull back its manifest digest. Pairing crane (ggcr)
// with a second independent OCI implementation catches dialect divergence
// — different Accept-header ordering, blob upload chunking, manifest
// validation, auth-challenge parsing — that single-client coverage misses.
type Client interface {
	// Name identifies the client in t.Run subtest names.
	Name() string
	// Push uploads img (built with ggcr) to ref using the given credentials.
	Push(ctx context.Context, t *testing.T, ref string, img v1.Image, a testharness.Auth) error
	// Pull resolves ref and returns its manifest digest.
	Pull(ctx context.Context, ref string, a testharness.Auth) (v1.Hash, error)
	// PushIndex / PullIndex are the multi-arch (image index) equivalents.
	PushIndex(ctx context.Context, t *testing.T, ref string, idx v1.ImageIndex, a testharness.Auth) error
	PullIndex(ctx context.Context, ref string, a testharness.Auth) (v1.Hash, error)
}
// Clients are the OCI clients exercised by the matrix tests. Tests range over
// this and wrap each iteration in t.Run(c.Name(), …) so failures attribute
// cleanly to whichever client tripped.
//
// Note on digest comparisons: every assertion happens within one client's own
// round-trip (push and pull use the same c). Cross-client digest comparison
// would be unsafe — the two libraries can send different Accept-header
// orderings, and the registry could return different manifest content-types
// to each.
var Clients = []Client{
	&craneClient{},
	&orasClient{},
	&regclientClient{},
}
// --- crane client (ggcr) ----------------------------------------------------
// craneClient drives the registry through ggcr's crane package.
type craneClient struct{}
// Name identifies this client in subtest names.
func (craneClient) Name() string { return "crane" }
// Push uploads a ggcr-built image via crane. The context is now plumbed into
// crane via crane.WithContext so test cancellation/timeouts propagate to the
// underlying HTTP round-trips instead of being silently discarded.
func (craneClient) Push(ctx context.Context, _ *testing.T, ref string, img v1.Image, a testharness.Auth) error {
	return normalizeErr(crane.Push(img, ref,
		crane.WithContext(ctx),
		crane.WithAuth(toAuthn(a)),
		crane.Insecure,
	))
}
// Pull fetches the image via crane and returns its manifest digest. As with
// Push, the context is now forwarded via crane.WithContext so deadlines and
// cancellation apply to the registry round-trips.
func (craneClient) Pull(ctx context.Context, ref string, a testharness.Auth) (v1.Hash, error) {
	img, err := crane.Pull(ref,
		crane.WithContext(ctx),
		crane.WithAuth(toAuthn(a)),
		crane.Insecure,
	)
	if err != nil {
		return v1.Hash{}, normalizeErr(err)
	}
	d, err := img.Digest()
	if err != nil {
		return v1.Hash{}, fmt.Errorf("crane pulled image digest: %w", err)
	}
	return d, nil
}
// PushIndex uploads an image index (multi-arch manifest list).
func (craneClient) PushIndex(ctx context.Context, _ *testing.T, refStr string, idx v1.ImageIndex, a testharness.Auth) error {
	// crane.Push only accepts v1.Image, so drop to remote.Push (which takes
	// any Taggable). Parsing with name.Insecure switches the scheme to http;
	// crane.Insecure also sets insecure on the default transport, but for a
	// 127.0.0.1 dev registry the scheme flip is what matters. The context is
	// now forwarded via remote.WithContext (it was previously discarded).
	ref, err := name.ParseReference(refStr, name.Insecure)
	if err != nil {
		return fmt.Errorf("crane: parse ref %q: %w", refStr, err)
	}
	return normalizeErr(remote.Push(ref, idx, remote.WithContext(ctx), remote.WithAuth(toAuthn(a))))
}
// PullIndex resolves an image-index reference and returns its digest. The
// context is now forwarded via remote.WithContext (previously discarded).
func (craneClient) PullIndex(ctx context.Context, refStr string, a testharness.Auth) (v1.Hash, error) {
	ref, err := name.ParseReference(refStr, name.Insecure)
	if err != nil {
		return v1.Hash{}, fmt.Errorf("crane: parse ref %q: %w", refStr, err)
	}
	desc, err := remote.Get(ref, remote.WithContext(ctx), remote.WithAuth(toAuthn(a)))
	if err != nil {
		return v1.Hash{}, normalizeErr(err)
	}
	return desc.Digest, nil
}
// toAuthn adapts harness credentials to ggcr's authenticator interface.
// Fully empty credentials mean anonymous access.
func toAuthn(a testharness.Auth) authn.Authenticator {
	if a.Username != "" || a.Password != "" {
		return &authn.Basic{Username: a.Username, Password: a.Password}
	}
	return authn.Anonymous
}
// --- oras-go client (OCI working group reference implementation) ------------
// orasClient drives the registry through oras-go, the OCI working group's
// reference client implementation.
type orasClient struct{}
// Name identifies this client in subtest names.
func (orasClient) Name() string { return "oras" }
// Push uploads a ggcr-built image through oras-go. Because oras-go cannot
// consume a ggcr v1.Image directly, the image is first written to an OCI
// image layout in a temp dir, then copied from that layout to the remote
// repository by tag.
func (orasClient) Push(ctx context.Context, t *testing.T, ref string, img v1.Image, a testharness.Auth) error {
	t.Helper()
	parsed, err := name.NewTag(ref, name.Insecure)
	if err != nil {
		return fmt.Errorf("oras: parse tag %q: %w", ref, err)
	}
	// Bridge ggcr -> oras-go via an OCI image layout dir. The ref-name
	// annotation lets oras resolve the image by tag inside the layout.
	layoutDir := filepath.Join(t.TempDir(), "oci-layout")
	lp, err := layout.Write(layoutDir, empty.Index)
	if err != nil {
		return fmt.Errorf("oras: init layout: %w", err)
	}
	if err := lp.AppendImage(img, layout.WithAnnotations(map[string]string{
		ocispec.AnnotationRefName: parsed.TagStr(),
	})); err != nil {
		return fmt.Errorf("oras: append image to layout: %w", err)
	}
	src, err := orasoci.New(layoutDir)
	if err != nil {
		return fmt.Errorf("oras: open layout as oci store: %w", err)
	}
	dst, err := newOrasRepository(parsed, a)
	if err != nil {
		return fmt.Errorf("oras: new repository: %w", err)
	}
	// oras.Copy transfers blobs, manifest, and tag in one call.
	if _, err := oras.Copy(ctx, src, parsed.TagStr(), dst, parsed.TagStr(), oras.DefaultCopyOptions); err != nil {
		return normalizeErr(err)
	}
	return nil
}
// Pull resolves the tag via HEAD /v2/<name>/manifests/<tag> and converts the
// returned descriptor digest into ggcr's v1.Hash form. No blobs are fetched;
// this mirrors crane.Pull followed by .Digest(), which is also manifest-only.
func (orasClient) Pull(ctx context.Context, ref string, a testharness.Auth) (v1.Hash, error) {
	r, err := name.ParseReference(ref, name.Insecure)
	if err != nil {
		return v1.Hash{}, fmt.Errorf("oras: parse ref %q: %w", ref, err)
	}
	tagged, ok := r.(name.Tag)
	if !ok {
		return v1.Hash{}, fmt.Errorf("oras: expected tagged ref, got %T", r)
	}
	repo, err := newOrasRepository(tagged, a)
	if err != nil {
		return v1.Hash{}, fmt.Errorf("oras: new repository: %w", err)
	}
	desc, err := repo.Resolve(ctx, tagged.TagStr())
	if err != nil {
		return v1.Hash{}, normalizeErr(err)
	}
	return v1.Hash{
		Algorithm: desc.Digest.Algorithm().String(),
		Hex:       desc.Digest.Encoded(),
	}, nil
}
// PushIndex uploads a multi-arch image index via oras-go, using the same
// OCI-layout bridge as Push. AppendIndex walks the index and writes every
// child manifest + blob into the layout in one shot, so the subsequent
// oras.Copy ships the entire tree.
func (orasClient) PushIndex(ctx context.Context, t *testing.T, refStr string, idx v1.ImageIndex, a testharness.Auth) error {
	t.Helper()
	parsed, err := name.NewTag(refStr, name.Insecure)
	if err != nil {
		return fmt.Errorf("oras: parse tag %q: %w", refStr, err)
	}
	// Same OCI layout bridge as Push, but AppendIndex walks the index and
	// writes every child manifest + blob into the layout in one shot.
	layoutDir := filepath.Join(t.TempDir(), "oci-layout")
	lp, err := layout.Write(layoutDir, empty.Index)
	if err != nil {
		return fmt.Errorf("oras: init layout: %w", err)
	}
	if err := lp.AppendIndex(idx, layout.WithAnnotations(map[string]string{
		ocispec.AnnotationRefName: parsed.TagStr(),
	})); err != nil {
		return fmt.Errorf("oras: append index to layout: %w", err)
	}
	src, err := orasoci.New(layoutDir)
	if err != nil {
		return fmt.Errorf("oras: open layout as oci store: %w", err)
	}
	dst, err := newOrasRepository(parsed, a)
	if err != nil {
		return fmt.Errorf("oras: new repository: %w", err)
	}
	if _, err := oras.Copy(ctx, src, parsed.TagStr(), dst, parsed.TagStr(), oras.DefaultCopyOptions); err != nil {
		return normalizeErr(err)
	}
	return nil
}
// PullIndex performs the same head-only resolve as Pull — a manifest digest
// is the same value for images and indexes (it's just the manifest body's
// sha256).
func (orasClient) PullIndex(ctx context.Context, refStr string, a testharness.Auth) (v1.Hash, error) {
	r, err := name.ParseReference(refStr, name.Insecure)
	if err != nil {
		return v1.Hash{}, fmt.Errorf("oras: parse ref %q: %w", refStr, err)
	}
	tagged, ok := r.(name.Tag)
	if !ok {
		return v1.Hash{}, fmt.Errorf("oras: expected tagged ref, got %T", r)
	}
	repo, err := newOrasRepository(tagged, a)
	if err != nil {
		return v1.Hash{}, fmt.Errorf("oras: new repository: %w", err)
	}
	desc, err := repo.Resolve(ctx, tagged.TagStr())
	if err != nil {
		return v1.Hash{}, normalizeErr(err)
	}
	return v1.Hash{
		Algorithm: desc.Digest.Algorithm().String(),
		Hex:       desc.Digest.Encoded(),
	}, nil
}
// newOrasRepository builds an oras remote.Repository for the tag's repo over
// plain HTTP, wiring in static basic-auth credentials when any are supplied.
func newOrasRepository(tag name.Tag, a testharness.Auth) (*orasremote.Repository, error) {
	// orasremote.NewRepository wants "host[:port]/repo" form (no tag/digest).
	repoStr := tag.Repository.String()
	repo, err := orasremote.NewRepository(repoStr)
	if err != nil {
		return nil, fmt.Errorf("parse %q: %w", repoStr, err)
	}
	repo.PlainHTTP = true
	if a.Username == "" && a.Password == "" {
		// Anonymous: the default client needs no credential source.
		return repo, nil
	}
	repo.Client = &auth.Client{
		Credential: auth.StaticCredential(tag.RegistryStr(), auth.Credential{
			Username: a.Username,
			Password: a.Password,
		}),
	}
	return repo, nil
}
// --- regclient client (regctl) ---------------------------------------------

// regclientClient adapts the regclient library (the engine behind regctl) to
// the harness's client interface. Stateless; a fresh RegClient is built per
// call via newRegclient.
type regclientClient struct{}

// Name identifies this client in subtest names and per-client repo paths.
func (regclientClient) Name() string { return "regclient" }
// Push uploads a single-platform image via regclient. Like the oras client,
// the ggcr image is first written to an on-disk OCI layout; regclient's
// ImageCopy then ships it from the ocidir:// source to the registry.
func (regclientClient) Push(ctx context.Context, t *testing.T, refStr string, img v1.Image, a testharness.Auth) error {
	t.Helper()
	parsed, err := name.NewTag(refStr, name.Insecure)
	if err != nil {
		return fmt.Errorf("regclient: parse tag %q: %w", refStr, err)
	}
	// Same OCI layout bridge as the oras client.
	layoutDir := filepath.Join(t.TempDir(), "oci-layout")
	lp, err := layout.Write(layoutDir, empty.Index)
	if err != nil {
		return fmt.Errorf("regclient: init layout: %w", err)
	}
	if err := lp.AppendImage(img, layout.WithAnnotations(map[string]string{
		ocispec.AnnotationRefName: parsed.TagStr(),
	})); err != nil {
		return fmt.Errorf("regclient: append image to layout: %w", err)
	}
	srcRef, err := ref.New(fmt.Sprintf("ocidir://%s:%s", layoutDir, parsed.TagStr()))
	if err != nil {
		return fmt.Errorf("regclient: parse ocidir ref: %w", err)
	}
	dstRef, err := ref.New(refStr)
	if err != nil {
		return fmt.Errorf("regclient: parse dest ref: %w", err)
	}
	rc := newRegclient(parsed.RegistryStr(), a)
	// Close releases per-registry resources (e.g. pooled connections);
	// its error is intentionally ignored in this best-effort cleanup.
	defer rc.Close(ctx, dstRef)
	if err := rc.ImageCopy(ctx, srcRef, dstRef); err != nil {
		return normalizeErr(err)
	}
	return nil
}
// Pull reports the manifest digest of the tagged image using a HEAD request.
// ManifestHead matches crane's lazy-pull semantics: HEAD /manifests/<tag>
// returns the descriptor with the registry-computed digest.
func (regclientClient) Pull(ctx context.Context, refStr string, a testharness.Auth) (v1.Hash, error) {
	ggcrRef, err := name.ParseReference(refStr, name.Insecure)
	if err != nil {
		return v1.Hash{}, fmt.Errorf("regclient: parse ref %q: %w", refStr, err)
	}
	rcRef, err := ref.New(refStr)
	if err != nil {
		return v1.Hash{}, fmt.Errorf("regclient: parse ref: %w", err)
	}
	rc := newRegclient(ggcrRef.Context().RegistryStr(), a)
	defer rc.Close(ctx, rcRef)
	head, err := rc.ManifestHead(ctx, rcRef)
	if err != nil {
		return v1.Hash{}, normalizeErr(err)
	}
	d := head.GetDescriptor().Digest
	return v1.Hash{Algorithm: d.Algorithm().String(), Hex: d.Encoded()}, nil
}
// PushIndex uploads a multi-arch image index via regclient, using the same
// ocidir bridge as Push.
func (regclientClient) PushIndex(ctx context.Context, t *testing.T, refStr string, idx v1.ImageIndex, a testharness.Auth) error {
	t.Helper()
	parsed, err := name.NewTag(refStr, name.Insecure)
	if err != nil {
		return fmt.Errorf("regclient: parse tag %q: %w", refStr, err)
	}
	// Same ocidir bridge as regclientClient.Push. AppendIndex walks the
	// index and writes every child manifest + blob into the layout; ImageCopy
	// then ships the whole tree (it dispatches on the source descriptor's
	// media type, so OCI indexes work without extra plumbing).
	layoutDir := filepath.Join(t.TempDir(), "oci-layout")
	lp, err := layout.Write(layoutDir, empty.Index)
	if err != nil {
		return fmt.Errorf("regclient: init layout: %w", err)
	}
	if err := lp.AppendIndex(idx, layout.WithAnnotations(map[string]string{
		ocispec.AnnotationRefName: parsed.TagStr(),
	})); err != nil {
		return fmt.Errorf("regclient: append index to layout: %w", err)
	}
	srcRef, err := ref.New(fmt.Sprintf("ocidir://%s:%s", layoutDir, parsed.TagStr()))
	if err != nil {
		return fmt.Errorf("regclient: parse ocidir ref: %w", err)
	}
	dstRef, err := ref.New(refStr)
	if err != nil {
		return fmt.Errorf("regclient: parse dest ref: %w", err)
	}
	rc := newRegclient(parsed.RegistryStr(), a)
	// Best-effort cleanup of per-registry resources; error ignored.
	defer rc.Close(ctx, dstRef)
	if err := rc.ImageCopy(ctx, srcRef, dstRef); err != nil {
		return normalizeErr(err)
	}
	return nil
}
// PullIndex reports the index manifest's digest. ManifestHead returns the
// descriptor with its sha256 — the same head-only flow as Pull, since a
// manifest digest works identically for images and indexes.
func (regclientClient) PullIndex(ctx context.Context, refStr string, a testharness.Auth) (v1.Hash, error) {
	ggcrRef, err := name.ParseReference(refStr, name.Insecure)
	if err != nil {
		return v1.Hash{}, fmt.Errorf("regclient: parse ref %q: %w", refStr, err)
	}
	rcRef, err := ref.New(refStr)
	if err != nil {
		return v1.Hash{}, fmt.Errorf("regclient: parse ref: %w", err)
	}
	rc := newRegclient(ggcrRef.Context().RegistryStr(), a)
	defer rc.Close(ctx, rcRef)
	head, err := rc.ManifestHead(ctx, rcRef)
	if err != nil {
		return v1.Hash{}, normalizeErr(err)
	}
	d := head.GetDescriptor().Digest
	return v1.Hash{Algorithm: d.Algorithm().String(), Hex: d.Encoded()}, nil
}
// newRegclient constructs a RegClient pinned to a single plaintext (no-TLS)
// host using the supplied basic-auth credentials.
func newRegclient(host string, a testharness.Auth) *regclient.RegClient {
	return regclient.New(regclient.WithConfigHost(config.Host{
		Name:     host,
		Hostname: host,
		TLS:      config.TLSDisabled,
		User:     a.Username,
		Pass:     a.Password,
	}))
}
// --- error normalization ----------------------------------------------------
// normalizeErr surfaces the registry-supplied error message from either
// client library's wrapper, so the substring assertions in
// auth_matrix_test.go and quota_test.go ("blob:write", "crew membership
// required", "authentication required", "quota exceeded") match regardless
// of which client produced the error.
//
// Both libraries route distribution-spec error bodies through
// errcode.Error / errcode.Errors at some layer of the wrapping. When one of
// those shapes is found, prepend its code and message; otherwise hand back
// the original error, whose .Error() string usually already contains the
// registry response body.
func normalizeErr(err error) error {
	if err == nil {
		return nil
	}
	var single errcode.Error
	if errors.As(err, &single) {
		return fmt.Errorf("%s: %s: %w", single.Code.String(), single.Message, err)
	}
	var multi errcode.Errors
	if errors.As(err, &multi) && len(multi) > 0 {
		var head errcode.Error
		if errors.As(multi[0], &head) {
			return fmt.Errorf("%s: %s: %w", head.Code.String(), head.Message, err)
		}
	}
	return err
}

View File

@@ -0,0 +1,121 @@
//go:build integration
// Multi-arch image index coverage. Buildx, ko, kaniko, and most modern build
// tooling push an OCI image index referencing per-platform children — exercising
// the manifest-list validation path in pkg/appview/storage/manifest_store.go
// (isManifestList check and the per-child s.Exists() loop). Without this test,
// any regression in that path goes unnoticed by the rest of the suite, which
// only pushes single-arch v1.Image.
package integration
import (
"fmt"
"testing"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/google/go-containerregistry/pkg/v1/random"
"github.com/google/go-containerregistry/pkg/v1/remote"
"atcr.io/internal/testharness"
_ "github.com/distribution/distribution/v3/registry/auth/token"
_ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory"
)
// TestMultiArchIndexPushPull builds a two-platform OCI image index (the same
// shape buildx/ko emit for multi-arch builds), pushes it through every client
// in the matrix, and verifies the round-tripped index digest is unchanged.
func TestMultiArchIndexPushPull(t *testing.T) {
	h := testharness.New(t)
	alice := h.AddSailor("alice.test")

	imgAMD, err := random.Image(1<<18, 2)
	if err != nil {
		t.Fatalf("amd image: %v", err)
	}
	imgARM, err := random.Image(1<<18, 2)
	if err != nil {
		t.Fatalf("arm image: %v", err)
	}
	idx := mutate.AppendManifests(empty.Index,
		mutate.IndexAddendum{Add: imgAMD, Descriptor: v1.Descriptor{
			Platform: &v1.Platform{Architecture: "amd64", OS: "linux"},
		}},
		mutate.IndexAddendum{Add: imgARM, Descriptor: v1.Descriptor{
			Platform: &v1.Platform{Architecture: "arm64", OS: "linux"},
		}},
	)
	want, err := idx.Digest()
	if err != nil {
		t.Fatalf("idx digest: %v", err)
	}

	for _, client := range Clients {
		t.Run(client.Name(), func(t *testing.T) {
			// Per-client repo so concurrent / matrixed runs don't collide on
			// shared blob digests at the registry (same pattern as
			// TestPushPullHappyPath).
			target, err := name.ParseReference(
				fmt.Sprintf("%s/%s/multi-%s:latest", h.AppViewHostPort(), alice.Handle(), client.Name()),
				name.Insecure,
			)
			if err != nil {
				t.Fatalf("parse ref: %v", err)
			}
			creds := h.RegistryCreds(alice)
			if err := client.PushIndex(t.Context(), t, target.String(), idx, creds); err != nil {
				t.Fatalf("push index: %v", err)
			}
			got, err := client.PullIndex(t.Context(), target.String(), creds)
			if err != nil {
				t.Fatalf("pull index: %v", err)
			}
			if got != want {
				t.Fatalf("digest mismatch: pushed=%s pulled=%s", want, got)
			}
		})
	}
}
// TestMultiArchIndex_RejectsMissingChild asserts that the appview rejects an
// index whose child manifest hasn't been pushed. manifest_store.go's per-child
// s.Exists() loop is the only guard against dangling-reference indexes; this
// test pins it open.
func TestMultiArchIndex_RejectsMissingChild(t *testing.T) {
	h := testharness.New(t)
	alice := h.AddSailor("alice.test")
	// A child image that will be referenced by the index but never uploaded.
	orphan, err := random.Image(1<<17, 1)
	if err != nil {
		t.Fatalf("orphan image: %v", err)
	}
	idx := mutate.AppendManifests(empty.Index,
		mutate.IndexAddendum{Add: orphan, Descriptor: v1.Descriptor{
			Platform: &v1.Platform{Architecture: "amd64", OS: "linux"},
		}},
	)
	// Client libraries upload child manifests + blobs alongside the index,
	// so a naive PushIndex would succeed. remote.Put writes only the index
	// manifest body — no children — which is exactly the dangling-reference
	// scenario manifest_store.go guards against.
	ref, err := name.ParseReference(
		fmt.Sprintf("%s/%s/orphan-idx:latest", h.AppViewHostPort(), alice.Handle()),
		name.Insecure,
	)
	if err != nil {
		t.Fatalf("parse ref: %v", err)
	}
	creds := h.RegistryCreds(alice)
	err = remote.Put(ref, idx,
		remote.WithAuth(&authn.Basic{Username: creds.Username, Password: creds.Password}),
	)
	// Any error counts as rejection here; the specific registry error body
	// isn't asserted because it's an implementation detail of the appview.
	if err == nil {
		t.Fatal("expected index PUT to be rejected (orphan child manifest)")
	}
}

View File

@@ -0,0 +1,67 @@
//go:build integration
// Package integration runs an in-process smoke test of the full ATCR stack:
// fake PDS, gofakes3 (S3), hold, appview — all wired together in goroutines
// with no docker, no compose, no external network. Confirms that the happy
// path of "push an image and pull it back" works before deploy.
//
// Run with: go test -tags=integration -v ./test/integration/...
package integration
import (
"fmt"
"testing"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/random"
"atcr.io/internal/testharness"
// distribution registers its drivers and auth providers via init() side
// effects; cmd/appview pulls these in too. Without these blank imports
// distribution panics with "StorageDriver not registered: inmemory".
_ "github.com/distribution/distribution/v3/registry/auth/token"
_ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory"
)
// TestPushPullHappyPath pushes a random 1MB image through every OCI client in
// the matrix and verifies the round-tripped manifest digest is unchanged.
func TestPushPullHappyPath(t *testing.T) {
	h := testharness.New(t)
	alice := h.AddSailor("alice.test")

	img, err := random.Image(1<<20, 3) // 1MB, 3 layers
	if err != nil {
		t.Fatalf("build random image: %v", err)
	}
	want, err := img.Digest()
	if err != nil {
		t.Fatalf("pushed digest: %v", err)
	}

	for _, client := range Clients {
		t.Run(client.Name(), func(t *testing.T) {
			// Per-client repo path so concurrent / matrixed runs don't
			// collide on shared blob digests at the registry.
			target, err := name.ParseReference(
				fmt.Sprintf("%s/%s/repo-%s:tag", h.AppViewHostPort(), alice.Handle(), client.Name()),
				name.Insecure,
			)
			if err != nil {
				t.Fatalf("parse ref: %v", err)
			}
			creds := h.RegistryCreds(alice)
			if err := client.Push(t.Context(), t, target.String(), img, creds); err != nil {
				t.Fatalf("push: %v", err)
			}
			got, err := client.Pull(t.Context(), target.String(), creds)
			if err != nil {
				t.Fatalf("pull: %v", err)
			}
			if got != want {
				t.Fatalf("digest mismatch: pushed=%s pulled=%s", want, got)
			}
		})
	}
}

View File

@@ -0,0 +1,110 @@
//go:build integration
package integration
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"strings"
"testing"
"time"
"github.com/google/go-containerregistry/pkg/v1/random"
"atcr.io/internal/testharness"
"atcr.io/pkg/atproto"
"atcr.io/pkg/hold/quota"
)
// TestQuotaExceededDenied verifies that the appview's auth-phase gate denies
// a non-captain push once the user's recorded layer bytes exceed their tier
// limit. Wire-up:
//
//  1. Harness boots with a tier "tiny" capped at 1KB and NewCrewTier="tiny",
//     so every new crew member is on the 1KB plan. The captain is exempt
//     (owner is always unlimited per GetQuotaForUserWithTier).
//  2. A crew sailor pushes a >>1KB image. /auth/token sees totalSize=0 at
//     that moment, allows. Push succeeds.
//  3. Manifest notification is async (notifyManifest fires from a goroutine
//     after the push response), so we poll the hold's public getQuota
//     endpoint until layer records have been created and totalSize > limit.
//  4. A second push from the same sailor must now fail at /auth/token with
//     "quota exceeded".
//
// The whole flow runs once per OCI client. Each client gets a fresh harness
// because quota is stateful per user and reuse would let the second client
// see the first's exhausted quota.
func TestQuotaExceededDenied(t *testing.T) {
	for _, c := range Clients {
		t.Run(c.Name(), func(t *testing.T) {
			// Fresh harness per client: see the function comment above.
			h := testharness.New(t, testharness.WithQuotaTiers(
				[]quota.TierConfig{
					{Name: "tiny", Quota: "1KB"},
				},
				"tiny",
			))
			alice := h.AddSailor("alice.test")
			creds := h.RegistryCreds(alice)
			firstRef := fmt.Sprintf("%s/%s/img:first", h.AppViewHostPort(), alice.Handle())
			img1, err := random.Image(1<<17, 2) // 128KB × 2 layers — well over 1KB
			if err != nil {
				t.Fatalf("build first image: %v", err)
			}
			if err := c.Push(t.Context(), t, firstRef, img1, creds); err != nil {
				t.Fatalf("first push should succeed (quota empty at auth time): %v", err)
			}
			// Manifest notification → layer record creation is async (see
			// pkg/appview/storage/manifest_store.go:336). Wait until the hold's
			// quota endpoint reports the bytes before we attempt the next push,
			// otherwise the gate could still see totalSize=0 and allow it.
			// 1024 == the "tiny" tier's limit in bytes.
			waitForQuota(t, h, alice.DID(), 1024, 10*time.Second)
			secondRef := fmt.Sprintf("%s/%s/img:second", h.AppViewHostPort(), alice.Handle())
			img2, err := random.Image(1<<17, 2)
			if err != nil {
				t.Fatalf("build second image: %v", err)
			}
			err = c.Push(t.Context(), t, secondRef, img2, creds)
			if err == nil {
				t.Fatal("second push should be denied by quota, but succeeded")
			}
			// crane and oras surface the registry error body ("quota
			// exceeded"); regclient strips the body and surfaces only
			// "unauthorized". Both are valid signals that the auth-phase
			// gate denied the request.
			if !strings.Contains(err.Error(), "quota exceeded") &&
				!strings.Contains(err.Error(), "unauthorized") {
				t.Errorf("expected error containing 'quota exceeded' or 'unauthorized', got: %v", err)
			}
		})
	}
}
// waitForQuota polls the hold's public getQuota endpoint until totalSize for
// userDID is at least minBytes, or the deadline expires. The endpoint is
// unauthenticated by design (the appview's gate calls it the same way).
//
// Transient failures — connection errors, non-200 responses while the async
// manifest→layer pipeline catches up — are retried every 100ms. A non-200
// body is never trusted even if it decodes: an error payload would read as
// TotalSize=0 and mask a real value.
func waitForQuota(t *testing.T, h *testharness.Harness, userDID string, minBytes int64, timeout time.Duration) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	endpoint := h.HoldURL + atproto.HoldGetQuota + "?userDid=" + url.QueryEscape(userDID)
	lastSeen := int64(-1) // -1 means no successful read yet; shown on timeout
	for time.Now().Before(deadline) {
		resp, err := http.Get(endpoint)
		if err == nil {
			var body struct {
				TotalSize int64 `json:"totalSize"`
			}
			derr := json.NewDecoder(resp.Body).Decode(&body)
			resp.Body.Close()
			// Only a 200 carries an authoritative total.
			if derr == nil && resp.StatusCode == http.StatusOK {
				if body.TotalSize >= minBytes {
					return
				}
				lastSeen = body.TotalSize
			}
		}
		time.Sleep(100 * time.Millisecond)
	}
	t.Fatalf("hold getQuota for %s did not report >= %d bytes within %s (last observed totalSize: %d)",
		userDID, minBytes, timeout, lastSeen)
}