Compare commits
118 Commits
v0.0.2
...
vulnerabil
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3b5b89b378 | ||
|
|
8c5f9da2cf | ||
|
|
15d2be9210 | ||
|
|
5a41f876ff | ||
|
|
d4b9d84df1 | ||
|
|
f07376c3d0 | ||
|
|
2f2b8c8275 | ||
|
|
9af56daa34 | ||
|
|
55afa99efa | ||
|
|
6793ba6a50 | ||
|
|
c7fdb748ae | ||
|
|
5a3b3f3372 | ||
|
|
9d773d484a | ||
|
|
6ef2aaf709 | ||
|
|
b0799cd94d | ||
|
|
93b1d0d4ba | ||
|
|
e62ebdaa53 | ||
|
|
4cfe6f221d | ||
|
|
0cf03109be | ||
|
|
0b22082f89 | ||
|
|
1727801df3 | ||
|
|
6bc929f2dc | ||
|
|
6024953571 | ||
|
|
28ee948d0f | ||
|
|
c831d3f735 | ||
|
|
162d91d079 | ||
|
|
d75a27557a | ||
|
|
c79d0ac3ab | ||
|
|
bf93dfba03 | ||
|
|
e17600db28 | ||
|
|
35ba417a96 | ||
|
|
8d1040b0d7 | ||
|
|
ba97e19ef3 | ||
|
|
771cd4390a | ||
|
|
8201d9977d | ||
|
|
2026780e11 | ||
|
|
2f27f22650 | ||
|
|
2b0501a437 | ||
|
|
e2d65c627f | ||
|
|
f75d9ceafb | ||
|
|
0c4d1cae8f | ||
|
|
2a795ed5cd | ||
|
|
ec90f43d3e | ||
|
|
d7e9580aa1 | ||
|
|
9eb69e2ea7 | ||
|
|
dd79b8a0ee | ||
|
|
a8815737fd | ||
|
|
751fa1a3f0 | ||
|
|
220022c9c5 | ||
|
|
957b216c79 | ||
|
|
b5a0e19843 | ||
|
|
97d1b3cdd5 | ||
|
|
30ea5256f3 | ||
|
|
aff5d7248c | ||
|
|
3809bcab25 | ||
|
|
1b1400a6fb | ||
|
|
0e4dd9af20 | ||
|
|
26c1b4e28e | ||
|
|
fde8421dac | ||
|
|
3e9a496a5d | ||
|
|
a118904cb8 | ||
|
|
9daf364d61 | ||
|
|
c966fab53e | ||
|
|
16f354b7b9 | ||
|
|
0404ea025b | ||
|
|
2708af614a | ||
|
|
c37abe377f | ||
|
|
61479d15ed | ||
|
|
78207ba65a | ||
|
|
7cde02bf02 | ||
|
|
1f72d90726 | ||
|
|
abf48407cc | ||
|
|
08fb8abb41 | ||
|
|
ce7160cdca | ||
|
|
5d52007104 | ||
|
|
4ca90fc3af | ||
|
|
b155534d1b | ||
|
|
965e73881b | ||
|
|
7228b532ba | ||
|
|
1b3a4eea47 | ||
|
|
fa931aca3b | ||
|
|
90ef4e90e5 | ||
|
|
1658a53cad | ||
|
|
b4e1a0869f | ||
|
|
6f3c1fc0ba | ||
|
|
f4b84ca75f | ||
|
|
80b65ee619 | ||
|
|
606c8a842a | ||
|
|
d41686c340 | ||
|
|
48414be75d | ||
|
|
50d5eea4a5 | ||
|
|
0db35bacad | ||
|
|
003dab263d | ||
|
|
7cf6da09f9 | ||
|
|
963786f7cc | ||
|
|
29ccb15e54 | ||
|
|
0dc2294c87 | ||
|
|
70e802764b | ||
|
|
08086e5afc | ||
|
|
fade86abaa | ||
|
|
a271d3d8e3 | ||
|
|
2bd7db16a4 | ||
|
|
379f23283c | ||
|
|
8a3f88a104 | ||
|
|
74f665f9e0 | ||
|
|
6b897fe23b | ||
|
|
bd7d8c62b0 | ||
|
|
4c930e8ae5 | ||
|
|
21e6d08f75 | ||
|
|
12935490d4 | ||
|
|
c0f1011ed6 | ||
|
|
4221985b90 | ||
|
|
d726e464a6 | ||
|
|
764642d271 | ||
|
|
18fe0684d3 | ||
|
|
2ee8bd8786 | ||
|
|
46c75ab44a | ||
|
|
f450d910c7 |
@@ -26,11 +26,13 @@ ATCR_HTTP_ADDR=:5000
|
||||
# Storage Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Default hold service endpoint for users without their own storage (REQUIRED)
|
||||
# Default hold service DID for users without their own storage (REQUIRED)
|
||||
# Users with a sailor profile defaultHold setting will override this
|
||||
# Docker: Use container name (http://atcr-hold:8080)
|
||||
# Local dev: Use localhost (http://127.0.0.1:8080)
|
||||
ATCR_DEFAULT_HOLD=http://127.0.0.1:8080
|
||||
# Format: did:web:hostname[:port]
|
||||
# Docker: did:web:atcr-hold:8080
|
||||
# Local dev: did:web:127.0.0.1:8080
|
||||
# Production: did:web:hold01.atcr.io
|
||||
ATCR_DEFAULT_HOLD_DID=did:web:127.0.0.1:8080
|
||||
|
||||
# ==============================================================================
|
||||
# Authentication Configuration
|
||||
@@ -47,6 +49,16 @@ ATCR_DEFAULT_HOLD=http://127.0.0.1:8080
|
||||
# JWT token expiration in seconds (default: 300 = 5 minutes)
|
||||
# ATCR_TOKEN_EXPIRATION=300
|
||||
|
||||
# Path to OAuth client P-256 signing key (auto-generated on first run)
|
||||
# Used for confidential OAuth client authentication (production only)
|
||||
# Localhost deployments always use public OAuth clients (no key needed)
|
||||
# Default: /var/lib/atcr/oauth/client.key
|
||||
# ATCR_OAUTH_KEY_PATH=/var/lib/atcr/oauth/client.key
|
||||
|
||||
# OAuth client display name (shown in authorization screens)
|
||||
# Default: AT Container Registry
|
||||
# ATCR_CLIENT_NAME=AT Container Registry
|
||||
|
||||
# ==============================================================================
|
||||
# UI Configuration
|
||||
# ==============================================================================
|
||||
@@ -59,16 +71,36 @@ ATCR_UI_ENABLED=true
|
||||
# Default: /var/lib/atcr/ui.db
|
||||
# ATCR_UI_DATABASE_PATH=/var/lib/atcr/ui.db
|
||||
|
||||
# Skip database migrations on startup (default: false)
|
||||
# Set to "true" to skip running migrations (useful for tests or fresh databases)
|
||||
# Production: Keep as "false" to ensure migrations are applied
|
||||
SKIP_DB_MIGRATIONS=false
|
||||
|
||||
# ==============================================================================
|
||||
# Logging Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Log level: debug, info, warn, error (default: info)
|
||||
# ATCR_LOG_LEVEL=info
|
||||
ATCR_LOG_LEVEL=debug
|
||||
|
||||
# Log formatter: text, json (default: text)
|
||||
# ATCR_LOG_FORMATTER=text
|
||||
|
||||
# ==============================================================================
|
||||
# Hold Health Check Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# How often to check health of hold endpoints in the background (default: 15m)
|
||||
# Queries database for unique hold endpoints and checks if they're reachable
|
||||
# Examples: 5m, 15m, 30m, 1h
|
||||
# ATCR_HEALTH_CHECK_INTERVAL=15m
|
||||
|
||||
# How long to cache health check results (default: 15m)
|
||||
# Cached results avoid redundant health checks on page renders
|
||||
# Should be >= ATCR_HEALTH_CHECK_INTERVAL for efficiency
|
||||
# Examples: 15m, 30m, 1h
|
||||
# ATCR_HEALTH_CACHE_TTL=15m
|
||||
|
||||
# ==============================================================================
|
||||
# Jetstream Configuration (ATProto event streaming)
|
||||
# ==============================================================================
|
||||
|
||||
@@ -24,13 +24,25 @@ HOLD_PUBLIC_URL=http://127.0.0.1:8080
|
||||
# Falls back to proxy mode automatically for non-S3 drivers.
|
||||
STORAGE_DRIVER=filesystem
|
||||
|
||||
# For S3/Storj/Minio:
|
||||
# S3 Access Credentials
|
||||
AWS_ACCESS_KEY_ID=your_access_key
|
||||
AWS_SECRET_ACCESS_KEY=your_secret_key
|
||||
|
||||
# S3 Region
|
||||
# Examples: us-east-1, us-west-2, eu-west-1
|
||||
# For UpCloud: us-chi1, us-nyc1, de-fra1, uk-lon1, sg-sin1
|
||||
# Default: us-east-1
|
||||
AWS_REGION=us-east-1
|
||||
|
||||
# S3 Bucket Name
|
||||
S3_BUCKET=atcr-blobs
|
||||
|
||||
# For Storj/Minio (optional - custom S3 endpoint):
|
||||
# S3 Endpoint (for S3-compatible services like Storj, Minio, UpCloud)
|
||||
# Examples:
|
||||
# - Storj: https://gateway.storjshare.io
|
||||
# - UpCloud: https://[bucket-id].upcloudobjects.com
|
||||
# - Minio: http://minio:9000
|
||||
# Leave empty for AWS S3
|
||||
# S3_ENDPOINT=https://gateway.storjshare.io
|
||||
|
||||
# For filesystem driver:
|
||||
@@ -49,6 +61,37 @@ S3_BUCKET=atcr-blobs
|
||||
# Default: false
|
||||
HOLD_PUBLIC=false
|
||||
|
||||
# ==============================================================================
|
||||
# Embedded PDS Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Directory path for embedded PDS carstore (SQLite database)
|
||||
# Default: /var/lib/atcr-hold
|
||||
# If empty, embedded PDS is disabled
|
||||
#
|
||||
# Note: This should be a directory path, NOT a file path
|
||||
# Carstore creates db.sqlite3 inside this directory
|
||||
#
|
||||
# The embedded PDS makes the hold a proper ATProto user with:
|
||||
# - did:web identity (derived from HOLD_PUBLIC_URL hostname)
|
||||
# - DID document at /.well-known/did.json
|
||||
# - XRPC endpoints for crew management
|
||||
# - ATProto blob endpoints (wraps existing presigned URL logic)
|
||||
HOLD_DATABASE_DIR=/var/lib/atcr-hold
|
||||
|
||||
# Path to signing key (auto-generated on first run if missing)
|
||||
# Default: {HOLD_DATABASE_DIR}/signing.key
|
||||
# HOLD_KEY_PATH=/var/lib/atcr-hold/signing.key
|
||||
|
||||
# ==============================================================================
|
||||
# Bluesky Integration
|
||||
# ==============================================================================
|
||||
|
||||
# Enable Bluesky posts when users push container images (default: false)
|
||||
# When enabled, the hold's embedded PDS will create posts announcing image pushes
|
||||
# Synced to captain record's enableBlueskyPosts field on startup
|
||||
# HOLD_BLUESKY_POSTS_ENABLED=false
|
||||
|
||||
# ==============================================================================
|
||||
# Registration (REQUIRED)
|
||||
# ==============================================================================
|
||||
@@ -67,3 +110,40 @@ HOLD_PUBLIC=false
|
||||
# - Skips OAuth if records exist
|
||||
#
|
||||
HOLD_OWNER=did:plc:your-did-here
|
||||
|
||||
# ==============================================================================
|
||||
# Scanner Configuration (SBOM & Vulnerability Scanning)
|
||||
# ==============================================================================
|
||||
|
||||
# Enable automatic SBOM generation and vulnerability scanning on image push
|
||||
# Default: true
|
||||
HOLD_SBOM_ENABLED=true
|
||||
|
||||
# Number of concurrent scanner worker threads
|
||||
# Default: 2
|
||||
HOLD_SBOM_WORKERS=2
|
||||
|
||||
# Enable vulnerability scanning with Grype
|
||||
# If false, only SBOM generation (Syft) will run
|
||||
# Default: true
|
||||
HOLD_VULN_ENABLED=true
|
||||
|
||||
# Path to Grype vulnerability database
|
||||
# Database is auto-downloaded and cached at this location
|
||||
# Default: /var/lib/atcr-hold/grype-db
|
||||
# HOLD_VULN_DB_PATH=/var/lib/atcr-hold/grype-db
|
||||
|
||||
# How often to update vulnerability database
|
||||
# Examples: 24h, 12h, 48h
|
||||
# Default: 24h
|
||||
# HOLD_VULN_DB_UPDATE_INTERVAL=24h
|
||||
|
||||
# ==============================================================================
|
||||
# Logging Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Log level: debug, info, warn, error (default: info)
|
||||
ATCR_LOG_LEVEL=debug
|
||||
|
||||
# Log formatter: text, json (default: text)
|
||||
# ATCR_LOG_FORMATTER=text
|
||||
|
||||
26
.golangci.yml
Normal file
26
.golangci.yml
Normal file
@@ -0,0 +1,26 @@
|
||||
# golangci-lint configuration for ATCR
|
||||
# See: https://golangci-lint.run/usage/configuration/
|
||||
version: "2"
|
||||
linters:
|
||||
settings:
|
||||
staticcheck:
|
||||
checks:
|
||||
- "all"
|
||||
- "-SA1019" # Ignore deprecated package warnings for github.com/ipfs/go-ipfs-blockstore
|
||||
# Cannot upgrade to github.com/ipfs/boxo/blockstore due to opentelemetry
|
||||
# dependency conflicts with distribution/distribution
|
||||
errcheck:
|
||||
exclude-functions:
|
||||
- (github.com/distribution/distribution/v3/registry/storage/driver.FileWriter).Cancel
|
||||
- (github.com/distribution/distribution/v3.BlobWriter).Cancel
|
||||
- (*database/sql.Tx).Rollback
|
||||
- (*database/sql.Rows).Close
|
||||
- (*net/http.Server).Shutdown
|
||||
|
||||
exclusions:
|
||||
presets:
|
||||
- std-error-handling
|
||||
formatters:
|
||||
enable:
|
||||
- gofmt
|
||||
- goimports
|
||||
@@ -6,6 +6,7 @@ version: 2
|
||||
before:
|
||||
hooks:
|
||||
- go mod tidy
|
||||
- go generate ./...
|
||||
|
||||
builds:
|
||||
# Credential helper - cross-platform native binary distribution
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# Triggers on version tags and builds cross-platform binaries using GoReleaser
|
||||
|
||||
when:
|
||||
- event: ["push", "manual"]
|
||||
- event: ["manual"]
|
||||
# TODO: Trigger only on version tags (v1.0.0, v2.1.3, etc.)
|
||||
branch: ["main"]
|
||||
|
||||
|
||||
27
.tangled/workflows/tests.yml
Normal file
27
.tangled/workflows/tests.yml
Normal file
@@ -0,0 +1,27 @@
|
||||
when:
|
||||
- event: ["push"]
|
||||
branch: ["main"]
|
||||
- event: ["pull_request"]
|
||||
branch: ["main"]
|
||||
|
||||
engine: "nixery"
|
||||
|
||||
dependencies:
|
||||
nixpkgs:
|
||||
- gcc
|
||||
- go
|
||||
- curl
|
||||
|
||||
steps:
|
||||
- name: Download and Generate
|
||||
environment:
|
||||
CGO_ENABLED: 1
|
||||
command: |
|
||||
go mod download
|
||||
go generate ./...
|
||||
|
||||
- name: Run Tests
|
||||
environment:
|
||||
CGO_ENABLED: 1
|
||||
command: |
|
||||
go test -cover ./...
|
||||
355
CLAUDE.md
355
CLAUDE.md
@@ -14,13 +14,24 @@ ATCR (ATProto Container Registry) is an OCI-compliant container registry that us
|
||||
go build -o bin/atcr-appview ./cmd/appview
|
||||
go build -o bin/atcr-hold ./cmd/hold
|
||||
go build -o bin/docker-credential-atcr ./cmd/credential-helper
|
||||
go build -o bin/oauth-helper ./cmd/oauth-helper
|
||||
|
||||
# Run tests
|
||||
go test ./...
|
||||
|
||||
# Run tests for specific package
|
||||
go test ./pkg/atproto/...
|
||||
go test ./pkg/appview/storage/...
|
||||
|
||||
# Run specific test
|
||||
go test -run TestManifestStore ./pkg/atproto/...
|
||||
|
||||
# Run with race detector
|
||||
go test -race ./...
|
||||
|
||||
# Run tests with verbose output
|
||||
go test -v ./...
|
||||
|
||||
# Update dependencies
|
||||
go mod tidy
|
||||
|
||||
@@ -51,7 +62,12 @@ export STORAGE_DRIVER=filesystem
|
||||
export STORAGE_ROOT_DIR=/tmp/atcr-hold
|
||||
export HOLD_OWNER=did:plc:your-did-here
|
||||
./bin/atcr-hold
|
||||
# Check logs for OAuth URL, visit in browser to complete registration
|
||||
# Hold starts immediately with embedded PDS
|
||||
|
||||
# Request Bluesky relay crawl (makes your PDS discoverable)
|
||||
./deploy/request-crawl.sh hold01.atcr.io
|
||||
# Or specify a different relay:
|
||||
./deploy/request-crawl.sh hold01.atcr.io https://custom-relay.example.com/xrpc/com.atproto.sync.requestCrawl
|
||||
```
|
||||
|
||||
## Architecture Overview
|
||||
@@ -75,9 +91,10 @@ ATCR uses **distribution/distribution** as a library and extends it through midd
|
||||
|
||||
2. **Hold Service** (`cmd/hold`) - Optional BYOS component
|
||||
- Lightweight HTTP server for presigned URLs
|
||||
- Embedded PDS with captain + crew records
|
||||
- Supports S3, Storj, Minio, filesystem, etc.
|
||||
- Authorization based on PDS records (hold.public, crew records)
|
||||
- Auto-registration via OAuth
|
||||
- Authorization based on captain record (public, allowAllCrew)
|
||||
- Self-describing via DID resolution
|
||||
- Configured entirely via environment variables
|
||||
|
||||
3. **Credential Helper** (`cmd/credential-helper`) - Client-side OAuth
|
||||
@@ -94,32 +111,45 @@ ATCR uses **distribution/distribution** as a library and extends it through midd
|
||||
2. HTTP Request → /v2/alice/myapp/manifests/latest
|
||||
3. Registry Middleware (pkg/appview/middleware/registry.go)
|
||||
→ Resolves "alice" to DID and PDS endpoint
|
||||
→ Queries alice's sailor profile for defaultHold
|
||||
→ Queries alice's sailor profile for defaultHold (returns DID if set)
|
||||
→ If not set, checks alice's io.atcr.hold records
|
||||
→ Falls back to AppView's default_storage_endpoint
|
||||
→ Stores DID/PDS/storage endpoint in context
|
||||
→ Falls back to AppView's default_hold_did
|
||||
→ Stores DID/PDS/hold DID in RegistryContext
|
||||
4. Routing Repository (pkg/appview/storage/routing_repository.go)
|
||||
→ Creates RoutingRepository
|
||||
→ Returns ATProto ManifestStore for manifests
|
||||
→ Returns ProxyBlobStore for blobs
|
||||
5. Blob PUT → Resolved hold service (redirects to S3/storage)
|
||||
6. Manifest PUT → alice's PDS as io.atcr.manifest record (includes holdEndpoint)
|
||||
→ Returns ProxyBlobStore for blobs (routes to hold DID)
|
||||
5. Blob PUT → ProxyBlobStore calls hold's XRPC multipart upload endpoints:
|
||||
a. POST /xrpc/io.atcr.hold.initiateUpload (gets uploadID)
|
||||
b. POST /xrpc/io.atcr.hold.getPartUploadUrl (gets presigned URL for each part)
|
||||
c. PUT to S3 presigned URL (or PUT /xrpc/io.atcr.hold.uploadPart for buffered mode)
|
||||
d. POST /xrpc/io.atcr.hold.completeUpload (finalizes upload)
|
||||
6. Manifest PUT → alice's PDS as io.atcr.manifest record (includes holdDid + holdEndpoint)
|
||||
→ Manifest also uploaded to PDS blob storage (ATProto CID format)
|
||||
```
|
||||
|
||||
#### Push with BYOS (Bring Your Own Storage)
|
||||
```
|
||||
1. Client: docker push atcr.io/alice/myapp:latest
|
||||
2. Registry Middleware resolves alice → did:plc:alice123
|
||||
3. Hold discovery via findStorageEndpoint():
|
||||
a. Check alice's sailor profile for defaultHold
|
||||
b. If not set, check alice's io.atcr.hold records
|
||||
c. Fall back to AppView's default_storage_endpoint
|
||||
4. Found: alice's profile has defaultHold = "https://alice-storage.fly.dev"
|
||||
5. Routing Repository returns ProxyBlobStore(alice-storage.fly.dev)
|
||||
6. ProxyBlobStore calls alice-storage.fly.dev for presigned URL
|
||||
7. Storage service validates alice's DID, generates S3 presigned URL
|
||||
8. Client redirected to upload blob directly to alice's S3/Storj
|
||||
9. Manifest stored in alice's PDS with holdEndpoint = "https://alice-storage.fly.dev"
|
||||
3. Hold discovery via findHoldDID():
|
||||
a. Check alice's sailor profile for defaultHold (returns DID if set)
|
||||
b. If not set, check alice's io.atcr.hold records (legacy)
|
||||
c. Fall back to AppView's default_hold_did
|
||||
4. Found: alice's profile has defaultHold = "did:web:alice-storage.fly.dev"
|
||||
5. Routing Repository returns ProxyBlobStore(did:web:alice-storage.fly.dev)
|
||||
6. ProxyBlobStore:
|
||||
a. Resolves hold DID → https://alice-storage.fly.dev (did:web resolution)
|
||||
b. Gets service token from alice's PDS via com.atproto.server.getServiceAuth
|
||||
c. Calls hold XRPC endpoints with service token authentication:
|
||||
- POST /xrpc/io.atcr.hold.initiateUpload
|
||||
- POST /xrpc/io.atcr.hold.getPartUploadUrl (returns presigned S3 URL)
|
||||
- PUT to S3 presigned URL (direct upload to alice's S3/Storj)
|
||||
- POST /xrpc/io.atcr.hold.completeUpload
|
||||
7. Hold service validates service token, checks crew membership, generates presigned URLs
|
||||
8. Manifest stored in alice's PDS with:
|
||||
- holdDid = "did:web:alice-storage.fly.dev" (primary)
|
||||
- holdEndpoint = "https://alice-storage.fly.dev" (backward compat)
|
||||
```
|
||||
|
||||
#### Pull Flow
|
||||
@@ -127,15 +157,22 @@ ATCR uses **distribution/distribution** as a library and extends it through midd
|
||||
1. Client: docker pull atcr.io/alice/myapp:latest
|
||||
2. GET /v2/alice/myapp/manifests/latest
|
||||
3. AppView fetches manifest from alice's PDS
|
||||
4. Manifest contains holdEndpoint = "https://alice-storage.fly.dev"
|
||||
5. Hold endpoint cached: (alice's DID, "myapp") → "https://alice-storage.fly.dev"
|
||||
4. Manifest contains:
|
||||
- holdDid = "did:web:alice-storage.fly.dev" (primary reference)
|
||||
- holdEndpoint = "https://alice-storage.fly.dev" (legacy fallback)
|
||||
5. Hold DID cached: (alice's DID, "myapp") → "did:web:alice-storage.fly.dev"
|
||||
TTL: 10 minutes (covers typical pull operations)
|
||||
6. Client requests blobs: GET /v2/alice/myapp/blobs/sha256:abc123
|
||||
7. AppView checks cache, routes to hold from manifest (not re-discovered)
|
||||
8. ProxyBlobStore calls alice-storage.fly.dev for presigned download URL
|
||||
9. Client redirected to download blob directly from alice's S3
|
||||
7. AppView checks cache, routes to hold DID from manifest (not re-discovered)
|
||||
8. ProxyBlobStore:
|
||||
a. Resolves hold DID → https://alice-storage.fly.dev
|
||||
b. Gets service token from alice's PDS via com.atproto.server.getServiceAuth
|
||||
c. Calls GET /xrpc/com.atproto.sync.getBlob?did={userDID}&cid=sha256:abc123&method=GET
|
||||
d. Hold returns presigned download URL in JSON response
|
||||
9. Client redirected to download blob directly from alice's S3 via presigned URL
|
||||
```
|
||||
|
||||
**Key insight:** Pull uses the historical `holdEndpoint` from the manifest, ensuring blobs are fetched from the hold where they were originally pushed, even if alice later changes her default hold.
|
||||
**Key insight:** Pull uses the historical `holdDid` from the manifest, ensuring blobs are fetched from the hold where they were originally pushed, even if alice later changes her default hold. Hold cache (10min TTL) avoids re-querying PDS for each blob during the same pull operation.
|
||||
|
||||
### Name Resolution
|
||||
|
||||
@@ -184,21 +221,26 @@ ATCR implements the full ATProto OAuth specification with mandatory security fea
|
||||
**Key Components** (`pkg/auth/oauth/`):
|
||||
|
||||
1. **Client** (`client.go`) - Core OAuth client with encapsulated configuration
|
||||
- Constructor: `NewClient(baseURL)` - accepts base URL, derives client ID/redirect URI
|
||||
- `NewClientWithKey(baseURL, dpopKey)` - for token refresh with stored DPoP key
|
||||
- `ClientID()` - computes localhost vs production client ID dynamically
|
||||
- Uses indigo's `NewLocalhostConfig()` for localhost (public client)
|
||||
- Uses `NewPublicConfig()` for production base (upgraded to confidential if key provided)
|
||||
- `RedirectURI()` - returns `baseURL + "/auth/oauth/callback"`
|
||||
- `GetDefaultScopes()` - returns ATCR registry scopes
|
||||
- `GetConfigRef()` - returns mutable config for `SetClientSecret()` calls
|
||||
- All OAuth flows (authorization, token exchange, refresh) in one place
|
||||
|
||||
2. **DPoP Transport** (`transport.go`) - HTTP RoundTripper that auto-adds DPoP headers
|
||||
2. **Keys** (`keys.go`) - P-256 key management for confidential clients
|
||||
- `GenerateOrLoadClientKey()` - generates or loads P-256 key from disk
|
||||
- Follows hold service pattern: auto-generation, 0600 permissions, /var/lib/atcr/oauth/
|
||||
- `GenerateKeyID()` - derives key ID from public key hash
|
||||
- `PrivateKeyToMultibase()` - converts key for `SetClientSecret()` API
|
||||
- **Key type:** P-256 (ES256) for OAuth standard compatibility (not K-256 like PDS keys)
|
||||
|
||||
3. **Token Storage** (`tokenstorage.go`) - Persists refresh tokens and DPoP keys for AppView
|
||||
- File-based storage in `/var/lib/atcr/refresh-tokens.json` (AppView)
|
||||
3. **Token Storage** (`store.go`) - Persists OAuth sessions for AppView
|
||||
- SQLite-backed storage in UI database (not file-based)
|
||||
- Client uses `~/.atcr/oauth-token.json` (credential helper)
|
||||
|
||||
4. **Refresher** (`refresher.go`) - Token refresh manager for AppView
|
||||
- Caches access tokens with automatic refresh
|
||||
- Caches OAuth sessions with automatic token refresh (handled by indigo library)
|
||||
- Per-DID locking prevents concurrent refresh races
|
||||
- Uses Client methods for consistency
|
||||
|
||||
@@ -207,36 +249,59 @@ ATCR implements the full ATProto OAuth specification with mandatory security fea
|
||||
- `GET /auth/oauth/callback` - handles OAuth callback
|
||||
- Uses Client methods for authorization and token exchange
|
||||
|
||||
6. **Interactive Flow** (`flow.go`) - Reusable OAuth flow for CLI tools
|
||||
6. **Interactive Flow** (`interactive.go`) - Reusable OAuth flow for CLI tools
|
||||
- Used by credential helper and hold service registration
|
||||
- Two-phase callback setup ensures PAR metadata availability
|
||||
|
||||
**Client Configuration:**
|
||||
- **Localhost:** Always public client (no client authentication)
|
||||
- Client ID: `http://localhost?redirect_uri=...&scope=...` (query-based)
|
||||
- No P-256 key generation
|
||||
- **Production:** Confidential client with P-256 private key (if key exists)
|
||||
- Client ID: `{baseURL}/client-metadata.json` (metadata endpoint)
|
||||
- Key path: `/var/lib/atcr/oauth/client.key` (auto-generated on first run)
|
||||
- Key algorithm: ES256 (P-256, not K-256)
|
||||
- Upgraded via `config.SetClientSecret(key, keyID)`
|
||||
|
||||
**Authentication Flow:**
|
||||
```
|
||||
1. User configures Docker to use the credential helper (adds to config.json)
|
||||
2. On first docker push/pull, helper generates ECDSA P-256 DPoP key
|
||||
3. Resolve handle → DID → PDS endpoint
|
||||
4. Discover OAuth server metadata from PDS
|
||||
5. PAR request with DPoP header → get request_uri
|
||||
6. Open browser for user authorization
|
||||
7. Exchange code for token with DPoP proof
|
||||
8. Save: access token, refresh token, DPoP key, DID, handle
|
||||
2. On first docker push/pull, Docker calls credential helper
|
||||
3. Credential helper opens browser → AppView OAuth page
|
||||
4. AppView handles OAuth flow:
|
||||
- Resolves handle → DID → PDS endpoint
|
||||
- Discovers OAuth server metadata from PDS
|
||||
- PAR request with DPoP header → get request_uri
|
||||
- User authorizes in browser
|
||||
- AppView exchanges code for OAuth token with DPoP proof
|
||||
- AppView stores: OAuth session (tokens managed by indigo library with DPoP), DID, handle
|
||||
5. AppView shows device approval page: "Can [device] push to your account?"
|
||||
6. User approves device
|
||||
7. AppView issues registry JWT with validated DID
|
||||
8. AppView returns JSON token to credential helper (via callback or browser display)
|
||||
9. Credential helper saves registry JWT locally
|
||||
10. Helper returns registry JWT to Docker
|
||||
|
||||
Later (subsequent docker push):
|
||||
9. Docker calls credential helper
|
||||
10. Helper loads token, refreshes if needed
|
||||
11. Helper calls /auth/exchange with OAuth token + handle
|
||||
12. AppView validates token via PDS getSession
|
||||
13. AppView ensures sailor profile exists (creates with defaultHold if first login)
|
||||
14. AppView issues registry JWT with validated DID
|
||||
15. Helper returns JWT to Docker
|
||||
11. Docker calls credential helper
|
||||
12. Helper returns cached registry JWT (or re-authenticates if expired)
|
||||
```
|
||||
|
||||
**Key distinction:** The credential helper never manages OAuth tokens directly. AppView owns the OAuth session (including DPoP handling via indigo library) and issues registry JWTs to the credential helper. AppView needs the OAuth session for:
|
||||
- Writing manifests to user's PDS (with DPoP authentication)
|
||||
- Getting service tokens from user's PDS (with DPoP authentication)
|
||||
- Service tokens are then used to authenticate to hold services (Bearer tokens, not DPoP)
|
||||
|
||||
**Security:**
|
||||
- Tokens validated against authoritative source (user's PDS)
|
||||
- No trust in client-provided identity information
|
||||
- DPoP binds tokens to specific client key
|
||||
- 15-minute token expiry for registry JWTs
|
||||
- **Confidential clients** (production): Client authentication via P-256 private key JWT assertion
|
||||
- Prevents client impersonation attacks
|
||||
- Key stored in `/var/lib/atcr/oauth/client.key` with 0600 permissions
|
||||
- Automatically generated on first run
|
||||
- **Public clients** (localhost): No client authentication (development only)
|
||||
|
||||
### Key Components
|
||||
|
||||
@@ -254,12 +319,14 @@ Later (subsequent docker push):
|
||||
- Uses XRPC protocol (com.atproto.repo.*)
|
||||
|
||||
**lexicon.go**: ATProto record schemas
|
||||
- `ManifestRecord`: OCI manifest stored as ATProto record (includes `holdEndpoint` field)
|
||||
- `ManifestRecord`: OCI manifest stored as ATProto record (includes `holdDid` + `holdEndpoint` fields)
|
||||
- `TagRecord`: Tag pointing to manifest digest
|
||||
- `HoldRecord`: Storage hold definition (for BYOS)
|
||||
- `HoldCrewRecord`: Hold crew membership/permissions
|
||||
- `SailorProfileRecord`: User profile with `defaultHold` preference
|
||||
- Collections: `io.atcr.manifest`, `io.atcr.tag`, `io.atcr.hold`, `io.atcr.hold.crew`, `io.atcr.sailor.profile`
|
||||
- `HoldRecord`: Storage hold definition (LEGACY - for old BYOS model)
|
||||
- `HoldCrewRecord`: Hold crew membership (LEGACY - stored in owner's PDS)
|
||||
- `CaptainRecord`: Hold ownership record (NEW - stored in hold's embedded PDS at rkey "self")
|
||||
- `CrewRecord`: Hold crew membership (NEW - stored in hold's embedded PDS, one record per member)
|
||||
- `SailorProfileRecord`: User profile with `defaultHold` preference (can be DID or URL)
|
||||
- Collections: `io.atcr.manifest`, `io.atcr.tag`, `io.atcr.hold` (legacy), `io.atcr.hold.crew` (used by both legacy and new models), `io.atcr.hold.captain` (new), `io.atcr.sailor.profile`
|
||||
|
||||
**profile.go**: Sailor profile management
|
||||
- `EnsureProfile()`: Creates profile with default hold on first authentication
|
||||
@@ -274,26 +341,29 @@ Later (subsequent docker push):
|
||||
#### Storage Layer (`pkg/appview/storage/`)
|
||||
|
||||
**routing_repository.go**: Routes content by type
|
||||
- `Manifests()` → returns ATProto ManifestStore (caches instance for hold endpoint extraction)
|
||||
- `Manifests()` → returns ATProto ManifestStore (caches instance for hold DID extraction)
|
||||
- `Blobs()` → checks hold cache for pull, uses discovery for push
|
||||
- Pull: Uses cached `holdEndpoint` from manifest (historical reference)
|
||||
- Push: Uses discovery-based endpoint from `findStorageEndpoint()`
|
||||
- Always returns ProxyBlobStore (routes to hold service)
|
||||
- Pull: Uses cached `holdDid` from manifest (historical reference)
|
||||
- Push: Uses discovery-based DID from `findHoldDID()` in middleware
|
||||
- Always returns ProxyBlobStore (routes to hold service via DID)
|
||||
- Implements `distribution.Repository` interface
|
||||
- Uses RegistryContext to pass DID, PDS endpoint, hold DID, OAuth refresher, etc.
|
||||
|
||||
**hold_cache.go**: In-memory hold endpoint cache
|
||||
- Caches `(DID, repository) → holdEndpoint` for pull operations
|
||||
**hold_cache.go**: In-memory hold DID cache
|
||||
- Caches `(DID, repository) → holdDid` for pull operations
|
||||
- TTL: 10 minutes (covers typical pull operations)
|
||||
- Cleanup: Background goroutine runs every 5 minutes
|
||||
- **NOTE:** Simple in-memory cache for MVP. For production: use Redis or similar
|
||||
- Prevents expensive ATProto lookups on every blob request
|
||||
- Prevents expensive PDS manifest lookups on every blob request during pull
|
||||
|
||||
**proxy_blob_store.go**: External storage proxy
|
||||
- Calls user's storage service for presigned URLs
|
||||
- Issues HTTP redirects for blob uploads/downloads
|
||||
**proxy_blob_store.go**: External storage proxy (routes to hold via XRPC)
|
||||
- Resolves hold DID → HTTP URL for XRPC requests (did:web resolution)
|
||||
- Gets service tokens from user's PDS (`com.atproto.server.getServiceAuth`)
|
||||
- Calls hold XRPC endpoints with service token authentication:
|
||||
- Multipart upload: initiateUpload, getPartUploadUrl, uploadPart, completeUpload, abortUpload
|
||||
- Blob read: com.atproto.sync.getBlob (returns presigned download URL)
|
||||
- Implements full `distribution.BlobStore` interface
|
||||
- Supports multipart uploads for large blobs
|
||||
- Used when user has `io.atcr.hold` record
|
||||
- Supports both presigned URL mode (S3 direct) and buffered mode (proxy via hold)
|
||||
|
||||
#### AppView Web UI (`pkg/appview/`)
|
||||
|
||||
@@ -333,41 +403,74 @@ The AppView includes a web interface for browsing the registry:
|
||||
|
||||
#### Hold Service (`cmd/hold/`)
|
||||
|
||||
Lightweight standalone service for BYOS (Bring Your Own Storage):
|
||||
Lightweight standalone service for BYOS (Bring Your Own Storage) with embedded PDS:
|
||||
|
||||
**Architecture:**
|
||||
- Reuses distribution's storage driver factory
|
||||
- Supports all distribution drivers: S3, Storj, Minio, Azure, GCS, filesystem
|
||||
- Authorization follows ATProto's public-by-default model
|
||||
- Generates presigned URLs (15min expiry) or proxies uploads/downloads
|
||||
- **Embedded PDS**: Each hold has a full ATProto PDS for storing captain + crew records
|
||||
- **DID**: Hold identified by did:web (e.g., `did:web:hold01.atcr.io`)
|
||||
- **Storage**: Reuses distribution's storage driver factory (S3, Storj, Minio, Azure, GCS, filesystem)
|
||||
- **Authorization**: Based on captain + crew records in embedded PDS
|
||||
- **Blob operations**: Generates presigned URLs (15min expiry) or proxies uploads/downloads via XRPC
|
||||
|
||||
**Authorization Model:**
|
||||
|
||||
Read access:
|
||||
- **Public hold** (`HOLD_PUBLIC=true`): Anonymous + all authenticated users
|
||||
- **Private hold** (`HOLD_PUBLIC=false`): Authenticated users only (any ATCR user)
|
||||
- **Private hold** (`HOLD_PUBLIC=false`): Requires authentication + crew membership with blob:read permission
|
||||
|
||||
Write access:
|
||||
- Hold owner OR crew members only
|
||||
- Verified via `io.atcr.hold.crew` records in owner's PDS
|
||||
- Hold owner OR crew members with blob:write permission
|
||||
- Verified via `io.atcr.hold.crew` records in hold's embedded PDS
|
||||
|
||||
Key insight: "Private" gates anonymous access, not authenticated access. This reflects ATProto's current limitation (no private PDS records yet).
|
||||
**Embedded PDS Endpoints** (`pkg/hold/pds/xrpc.go`):
|
||||
|
||||
**Endpoints:**
|
||||
- `POST /get-presigned-url` - Get download URL for blob
|
||||
- `POST /put-presigned-url` - Get upload URL for blob
|
||||
- `GET /blobs/{digest}` - Proxy download (fallback if no presigned URL support)
|
||||
- `PUT /blobs/{digest}` - Proxy upload (fallback)
|
||||
- `POST /register` - Manual registration endpoint
|
||||
- `GET /health` - Health check
|
||||
Standard ATProto sync endpoints:
|
||||
- `GET /xrpc/com.atproto.sync.getRepo?did={did}` - Download full repository as CAR file
|
||||
- `GET /xrpc/com.atproto.sync.getRepo?did={did}&since={rev}` - Download repository diff since revision
|
||||
- `GET /xrpc/com.atproto.sync.getRepoStatus?did={did}` - Get repository hosting status and current revision
|
||||
- `GET /xrpc/com.atproto.sync.subscribeRepos` - WebSocket firehose for real-time events
|
||||
- `GET /xrpc/com.atproto.sync.listRepos` - List all repositories (single-user PDS)
|
||||
- `GET /xrpc/com.atproto.sync.getBlob?did={did}&cid={digest}` - Get blob or presigned download URL
|
||||
|
||||
**Configuration:** Environment variables (see `.env.example`)
|
||||
- `HOLD_PUBLIC_URL` - Public URL of hold service (required)
|
||||
Repository management:
|
||||
- `GET /xrpc/com.atproto.repo.describeRepo?repo={did}` - Repository metadata
|
||||
- `GET /xrpc/com.atproto.repo.getRecord?repo={did}&collection={col}&rkey={key}` - Get record
|
||||
- `GET /xrpc/com.atproto.repo.listRecords?repo={did}&collection={col}` - List records (supports pagination)
|
||||
- `POST /xrpc/com.atproto.repo.deleteRecord` - Delete record (owner/crew admin only)
|
||||
- `POST /xrpc/com.atproto.repo.uploadBlob` - Upload ATProto blob (owner/crew admin only)
|
||||
|
||||
DID resolution:
|
||||
- `GET /.well-known/did.json` - DID document (did:web resolution)
|
||||
- `GET /.well-known/atproto-did` - DID for handle resolution
|
||||
|
||||
Crew management:
|
||||
- `POST /xrpc/io.atcr.hold.requestCrew` - Request crew membership (authenticated users)
|
||||
|
||||
**OCI Multipart Upload Endpoints** (`pkg/hold/oci/xrpc.go`):
|
||||
|
||||
All require blob:write permission via service token authentication:
|
||||
- `POST /xrpc/io.atcr.hold.initiateUpload` - Start multipart upload session
|
||||
- `POST /xrpc/io.atcr.hold.getPartUploadUrl` - Get presigned URL for uploading a part
|
||||
- `PUT /xrpc/io.atcr.hold.uploadPart` - Direct buffered part upload (alternative to presigned URLs)
|
||||
- `POST /xrpc/io.atcr.hold.completeUpload` - Finalize multipart upload and move to final location
|
||||
- `POST /xrpc/io.atcr.hold.abortUpload` - Cancel multipart upload and cleanup temp data
|
||||
|
||||
**AppView-to-Hold Authentication:**
|
||||
- AppView uses service tokens from user's PDS (`com.atproto.server.getServiceAuth`)
|
||||
- Service tokens are scoped to specific hold DIDs and include the user's DID
|
||||
- Hold validates tokens and checks crew membership for authorization
|
||||
- Tokens cached for 50 seconds (valid for 60 seconds from PDS)
|
||||
|
||||
**Configuration:** Environment variables (see `.env.hold.example`)
|
||||
- `HOLD_PUBLIC_URL` - Public URL of hold service (required, used for did:web generation)
|
||||
- `STORAGE_DRIVER` - Storage driver type (s3, filesystem)
|
||||
- `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` - S3 credentials
|
||||
- `S3_BUCKET`, `S3_ENDPOINT` - S3 configuration
|
||||
- `HOLD_PUBLIC` - Allow public reads (default: false)
|
||||
- `HOLD_OWNER` - DID for auto-registration (optional)
|
||||
- `HOLD_OWNER` - DID for captain record creation (optional)
|
||||
- `HOLD_ALLOW_ALL_CREW` - Allow any authenticated user to register as crew (default: false)
|
||||
- `HOLD_DATABASE_PATH` - Path for embedded PDS database (required)
|
||||
- `HOLD_DATABASE_KEY_PATH` - Path for PDS signing keys (optional, generated if missing)
|
||||
|
||||
**Deployment:** Can run on Fly.io, Railway, Docker, Kubernetes, etc.
|
||||
|
||||
@@ -379,17 +482,29 @@ Manifests are stored as records with this structure:
|
||||
"$type": "io.atcr.manifest",
|
||||
"repository": "myapp",
|
||||
"digest": "sha256:abc123...",
|
||||
"holdEndpoint": "https://hold1.alice.com",
|
||||
"holdDid": "did:web:hold01.atcr.io",
|
||||
"holdEndpoint": "https://hold1.atcr.io",
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"config": { "digest": "sha256:...", "size": 1234 },
|
||||
"layers": [
|
||||
{ "digest": "sha256:...", "size": 5678 }
|
||||
],
|
||||
"manifestBlob": {
|
||||
"$type": "blob",
|
||||
"ref": { "$link": "bafyrei..." },
|
||||
"mimeType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"size": 1234
|
||||
},
|
||||
"createdAt": "2025-09-30T..."
|
||||
}
|
||||
```
|
||||
|
||||
**Key fields:**
|
||||
- `holdDid` - DID of the hold service where blobs are stored (PRIMARY reference, new)
|
||||
- `holdEndpoint` - HTTP URL of hold service (DEPRECATED, kept for backward compatibility)
|
||||
- `manifestBlob` - Reference to manifest blob in ATProto blob storage (CID format)
|
||||
|
||||
Record key = manifest digest (without algorithm prefix)
|
||||
Collection = `io.atcr.manifest`
|
||||
|
||||
@@ -405,7 +520,7 @@ ATCR uses a "sailor profile" to manage user preferences for hold (storage) selec
|
||||
```json
|
||||
{
|
||||
"$type": "io.atcr.sailor.profile",
|
||||
"defaultHold": "https://hold1.alice.com",
|
||||
"defaultHold": "did:web:hold1.alice.com",
|
||||
"createdAt": "2025-10-02T...",
|
||||
"updatedAt": "2025-10-02T..."
|
||||
}
|
||||
@@ -413,14 +528,15 @@ ATCR uses a "sailor profile" to manage user preferences for hold (storage) selec
|
||||
|
||||
**Profile Management:**
|
||||
- Created automatically on first authentication (OAuth or Basic Auth)
|
||||
- If AppView has `default_storage_endpoint` configured, profile gets that as `defaultHold`
|
||||
- `defaultHold` can be a DID (preferred, e.g., `did:web:hold01.atcr.io`) or legacy URL
|
||||
- If AppView has `default_hold_did` configured, profile gets that as `defaultHold`
|
||||
- Users can update their profile to change default hold (future: via UI)
|
||||
- Setting `defaultHold` to null opts out of defaults (use own holds or AppView default)
|
||||
|
||||
**Hold Resolution Priority** (in `findStorageEndpoint()`):
|
||||
1. **Profile's `defaultHold`** - User's explicit preference
|
||||
2. **User's `io.atcr.hold` records** - User's own holds
|
||||
3. **AppView's `default_storage_endpoint`** - Fallback default
|
||||
**Hold Resolution Priority** (in `findHoldDID()` in middleware):
|
||||
1. **Profile's `defaultHold`** - User's explicit preference (DID or URL)
|
||||
2. **User's `io.atcr.hold` records** - User's own holds (legacy BYOS model)
|
||||
3. **AppView's `default_hold_did`** - Fallback default (configured in middleware)
|
||||
|
||||
This ensures:
|
||||
- Users can join shared holds by setting their profile's `defaultHold`
|
||||
@@ -431,7 +547,7 @@ This ensures:
|
||||
### Key Design Decisions
|
||||
|
||||
1. **No fork of distribution**: Uses distribution as library, extends via middleware
|
||||
2. **Hybrid storage**: Manifests in ATProto (small, federated), blobs in S3 or BYOS (cheap, scalable)
|
||||
2. **Hybrid storage**: Manifests in ATProto (small), blobs in S3 or BYOS (cheap, scalable)
|
||||
3. **Content addressing**: Manifests stored by digest, blobs deduplicated globally
|
||||
4. **ATProto-native**: Manifests are first-class ATProto records, discoverable via AT Protocol
|
||||
5. **OCI compliant**: Fully compatible with Docker/containerd/podman
|
||||
@@ -452,7 +568,7 @@ See `.env.appview.example` for all available options. Key environment variables:
|
||||
**Server:**
|
||||
- `ATCR_HTTP_ADDR` - HTTP listen address (default: `:5000`)
|
||||
- `ATCR_BASE_URL` - Public URL for OAuth/JWT realm (auto-detected in dev)
|
||||
- `ATCR_DEFAULT_HOLD` - Default hold endpoint for blob storage (REQUIRED)
|
||||
- `ATCR_DEFAULT_HOLD_DID` - Default hold DID for blob storage (REQUIRED, e.g., `did:web:hold01.atcr.io`)
|
||||
|
||||
**Authentication:**
|
||||
- `ATCR_AUTH_KEY_PATH` - JWT signing key path (default: `/var/lib/atcr/auth/private-key.pem`)
|
||||
@@ -476,11 +592,13 @@ See `.env.hold.example` for all available options. Key environment variables:
|
||||
- `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` - S3 credentials
|
||||
- `S3_BUCKET`, `S3_ENDPOINT` - S3 configuration
|
||||
- `HOLD_PUBLIC` - Allow public reads (default: false)
|
||||
- `HOLD_OWNER` - DID for auto-registration (optional)
|
||||
- `HOLD_OWNER` - DID for captain record creation (optional)
|
||||
- `HOLD_ALLOW_ALL_CREW` - Allow any authenticated user to register as crew (default: false)
|
||||
|
||||
**Credential Helper**:
|
||||
- Token storage: `~/.atcr/oauth-token.json`
|
||||
- Contains: access token, refresh token, DPoP key (PEM), DID, handle
|
||||
- Token storage: `~/.atcr/credential-helper-token.json` (or Docker's credential store)
|
||||
- Contains: Registry JWT issued by AppView (NOT OAuth tokens)
|
||||
- OAuth session managed entirely by AppView
|
||||
|
||||
### Development Notes
|
||||
|
||||
@@ -515,12 +633,12 @@ When writing tests:
|
||||
**Modifying storage routing**:
|
||||
1. Edit `pkg/appview/storage/routing_repository.go`
|
||||
2. Update `Blobs()` method to change routing logic
|
||||
3. Consider context values: `storage.endpoint`, `atproto.did`
|
||||
3. Context is passed via RegistryContext struct (holds DID, PDS endpoint, hold DID, OAuth refresher, etc.)
|
||||
|
||||
**Changing name resolution**:
|
||||
1. Modify `pkg/atproto/resolver.go` for DID/handle resolution
|
||||
2. Update `pkg/appview/middleware/registry.go` if changing routing logic
|
||||
3. Remember: `findStorageEndpoint()` queries PDS for `io.atcr.hold` records
|
||||
3. Remember: `findHoldDID()` checks sailor profile, then `io.atcr.hold` records (legacy), then default hold DID
|
||||
|
||||
**Working with OAuth client**:
|
||||
- Client is self-contained: pass `baseURL`, it handles client ID/redirect URI/scopes
|
||||
@@ -530,11 +648,12 @@ When writing tests:
|
||||
- Client methods are consistent across authorization, token exchange, and refresh flows
|
||||
|
||||
**Adding BYOS support for a user**:
|
||||
1. User sets environment variables (storage credentials, public URL)
|
||||
2. User runs hold service with `HOLD_OWNER` set - auto-registration via OAuth
|
||||
3. Hold service creates `io.atcr.hold` + `io.atcr.hold.crew` records in PDS
|
||||
4. AppView automatically queries PDS and routes blobs to user's storage
|
||||
5. No AppView changes needed - fully decentralized
|
||||
1. User sets environment variables (storage credentials, public URL, HOLD_OWNER)
|
||||
2. User runs hold service - creates captain + crew records in embedded PDS
|
||||
3. Hold creates `io.atcr.hold.captain` + `io.atcr.hold.crew` records
|
||||
4. User sets sailor profile `defaultHold` to point to their hold
|
||||
5. AppView automatically queries hold's PDS and routes blobs to user's storage
|
||||
6. No AppView changes needed - fully decentralized
|
||||
|
||||
**Supporting a new storage backend**:
|
||||
1. Ensure driver is registered in `cmd/hold/main.go` imports
|
||||
@@ -544,11 +663,14 @@ When writing tests:
|
||||
5. Update `.env.example` with new driver's env vars
|
||||
|
||||
**Working with the database**:
|
||||
- Schema defined in `pkg/appview/db/schema.go`
|
||||
- Queries in `pkg/appview/db/queries.go`
|
||||
- Stores for OAuth, devices, sessions in separate files
|
||||
- Run migrations automatically on startup
|
||||
- Database path configurable via `ATCR_UI_DATABASE_PATH` env var
|
||||
- **Base schema** defined in `pkg/appview/db/schema.sql` - source of truth for fresh installations
|
||||
- **Migrations** in `pkg/appview/db/migrations/*.yaml` - only for ALTER/UPDATE/DELETE on existing databases
|
||||
- **Queries** in `pkg/appview/db/queries.go`
|
||||
- **Stores** for OAuth, devices, sessions in separate files
|
||||
- **Execution order**: schema.sql first, then migrations (automatically on startup)
|
||||
- **Database path** configurable via `ATCR_UI_DATABASE_PATH` env var
|
||||
- **Adding new tables**: Add to `schema.sql` only (no migration needed)
|
||||
- **Altering tables**: Create migration AND update `schema.sql` to keep them in sync
|
||||
|
||||
**Adding web UI features**:
|
||||
- Add handler in `pkg/appview/handlers/`
|
||||
@@ -559,13 +681,20 @@ When writing tests:
|
||||
|
||||
## Important Context Values
|
||||
|
||||
When working with the codebase, these context values are used for routing:
|
||||
When working with the codebase, routing information is passed via the `RegistryContext` struct (`pkg/appview/storage/context.go`):
|
||||
|
||||
- `atproto.did` - Resolved DID for the user (e.g., `did:plc:alice123`)
|
||||
- `atproto.pds` - User's PDS endpoint (e.g., `https://bsky.social`)
|
||||
- `atproto.identity` - Original identity string (handle or DID)
|
||||
- `storage.endpoint` - Storage service URL (if user has `io.atcr.registry` record)
|
||||
- `auth.did` - Authenticated DID from validated token
|
||||
- `DID` - User's DID (e.g., `did:plc:alice123`)
|
||||
- `PDSEndpoint` - User's PDS endpoint (e.g., `https://bsky.social`)
|
||||
- `HoldDID` - Hold service DID (e.g., `did:web:hold01.atcr.io`)
|
||||
- `Repository` - Image repository name (e.g., `myapp`)
|
||||
- `ATProtoClient` - Client for calling user's PDS with OAuth/Basic Auth
|
||||
- `Refresher` - OAuth token refresher for service token requests
|
||||
- `Database` - Database for metrics tracking
|
||||
- `Authorizer` - Hold authorizer for access control
|
||||
|
||||
Legacy context keys (deprecated):
|
||||
- `hold.did` - Hold DID (now in RegistryContext)
|
||||
- `auth.did` - Authenticated DID from validated token (now in auth middleware)
|
||||
|
||||
## Documentation References
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM golang:1.25.2-trixie AS builder
|
||||
FROM docker.io/golang:1.25.2-trixie AS builder
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends sqlite3 libsqlite3-dev && \
|
||||
@@ -11,6 +11,7 @@ RUN go mod download
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN go generate ./...
|
||||
RUN CGO_ENABLED=1 go build \
|
||||
-ldflags="-s -w -linkmode external -extldflags '-static'" \
|
||||
-tags sqlite_omit_load_extension \
|
||||
@@ -39,7 +40,8 @@ LABEL org.opencontainers.image.title="ATCR AppView" \
|
||||
org.opencontainers.image.documentation="https://tangled.org/@evan.jarrett.net/at-container-registry" \
|
||||
org.opencontainers.image.licenses="MIT" \
|
||||
org.opencontainers.image.version="0.1.0" \
|
||||
io.atcr.icon="https://imgs.blue/evan.jarrett.net/1TpTNrRelfloN2emuWZDrWmPT0o93bAjEnozjD6UPgoVV9m4"
|
||||
io.atcr.icon="https://imgs.blue/evan.jarrett.net/1TpTNrRelfloN2emuWZDrWmPT0o93bAjEnozjD6UPgoVV9m4" \
|
||||
io.atcr.readme="https://tangled.org/@evan.jarrett.net/at-container-registry/raw/main/docs/appview.md"
|
||||
|
||||
ENTRYPOINT ["/atcr-appview"]
|
||||
CMD ["serve"]
|
||||
|
||||
@@ -1,4 +1,8 @@
|
||||
FROM golang:1.25.2-trixie AS builder
|
||||
FROM docker.io/golang:1.25.2-trixie AS builder
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends sqlite3 libsqlite3-dev && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
@@ -7,8 +11,9 @@ RUN go mod download
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN CGO_ENABLED=0 go build \
|
||||
-ldflags="-s -w" \
|
||||
RUN CGO_ENABLED=1 go build \
|
||||
-ldflags="-s -w -linkmode external -extldflags '-static'" \
|
||||
-tags sqlite_omit_load_extension \
|
||||
-trimpath \
|
||||
-o atcr-hold ./cmd/hold
|
||||
|
||||
@@ -21,7 +26,7 @@ FROM scratch
|
||||
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||
# Copy timezone data for timestamp formatting
|
||||
COPY --from=builder /usr/share/zoneinfo /usr/share/zoneinfo
|
||||
# Copy optimized binary
|
||||
# Copy optimized binary (SQLite embedded)
|
||||
COPY --from=builder /build/atcr-hold /atcr-hold
|
||||
|
||||
# Expose default port
|
||||
@@ -35,6 +40,7 @@ LABEL org.opencontainers.image.title="ATCR Hold Service" \
|
||||
org.opencontainers.image.documentation="https://tangled.org/@evan.jarrett.net/at-container-registry" \
|
||||
org.opencontainers.image.licenses="MIT" \
|
||||
org.opencontainers.image.version="0.1.0" \
|
||||
io.atcr.icon="https://imgs.blue/evan.jarrett.net/1TpTOdtS60GdJWBYEqtK22y688jajbQ9a5kbYRFtwuqrkBAE"
|
||||
io.atcr.icon="https://imgs.blue/evan.jarrett.net/1TpTOdtS60GdJWBYEqtK22y688jajbQ9a5kbYRFtwuqrkBAE" \
|
||||
io.atcr.readme="https://tangled.org/@evan.jarrett.net/at-container-registry/raw/main/docs/hold.md"
|
||||
|
||||
ENTRYPOINT ["/atcr-hold"]
|
||||
|
||||
19
README.md
19
README.md
@@ -1,5 +1,7 @@
|
||||
# ATCR - ATProto Container Registry
|
||||
|
||||
## https://atcr.io
|
||||
|
||||
An OCI-compliant container registry that uses the AT Protocol for manifest storage and S3 for blob storage.
|
||||
|
||||
## What is ATCR?
|
||||
@@ -19,26 +21,29 @@ atcr.io/did:plc:xyz123/myapp:latest
|
||||
1. **AppView** - Registry API + web UI
|
||||
- Serves OCI Distribution API (Docker push/pull)
|
||||
- Resolves handles/DIDs to PDS endpoints
|
||||
- Routes manifests to PDS, blobs to storage
|
||||
- Routes manifests to user's PDS, blobs to hold services
|
||||
- Web interface for browsing/search
|
||||
|
||||
2. **Hold Service** - Storage service (optional BYOS)
|
||||
2. **Hold Service** - Storage service with embedded PDS (optional BYOS)
|
||||
- Each hold has a full ATProto PDS for access control (captain + crew records)
|
||||
- Identified by did:web (e.g., `did:web:hold01.atcr.io`)
|
||||
- Generates presigned URLs for S3/Storj/Minio/etc.
|
||||
- Users can deploy their own storage
|
||||
- Users can deploy their own storage and control access via crew membership
|
||||
|
||||
3. **Credential Helper** - Client authentication
|
||||
- ATProto OAuth with DPoP
|
||||
- ATProto OAuth (DPoP handled transparently)
|
||||
- Automatic authentication on first push/pull
|
||||
|
||||
**Storage model:**
|
||||
- Manifests → ATProto records (small JSON)
|
||||
- Blobs → S3 or BYOS (large binaries)
|
||||
- Manifests → ATProto records in user's PDS (small JSON, includes `holdDid` reference)
|
||||
- Blobs → Hold services via XRPC multipart upload (large binaries, stored in S3/etc.)
|
||||
- AppView uses service tokens to communicate with holds on behalf of users
|
||||
|
||||
## Features
|
||||
|
||||
- ✅ **OCI-compliant** - Works with Docker, containerd, podman
|
||||
- ✅ **Decentralized** - You own your manifest data via your PDS
|
||||
- ✅ **ATProto OAuth** - Secure authentication with DPoP
|
||||
- ✅ **ATProto OAuth** - Secure authentication (DPoP-compliant)
|
||||
- ✅ **BYOS** - Deploy your own storage service
|
||||
- ✅ **Web UI** - Browse, search, star repositories
|
||||
- ✅ **Multi-backend** - S3, Storj, Minio, Azure, GCS, filesystem
|
||||
|
||||
@@ -1,213 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/distribution/distribution/v3/configuration"
|
||||
)
|
||||
|
||||
// loadConfigFromEnv builds a complete configuration from environment variables
|
||||
// This follows the same pattern as the hold service (no config files, only env vars)
|
||||
func loadConfigFromEnv() (*configuration.Configuration, error) {
|
||||
config := &configuration.Configuration{}
|
||||
|
||||
// Version
|
||||
config.Version = configuration.MajorMinorVersion(0, 1)
|
||||
|
||||
// Logging
|
||||
config.Log = buildLogConfig()
|
||||
|
||||
// HTTP server
|
||||
httpConfig, err := buildHTTPConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build HTTP config: %w", err)
|
||||
}
|
||||
config.HTTP = httpConfig
|
||||
|
||||
// Storage (fake in-memory placeholder - all real storage is proxied)
|
||||
config.Storage = buildStorageConfig()
|
||||
|
||||
// Middleware (ATProto resolver)
|
||||
defaultHold := os.Getenv("ATCR_DEFAULT_HOLD")
|
||||
if defaultHold == "" {
|
||||
return nil, fmt.Errorf("ATCR_DEFAULT_HOLD is required")
|
||||
}
|
||||
config.Middleware = buildMiddlewareConfig(defaultHold)
|
||||
|
||||
// Auth
|
||||
baseURL := getBaseURL(httpConfig.Addr)
|
||||
authConfig, err := buildAuthConfig(baseURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build auth config: %w", err)
|
||||
}
|
||||
config.Auth = authConfig
|
||||
|
||||
// Health checks
|
||||
config.Health = buildHealthConfig()
|
||||
|
||||
return config, nil
|
||||
}
|
||||
|
||||
// buildLogConfig creates logging configuration from environment variables
|
||||
func buildLogConfig() configuration.Log {
|
||||
level := getEnvOrDefault("ATCR_LOG_LEVEL", "info")
|
||||
formatter := getEnvOrDefault("ATCR_LOG_FORMATTER", "text")
|
||||
|
||||
return configuration.Log{
|
||||
Level: configuration.Loglevel(level),
|
||||
Formatter: formatter,
|
||||
Fields: map[string]interface{}{
|
||||
"service": "atcr-appview",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// buildHTTPConfig creates HTTP server configuration from environment variables
|
||||
func buildHTTPConfig() (configuration.HTTP, error) {
|
||||
addr := getEnvOrDefault("ATCR_HTTP_ADDR", ":5000")
|
||||
debugAddr := getEnvOrDefault("ATCR_DEBUG_ADDR", ":5001")
|
||||
|
||||
return configuration.HTTP{
|
||||
Addr: addr,
|
||||
Headers: map[string][]string{
|
||||
"X-Content-Type-Options": {"nosniff"},
|
||||
},
|
||||
Debug: configuration.Debug{
|
||||
Addr: debugAddr,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// buildStorageConfig creates a fake in-memory storage config
|
||||
// This is required for distribution validation but is never actually used
|
||||
// All storage is routed through middleware to ATProto (manifests) and hold services (blobs)
|
||||
func buildStorageConfig() configuration.Storage {
|
||||
storage := configuration.Storage{}
|
||||
|
||||
// Use in-memory storage as a placeholder
|
||||
storage["inmemory"] = configuration.Parameters{}
|
||||
|
||||
// Disable upload purging
|
||||
// NOTE: Must use map[interface{}]interface{} for uploadpurging (not configuration.Parameters)
|
||||
// because distribution's validation code does a type assertion to map[interface{}]interface{}
|
||||
storage["maintenance"] = configuration.Parameters{
|
||||
"uploadpurging": map[interface{}]interface{}{
|
||||
"enabled": false,
|
||||
"age": 7 * 24 * time.Hour, // 168h
|
||||
"interval": 24 * time.Hour, // 24h
|
||||
"dryrun": false,
|
||||
},
|
||||
}
|
||||
|
||||
return storage
|
||||
}
|
||||
|
||||
// buildMiddlewareConfig creates middleware configuration
|
||||
func buildMiddlewareConfig(defaultHold string) map[string][]configuration.Middleware {
|
||||
return map[string][]configuration.Middleware{
|
||||
"registry": {
|
||||
{
|
||||
Name: "atproto-resolver",
|
||||
Options: configuration.Parameters{
|
||||
"default_storage_endpoint": defaultHold,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// buildAuthConfig creates authentication configuration from environment variables
|
||||
func buildAuthConfig(baseURL string) (configuration.Auth, error) {
|
||||
// Token configuration
|
||||
privateKeyPath := getEnvOrDefault("ATCR_AUTH_KEY_PATH", "/var/lib/atcr/auth/private-key.pem")
|
||||
certPath := getEnvOrDefault("ATCR_AUTH_CERT_PATH", "/var/lib/atcr/auth/private-key.crt")
|
||||
|
||||
// Token expiration in seconds (default: 5 minutes)
|
||||
expirationStr := getEnvOrDefault("ATCR_TOKEN_EXPIRATION", "300")
|
||||
expiration, err := strconv.Atoi(expirationStr)
|
||||
if err != nil {
|
||||
return configuration.Auth{}, fmt.Errorf("invalid ATCR_TOKEN_EXPIRATION: %w", err)
|
||||
}
|
||||
|
||||
// Auto-derive service name from base URL or use env var
|
||||
serviceName := getServiceName(baseURL)
|
||||
|
||||
// Auto-derive realm from base URL
|
||||
realm := baseURL + "/auth/token"
|
||||
|
||||
return configuration.Auth{
|
||||
"token": configuration.Parameters{
|
||||
"realm": realm,
|
||||
"service": serviceName,
|
||||
"issuer": serviceName,
|
||||
"rootcertbundle": certPath,
|
||||
"privatekey": privateKeyPath,
|
||||
"expiration": expiration,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// buildHealthConfig creates health check configuration
|
||||
func buildHealthConfig() configuration.Health {
|
||||
return configuration.Health{
|
||||
StorageDriver: configuration.StorageDriver{
|
||||
Enabled: true,
|
||||
Interval: 10 * time.Second,
|
||||
Threshold: 3,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// getBaseURL determines the base URL for the service
|
||||
// Priority: ATCR_BASE_URL env var, then derived from HTTP addr
|
||||
func getBaseURL(httpAddr string) string {
|
||||
baseURL := os.Getenv("ATCR_BASE_URL")
|
||||
if baseURL != "" {
|
||||
return baseURL
|
||||
}
|
||||
|
||||
// Auto-detect from HTTP addr
|
||||
if httpAddr[0] == ':' {
|
||||
// Just a port, assume localhost
|
||||
return fmt.Sprintf("http://127.0.0.1%s", httpAddr)
|
||||
}
|
||||
|
||||
// Full address provided
|
||||
return fmt.Sprintf("http://%s", httpAddr)
|
||||
}
|
||||
|
||||
// getServiceName extracts service name from base URL or uses env var
|
||||
func getServiceName(baseURL string) string {
|
||||
// Check env var first
|
||||
if serviceName := os.Getenv("ATCR_SERVICE_NAME"); serviceName != "" {
|
||||
return serviceName
|
||||
}
|
||||
|
||||
// Try to extract from base URL
|
||||
parsed, err := url.Parse(baseURL)
|
||||
if err == nil && parsed.Hostname() != "" {
|
||||
hostname := parsed.Hostname()
|
||||
|
||||
// Strip localhost/127.0.0.1 and use default
|
||||
if hostname == "localhost" || hostname == "127.0.0.1" {
|
||||
return "atcr.io"
|
||||
}
|
||||
|
||||
return hostname
|
||||
}
|
||||
|
||||
// Default fallback
|
||||
return "atcr.io"
|
||||
}
|
||||
|
||||
// getEnvOrDefault gets an environment variable or returns a default value
|
||||
func getEnvOrDefault(key, defaultValue string) string {
|
||||
if val := os.Getenv(key); val != "" {
|
||||
return val
|
||||
}
|
||||
return defaultValue
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -127,7 +127,9 @@ func handleGet() {
|
||||
fmt.Fprintf(os.Stderr, "Stored credentials for %s are invalid or expired\n", appViewURL)
|
||||
// Delete the invalid credentials
|
||||
delete(allCreds.Credentials, appViewURL)
|
||||
saveDeviceCredentials(configPath, allCreds)
|
||||
if err := saveDeviceCredentials(configPath, allCreds); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to save updated credentials: %v\n", err)
|
||||
}
|
||||
// Mark as not found so we re-authorize below
|
||||
found = false
|
||||
}
|
||||
|
||||
291
cmd/hold/main.go
291
cmd/hold/main.go
@@ -1,160 +1,245 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/atproto"
|
||||
"atcr.io/pkg/hold"
|
||||
indigooauth "github.com/bluesky-social/indigo/atproto/auth/oauth"
|
||||
"atcr.io/pkg/hold/oci"
|
||||
"atcr.io/pkg/hold/pds"
|
||||
"atcr.io/pkg/hold/scanner"
|
||||
"atcr.io/pkg/logging"
|
||||
"atcr.io/pkg/s3"
|
||||
|
||||
// Import storage drivers
|
||||
"github.com/distribution/distribution/v3/registry/storage/driver/factory"
|
||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/filesystem"
|
||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/s3-aws"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Load configuration from environment variables
|
||||
cfg, err := hold.LoadConfigFromEnv()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load config: %v", err)
|
||||
slog.Error("Failed to load config", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Create hold service
|
||||
service, err := hold.NewHoldService(cfg)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create hold service: %v", err)
|
||||
}
|
||||
// Initialize structured logging
|
||||
logging.InitLogger(cfg.LogLevel)
|
||||
|
||||
// Setup HTTP routes
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/health", service.HealthHandler)
|
||||
mux.HandleFunc("/register", service.HandleRegister)
|
||||
mux.HandleFunc("/presigned-url", service.HandlePresignedURL)
|
||||
mux.HandleFunc("/move", service.HandleMove)
|
||||
// Initialize embedded PDS if database path is configured
|
||||
// This must happen before creating HoldService since service needs PDS for authorization
|
||||
var holdPDS *pds.HoldPDS
|
||||
var xrpcHandler *pds.XRPCHandler
|
||||
var broadcaster *pds.EventBroadcaster
|
||||
if cfg.Database.Path != "" {
|
||||
// Generate did:web from public URL
|
||||
holdDID := pds.GenerateDIDFromURL(cfg.Server.PublicURL)
|
||||
slog.Info("Initializing embedded PDS", "did", holdDID)
|
||||
|
||||
// Multipart upload endpoints
|
||||
mux.HandleFunc("/start-multipart", service.HandleStartMultipart)
|
||||
mux.HandleFunc("/part-presigned-url", service.HandleGetPartURL)
|
||||
mux.HandleFunc("/complete-multipart", service.HandleCompleteMultipart)
|
||||
mux.HandleFunc("/abort-multipart", service.HandleAbortMultipart)
|
||||
|
||||
// Buffered multipart part upload endpoint (for when presigned URLs are disabled/unavailable)
|
||||
mux.HandleFunc("/multipart-parts/", func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPut {
|
||||
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
// Parse URL: /multipart-parts/{uploadID}/{partNumber}
|
||||
path := r.URL.Path[len("/multipart-parts/"):]
|
||||
parts := strings.Split(path, "/")
|
||||
if len(parts) != 2 {
|
||||
http.Error(w, "invalid path format, expected /multipart-parts/{uploadID}/{partNumber}", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
uploadID := parts[0]
|
||||
partNumber, err := strconv.Atoi(parts[1])
|
||||
// Initialize PDS with carstore and keys
|
||||
ctx := context.Background()
|
||||
holdPDS, err = pds.NewHoldPDS(ctx, holdDID, cfg.Server.PublicURL, cfg.Database.Path, cfg.Database.KeyPath, cfg.Registration.EnableBlueskyPosts)
|
||||
if err != nil {
|
||||
http.Error(w, fmt.Sprintf("invalid part number: %v", err), http.StatusBadRequest)
|
||||
return
|
||||
slog.Error("Failed to initialize embedded PDS", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Get DID from query param
|
||||
did := r.URL.Query().Get("did")
|
||||
// Create storage driver from config (needed for bootstrap profile avatar)
|
||||
driver, err := factory.Create(ctx, cfg.Storage.Type(), cfg.Storage.Parameters())
|
||||
if err != nil {
|
||||
slog.Error("Failed to create storage driver", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
service.HandleMultipartPartUpload(w, r, uploadID, partNumber, did, service.MultipartMgr)
|
||||
})
|
||||
// Bootstrap PDS with captain record, hold owner as first crew member, and profile
|
||||
if err := holdPDS.Bootstrap(ctx, driver, cfg.Registration.OwnerDID, cfg.Server.Public, cfg.Registration.AllowAllCrew, cfg.Registration.ProfileAvatarURL); err != nil {
|
||||
slog.Error("Failed to bootstrap PDS", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Pre-register OAuth callback route (will be populated by auto-registration)
|
||||
var oauthCallbackHandler http.HandlerFunc
|
||||
mux.HandleFunc("/auth/oauth/callback", func(w http.ResponseWriter, r *http.Request) {
|
||||
if oauthCallbackHandler != nil {
|
||||
oauthCallbackHandler(w, r)
|
||||
// Create event broadcaster for subscribeRepos firehose
|
||||
// Database path: carstore creates db.sqlite3 inside cfg.Database.Path
|
||||
var dbPath string
|
||||
if cfg.Database.Path != ":memory:" {
|
||||
dbPath = cfg.Database.Path + "/db.sqlite3"
|
||||
} else {
|
||||
http.Error(w, "OAuth callback not initialized", http.StatusServiceUnavailable)
|
||||
dbPath = ":memory:"
|
||||
}
|
||||
})
|
||||
broadcaster = pds.NewEventBroadcaster(holdDID, 100, dbPath)
|
||||
|
||||
// OAuth client metadata endpoint for ATProto OAuth
|
||||
// The hold service serves its metadata at /client-metadata.json
|
||||
// This is referenced by its client ID URL
|
||||
mux.HandleFunc("/client-metadata.json", func(w http.ResponseWriter, r *http.Request) {
|
||||
// Create a temporary config to generate metadata (indigo provides this)
|
||||
redirectURI := cfg.Server.PublicURL + "/auth/oauth/callback"
|
||||
clientID := cfg.Server.PublicURL + "/client-metadata.json"
|
||||
|
||||
// Define scopes needed for hold registration and crew management
|
||||
// Omit action parameter to allow all actions (create, update, delete)
|
||||
scopes := []string{
|
||||
"atproto",
|
||||
fmt.Sprintf("repo:%s", atproto.HoldCollection),
|
||||
fmt.Sprintf("repo:%s", atproto.HoldCrewCollection),
|
||||
fmt.Sprintf("repo:%s", atproto.SailorProfileCollection),
|
||||
// Bootstrap events from existing repo records (one-time migration)
|
||||
if err := broadcaster.BootstrapFromRepo(holdPDS); err != nil {
|
||||
slog.Warn("Failed to bootstrap events from repo", "error", err)
|
||||
}
|
||||
|
||||
config := indigooauth.NewPublicConfig(clientID, redirectURI, scopes)
|
||||
metadata := config.ClientMetadata()
|
||||
// Wire up repo event handler to broadcaster
|
||||
holdPDS.RepomgrRef().SetEventHandler(broadcaster.SetRepoEventHandler(), true)
|
||||
|
||||
// Serve as JSON
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
json.NewEncoder(w).Encode(metadata)
|
||||
})
|
||||
mux.HandleFunc("/blobs/", func(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.Method {
|
||||
case http.MethodGet, http.MethodHead:
|
||||
service.HandleProxyGet(w, r)
|
||||
case http.MethodPut:
|
||||
service.HandleProxyPut(w, r)
|
||||
default:
|
||||
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
|
||||
slog.Info("Embedded PDS initialized successfully with firehose enabled")
|
||||
} else {
|
||||
slog.Error("Database path is required for embedded PDS authorization")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Create blob store adapter and XRPC handlers
|
||||
var ociHandler *oci.XRPCHandler
|
||||
if holdPDS != nil {
|
||||
// Create storage driver from config
|
||||
ctx := context.Background()
|
||||
driver, err := factory.Create(ctx, cfg.Storage.Type(), cfg.Storage.Parameters())
|
||||
if err != nil {
|
||||
slog.Error("Failed to create storage driver", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
s3Service, err := s3.NewS3Service(cfg.Storage.Parameters(), cfg.Server.DisablePresignedURLs, cfg.Storage.Type())
|
||||
if err != nil {
|
||||
slog.Error("Failed to create S3 service", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Create PDS XRPC handler (ATProto endpoints)
|
||||
xrpcHandler = pds.NewXRPCHandler(holdPDS, *s3Service, driver, broadcaster, nil)
|
||||
|
||||
// Initialize scanner queue if scanning is enabled
|
||||
// Use interface type to ensure proper nil checking (avoid typed nil pointer issue)
|
||||
var scanQueue oci.ScanQueue
|
||||
if cfg.Scanner.Enabled {
|
||||
slog.Info("Initializing vulnerability scanner",
|
||||
"workers", cfg.Scanner.Workers,
|
||||
"vulnEnabled", cfg.Scanner.VulnEnabled,
|
||||
"vulnDBPath", cfg.Scanner.VulnDBPath)
|
||||
|
||||
// Create scanner worker
|
||||
scanWorker := scanner.NewWorker(cfg, driver, holdPDS)
|
||||
|
||||
// Create and start scanner queue (buffer size = workers * 2 for some headroom)
|
||||
bufferSize := cfg.Scanner.Workers * 2
|
||||
concreteQueue := scanner.NewQueue(cfg.Scanner.Workers, bufferSize)
|
||||
scanWorker.Start(concreteQueue)
|
||||
|
||||
// Assign to interface variable (ensures proper nil behavior)
|
||||
scanQueue = concreteQueue
|
||||
|
||||
slog.Info("Scanner queue initialized successfully")
|
||||
} else {
|
||||
slog.Info("SBOM/vulnerability scanning disabled")
|
||||
}
|
||||
|
||||
// Create OCI XRPC handler (multipart upload endpoints)
|
||||
ociHandler = oci.NewXRPCHandler(holdPDS, *s3Service, driver, cfg.Server.DisablePresignedURLs, cfg.Registration.EnableBlueskyPosts, nil, scanQueue)
|
||||
}
|
||||
|
||||
// Setup HTTP routes with chi router
|
||||
r := chi.NewRouter()
|
||||
|
||||
// Add RealIP middleware to extract real client IP from proxy headers
|
||||
r.Use(middleware.RealIP)
|
||||
|
||||
// Add logging middleware to log all HTTP requests
|
||||
r.Use(middleware.Logger)
|
||||
|
||||
// Add CORS middleware (must be before routes)
|
||||
if xrpcHandler != nil {
|
||||
r.Use(xrpcHandler.CORSMiddleware())
|
||||
}
|
||||
|
||||
// Root page
|
||||
r.Get("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
fmt.Fprintf(w, "This is a hold server. More info at https://atcr.io")
|
||||
})
|
||||
|
||||
// Register XRPC/ATProto PDS endpoints if PDS is initialized
|
||||
if xrpcHandler != nil {
|
||||
slog.Info("Registering ATProto PDS endpoints")
|
||||
xrpcHandler.RegisterHandlers(r)
|
||||
}
|
||||
|
||||
// Register OCI multipart upload endpoints
|
||||
if ociHandler != nil {
|
||||
slog.Info("Registering OCI multipart upload endpoints")
|
||||
ociHandler.RegisterHandlers(r)
|
||||
}
|
||||
|
||||
// Create server
|
||||
server := &http.Server{
|
||||
Addr: cfg.Server.Addr,
|
||||
Handler: mux,
|
||||
Handler: r,
|
||||
ReadTimeout: cfg.Server.ReadTimeout,
|
||||
WriteTimeout: cfg.Server.WriteTimeout,
|
||||
}
|
||||
|
||||
// Start server in goroutine so we can do auto-registration after it's running
|
||||
// Set up signal handling for graceful shutdown
|
||||
sigChan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
|
||||
|
||||
// Start server in goroutine
|
||||
serverErr := make(chan error, 1)
|
||||
go func() {
|
||||
log.Printf("Starting hold service on %s", cfg.Server.Addr)
|
||||
slog.Info("Starting hold service", "addr", cfg.Server.Addr)
|
||||
if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
||||
serverErr <- err
|
||||
}
|
||||
}()
|
||||
|
||||
// Give server a moment to start
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
// Update status post to "online" after server starts
|
||||
if holdPDS != nil {
|
||||
ctx := context.Background()
|
||||
|
||||
// Auto-register if owner DID is set (now that server is running)
|
||||
if cfg.Registration.OwnerDID != "" {
|
||||
if err := service.AutoRegister(&oauthCallbackHandler); err != nil {
|
||||
log.Printf("WARNING: Auto-registration failed: %v", err)
|
||||
log.Printf("You can register manually later using the /register endpoint")
|
||||
if err := holdPDS.SetStatus(ctx, "online"); err != nil {
|
||||
slog.Warn("Failed to set status post to online", "error", err)
|
||||
} else {
|
||||
log.Printf("Successfully registered hold service in PDS")
|
||||
}
|
||||
|
||||
// Reconcile allow-all crew state
|
||||
if err := service.ReconcileAllowAllCrew(&oauthCallbackHandler); err != nil {
|
||||
log.Printf("WARNING: Failed to reconcile allow-all crew state: %v", err)
|
||||
slog.Info("Status post set to online")
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for server error or shutdown
|
||||
if err := <-serverErr; err != nil {
|
||||
log.Fatalf("Server failed: %v", err)
|
||||
// Wait for signal or server error
|
||||
select {
|
||||
case err := <-serverErr:
|
||||
slog.Error("Server failed", "error", err)
|
||||
os.Exit(1)
|
||||
case sig := <-sigChan:
|
||||
slog.Info("Received signal, shutting down gracefully", "signal", sig)
|
||||
|
||||
// Update status post to "offline" before shutdown
|
||||
if holdPDS != nil {
|
||||
ctx := context.Background()
|
||||
if err := holdPDS.SetStatus(ctx, "offline"); err != nil {
|
||||
slog.Warn("Failed to set status post to offline", "error", err)
|
||||
} else {
|
||||
slog.Info("Status post set to offline")
|
||||
}
|
||||
}
|
||||
|
||||
// Close broadcaster database connection
|
||||
if broadcaster != nil {
|
||||
if err := broadcaster.Close(); err != nil {
|
||||
slog.Warn("Failed to close broadcaster database", "error", err)
|
||||
} else {
|
||||
slog.Info("Broadcaster database closed")
|
||||
}
|
||||
}
|
||||
|
||||
// Graceful shutdown with 10 second timeout
|
||||
shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := server.Shutdown(shutdownCtx); err != nil {
|
||||
slog.Error("Server shutdown error", "error", err)
|
||||
} else {
|
||||
slog.Info("Server shutdown complete")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
135
cmd/oauth-helper/main.go
Normal file
135
cmd/oauth-helper/main.go
Normal file
@@ -0,0 +1,135 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/atproto"
|
||||
"atcr.io/pkg/auth/oauth"
|
||||
indigo_oauth "github.com/bluesky-social/indigo/atproto/auth/oauth"
|
||||
)
|
||||
|
||||
func main() {
|
||||
handle := flag.String("handle", "", "Your Bluesky handle (e.g., yourname.bsky.social)")
|
||||
holdURL := flag.String("hold-url", "http://localhost:8080", "Hold service URL")
|
||||
repo := flag.String("repo", "", "Repository DID (e.g., did:web:172.28.0.3:8080)")
|
||||
collection := flag.String("collection", "io.atcr.hold.crew", "Collection to delete from")
|
||||
rkey := flag.String("rkey", "", "Record key to delete")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
if *handle == "" {
|
||||
fmt.Println("Usage: oauth-helper --handle yourname.bsky.social [options]")
|
||||
fmt.Println("\nOptions:")
|
||||
flag.PrintDefaults()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
fmt.Printf("🔐 Starting OAuth flow for %s...\n\n", *handle)
|
||||
|
||||
// Create a simple HTTP server for the callback
|
||||
mux := http.NewServeMux()
|
||||
server := &http.Server{
|
||||
Addr: ":8765",
|
||||
Handler: mux,
|
||||
}
|
||||
|
||||
// Channel to receive the result
|
||||
resultChan := make(chan *oauth.InteractiveResult, 1)
|
||||
errorChan := make(chan error, 1)
|
||||
|
||||
// Register callback handler
|
||||
registerCallback := func(handler http.HandlerFunc) error {
|
||||
mux.HandleFunc("/auth/oauth/callback", handler)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Display auth URL (will open browser)
|
||||
displayAuthURL := func(authURL string) error {
|
||||
fmt.Printf("🌐 Opening browser for authorization...\n")
|
||||
fmt.Printf(" URL: %s\n\n", authURL)
|
||||
fmt.Printf(" If the browser doesn't open, visit the URL above.\n\n")
|
||||
return oauth.OpenBrowser(authURL)
|
||||
}
|
||||
|
||||
// Start server in background
|
||||
go func() {
|
||||
if err := server.ListenAndServe(); err != http.ErrServerClosed {
|
||||
errorChan <- fmt.Errorf("server error: %w", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Give server time to start
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Run interactive OAuth flow
|
||||
go func() {
|
||||
result, err := oauth.InteractiveFlowWithCallback(
|
||||
ctx,
|
||||
"http://localhost:8765",
|
||||
*handle,
|
||||
nil, // Use default scopes
|
||||
registerCallback,
|
||||
displayAuthURL,
|
||||
)
|
||||
if err != nil {
|
||||
errorChan <- err
|
||||
return
|
||||
}
|
||||
resultChan <- result
|
||||
}()
|
||||
|
||||
// Wait for result
|
||||
var result *oauth.InteractiveResult
|
||||
select {
|
||||
case result = <-resultChan:
|
||||
fmt.Printf("✅ OAuth successful!\n\n")
|
||||
case err := <-errorChan:
|
||||
log.Fatalf("❌ OAuth failed: %v\n", err)
|
||||
case <-time.After(5 * time.Minute):
|
||||
log.Fatalf("❌ OAuth timed out\n")
|
||||
}
|
||||
|
||||
// Shutdown server
|
||||
server.Shutdown(ctx)
|
||||
|
||||
// Print session information
|
||||
fmt.Printf("DID: %s\n", result.SessionData.AccountDID)
|
||||
fmt.Printf("Access Token: %s\n", result.SessionData.AccessToken)
|
||||
fmt.Printf("DPoP Key: %s\n\n", result.SessionData.DPoPPrivateKeyMultibase)
|
||||
|
||||
// Generate DPoP proof for deleteRecord endpoint if all params provided
|
||||
if *repo != "" && *rkey != "" {
|
||||
deleteURL := fmt.Sprintf("%s%s?repo=%s&collection=%s&rkey=%s",
|
||||
*holdURL, atproto.RepoDeleteRecord, *repo, *collection, *rkey)
|
||||
|
||||
dpopProof, err := generateDPoPProof(result.Session, "POST", deleteURL)
|
||||
if err != nil {
|
||||
log.Fatalf("❌ Failed to generate DPoP proof: %v\n", err)
|
||||
}
|
||||
|
||||
fmt.Printf("📋 Ready-to-use curl command:\n\n")
|
||||
fmt.Printf("curl -X POST \\\n")
|
||||
fmt.Printf(" -H \"Authorization: DPoP %s\" \\\n", result.SessionData.AccessToken)
|
||||
fmt.Printf(" -H \"DPoP: %s\" \\\n", dpopProof)
|
||||
fmt.Printf(" \"%s\"\n", deleteURL)
|
||||
} else {
|
||||
fmt.Printf("💡 To generate a curl command for deleteRecord, provide:\n")
|
||||
fmt.Printf(" --repo <did>\n")
|
||||
fmt.Printf(" --collection <collection>\n")
|
||||
fmt.Printf(" --rkey <rkey>\n")
|
||||
}
|
||||
}
|
||||
|
||||
// generateDPoPProof generates a DPoP proof JWT for a specific request
|
||||
func generateDPoPProof(session *indigo_oauth.ClientSession, method, reqURL string) (string, error) {
|
||||
// Use the session's NewHostDPoP method to generate the proof
|
||||
return session.NewHostDPoP(method, reqURL)
|
||||
}
|
||||
@@ -16,19 +16,43 @@
|
||||
# REQUIRED: Update with your domain
|
||||
APPVIEW_DOMAIN=atcr.io
|
||||
|
||||
# Hold service domain (presigned URL generator)
|
||||
# REQUIRED: Update with your domain
|
||||
HOLD_DOMAIN=hold01.atcr.io
|
||||
|
||||
# ==============================================================================
|
||||
# Hold Service Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Hold service domain (REQUIRED)
|
||||
# The hostname where the hold service will be accessible
|
||||
# Used by docker-compose.prod.yml to derive:
|
||||
# - HOLD_PUBLIC_URL: https://${HOLD_DOMAIN}
|
||||
# - ATCR_DEFAULT_HOLD_DID: did:web:${HOLD_DOMAIN}
|
||||
# Example: hold01.atcr.io
|
||||
HOLD_DOMAIN=hold01.atcr.io
|
||||
|
||||
# Your ATProto DID (REQUIRED for hold registration)
|
||||
# Get your DID from: https://bsky.social/xrpc/com.atproto.identity.resolveHandle?handle=yourhandle.bsky.social
|
||||
# Example: did:plc:abc123xyz789
|
||||
HOLD_OWNER=did:plc:pddp4xt5lgnv2qsegbzzs4xg
|
||||
|
||||
# Directory path for embedded PDS carstore (SQLite database)
|
||||
# Default: /var/lib/atcr-hold
|
||||
# If empty, embedded PDS is disabled
|
||||
#
|
||||
# Note: This should be a directory path, NOT a file path
|
||||
# Carstore creates db.sqlite3 inside this directory
|
||||
#
|
||||
# The embedded PDS makes the hold a proper ATProto user with:
|
||||
# - did:web identity (derived from HOLD_DOMAIN)
|
||||
# - DID document at /.well-known/did.json
|
||||
# - XRPC endpoints for crew management
|
||||
# - ATProto blob endpoints (wraps existing presigned URL logic)
|
||||
#
|
||||
# Example: For HOLD_DOMAIN=hold01.atcr.io, the hold becomes did:web:hold01.atcr.io
|
||||
HOLD_DATABASE_DIR=/var/lib/atcr-hold
|
||||
|
||||
# Path to signing key (auto-generated on first run if missing)
|
||||
# Default: {HOLD_DATABASE_DIR}/signing.key
|
||||
# HOLD_KEY_PATH=/var/lib/atcr-hold/signing.key
|
||||
|
||||
# Allow public blob reads (pulls) without authentication
|
||||
# - true: Anyone can pull images (read-only)
|
||||
# - false: Only authenticated users can pull
|
||||
@@ -63,6 +87,53 @@ HOLD_PUBLIC=false
|
||||
# Default: false
|
||||
HOLD_ALLOW_ALL_CREW=false
|
||||
|
||||
# Enable Bluesky posts when manifests are pushed
|
||||
# When enabled, the hold service creates Bluesky posts announcing new container
|
||||
# image pushes. Posts include image name, tag, size, and layer count.
|
||||
#
|
||||
# - true: Create Bluesky posts for manifest uploads
|
||||
# - false: Silent operation (no Bluesky posts)
|
||||
#
|
||||
# Note: This requires the hold owner to have OAuth credentials for posting.
|
||||
# See docs/BLUESKY_MANIFEST_POSTS.md for setup instructions.
|
||||
#
|
||||
# Default: false
|
||||
HOLD_BLUESKY_POSTS_ENABLED=true
|
||||
|
||||
# ==============================================================================
|
||||
# Scanner Configuration (SBOM & Vulnerability Scanning)
|
||||
# ==============================================================================
|
||||
|
||||
# Enable automatic SBOM generation and vulnerability scanning on image push
|
||||
# When enabled, the hold service will:
|
||||
# 1. Generate SBOM (Software Bill of Materials) using Syft
|
||||
# 2. Scan for vulnerabilities using Grype
|
||||
# 3. Store results as ORAS artifacts (OCI referrers pattern)
|
||||
# 4. Display vulnerability counts on repository pages in AppView
|
||||
#
|
||||
# Default: true
|
||||
HOLD_SBOM_ENABLED=true
|
||||
|
||||
# Number of concurrent scanner worker threads
|
||||
# Increase for faster scanning on multi-core systems
|
||||
# Default: 2
|
||||
HOLD_SBOM_WORKERS=2
|
||||
|
||||
# Enable vulnerability scanning with Grype
|
||||
# If false, only SBOM generation (Syft) will run
|
||||
# Default: true
|
||||
HOLD_VULN_ENABLED=true
|
||||
|
||||
# Path to Grype vulnerability database
|
||||
# Database is auto-downloaded and cached at this location on first run
|
||||
# Default: /var/lib/atcr-hold/grype-db
|
||||
HOLD_VULN_DB_PATH=/var/lib/atcr-hold/grype-db
|
||||
|
||||
# How often to update vulnerability database
|
||||
# Examples: 24h, 12h, 48h
|
||||
# Default: 24h
|
||||
HOLD_VULN_DB_UPDATE_INTERVAL=24h
|
||||
|
||||
# ==============================================================================
|
||||
# S3/UpCloud Object Storage Configuration
|
||||
# ==============================================================================
|
||||
@@ -79,8 +150,9 @@ AWS_SECRET_ACCESS_KEY=
|
||||
|
||||
# S3 Region (for distribution S3 driver)
|
||||
# UpCloud regions: us-chi1, us-nyc1, de-fra1, uk-lon1, sg-sin1, etc.
|
||||
# Default: us-chi1
|
||||
S3_REGION=us-chi1
|
||||
# Note: Use AWS_REGION (not S3_REGION) - this is what the hold service expects
|
||||
# Default: us-east-1
|
||||
AWS_REGION=us-chi1
|
||||
|
||||
# S3 Bucket Name
|
||||
# Create this bucket in UpCloud Object Storage
|
||||
@@ -105,21 +177,35 @@ S3_ENDPOINT=https://6vmss.upcloudobjects.com
|
||||
# AppView Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Default hold service DID (derived from HOLD_DOMAIN in docker-compose.prod.yml)
|
||||
# Uncomment to override if you want to use a different hold service as the default
|
||||
# ATCR_DEFAULT_HOLD_DID=did:web:some-other-hold.example.com
|
||||
|
||||
# JWT token expiration in seconds
|
||||
# Default: 300 (5 minutes)
|
||||
ATCR_TOKEN_EXPIRATION=300
|
||||
|
||||
# OAuth client display name (shown in authorization screens)
|
||||
# Default: AT Container Registry
|
||||
# ATCR_CLIENT_NAME=AT Container Registry
|
||||
|
||||
# Enable web UI
|
||||
# Default: true
|
||||
ATCR_UI_ENABLED=true
|
||||
|
||||
# Skip database migrations on startup
|
||||
# Default: false (migrations are applied on startup)
|
||||
# Set to "true" only for testing or when migrations are managed externally
|
||||
# Production: Keep as "false" to ensure migrations are applied
|
||||
SKIP_DB_MIGRATIONS=false
|
||||
|
||||
# ==============================================================================
|
||||
# Logging Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Log level: debug, info, warn, error
|
||||
# Default: info
|
||||
ATCR_LOG_LEVEL=info
|
||||
ATCR_LOG_LEVEL=debug
|
||||
|
||||
# Log formatter: text, json
|
||||
# Default: text
|
||||
@@ -177,7 +263,9 @@ ATCR_BACKFILL_INTERVAL=1h
|
||||
# ☐ Set APPVIEW_DOMAIN (e.g., atcr.io)
|
||||
# ☐ Set HOLD_DOMAIN (e.g., hold01.atcr.io)
|
||||
# ☐ Set HOLD_OWNER (your ATProto DID)
|
||||
# ☐ Set HOLD_DATABASE_DIR (default: /var/lib/atcr-hold) - enables embedded PDS
|
||||
# ☐ Set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
|
||||
# ☐ Set AWS_REGION (e.g., us-chi1)
|
||||
# ☐ Set S3_BUCKET (created in UpCloud Object Storage)
|
||||
# ☐ Set S3_ENDPOINT (UpCloud endpoint or custom domain)
|
||||
# ☐ Configured DNS records:
|
||||
@@ -189,5 +277,6 @@ ATCR_BACKFILL_INTERVAL=1h
|
||||
#
|
||||
# After starting:
|
||||
# ☐ Complete hold OAuth registration (run: /opt/atcr/get-hold-oauth.sh)
|
||||
# ☐ Verify hold PDS: curl https://hold01.atcr.io/.well-known/did.json
|
||||
# ☐ Test registry: docker pull atcr.io/test/image
|
||||
# ☐ Monitor logs: /opt/atcr/logs.sh
|
||||
|
||||
@@ -50,8 +50,8 @@ services:
|
||||
ATCR_BASE_URL: https://${APPVIEW_DOMAIN:-atcr.io}
|
||||
ATCR_SERVICE_NAME: ${APPVIEW_DOMAIN:-atcr.io}
|
||||
|
||||
# Storage configuration
|
||||
ATCR_DEFAULT_HOLD: https://${HOLD_DOMAIN:-hold01.atcr.io}
|
||||
# Storage configuration (derived from HOLD_DOMAIN)
|
||||
ATCR_DEFAULT_HOLD_DID: ${ATCR_DEFAULT_HOLD_DID:-did:web:${HOLD_DOMAIN:-hold01.atcr.io}}
|
||||
|
||||
# Authentication
|
||||
ATCR_AUTH_KEY_PATH: /var/lib/atcr/auth/private-key.pem
|
||||
@@ -91,32 +91,46 @@ services:
|
||||
container_name: atcr-hold
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
# Hold service configuration
|
||||
HOLD_PUBLIC_URL: https://${HOLD_DOMAIN:-hold01.atcr.io}
|
||||
# Hold service configuration (derived from HOLD_DOMAIN)
|
||||
HOLD_PUBLIC_URL: ${HOLD_PUBLIC_URL:-https://${HOLD_DOMAIN:-hold01.atcr.io}}
|
||||
HOLD_SERVER_ADDR: :8080
|
||||
HOLD_ALLOW_ALL_CREW: ${HOLD_ALLOW_ALL_CREW:-false}
|
||||
HOLD_PUBLIC: ${HOLD_PUBLIC:-false}
|
||||
HOLD_OWNER: ${HOLD_OWNER}
|
||||
HOLD_OWNER: ${HOLD_OWNER:-}
|
||||
HOLD_BLUESKY_POSTS_ENABLED: ${HOLD_BLUESKY_POSTS_ENABLED:-true}
|
||||
|
||||
# Embedded PDS configuration
|
||||
HOLD_DATABASE_DIR: ${HOLD_DATABASE_DIR:-/var/lib/atcr-hold}
|
||||
# HOLD_KEY_PATH: ${HOLD_KEY_PATH} # Optional, defaults to {HOLD_DATABASE_DIR}/signing.key
|
||||
|
||||
# Storage driver
|
||||
STORAGE_DRIVER: ${STORAGE_DRIVER:-s3}
|
||||
|
||||
# S3/UpCloud Object Storage configuration
|
||||
AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID}
|
||||
AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY}
|
||||
AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID:-}
|
||||
AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY:-}
|
||||
AWS_REGION: ${AWS_REGION:-us-chi1}
|
||||
S3_BUCKET: ${S3_BUCKET:-atcr-blobs}
|
||||
S3_ENDPOINT: ${S3_ENDPOINT}
|
||||
S3_REGION_ENDPOINT: ${S3_REGION_ENDPOINT}
|
||||
S3_ENDPOINT: ${S3_ENDPOINT:-}
|
||||
S3_REGION_ENDPOINT: ${S3_REGION_ENDPOINT:-}
|
||||
|
||||
# Scanner configuration (SBOM & Vulnerability Scanning)
|
||||
HOLD_SBOM_ENABLED: ${HOLD_SBOM_ENABLED:-true}
|
||||
HOLD_SBOM_WORKERS: ${HOLD_SBOM_WORKERS:-2}
|
||||
HOLD_VULN_ENABLED: ${HOLD_VULN_ENABLED:-true}
|
||||
HOLD_VULN_DB_PATH: ${HOLD_VULN_DB_PATH:-/var/lib/atcr-hold/grype-db}
|
||||
HOLD_VULN_DB_UPDATE_INTERVAL: ${HOLD_VULN_DB_UPDATE_INTERVAL:-24h}
|
||||
|
||||
# Logging
|
||||
ATCR_LOG_LEVEL: ${ATCR_LOG_LEVEL:-debug}
|
||||
ATCR_LOG_FORMATTER: ${ATCR_LOG_FORMATTER:-text}
|
||||
|
||||
# Optional: Filesystem storage (comment out S3 vars above)
|
||||
# STORAGE_DRIVER: filesystem
|
||||
# STORAGE_ROOT_DIR: /var/lib/atcr/hold
|
||||
volumes:
|
||||
# Only needed for filesystem driver
|
||||
# - atcr-hold-data:/var/lib/atcr/hold
|
||||
# OAuth token storage for hold registration
|
||||
- atcr-hold-tokens:/root/.atcr
|
||||
# PDS data (carstore SQLite + signing keys)
|
||||
- atcr-hold-data:/var/lib/atcr-hold
|
||||
networks:
|
||||
- atcr-network
|
||||
healthcheck:
|
||||
@@ -131,7 +145,7 @@ networks:
|
||||
driver: bridge
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 172.28.0.0/24
|
||||
- subnet: 172.29.0.0/24
|
||||
|
||||
volumes:
|
||||
caddy_data:
|
||||
|
||||
@@ -130,11 +130,13 @@ if [ -f "deploy/.env.prod.template" ] && [ ! -f "$ATCR_DIR/.env" ]; then
|
||||
log_warn "IMPORTANT: Edit $ATCR_DIR/.env with your configuration!"
|
||||
fi
|
||||
|
||||
# Create systemd service
|
||||
log_info "Creating systemd service..."
|
||||
cat > /etc/systemd/system/atcr.service <<'EOF'
|
||||
# Create systemd services (caddy, appview, hold)
|
||||
log_info "Creating systemd services..."
|
||||
|
||||
# Caddy service (reverse proxy for both appview and hold)
|
||||
cat > /etc/systemd/system/atcr-caddy.service <<'EOF'
|
||||
[Unit]
|
||||
Description=ATCR Container Registry
|
||||
Description=ATCR Caddy Reverse Proxy
|
||||
Requires=docker.service
|
||||
After=docker.service network-online.target
|
||||
Wants=network-online.target
|
||||
@@ -145,14 +147,76 @@ RemainAfterExit=yes
|
||||
WorkingDirectory=/opt/atcr
|
||||
EnvironmentFile=/opt/atcr/.env
|
||||
|
||||
# Start containers
|
||||
ExecStart=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml up -d
|
||||
# Start caddy container
|
||||
ExecStart=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml up -d caddy
|
||||
|
||||
# Stop containers
|
||||
ExecStop=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml down
|
||||
# Stop caddy container
|
||||
ExecStop=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml stop caddy
|
||||
|
||||
# Restart containers
|
||||
ExecReload=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml restart
|
||||
# Restart caddy container
|
||||
ExecReload=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml restart caddy
|
||||
|
||||
# Always restart on failure
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
# AppView service (registry + web UI)
|
||||
cat > /etc/systemd/system/atcr-appview.service <<'EOF'
|
||||
[Unit]
|
||||
Description=ATCR AppView (Registry + Web UI)
|
||||
Requires=docker.service atcr-caddy.service
|
||||
After=docker.service network-online.target atcr-caddy.service
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
WorkingDirectory=/opt/atcr
|
||||
EnvironmentFile=/opt/atcr/.env
|
||||
|
||||
# Start appview container
|
||||
ExecStart=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml up -d atcr-appview
|
||||
|
||||
# Stop appview container
|
||||
ExecStop=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml stop atcr-appview
|
||||
|
||||
# Restart appview container
|
||||
ExecReload=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml restart atcr-appview
|
||||
|
||||
# Always restart on failure
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
# Hold service (storage backend)
|
||||
cat > /etc/systemd/system/atcr-hold.service <<'EOF'
|
||||
[Unit]
|
||||
Description=ATCR Hold (Storage Service)
|
||||
Requires=docker.service atcr-caddy.service
|
||||
After=docker.service network-online.target atcr-caddy.service
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
WorkingDirectory=/opt/atcr
|
||||
EnvironmentFile=/opt/atcr/.env
|
||||
|
||||
# Start hold container
|
||||
ExecStart=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml up -d atcr-hold
|
||||
|
||||
# Stop hold container
|
||||
ExecStop=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml stop atcr-hold
|
||||
|
||||
# Restart hold container
|
||||
ExecReload=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml restart atcr-hold
|
||||
|
||||
# Always restart on failure
|
||||
Restart=on-failure
|
||||
@@ -166,10 +230,12 @@ EOF
|
||||
log_info "Reloading systemd daemon..."
|
||||
systemctl daemon-reload
|
||||
|
||||
# Enable service (but don't start yet - user needs to configure .env)
|
||||
systemctl enable atcr.service
|
||||
# Enable all services (but don't start yet - user needs to configure .env)
|
||||
systemctl enable atcr-caddy.service
|
||||
systemctl enable atcr-appview.service
|
||||
systemctl enable atcr-hold.service
|
||||
|
||||
log_info "Systemd service created and enabled"
|
||||
log_info "Systemd services created and enabled"
|
||||
|
||||
# Create helper scripts
|
||||
log_info "Creating helper scripts..."
|
||||
@@ -193,14 +259,6 @@ docker compose -f deploy/docker-compose.prod.yml logs -f "$@"
|
||||
EOF
|
||||
chmod +x "$ATCR_DIR/logs.sh"
|
||||
|
||||
# Script to get hold OAuth URL
|
||||
cat > "$ATCR_DIR/get-hold-oauth.sh" <<'EOF'
|
||||
#!/bin/bash
|
||||
echo "Checking atcr-hold logs for OAuth registration URL..."
|
||||
docker logs atcr-hold 2>&1 | grep -i "oauth\|authorization\|visit\|http" | tail -20
|
||||
EOF
|
||||
chmod +x "$ATCR_DIR/get-hold-oauth.sh"
|
||||
|
||||
log_info "Helper scripts created in $ATCR_DIR"
|
||||
|
||||
# Print completion message
|
||||
@@ -241,29 +299,35 @@ echo " CNAME blobs.atcr.io → atcr.us-chi1.upcloudobjects.com (gray cloud
|
||||
|
||||
cat <<'EOF'
|
||||
|
||||
4. Start ATCR:
|
||||
systemctl start atcr
|
||||
4. Start ATCR services:
|
||||
systemctl start atcr-caddy atcr-appview atcr-hold
|
||||
|
||||
5. Complete Hold OAuth registration:
|
||||
/opt/atcr/get-hold-oauth.sh
|
||||
|
||||
Visit the OAuth URL in your browser to authorize the hold service.
|
||||
|
||||
6. Check status:
|
||||
systemctl status atcr
|
||||
5. Check status:
|
||||
systemctl status atcr-caddy
|
||||
systemctl status atcr-appview
|
||||
systemctl status atcr-hold
|
||||
docker ps
|
||||
/opt/atcr/logs.sh
|
||||
|
||||
Helper Scripts:
|
||||
/opt/atcr/rebuild.sh - Rebuild and restart containers
|
||||
/opt/atcr/logs.sh [service] - View logs (e.g., logs.sh atcr-hold)
|
||||
/opt/atcr/get-hold-oauth.sh - Get hold OAuth URL
|
||||
|
||||
Service Management:
|
||||
systemctl start atcr - Start ATCR
|
||||
systemctl stop atcr - Stop ATCR
|
||||
systemctl restart atcr - Restart ATCR
|
||||
systemctl status atcr - Check status
|
||||
systemctl start atcr-caddy - Start Caddy reverse proxy
|
||||
systemctl start atcr-appview - Start AppView (registry + UI)
|
||||
systemctl start atcr-hold - Start Hold (storage service)
|
||||
|
||||
systemctl stop atcr-appview - Stop AppView only
|
||||
systemctl stop atcr-hold - Stop Hold only
|
||||
systemctl stop atcr-caddy - Stop all (stops reverse proxy)
|
||||
|
||||
systemctl restart atcr-appview - Restart AppView
|
||||
systemctl restart atcr-hold - Restart Hold
|
||||
|
||||
systemctl status atcr-caddy - Check Caddy status
|
||||
systemctl status atcr-appview - Check AppView status
|
||||
systemctl status atcr-hold - Check Hold status
|
||||
|
||||
Documentation:
|
||||
https://tangled.org/@evan.jarrett.net/at-container-registry
|
||||
|
||||
55
deploy/request-crawl.sh
Executable file
55
deploy/request-crawl.sh
Executable file
@@ -0,0 +1,55 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Request crawl for a PDS from the Bluesky relay
|
||||
#
|
||||
# Usage: ./request-crawl.sh <hostname> [relay-url]
|
||||
# Example: ./request-crawl.sh hold01.atcr.io
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
DEFAULT_RELAY="https://bsky.network/xrpc/com.atproto.sync.requestCrawl"
|
||||
|
||||
# Parse arguments
|
||||
HOSTNAME="${1:-}"
|
||||
RELAY_URL="${2:-$DEFAULT_RELAY}"
|
||||
|
||||
# Validate hostname
|
||||
if [ -z "$HOSTNAME" ]; then
|
||||
echo "Error: hostname is required" >&2
|
||||
echo "" >&2
|
||||
echo "Usage: $0 <hostname> [relay-url]" >&2
|
||||
echo "Example: $0 hold01.atcr.io" >&2
|
||||
echo "" >&2
|
||||
echo "Options:" >&2
|
||||
echo " hostname Hostname of the PDS to request crawl for (required)" >&2
|
||||
echo " relay-url Relay URL to send crawl request to (default: $DEFAULT_RELAY)" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Log what we're doing
|
||||
echo "Requesting crawl for hostname: $HOSTNAME"
|
||||
echo "Sending to relay: $RELAY_URL"
|
||||
|
||||
# Make the request
|
||||
RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "$RELAY_URL" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"hostname\":\"$HOSTNAME\"}")
|
||||
|
||||
# Split response and status code
|
||||
HTTP_BODY=$(echo "$RESPONSE" | head -n -1)
|
||||
HTTP_CODE=$(echo "$RESPONSE" | tail -n 1)
|
||||
|
||||
# Check response
|
||||
if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ]; then
|
||||
echo "✅ Success! Crawl requested for $HOSTNAME"
|
||||
if [ -n "$HTTP_BODY" ]; then
|
||||
echo "Response: $HTTP_BODY"
|
||||
fi
|
||||
else
|
||||
echo "❌ Failed with status $HTTP_CODE" >&2
|
||||
if [ -n "$HTTP_BODY" ]; then
|
||||
echo "Response: $HTTP_BODY" >&2
|
||||
fi
|
||||
exit 1
|
||||
fi
|
||||
@@ -13,12 +13,14 @@ services:
|
||||
environment:
|
||||
# Server configuration
|
||||
ATCR_HTTP_ADDR: :5000
|
||||
ATCR_DEFAULT_HOLD: http://atcr-hold:8080
|
||||
ATCR_DEFAULT_HOLD_DID: did:web:172.28.0.3:8080
|
||||
# UI configuration
|
||||
ATCR_UI_ENABLED: true
|
||||
ATCR_BACKFILL_ENABLED: true
|
||||
# Test mode - fallback to default hold when user's hold is unreachable
|
||||
TEST_MODE: true
|
||||
# Logging
|
||||
ATCR_LOG_LEVEL: info
|
||||
ATCR_LOG_LEVEL: debug
|
||||
volumes:
|
||||
# Auth keys (JWT signing keys)
|
||||
# - atcr-auth:/var/lib/atcr/auth
|
||||
@@ -48,6 +50,12 @@ services:
|
||||
# STORAGE_ROOT_DIR: /var/lib/atcr/hold
|
||||
TEST_MODE: true
|
||||
# DISABLE_PRESIGNED_URLS: true
|
||||
# Scanner configuration
|
||||
HOLD_SBOM_ENABLED: true
|
||||
HOLD_SBOM_WORKERS: 2
|
||||
HOLD_VULN_ENABLED: true
|
||||
# Logging
|
||||
ATCR_LOG_LEVEL: debug
|
||||
# Storage config comes from env_file (STORAGE_DRIVER, AWS_*, S3_*)
|
||||
build:
|
||||
context: .
|
||||
@@ -56,8 +64,9 @@ services:
|
||||
container_name: atcr-hold
|
||||
ports:
|
||||
- "8080:8080"
|
||||
# volumes:
|
||||
# - atcr-hold:/var/lib/atcr/hold
|
||||
volumes:
|
||||
# PDS data (carstore SQLite + signing keys)
|
||||
- atcr-hold:/var/lib/atcr-hold
|
||||
restart: unless-stopped
|
||||
dns:
|
||||
- 8.8.8.8
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,631 +0,0 @@
|
||||
# ATCR AppView UI - Version 1 Specification
|
||||
|
||||
## Overview
|
||||
|
||||
The ATCR AppView UI provides a web interface for discovering, managing, and configuring container images in the ATCR registry. Version 1 focuses on three core pages that leverage existing functionality:
|
||||
|
||||
1. **Front Page** - Federated image discovery via firehose
|
||||
2. **Settings Page** - Profile and hold configuration
|
||||
3. **Personal Page** - Manage your images and tags
|
||||
|
||||
## Architecture
|
||||
|
||||
### Tech Stack
|
||||
|
||||
- **Backend:** Go (existing AppView codebase)
|
||||
- **Frontend:** TBD (Go templates/Templ or separate SPA)
|
||||
- **Database:** SQLite (firehose data cache)
|
||||
- **Styling:** TBD (plain CSS, Tailwind, etc.)
|
||||
- **Authentication:** OAuth with DPoP (reuse existing implementation)
|
||||
|
||||
### Components
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Web UI (Browser) │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ AppView HTTP Server │
|
||||
│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │
|
||||
│ │ UI Endpoints │ │ OCI API │ │ OAuth Server │ │
|
||||
│ │ /ui/* │ │ /v2/* │ │ /auth/* │ │
|
||||
│ └──────────────┘ └──────────────┘ └──────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
┌─────────┴─────────┐
|
||||
▼ ▼
|
||||
┌──────────────────┐ ┌──────────────────┐
|
||||
│ SQLite Database │ │ ATProto Client │
|
||||
│ (Firehose cache) │ │ (PDS operations) │
|
||||
└──────────────────┘ └──────────────────┘
|
||||
▲
|
||||
┌──────────────────┐ │
|
||||
│ Firehose Worker │───────────┘
|
||||
│ (Background) │
|
||||
└──────────────────┘
|
||||
▲
|
||||
│
|
||||
┌──────────────────┐
|
||||
│ ATProto Firehose │
|
||||
│ (Jetstream/Relay)│
|
||||
└──────────────────┘
|
||||
```
|
||||
|
||||
## Database Schema
|
||||
|
||||
SQLite database for caching firehose data and enabling fast queries.
|
||||
|
||||
### Tables
|
||||
|
||||
**users**
|
||||
```sql
|
||||
CREATE TABLE users (
|
||||
did TEXT PRIMARY KEY,
|
||||
handle TEXT NOT NULL,
|
||||
pds_endpoint TEXT NOT NULL,
|
||||
last_seen TIMESTAMP NOT NULL,
|
||||
UNIQUE(handle)
|
||||
);
|
||||
CREATE INDEX idx_users_handle ON users(handle);
|
||||
```
|
||||
|
||||
**manifests**
|
||||
```sql
|
||||
CREATE TABLE manifests (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
did TEXT NOT NULL,
|
||||
repository TEXT NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
hold_endpoint TEXT NOT NULL,
|
||||
schema_version INTEGER NOT NULL,
|
||||
media_type TEXT NOT NULL,
|
||||
config_digest TEXT,
|
||||
config_size INTEGER,
|
||||
raw_manifest TEXT NOT NULL, -- JSON blob
|
||||
created_at TIMESTAMP NOT NULL,
|
||||
UNIQUE(did, repository, digest),
|
||||
FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX idx_manifests_did_repo ON manifests(did, repository);
|
||||
CREATE INDEX idx_manifests_created_at ON manifests(created_at DESC);
|
||||
CREATE INDEX idx_manifests_digest ON manifests(digest);
|
||||
```
|
||||
|
||||
**layers**
|
||||
```sql
|
||||
CREATE TABLE layers (
|
||||
manifest_id INTEGER NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
size INTEGER NOT NULL,
|
||||
media_type TEXT NOT NULL,
|
||||
layer_index INTEGER NOT NULL,
|
||||
PRIMARY KEY(manifest_id, layer_index),
|
||||
FOREIGN KEY(manifest_id) REFERENCES manifests(id) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX idx_layers_digest ON layers(digest);
|
||||
```
|
||||
|
||||
**tags**
|
||||
```sql
|
||||
CREATE TABLE tags (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
did TEXT NOT NULL,
|
||||
repository TEXT NOT NULL,
|
||||
tag TEXT NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
created_at TIMESTAMP NOT NULL,
|
||||
UNIQUE(did, repository, tag),
|
||||
FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX idx_tags_did_repo ON tags(did, repository);
|
||||
```
|
||||
|
||||
**firehose_cursor**
|
||||
```sql
|
||||
CREATE TABLE firehose_cursor (
|
||||
id INTEGER PRIMARY KEY CHECK (id = 1),
|
||||
cursor INTEGER NOT NULL,
|
||||
updated_at TIMESTAMP NOT NULL
|
||||
);
|
||||
```
|
||||
|
||||
## Firehose Worker
|
||||
|
||||
Background goroutine that subscribes to ATProto firehose and populates the database.
|
||||
|
||||
### Implementation
|
||||
|
||||
```go
|
||||
// pkg/ui/firehose/worker.go
|
||||
|
||||
type Worker struct {
|
||||
db *sql.DB
|
||||
jetstream *JetstreamClient
|
||||
resolver *atproto.Resolver
|
||||
stopCh chan struct{}
|
||||
}
|
||||
|
||||
func (w *Worker) Start() error {
|
||||
// Load cursor from database
|
||||
cursor := w.loadCursor()
|
||||
|
||||
// Subscribe to firehose
|
||||
events := w.jetstream.Subscribe(cursor, []string{
|
||||
"io.atcr.manifest",
|
||||
"io.atcr.tag",
|
||||
})
|
||||
|
||||
for {
|
||||
select {
|
||||
case event := <-events:
|
||||
w.handleEvent(event)
|
||||
case <-w.stopCh:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Worker) handleEvent(event FirehoseEvent) error {
|
||||
switch event.Collection {
|
||||
case "io.atcr.manifest":
|
||||
return w.handleManifest(event)
|
||||
case "io.atcr.tag":
|
||||
return w.handleTag(event)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
### Event Handling
|
||||
|
||||
**Manifest create:**
|
||||
- Resolve DID → handle, PDS endpoint
|
||||
- Insert/update user record
|
||||
- Parse manifest JSON
|
||||
- Insert manifest record
|
||||
- Insert layer records
|
||||
|
||||
**Tag create/update:**
|
||||
- Insert/update tag record
|
||||
- Link to existing manifest
|
||||
|
||||
**Record deletion:**
|
||||
- Delete from database (cascade handles related records)
|
||||
|
||||
### Firehose Connection
|
||||
|
||||
Use Jetstream (bluesky-social/jetstream) or connect directly to relay:
|
||||
- **Jetstream:** Websocket to `wss://jetstream.atproto.tools/subscribe`
|
||||
- **Relay:** Websocket to relay (e.g., `wss://bsky.network/xrpc/com.atproto.sync.subscribeRepos`)
|
||||
|
||||
Jetstream is simpler and filters events server-side.
|
||||
|
||||
## Page Specifications
|
||||
|
||||
### 1. Front Page - Federated Discovery
|
||||
|
||||
**URL:** `/ui/` or `/ui/explore`
|
||||
|
||||
**Purpose:** Discover recently pushed images across all ATCR users.
|
||||
|
||||
**Layout:**
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ ATCR [Search] [@handle] [Login] │
|
||||
├─────────────────────────────────────────────────────────────┤
|
||||
│ Recent Pushes [Filter ▼]│
|
||||
│ │
|
||||
│ ┌───────────────────────────────────────────────────────┐ │
|
||||
│ │ alice.bsky.social/nginx:latest │ │
|
||||
│ │ sha256:abc123... • hold1.alice.com • 2 hours ago │ │
|
||||
│ │ [docker pull atcr.io/alice.bsky.social/nginx:latest] │ │
|
||||
│ └───────────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
│ ┌───────────────────────────────────────────────────────┐ │
|
||||
│ │ bob.dev/myapp:v1.2.3 │ │
|
||||
│ │ sha256:def456... • atcr-storage.fly.dev • 5 hours ago │ │
|
||||
│ │ [docker pull atcr.io/bob.dev/myapp:v1.2.3] │ │
|
||||
│ └───────────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
│ [Load more...] │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Features:**
|
||||
- List of recent pushes (manifests + tags)
|
||||
- Show: handle, repository, tag, digest (truncated), timestamp, hold endpoint
|
||||
- Copy-paste pull command with click-to-copy
|
||||
- Filter by user (click handle to filter)
|
||||
- Search by repository name or tag
|
||||
- Click manifest to view details (modal or dedicated page)
|
||||
- Pagination (50 items per page)
|
||||
|
||||
**API Endpoint:**
|
||||
```
|
||||
GET /ui/api/recent-pushes
|
||||
Query params:
|
||||
- limit (default: 50)
|
||||
- offset (default: 0)
|
||||
- user (optional: filter by DID or handle)
|
||||
- repository (optional: filter by repo name)
|
||||
|
||||
Response:
|
||||
{
|
||||
"pushes": [
|
||||
{
|
||||
"did": "did:plc:alice123",
|
||||
"handle": "alice.bsky.social",
|
||||
"repository": "nginx",
|
||||
"tag": "latest",
|
||||
"digest": "sha256:abc123...",
|
||||
"hold_endpoint": "https://hold1.alice.com",
|
||||
"created_at": "2025-10-05T12:34:56Z",
|
||||
"pull_command": "docker pull atcr.io/alice.bsky.social/nginx:latest"
|
||||
}
|
||||
],
|
||||
"total": 1234,
|
||||
"offset": 0,
|
||||
"limit": 50
|
||||
}
|
||||
```
|
||||
|
||||
**Manifest Details Modal:**
|
||||
- Full manifest JSON (syntax highlighted)
|
||||
- Layer list with digests and sizes
|
||||
- Link to ATProto record (at://did/io.atcr.manifest/rkey)
|
||||
- Architecture, OS, labels
|
||||
- Creation timestamp
|
||||
|
||||
### 2. Settings Page
|
||||
|
||||
**URL:** `/ui/settings`
|
||||
|
||||
**Auth:** Requires login (OAuth)
|
||||
|
||||
**Purpose:** Configure profile and hold preferences.
|
||||
|
||||
**Layout:**
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ ATCR [@alice] [⚙️] │
|
||||
├─────────────────────────────────────────────────────────────┤
|
||||
│ Settings │
|
||||
│ │
|
||||
│ ┌─ Identity ───────────────────────────────────────────┐ │
|
||||
│ │ Handle: alice.bsky.social │ │
|
||||
│ │ DID: did:plc:alice123abc (read-only) │ │
|
||||
│ │ PDS: https://bsky.social (read-only) │ │
|
||||
│ └───────────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
│ ┌─ Default Hold ──────────────────────────────────────┐ │
|
||||
│ │ Current: https://hold1.alice.com │ │
|
||||
│ │ │ │
|
||||
│ │ [Dropdown: Select from your holds ▼] │ │
|
||||
│ │ • https://hold1.alice.com (Your BYOS) │ │
|
||||
│ │ • https://storage.atcr.io (AppView default) │ │
|
||||
│ │ • [Custom URL...] │ │
|
||||
│ │ │ │
|
||||
│ │ Custom hold URL: [_____________________] │ │
|
||||
│ │ │ │
|
||||
│ │ [Save] │ │
|
||||
│ └───────────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
│ ┌─ OAuth Session ─────────────────────────────────────┐ │
|
||||
│ │ Logged in as: alice.bsky.social │ │
|
||||
│ │ Session expires: 2025-10-06 14:23:00 UTC │ │
|
||||
│ │ [Re-authenticate] │ │
|
||||
│ └───────────────────────────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Features:**
|
||||
- Display current identity (handle, DID, PDS)
|
||||
- Default hold configuration:
|
||||
- Dropdown showing user's `io.atcr.hold` records (query from PDS)
|
||||
- Option to select AppView's default storage endpoint
|
||||
- Manual entry for custom hold URL
|
||||
- "Save" button updates `io.atcr.sailor.profile.defaultHold`
|
||||
- OAuth session status
|
||||
- Re-authenticate button (redirects to OAuth flow)
|
||||
|
||||
**API Endpoints:**
|
||||
|
||||
```
|
||||
GET /ui/api/profile
|
||||
Auth: Required (session cookie)
|
||||
Response:
|
||||
{
|
||||
"did": "did:plc:alice123",
|
||||
"handle": "alice.bsky.social",
|
||||
"pds_endpoint": "https://bsky.social",
|
||||
"default_hold": "https://hold1.alice.com",
|
||||
"holds": [
|
||||
{
|
||||
"endpoint": "https://hold1.alice.com",
|
||||
"name": "My BYOS Storage",
|
||||
"public": false
|
||||
}
|
||||
],
|
||||
"session_expires_at": "2025-10-06T14:23:00Z"
|
||||
}
|
||||
|
||||
POST /ui/api/profile/default-hold
|
||||
Auth: Required
|
||||
Body:
|
||||
{
|
||||
"hold_endpoint": "https://hold1.alice.com"
|
||||
}
|
||||
Response:
|
||||
{
|
||||
"success": true
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Personal Page - Your Images
|
||||
|
||||
**URL:** `/ui/images` or `/ui/@{handle}`
|
||||
|
||||
**Auth:** Requires login (OAuth)
|
||||
|
||||
**Purpose:** Manage your container images and tags.
|
||||
|
||||
**Layout:**
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ ATCR [@alice] [⚙️] │
|
||||
├─────────────────────────────────────────────────────────────┤
|
||||
│ Your Images │
|
||||
│ │
|
||||
│ ┌─ nginx ──────────────────────────────────────────────┐ │
|
||||
│ │ 3 tags • 5 manifests • Last push: 2 hours ago │ │
|
||||
│ │ │ │
|
||||
│ │ Tags: │ │
|
||||
│ │ ┌────────────────────────────────────────────────┐ │ │
|
||||
│ │ │ latest → sha256:abc123... (2 hours ago) [✏️][🗑️]│ │ │
|
||||
│ │ │ v1.25 → sha256:def456... (1 day ago) [✏️][🗑️]│ │ │
|
||||
│ │ │ alpine → sha256:ghi789... (3 days ago) [✏️][🗑️]│ │ │
|
||||
│ │ └────────────────────────────────────────────────┘ │ │
|
||||
│ │ │ │
|
||||
│ │ Manifests: │ │
|
||||
│ │ ┌────────────────────────────────────────────────┐ │ │
|
||||
│ │ │ sha256:abc123... • 45MB • hold1.alice.com │ │ │
|
||||
│ │ │ linux/amd64 • 5 layers • [View] [Delete] │ │ │
|
||||
│ │ │ sha256:def456... • 42MB • hold1.alice.com │ │ │
|
||||
│ │ │ linux/amd64 • 5 layers • [View] [Delete] │ │ │
|
||||
│ │ └────────────────────────────────────────────────┘ │ │
|
||||
│ └───────────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
│ ┌─ myapp ──────────────────────────────────────────────┐ │
|
||||
│ │ 2 tags • 2 manifests • Last push: 1 day ago │ │
|
||||
│ │ [Expand ▼] │ │
|
||||
│ └───────────────────────────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Features:**
|
||||
|
||||
**Repository List:**
|
||||
- Group manifests by repository name
|
||||
- Show: tag count, manifest count, last push time
|
||||
- Collapsible/expandable repository cards
|
||||
|
||||
**Repository Details (Expanded):**
|
||||
- **Tags:** Table showing tag → manifest digest → timestamp
|
||||
- Edit tag: Modal to re-point tag to different manifest digest
|
||||
- Delete tag: Confirm dialog, removes `io.atcr.tag` record from PDS
|
||||
- **Manifests:** List of all manifests in repository
|
||||
- Show: digest (truncated), size, hold endpoint, architecture, layer count
|
||||
- View: Open manifest details modal (same as front page)
|
||||
- Delete: Confirm dialog with warning if manifest is tagged
|
||||
|
||||
**Actions:**
|
||||
- Copy pull command for each tag
|
||||
- Edit tag (re-point to different digest)
|
||||
- Delete tag
|
||||
- Delete manifest (with validation)
|
||||
|
||||
**API Endpoints:**
|
||||
|
||||
```
|
||||
GET /ui/api/images
|
||||
Auth: Required
|
||||
Response:
|
||||
{
|
||||
"repositories": [
|
||||
{
|
||||
"name": "nginx",
|
||||
"tag_count": 3,
|
||||
"manifest_count": 5,
|
||||
"last_push": "2025-10-05T10:23:45Z",
|
||||
"tags": [
|
||||
{
|
||||
"tag": "latest",
|
||||
"digest": "sha256:abc123...",
|
||||
"created_at": "2025-10-05T10:23:45Z"
|
||||
}
|
||||
],
|
||||
"manifests": [
|
||||
{
|
||||
"digest": "sha256:abc123...",
|
||||
"size": 47185920,
|
||||
"hold_endpoint": "https://hold1.alice.com",
|
||||
"architecture": "amd64",
|
||||
"os": "linux",
|
||||
"layer_count": 5,
|
||||
"created_at": "2025-10-05T10:23:45Z",
|
||||
"tagged": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
PUT /ui/api/images/{repository}/tags/{tag}
|
||||
Auth: Required
|
||||
Body:
|
||||
{
|
||||
"digest": "sha256:new-digest..."
|
||||
}
|
||||
Response:
|
||||
{
|
||||
"success": true
|
||||
}
|
||||
|
||||
DELETE /ui/api/images/{repository}/tags/{tag}
|
||||
Auth: Required
|
||||
Response:
|
||||
{
|
||||
"success": true
|
||||
}
|
||||
|
||||
DELETE /ui/api/images/{repository}/manifests/{digest}
|
||||
Auth: Required
|
||||
Response:
|
||||
{
|
||||
"success": true
|
||||
}
|
||||
```
|
||||
|
||||
## Authentication
|
||||
|
||||
### OAuth Login Flow
|
||||
|
||||
Reuse existing OAuth implementation from credential helper and AppView.
|
||||
|
||||
**Login Endpoint:** `/auth/oauth/login`
|
||||
|
||||
**Flow:**
|
||||
1. User clicks "Login" on UI
|
||||
2. Redirects to `/auth/oauth/login?return_to=/ui/images`
|
||||
3. User enters handle (e.g., "alice.bsky.social")
|
||||
4. Server resolves handle → DID → PDS → OAuth server
|
||||
5. Server initiates OAuth flow with PAR + DPoP
|
||||
6. User redirected to PDS for authorization
|
||||
7. OAuth callback to `/auth/oauth/callback`
|
||||
8. Server exchanges code for token, validates with PDS
|
||||
9. Server creates session cookie (secure, httpOnly, SameSite)
|
||||
10. Redirects to `return_to` URL or default `/ui/images`
|
||||
|
||||
**Session Management:**
|
||||
- Session cookie: `atcr_session` (JWT or opaque token)
|
||||
- Session storage: In-memory map or SQLite table
|
||||
- Session duration: 24 hours (or match OAuth token expiry)
|
||||
- Refresh: Auto-refresh OAuth token when needed
|
||||
|
||||
**Middleware:**
|
||||
```go
|
||||
// pkg/ui/middleware/auth.go
|
||||
|
||||
func RequireAuth(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
session := getSession(r)
|
||||
if session == nil {
|
||||
http.Redirect(w, r, "/auth/oauth/login?return_to="+r.URL.Path, http.StatusFound)
|
||||
return
|
||||
}
|
||||
|
||||
// Add session info to context
|
||||
ctx := context.WithValue(r.Context(), "session", session)
|
||||
next.ServeHTTP(w, r.WithContext(ctx))
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
## Implementation Roadmap
|
||||
|
||||
### Phase 1: Database & Firehose
|
||||
1. Define SQLite schema
|
||||
2. Implement database layer (pkg/ui/db/)
|
||||
3. Implement firehose worker (pkg/ui/firehose/)
|
||||
4. Test worker with real firehose
|
||||
|
||||
### Phase 2: API Endpoints
|
||||
1. Implement `/ui/api/recent-pushes` (front page data)
|
||||
2. Implement `/ui/api/profile` (settings page data)
|
||||
3. Implement `/ui/api/images` (personal page data)
|
||||
4. Implement tag/manifest mutation endpoints
|
||||
|
||||
### Phase 3: Authentication
|
||||
1. Implement OAuth login endpoint
|
||||
2. Implement session management
|
||||
3. Add auth middleware
|
||||
4. Test login flow
|
||||
|
||||
### Phase 4: Frontend
|
||||
1. Choose framework (templates vs SPA)
|
||||
2. Implement front page
|
||||
3. Implement settings page
|
||||
4. Implement personal page
|
||||
5. Add styling
|
||||
|
||||
### Phase 5: Polish
|
||||
1. Error handling
|
||||
2. Loading states
|
||||
3. Responsive design
|
||||
4. Testing
|
||||
|
||||
## Open Questions
|
||||
|
||||
1. **Framework choice:** Go templates (Templ?), HTMX, or SPA (React/Vue)?
|
||||
2. **Styling:** Tailwind, plain CSS, or component library?
|
||||
3. **Manifest details:** Modal vs dedicated page?
|
||||
4. **Search:** Full-text search on repository/tag names? Requires FTS in SQLite.
|
||||
5. **Real-time updates:** WebSocket for firehose events, or polling?
|
||||
6. **Image size calculation:** Sum of layer sizes, or read from manifest?
|
||||
7. **Public profiles:** Should `/ui/@alice` show public view of alice's images?
|
||||
8. **Firehose resilience:** Reconnect logic, backfill on downtime?
|
||||
|
||||
## Dependencies
|
||||
|
||||
New Go packages needed:
|
||||
- `github.com/mattn/go-sqlite3` - SQLite driver
|
||||
- `github.com/bluesky-social/jetstream` - Firehose client (or direct websocket)
|
||||
- Session management library (or custom implementation)
|
||||
- Frontend framework (TBD)
|
||||
|
||||
## Configuration
|
||||
|
||||
Add to `config/config.yml`:
|
||||
|
||||
```yaml
|
||||
ui:
|
||||
enabled: true
|
||||
database_path: /var/lib/atcr/ui.db
|
||||
firehose:
|
||||
enabled: true
|
||||
endpoint: wss://jetstream.atproto.tools/subscribe
|
||||
collections:
|
||||
- io.atcr.manifest
|
||||
- io.atcr.tag
|
||||
session:
|
||||
duration: 24h
|
||||
cookie_name: atcr_session
|
||||
cookie_secure: true
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
1. **Session cookies:** Secure, HttpOnly, SameSite=Lax
|
||||
2. **CSRF protection:** For mutation endpoints (tag/manifest delete)
|
||||
3. **Rate limiting:** On API endpoints
|
||||
4. **Input validation:** Sanitize user input for search/filters
|
||||
5. **Authorization:** Verify authenticated user owns resources before mutation
|
||||
6. **SQL injection:** Use parameterized queries
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
1. **Database indexes:** On DID, repository, created_at, digest
|
||||
2. **Pagination:** Limit query results to avoid large payloads
|
||||
3. **Caching:** Cache profile data, hold list, manifest details
|
||||
4. **Firehose buffering:** Batch database inserts
|
||||
5. **Connection pooling:** For SQLite and HTTP clients
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
1. **Unit tests:** Database layer, API handlers
|
||||
2. **Integration tests:** Firehose worker with mock events
|
||||
3. **E2E tests:** Full login → browse → manage flow
|
||||
4. **Load testing:** Firehose worker with high event volume
|
||||
5. **Manual testing:** Real PDS, real images, real firehose
|
||||
624
docs/BYOS.md
624
docs/BYOS.md
@@ -2,216 +2,138 @@
|
||||
|
||||
## Overview
|
||||
|
||||
ATCR supports "Bring Your Own Storage" (BYOS) for blob storage. This allows users to:
|
||||
- Deploy their own storage service backed by S3/Storj/Minio/filesystem
|
||||
- Control who can use their storage (public or private)
|
||||
- Keep blob data in their own infrastructure while manifests remain in their ATProto PDS
|
||||
ATCR supports "Bring Your Own Storage" (BYOS) for blob storage. Users can:
|
||||
- Deploy their own hold service with embedded PDS
|
||||
- Control access via crew membership in the hold's PDS
|
||||
- Keep blob data in their own S3/Storj/Minio while manifests stay in their user PDS
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ ATCR AppView (API) │
|
||||
│ - Manifests → ATProto PDS │
|
||||
│ - Auth & token validation │
|
||||
│ - Blob routing (issues redirects) │
|
||||
│ - Profile management │
|
||||
└─────────────────┬───────────────────────────┘
|
||||
│
|
||||
│ Hold discovery priority:
|
||||
│ 1. io.atcr.sailor.profile.defaultHold
|
||||
│ 2. io.atcr.hold records
|
||||
│ 3. AppView default_storage_endpoint
|
||||
▼
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ User's PDS │
|
||||
│ - io.atcr.sailor.profile (hold preference) │
|
||||
│ - io.atcr.hold records (own holds) │
|
||||
│ - io.atcr.manifest records (with holdEP) │
|
||||
└─────────────────┬───────────────────────────┘
|
||||
│
|
||||
│ Redirects to hold
|
||||
▼
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ Storage Service (Hold) │
|
||||
│ - Blob storage (S3/Storj/Minio/filesystem) │
|
||||
│ - Presigned URL generation │
|
||||
│ - Authorization (DID-based) │
|
||||
└─────────────────────────────────────────────┘
|
||||
┌──────────────────────────────────────────┐
|
||||
│ ATCR AppView (API) │
|
||||
│ - Manifests → User's PDS │
|
||||
│ - Auth & service token management │
|
||||
│ - Blob routing via XRPC │
|
||||
│ - Profile management │
|
||||
└────────────┬─────────────────────────────┘
|
||||
│
|
||||
│ Hold discovery priority:
|
||||
│ 1. io.atcr.sailor.profile.defaultHold (DID)
|
||||
│ 2. io.atcr.hold records (legacy)
|
||||
│ 3. AppView default_hold_did
|
||||
▼
|
||||
┌──────────────────────────────────────────┐
|
||||
│ User's PDS │
|
||||
│ - io.atcr.sailor.profile (hold DID) │
|
||||
│ - io.atcr.manifest (with holdDid) │
|
||||
└────────────┬─────────────────────────────┘
|
||||
│
|
||||
│ Service token from user's PDS
|
||||
▼
|
||||
┌──────────────────────────────────────────┐
|
||||
│ Hold Service (did:web:hold.example.com) │
|
||||
│ ├── Embedded PDS │
|
||||
│ │ ├── Captain record (ownership) │
|
||||
│ │ └── Crew records (access control) │
|
||||
│ ├── XRPC multipart upload endpoints │
|
||||
│ └── Storage driver (S3/Storj/etc.) │
|
||||
└──────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## ATProto Records
|
||||
## Hold Service Components
|
||||
|
||||
### io.atcr.sailor.profile
|
||||
Each hold is a full ATProto actor with:
|
||||
- **DID**: `did:web:hold.example.com` (hold's identity)
|
||||
- **Embedded PDS**: Stores captain + crew records (shared data)
|
||||
- **Storage backend**: S3, Storj, Minio, filesystem, etc.
|
||||
- **XRPC endpoints**: Standard ATProto + custom OCI multipart upload
|
||||
|
||||
**NEW:** User profile for hold selection preferences. Created automatically on first authentication.
|
||||
### Records in Hold's PDS
|
||||
|
||||
**Captain record** (`io.atcr.hold.captain/self`):
|
||||
```json
|
||||
{
|
||||
"$type": "io.atcr.hold.captain",
|
||||
"owner": "did:plc:alice123",
|
||||
"public": false,
|
||||
"deployedAt": "2025-10-14T...",
|
||||
"region": "iad",
|
||||
"provider": "fly.io"
|
||||
}
|
||||
```
|
||||
|
||||
**Crew records** (`io.atcr.hold.crew/{rkey}`):
|
||||
```json
|
||||
{
|
||||
"$type": "io.atcr.hold.crew",
|
||||
"member": "did:plc:bob456",
|
||||
"role": "admin",
|
||||
"permissions": ["blob:read", "blob:write"],
|
||||
"addedAt": "2025-10-14T..."
|
||||
}
|
||||
```
|
||||
|
||||
### Sailor Profile (User's PDS)
|
||||
|
||||
Users set their preferred hold in their sailor profile:
|
||||
|
||||
```json
|
||||
{
|
||||
"$type": "io.atcr.sailor.profile",
|
||||
"defaultHold": "https://team-hold.example.com",
|
||||
"createdAt": "2025-10-02T12:00:00Z",
|
||||
"updatedAt": "2025-10-02T12:00:00Z"
|
||||
"defaultHold": "did:web:hold.example.com",
|
||||
"createdAt": "2025-10-02T...",
|
||||
"updatedAt": "2025-10-02T..."
|
||||
}
|
||||
```
|
||||
|
||||
**Record key:** Always `"self"` (only one profile per user)
|
||||
|
||||
**Behavior:**
|
||||
- Created automatically when user first authenticates (OAuth or Basic Auth)
|
||||
- If AppView has `default_storage_endpoint`, profile gets that as initial `defaultHold`
|
||||
- User can update to join shared holds or use their own hold
|
||||
- Set `defaultHold` to `null` to opt out of defaults (use own hold or AppView default)
|
||||
|
||||
**This solves the multi-hold problem:** Users who are crew members of multiple holds can explicitly choose which one to use via their profile.
|
||||
|
||||
### io.atcr.hold
|
||||
|
||||
Users create a hold record in their PDS to configure their own storage:
|
||||
|
||||
```json
|
||||
{
|
||||
"$type": "io.atcr.hold",
|
||||
"endpoint": "https://alice-storage.example.com",
|
||||
"owner": "did:plc:alice123",
|
||||
"public": false,
|
||||
"createdAt": "2025-10-01T12:00:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
### io.atcr.hold.crew
|
||||
|
||||
Hold owners can add crew members (for shared storage):
|
||||
|
||||
```json
|
||||
{
|
||||
"$type": "io.atcr.hold.crew",
|
||||
"hold": "at://did:plc:alice/io.atcr.hold/my-storage",
|
||||
"member": "did:plc:bob456",
|
||||
"role": "write",
|
||||
"addedAt": "2025-10-01T12:00:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
**Note:** Crew records are stored in the **hold owner's PDS**, not the crew member's PDS. This ensures the hold owner maintains full control over access.
|
||||
|
||||
## Storage Service
|
||||
|
||||
### Deployment
|
||||
|
||||
The storage service is a lightweight HTTP server that:
|
||||
1. Accepts presigned URL requests
|
||||
2. Verifies DID authorization
|
||||
3. Generates presigned URLs for S3/Storj/etc
|
||||
4. Returns URLs to AppView for client redirect
|
||||
## Deployment
|
||||
|
||||
### Configuration
|
||||
|
||||
The hold service is configured entirely via environment variables. See `.env.example` for all options.
|
||||
|
||||
**Required environment variables:**
|
||||
Hold service is configured entirely via environment variables:
|
||||
|
||||
```bash
|
||||
# Hold service public URL (REQUIRED)
|
||||
HOLD_PUBLIC_URL=https://storage.example.com
|
||||
# Hold identity (REQUIRED)
|
||||
HOLD_PUBLIC_URL=https://hold.example.com
|
||||
HOLD_OWNER=did:plc:your-did-here
|
||||
|
||||
# Storage driver type
|
||||
# Storage backend
|
||||
STORAGE_DRIVER=s3
|
||||
|
||||
# For S3/Minio
|
||||
AWS_ACCESS_KEY_ID=your_access_key
|
||||
AWS_SECRET_ACCESS_KEY=your_secret_key
|
||||
AWS_REGION=us-east-1
|
||||
S3_BUCKET=my-blobs
|
||||
|
||||
# For Storj (optional - custom S3 endpoint)
|
||||
# S3_ENDPOINT=https://gateway.storjshare.io
|
||||
# Access control
|
||||
HOLD_PUBLIC=false # Require authentication for reads
|
||||
HOLD_ALLOW_ALL_CREW=false # Only explicit crew members can write
|
||||
|
||||
# For filesystem storage
|
||||
# STORAGE_DRIVER=filesystem
|
||||
# STORAGE_ROOT_DIR=/var/lib/atcr-storage
|
||||
# Embedded PDS
|
||||
HOLD_DATABASE_PATH=/var/lib/atcr-hold/hold.db
|
||||
HOLD_DATABASE_KEY_PATH=/var/lib/atcr-hold/keys
|
||||
```
|
||||
|
||||
**Authorization:**
|
||||
|
||||
ATCR follows ATProto's public-by-default model with gated anonymous access:
|
||||
|
||||
**Read Access:**
|
||||
- **Public hold** (`HOLD_PUBLIC=true`): Anonymous reads allowed (no authentication)
|
||||
- **Private hold** (`HOLD_PUBLIC=false`): Requires authentication (any ATCR user with sailor.profile)
|
||||
|
||||
**Write Access:**
|
||||
- Always requires authentication
|
||||
- Must be hold owner OR crew member (verified via `io.atcr.hold.crew` records in owner's PDS)
|
||||
|
||||
**Key Points:**
|
||||
- "Private" just means "no anonymous access" - not "limited user access"
|
||||
- Any authenticated ATCR user can read from private holds
|
||||
- Crew membership only controls WRITE access, not READ access
|
||||
- This aligns with ATProto's public records model (no private PDS records yet)
|
||||
|
||||
### Running
|
||||
### Running Locally
|
||||
|
||||
```bash
|
||||
# Build
|
||||
go build -o atcr-hold ./cmd/hold
|
||||
go build -o bin/atcr-hold ./cmd/hold
|
||||
|
||||
# Set environment variables (or use .env file)
|
||||
export HOLD_PUBLIC_URL=https://storage.example.com
|
||||
export STORAGE_DRIVER=s3
|
||||
export AWS_ACCESS_KEY_ID=...
|
||||
export AWS_SECRET_ACCESS_KEY=...
|
||||
export AWS_REGION=us-east-1
|
||||
export S3_BUCKET=my-blobs
|
||||
# Run (with env vars or .env file)
|
||||
export HOLD_PUBLIC_URL=http://localhost:8080
|
||||
export HOLD_OWNER=did:plc:your-did-here
|
||||
export STORAGE_DRIVER=filesystem
|
||||
export STORAGE_ROOT_DIR=/tmp/atcr-hold
|
||||
export HOLD_DATABASE_PATH=/tmp/atcr-hold/hold.db
|
||||
|
||||
# Run
|
||||
./atcr-hold
|
||||
./bin/atcr-hold
|
||||
```
|
||||
|
||||
**Registration (required):**
|
||||
|
||||
The hold service must be registered in a PDS to be discoverable by the AppView.
|
||||
|
||||
**Standard registration workflow:**
|
||||
|
||||
1. Set `HOLD_OWNER` to your DID:
|
||||
```bash
|
||||
export HOLD_OWNER=did:plc:your-did-here
|
||||
```
|
||||
|
||||
2. Start the hold service:
|
||||
```bash
|
||||
./atcr-hold
|
||||
```
|
||||
|
||||
3. **Check the logs** for the OAuth authorization URL:
|
||||
```
|
||||
================================================================================
|
||||
OAUTH AUTHORIZATION REQUIRED
|
||||
================================================================================
|
||||
|
||||
Please visit this URL to authorize the hold service:
|
||||
|
||||
https://bsky.app/authorize?client_id=...
|
||||
|
||||
Waiting for authorization...
|
||||
================================================================================
|
||||
```
|
||||
|
||||
4. Visit the URL in your browser and authorize
|
||||
|
||||
5. The hold service will:
|
||||
- Exchange the authorization code for a token
|
||||
- Create `io.atcr.hold` record in your PDS
|
||||
- Create `io.atcr.hold.crew` record (making you the owner)
|
||||
- Save registration state
|
||||
|
||||
6. On subsequent runs, the service checks if already registered and skips OAuth
|
||||
|
||||
**Alternative methods:**
|
||||
|
||||
- **Manual API registration**: Call `POST /register` with your own OAuth token
|
||||
- **Completely manual**: Create PDS records yourself using any ATProto client
|
||||
On first run, the hold service creates:
|
||||
- Captain record in embedded PDS (making you the owner)
|
||||
- Crew record for owner with all permissions
|
||||
- DID document at `/.well-known/did.json`
|
||||
|
||||
### Deploy to Fly.io
|
||||
|
||||
@@ -223,11 +145,11 @@ primary_region = "ord"
|
||||
|
||||
[env]
|
||||
HOLD_PUBLIC_URL = "https://my-atcr-hold.fly.dev"
|
||||
HOLD_SERVER_ADDR = ":8080"
|
||||
STORAGE_DRIVER = "s3"
|
||||
AWS_REGION = "us-east-1"
|
||||
S3_BUCKET = "my-blobs"
|
||||
HOLD_PUBLIC = "false"
|
||||
HOLD_ALLOW_ALL_CREW = "false"
|
||||
|
||||
[http_service]
|
||||
internal_port = 8080
|
||||
@@ -250,268 +172,196 @@ fly deploy
|
||||
fly secrets set AWS_ACCESS_KEY_ID=...
|
||||
fly secrets set AWS_SECRET_ACCESS_KEY=...
|
||||
fly secrets set HOLD_OWNER=did:plc:your-did-here
|
||||
|
||||
# Check logs for OAuth URL on first run
|
||||
fly logs
|
||||
|
||||
# Visit the OAuth URL shown in logs to authorize
|
||||
# The hold service will register itself in your PDS
|
||||
```
|
||||
|
||||
## Request Flow
|
||||
|
||||
### Push with BYOS
|
||||
|
||||
1. **Docker push** `atcr.io/alice/myapp:latest`
|
||||
2. **AppView** resolves `alice` → `did:plc:alice123`
|
||||
3. **AppView** discovers hold via priority logic:
|
||||
- Check alice's `io.atcr.sailor.profile` for `defaultHold`
|
||||
- If not set, check alice's `io.atcr.hold` records
|
||||
- Fall back to AppView's `default_storage_endpoint`
|
||||
4. **Found:** `alice.profile.defaultHold = "https://team-hold.example.com"`
|
||||
5. **AppView** → team-hold: POST `/put-presigned-url`
|
||||
```json
|
||||
{
|
||||
"did": "did:plc:alice123",
|
||||
"digest": "sha256:abc123...",
|
||||
"size": 1048576
|
||||
}
|
||||
```
|
||||
6. **Hold service**:
|
||||
- Verifies alice is authorized (checks crew records)
|
||||
- Generates S3 presigned upload URL (15min expiry)
|
||||
- Returns: `{"url": "https://s3.../blob?signature=..."}`
|
||||
7. **AppView** → Docker: `307 Redirect` to presigned URL
|
||||
8. **Docker** → S3: PUT blob directly (no proxy)
|
||||
9. **Manifest** stored in alice's PDS with `holdEndpoint: "https://team-hold.example.com"`
|
||||
```
|
||||
1. Client: docker push atcr.io/alice/myapp:latest
|
||||
|
||||
2. AppView resolves alice → did:plc:alice123
|
||||
|
||||
3. AppView discovers hold DID:
|
||||
- Check alice's sailor profile for defaultHold
|
||||
- Returns: "did:web:alice-storage.fly.dev"
|
||||
|
||||
4. AppView gets service token from alice's PDS:
|
||||
GET /xrpc/com.atproto.server.getServiceAuth?aud=did:web:alice-storage.fly.dev
|
||||
Response: { "token": "eyJ..." }
|
||||
|
||||
5. AppView initiates multipart upload to hold:
|
||||
POST https://alice-storage.fly.dev/xrpc/io.atcr.hold.initiateUpload
|
||||
Authorization: Bearer {serviceToken}
|
||||
Body: { "digest": "sha256:abc..." }
|
||||
Response: { "uploadId": "xyz" }
|
||||
|
||||
6. For each part:
|
||||
- AppView: POST /xrpc/io.atcr.hold.getPartUploadUrl
|
||||
- Hold validates service token, checks crew membership
|
||||
- Hold returns: { "url": "https://s3.../presigned" }
|
||||
- Client uploads directly to S3 presigned URL
|
||||
|
||||
7. AppView completes upload:
|
||||
POST /xrpc/io.atcr.hold.completeUpload
|
||||
Body: { "uploadId": "xyz", "digest": "sha256:abc...", "parts": [...] }
|
||||
|
||||
8. Manifest stored in alice's PDS:
|
||||
- holdDid: "did:web:alice-storage.fly.dev"
|
||||
- holdEndpoint: "https://alice-storage.fly.dev" (backward compat)
|
||||
```
|
||||
|
||||
### Pull with BYOS
|
||||
|
||||
1. **Docker pull** `atcr.io/alice/myapp:latest`
|
||||
2. **AppView** fetches manifest from alice's PDS
|
||||
3. **Manifest** contains `holdEndpoint: "https://team-hold.example.com"`
|
||||
4. **AppView** caches: `(alice's DID, "myapp") → "https://team-hold.example.com"` (10min TTL)
|
||||
5. **Docker** requests blobs: GET `/v2/alice/myapp/blobs/sha256:abc123`
|
||||
6. **AppView** uses **cached hold from manifest** (not re-discovered)
|
||||
7. **AppView** → team-hold: POST `/get-presigned-url`
|
||||
8. **Hold service** returns presigned download URL
|
||||
9. **AppView** → Docker: `307 Redirect`
|
||||
10. **Docker** → S3: GET blob directly
|
||||
```
|
||||
1. Client: docker pull atcr.io/alice/myapp:latest
|
||||
|
||||
**Key insight:** Pull uses the historical `holdEndpoint` from the manifest, ensuring blobs are fetched from where they were originally pushed, even if alice later changes her profile's `defaultHold`.
|
||||
2. AppView fetches manifest from alice's PDS
|
||||
|
||||
## Default Registry
|
||||
3. Manifest contains:
|
||||
- holdDid: "did:web:alice-storage.fly.dev"
|
||||
|
||||
The AppView can run its own storage service as the default:
|
||||
4. AppView caches hold DID for 10 minutes (covers pull operation)
|
||||
|
||||
### AppView config
|
||||
5. Client requests blob: GET /v2/alice/myapp/blobs/sha256:abc123
|
||||
|
||||
```yaml
|
||||
middleware:
|
||||
- name: registry
|
||||
options:
|
||||
atproto-resolver:
|
||||
default_storage_endpoint: https://storage.atcr.io
|
||||
6. AppView uses cached hold DID from manifest
|
||||
|
||||
7. AppView gets service token from alice's PDS
|
||||
|
||||
8. AppView calls hold XRPC:
|
||||
GET /xrpc/com.atproto.sync.getBlob?did={userDID}&cid=sha256:abc123
|
||||
Authorization: Bearer {serviceToken}
|
||||
Response: { "url": "https://s3.../presigned-download" }
|
||||
|
||||
9. AppView redirects client to presigned S3 URL
|
||||
|
||||
10. Client downloads directly from S3
|
||||
```
|
||||
|
||||
### Default hold service config
|
||||
**Key insight:** Pull uses the `holdDid` stored in the manifest, ensuring blobs are fetched from where they were originally pushed.
|
||||
|
||||
## Access Control
|
||||
|
||||
### Read Access
|
||||
|
||||
- **Public hold** (`HOLD_PUBLIC=true`): Anonymous + authenticated users
|
||||
- **Private hold** (`HOLD_PUBLIC=false`): Authenticated users with crew membership
|
||||
|
||||
### Write Access
|
||||
|
||||
- Hold owner (captain) OR crew members only
|
||||
- Verified via `io.atcr.hold.crew` records in hold's embedded PDS
|
||||
- Service token proves user identity (from user's PDS)
|
||||
|
||||
### Authorization Flow
|
||||
|
||||
```go
|
||||
1. AppView gets service token from user's PDS
|
||||
2. AppView sends request to hold with service token
|
||||
3. Hold validates service token (checks it's from user's PDS)
|
||||
4. Hold extracts user's DID from token
|
||||
5. Hold checks crew records in its embedded PDS
|
||||
6. If crew member found → allow, else → deny
|
||||
```
|
||||
|
||||
## Managing Crew Members
|
||||
|
||||
### Add Crew Member
|
||||
|
||||
Use ATProto client to create crew record in hold's PDS:
|
||||
|
||||
```bash
|
||||
# Accept any authenticated DID
|
||||
HOLD_PUBLIC=false # Requires authentication
|
||||
# Via XRPC (if hold supports it)
|
||||
POST https://hold.example.com/xrpc/io.atcr.hold.requestCrew
|
||||
Authorization: Bearer {userOAuthToken}
|
||||
|
||||
# Or allow public reads
|
||||
HOLD_PUBLIC=true # Public reads, auth required for writes
|
||||
# Or manually via captain's OAuth to hold's PDS
|
||||
atproto put-record \
|
||||
--pds https://hold.example.com \
|
||||
--collection io.atcr.hold.crew \
|
||||
--rkey "{memberDID}" \
|
||||
--value '{
|
||||
"$type": "io.atcr.hold.crew",
|
||||
"member": "did:plc:bob456",
|
||||
"role": "admin",
|
||||
"permissions": ["blob:read", "blob:write"]
|
||||
}'
|
||||
```
|
||||
|
||||
This provides free-tier shared storage for users who don't want to deploy their own.
|
||||
### Remove Crew Member
|
||||
|
||||
## Storage Drivers Supported
|
||||
```bash
|
||||
atproto delete-record \
|
||||
--pds https://hold.example.com \
|
||||
--collection io.atcr.hold.crew \
|
||||
--rkey "{memberDID}"
|
||||
```
|
||||
|
||||
The storage service uses distribution's storage drivers:
|
||||
## Storage Drivers
|
||||
|
||||
Hold service supports all distribution storage drivers:
|
||||
- **S3** - AWS S3, Minio, Storj (via S3 gateway)
|
||||
- **Filesystem** - Local disk (for testing)
|
||||
- **Azure** - Azure Blob Storage
|
||||
- **GCS** - Google Cloud Storage
|
||||
- **Swift** - OpenStack Swift
|
||||
- **OSS** - Alibaba Cloud OSS
|
||||
|
||||
## Quotas
|
||||
|
||||
Quotas are NOT implemented in the storage service. Instead, use:
|
||||
|
||||
- **S3**: Bucket policies, lifecycle rules
|
||||
- **Storj**: Project limits in Storj dashboard
|
||||
- **Minio**: Quota enforcement features
|
||||
- **Filesystem**: Disk quotas at OS level
|
||||
|
||||
## Security
|
||||
|
||||
### Authorization
|
||||
|
||||
Authorization is based on ATProto's public-by-default model:
|
||||
|
||||
**Read Authorization:**
|
||||
- **Public hold** (`public: true` in hold record):
|
||||
- Anonymous users: ✅ Allowed
|
||||
- Any authenticated user: ✅ Allowed
|
||||
|
||||
- **Private hold** (`public: false` in hold record):
|
||||
- Anonymous users: ❌ 401 Unauthorized
|
||||
- Any authenticated ATCR user: ✅ Allowed (no crew membership required)
|
||||
|
||||
**Write Authorization:**
|
||||
- Anonymous users: ❌ 401 Unauthorized
|
||||
- Authenticated non-crew: ❌ 403 Forbidden
|
||||
- Authenticated crew member: ✅ Allowed
|
||||
- Hold owner: ✅ Allowed
|
||||
|
||||
**Implementation:**
|
||||
- Hold service queries owner's PDS for `io.atcr.hold.crew` records
|
||||
- Crew records are public ATProto records (read without authentication)
|
||||
- "Private" holds only gate anonymous access, not authenticated user access
|
||||
- This reflects ATProto's current limitation: no private PDS records
|
||||
|
||||
### Presigned URLs
|
||||
|
||||
- 15 minute expiry
|
||||
- Client uploads/downloads directly to storage
|
||||
- No data flows through AppView or hold service
|
||||
|
||||
### Private Holds
|
||||
|
||||
"Private" holds gate anonymous access while remaining accessible to authenticated users:
|
||||
|
||||
**What "Private" Means:**
|
||||
- `HOLD_PUBLIC=false` prevents anonymous reads
|
||||
- Any authenticated ATCR user can still read
|
||||
- This aligns with ATProto's public records model
|
||||
|
||||
**Write Control:**
|
||||
- Only hold owner and crew members can write
|
||||
- Crew membership managed via `io.atcr.hold.crew` records in owner's PDS
|
||||
- Removing crew member immediately revokes write access
|
||||
|
||||
**Future: True Private Access**
|
||||
- When ATProto adds private PDS records, ATCR can support truly private repos
|
||||
- For now, "private" = "authenticated-only access"
|
||||
|
||||
## Example: Personal Storage
|
||||
|
||||
Alice wants to use her own Storj account:
|
||||
|
||||
1. **Set environment variables**:
|
||||
```bash
|
||||
export HOLD_PUBLIC_URL=https://alice-storage.fly.dev
|
||||
export HOLD_OWNER=did:plc:alice123
|
||||
export STORAGE_DRIVER=s3
|
||||
export AWS_ACCESS_KEY_ID=your_storj_access_key
|
||||
export AWS_SECRET_ACCESS_KEY=your_storj_secret_key
|
||||
export S3_ENDPOINT=https://gateway.storjshare.io
|
||||
export S3_BUCKET=alice-blobs
|
||||
```
|
||||
|
||||
2. **Deploy hold service** to Fly.io - auto-registration creates hold + crew record
|
||||
|
||||
3. **Push images** - AppView automatically routes to her storage
|
||||
|
||||
## Example: Team Hold
|
||||
|
||||
A company wants shared storage for their team:
|
||||
```bash
|
||||
# 1. Deploy hold service
|
||||
export HOLD_PUBLIC_URL=https://team-hold.fly.dev
|
||||
export HOLD_OWNER=did:plc:admin
|
||||
export HOLD_PUBLIC=false # Private
|
||||
export STORAGE_DRIVER=s3
|
||||
export AWS_ACCESS_KEY_ID=...
|
||||
export S3_BUCKET=team-blobs
|
||||
|
||||
1. **Deploy hold service** with S3 credentials and auto-registration:
|
||||
```bash
|
||||
export HOLD_PUBLIC_URL=https://company-hold.fly.dev
|
||||
export HOLD_OWNER=did:plc:admin
|
||||
export HOLD_PUBLIC=false
|
||||
export STORAGE_DRIVER=s3
|
||||
export AWS_ACCESS_KEY_ID=...
|
||||
export AWS_SECRET_ACCESS_KEY=...
|
||||
export S3_BUCKET=company-blobs
|
||||
```
|
||||
fly deploy
|
||||
|
||||
2. **Hold service auto-registers** on first run, creating:
|
||||
- Hold record in admin's PDS
|
||||
- Crew record making admin the owner
|
||||
# 2. Hold auto-creates captain + crew records on first run
|
||||
|
||||
3. **Admin adds crew members** via ATProto client or manually:
|
||||
```bash
|
||||
# Using atproto client
|
||||
atproto put-record \
|
||||
--collection io.atcr.hold.crew \
|
||||
--rkey "company-did:plc:engineer1" \
|
||||
--value '{
|
||||
"$type": "io.atcr.hold.crew",
|
||||
"hold": "at://did:plc:admin/io.atcr.hold/company",
|
||||
"member": "did:plc:engineer1",
|
||||
"role": "write"
|
||||
}'
|
||||
```
|
||||
# 3. Admin adds team members via hold's PDS (requires OAuth)
|
||||
# (TODO: Implement crew management UI/CLI)
|
||||
|
||||
4. **Team members set their profile** to use the shared hold:
|
||||
```bash
|
||||
# Engineer updates their sailor profile
|
||||
atproto put-record \
|
||||
--collection io.atcr.sailor.profile \
|
||||
--rkey "self" \
|
||||
--value '{
|
||||
"$type": "io.atcr.sailor.profile",
|
||||
"defaultHold": "https://company-hold.fly.dev"
|
||||
}'
|
||||
```
|
||||
# 4. Team members set their sailor profile:
|
||||
atproto put-record \
|
||||
--collection io.atcr.sailor.profile \
|
||||
--rkey "self" \
|
||||
--value '{
|
||||
"$type": "io.atcr.sailor.profile",
|
||||
"defaultHold": "did:web:team-hold.fly.dev"
|
||||
}'
|
||||
|
||||
5. **Hold service queries PDS** for crew records to authorize writes
|
||||
6. **Engineers push/pull** using `atcr.io/engineer1/myapp` - blobs go to company hold
|
||||
# 5. Team members can now push/pull using team hold
|
||||
```
|
||||
|
||||
## Limitations
|
||||
|
||||
1. **No resume/partial uploads** - Storage service doesn't track upload state
|
||||
2. **No advanced features** - Just basic put/get, no deduplication logic
|
||||
3. **In-memory cache** - Hold endpoint cache is in-memory (for production, use Redis)
|
||||
4. **Manual profile updates** - No UI for updating sailor profile (must use ATProto client)
|
||||
### Current IAM Challenges
|
||||
|
||||
## Performance Optimization: S3 Presigned URLs
|
||||
See [EMBEDDED_PDS.md](./EMBEDDED_PDS.md#iam-challenges) for detailed discussion.
|
||||
|
||||
**Status:** Planned implementation (see [PRESIGNED_URLS.md](./PRESIGNED_URLS.md))
|
||||
**Known issues:**
|
||||
1. **RPC permission format**: Service tokens don't work with IP-based DIDs in local dev
|
||||
2. **Dynamic hold discovery**: AppView can't dynamically OAuth arbitrary holds from sailor profiles
|
||||
3. **Manual profile management**: No UI for updating sailor profile (must use ATProto client)
|
||||
|
||||
Currently, hold services act as proxies for blob data. With presigned URLs:
|
||||
|
||||
- **Downloads:** Docker → S3 direct (via 307 redirect)
|
||||
- **Uploads:** Docker → AppView → S3 (via presigned URL)
|
||||
- **Hold service bandwidth:** Reduced by 99.98% (only orchestration)
|
||||
|
||||
**Benefits:**
|
||||
- Hold services can run on minimal infrastructure ($5/month instances)
|
||||
- Direct S3 transfers at maximum speed
|
||||
- Scales to arbitrarily large images
|
||||
- Works with Storj, MinIO, Backblaze B2, Cloudflare R2
|
||||
|
||||
See [PRESIGNED_URLS.md](./PRESIGNED_URLS.md) for complete technical details and implementation guide.
|
||||
**Workaround:** Use hostname-based DIDs (`did:web:hold.example.com`) and public holds for now.
|
||||
|
||||
## Future Improvements
|
||||
|
||||
1. **S3 Presigned URLs** - Implement direct S3 URLs (see [PRESIGNED_URLS.md](./PRESIGNED_URLS.md))
|
||||
2. **Automatic failover** - Multiple storage endpoints, fallback to default
|
||||
3. **Storage analytics** - Track usage per DID
|
||||
4. **Quota integration** - Optional quota tracking in storage service
|
||||
5. **Profile management UI** - Web interface for users to manage their sailor profile
|
||||
6. **Distributed cache** - Redis/Memcached for hold endpoint cache in multi-instance deployments
|
||||
|
||||
## Comparison to Default Storage
|
||||
|
||||
| Feature | Default (Shared S3) | BYOS |
|
||||
|---------|---------------------|------|
|
||||
| Setup | None required | Deploy storage service |
|
||||
| Cost | Free (with quota) | User pays for S3/Storj |
|
||||
| Control | Limited | Full control |
|
||||
| Performance | Shared | Dedicated |
|
||||
| Quotas | Enforced by AppView | User managed |
|
||||
| Privacy | Blobs in shared bucket | Blobs in user's bucket |
|
||||
1. **Crew management UI** - Web interface for adding/removing crew members
|
||||
2. **Dynamic OAuth** - Support for arbitrary BYOS holds without pre-configuration
|
||||
3. **Hold migration** - Tools for moving blobs between holds
|
||||
4. **Storage analytics** - Track usage per user/repository
|
||||
5. **Distributed cache** - Redis for hold DID cache in multi-instance deployments
|
||||
|
||||
## References
|
||||
|
||||
- [EMBEDDED_PDS.md](./EMBEDDED_PDS.md) - Embedded PDS architecture and IAM details
|
||||
- [ATProto Lexicon Spec](https://atproto.com/specs/lexicon)
|
||||
- [Distribution Storage Drivers](https://distribution.github.io/distribution/storage-drivers/)
|
||||
- [S3 Presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/PresignedUrlUploadObject.html)
|
||||
- [Storj Documentation](https://docs.storj.io/)
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
957
docs/IMAGE_SIGNING.md
Normal file
957
docs/IMAGE_SIGNING.md
Normal file
@@ -0,0 +1,957 @@
|
||||
# Image Signing with ATProto
|
||||
|
||||
ATCR supports cryptographic signing of container images to ensure authenticity and integrity. Users have two options:
|
||||
|
||||
1. **Automatic signing (recommended)**: Credential helper signs images automatically on every push
|
||||
2. **Manual signing**: Use standard Cosign tools yourself
|
||||
|
||||
Both approaches use the OCI Referrers API bridge for verification with standard tools (Cosign, Notary, Kubernetes admission controllers).
|
||||
|
||||
## Design Constraints
|
||||
|
||||
### Why Server-Side Signing Doesn't Work
|
||||
|
||||
It's tempting to implement automatic signing on the AppView or hold (like GitHub's automatic Cosign signing), but this breaks the fundamental trust model:
|
||||
|
||||
**The problem: Signing "on behalf of" isn't real signing**
|
||||
|
||||
```
|
||||
❌ AppView signs image → Proves "AppView vouches for this"
|
||||
❌ Hold signs image → Proves "Hold vouches for this"
|
||||
❌ PDS signs image → Proves "PDS vouches for this"
|
||||
✅ Alice signs image → Proves "Alice created/approved this"
|
||||
```
|
||||
|
||||
**Why GitHub can do it:**
|
||||
- GitHub Actions runs with your GitHub identity
|
||||
- OIDC token proves "this workflow runs as alice on GitHub"
|
||||
- Fulcio certificate authority issues cert based on that proof
|
||||
- Still "alice" signing, just via GitHub's infrastructure
|
||||
|
||||
**Why ATCR can't replicate this:**
|
||||
- ATProto doesn't have OIDC/Fulcio equivalent
|
||||
- AppView can't sign "as alice" - only alice can
|
||||
- No secure server-side storage for user private keys
|
||||
- ATProto doesn't have encrypted record storage yet
|
||||
- Storing keys in AppView database = AppView controls keys, not alice
|
||||
- Hold's PDS has its own private key, but signing with it proves hold ownership, not user ownership
|
||||
|
||||
**Conclusion:** Signing must happen **client-side with user-controlled keys**.
|
||||
|
||||
### Why ATProto Record Signatures Aren't Sufficient
|
||||
|
||||
ATProto already signs all records stored in PDSs. When a manifest is stored as an `io.atcr.manifest` record, it includes:
|
||||
|
||||
```json
|
||||
{
|
||||
"uri": "at://did:plc:alice123/io.atcr.manifest/abc123",
|
||||
"cid": "bafyrei...",
|
||||
"value": { /* manifest data */ },
|
||||
"sig": "..." // ← PDS signature over record
|
||||
}
|
||||
```
|
||||
|
||||
**What this proves:**
|
||||
- ✅ Alice's PDS created and signed this record
|
||||
- ✅ Record hasn't been tampered with since signing
|
||||
- ✅ CID correctly represents the record content
|
||||
|
||||
**What this doesn't prove:**
|
||||
- ❌ Alice personally approved this image
|
||||
- ❌ Alice's private key was involved (only PDS key)
|
||||
|
||||
**The gap:**
|
||||
- A compromised or malicious PDS could create fake manifest records and sign them validly
|
||||
- PDS operator could sign manifests without user's knowledge
|
||||
- No proof that the *user* (not just their PDS) approved the image
|
||||
|
||||
**For true image signing, we need:**
|
||||
- User-controlled private keys (not PDS keys)
|
||||
- Client-side signing (where user has key access)
|
||||
- Separate signature records proving user approval
|
||||
|
||||
**Important nuance - PDS Trust Spectrum:**
|
||||
|
||||
While ATProto records are always signed by the PDS, this doesn't provide user-level signing for image verification:
|
||||
|
||||
1. **Self-hosted PDS with user-controlled keys:**
|
||||
- User runs their own PDS and controls PDS rotation keys
|
||||
- PDS signature ≈ user signature (trusted operator)
|
||||
- Still doesn't work with standard tools (Cosign/Notary)
|
||||
|
||||
2. **Shared/managed PDS (e.g., Bluesky):**
|
||||
- PDS operated by third party (bsky.social)
|
||||
- Auto-generated keys controlled by operator
|
||||
- User doesn't have access to PDS rotation keys
|
||||
- PDS signature ≠ user signature
|
||||
|
||||
**For ATCR:**
|
||||
- Credential helper signing works for all users (self-hosted or shared PDS)
|
||||
- Provides user-controlled keys separate from PDS keys
|
||||
- Works with standard verification tools via OCI Referrers API bridge
|
||||
|
||||
## Signing Options
|
||||
|
||||
### Option 1: Automatic Signing (Recommended)
|
||||
|
||||
The credential helper automatically signs images on every push - no extra commands needed.
|
||||
|
||||
**How it works:**
|
||||
- Credential helper runs on every `docker push` for authentication
|
||||
- Extended to also sign the manifest digest with user's private key
|
||||
- Private key stored securely in OS keychain
|
||||
- Signature sent to AppView and stored in ATProto
|
||||
- Completely transparent to the user
|
||||
|
||||
### Architecture
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ docker push atcr.io/alice/myapp:latest │
|
||||
└────────────────────┬────────────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ docker-credential-atcr (runs automatically) │
|
||||
│ │
|
||||
│ 1. Authenticate to AppView (OAuth) │
|
||||
│ 2. Get registry JWT │
|
||||
│ 3. Sign manifest digest with local private key ← NEW
|
||||
│ 4. Send signature to AppView ← NEW
|
||||
│ │
|
||||
│ Private key stored in OS keychain │
|
||||
│ (macOS Keychain, Windows Credential Manager, etc.) │
|
||||
└────────────────────┬────────────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ AppView │
|
||||
│ │
|
||||
│ 1. Receives signature from credential helper │
|
||||
│ 2. Stores in user's PDS (io.atcr.signature) │
|
||||
│ │
|
||||
│ OR stores in hold's PDS for BYOS scenarios │
|
||||
└─────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**User experience:**
|
||||
|
||||
```bash
|
||||
# One-time setup
|
||||
docker login atcr.io
|
||||
# → Credential helper generates ECDSA key pair
|
||||
# → Private key stored in OS keychain
|
||||
# → Public key published to user's PDS
|
||||
|
||||
# Every push (automatic signing)
|
||||
docker push atcr.io/alice/myapp:latest
|
||||
# → Image pushed
|
||||
# → Automatically signed by credential helper
|
||||
# → No extra commands!
|
||||
|
||||
# Verification (standard Cosign)
|
||||
cosign verify atcr.io/alice/myapp:latest --key alice.pub
|
||||
```
|
||||
|
||||
### Option 2: Manual Signing (DIY)
|
||||
|
||||
Use standard Cosign tools yourself if you prefer manual control.
|
||||
|
||||
**How it works:**
|
||||
- You manage your own signing keys
|
||||
- You run `cosign sign` manually after pushing
|
||||
- Signatures stored in ATProto via OCI Referrers API
|
||||
- Full control over signing workflow
|
||||
|
||||
**User experience:**
|
||||
|
||||
```bash
|
||||
# Push image
|
||||
docker push atcr.io/alice/myapp:latest
|
||||
|
||||
# Sign manually with Cosign
|
||||
cosign sign atcr.io/alice/myapp:latest --key cosign.key
|
||||
|
||||
# Cosign stores signature via registry's OCI API
|
||||
# AppView receives signature and stores in ATProto
|
||||
|
||||
# Verification (same as automatic)
|
||||
cosign verify atcr.io/alice/myapp:latest --key cosign.pub
|
||||
```
|
||||
|
||||
**When to use:**
|
||||
- Need specific signing workflows (e.g., CI/CD pipelines)
|
||||
- Want to use hardware tokens (YubiKey)
|
||||
- Prefer manual control over automatic signing
|
||||
- Already using Cosign in your organization
|
||||
|
||||
### Key Management
|
||||
|
||||
**Key generation (first run):**
|
||||
1. Credential helper checks for existing signing key in OS keychain
|
||||
2. If not found, generates new ECDSA P-256 key pair (or Ed25519)
|
||||
3. Stores private key in OS keychain with access control
|
||||
4. Derives public key for publishing
|
||||
|
||||
**Public key publishing:**
|
||||
```json
|
||||
{
|
||||
"$type": "io.atcr.signing.key",
|
||||
"keyId": "credential-helper-default",
|
||||
"keyType": "ecdsa-p256",
|
||||
"publicKey": "-----BEGIN PUBLIC KEY-----\nMFkw...",
|
||||
"validFrom": "2025-10-20T12:00:00Z",
|
||||
"expiresAt": null,
|
||||
"revoked": false,
|
||||
"purpose": ["image-signing"],
|
||||
"deviceId": "alice-macbook-pro",
|
||||
"createdAt": "2025-10-20T12:00:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
**Record stored in:** User's PDS at `io.atcr.signing.key/credential-helper-default`
|
||||
|
||||
**Key storage locations:**
|
||||
- **macOS:** Keychain Access (secure enclave on modern Macs)
|
||||
- **Windows:** Credential Manager / Windows Data Protection API
|
||||
- **Linux:** Secret Service API (gnome-keyring, kwallet)
|
||||
- **Fallback:** Encrypted file with restrictive permissions (0600)
|
||||
|
||||
### Signing Flow
|
||||
|
||||
```
|
||||
1. docker push atcr.io/alice/myapp:latest
|
||||
↓
|
||||
2. Docker daemon calls credential helper:
|
||||
docker-credential-atcr get atcr.io
|
||||
↓
|
||||
3. Credential helper flow:
|
||||
a. Authenticate via OAuth (existing)
|
||||
b. Receive registry JWT from AppView (existing)
|
||||
c. Fetch manifest digest from registry (NEW)
|
||||
d. Load private key from OS keychain (NEW)
|
||||
e. Sign manifest digest (NEW)
|
||||
f. Send signature to AppView via XRPC (NEW)
|
||||
↓
|
||||
4. AppView stores signature:
|
||||
{
|
||||
"$type": "io.atcr.signature",
|
||||
"repository": "alice/myapp",
|
||||
"digest": "sha256:abc123...",
|
||||
"signature": "MEUCIQDx...",
|
||||
"keyId": "credential-helper-default",
|
||||
"signatureAlgorithm": "ecdsa-p256-sha256",
|
||||
"signedAt": "2025-10-20T12:34:56Z"
|
||||
}
|
||||
↓
|
||||
5. Return registry JWT to Docker
|
||||
↓
|
||||
6. Docker proceeds with push
|
||||
```
|
||||
|
||||
### Signature Storage
|
||||
|
||||
**Option 1: User's PDS (Default)**
|
||||
- Signature stored in alice's PDS
|
||||
- Collection: `io.atcr.signature`
|
||||
- Discoverable via alice's ATProto repo
|
||||
- User owns all signing metadata
|
||||
|
||||
**Option 2: Hold's PDS (BYOS)**
|
||||
- Signature stored in hold's embedded PDS
|
||||
- Useful for shared holds with multiple users
|
||||
- Hold acts as signature repository
|
||||
- Parallel to SBOM storage model
|
||||
|
||||
**Decision logic:**
|
||||
```go
|
||||
// In AppView signature handler
|
||||
if manifest.HoldDid != "" && manifest.HoldDid != appview.DefaultHoldDid {
|
||||
// BYOS scenario - store in hold's PDS
|
||||
storeSignatureInHold(manifest.HoldDid, signature)
|
||||
} else {
|
||||
// Default - store in user's PDS
|
||||
storeSignatureInUserPDS(userDid, signature)
|
||||
}
|
||||
```
|
||||
|
||||
## Signature Format
|
||||
|
||||
Signatures are stored in a simple format in ATProto and transformed to Cosign-compatible format when served via the OCI Referrers API:
|
||||
|
||||
**ATProto storage format:**
|
||||
```json
|
||||
{
|
||||
"$type": "io.atcr.signature",
|
||||
"repository": "alice/myapp",
|
||||
"digest": "sha256:abc123...",
|
||||
"signature": "base64-encoded-signature-bytes",
|
||||
"keyId": "credential-helper-default",
|
||||
"signatureAlgorithm": "ecdsa-p256-sha256",
|
||||
"signedAt": "2025-10-20T12:34:56Z",
|
||||
"format": "simple"
|
||||
}
|
||||
```
|
||||
|
||||
**OCI Referrers format (served by AppView):**
|
||||
```json
|
||||
{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.index.v1+json",
|
||||
"manifests": [{
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"digest": "sha256:...",
|
||||
"artifactType": "application/vnd.dev.cosign.simplesigning.v1+json",
|
||||
"annotations": {
|
||||
"dev.sigstore.cosign.signature": "MEUCIQDx...",
|
||||
"io.atcr.keyId": "credential-helper-default",
|
||||
"io.atcr.signedAt": "2025-10-20T12:34:56Z"
|
||||
}
|
||||
}]
|
||||
}
|
||||
```
|
||||
|
||||
This allows:
|
||||
- Simple storage in ATProto
|
||||
- Compatible with Cosign verification
|
||||
- No duplicate storage needed
|
||||
|
||||
## ATProto Records
|
||||
|
||||
### io.atcr.signing.key - Public Signing Keys
|
||||
|
||||
```json
|
||||
{
|
||||
"$type": "io.atcr.signing.key",
|
||||
"keyId": "credential-helper-default",
|
||||
"keyType": "ecdsa-p256",
|
||||
"publicKey": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZI...",
|
||||
"validFrom": "2025-10-20T12:00:00Z",
|
||||
"expiresAt": "2026-10-20T12:00:00Z",
|
||||
"revoked": false,
|
||||
"purpose": ["image-signing"],
|
||||
"deviceId": "alice-macbook-pro",
|
||||
"comment": "Generated by docker-credential-atcr",
|
||||
"createdAt": "2025-10-20T12:00:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
**Record key:** `keyId` (user-chosen identifier)
|
||||
|
||||
**Fields:**
|
||||
- `keyId`: Unique identifier (e.g., `credential-helper-default`, `ci-key-1`)
|
||||
- `keyType`: Algorithm (ecdsa-p256, ed25519, rsa-2048, rsa-4096)
|
||||
- `publicKey`: PEM-encoded public key
|
||||
- `validFrom`: Key becomes valid at this time
|
||||
- `expiresAt`: Key expires (null = no expiry)
|
||||
- `revoked`: Revocation status
|
||||
- `purpose`: Key purposes (image-signing, sbom-signing, etc.)
|
||||
- `deviceId`: Optional device identifier
|
||||
- `comment`: Optional human-readable comment
|
||||
|
||||
### io.atcr.signature - Image Signatures
|
||||
|
||||
```json
|
||||
{
|
||||
"$type": "io.atcr.signature",
|
||||
"repository": "alice/myapp",
|
||||
"digest": "sha256:abc123...",
|
||||
"signature": "MEUCIQDxH7...",
|
||||
"keyId": "credential-helper-default",
|
||||
"signatureAlgorithm": "ecdsa-p256-sha256",
|
||||
"signedAt": "2025-10-20T12:34:56Z",
|
||||
"format": "simple",
|
||||
"createdAt": "2025-10-20T12:34:56Z"
|
||||
}
|
||||
```
|
||||
|
||||
**Record key:** SHA256 hash of `(digest || keyId)` for deduplication
|
||||
|
||||
**Fields:**
|
||||
- `repository`: Image repository (alice/myapp)
|
||||
- `digest`: Manifest digest being signed (sha256:...)
|
||||
- `signature`: Base64-encoded signature bytes
|
||||
- `keyId`: Reference to signing key record
|
||||
- `signatureAlgorithm`: Algorithm used
|
||||
- `signedAt`: Timestamp of signature creation
|
||||
- `format`: Signature format (simple, cosign, notary)
|
||||
|
||||
## Verification
|
||||
|
||||
Image signatures are verified using standard tools (Cosign, Notary) via the OCI Referrers API bridge. AppView transparently serves ATProto signatures as OCI artifacts, so verification "just works" with existing tooling.
|
||||
|
||||
### Integration with Docker/Kubernetes Workflows
|
||||
|
||||
**The challenge:** Cosign and Notary plugins are for **key management** (custom KMS, HSMs), not **signature storage**. Both tools expect signatures stored as OCI artifacts in the registry itself.
|
||||
|
||||
**Reality check:**
|
||||
- Cosign looks for signatures as OCI referrers or attached manifests
|
||||
- Notary looks for signatures in registry's `_notary` endpoint
|
||||
- Kubernetes admission controllers (Sigstore Policy Controller, Ratify) use these tools
|
||||
- They won't find signatures stored only in ATProto
|
||||
|
||||
**The solution:** AppView implements the **OCI Referrers API** and serves ATProto signatures as OCI artifacts on-demand.
|
||||
|
||||
### How It Works: OCI Referrers API Bridge
|
||||
|
||||
When Cosign/Notary verify an image, they call the OCI Referrers API:
|
||||
|
||||
```
|
||||
cosign verify atcr.io/alice/myapp:latest
|
||||
↓
|
||||
GET /v2/alice/myapp/referrers/sha256:abc123
|
||||
↓
|
||||
AppView:
|
||||
1. Queries alice's PDS for io.atcr.signature records
|
||||
2. Filters signatures matching digest sha256:abc123
|
||||
3. Transforms to OCI referrers format
|
||||
4. Returns as JSON
|
||||
↓
|
||||
Cosign receives OCI referrer manifest
|
||||
↓
|
||||
Verifies signature (works normally)
|
||||
```
|
||||
|
||||
**AppView endpoint implementation:**
|
||||
|
||||
```go
|
||||
// GET /v2/{owner}/{repo}/referrers/{digest}
|
||||
func (h *Handler) GetReferrers(w http.ResponseWriter, r *http.Request) {
|
||||
owner := mux.Vars(r)["owner"]
|
||||
digest := mux.Vars(r)["digest"]
|
||||
|
||||
// 1. Resolve owner → DID → PDS
|
||||
did, pds, err := h.resolver.ResolveIdentity(owner)
|
||||
|
||||
// 2. Query PDS for signatures matching digest
|
||||
signatures, err := h.atproto.ListRecords(pds, did, "io.atcr.signature")
|
||||
filtered := filterByDigest(signatures, digest)
|
||||
|
||||
// 3. Transform to OCI Index format
|
||||
index := &ocispec.Index{
|
||||
SchemaVersion: 2,
|
||||
MediaType: ocispec.MediaTypeImageIndex,
|
||||
Manifests: []ocispec.Descriptor{},
|
||||
}
|
||||
|
||||
for _, sig := range filtered {
|
||||
index.Manifests = append(index.Manifests, ocispec.Descriptor{
|
||||
MediaType: "application/vnd.oci.image.manifest.v1+json",
|
||||
Digest: sig.Digest,
|
||||
Size: sig.Size,
|
||||
ArtifactType: "application/vnd.dev.cosign.simplesigning.v1+json",
|
||||
Annotations: map[string]string{
|
||||
"dev.sigstore.cosign.signature": sig.Signature,
|
||||
"io.atcr.keyId": sig.KeyId,
|
||||
"io.atcr.signedAt": sig.SignedAt,
|
||||
"io.atcr.source": fmt.Sprintf("at://%s/io.atcr.signature/%s", did, sig.Rkey),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// 4. Return as JSON
|
||||
w.Header().Set("Content-Type", ocispec.MediaTypeImageIndex)
|
||||
json.NewEncoder(w).Encode(index)
|
||||
}
|
||||
```
|
||||
|
||||
**Benefits:**
|
||||
- ✅ **No dual storage** - signatures only in ATProto
|
||||
- ✅ **Standard tools work** - Cosign, Notary, Kubernetes admission controllers
|
||||
- ✅ **Single source of truth** - ATProto PDS
|
||||
- ✅ **On-demand transformation** - only when needed
|
||||
- ✅ **Offline verification** - can cache public keys
|
||||
|
||||
**Trade-offs:**
|
||||
- ⚠️ AppView must be reachable during verification (but already required for image pulls)
|
||||
- ⚠️ Transformation overhead (minimal - just JSON formatting)
|
||||
|
||||
### Alternative Approaches
|
||||
|
||||
#### Option 1: Dual Storage (Not Recommended)
|
||||
|
||||
Store signatures in BOTH ATProto AND OCI registry:
|
||||
|
||||
```go
|
||||
// In credential helper or AppView
|
||||
func StoreSignature(sig Signature) error {
|
||||
// 1. Store in ATProto (user's PDS or hold's PDS)
|
||||
err := storeInATProto(sig)
|
||||
|
||||
// 2. ALSO store as OCI artifact in registry
|
||||
err = storeAsOCIReferrer(sig)
|
||||
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
**OCI Referrer format:**
|
||||
```json
|
||||
{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"artifactType": "application/vnd.dev.cosign.simplesigning.v1+json",
|
||||
"subject": {
|
||||
"digest": "sha256:abc123...",
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json"
|
||||
},
|
||||
"layers": [{
|
||||
"mediaType": "application/vnd.dev.cosign.simplesigning.v1+json",
|
||||
"digest": "sha256:sig...",
|
||||
"annotations": {
|
||||
"dev.sigstore.cosign.signature": "MEUCIQDx...",
|
||||
"io.atcr.source": "atproto://did:plc:alice123/io.atcr.signature/..."
|
||||
}
|
||||
}]
|
||||
}
|
||||
```
|
||||
|
||||
**Benefits:**
|
||||
- ✅ Works with standard Cosign verification
|
||||
- ✅ Kubernetes admission controllers work out of box
|
||||
- ✅ ATProto signatures still available for discovery
|
||||
- ✅ Cross-reference via `io.atcr.source` annotation
|
||||
|
||||
**Trade-offs:**
|
||||
- ❌ Duplicate storage (ATProto + OCI)
|
||||
- ❌ Consistency issues (what if one write fails?)
|
||||
- ❌ Signatures tied to specific registry
|
||||
|
||||
#### Option 2: Custom Admission Controller
|
||||
|
||||
Write Kubernetes admission controller that understands ATProto:
|
||||
|
||||
```yaml
|
||||
# admission-controller deployment
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: atcr-policy
|
||||
data:
|
||||
policy.yaml: |
|
||||
policies:
|
||||
- name: require-atcr-signatures
|
||||
images:
|
||||
- "atcr.io/*/*"
|
||||
verification:
|
||||
method: atproto
|
||||
requireSignature: true
|
||||
```
|
||||
|
||||
**Benefits:**
|
||||
- ✅ Native ATProto support
|
||||
- ✅ No OCI conversion needed
|
||||
- ✅ Can enforce ATCR-specific policies
|
||||
|
||||
**Trade-offs:**
|
||||
- ❌ Doesn't work with standard tools (Cosign, Notary)
|
||||
- ❌ Additional infrastructure to maintain
|
||||
- ❌ Limited ecosystem integration
|
||||
|
||||
#### Recommendation
|
||||
|
||||
**Primary approach: OCI Referrers API Bridge**
|
||||
- Implement `/v2/{owner}/{repo}/referrers/{digest}` in AppView
|
||||
- Query ATProto on-demand and transform to OCI format
|
||||
- Works with Cosign, Notary, Kubernetes admission controllers
|
||||
- No duplicate storage, single source of truth
|
||||
|
||||
**Why this works:**
|
||||
- Cosign/Notary just make HTTP GET requests to the registry
|
||||
- AppView is already the registry - just add one endpoint
|
||||
- Transformation is simple (ATProto record → OCI descriptor)
|
||||
- Signatures stay in ATProto where they belong
|
||||
|
||||
### Cosign Verification (OCI Referrers API)
|
||||
|
||||
```bash
|
||||
# Standard Cosign works out of the box:
|
||||
cosign verify atcr.io/alice/myapp:latest \
|
||||
--key <(atcr-cli key export alice credential-helper-default)
|
||||
|
||||
# What happens:
|
||||
# 1. Cosign queries: GET /v2/alice/myapp/referrers/sha256:abc123
|
||||
# 2. AppView fetches signatures from alice's PDS
|
||||
# 3. AppView returns OCI referrers index
|
||||
# 4. Cosign downloads signature artifact
|
||||
# 5. Cosign verifies with public key
|
||||
# 6. Success!
|
||||
|
||||
# Or with public key inline:
|
||||
cosign verify atcr.io/alice/myapp:latest --key '-----BEGIN PUBLIC KEY-----
|
||||
MFkwEwYHKoZI...
|
||||
-----END PUBLIC KEY-----'
|
||||
```
|
||||
|
||||
**Fetching public keys from ATProto:**
|
||||
|
||||
Public keys are stored in ATProto records and can be fetched via standard XRPC:
|
||||
|
||||
```bash
|
||||
# Query for public keys
|
||||
curl "https://atcr.io/xrpc/com.atproto.repo.listRecords?\
|
||||
repo=did:plc:alice123&\
|
||||
collection=io.atcr.signing.key"
|
||||
|
||||
# Extract public key and save as PEM
|
||||
# Then use in Cosign:
|
||||
cosign verify atcr.io/alice/myapp:latest --key alice.pub
|
||||
```
|
||||
|
||||
### Kubernetes Policy Example (OCI Referrers API)
|
||||
|
||||
```yaml
|
||||
# Sigstore Policy Controller
|
||||
apiVersion: policy.sigstore.dev/v1beta1
|
||||
kind: ClusterImagePolicy
|
||||
metadata:
|
||||
name: atcr-images-must-be-signed
|
||||
spec:
|
||||
images:
|
||||
- glob: "atcr.io/*/*"
|
||||
authorities:
|
||||
- key:
|
||||
# Public key from ATProto record
|
||||
data: |
|
||||
-----BEGIN PUBLIC KEY-----
|
||||
MFkwEwYHKoZI...
|
||||
-----END PUBLIC KEY-----
|
||||
```
|
||||
|
||||
**How it works:**
|
||||
1. Pod tries to run `atcr.io/alice/myapp:latest`
|
||||
2. Policy Controller intercepts
|
||||
3. Queries registry for OCI referrers (finds signature)
|
||||
4. Verifies signature with public key
|
||||
5. Allows pod if valid
|
||||
|
||||
### Trust Policies
|
||||
|
||||
Define what signatures are required for image execution:
|
||||
|
||||
```yaml
|
||||
# ~/.atcr/trust-policy.yaml
|
||||
policies:
|
||||
- name: production-images
|
||||
scope: "atcr.io/alice/prod-*"
|
||||
require:
|
||||
- signature: true
|
||||
- keyIds: ["ci-key-1", "alice-release-key"]
|
||||
action: enforce # block, audit, or allow
|
||||
|
||||
- name: dev-images
|
||||
scope: "atcr.io/alice/dev-*"
|
||||
require:
|
||||
- signature: false
|
||||
action: audit
|
||||
```
|
||||
|
||||
**Integration points:**
|
||||
- Kubernetes admission controller
|
||||
- Docker Content Trust equivalent
|
||||
- CI/CD pipeline gates
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Key Storage Security
|
||||
|
||||
**OS keychain benefits:**
|
||||
- ✅ Encrypted storage
|
||||
- ✅ Access control (requires user password/biometric)
|
||||
- ✅ Auditing (macOS logs keychain access)
|
||||
- ✅ Hardware-backed on modern systems (Secure Enclave, TPM)
|
||||
|
||||
**Best practices:**
|
||||
- Generate keys on device (never transmitted)
|
||||
- Use hardware-backed storage when available
|
||||
- Require user approval for key access (biometric/password)
|
||||
- Rotate keys periodically (e.g., annually)
|
||||
|
||||
### Trust Model
|
||||
|
||||
**What signatures prove:**
|
||||
- ✅ User had access to private key at signing time
|
||||
- ✅ Manifest digest matches what was signed
|
||||
- ✅ Signature created by specific key ID
|
||||
- ✅ Timestamp of signature creation
|
||||
|
||||
**What signatures don't prove:**
|
||||
- ❌ Image is free of vulnerabilities
|
||||
- ❌ Image contents are safe to run
|
||||
- ❌ User's identity is verified (depends on DID trust)
|
||||
- ❌ Private key wasn't compromised
|
||||
|
||||
**Trust dependencies:**
|
||||
- User protects their private key
|
||||
- OS keychain security
|
||||
- DID resolution accuracy (PLC directory, did:web)
|
||||
- PDS serves correct public key records
|
||||
- Signature algorithms remain secure
|
||||
|
||||
### Multi-Device Support
|
||||
|
||||
**Challenge:** User has multiple devices (laptop, desktop, CI/CD)
|
||||
|
||||
**Options:**
|
||||
|
||||
1. **Separate keys per device:**
|
||||
```json
|
||||
{
|
||||
"keyId": "alice-macbook-pro",
|
||||
"deviceId": "macbook-pro"
|
||||
},
|
||||
{
|
||||
"keyId": "alice-desktop",
|
||||
"deviceId": "desktop"
|
||||
}
|
||||
```
|
||||
- Pros: Best security (key compromise limited to one device)
|
||||
- Cons: Need to trust signatures from any device
|
||||
|
||||
2. **Shared key via secure sync:**
|
||||
- Export key from primary device
|
||||
- Import to secondary devices
|
||||
- Stored in each device's keychain
|
||||
- Pros: Single key ID to trust
|
||||
- Cons: More attack surface (key on multiple devices)
|
||||
|
||||
3. **Primary + secondary model:**
|
||||
- Primary key on main device
|
||||
- Secondary keys on other devices
|
||||
- Trust policy requires primary key signature
|
||||
- Pros: Flexible + secure
|
||||
- Cons: More complex setup
|
||||
|
||||
**Recommendation:** Separate keys per device (Option 1) for security, with trust policy accepting any of user's keys.
|
||||
|
||||
### Key Compromise Response
|
||||
|
||||
If a device is lost or private key is compromised:
|
||||
|
||||
1. **Revoke the key** via AppView web UI or XRPC API
|
||||
- Updates `io.atcr.signing.key` record: `"revoked": true`
|
||||
- Revocation is atomic and immediate
|
||||
|
||||
2. **Generate new key** on new/existing device
|
||||
- Automatic on next `docker login` from secure device
|
||||
- Credential helper generates new key pair
|
||||
|
||||
3. **Old signatures still exist but fail verification**
|
||||
- Revoked key = untrusted
|
||||
- No certificate revocation list (CRL) delays
|
||||
- Globally visible within seconds
|
||||
|
||||
### CI/CD Signing
|
||||
|
||||
For automated builds, use standard Cosign in your CI pipeline:
|
||||
|
||||
```yaml
|
||||
# .github/workflows/build.yml
|
||||
steps:
|
||||
- name: Push image
|
||||
run: docker push atcr.io/alice/myapp:latest
|
||||
|
||||
- name: Sign with Cosign
|
||||
run: cosign sign atcr.io/alice/myapp:latest --key ${{ secrets.COSIGN_KEY }}
|
||||
```
|
||||
|
||||
**Key management:**
|
||||
- Generate Cosign key pair: `cosign generate-key-pair`
|
||||
- Store private key in CI secrets (GitHub Actions, GitLab CI, etc.)
|
||||
- Publish public key to PDS via XRPC or AppView web UI
|
||||
- Cosign stores signature via registry's OCI API
|
||||
- AppView automatically stores in ATProto
|
||||
|
||||
**Or use automatic signing:**
|
||||
- Configure credential helper in CI environment
|
||||
- Signatures happen automatically on push
|
||||
- No explicit signing step needed
|
||||
|
||||
## Implementation Roadmap
|
||||
|
||||
### Phase 1: Core Signing (2-3 weeks)
|
||||
|
||||
**Week 1: Credential helper key management**
|
||||
- Generate ECDSA key pair on first run
|
||||
- Store private key in OS keychain
|
||||
- Create `io.atcr.signing.key` record in PDS
|
||||
- Handle key rotation
|
||||
|
||||
**Week 2: Signing integration**
|
||||
- Sign manifest digest after authentication
|
||||
- Send signature to AppView via XRPC
|
||||
- AppView stores in user's PDS or hold's PDS
|
||||
- Error handling and retries
|
||||
|
||||
**Week 3: OCI Referrers API**
|
||||
- Implement `GET /v2/{owner}/{repo}/referrers/{digest}` in AppView
|
||||
- Query ATProto for signatures
|
||||
- Transform to OCI Index format
|
||||
- Return Cosign-compatible artifacts
|
||||
- Test with `cosign verify`
|
||||
|
||||
### Phase 2: Enhanced Features (2-3 weeks)
|
||||
|
||||
**Key management (credential helper):**
|
||||
- Key rotation support
|
||||
- Revocation handling
|
||||
- Device identification
|
||||
- Key expiration
|
||||
|
||||
**Signature storage:**
|
||||
- Handle manual Cosign signing (via OCI API)
|
||||
- Store signatures from both automatic and manual flows
|
||||
- Signature deduplication
|
||||
- Signature audit logs
|
||||
|
||||
**AppView endpoints:**
|
||||
- XRPC endpoints for key/signature queries
|
||||
- Web UI for viewing keys and signatures
|
||||
- Key revocation via web interface
|
||||
|
||||
### Phase 3: Kubernetes Integration (2-3 weeks)
|
||||
|
||||
**Admission controller setup:**
|
||||
- Documentation for Sigstore Policy Controller
|
||||
- Example policies for ATCR images
|
||||
- Public key management (fetch from ATProto)
|
||||
- Integration testing with real clusters
|
||||
|
||||
**Advanced features:**
|
||||
- Signature caching in AppView (reduce PDS queries)
|
||||
- Multi-signature support (require N signatures)
|
||||
- Timestamp verification
|
||||
- Signature expiration policies
|
||||
|
||||
### Phase 4: UI Integration (1-2 weeks)
|
||||
|
||||
**AppView web UI:**
|
||||
- Show signature status on repository pages
|
||||
- List signing keys for users
|
||||
- Revoke keys via web interface
|
||||
- Signature verification badges
|
||||
|
||||
## Comparison: Automatic vs Manual Signing
|
||||
|
||||
| Feature | Automatic (Credential Helper) | Manual (Standard Cosign) |
|
||||
|---------|-------------------------------|--------------------------|
|
||||
| **User action** | Zero - happens on push | `cosign sign` after push |
|
||||
| **Key management** | Automatic generation/storage | User manages keys |
|
||||
| **Consistency** | Every image signed | Easy to forget |
|
||||
| **Setup** | Works with credential helper | Install Cosign, generate keys |
|
||||
| **CI/CD** | Automatic if cred helper configured | Explicit signing step |
|
||||
| **Flexibility** | Opinionated defaults | Full control over workflow |
|
||||
| **Use case** | Most users, simple workflows | Advanced users, custom workflows |
|
||||
|
||||
**Recommendation:**
|
||||
- **Start with automatic**: Best UX, works for most users
|
||||
- **Use manual** for: CI/CD pipelines, hardware tokens, custom signing workflows
|
||||
|
||||
## Complete Workflow Summary
|
||||
|
||||
### Option 1: Automatic Signing (Recommended)
|
||||
|
||||
```bash
|
||||
# Setup (one time)
|
||||
docker login atcr.io
|
||||
# → Credential helper generates ECDSA key pair
|
||||
# → Private key in OS keychain
|
||||
# → Public key published to PDS
|
||||
|
||||
# Push (automatic signing)
|
||||
docker push atcr.io/alice/myapp:latest
|
||||
# → Image pushed and signed automatically
|
||||
# → No extra commands!
|
||||
|
||||
# Verify (standard Cosign)
|
||||
cosign verify atcr.io/alice/myapp:latest --key alice.pub
|
||||
# → Cosign queries OCI Referrers API
|
||||
# → AppView returns ATProto signatures as OCI artifacts
|
||||
# → Verification succeeds ✓
|
||||
```
|
||||
|
||||
### Option 2: Manual Signing (DIY)
|
||||
|
||||
```bash
|
||||
# Push image
|
||||
docker push atcr.io/alice/myapp:latest
|
||||
|
||||
# Sign with Cosign
|
||||
cosign sign atcr.io/alice/myapp:latest --key cosign.key
|
||||
# → Cosign stores via OCI API
|
||||
# → AppView stores in ATProto
|
||||
|
||||
# Verify (same as automatic)
|
||||
cosign verify atcr.io/alice/myapp:latest --key cosign.pub
|
||||
```
|
||||
|
||||
### Kubernetes (Standard Admission Controller)
|
||||
|
||||
```yaml
|
||||
# Sigstore Policy Controller (standard)
|
||||
apiVersion: policy.sigstore.dev/v1beta1
|
||||
kind: ClusterImagePolicy
|
||||
metadata:
|
||||
name: atcr-signed-only
|
||||
spec:
|
||||
images:
|
||||
- glob: "atcr.io/*/*"
|
||||
authorities:
|
||||
- key:
|
||||
data: |
|
||||
-----BEGIN PUBLIC KEY-----
|
||||
[Alice's public key from ATProto]
|
||||
-----END PUBLIC KEY-----
|
||||
```
|
||||
|
||||
**How admission control works:**
|
||||
1. Pod tries to start with `atcr.io/alice/myapp:latest`
|
||||
2. Policy Controller intercepts
|
||||
3. Calls `GET /v2/alice/myapp/referrers/sha256:abc123`
|
||||
4. AppView returns signatures from ATProto
|
||||
5. Policy Controller verifies with public key
|
||||
6. Pod allowed to start ✓
|
||||
|
||||
### Key Design Points
|
||||
|
||||
**User experience:**
|
||||
- ✅ Two options: automatic (credential helper) or manual (standard Cosign)
|
||||
- ✅ Standard verification tools work (Cosign, Notary, Kubernetes)
|
||||
- ✅ No custom ATCR-specific signing commands
|
||||
- ✅ User-controlled keys (OS keychain or self-managed)
|
||||
|
||||
**Architecture:**
|
||||
- **Signing**: Client-side only (credential helper or Cosign)
|
||||
- **Storage**: ATProto (user's PDS or hold's PDS via `io.atcr.signature`)
|
||||
- **Verification**: Standard tools via OCI Referrers API bridge
|
||||
- **Bridge**: AppView transforms ATProto → OCI format on-demand
|
||||
|
||||
**Why this works:**
|
||||
- ✅ No server-side signing needed (impossible with ATProto constraints)
|
||||
- ✅ Signatures discoverable via ATProto
|
||||
- ✅ No duplicate storage (single source of truth)
|
||||
- ✅ Standard OCI compliance for verification
|
||||
|
||||
## References
|
||||
|
||||
### Signing & Verification
|
||||
- [Sigstore Cosign](https://github.com/sigstore/cosign)
|
||||
- [Notary v2 Specification](https://notaryproject.dev/)
|
||||
- [Cosign Signature Specification](https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md)
|
||||
|
||||
### OCI & Registry
|
||||
- [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec)
|
||||
- [OCI Referrers API](https://github.com/oras-project/artifacts-spec/blob/main/manifest-referrers-api.md)
|
||||
- [OCI Artifacts](https://github.com/opencontainers/artifacts)
|
||||
|
||||
### ATProto
|
||||
- [ATProto Specification](https://atproto.com/)
|
||||
- [ATProto Repository Specification](https://atproto.com/specs/repository)
|
||||
|
||||
### Key Management
|
||||
- [Docker Credential Helpers](https://docs.docker.com/engine/reference/commandline/login/#credential-helpers)
|
||||
- [macOS Keychain Services](https://developer.apple.com/documentation/security/keychain_services)
|
||||
- [Windows Credential Manager](https://docs.microsoft.com/en-us/windows/security/identity-protection/credential-guard/)
|
||||
- [Linux Secret Service API](https://specifications.freedesktop.org/secret-service/)
|
||||
|
||||
### Kubernetes Integration
|
||||
- [Sigstore Policy Controller](https://docs.sigstore.dev/policy-controller/overview/)
|
||||
- [Ratify (Notary verification for Kubernetes)](https://ratify.dev/)
|
||||
820
docs/LAYER_RECORDS.md
Normal file
820
docs/LAYER_RECORDS.md
Normal file
@@ -0,0 +1,820 @@
|
||||
# Layer Records in ATProto
|
||||
|
||||
## Overview
|
||||
|
||||
This document describes the architecture for storing container layer metadata as ATProto records in the hold service's embedded PDS. This makes blob storage more "ATProto-native" by creating discoverable records for each unique layer.
|
||||
|
||||
## TL;DR
|
||||
|
||||
**Status: BUG FIXED ✅ | Layer Records Feature PLANNED 🔮**
|
||||
|
||||
### Quick Fix (IMPLEMENTED)
|
||||
|
||||
The critical bug where S3Native multipart uploads didn't move from temp → final location is now **FIXED**.
|
||||
|
||||
**What was fixed:**
|
||||
1. ✅ AppView sends real digest in complete request (not just tempDigest)
|
||||
2. ✅ Hold's CompleteMultipartUploadWithManager now accepts finalDigest parameter
|
||||
3. ✅ S3Native mode copies temp → final and deletes temp
|
||||
4. ✅ Buffered mode writes directly to final location
|
||||
|
||||
**Files changed:**
|
||||
- `pkg/appview/storage/proxy_blob_store.go` - Send real digest
|
||||
- `pkg/hold/s3.go` - Add copyBlobS3() and deleteBlobS3()
|
||||
- `pkg/hold/multipart.go` - Use finalDigest and move blob
|
||||
- `pkg/hold/blobstore_adapter.go` - Pass finalDigest through
|
||||
- `pkg/hold/pds/xrpc.go` - Update interface and handler
|
||||
|
||||
### Layer Records Feature (PLANNED)
|
||||
|
||||
Building on the quick fix, layer records will add:
|
||||
1. 🔮 Hold creates ATProto record for each unique layer
|
||||
2. 🔮 Deduplication: check layer record exists before finalizing upload
|
||||
3. 🔮 Manifest backlinks: include layer record AT-URIs
|
||||
4. 🔮 Discovery: `listRecords(io.atcr.manifest.layers)` shows all unique blobs
|
||||
|
||||
**Benefits:**
|
||||
- Makes blobs discoverable via ATProto protocol
|
||||
- Enables garbage collection (find unreferenced layers)
|
||||
- Foundation for per-layer access control
|
||||
- Audit trail for storage operations
|
||||
|
||||
## Motivation
|
||||
|
||||
**Goal:** Make hold services more ATProto-native by tracking unique blobs as records.
|
||||
|
||||
**Benefits:**
|
||||
- **Discovery:** Query `listRecords(io.atcr.manifest.layers)` to see all unique layers in a hold
|
||||
- **Auditing:** Track when unique content arrived, sizes, media types
|
||||
- **Deduplication:** One record per unique digest (not per upload)
|
||||
- **Migration:** Enumerate all blobs for moving between storage backends
|
||||
- **Future:** Foundation for per-blob access control, retention policies
|
||||
|
||||
**Key Design Decision:** Store records for **unique digests only**, not every blob upload. This mirrors the content-addressed deduplication already happening in S3.
|
||||
|
||||
## Current Upload Flow
|
||||
|
||||
### OCI Distribution Spec Pattern
|
||||
|
||||
The OCI distribution spec uses a two-phase upload:
|
||||
|
||||
1. **Initiate Upload**
|
||||
```
|
||||
POST /v2/<name>/blobs/uploads/
|
||||
→ Returns upload UUID (digest unknown at this point!)
|
||||
```
|
||||
|
||||
2. **Upload Data**
|
||||
```
|
||||
PATCH/PUT to temp location: uploads/temp-<uuid>
|
||||
→ Client streams blob data
|
||||
→ Digest not yet known
|
||||
```
|
||||
|
||||
3. **Finalize Upload**
|
||||
```
|
||||
PUT /v2/<name>/blobs/uploads/<uuid>?digest=sha256:abc123
|
||||
→ Digest provided at finalization time
|
||||
→ Registry moves: temp → final location at digest path
|
||||
```
|
||||
|
||||
**Critical insight:** In standard OCI distribution, the digest is only known at **finalization time**, not during upload. This allows clients to compute the digest as they stream data.
|
||||
|
||||
### Current ATCR Implementation
|
||||
|
||||
**Multipart Upload Flow:**
|
||||
|
||||
```
|
||||
1. Start multipart (XRPC POST with action=start, digest=sha256:abc...)
|
||||
- Client provides digest upfront (xrpc.go:849 requires req.Digest)
|
||||
- Generate uploadID (UUID)
|
||||
- S3Native: Create S3 multipart upload at FINAL path blobPath(digest)
|
||||
- Buffered: Create in-memory session with digest
|
||||
- Session stores: uploadID, digest, mode
|
||||
|
||||
2. Upload parts (XRPC POST with action=part, uploadId, partNumber)
|
||||
- S3Native: Returns presigned URLs to upload parts to final location
|
||||
- Buffered: Returns XRPC endpoint with X-Upload-Id/X-Part-Number headers
|
||||
- Parts go to final digest location (S3Native) or memory (Buffered)
|
||||
|
||||
3. Complete (XRPC POST with action=complete, uploadId, parts[])
|
||||
- S3Native: S3 CompleteMultipartUpload at final location
|
||||
- Buffered: Assemble parts, write to final location blobPath(digest)
|
||||
```
|
||||
|
||||
**Current paths:**
|
||||
- Final: `/docker/registry/v2/blobs/{algorithm}/{xx}/{hash}/data`
|
||||
- Example: `/docker/registry/v2/blobs/sha256/ab/abc123.../data`
|
||||
- Temp: `/docker/registry/v2/uploads/temp-<uuid>/data` (used during upload, then moved to final)
|
||||
|
||||
**Key insight:** Unlike standard OCI distribution spec (where digest is provided at finalization), ATCR's XRPC multipart flow requires digest upfront at start time. This is fine, but we should still use temp paths for atomic deduplication with layer records.
|
||||
|
||||
**Note:** The move operation bug described below has been fixed. The rest of this document describes the planned layer records feature.
|
||||
|
||||
## The Bug (FIXED)
|
||||
|
||||
### How It Was Fixed
|
||||
|
||||
The bug was fixed by:
|
||||
|
||||
1. **AppView** sends the real digest in complete request (not tempDigest)
|
||||
- `pkg/appview/storage/proxy_blob_store.go:740-745`
|
||||
|
||||
2. **Hold** accepts finalDigest parameter in CompleteMultipartUpload
|
||||
- `pkg/hold/multipart.go:281` - Added finalDigest parameter
|
||||
- `pkg/hold/s3.go:223-285` - Added copyBlobS3() and deleteBlobS3()
|
||||
|
||||
3. **S3Native mode** now moves blob from temp → final location
|
||||
- Complete multipart at temp location
|
||||
- Copy to final digest location
|
||||
- Delete temp
|
||||
|
||||
4. **Buffered mode** writes directly to final location (no change needed)
|
||||
|
||||
**Result:** Blobs are now correctly placed at final digest paths, downloads work correctly.
|
||||
|
||||
### The Problem (Historical Context)
|
||||
|
||||
Looking at the old `pkg/hold/multipart.go:278-317`, the `CompleteMultipartUploadWithManager` function:
|
||||
|
||||
**S3Native mode (lines 282-289):**
|
||||
```go
|
||||
if session.Mode == S3Native {
|
||||
parts := session.GetCompletedParts()
|
||||
if err := s.completeMultipartUpload(ctx, session.Digest, session.S3UploadID, parts); err != nil {
|
||||
return fmt.Errorf("failed to complete S3 multipart: %w", err)
|
||||
}
|
||||
log.Printf("Completed S3 native multipart: uploadID=%s, parts=%d", session.UploadID, len(parts))
|
||||
return nil // ❌ Missing move operation!
|
||||
}
|
||||
```
|
||||
|
||||
**What's missing:**
|
||||
1. S3 CompleteMultipartUpload assembles parts at temp location: `uploads/temp-<uuid>`
|
||||
2. **MISSING:** S3 CopyObject from `uploads/temp-<uuid>` → `blobs/sha256/ab/abc123.../data`
|
||||
3. **MISSING:** Delete temp blob
|
||||
|
||||
**Buffered mode works correctly** (lines 292-316) because it writes assembled data directly to final path `blobPath(session.Digest)`.
|
||||
|
||||
### Evidence from Design Doc
|
||||
|
||||
From `docs/XRPC_BLOB_MIGRATION.md` (lines 105-114):
|
||||
```
|
||||
1. Multipart parts uploaded → uploads/temp-{uploadID}
|
||||
2. Complete multipart → S3 assembles parts at uploads/temp-{uploadID}
|
||||
3. **Move operation** → S3 copy from uploads/temp-{uploadID} → blobs/sha256/ab/abc123...
|
||||
```
|
||||
|
||||
The move was supposed to be internalized into the complete action (lines 308-311):
|
||||
```
|
||||
Call service.CompleteMultipartUploadWithManager(ctx, session, multipartMgr)
|
||||
- This internally calls S3 CompleteMultipartUpload to assemble parts
|
||||
- Then performs server-side S3 copy from temp location to final digest location
|
||||
- Equivalent to legacy /move endpoint operation
|
||||
```
|
||||
|
||||
### The Actual Flow (Currently Broken for S3Native)
|
||||
|
||||
**AppView sends tempDigest:**
|
||||
```go
|
||||
// proxy_blob_store.go
|
||||
tempDigest := fmt.Sprintf("uploads/temp-%s", writerID)
|
||||
uploadID, err := p.startMultipartUpload(ctx, tempDigest)
|
||||
// Passes tempDigest to hold via XRPC
|
||||
```
|
||||
|
||||
**Hold receives and uses tempDigest:**
|
||||
```go
|
||||
// xrpc.go:854
|
||||
uploadID, mode, err := h.blobStore.StartMultipartUpload(ctx, req.Digest)
|
||||
// req.Digest = "uploads/temp-<writerID>" from AppView
|
||||
|
||||
// blobstore_adapter.go → multipart.go → s3.go:93
|
||||
path := blobPath(digest) // digest = "uploads/temp-<writerID>"
|
||||
// Returns: "/docker/registry/v2/uploads/temp-<writerID>/data"
|
||||
|
||||
// S3 multipart created at temp path ✅
|
||||
```
|
||||
|
||||
**Parts uploaded to temp location ✅**
|
||||
|
||||
**Complete called:**
|
||||
```go
|
||||
// proxy_blob_store.go (comment on line):
|
||||
// Complete multipart upload - XRPC complete action handles move internally
|
||||
if err := w.store.completeMultipartUpload(ctx, tempDigest, w.uploadID, w.parts); err != nil
|
||||
```
|
||||
|
||||
**Hold's CompleteMultipartUploadWithManager for S3Native:**
|
||||
```go
|
||||
// multipart.go:282-289
|
||||
if session.Mode == S3Native {
|
||||
parts := session.GetCompletedParts()
|
||||
if err := s.completeMultipartUpload(ctx, session.Digest, session.S3UploadID, parts); err != nil {
|
||||
return fmt.Errorf("failed to complete S3 multipart: %w", err)
|
||||
}
|
||||
log.Printf("Completed S3 native multipart: uploadID=%s, parts=%d", session.UploadID, len(parts))
|
||||
return nil // ❌ BUG: No move operation!
|
||||
}
|
||||
```
|
||||
|
||||
**Result:**
|
||||
- Blob is at: `/docker/registry/v2/uploads/temp-<writerID>/data` (temp location)
|
||||
- Blob should be at: `/docker/registry/v2/blobs/sha256/ab/abc123.../data` (final location)
|
||||
- **Downloads will fail** because AppView looks for blob at final digest path
|
||||
|
||||
**Why this might appear to work:**
|
||||
- Buffered mode writes directly to final path (no temp used)
|
||||
- Or S3Native isn't being used in current deployments
|
||||
- Or there's a workaround somewhere else
|
||||
|
||||
## Proposed Flow with Layer Records (Future Feature)
|
||||
|
||||
### High-Level Flow
|
||||
|
||||
**Building on the quick fix above, layer records will add:**
|
||||
1. PDS record creation for each unique layer digest
|
||||
2. Deduplication check before finalizing storage
|
||||
3. Manifest backlinks to layer records
|
||||
|
||||
**Note:** The quick fix already implements sending finalDigest in complete request. The layer records feature extends this to create ATProto records.
|
||||
|
||||
```
|
||||
1. Start multipart upload (XRPC action=start with tempDigest)
|
||||
- AppView provides tempDigest: "uploads/temp-<writerID>"
|
||||
- S3Native: Create S3 multipart at temp path: /uploads/temp-<writerID>/data
|
||||
- Buffered: Create in-memory session with temp identifier
|
||||
- Store in MultipartSession:
|
||||
* TempDigest: "uploads/temp-<writerID>" (upload location)
|
||||
* FinalDigest: null (not known yet at start time!)
|
||||
|
||||
NOTE: AppView knows the real digest (desc.Digest), but doesn't send it at start
|
||||
|
||||
2. Upload parts (XRPC action=part)
|
||||
- S3Native: Presigned URLs to temp path (uploads/temp-<uuid>)
|
||||
- Buffered: Buffer parts in memory with temp identifier
|
||||
- All parts go to temp location (not final digest location yet)
|
||||
|
||||
3. Complete upload (XRPC action=complete, uploadId, finalDigest, parts)
|
||||
- AppView NOW sends:
|
||||
* uploadId: the session ID
|
||||
* finalDigest: "sha256:abc123..." (the real digest for final location)
|
||||
* parts: array of {partNumber, etag}
|
||||
|
||||
- Hold looks up session by uploadId
|
||||
- Updates session.FinalDigest = finalDigest
|
||||
|
||||
a. Try PutRecord(io.atcr.manifest.layers, digestHash, layerRecord)
|
||||
- digestHash = finalDigest without "sha256:" prefix
|
||||
- Record key = digestHash (content-addressed, naturally idempotent)
|
||||
|
||||
b. If record already exists (PDS returns ErrRecordAlreadyExists):
|
||||
- DEDUPLICATION! Layer already tracked
|
||||
- Delete temp blob (S3 or buffered data)
|
||||
- Return existing layerRecord AT-URI
|
||||
- Client saved bandwidth/time (uploaded to temp, but not stored)
|
||||
|
||||
c. If record creation succeeds (new layer!):
|
||||
- Finalize storage:
|
||||
* S3Native: S3 CopyObject(uploads/temp-<uuid> → blobs/sha256/ab/abc123.../data)
|
||||
* Buffered: Write assembled data to final path (blobs/sha256/ab/abc123.../data)
|
||||
- Delete temp
|
||||
- Return new layerRecord AT-URI + metadata
|
||||
|
||||
d. If record creation fails (PDS error):
|
||||
- Delete temp blob
|
||||
- Return error (upload failed, no storage consumed)
|
||||
```
|
||||
|
||||
**Why use temp paths if digest is known?**
|
||||
- Deduplication check happens BEFORE committing blob to storage
|
||||
- If layer exists, we avoid expensive S3 copy to final location
|
||||
- Atomic: record creation + blob finalization together
|
||||
|
||||
### Atomic Commit Logic
|
||||
|
||||
The key is making record creation + blob finalization atomic:
|
||||
|
||||
```go
|
||||
// In CompleteMultipartUploadWithManager
|
||||
func (s *HoldService) CompleteMultipartUploadWithManager(
|
||||
ctx context.Context,
|
||||
session *MultipartSession,
|
||||
manager *MultipartManager,
|
||||
) (layerRecordURI string, err error) {
|
||||
defer manager.DeleteSession(session.UploadID)
|
||||
|
||||
// Session now has both temp and final digests
|
||||
tempDigest := session.TempDigest // "uploads/temp-<writerID>"
|
||||
finalDigest := session.FinalDigest // "sha256:abc123..." (set during complete)
|
||||
|
||||
tempPath := blobPath(tempDigest) // /uploads/temp-<writerID>/data
|
||||
finalPath := blobPath(finalDigest) // /blobs/sha256/ab/abc123.../data
|
||||
|
||||
// Extract digest hash for record key
|
||||
digestHash := strings.TrimPrefix(finalDigest, "sha256:")
|
||||
|
||||
// Build layer record
|
||||
layerRecord := &atproto.ManifestLayerRecord{
|
||||
Type: "io.atcr.manifest.layers",
|
||||
Digest: finalDigest,
|
||||
Size: session.TotalSize,
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar+gzip",
|
||||
UploadedAt: time.Now().Format(time.RFC3339),
|
||||
}
|
||||
|
||||
// Try to create layer record (idempotent with digest as rkey)
|
||||
err = s.holdPDS.PutRecord(ctx, atproto.ManifestLayersCollection, digestHash, layerRecord)
|
||||
|
||||
if err == atproto.ErrRecordAlreadyExists {
|
||||
// Dedupe! Layer already tracked
|
||||
log.Printf("Layer already exists, deduplicating: digest=%s", digest)
|
||||
s.deleteBlob(ctx, tempPath)
|
||||
|
||||
// Return existing record URI
|
||||
return fmt.Sprintf("at://%s/%s/%s",
|
||||
s.holdPDS.DID(),
|
||||
atproto.ManifestLayersCollection,
|
||||
digestHash), nil
|
||||
} else if err != nil {
|
||||
// PDS error - abort upload
|
||||
log.Printf("Failed to create layer record: %v", err)
|
||||
s.deleteBlob(ctx, tempPath)
|
||||
return "", fmt.Errorf("failed to create layer record: %w", err)
|
||||
}
|
||||
|
||||
// New layer! Finalize storage
|
||||
if session.Mode == S3Native {
|
||||
// S3 multipart already uploaded to temp path
|
||||
// Copy to final location
|
||||
if err := s.copyBlob(ctx, tempPath, finalPath); err != nil {
|
||||
// Rollback: delete layer record
|
||||
s.holdPDS.DeleteRecord(ctx, atproto.ManifestLayersCollection, digestHash)
|
||||
s.deleteBlob(ctx, tempPath)
|
||||
return "", fmt.Errorf("failed to copy blob: %w", err)
|
||||
}
|
||||
s.deleteBlob(ctx, tempPath)
|
||||
} else {
|
||||
// Buffered mode: assemble and write to final location
|
||||
data, size, err := session.AssembleBufferedParts()
|
||||
if err != nil {
|
||||
s.holdPDS.DeleteRecord(ctx, atproto.ManifestLayersCollection, digestHash)
|
||||
return "", fmt.Errorf("failed to assemble parts: %w", err)
|
||||
}
|
||||
|
||||
if err := s.writeBlob(ctx, finalPath, data); err != nil {
|
||||
s.holdPDS.DeleteRecord(ctx, atproto.ManifestLayersCollection, digestHash)
|
||||
return "", fmt.Errorf("failed to write blob: %w", err)
|
||||
}
|
||||
|
||||
log.Printf("Wrote blob to final location: size=%d", size)
|
||||
}
|
||||
|
||||
// Success! Return new layer record URI
|
||||
layerRecordURI = fmt.Sprintf("at://%s/%s/%s",
|
||||
s.holdPDS.DID(),
|
||||
atproto.ManifestLayersCollection,
|
||||
digestHash)
|
||||
|
||||
log.Printf("Created new layer record: %s", layerRecordURI)
|
||||
return layerRecordURI, nil
|
||||
}
|
||||
```
|
||||
|
||||
## Lexicon Schema
|
||||
|
||||
### io.atcr.manifest.layers
|
||||
|
||||
```json
|
||||
{
|
||||
"lexicon": 1,
|
||||
"id": "io.atcr.manifest.layers",
|
||||
"defs": {
|
||||
"main": {
|
||||
"type": "record",
|
||||
"key": "literal:self",
|
||||
"record": {
|
||||
"type": "object",
|
||||
"required": ["digest", "size", "mediaType", "uploadedAt"],
|
||||
"properties": {
|
||||
"digest": {
|
||||
"type": "string",
|
||||
"description": "Full OCI digest (sha256:abc123...)"
|
||||
},
|
||||
"size": {
|
||||
"type": "integer",
|
||||
"description": "Size in bytes"
|
||||
},
|
||||
"mediaType": {
|
||||
"type": "string",
|
||||
"description": "Media type (e.g., application/vnd.oci.image.layer.v1.tar+gzip)"
|
||||
},
|
||||
"uploadedAt": {
|
||||
"type": "string",
|
||||
"format": "datetime",
|
||||
"description": "When this unique layer first arrived"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Record key:** Digest hash (without algorithm prefix)
|
||||
- Example: `sha256:abc123...` → record key `abc123...`
|
||||
- This makes records content-addressed and naturally deduplicates
|
||||
|
||||
### Example Record
|
||||
|
||||
```json
|
||||
{
|
||||
"$type": "io.atcr.manifest.layers",
|
||||
"digest": "sha256:abc123def456...",
|
||||
"size": 12345678,
|
||||
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
|
||||
"uploadedAt": "2025-10-18T12:34:56Z"
|
||||
}
|
||||
```
|
||||
|
||||
**AT-URI:** `at://did:web:hold1.atcr.io/io.atcr.manifest.layers/abc123def456...`
|
||||
|
||||
## Implementation Details
|
||||
|
||||
### Files to Modify
|
||||
|
||||
1. **pkg/atproto/lexicon.go**
|
||||
- Add `ManifestLayersCollection = "io.atcr.manifest.layers"`
|
||||
- Add `ManifestLayerRecord` struct
|
||||
|
||||
2. **pkg/hold/multipart.go**
|
||||
- Update `MultipartSession` struct:
|
||||
- Rename `Digest` to `TempDigest` - temp identifier (e.g., "uploads/temp-<writerID>")
|
||||
- Add `FinalDigest string` - final digest (e.g., "sha256:abc123..."), set during complete
|
||||
- Update `StartMultipartUploadWithManager` to:
|
||||
- Receive tempDigest from AppView (not final digest)
|
||||
- Create S3 multipart at temp path
|
||||
- Store TempDigest in session (FinalDigest is null at start)
|
||||
- Modify `CompleteMultipartUploadWithManager` to:
|
||||
- Try PutRecord to create layer record
|
||||
- If exists: delete temp, return existing record (dedupe)
|
||||
- If new: finalize storage (copy/move temp → final)
|
||||
- Handle rollback on errors
|
||||
|
||||
3. **pkg/hold/s3.go**
|
||||
- Add `copyBlob(src, dst)` for S3 CopyObject
|
||||
- Add `deleteBlob(path)` for cleanup
|
||||
|
||||
4. **pkg/hold/storage.go**
|
||||
- Update `blobPath()` to handle temp digests
|
||||
- Add helper for final path generation
|
||||
|
||||
5. **pkg/hold/pds/server.go**
|
||||
- Add `PutRecord(ctx, collection, rkey, record)` method to HoldPDS
|
||||
- Wraps `repomgr.CreateRecord()` or `repomgr.UpdateRecord()`
|
||||
- Returns `ErrRecordAlreadyExists` if rkey exists (for deduplication)
|
||||
- Similar pattern to existing `AddCrewMember()` method
|
||||
- Add `DeleteRecord(ctx, collection, rkey)` method (for rollback)
|
||||
- Wraps `repomgr.DeleteRecord()`
|
||||
- Add error constant: `var ErrRecordAlreadyExists = errors.New("record already exists")`
|
||||
|
||||
6. **pkg/hold/pds/xrpc.go**
|
||||
- Update `BlobStore` interface:
|
||||
- Change `CompleteMultipartUpload` signature:
|
||||
* Was: `CompleteMultipartUpload(ctx, uploadID, parts) error`
|
||||
* New: `CompleteMultipartUpload(ctx, uploadID, finalDigest, parts) (*LayerMetadata, error)`
|
||||
* Takes finalDigest to know where to move blob + create layer record
|
||||
- Update `handleMultipartOperation` complete action to:
|
||||
- Parse `finalDigest` from request body (NEW)
|
||||
- Look up session by uploadID
|
||||
- Set session.FinalDigest = finalDigest
|
||||
- Call CompleteMultipartUpload (returns LayerMetadata)
|
||||
- Include layerRecord AT-URI in response
|
||||
- Add `LayerMetadata` struct:
|
||||
```go
|
||||
type LayerMetadata struct {
|
||||
LayerRecord string // AT-URI
|
||||
Digest string
|
||||
Size int64
|
||||
Deduplicated bool
|
||||
}
|
||||
```
|
||||
|
||||
7. **pkg/appview/storage/proxy_blob_store.go**
|
||||
- Update `ProxyBlobWriter.Commit()` to send finalDigest in complete request:
|
||||
```go
|
||||
// Current: only sends tempDigest
|
||||
completeMultipartUpload(ctx, tempDigest, uploadID, parts)
|
||||
|
||||
// New: also sends finalDigest
|
||||
completeMultipartUpload(ctx, uploadID, finalDigest, parts)
|
||||
```
|
||||
- The writer already has `w.desc.Digest` (the real digest)
|
||||
- Pass both uploadID (to find session) and finalDigest (for move + layer record)
|
||||
|
||||
### API Changes
|
||||
|
||||
#### Complete Multipart Request (XRPC) - UPDATED
|
||||
|
||||
**Before:**
|
||||
```json
|
||||
{
|
||||
"action": "complete",
|
||||
"uploadId": "upload-1634567890",
|
||||
"parts": [
|
||||
{ "partNumber": 1, "etag": "abc123" },
|
||||
{ "partNumber": 2, "etag": "def456" }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**After (with finalDigest):**
|
||||
```json
|
||||
{
|
||||
"action": "complete",
|
||||
"uploadId": "upload-1634567890",
|
||||
"digest": "sha256:abc123...",
|
||||
"parts": [
|
||||
{ "partNumber": 1, "etag": "abc123" },
|
||||
{ "partNumber": 2, "etag": "def456" }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
#### Complete Multipart Response (XRPC)
|
||||
|
||||
**Before:**
|
||||
```json
|
||||
{
|
||||
"status": "completed"
|
||||
}
|
||||
```
|
||||
|
||||
**After:**
|
||||
```json
|
||||
{
|
||||
"status": "completed",
|
||||
"layerRecord": "at://did:web:hold1.atcr.io/io.atcr.manifest.layers/abc123...",
|
||||
"digest": "sha256:abc123...",
|
||||
"size": 12345678,
|
||||
"deduplicated": false
|
||||
}
|
||||
```
|
||||
|
||||
**Deduplication case:**
|
||||
```json
|
||||
{
|
||||
"status": "completed",
|
||||
"layerRecord": "at://did:web:hold1.atcr.io/io.atcr.manifest.layers/abc123...",
|
||||
"digest": "sha256:abc123...",
|
||||
"size": 12345678,
|
||||
"deduplicated": true
|
||||
}
|
||||
```
|
||||
|
||||
### S3 Operations
|
||||
|
||||
**S3 Native Mode:**
|
||||
```go
|
||||
// Start: Create multipart upload at TEMP path
|
||||
uploadID = s3.CreateMultipartUpload(bucket, "uploads/temp-<uuid>")
|
||||
|
||||
// Upload parts: to temp location
|
||||
s3.UploadPart(bucket, "uploads/temp-<uuid>", partNum, data)
|
||||
|
||||
// Complete: Copy temp → final
|
||||
s3.CopyObject(
|
||||
bucket, "uploads/temp-<uuid>", // source
|
||||
bucket, "blobs/sha256/ab/abc123.../data" // dest
|
||||
)
|
||||
s3.DeleteObject(bucket, "uploads/temp-<uuid>")
|
||||
```
|
||||
|
||||
**Buffered Mode:**
|
||||
```go
|
||||
// Parts buffered in memory
|
||||
session.Parts[partNum] = data
|
||||
|
||||
// Complete: Write to final location
|
||||
assembledData = session.AssembleBufferedParts()
|
||||
driver.Writer("blobs/sha256/ab/abc123.../data").Write(assembledData)
|
||||
```
|
||||
|
||||
## Manifest Integration
|
||||
|
||||
### Manifest Record Enhancement
|
||||
|
||||
When AppView writes manifests to user's PDS, include layer record references:
|
||||
|
||||
```json
|
||||
{
|
||||
"$type": "io.atcr.manifest",
|
||||
"repository": "myapp",
|
||||
"digest": "sha256:manifest123...",
|
||||
"holdEndpoint": "https://hold1.atcr.io",
|
||||
"holdDid": "did:web:hold1.atcr.io",
|
||||
"layers": [
|
||||
{
|
||||
"digest": "sha256:abc123...",
|
||||
"size": 12345678,
|
||||
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
|
||||
"layerRecord": "at://did:web:hold1.atcr.io/io.atcr.manifest.layers/abc123..."
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Cross-repo references:** Manifests in user's PDS point to layer records in hold's PDS.
|
||||
|
||||
### AppView Flow
|
||||
|
||||
1. Client pushes layer to hold
|
||||
2. Hold returns `layerRecord` AT-URI in response
|
||||
3. AppView caches: `digest → layerRecord AT-URI`
|
||||
4. When writing manifest to user's PDS:
|
||||
- Add `layerRecord` field to each layer
|
||||
- Add `holdDid` to manifest root
|
||||
|
||||
## Benefits
|
||||
|
||||
1. **ATProto Discovery**
|
||||
- `listRecords(io.atcr.manifest.layers)` shows all unique layers
|
||||
- Standard ATProto queries work
|
||||
|
||||
2. **Automatic Deduplication**
|
||||
- PutRecord with digest as rkey is naturally idempotent
|
||||
- Concurrent uploads of same layer handled gracefully
|
||||
|
||||
3. **Audit Trail**
|
||||
- Track when each unique layer first arrived
|
||||
- Monitor storage growth by unique content
|
||||
|
||||
4. **Migration Support**
|
||||
- Enumerate all blobs via ATProto queries
|
||||
- Verify blob existence before migration
|
||||
|
||||
5. **Cross-Repo References**
|
||||
- Manifests link to layer records via AT-URI
|
||||
- Verifiable blob existence
|
||||
|
||||
6. **Future Features**
|
||||
- Per-layer access control
|
||||
- Retention policies
|
||||
- Layer tagging/metadata
|
||||
|
||||
## Trade-offs
|
||||
|
||||
### Complexity
|
||||
- Additional PDS writes during upload
|
||||
- S3 copy operation (temp → final)
|
||||
- Rollback logic if record creation succeeds but storage fails
|
||||
|
||||
### Performance
|
||||
- Extra latency: PDS write + S3 copy
|
||||
- BUT: Deduplication saves bandwidth on repeated uploads
|
||||
|
||||
### Storage
|
||||
- Minimal: Layer records are just metadata (~200 bytes each)
|
||||
- S3 temp → final copy uses same S3 account (no egress cost)
|
||||
|
||||
### Consistency
|
||||
- Must keep layer records and S3 blobs in sync
|
||||
- Rollback deletes layer record if storage fails
|
||||
- Orphaned records possible if process crashes mid-commit
|
||||
|
||||
## Future Considerations
|
||||
|
||||
### Garbage Collection
|
||||
|
||||
Layer records enable GC:
|
||||
```
|
||||
1. List all layer records in hold
|
||||
2. For each layer:
|
||||
- Query manifests that reference it (via AppView)
|
||||
- If no references, mark for deletion
|
||||
3. Delete unreferenced layers (record + blob)
|
||||
```
|
||||
|
||||
### Private Layers
|
||||
|
||||
Currently, holds are public or crew-only (hold-level auth). Future:
|
||||
- Per-layer permissions via layer record metadata
|
||||
- Reference from manifest proves user has access
|
||||
|
||||
### Layer Provenance
|
||||
|
||||
Track additional metadata:
|
||||
- First uploader DID
|
||||
- Upload source (manifest URI)
|
||||
- Verification status
|
||||
|
||||
## Configuration
|
||||
|
||||
Add environment variable:
|
||||
```
|
||||
HOLD_TRACK_LAYERS=true # Enable layer record creation (default: true)
|
||||
```
|
||||
|
||||
If disabled, hold service works as before (no layer records).
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
1. **Deduplication Test**
|
||||
- Upload same layer twice
|
||||
- Verify only one record created
|
||||
- Verify second upload returns same AT-URI
|
||||
|
||||
2. **Concurrent Upload Test**
|
||||
- Upload same layer from 2 clients simultaneously
|
||||
- Verify one succeeds, one dedupes
|
||||
- Verify only one blob in S3
|
||||
|
||||
3. **Rollback Test**
|
||||
- Mock S3 failure after record creation
|
||||
- Verify layer record is deleted (rollback)
|
||||
|
||||
4. **Migration Test**
|
||||
- Upload multiple layers
|
||||
- List all layer records
|
||||
- Verify blobs exist in S3
|
||||
|
||||
## Open Questions
|
||||
|
||||
1. **What happens if S3 copy fails after record creation?**
|
||||
- Current plan: Delete layer record (rollback)
|
||||
- Alternative: Leave record, retry copy on next request?
|
||||
|
||||
2. **Should we verify blob digest matches record?**
|
||||
- On upload: Client provides digest, but we trust it
|
||||
- Could compute digest during upload to verify
|
||||
|
||||
3. **How to handle orphaned layer records?**
|
||||
- Record exists but blob missing from S3
|
||||
- Background job to verify and clean up?
|
||||
|
||||
4. **Should manifests store layer records?**
|
||||
- Yes: Strong references, verifiable
|
||||
- No: Extra complexity, larger manifests
|
||||
- **Decision:** Yes, for ATProto graph completeness
|
||||
|
||||
## Testing & Verification
|
||||
|
||||
### Verify the Quick Fix Works (Bug is Fixed)
|
||||
|
||||
After the quick fix implementation:
|
||||
|
||||
1. **Push a test image** with S3Native mode enabled
|
||||
2. **Verify blob at final location:**
|
||||
```bash
|
||||
aws s3 ls s3://bucket/docker/registry/v2/blobs/sha256/ab/abc123.../data
|
||||
```
|
||||
3. **Verify temp is cleaned up:**
|
||||
```bash
|
||||
aws s3 ls s3://bucket/docker/registry/v2/uploads/temp-* # Should be empty
|
||||
```
|
||||
4. **Pull the image** → should succeed ✅
|
||||
|
||||
### Test Layer Records Feature (When Implemented)
|
||||
|
||||
After implementing the full layer records feature:
|
||||
|
||||
1. **Push an image**
|
||||
2. **Verify layer record created:**
|
||||
```
|
||||
GET /xrpc/com.atproto.repo.getRecord?repo={holdDID}&collection=io.atcr.manifest.layers&rkey=abc123...
|
||||
```
|
||||
3. **Verify blob at final location** (same as quick fix)
|
||||
4. **Verify temp deleted** (same as quick fix)
|
||||
5. **Pull image** → should succeed
|
||||
|
||||
### Test Deduplication (Layer Records Feature)
|
||||
|
||||
1. Push same layer from different client
|
||||
2. Verify only one layer record exists
|
||||
3. Verify complete returns `deduplicated: true`
|
||||
4. Verify no duplicate blobs in S3
|
||||
5. Verify temp blob was deleted without copying (dedupe path)
|
||||
|
||||
## Summary
|
||||
|
||||
### Current State (Quick Fix Implemented)
|
||||
|
||||
The critical bug is **FIXED**:
|
||||
- ✅ S3Native mode correctly moves blobs from temp → final digest location
|
||||
- ✅ AppView sends real digest in complete requests
|
||||
- ✅ Blobs are stored at correct paths, downloads work
|
||||
- ✅ Temp uploads are cleaned up properly
|
||||
|
||||
### Future State (Layer Records Feature)
|
||||
|
||||
When implemented, layer records will make ATCR more ATProto-native by:
|
||||
- 🔮 Storing unique blobs as discoverable ATProto records
|
||||
- 🔮 Enabling deduplication via idempotent PutRecord (check before upload)
|
||||
- 🔮 Creating cross-repo references (manifest → layer records)
|
||||
- 🔮 Foundation for GC, access control, provenance tracking
|
||||
|
||||
**Next Steps:**
|
||||
1. Test the quick fix in production
|
||||
2. Plan layer records implementation (requires PDS record creation)
|
||||
3. Implement deduplication logic
|
||||
4. Add manifest backlinks to layer records
|
||||
398
docs/MINIFY.md
Normal file
398
docs/MINIFY.md
Normal file
@@ -0,0 +1,398 @@
|
||||
# CSS/JS Minification for ATCR
|
||||
|
||||
## Overview
|
||||
|
||||
ATCR embeds static assets (CSS, JavaScript) directly into the binary using Go's `embed` directive. Currently:
|
||||
|
||||
- **CSS Size:** 40KB (`pkg/appview/static/css/style.css`, 2,210 lines)
|
||||
- **Embedded:** All static files compiled into binary at build time
|
||||
- **No Minification:** Source files embedded as-is
|
||||
|
||||
**Problem:** Embedded assets increase binary size and network transfer time.
|
||||
|
||||
**Solution:** Minify CSS/JS before embedding to reduce both binary size and network transfer.
|
||||
|
||||
## Recommended Approach: `tdewolff/minify`
|
||||
|
||||
Use the pure Go `tdewolff/minify` library with `go:generate` to minify assets at build time.
|
||||
|
||||
**Benefits:**
|
||||
- Pure Go, no external dependencies (Node.js, npm)
|
||||
- Integrates with existing `go:generate` workflow
|
||||
- ~30-40% CSS size reduction (40KB → ~28KB)
|
||||
- Minifies CSS, JS, HTML, JSON, SVG, XML
|
||||
|
||||
## Implementation
|
||||
|
||||
### Step 1: Add Dependency
|
||||
|
||||
```bash
|
||||
go get github.com/tdewolff/minify/v2
|
||||
```
|
||||
|
||||
This will update `go.mod`:
|
||||
```go
|
||||
require github.com/tdewolff/minify/v2 v2.20.37
|
||||
```
|
||||
|
||||
### Step 2: Create Minification Script
|
||||
|
||||
Create `pkg/appview/static/minify_assets.go`:
|
||||
|
||||
```go
|
||||
//go:build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/tdewolff/minify/v2"
|
||||
"github.com/tdewolff/minify/v2/css"
|
||||
"github.com/tdewolff/minify/v2/js"
|
||||
)
|
||||
|
||||
func main() {
|
||||
m := minify.New()
|
||||
m.AddFunc("text/css", css.Minify)
|
||||
m.AddFunc("text/javascript", js.Minify)
|
||||
|
||||
// Get the directory of this script
|
||||
dir, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Minify CSS
|
||||
if err := minifyFile(m, "text/css",
|
||||
filepath.Join(dir, "pkg/appview/static/css/style.css"),
|
||||
filepath.Join(dir, "pkg/appview/static/css/style.min.css"),
|
||||
); err != nil {
|
||||
log.Fatalf("Failed to minify CSS: %v", err)
|
||||
}
|
||||
|
||||
// Minify JavaScript
|
||||
if err := minifyFile(m, "text/javascript",
|
||||
filepath.Join(dir, "pkg/appview/static/js/app.js"),
|
||||
filepath.Join(dir, "pkg/appview/static/js/app.min.js"),
|
||||
); err != nil {
|
||||
log.Fatalf("Failed to minify JS: %v", err)
|
||||
}
|
||||
|
||||
fmt.Println("✓ Assets minified successfully")
|
||||
}
|
||||
|
||||
func minifyFile(m *minify.M, mediatype, src, dst string) error {
|
||||
// Read source file
|
||||
input, err := os.ReadFile(src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("read %s: %w", src, err)
|
||||
}
|
||||
|
||||
// Minify
|
||||
output, err := m.Bytes(mediatype, input)
|
||||
if err != nil {
|
||||
return fmt.Errorf("minify %s: %w", src, err)
|
||||
}
|
||||
|
||||
// Write minified output
|
||||
if err := os.WriteFile(dst, output, 0644); err != nil {
|
||||
return fmt.Errorf("write %s: %w", dst, err)
|
||||
}
|
||||
|
||||
// Print size reduction
|
||||
originalSize := len(input)
|
||||
minifiedSize := len(output)
|
||||
reduction := float64(originalSize-minifiedSize) / float64(originalSize) * 100
|
||||
|
||||
fmt.Printf(" %s: %d bytes → %d bytes (%.1f%% reduction)\n",
|
||||
filepath.Base(src), originalSize, minifiedSize, reduction)
|
||||
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
### Step 3: Add `go:generate` Directive
|
||||
|
||||
Add to `pkg/appview/ui.go` (before the `//go:embed` directive):
|
||||
|
||||
```go
|
||||
//go:generate go run ./static/minify_assets.go
|
||||
|
||||
//go:embed static
|
||||
var staticFS embed.FS
|
||||
```
|
||||
|
||||
### Step 4: Update HTML Templates
|
||||
|
||||
Update all template files to reference minified assets:
|
||||
|
||||
**Before:**
|
||||
```html
|
||||
<link rel="stylesheet" href="/static/css/style.css">
|
||||
<script src="/static/js/app.js"></script>
|
||||
```
|
||||
|
||||
**After:**
|
||||
```html
|
||||
<link rel="stylesheet" href="/static/css/style.min.css">
|
||||
<script src="/static/js/app.min.js"></script>
|
||||
```
|
||||
|
||||
**Files to update:**
|
||||
- `pkg/appview/templates/components/head.html`
|
||||
- Any other templates that reference CSS/JS directly
|
||||
|
||||
### Step 5: Build Workflow
|
||||
|
||||
```bash
|
||||
# Generate minified assets
|
||||
go generate ./pkg/appview
|
||||
|
||||
# Build binary (embeds minified assets)
|
||||
go build -o bin/atcr-appview ./cmd/appview
|
||||
|
||||
# Or build all
|
||||
go generate ./...
|
||||
go build -o bin/atcr-appview ./cmd/appview
|
||||
go build -o bin/atcr-hold ./cmd/hold
|
||||
```
|
||||
|
||||
### Step 6: Add to .gitignore
|
||||
|
||||
Add minified files to `.gitignore` since they're generated:
|
||||
|
||||
```
|
||||
# Generated minified assets
|
||||
pkg/appview/static/css/*.min.css
|
||||
pkg/appview/static/js/*.min.js
|
||||
```
|
||||
|
||||
**Alternative:** Commit minified files if you want reproducible builds without running `go generate`.
|
||||
|
||||
## Build Modes (Optional Enhancement)
|
||||
|
||||
Use build tags to serve unminified assets in development:
|
||||
|
||||
**Development (default):**
|
||||
- Edit `style.css` directly
|
||||
- No minification, easier debugging
|
||||
- Faster build times
|
||||
|
||||
**Production (with `-tags production`):**
|
||||
- Use minified assets
|
||||
- Smaller binary size
|
||||
- Optimized for deployment
|
||||
|
||||
### Implementation with Build Tags
|
||||
|
||||
**pkg/appview/ui.go** (development):
|
||||
```go
|
||||
//go:build !production
|
||||
|
||||
//go:embed static
|
||||
var staticFS embed.FS
|
||||
|
||||
func StylePath() string { return "/static/css/style.css" }
|
||||
func ScriptPath() string { return "/static/js/app.js" }
|
||||
```
|
||||
|
||||
**pkg/appview/ui_production.go** (production):
|
||||
```go
|
||||
//go:build production
|
||||
|
||||
//go:generate go run ./static/minify_assets.go
|
||||
|
||||
//go:embed static
|
||||
var staticFS embed.FS
|
||||
|
||||
func StylePath() string { return "/static/css/style.min.css" }
|
||||
func ScriptPath() string { return "/static/js/app.min.js" }
|
||||
```
|
||||
|
||||
**Usage:**
|
||||
```bash
|
||||
# Development build (unminified)
|
||||
go build ./cmd/appview
|
||||
|
||||
# Production build (minified)
|
||||
go generate ./pkg/appview
|
||||
go build -tags production ./cmd/appview
|
||||
```
|
||||
|
||||
## Alternative Approaches
|
||||
|
||||
### Option 2: External Minifier (cssnano, esbuild)
|
||||
|
||||
Use Node.js-based minifiers via `go:generate`:
|
||||
|
||||
```go
|
||||
//go:generate sh -c "npx cssnano static/css/style.css static/css/style.min.css"
|
||||
//go:generate sh -c "npx esbuild static/js/app.js --minify --outfile=static/js/app.min.js"
|
||||
```
|
||||
|
||||
**Pros:**
|
||||
- Best-in-class minification (potentially better than tdewolff)
|
||||
- Wide ecosystem of tools
|
||||
|
||||
**Cons:**
|
||||
- Requires Node.js/npm in build environment
|
||||
- Cross-platform compatibility issues (Windows vs Unix)
|
||||
- External dependency management
|
||||
|
||||
### Option 3: Runtime Gzip Compression
|
||||
|
||||
Compress assets at runtime (complementary to minification):
|
||||
|
||||
```go
|
||||
import "github.com/NYTimes/gziphandler"
|
||||
|
||||
// Wrap static handler
|
||||
mux.Handle("/static/", gziphandler.GzipHandler(appview.StaticHandler()))
|
||||
```
|
||||
|
||||
**Pros:**
|
||||
- Works for all static files (images, fonts)
|
||||
- ~70-80% size reduction over network
|
||||
- No build changes needed
|
||||
|
||||
**Cons:**
|
||||
- Doesn't reduce binary size
|
||||
- Adds runtime CPU cost
|
||||
- Should be combined with minification for best results
|
||||
|
||||
### Option 4: Brotli Compression (Better than Gzip)
|
||||
|
||||
```go
|
||||
import "github.com/andybalholm/brotli"
|
||||
|
||||
// Custom handler with brotli
|
||||
func BrotliHandler(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if !strings.Contains(r.Header.Get("Accept-Encoding"), "br") {
|
||||
h.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Encoding", "br")
|
||||
bw := brotli.NewWriterLevel(w, brotli.DefaultCompression)
|
||||
defer bw.Close()
|
||||
h.ServeHTTP(&brotliResponseWriter{Writer: bw, ResponseWriter: w}, r)
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
## Expected Benefits
|
||||
|
||||
### File Size Reduction
|
||||
|
||||
**Current (unminified):**
|
||||
- CSS: 40KB
|
||||
- JS: ~5KB (estimated)
|
||||
- **Total embedded:** ~45KB
|
||||
|
||||
**With Minification:**
|
||||
- CSS: ~28KB (30% reduction)
|
||||
- JS: ~3KB (40% reduction)
|
||||
- **Total embedded:** ~31KB
|
||||
- **Binary size savings:** ~14KB
|
||||
|
||||
**With Minification + Gzip (network transfer):**
|
||||
- CSS: ~8KB (80% reduction from original)
|
||||
- JS: ~1.5KB (70% reduction from original)
|
||||
- **Total transferred:** ~9.5KB
|
||||
|
||||
### Performance Impact
|
||||
|
||||
- **Build time:** +1-2 seconds (running minifier)
|
||||
- **Runtime:** No impact (files pre-minified)
|
||||
- **Network:** 75% less data transferred (with gzip)
|
||||
- **Browser parsing:** Slightly faster (smaller files)
|
||||
|
||||
## Maintenance
|
||||
|
||||
### Development Workflow
|
||||
|
||||
1. **Edit source files:**
|
||||
- Modify `pkg/appview/static/css/style.css`
|
||||
- Modify `pkg/appview/static/js/app.js`
|
||||
|
||||
2. **Test locally:**
|
||||
```bash
|
||||
# Development build (unminified)
|
||||
go run ./cmd/appview serve
|
||||
```
|
||||
|
||||
3. **Build for production:**
|
||||
```bash
|
||||
# Generate minified assets
|
||||
go generate ./pkg/appview
|
||||
|
||||
# Build binary
|
||||
go build -o bin/atcr-appview ./cmd/appview
|
||||
```
|
||||
|
||||
4. **CI/CD:**
|
||||
```bash
|
||||
# In GitHub Actions / CI
|
||||
go generate ./...
|
||||
go build ./...
|
||||
```
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
**Q: Minified assets not updating?**
|
||||
- Delete `*.min.css` and `*.min.js` files
|
||||
- Run `go generate ./pkg/appview` again
|
||||
|
||||
**Q: Build fails with "package not found"?**
|
||||
- Run `go mod tidy` to download dependencies
|
||||
|
||||
**Q: CSS broken after minification?**
|
||||
- Check for syntax errors in source CSS
|
||||
- Minifier is strict about valid CSS
|
||||
|
||||
## Integration with Existing Build
|
||||
|
||||
ATCR already uses `go:generate` for:
|
||||
- CBOR generation (`pkg/atproto/lexicon.go`)
|
||||
- License downloads (`pkg/appview/licenses/licenses.go`)
|
||||
|
||||
Minification follows the same pattern:
|
||||
```bash
|
||||
# Generate all (CBOR, licenses, minified assets)
|
||||
go generate ./...
|
||||
|
||||
# Build all binaries
|
||||
go build -o bin/atcr-appview ./cmd/appview
|
||||
go build -o bin/atcr-hold ./cmd/hold
|
||||
go build -o bin/docker-credential-atcr ./cmd/credential-helper
|
||||
```
|
||||
|
||||
## Recommendation
|
||||
|
||||
**For ATCR:**
|
||||
|
||||
1. **Immediate:** Implement Option 1 (`tdewolff/minify`)
|
||||
- Pure Go, no external dependencies
|
||||
- Integrates with existing `go:generate` workflow
|
||||
- ~30% size reduction
|
||||
|
||||
2. **Future:** Add runtime gzip/brotli compression
|
||||
- Wrap static handler with compression middleware
|
||||
- Benefits all static assets
|
||||
- Standard practice for web servers
|
||||
|
||||
3. **Long-term:** Consider build modes (development vs production)
|
||||
- Use unminified assets in development
|
||||
- Use minified assets in production builds
|
||||
- Best developer experience
|
||||
|
||||
## References
|
||||
|
||||
- [tdewolff/minify](https://github.com/tdewolff/minify) - Go minifier library
|
||||
- [NYTimes/gziphandler](https://github.com/NYTimes/gziphandler) - Gzip middleware
|
||||
- [Go embed directive](https://pkg.go.dev/embed) - Embedding static files
|
||||
- [Go generate](https://go.dev/blog/generate) - Code generation tool
|
||||
399
docs/OAUTH.md
Normal file
399
docs/OAUTH.md
Normal file
@@ -0,0 +1,399 @@
|
||||
# OAuth Implementation in ATCR
|
||||
|
||||
This document describes ATCR's OAuth implementation, which uses the ATProto OAuth specification with DPoP (Demonstrating Proof of Possession) for secure authentication.
|
||||
|
||||
## Overview
|
||||
|
||||
ATCR implements a full OAuth 2.0 + DPoP flow following the ATProto specification. The implementation uses the [indigo OAuth library](https://github.com/bluesky-social/indigo) and extends it with ATCR-specific configuration for registry operations.
|
||||
|
||||
### Key Features
|
||||
|
||||
- **DPoP (RFC 9449)**: Cryptographic proof-of-possession binds tokens to specific client keys
|
||||
- **PAR (RFC 9126)**: Pushed Authorization Requests for secure server-to-server parameter exchange
|
||||
- **PKCE (RFC 7636)**: Proof Key for Code Exchange prevents authorization code interception
|
||||
- **Confidential Clients**: Production deployments use P-256 private keys for client authentication
|
||||
- **Public Clients**: Development (localhost) uses simpler public client configuration
|
||||
|
||||
## Client Types
|
||||
|
||||
ATCR supports two OAuth client types depending on the deployment environment:
|
||||
|
||||
### Public Clients (Development)
|
||||
|
||||
**When:** `baseURL` contains `localhost` or `127.0.0.1`
|
||||
|
||||
**Configuration:**
|
||||
- Client ID: `http://localhost?redirect_uri=...&scope=...` (query-based)
|
||||
- No client authentication
|
||||
- Uses indigo's `NewLocalhostConfig()` helper
|
||||
- DPoP still required for token requests
|
||||
|
||||
**Example:**
|
||||
```go
|
||||
// Automatically uses public client for localhost
|
||||
config := oauth.NewClientConfigWithScopes("http://127.0.0.1:5000", scopes)
|
||||
```
|
||||
|
||||
### Confidential Clients (Production)
|
||||
|
||||
**When:** `baseURL` is a public domain (not localhost)
|
||||
|
||||
**Configuration:**
|
||||
- Client ID: `{baseURL}/client-metadata.json` (metadata endpoint)
|
||||
- Client authentication: P-256 (ES256) private key JWT assertion
|
||||
- Private key stored at `/var/lib/atcr/oauth/client.key`
|
||||
- Auto-generated on first run with 0600 permissions
|
||||
- Upgraded via `config.SetClientSecret(privateKey, keyID)`
|
||||
|
||||
**Example:**
|
||||
```go
|
||||
// 1. Create base config (public)
|
||||
config := oauth.NewClientConfigWithScopes("https://atcr.io", scopes)
|
||||
|
||||
// 2. Load or generate P-256 key
|
||||
privateKey, err := oauth.GenerateOrLoadClientKey("/var/lib/atcr/oauth/client.key")
|
||||
|
||||
// 3. Generate key ID
|
||||
keyID, err := oauth.GenerateKeyID(privateKey)
|
||||
|
||||
// 4. Upgrade to confidential
|
||||
err = config.SetClientSecret(privateKey, keyID)
|
||||
```
|
||||
|
||||
## Key Management
|
||||
|
||||
### P-256 Key Generation
|
||||
|
||||
ATCR uses **P-256 (NIST P-256, ES256)** keys for OAuth client authentication. This differs from the K-256 keys used for ATProto PDS signing.
|
||||
|
||||
**Why P-256?**
|
||||
- Standard OAuth/OIDC key algorithm
|
||||
- Widely supported by authorization servers
|
||||
- Compatible with indigo's `SetClientSecret()` API
|
||||
|
||||
**Key Storage:**
|
||||
- Default path: `/var/lib/atcr/oauth/client.key`
|
||||
- Configurable via: `ATCR_OAUTH_KEY_PATH` environment variable
|
||||
- File permissions: `0600` (owner read/write only)
|
||||
- Directory permissions: `0700` (owner access only)
|
||||
- Format: Raw binary bytes (not PEM)
|
||||
|
||||
**Key Lifecycle:**
|
||||
1. On first production startup, AppView checks for key at configured path
|
||||
2. If missing, generates new P-256 key using `atcrypto.GeneratePrivateKeyP256()`
|
||||
3. Saves raw key bytes to disk with restrictive permissions
|
||||
4. Logs generation event: `"Generated new P-256 OAuth client key"`
|
||||
5. On subsequent startups, loads existing key
|
||||
6. Logs load event: `"Loaded existing P-256 OAuth client key"`
|
||||
|
||||
**Key Rotation:**
|
||||
To rotate the OAuth client key:
|
||||
1. Stop the AppView service
|
||||
2. Delete or rename the existing key file
|
||||
3. Restart AppView (new key will be generated automatically)
|
||||
4. Note: Active OAuth sessions may need re-authentication
|
||||
|
||||
### Key ID Generation
|
||||
|
||||
The key ID is derived from the public key for stable identification:
|
||||
|
||||
```go
|
||||
func GenerateKeyID(privateKey *atcrypto.PrivateKeyP256) (string, error) {
|
||||
pubKey, _ := privateKey.PublicKey()
|
||||
pubKeyBytes := pubKey.Bytes()
|
||||
hash := sha256.Sum256(pubKeyBytes)
|
||||
return hex.EncodeToString(hash[:])[:8], nil
|
||||
}
|
||||
```
|
||||
|
||||
This generates an 8-character hex ID from the SHA-256 hash of the public key.
|
||||
|
||||
## Authentication Flow
|
||||
|
||||
### AppView OAuth Flow
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant User
|
||||
participant Browser
|
||||
participant AppView
|
||||
participant PDS
|
||||
|
||||
User->>Browser: docker push atcr.io/alice/myapp
|
||||
Browser->>AppView: Credential helper redirects
|
||||
AppView->>PDS: Resolve handle → DID
|
||||
AppView->>PDS: Discover OAuth metadata
|
||||
AppView->>PDS: PAR request (with DPoP)
|
||||
PDS-->>AppView: request_uri
|
||||
AppView->>Browser: Redirect to authorization page
|
||||
Browser->>PDS: User authorizes
|
||||
PDS->>AppView: Authorization code
|
||||
AppView->>PDS: Token exchange (with DPoP)
|
||||
PDS-->>AppView: OAuth tokens + DPoP binding
|
||||
AppView->>User: Issue registry JWT
|
||||
```
|
||||
|
||||
### Key Steps
|
||||
|
||||
1. **Identity Resolution**
|
||||
- AppView resolves handle to DID via `.well-known/atproto-did`
|
||||
- Resolves DID to PDS endpoint via DID document
|
||||
|
||||
2. **OAuth Discovery**
|
||||
- Fetches `/.well-known/oauth-authorization-server` from PDS
|
||||
- Extracts `authorization_endpoint`, `token_endpoint`, etc.
|
||||
|
||||
3. **Pushed Authorization Request (PAR)**
|
||||
- AppView sends authorization parameters to PDS token endpoint
|
||||
- Includes DPoP header with proof JWT
|
||||
- Receives `request_uri` for authorization
|
||||
|
||||
4. **User Authorization**
|
||||
- User is redirected to PDS authorization page
|
||||
- User approves application access
|
||||
- PDS redirects back with authorization code
|
||||
|
||||
5. **Token Exchange**
|
||||
- AppView exchanges code for tokens at PDS token endpoint
|
||||
- Includes DPoP header with proof JWT
|
||||
- Receives access token, refresh token (both DPoP-bound)
|
||||
|
||||
6. **Token Storage**
|
||||
- AppView stores OAuth session in SQLite database
|
||||
- Indigo library manages token refresh automatically
|
||||
- DPoP key stored with session for future requests
|
||||
|
||||
7. **Registry JWT Issuance**
|
||||
- AppView validates OAuth session
|
||||
- Issues short-lived registry JWT (15 minutes)
|
||||
- JWT contains validated DID from PDS session
|
||||
|
||||
## DPoP Implementation
|
||||
|
||||
### What is DPoP?
|
||||
|
||||
DPoP (Demonstrating Proof of Possession) binds OAuth tokens to a specific client key, preventing token theft and replay attacks.
|
||||
|
||||
**How it works:**
|
||||
1. Client generates ephemeral key pair (or uses persistent key)
|
||||
2. Client includes DPoP proof JWT in Authorization header
|
||||
3. Proof JWT contains hash of HTTP request details
|
||||
4. Authorization server validates proof and issues DPoP-bound token
|
||||
5. Token can only be used with the same client key
|
||||
|
||||
### DPoP Headers
|
||||
|
||||
Every request to the PDS token endpoint includes a DPoP header:
|
||||
|
||||
```http
|
||||
POST /oauth/token HTTP/1.1
|
||||
Host: pds.example.com
|
||||
Content-Type: application/x-www-form-urlencoded
|
||||
DPoP: eyJhbGciOiJFUzI1NiIsInR5cCI6ImRwb3Arand0IiwiandrIjp7Imt0eSI6Ik...
|
||||
|
||||
grant_type=authorization_code&code=...&redirect_uri=...
|
||||
```
|
||||
|
||||
The DPoP header is a signed JWT containing:
|
||||
- `htm`: HTTP method (e.g., "POST")
|
||||
- `htu`: HTTP URI (e.g., "https://pds.example.com/oauth/token")
|
||||
- `jti`: Unique request identifier
|
||||
- `iat`: Timestamp
|
||||
- `jwk`: Public key (JWK format)
|
||||
|
||||
### Indigo DPoP Management
|
||||
|
||||
ATCR uses indigo's built-in DPoP management:
|
||||
|
||||
```go
|
||||
// Indigo automatically handles DPoP
|
||||
clientApp := oauth.NewClientApp(&config, store)
|
||||
|
||||
// All token requests include DPoP automatically
|
||||
tokens, err := clientApp.ProcessCallback(ctx, params)
|
||||
|
||||
// Refresh automatically includes DPoP
|
||||
session, err := clientApp.ResumeSession(ctx, did, sessionID)
|
||||
```
|
||||
|
||||
Indigo manages:
|
||||
- DPoP key generation and storage
|
||||
- DPoP proof JWT creation
|
||||
- DPoP header inclusion in token requests
|
||||
- Token binding to DPoP keys
|
||||
|
||||
## Client Configuration
|
||||
|
||||
### Environment Variables
|
||||
|
||||
**ATCR_OAUTH_KEY_PATH**
|
||||
- Path to OAuth client P-256 signing key
|
||||
- Default: `/var/lib/atcr/oauth/client.key`
|
||||
- Auto-generated on first run (production only)
|
||||
- Format: Raw binary P-256 private key
|
||||
|
||||
**ATCR_BASE_URL**
|
||||
- Public URL of AppView service
|
||||
- Required for OAuth redirect URIs
|
||||
- Example: `https://atcr.io`
|
||||
- Determines client type (public vs confidential)
|
||||
|
||||
**ATCR_UI_DATABASE_PATH**
|
||||
- Path to SQLite database (includes OAuth session storage)
|
||||
- Default: `/var/lib/atcr/ui.db`
|
||||
|
||||
### Client Metadata Endpoint
|
||||
|
||||
Production deployments serve OAuth client metadata at `{baseURL}/client-metadata.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"client_id": "https://atcr.io/client-metadata.json",
|
||||
"client_name": "ATCR Registry",
|
||||
"client_uri": "https://atcr.io",
|
||||
"redirect_uris": ["https://atcr.io/auth/oauth/callback"],
|
||||
"scope": "atproto blob:... repo:...",
|
||||
"grant_types": ["authorization_code", "refresh_token"],
|
||||
"response_types": ["code"],
|
||||
"token_endpoint_auth_method": "private_key_jwt",
|
||||
"token_endpoint_auth_signing_alg": "ES256",
|
||||
"jwks": {
|
||||
"keys": [
|
||||
{
|
||||
"kty": "EC",
|
||||
"crv": "P-256",
|
||||
"x": "...",
|
||||
"y": "...",
|
||||
"kid": "abc12345"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
For localhost, the client ID is query-based and no metadata endpoint is used.
|
||||
|
||||
## Scope Management
|
||||
|
||||
ATCR requests the following OAuth scopes:
|
||||
|
||||
**Base scopes:**
|
||||
- `atproto`: Basic ATProto access
|
||||
|
||||
**Blob scopes (for layer/manifest media types):**
|
||||
- `blob:application/vnd.oci.image.manifest.v1+json`
|
||||
- `blob:application/vnd.docker.distribution.manifest.v2+json`
|
||||
- `blob:application/vnd.oci.image.index.v1+json`
|
||||
- `blob:application/vnd.docker.distribution.manifest.list.v2+json`
|
||||
- `blob:application/vnd.cncf.oras.artifact.manifest.v1+json`
|
||||
|
||||
**Repo scopes (for ATProto collections):**
|
||||
- `repo:io.atcr.manifest`: Manifest records
|
||||
- `repo:io.atcr.tag`: Tag records
|
||||
- `repo:io.atcr.star`: Star records
|
||||
- `repo:io.atcr.sailor.profile`: User profile records
|
||||
|
||||
**RPC scope:**
|
||||
- `rpc:com.atproto.repo.getRecord?aud=*`: Read access to any user's records
|
||||
|
||||
Scopes are automatically invalidated on startup if they change, forcing users to re-authenticate.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Token Security
|
||||
|
||||
**OAuth Tokens (managed by AppView):**
|
||||
- Stored in SQLite database
|
||||
- DPoP-bound (cannot be used without client key)
|
||||
- Automatically refreshed by indigo library
|
||||
- Used for PDS API requests (manifests, service tokens)
|
||||
|
||||
**Registry JWTs (issued to Docker clients):**
|
||||
- Short-lived (15 minutes)
|
||||
- Signed by AppView's JWT signing key
|
||||
- Contain validated DID from OAuth session
|
||||
- Used for OCI Distribution API requests
|
||||
|
||||
### Attack Prevention
|
||||
|
||||
**Token Theft:**
|
||||
- DPoP prevents stolen tokens from being used
|
||||
- Tokens are bound to specific client key
|
||||
- Attacker would need both token AND private key
|
||||
|
||||
**Client Impersonation:**
|
||||
- Confidential clients use private key JWT assertion
|
||||
- Prevents attackers from impersonating AppView
|
||||
- Public keys published in client metadata JWKS
|
||||
|
||||
**Man-in-the-Middle:**
|
||||
- All OAuth flows use HTTPS in production
|
||||
- DPoP includes HTTP method and URI in proof
|
||||
- Prevents replay attacks on different endpoints
|
||||
|
||||
**Authorization Code Interception:**
|
||||
- PKCE prevents code interception attacks
|
||||
- Code verifier required to exchange code for token
|
||||
- Protects against malicious redirect URI attacks
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
**"Failed to initialize OAuth client key"**
|
||||
- Check that `/var/lib/atcr/oauth/` directory exists and is writable
|
||||
- Verify directory permissions are 0700
|
||||
- Check disk space
|
||||
|
||||
**"OAuth session not found"**
|
||||
- User needs to re-authenticate (session expired or invalidated)
|
||||
- Check that UI database is accessible
|
||||
- Verify OAuth session storage is working
|
||||
|
||||
**"Invalid DPoP proof"**
|
||||
- Clock skew between AppView and PDS
|
||||
- DPoP key mismatch (token was issued with different key)
|
||||
- Check that indigo library is managing DPoP correctly
|
||||
|
||||
**"Client authentication failed"**
|
||||
- Confidential client key may be corrupted
|
||||
- Key ID may not match public key
|
||||
- Try rotating the client key (delete and regenerate)
|
||||
|
||||
### Debugging
|
||||
|
||||
Enable debug logging to see OAuth flow details:
|
||||
|
||||
```bash
|
||||
export ATCR_LOG_LEVEL=debug
|
||||
./bin/atcr-appview serve
|
||||
```
|
||||
|
||||
Look for log messages:
|
||||
- `"Generated new P-256 OAuth client key"` - Key was auto-generated
|
||||
- `"Loaded existing P-256 OAuth client key"` - Key was loaded from disk
|
||||
- `"Configured confidential OAuth client"` - Production confidential client active
|
||||
- `"Localhost detected - using public OAuth client"` - Development public client active
|
||||
|
||||
### Testing OAuth Flow
|
||||
|
||||
Test OAuth flow manually:
|
||||
|
||||
```bash
|
||||
# 1. Start AppView in debug mode
|
||||
ATCR_LOG_LEVEL=debug ./bin/atcr-appview serve
|
||||
|
||||
# 2. Try docker login
|
||||
docker login atcr.io
|
||||
|
||||
# 3. Check logs for OAuth flow details
|
||||
# Look for: PAR request, token exchange, DPoP headers, etc.
|
||||
```
|
||||
|
||||
## References
|
||||
|
||||
- [ATProto OAuth Specification](https://atproto.com/specs/oauth)
|
||||
- [RFC 9449: OAuth 2.0 Demonstrating Proof of Possession (DPoP)](https://datatracker.ietf.org/doc/html/rfc9449)
|
||||
- [RFC 9126: OAuth 2.0 Pushed Authorization Requests (PAR)](https://datatracker.ietf.org/doc/html/rfc9126)
|
||||
- [RFC 7636: Proof Key for Code Exchange (PKCE)](https://datatracker.ietf.org/doc/html/rfc7636)
|
||||
- [Indigo OAuth Library](https://github.com/bluesky-social/indigo/tree/main/atproto/auth/oauth)
|
||||
@@ -1,183 +0,0 @@
|
||||
# README Embedding Feature
|
||||
|
||||
## Overview
|
||||
|
||||
Enhance the repository page (`/r/{handle}/{repository}`) with embedded README content fetched from the source repository, similar to Docker Hub's "Overview" tab.
|
||||
|
||||
## Current State
|
||||
|
||||
The repository page currently shows:
|
||||
- Repository metadata from OCI annotations
|
||||
- Short description from `org.opencontainers.image.description`
|
||||
- External links to source (`org.opencontainers.image.source`) and docs (`org.opencontainers.image.documentation`)
|
||||
- Tags and manifests lists
|
||||
|
||||
## Proposed Feature
|
||||
|
||||
Automatically fetch and render README.md content from the source repository when available, displaying it in an "Overview" section on the repository page.
|
||||
|
||||
## Implementation Approach
|
||||
|
||||
### 1. Source URL Detection
|
||||
|
||||
Parse `org.opencontainers.image.source` annotation to detect GitHub repositories:
|
||||
- Pattern: `https://github.com/{owner}/{repo}`
|
||||
- Extract owner and repo name
|
||||
|
||||
### 2. README Fetching
|
||||
|
||||
Fetch README.md from GitHub via raw content URL:
|
||||
```
|
||||
https://raw.githubusercontent.com/{owner}/{repo}/{branch}/README.md
|
||||
```
|
||||
|
||||
Try multiple branch names in order:
|
||||
1. `main`
|
||||
2. `master`
|
||||
3. `develop`
|
||||
|
||||
Fallback if README not found or fetch fails.
|
||||
|
||||
### 3. Markdown Rendering
|
||||
|
||||
Use a Go markdown library to render README content:
|
||||
- **Option A**: `github.com/gomarkdown/markdown` - Pure Go, fast
|
||||
- **Option B**: `github.com/yuin/goldmark` - CommonMark compliant, extensible
|
||||
- **Option C**: Call GitHub's markdown API (requires network call)
|
||||
|
||||
Recommended: `goldmark` for CommonMark compliance and GitHub-flavored markdown support.
|
||||
|
||||
### 4. Caching Strategy
|
||||
|
||||
Cache rendered README to avoid repeated fetches:
|
||||
|
||||
**Option A: In-memory cache**
|
||||
- Simple, fast
|
||||
- Lost on restart
|
||||
- Good for MVP
|
||||
|
||||
**Option B: Database cache**
|
||||
- Add `readme_html` column to `manifests` table
|
||||
- Update on new manifest pushes
|
||||
- Persistent across restarts
|
||||
- Background job to refresh periodically
|
||||
|
||||
**Option C: Hybrid**
|
||||
- Cache in database
|
||||
- Also cache in memory for frequently accessed repos
|
||||
- TTL-based refresh (e.g., 1 hour)
|
||||
|
||||
### 5. UI Integration
|
||||
|
||||
Add "Overview" section to repository page:
|
||||
- Show after repository header, before tags/manifests
|
||||
- Render markdown as HTML
|
||||
- Apply CSS styling for markdown elements (headings, code blocks, tables, etc.)
|
||||
- Handle images in README (may need to proxy or allow external images)
|
||||
|
||||
## Implementation Steps
|
||||
|
||||
1. **Add README fetcher** (`pkg/appview/readme/fetcher.go`)
|
||||
```go
|
||||
type Fetcher struct {
|
||||
httpClient *http.Client
|
||||
cache Cache
|
||||
}
|
||||
|
||||
func (f *Fetcher) FetchGitHubReadme(sourceURL string) (string, error)
|
||||
func (f *Fetcher) RenderMarkdown(content string) (string, error)
|
||||
```
|
||||
|
||||
2. **Update database schema** (optional, for caching)
|
||||
```sql
|
||||
ALTER TABLE manifests ADD COLUMN readme_html TEXT;
|
||||
ALTER TABLE manifests ADD COLUMN readme_fetched_at TIMESTAMP;
|
||||
```
|
||||
|
||||
3. **Update RepositoryPageHandler**
|
||||
- Fetch README for repository
|
||||
- Pass rendered HTML to template
|
||||
|
||||
4. **Update repository.html template**
|
||||
- Add "Overview" section
|
||||
- Render HTML safely (use `template.HTML`)
|
||||
|
||||
5. **Add markdown CSS**
|
||||
- Style headings, code blocks, lists, tables
|
||||
- Syntax highlighting for code blocks (optional)
|
||||
|
||||
## Security Considerations
|
||||
|
||||
1. **XSS Prevention**
|
||||
- Sanitize HTML output from markdown renderer
|
||||
- Use `bluemonday` or similar HTML sanitizer
|
||||
- Only allow safe HTML elements and attributes
|
||||
|
||||
2. **Rate Limiting**
|
||||
- Cache aggressively to avoid hitting GitHub rate limits
|
||||
- Consider GitHub API instead of raw content (requires token but higher limits)
|
||||
- Handle 429 responses gracefully
|
||||
|
||||
3. **Image Handling**
|
||||
- README may contain images with relative URLs
|
||||
- Options:
|
||||
- Rewrite image URLs to absolute GitHub URLs
|
||||
- Proxy images through ATCR (caching, security)
|
||||
- Block external images (simplest, but breaks many READMEs)
|
||||
|
||||
4. **Content Size**
|
||||
- Limit README size (e.g., 1MB max)
|
||||
- Truncate very long READMEs with "View on GitHub" link
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
1. **Support other platforms**
|
||||
- GitLab: `https://gitlab.com/{owner}/{repo}/-/raw/{branch}/README.md`
|
||||
- Gitea/Forgejo
|
||||
- Bitbucket
|
||||
|
||||
2. **Custom README upload**
|
||||
- Allow users to upload custom README via UI
|
||||
- Store in PDS as `io.atcr.readme` record
|
||||
- Priority: custom > source repo
|
||||
|
||||
3. **Automatic updates**
|
||||
- Background job to refresh READMEs periodically
|
||||
- Webhook support to update on push to source repo
|
||||
|
||||
4. **Syntax highlighting**
|
||||
- Use highlight.js or similar for code blocks
|
||||
- Support multiple languages
|
||||
|
||||
## Example Flow
|
||||
|
||||
1. User pushes image with label: `org.opencontainers.image.source=https://github.com/alice/myapp`
|
||||
2. Manifest stored with source URL annotation
|
||||
3. User visits `/r/alice/myapp`
|
||||
4. RepositoryPageHandler:
|
||||
- Checks cache for README
|
||||
- If not cached or expired:
|
||||
- Fetches `https://raw.githubusercontent.com/alice/myapp/main/README.md`
|
||||
- Renders markdown to HTML
|
||||
- Sanitizes HTML
|
||||
- Caches result
|
||||
- Passes README HTML to template
|
||||
5. Template renders Overview section with README content
|
||||
|
||||
## Dependencies
|
||||
|
||||
```go
|
||||
// Markdown rendering
|
||||
github.com/yuin/goldmark v1.6.0
|
||||
github.com/yuin/goldmark-emoji v1.0.2 // GitHub emoji support
|
||||
|
||||
// HTML sanitization
|
||||
github.com/microcosm-cc/bluemonday v1.0.26
|
||||
```
|
||||
|
||||
## References
|
||||
|
||||
- [OCI Image Spec - Annotations](https://github.com/opencontainers/image-spec/blob/main/annotations.md)
|
||||
- [Docker Hub Overview tab behavior](https://hub.docker.com/)
|
||||
- [Goldmark documentation](https://github.com/yuin/goldmark)
|
||||
- [GitHub raw content URLs](https://raw.githubusercontent.com/)
|
||||
691
docs/RELAY.md
Normal file
691
docs/RELAY.md
Normal file
@@ -0,0 +1,691 @@
|
||||
# Running an ATProto Relay for ATCR Hold Discovery
|
||||
|
||||
This document explains what it takes to run an ATProto relay for indexing ATCR hold records, including infrastructure requirements, configuration, and trade-offs.
|
||||
|
||||
## Overview
|
||||
|
||||
### What is an ATProto Relay?
|
||||
|
||||
An ATProto relay is a service that:
|
||||
- **Subscribes to multiple PDS hosts** and aggregates their data streams
|
||||
- **Outputs a combined "firehose"** event stream for real-time network updates
|
||||
- **Validates data integrity** and identity signatures
|
||||
- **Provides discovery endpoints** like `com.atproto.sync.listReposByCollection`
|
||||
|
||||
The relay acts as a network-wide indexer, making it possible to discover which DIDs have records of specific types (collections).
|
||||
|
||||
### Why ATCR Needs a Relay
|
||||
|
||||
ATCR uses hold captain records (`io.atcr.hold.captain`) stored in hold PDSs to enable hold discovery. The `listReposByCollection` endpoint allows AppViews to efficiently discover all holds in the network without crawling every PDS individually.
|
||||
|
||||
**The problem**: Standard Bluesky relays appear to only index collections from `did:plc` DIDs, not `did:web` DIDs. Since ATCR holds use `did:web` (e.g., `did:web:hold01.atcr.io`), they aren't discoverable via Bluesky's public relays.
|
||||
|
||||
## Recommended Approach: Phased Implementation
|
||||
|
||||
ATCR's discovery needs evolve as the network grows. Start simple, scale as needed.
|
||||
|
||||
## MVP: Minimal Discovery Service
|
||||
|
||||
For initial deployment with a small number of holds (dozens, not thousands), build a **lightweight custom discovery service** focused solely on `io.atcr.*` collections.
|
||||
|
||||
### Why Minimal Service for MVP?
|
||||
|
||||
- **Scope**: Only index `io.atcr.*` collections (manifests, tags, captain/crew, sailor profiles)
|
||||
- **Opt-in**: Only crawls PDSs that explicitly call `requestCrawl`
|
||||
- **Small scale**: Dozens of holds, not millions of users
|
||||
- **Simple storage**: SQLite sufficient for current scale
|
||||
- **Cost-effective**: $5-10/month VPS
|
||||
|
||||
### Architecture
|
||||
|
||||
**Inbound endpoints:**
|
||||
```
|
||||
POST /xrpc/com.atproto.sync.requestCrawl
|
||||
→ Hold registers itself for crawling
|
||||
|
||||
GET /xrpc/com.atproto.sync.listReposByCollection?collection=io.atcr.hold.captain
|
||||
→ AppView discovers holds
|
||||
```
|
||||
|
||||
**Outbound (client to PDS):**
|
||||
```
|
||||
1. com.atproto.repo.describeRepo → verify PDS exists
|
||||
2. com.atproto.sync.getRepo → fetch full CAR file (initial backfill)
|
||||
3. com.atproto.sync.subscribeRepos → WebSocket for real-time updates
|
||||
4. Parse events → extract io.atcr.* records → index in SQLite
|
||||
```
|
||||
|
||||
**Data flow:**
|
||||
|
||||
**Initial crawl (on requestCrawl):**
|
||||
```
|
||||
1. Hold POSTs requestCrawl → service queues crawl job
|
||||
2. Service fetches getRepo (CAR file) from hold's PDS for backfill
|
||||
3. Service parses CAR using indigo libraries
|
||||
4. Service extracts io.atcr.* records (captain, crew, manifests, etc.)
|
||||
5. Service stores: (did, collection, rkey, record_data) in SQLite
|
||||
6. Service opens WebSocket to subscribeRepos for this DID
|
||||
7. Service stores cursor for reconnection handling
|
||||
```
|
||||
|
||||
**Ongoing updates (WebSocket):**
|
||||
```
|
||||
1. Receive commit events via subscribeRepos WebSocket
|
||||
2. Parse event, filter to io.atcr.* collections only
|
||||
3. Update indexed_records incrementally (insert/update/delete)
|
||||
4. Update cursor after processing each event
|
||||
5. On disconnect: reconnect with stored cursor to resume
|
||||
```
|
||||
|
||||
**Discovery (AppView query):**
|
||||
```
|
||||
1. AppView GETs listReposByCollection?collection=io.atcr.hold.captain
|
||||
2. Service queries SQLite WHERE collection='io.atcr.hold.captain'
|
||||
3. Service returns list of DIDs with that collection
|
||||
```
|
||||
|
||||
### Implementation Requirements
|
||||
|
||||
**Technologies:**
|
||||
- Go (reuse indigo libraries for CAR parsing and WebSocket)
|
||||
- SQLite (sufficient for dozens/hundreds of holds)
|
||||
- Standard HTTP server + WebSocket client
|
||||
|
||||
**Core components:**
|
||||
|
||||
1. **HTTP handlers** (`cmd/atcr-discovery/handlers/`):
|
||||
- `requestCrawl` - queue crawl jobs
|
||||
- `listReposByCollection` - query indexed collections
|
||||
|
||||
2. **Crawler** (`pkg/discovery/crawler.go`):
|
||||
- Fetch CAR files from PDSs for initial backfill
|
||||
- Parse with `github.com/bluesky-social/indigo/repo`
|
||||
- Extract records, filter to `io.atcr.*` only
|
||||
|
||||
3. **WebSocket subscriber** (`pkg/discovery/subscriber.go`):
|
||||
- WebSocket client for `com.atproto.sync.subscribeRepos`
|
||||
- Event parsing and filtering
|
||||
- Cursor management and persistence
|
||||
- Automatic reconnection with resume
|
||||
|
||||
4. **Storage** (`pkg/discovery/storage.go`):
|
||||
- SQLite schema for indexed records
|
||||
- Indexes on (collection, did) for fast queries
|
||||
- Cursor storage for reconnection
|
||||
|
||||
5. **Worker** (`pkg/discovery/worker.go`):
|
||||
- Background crawl job processor
|
||||
- WebSocket connection manager
|
||||
- Health monitoring for subscriptions
|
||||
|
||||
**Database schema:**
|
||||
```sql
|
||||
CREATE TABLE indexed_records (
|
||||
did TEXT NOT NULL,
|
||||
collection TEXT NOT NULL,
|
||||
rkey TEXT NOT NULL,
|
||||
record_data TEXT NOT NULL, -- JSON
|
||||
indexed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
PRIMARY KEY (did, collection, rkey)
|
||||
);
|
||||
|
||||
CREATE INDEX idx_collection ON indexed_records(collection);
|
||||
CREATE INDEX idx_did ON indexed_records(did);
|
||||
|
||||
CREATE TABLE crawl_queue (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
hostname TEXT NOT NULL UNIQUE,
|
||||
did TEXT,
|
||||
status TEXT DEFAULT 'pending', -- pending, in_progress, subscribed, failed
|
||||
last_crawled_at TIMESTAMP,
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
CREATE TABLE subscriptions (
|
||||
did TEXT PRIMARY KEY,
|
||||
hostname TEXT NOT NULL,
|
||||
cursor INTEGER, -- Last processed sequence number
|
||||
status TEXT DEFAULT 'active', -- active, disconnected, failed
|
||||
last_event_at TIMESTAMP,
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
```
|
||||
|
||||
**Leveraging indigo libraries:**
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/bluesky-social/indigo/repo"
|
||||
"github.com/bluesky-social/indigo/atproto/syntax"
|
||||
"github.com/bluesky-social/indigo/events"
|
||||
"github.com/gorilla/websocket"
|
||||
"github.com/ipfs/go-cid"
|
||||
)
|
||||
|
||||
// Initial backfill: Parse CAR file
|
||||
r, err := repo.ReadRepoFromCar(ctx, bytes.NewReader(carData))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Iterate records
|
||||
err = r.ForEach(ctx, "", func(path string, nodeCid cid.Cid) error {
|
||||
// Parse collection from path (e.g., "io.atcr.hold.captain/self")
|
||||
parts := strings.Split(path, "/")
|
||||
if len(parts) != 2 {
|
||||
return nil // skip invalid paths
|
||||
}
|
||||
|
||||
collection := parts[0]
|
||||
rkey := parts[1]
|
||||
|
||||
// Filter to io.atcr.* only
|
||||
if !strings.HasPrefix(collection, "io.atcr.") {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get record data
|
||||
recordBytes, err := r.GetRecord(ctx, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Store in database
|
||||
return store.IndexRecord(did, collection, rkey, recordBytes)
|
||||
})
|
||||
|
||||
// WebSocket subscription: Listen for updates
|
||||
wsURL := fmt.Sprintf("wss://%s/xrpc/com.atproto.sync.subscribeRepos", hostname)
|
||||
conn, _, err := websocket.DefaultDialer.Dial(wsURL, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Read events
|
||||
rsc := &events.RepoStreamCallbacks{
|
||||
RepoCommit: func(evt *events.RepoCommit) error {
|
||||
// Filter to io.atcr.* collections only
|
||||
for _, op := range evt.Ops {
|
||||
if !strings.HasPrefix(op.Collection, "io.atcr.") {
|
||||
continue
|
||||
}
|
||||
|
||||
// Process create/update/delete operations
|
||||
switch op.Action {
|
||||
case "create", "update":
|
||||
store.IndexRecord(evt.Repo, op.Collection, op.Rkey, op.Record)
|
||||
case "delete":
|
||||
store.DeleteRecord(evt.Repo, op.Collection, op.Rkey)
|
||||
}
|
||||
}
|
||||
|
||||
// Update cursor
|
||||
return store.UpdateCursor(evt.Repo, evt.Seq)
|
||||
},
|
||||
}
|
||||
|
||||
// Process stream
|
||||
scheduler := events.NewScheduler("discovery-worker", conn.RemoteAddr().String(), rsc)
|
||||
return events.HandleRepoStream(ctx, conn, scheduler)
|
||||
```
|
||||
|
||||
### Infrastructure Requirements
|
||||
|
||||
**Minimum specs:**
|
||||
- 1 vCPU
|
||||
- 1-2GB RAM
|
||||
- 20GB SSD
|
||||
- Minimal bandwidth (<1GB/day for dozens of holds)
|
||||
|
||||
**Estimated cost:**
|
||||
- Hetzner CX11: €4.15/month (~$5/month)
|
||||
- DigitalOcean Basic: $6/month
|
||||
- Fly.io: ~$5-10/month
|
||||
|
||||
**Deployment:**
|
||||
```bash
|
||||
# Build
|
||||
go build -o atcr-discovery ./cmd/atcr-discovery
|
||||
|
||||
# Run
|
||||
export DATABASE_PATH="/var/lib/atcr-discovery/discovery.db"
|
||||
export HTTP_ADDR=":8080"
|
||||
./atcr-discovery
|
||||
```
|
||||
|
||||
### Limitations
|
||||
|
||||
**What it does NOT do:**
|
||||
- ❌ Serve outbound `subscribeRepos` firehose (AppViews query via listReposByCollection)
|
||||
- ❌ Full MST validation (trust PDS validation)
|
||||
- ❌ Scale to millions of accounts (SQLite limits)
|
||||
- ❌ Multi-instance deployment (single process with SQLite)
|
||||
|
||||
**When to migrate to full relay:** When you have 1000+ holds, need PostgreSQL, or multi-instance deployment.
|
||||
|
||||
## Future Scale: Full Relay (Sync v1.1)
|
||||
|
||||
When ATCR grows beyond dozens of holds and needs real-time indexing, migrate to Bluesky's relay v1.1 implementation.
|
||||
|
||||
### When to Upgrade
|
||||
|
||||
**Indicators:**
|
||||
- 100+ holds requesting frequent crawls
|
||||
- Need real-time updates (re-crawl latency too high)
|
||||
- Multiple AppView instances need coordinated discovery
|
||||
- SQLite performance becomes bottleneck
|
||||
|
||||
### Relay v1.1 Characteristics
|
||||
|
||||
Released May 2025, this is Bluesky's current reference implementation.
|
||||
|
||||
**Key features:**
|
||||
- **Non-archival**: Doesn't mirror full repository data, only processes firehose
|
||||
- **WebSocket subscriptions**: Real-time updates from PDSs
|
||||
- **Scalable**: 2 vCPU, 12GB RAM handles ~100M accounts
|
||||
- **PostgreSQL**: Required for production scale
|
||||
- **Admin UI**: Web dashboard for management
|
||||
|
||||
**Source**: `github.com/bluesky-social/indigo/cmd/relay`
|
||||
|
||||
### Migration Path
|
||||
|
||||
**Step 1: Deploy relay v1.1**
|
||||
```bash
|
||||
git clone https://github.com/bluesky-social/indigo.git
|
||||
cd indigo
|
||||
go build -o relay ./cmd/relay
|
||||
|
||||
export DATABASE_URL="postgres://relay:password@localhost:5432/atcr_relay"
|
||||
./relay --admin-password="secure-password"
|
||||
```
|
||||
|
||||
**Step 2: Migrate data**
|
||||
- Export indexed records from SQLite
|
||||
- Trigger crawls in relay for all known holds
|
||||
- Verify relay indexes correctly
|
||||
|
||||
**Step 3: Update AppView configuration**
|
||||
```bash
|
||||
# Point to new relay
|
||||
export ATCR_RELAY_ENDPOINT="https://relay.atcr.io"
|
||||
```
|
||||
|
||||
**Step 4: Decommission minimal service**
|
||||
- Monitor relay for stability
|
||||
- Shut down old discovery service
|
||||
|
||||
### Infrastructure Requirements (Full Relay)
|
||||
|
||||
**Minimum specs:**
|
||||
- 2 vCPU cores
|
||||
- 12GB RAM
|
||||
- 100GB SSD
|
||||
- 30 Mbps bandwidth
|
||||
|
||||
**Estimated cost:**
|
||||
- Hetzner: ~$30-40/month
|
||||
- DigitalOcean: ~$50/month (with managed PostgreSQL)
|
||||
- Fly.io: ~$35-50/month
|
||||
|
||||
## Collection Indexing: The `collectiondir` Microservice
|
||||
|
||||
The `com.atproto.sync.listReposByCollection` endpoint is **not part of the relay core**. It's provided by a separate microservice called **`collectiondir`**.
|
||||
|
||||
### What is collectiondir?
|
||||
|
||||
- **Separate service** that indexes collections for efficient discovery
|
||||
- **Optional**: Not required by the ATProto spec, but very useful for AppViews
|
||||
- **Deployed alongside relay** by Bluesky's public instances
|
||||
|
||||
### Current Limitation: did:plc Only?
|
||||
|
||||
Based on testing, Bluesky's public relays (with collectiondir) appear to:
|
||||
- ✅ Index `io.atcr.*` collections from `did:plc` DIDs
|
||||
- ❌ NOT index `io.atcr.*` collections from `did:web` DIDs
|
||||
|
||||
This means:
|
||||
- ATCR manifests from users (did:plc) are discoverable
|
||||
- ATCR hold captain records (did:web) are NOT discoverable
|
||||
- The relay still **stores** all data (CAR file includes did:web records)
|
||||
- The issue is specifically with **indexing** for `listReposByCollection`
|
||||
|
||||
### Configuring collectiondir
|
||||
|
||||
Documentation on configuring collectiondir is sparse. Possible approaches:
|
||||
|
||||
1. **Fork and modify**: Clone indigo repo, modify collectiondir to index all DIDs
|
||||
2. **Configuration file**: Check if collectiondir accepts whitelist/configuration for indexed collections
|
||||
3. **No filtering**: Default behavior might be to index everything, but Bluesky's deployment filters
|
||||
|
||||
**Action item**: Review `indigo/cmd/collectiondir` source code to understand configuration options.
|
||||
|
||||
## Multi-Relay Strategy
|
||||
|
||||
Holds can request crawls from **multiple relays** simultaneously. This enables:
|
||||
|
||||
### Scenario: Bluesky + ATCR Relays
|
||||
|
||||
**Setup:**
|
||||
1. Hold deploys with embedded PDS at `did:web:hold01.atcr.io`
|
||||
2. Hold creates captain record (`io.atcr.hold.captain/self`)
|
||||
3. Hold requests crawl from **both**:
|
||||
- Bluesky relay: `https://bsky.network/xrpc/com.atproto.sync.requestCrawl`
|
||||
- ATCR relay: `https://relay.atcr.io/xrpc/com.atproto.sync.requestCrawl`
|
||||
|
||||
**Result:**
|
||||
- ✅ Bluesky relay indexes social posts (if hold owner posts)
|
||||
- ✅ ATCR relay indexes hold captain records
|
||||
- ✅ AppViews query ATCR relay for hold discovery
|
||||
- ✅ Independent networks - Bluesky posts work regardless of ATCR relay
|
||||
|
||||
### Request Crawl Script
|
||||
|
||||
The existing script can be modified to support multiple relays:
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# deploy/request-crawl.sh
|
||||
|
||||
HOSTNAME=$1
|
||||
BLUESKY_RELAY=${2:-"https://bsky.network"}
|
||||
ATCR_RELAY=${3:-"https://relay.atcr.io"}
|
||||
|
||||
echo "Requesting crawl for $HOSTNAME from Bluesky relay..."
|
||||
curl -X POST "$BLUESKY_RELAY/xrpc/com.atproto.sync.requestCrawl" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"hostname\": \"$HOSTNAME\"}"
|
||||
|
||||
echo "Requesting crawl for $HOSTNAME from ATCR relay..."
|
||||
curl -X POST "$ATCR_RELAY/xrpc/com.atproto.sync.requestCrawl" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"hostname\": \"$HOSTNAME\"}"
|
||||
```
|
||||
|
||||
Usage:
|
||||
```bash
|
||||
./deploy/request-crawl.sh hold01.atcr.io
|
||||
```
|
||||
|
||||
## Deployment: Minimal Discovery Service
|
||||
|
||||
### 1. Infrastructure Setup
|
||||
|
||||
**Provision VPS:**
|
||||
- Hetzner CX11, DigitalOcean Basic, or Fly.io
|
||||
- Public domain (e.g., `discovery.atcr.io`)
|
||||
- TLS certificate (Let's Encrypt)
|
||||
|
||||
**Configure reverse proxy (optional - nginx):**
|
||||
```nginx
|
||||
upstream discovery {
|
||||
server 127.0.0.1:8080;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 443 ssl http2;
|
||||
server_name discovery.atcr.io;
|
||||
|
||||
ssl_certificate /etc/letsencrypt/live/discovery.atcr.io/fullchain.pem;
|
||||
ssl_certificate_key /etc/letsencrypt/live/discovery.atcr.io/privkey.pem;
|
||||
|
||||
location / {
|
||||
proxy_pass http://discovery;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Build and Deploy
|
||||
|
||||
```bash
|
||||
# Clone ATCR repo
|
||||
git clone https://github.com/atcr-io/atcr.git
|
||||
cd atcr
|
||||
|
||||
# Build discovery service
|
||||
go build -o atcr-discovery ./cmd/atcr-discovery
|
||||
|
||||
# Run
|
||||
export DATABASE_PATH="/var/lib/atcr-discovery/discovery.db"
|
||||
export HTTP_ADDR=":8080"
|
||||
export CRAWL_INTERVAL="12h"
|
||||
./atcr-discovery
|
||||
```
|
||||
|
||||
### 3. Update Hold Startup
|
||||
|
||||
Each hold should request crawl on startup:
|
||||
|
||||
```bash
|
||||
# In hold startup script or environment
|
||||
export ATCR_DISCOVERY_URL="https://discovery.atcr.io"
|
||||
|
||||
# Request crawl from both Bluesky and ATCR
|
||||
curl -X POST "https://bsky.network/xrpc/com.atproto.sync.requestCrawl" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"hostname\": \"$HOLD_PUBLIC_URL\"}"
|
||||
|
||||
curl -X POST "$ATCR_DISCOVERY_URL/xrpc/com.atproto.sync.requestCrawl" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"hostname\": \"$HOLD_PUBLIC_URL\"}"
|
||||
```
|
||||
|
||||
### 4. Update AppView Configuration
|
||||
|
||||
Point AppView discovery worker to the discovery service:
|
||||
|
||||
```bash
|
||||
# In .env.appview or environment
|
||||
export ATCR_RELAY_ENDPOINT="https://discovery.atcr.io"
|
||||
export ATCR_HOLD_DISCOVERY_ENABLED="true"
|
||||
export ATCR_HOLD_DISCOVERY_INTERVAL="6h"
|
||||
```
|
||||
|
||||
### 5. Monitor and Maintain
|
||||
|
||||
**Monitoring:**
|
||||
- Check crawl queue status
|
||||
- Monitor SQLite database size
|
||||
- Track failed crawls
|
||||
|
||||
**Maintenance:**
|
||||
- Re-crawl on schedule (every 6-24 hours)
|
||||
- Prune stale records (>7 days old)
|
||||
- Backup SQLite database regularly
|
||||
|
||||
## Trade-Offs and Considerations
|
||||
|
||||
### Running Your Own Relay
|
||||
|
||||
**Pros:**
|
||||
- ✅ Full control over indexing (can index `did:web` holds)
|
||||
- ✅ No dependency on third-party relay policies
|
||||
- ✅ Can customize collection filters for ATCR-specific needs
|
||||
- ✅ Relatively lightweight with modern relay implementation
|
||||
|
||||
**Cons:**
|
||||
- ❌ Infrastructure cost (~$30-50/month minimum)
|
||||
- ❌ Operational overhead (monitoring, updates, backups)
|
||||
- ❌ Need to maintain as network grows
|
||||
- ❌ Single point of failure for discovery (unless multi-relay)
|
||||
|
||||
### Alternatives to Running a Relay
|
||||
|
||||
#### 1. Direct Registration API
|
||||
|
||||
Holds POST to AppView on startup to register themselves:
|
||||
|
||||
**Pros:**
|
||||
- ✅ Simplest implementation
|
||||
- ✅ No relay infrastructure needed
|
||||
- ✅ Immediate registration (no crawl delay)
|
||||
|
||||
**Cons:**
|
||||
- ❌ Ties holds to specific AppView instances
|
||||
- ❌ Breaks decentralized discovery model
|
||||
- ❌ Each AppView has different hold registry
|
||||
|
||||
#### 2. Static Discovery File
|
||||
|
||||
Maintain `https://atcr.io/.well-known/holds.json`:
|
||||
|
||||
**Pros:**
|
||||
- ✅ No infrastructure beyond static hosting
|
||||
- ✅ All AppViews share same registry
|
||||
- ✅ Simple to implement
|
||||
|
||||
**Cons:**
|
||||
- ❌ Manual process (PRs/issues to add holds)
|
||||
- ❌ Not real-time discovery
|
||||
- ❌ Centralized control point
|
||||
|
||||
#### 3. Hybrid Approach
|
||||
|
||||
Combine multiple discovery mechanisms:
|
||||
|
||||
```go
|
||||
func (w *HoldDiscoveryWorker) DiscoverHolds(ctx context.Context) error {
|
||||
// 1. Fetch static registry
|
||||
staticHolds := w.fetchStaticRegistry()
|
||||
|
||||
// 2. Query relay (if available)
|
||||
relayHolds := w.queryRelay(ctx)
|
||||
|
||||
// 3. Accept direct registrations
|
||||
registeredHolds := w.getDirectRegistrations()
|
||||
|
||||
// Merge and deduplicate
|
||||
allHolds := mergeHolds(staticHolds, relayHolds, registeredHolds)
|
||||
|
||||
// Cache in database
|
||||
for _, hold := range allHolds {
|
||||
w.cacheHold(hold)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Pros:**
|
||||
- ✅ Multiple discovery paths (resilient)
|
||||
- ✅ Gradual migration to relay-based discovery
|
||||
- ✅ Supports both centralized bootstrap and decentralized growth
|
||||
|
||||
**Cons:**
|
||||
- ❌ More complex implementation
|
||||
- ❌ Potential for stale data if sources conflict
|
||||
|
||||
## Recommendations for ATCR
|
||||
|
||||
### Phase 1: MVP (Now - 1000 holds)
|
||||
|
||||
**Build minimal discovery service with WebSocket** (~$5-10/month):
|
||||
1. Implement `requestCrawl` + `listReposByCollection` endpoints
|
||||
2. Initial backfill via `getRepo` (CAR file parsing)
|
||||
3. Real-time updates via WebSocket `subscribeRepos`
|
||||
4. SQLite storage with cursor management
|
||||
5. Filter to `io.atcr.*` collections only
|
||||
|
||||
**Deliverables:**
|
||||
- `cmd/atcr-discovery` service
|
||||
- SQLite schema with cursor storage
|
||||
- CAR file parser (indigo libraries)
|
||||
- WebSocket subscriber with reconnection
|
||||
- Deployment scripts
|
||||
|
||||
**Cost**: ~$5-10/month VPS
|
||||
|
||||
**Why**: Minimal infrastructure, real-time updates, full control over indexing, sufficient for hundreds of holds.
|
||||
|
||||
### Phase 2: Migrate to Full Relay (1000+ holds)
|
||||
|
||||
**Deploy Bluesky relay v1.1** when scaling needed (~$30-50/month):
|
||||
1. Set up PostgreSQL database
|
||||
2. Deploy indigo relay with admin UI
|
||||
3. Migrate indexed data from SQLite
|
||||
4. Configure for `io.atcr.*` collection filtering (if possible)
|
||||
5. Handle thousands of concurrent WebSocket connections
|
||||
|
||||
**Cost**: ~$30-50/month
|
||||
|
||||
**Why**: Proven scalability to 100M+ accounts, standardized protocol, community support, production-ready infrastructure.
|
||||
|
||||
### Phase 3: Multi-Relay Federation (Future)
|
||||
|
||||
**Decentralized relay network:**
|
||||
1. Multiple ATCR relays operated independently
|
||||
2. AppViews query multiple relays (fallback/redundancy)
|
||||
3. Holds request crawls from all known ATCR relays
|
||||
4. Cross-relay synchronization (optional)
|
||||
|
||||
**Why**: No single point of failure, fully decentralized discovery, geographic distribution.
|
||||
|
||||
## Next Steps
|
||||
|
||||
### For MVP Implementation
|
||||
|
||||
1. **Create `cmd/atcr-discovery` package structure**
|
||||
- HTTP handlers for XRPC endpoints (`requestCrawl`, `listReposByCollection`)
|
||||
- Crawler with indigo CAR parsing for initial backfill
|
||||
- WebSocket subscriber for real-time updates
|
||||
- SQLite storage layer with cursor management
|
||||
- Background worker for managing subscriptions
|
||||
|
||||
2. **Database schema**
|
||||
- `indexed_records` table for collection data
|
||||
- `crawl_queue` table for crawl job management
|
||||
- `subscriptions` table for WebSocket cursor tracking
|
||||
- Indexes for efficient queries
|
||||
|
||||
3. **WebSocket implementation**
|
||||
- Use `github.com/bluesky-social/indigo/events` for event handling
|
||||
- Implement reconnection logic with cursor resume
|
||||
- Filter events to `io.atcr.*` collections only
|
||||
- Health monitoring for active subscriptions
|
||||
|
||||
4. **Testing strategy**
|
||||
- Unit tests for CAR parsing
|
||||
- Unit tests for event filtering
|
||||
- Integration tests with mock PDSs and WebSocket
|
||||
- Connection failure and reconnection testing
|
||||
- Load testing with SQLite
|
||||
|
||||
5. **Deployment**
|
||||
- Dockerfile for discovery service
|
||||
- Deployment scripts (systemd, docker-compose)
|
||||
- Monitoring setup (logs, metrics, WebSocket health)
|
||||
- Alert on subscription failures
|
||||
|
||||
6. **Documentation**
|
||||
- API documentation for XRPC endpoints
|
||||
- Deployment guide
|
||||
- Troubleshooting guide (WebSocket connection issues)
|
||||
|
||||
### Open Questions
|
||||
|
||||
1. **CAR parsing edge cases**: How to handle malformed CAR files or invalid records?
|
||||
2. **WebSocket reconnection**: What's the optimal backoff strategy for reconnection attempts?
|
||||
3. **Subscription management**: How many concurrent WebSocket connections can SQLite handle?
|
||||
4. **Rate limiting**: Should discovery service rate-limit requestCrawl to prevent abuse?
|
||||
5. **Authentication**: Should requestCrawl require authentication, or remain open?
|
||||
6. **Cursor storage**: Should cursors be persisted immediately or batched for performance?
|
||||
7. **Monitoring**: What metrics are most important for operational visibility (active subs, event rate, lag)?
|
||||
8. **Error handling**: When a WebSocket dies, should we re-backfill via getRepo or trust cursor resume?
|
||||
|
||||
## References
|
||||
|
||||
### ATProto Specifications
|
||||
- [ATProto Sync Specification](https://atproto.com/specs/sync)
|
||||
- [Repository Specification](https://atproto.com/specs/repository)
|
||||
- [CAR File Format](https://ipld.io/specs/transport/car/)
|
||||
|
||||
### Indigo Libraries
|
||||
- [Indigo Repository](https://github.com/bluesky-social/indigo)
|
||||
- [Indigo Repo Package](https://pkg.go.dev/github.com/bluesky-social/indigo/repo)
|
||||
- [Indigo ATProto Package](https://pkg.go.dev/github.com/bluesky-social/indigo/atproto)
|
||||
|
||||
### Relay Reference (Future)
|
||||
- [Relay v1.1 Updates](https://docs.bsky.app/blog/relay-sync-updates)
|
||||
- [Indigo Relay Implementation](https://github.com/bluesky-social/indigo/tree/main/cmd/relay)
|
||||
- [Running a Full-Network Relay](https://whtwnd.com/bnewbold.net/3kwzl7tye6u2y)
|
||||
101
docs/SAILOR.md
101
docs/SAILOR.md
@@ -1,101 +0,0 @@
|
||||
User approved Claude's plan:
|
||||
⎿ Implement Sailor Profile & Hold Discovery System
|
||||
|
||||
Summary
|
||||
|
||||
Add io.atcr.sailor.profile record type to manage user's default hold preference, and update manifest to store historical hold endpoint reference. This enables transparent hold
|
||||
routing while preserving image ownership semantics.
|
||||
|
||||
Changes Required
|
||||
|
||||
1. Create Sailor Profile Lexicon
|
||||
|
||||
File: lexicons/io/atcr/sailor/profile.json
|
||||
- New record type: io.atcr.sailor.profile
|
||||
- Fields: defaultHold (string, nullable), createdAt, updatedAt
|
||||
|
||||
2. Update Manifest Lexicon
|
||||
|
||||
File: lexicons/io/atcr/manifest.json
|
||||
- Add holdEndpoint field (string, required)
|
||||
- This is historical reference (immutable per manifest)
|
||||
|
||||
3. Update Go Types
|
||||
|
||||
File: pkg/atproto/lexicon.go
|
||||
- Add SailorProfileCollection = "io.atcr.sailor.profile"
|
||||
- Add SailorProfileRecord struct
|
||||
- Add NewSailorProfileRecord() constructor
|
||||
- Update ManifestRecord struct to include HoldEndpoint field
|
||||
|
||||
4. Create Profile Management
|
||||
|
||||
File: pkg/atproto/profile.go (new file)
|
||||
- EnsureProfile(ctx, client, defaultHoldEndpoint) function
|
||||
- Logic: check if profile exists, create with default if not
|
||||
|
||||
5. Update Auth Handlers
|
||||
|
||||
Files: pkg/auth/exchange/handler.go and pkg/auth/token/service.go
|
||||
- Call EnsureProfile() after token validation
|
||||
- Use authenticated client (has write access to user's PDS)
|
||||
- Pass AppView's default_hold_endpoint config
|
||||
|
||||
6. Update Hold Resolution
|
||||
|
||||
File: pkg/middleware/registry.go
|
||||
- Update findStorageEndpoint() priority:
|
||||
a. Check io.atcr.sailor.profile.defaultHold
|
||||
b. If null (opted out): check user's io.atcr.hold, then AppView default
|
||||
c. If no profile: check user's io.atcr.hold, then AppView default
|
||||
|
||||
7. Store Hold in Manifest
|
||||
|
||||
File: pkg/atproto/manifest_store.go
|
||||
- When creating manifest, include resolved holdEndpoint
|
||||
- Pass hold endpoint through context or parameter
|
||||
|
||||
8. Update Pull to Use Manifest Hold
|
||||
|
||||
File: pkg/atproto/manifest_store.go and pkg/storage/routing_repository.go
|
||||
- On pull, extract holdEndpoint from manifest
|
||||
- Route blob requests to that hold (not via discovery)
|
||||
|
||||
9. Update Documentation
|
||||
|
||||
Files: CLAUDE.md, docs/BYOS.md, .env.example
|
||||
- Document sailor profile concept
|
||||
- Explain hold resolution priority
|
||||
- Update examples for shared holds
|
||||
- Document how crew members configure profile
|
||||
|
||||
Benefits
|
||||
|
||||
- ✅ URL structure remains atcr.io/<owner>/<image> (ownership clear)
|
||||
- ✅ Hold is transparent infrastructure (like S3 region)
|
||||
- ✅ Supports personal, shared, and public holds
|
||||
- ✅ Historical reference in manifest (pull works even if defaults change)
|
||||
- ✅ User can opt-out (set defaultHold to null)
|
||||
- ✅ Future: UI for self-service profile management
|
||||
|
||||
|
||||
Progress Summary
|
||||
|
||||
✅ Completed:
|
||||
1. Created io.atcr.sailor.profile lexicon
|
||||
2. Updated io.atcr.manifest lexicon with holdEndpoint field
|
||||
3. Updated Go types in pkg/atproto/lexicon.go
|
||||
4. Created profile management in pkg/atproto/profile.go
|
||||
5. Updated /auth/exchange handler to manage profile
|
||||
|
||||
⏳ In Progress:
|
||||
- Need to update /auth/token handler similarly (add defaultHoldEndpoint parameter and profile management)
|
||||
- Fix compilation error in extractDefaultHoldEndpoint() - should use configuration.Middleware type not any
|
||||
|
||||
🔜 Remaining:
|
||||
- Update findStorageEndpoint() for new priority logic (check profile → own hold → default)
|
||||
- Update manifest_store.go to include holdEndpoint when creating manifests
|
||||
- Update pull flow to use manifest holdEndpoint
|
||||
- Update documentation
|
||||
|
||||
The architecture is solid - just need to finish the token handler update and fix the type issue in the config extraction. Would you like me to continue?
|
||||
568
docs/SBOM_SCANNING.md
Normal file
568
docs/SBOM_SCANNING.md
Normal file
@@ -0,0 +1,568 @@
|
||||
# SBOM Scanning
|
||||
|
||||
ATCR supports optional Software Bill of Materials (SBOM) generation for container images stored in holds. This feature enables automated security scanning and vulnerability analysis while maintaining the decentralized architecture.
|
||||
|
||||
## Overview
|
||||
|
||||
When enabled, holds automatically generate SBOMs for uploaded container images in the background. The scanning process:
|
||||
|
||||
- **Async execution**: Scanning happens after upload completes (non-blocking)
|
||||
- **ORAS artifacts**: SBOMs stored as OCI Registry as Storage (ORAS) artifacts
|
||||
- **ATProto integration**: Scan results stored as `io.atcr.manifest` records in hold's embedded PDS
|
||||
- **Tool agnostic**: Results accessible via XRPC, ATProto queries, and direct blob URLs
|
||||
- **Opt-in**: Disabled by default, enabled per-hold via configuration
|
||||
|
||||
### Default Scanner: Syft
|
||||
|
||||
ATCR uses [Anchore Syft](https://github.com/anchore/syft) for SBOM generation:
|
||||
- Industry-standard SBOM generator
|
||||
- Supports SPDX and CycloneDX formats
|
||||
- Comprehensive package detection (OS packages, language libraries, etc.)
|
||||
- Active maintenance and CVE database updates
|
||||
|
||||
Future enhancements may include [Grype](https://github.com/anchore/grype) for vulnerability scanning and [Trivy](https://github.com/aquasecurity/trivy) for comprehensive security analysis.
|
||||
|
||||
## Trust Model
|
||||
|
||||
### Same Trust as Docker Hub
|
||||
|
||||
SBOM scanning follows the same trust model as Docker Hub or other centralized registries:
|
||||
|
||||
**Docker Hub model:**
|
||||
- Docker Hub scans your image on their infrastructure
|
||||
- Results stored in their database
|
||||
- You trust Docker Hub's scanner version and scan integrity
|
||||
|
||||
**ATCR hold model:**
|
||||
- Hold scans image on their infrastructure
|
||||
- Results stored in hold's embedded PDS
|
||||
- You trust hold operator's scanner version and scan integrity
|
||||
|
||||
The security comes from **reproducibility** and **transparency**, not storage location:
|
||||
- Anyone can re-scan the same digest and verify results
|
||||
- Multiple holds scanning the same image provide independent verification
|
||||
- Scanner version and scan timestamp are recorded in ATProto records
|
||||
|
||||
### Why Hold's PDS?
|
||||
|
||||
Scan results are stored in the **hold's embedded PDS** rather than the user's PDS:
|
||||
|
||||
**Advantages:**
|
||||
1. **No OAuth expiry issues**: Hold owns its PDS, no service tokens needed
|
||||
2. **Hold-scoped metadata**: Scanner version, scan time, hold configuration
|
||||
3. **Multiple perspectives**: Different holds can scan the same image independently
|
||||
4. **Simpler auth**: Hold writes directly to its own PDS
|
||||
5. **Keeps user PDS lean**: Potentially large SBOM data doesn't bloat user's repo
|
||||
|
||||
**Security properties:**
|
||||
- Same trust level as trusting hold to serve correct blobs
|
||||
- DID signatures prove which hold generated the SBOM
|
||||
- Reproducible scans enable independent verification
|
||||
- Multiple holds scanning same digest → compare results for tampering detection
|
||||
|
||||
## ORAS Manifest Format
|
||||
|
||||
SBOMs are stored as ORAS artifacts that reference their subject image using the OCI referrers specification.
|
||||
|
||||
### Example Manifest Record
|
||||
|
||||
```json
|
||||
{
|
||||
"$type": "io.atcr.manifest",
|
||||
"repository": "alice/myapp",
|
||||
"digest": "sha256:4a5e...",
|
||||
"holdDid": "did:web:hold01.atcr.io",
|
||||
"holdEndpoint": "https://hold01.atcr.io",
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"artifactType": "application/spdx+json",
|
||||
"subject": {
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"digest": "sha256:abc123...",
|
||||
"size": 1234
|
||||
},
|
||||
"config": {
|
||||
"mediaType": "application/vnd.oci.empty.v1+json",
|
||||
"digest": "sha256:44136f...",
|
||||
"size": 2
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/spdx+json",
|
||||
"digest": "sha256:def456...",
|
||||
"size": 5678,
|
||||
"annotations": {
|
||||
"org.opencontainers.image.title": "sbom.spdx.json"
|
||||
}
|
||||
}
|
||||
],
|
||||
"manifestBlob": {
|
||||
"$type": "blob",
|
||||
"ref": { "$link": "bafyrei..." },
|
||||
"mimeType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"size": 789
|
||||
},
|
||||
"ownerDid": "did:plc:alice123",
|
||||
"scannedAt": "2025-10-20T12:34:56.789Z",
|
||||
"scannerVersion": "syft-v1.0.0",
|
||||
"createdAt": "2025-10-20T12:34:56.789Z"
|
||||
}
|
||||
```
|
||||
|
||||
### Key Fields
|
||||
|
||||
- `artifactType`: Distinguishes SBOM artifact from regular image manifest
|
||||
- `application/spdx+json` for SPDX format
|
||||
- `application/vnd.cyclonedx+json` for CycloneDX format
|
||||
- `subject`: Reference to the original image manifest
|
||||
- `ownerDid`: DID of the image owner (for multi-tenant holds)
|
||||
- `scannedAt`: ISO 8601 timestamp of when scan completed
|
||||
- `scannerVersion`: Tool version for reproducibility tracking
|
||||
|
||||
### SBOM Blob
|
||||
|
||||
The actual SBOM document is stored as a blob in the hold's storage backend and referenced in the manifest's `layers` array. The blob contains the full SPDX or CycloneDX JSON document.
|
||||
|
||||
## Configuration
|
||||
|
||||
SBOM scanning is configured via environment variables on the hold service.
|
||||
|
||||
### Environment Variables
|
||||
|
||||
```bash
|
||||
# Enable SBOM scanning (opt-in)
|
||||
HOLD_SBOM_ENABLED=true
|
||||
|
||||
# Number of concurrent scan workers (default: 2)
|
||||
# Higher values = faster scanning, more CPU/memory usage
|
||||
HOLD_SBOM_WORKERS=4
|
||||
|
||||
# SBOM output format (default: spdx-json)
|
||||
# Options: spdx-json, cyclonedx-json
|
||||
HOLD_SBOM_FORMAT=spdx-json
|
||||
|
||||
# Future: Enable vulnerability scanning with Grype
|
||||
# HOLD_VULN_ENABLED=true
|
||||
```
|
||||
|
||||
### Example Configuration
|
||||
|
||||
```bash
|
||||
# .env.hold
|
||||
HOLD_PUBLIC_URL=https://hold01.atcr.io
|
||||
STORAGE_DRIVER=s3
|
||||
S3_BUCKET=my-hold-blobs
|
||||
HOLD_OWNER=did:plc:xyz123
|
||||
HOLD_DATABASE_PATH=/var/lib/atcr/hold.db
|
||||
|
||||
# Enable SBOM scanning
|
||||
HOLD_SBOM_ENABLED=true
|
||||
HOLD_SBOM_WORKERS=2
|
||||
HOLD_SBOM_FORMAT=spdx-json
|
||||
```
|
||||
|
||||
## Scanning Workflow
|
||||
|
||||
### 1. Upload Completes
|
||||
|
||||
When a container image is successfully pushed to a hold:
|
||||
|
||||
```
|
||||
1. Client: docker push atcr.io/alice/myapp:latest
|
||||
2. AppView routes blobs to hold service
|
||||
3. Hold receives multipart upload via XRPC
|
||||
4. Hold completes upload and stores blobs
|
||||
5. Hold checks: HOLD_SBOM_ENABLED=true?
|
||||
6. If yes: enqueue scan job (non-blocking)
|
||||
7. Upload completes immediately
|
||||
```
|
||||
|
||||
### 2. Background Scanning
|
||||
|
||||
Scan workers process jobs from the queue:
|
||||
|
||||
```
|
||||
1. Worker pulls job from queue
|
||||
2. Extracts image layers from storage
|
||||
3. Runs Syft on extracted filesystem
|
||||
4. Generates SBOM in configured format
|
||||
5. Uploads SBOM blob to storage
|
||||
6. Creates ORAS manifest record in hold's PDS
|
||||
7. Job complete
|
||||
```
|
||||
|
||||
### 3. Result Storage
|
||||
|
||||
SBOM results are stored in two places:
|
||||
|
||||
1. **SBOM blob**: Full JSON document in hold's blob storage
|
||||
2. **ORAS manifest**: Metadata record in hold's embedded PDS
|
||||
- Collection: `io.atcr.manifest`
|
||||
- Record key: SBOM manifest digest
|
||||
- Contains reference to subject image
|
||||
|
||||
## Accessing SBOMs
|
||||
|
||||
Multiple methods for discovering and retrieving SBOM data.
|
||||
|
||||
### 1. XRPC Query Endpoint
|
||||
|
||||
Query for SBOMs by image digest:
|
||||
|
||||
```bash
|
||||
# Get SBOM for a specific image
|
||||
curl "https://hold01.atcr.io/xrpc/io.atcr.hold.getSBOM?\
|
||||
digest=sha256:abc123&\
|
||||
ownerDid=did:plc:alice123&\
|
||||
repository=alice/myapp"
|
||||
|
||||
# Response: ORAS manifest JSON
|
||||
{
|
||||
"manifest": {
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"artifactType": "application/spdx+json",
|
||||
"subject": { "digest": "sha256:abc123...", ... },
|
||||
"layers": [ { "digest": "sha256:def456...", ... } ]
|
||||
},
|
||||
"scannedAt": "2025-10-20T12:34:56.789Z",
|
||||
"scannerVersion": "syft-v1.0.0"
|
||||
}
|
||||
```
|
||||
|
||||
### 2. ATProto Repository Queries
|
||||
|
||||
Use standard ATProto XRPC to list all SBOMs:
|
||||
|
||||
```bash
|
||||
# List all SBOM manifests in hold's PDS
|
||||
curl "https://hold01.atcr.io/xrpc/com.atproto.repo.listRecords?\
|
||||
repo=did:web:hold01.atcr.io&\
|
||||
collection=io.atcr.manifest"
|
||||
|
||||
# Filter by artifactType (requires AppView indexing)
|
||||
# Returns all SBOM artifacts
|
||||
```
|
||||
|
||||
### 3. Direct SBOM Blob Download
|
||||
|
||||
Download the full SBOM JSON file:
|
||||
|
||||
```bash
|
||||
# Get SBOM blob CID from manifest layers[0].digest
|
||||
SBOM_DIGEST="sha256:def456..."
|
||||
|
||||
# Request presigned download URL
|
||||
curl "https://hold01.atcr.io/xrpc/com.atproto.sync.getBlob?\
|
||||
did=did:web:hold01.atcr.io&\
|
||||
cid=$SBOM_DIGEST"
|
||||
|
||||
# Response: presigned S3 URL or direct blob
|
||||
{
|
||||
"url": "https://s3.amazonaws.com/bucket/blob?signature=...",
|
||||
"expiresAt": "2025-10-20T12:49:56Z"
|
||||
}
|
||||
|
||||
# Download SBOM JSON
|
||||
curl "$URL" > sbom.spdx.json
|
||||
```
|
||||
|
||||
### 4. ORAS CLI Integration
|
||||
|
||||
Use the ORAS CLI to discover and pull SBOMs:
|
||||
|
||||
```bash
|
||||
# Discover referrers (SBOMs) for an image
|
||||
oras discover atcr.io/alice/myapp:latest
|
||||
|
||||
# Output shows SBOM artifacts:
|
||||
# digest: sha256:abc123...
|
||||
# referrers:
|
||||
# - artifactType: application/spdx+json
|
||||
# digest: sha256:4a5e...
|
||||
|
||||
# Pull SBOM artifact
|
||||
oras pull atcr.io/alice/myapp@sha256:4a5e...
|
||||
|
||||
# Downloads sbom.spdx.json to current directory
|
||||
```
|
||||
|
||||
### 5. AppView Web UI (Future)
|
||||
|
||||
Future enhancement: AppView web interface will display SBOM information on repository pages:
|
||||
|
||||
- Link to SBOM JSON download
|
||||
- Vulnerability count (if Grype enabled)
|
||||
- Scanner version and scan timestamp
|
||||
- Comparison across multiple holds
|
||||
|
||||
## Tool Integration
|
||||
|
||||
### SPDX/CycloneDX Tools
|
||||
|
||||
Any tool that understands SPDX or CycloneDX formats can consume the SBOMs:
|
||||
|
||||
**Example tools:**
|
||||
- [OSV Scanner](https://github.com/google/osv-scanner) - Vulnerability scanning
|
||||
- [Grype](https://github.com/anchore/grype) - Vulnerability scanning
|
||||
- [Dependency-Track](https://dependencytrack.org/) - Software composition analysis
|
||||
- [SBOM Quality Score](https://github.com/eBay/sbom-scorecard) - SBOM completeness
|
||||
|
||||
**Usage:**
|
||||
```bash
|
||||
# Download SBOM
|
||||
curl "https://hold01.atcr.io/xrpc/io.atcr.hold.getSBOM?..." | \
|
||||
jq -r '.manifest.layers[0].digest' | \
|
||||
# ... fetch blob ... > sbom.spdx.json
|
||||
|
||||
# Scan with OSV
|
||||
osv-scanner --sbom sbom.spdx.json
|
||||
|
||||
# Scan with Grype
|
||||
grype sbom:./sbom.spdx.json
|
||||
```
|
||||
|
||||
### OCI Registry API
|
||||
|
||||
ORAS manifests are fully OCI-compliant and discoverable via standard registry APIs:
|
||||
|
||||
```bash
|
||||
# Discover referrers for an image
|
||||
curl -H "Accept: application/vnd.oci.image.index.v1+json" \
|
||||
"https://atcr.io/v2/alice/myapp/referrers/sha256:abc123"
|
||||
|
||||
# Returns referrers index with SBOM manifests
|
||||
{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.index.v1+json",
|
||||
"manifests": [
|
||||
{
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"digest": "sha256:4a5e...",
|
||||
"artifactType": "application/spdx+json"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Programmatic Access
|
||||
|
||||
Use the ATProto SDK to query SBOMs:
|
||||
|
||||
```go
|
||||
import "github.com/bluesky-social/indigo/atproto"
|
||||
|
||||
// List all SBOMs for a hold
|
||||
records, err := client.RepoListRecords(ctx,
|
||||
"did:web:hold01.atcr.io",
|
||||
"io.atcr.manifest",
|
||||
100, // limit
|
||||
"", // cursor
|
||||
)
|
||||
|
||||
// Filter for SBOM artifacts
|
||||
for _, record := range records.Records {
|
||||
manifest := record.Value.(ManifestRecord)
|
||||
if manifest.ArtifactType == "application/spdx+json" {
|
||||
// Process SBOM manifest
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
### Vulnerability Scanning (Grype)
|
||||
|
||||
Add vulnerability scanning to SBOM generation:
|
||||
|
||||
```bash
|
||||
# Configuration
|
||||
HOLD_VULN_ENABLED=true
|
||||
HOLD_VULN_DB_UPDATE_INTERVAL=24h
|
||||
|
||||
# Extended manifest with vulnerability count
|
||||
{
|
||||
"artifactType": "application/spdx+json",
|
||||
"annotations": {
|
||||
"io.atcr.vuln.critical": "2",
|
||||
"io.atcr.vuln.high": "15",
|
||||
"io.atcr.vuln.medium": "42",
|
||||
"io.atcr.vuln.low": "8",
|
||||
"io.atcr.vuln.scannedWith": "grype-v0.74.0",
|
||||
"io.atcr.vuln.dbVersion": "2025-10-20"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Multi-Scanner Support (Trivy)
|
||||
|
||||
Support multiple scanner backends:
|
||||
|
||||
```bash
|
||||
HOLD_SBOM_SCANNER=trivy # syft (default), trivy, grype
|
||||
HOLD_TRIVY_SCAN_TYPE=os,library,config,secret
|
||||
```
|
||||
|
||||
### Multi-Hold Verification
|
||||
|
||||
Compare SBOMs from different holds for the same image:
|
||||
|
||||
```bash
|
||||
# Alice pushes to hold1 and hold2
|
||||
docker push atcr.io/alice/myapp:latest
|
||||
|
||||
# Both holds scan independently
|
||||
# Compare results:
|
||||
atcr-cli compare-sboms \
|
||||
--image atcr.io/alice/myapp:latest \
|
||||
--holds hold1.atcr.io,hold2.atcr.io
|
||||
|
||||
# Output: Package count differences, version mismatches, etc.
|
||||
```
|
||||
|
||||
### Signature Verification (Cosign)
|
||||
|
||||
Sign SBOMs with Sigstore Cosign:
|
||||
|
||||
```bash
|
||||
HOLD_SBOM_SIGN=true
|
||||
HOLD_COSIGN_KEY_PATH=/var/lib/atcr/cosign.key
|
||||
|
||||
# SBOM artifacts get signed
|
||||
# Verification:
|
||||
cosign verify --key cosign.pub atcr.io/alice/myapp@sha256:4a5e...
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Reproducibility
|
||||
|
||||
SBOMs should be reproducible for the same image digest:
|
||||
|
||||
**Best practices:**
|
||||
- Pin scanner versions in production holds
|
||||
- Record scanner version in manifest annotations
|
||||
- Document vulnerability database versions
|
||||
- Re-scan periodically to catch new CVEs
|
||||
|
||||
**Validation:**
|
||||
```bash
|
||||
# Compare SBOMs from different holds
|
||||
diff <(curl hold1/sbom.json | jq -S) \
|
||||
<(curl hold2/sbom.json | jq -S)
|
||||
|
||||
# Differences indicate:
|
||||
# - Different scanner versions
|
||||
# - Different scan times (new CVEs discovered)
|
||||
# - Potential tampering (investigate)
|
||||
```
|
||||
|
||||
### Multiple Hold Verification
|
||||
|
||||
Running multiple holds provides defense in depth:
|
||||
|
||||
1. User pushes to hold1 (uses hold1 by default)
|
||||
2. User also pushes to hold2 (backup/verification)
|
||||
3. Both holds scan independently
|
||||
4. Compare SBOM results:
|
||||
- Similar results = confidence in accuracy
|
||||
- Divergent results = investigate discrepancy
|
||||
|
||||
### Transparency
|
||||
|
||||
Hold operators should publish scanning policies:
|
||||
|
||||
- Scanner version and update schedule
|
||||
- Vulnerability database update frequency
|
||||
- SBOM format and schema version
|
||||
- Data retention policies
|
||||
|
||||
### Trust Anchors
|
||||
|
||||
Users can verify scanner integrity:
|
||||
|
||||
1. **Scanner version**: Check `scannerVersion` field matches expected version
|
||||
2. **DID signature**: ATProto record signed by hold's DID
|
||||
3. **Timestamp**: Check `scannedAt` for stale scans
|
||||
4. **Reproducibility**: Re-scan locally and compare results
|
||||
|
||||
## Example Workflows
|
||||
|
||||
### Enable Scanning on Your Hold
|
||||
|
||||
```bash
|
||||
# 1. Configure hold with SBOM enabled
|
||||
cat > .env.hold <<EOF
|
||||
HOLD_PUBLIC_URL=https://myhold.example.com
|
||||
STORAGE_DRIVER=s3
|
||||
S3_BUCKET=my-blobs
|
||||
HOLD_OWNER=did:plc:myid
|
||||
|
||||
# Enable SBOM scanning
|
||||
HOLD_SBOM_ENABLED=true
|
||||
HOLD_SBOM_WORKERS=2
|
||||
HOLD_SBOM_FORMAT=spdx-json
|
||||
EOF
|
||||
|
||||
# 2. Start hold service
|
||||
./bin/atcr-hold
|
||||
|
||||
# 3. Push an image
|
||||
docker push atcr.io/alice/myapp:latest
|
||||
|
||||
# 4. Wait for background scan (check logs)
|
||||
# 2025-10-20T12:34:56Z INFO Scanning image sha256:abc123...
|
||||
# 2025-10-20T12:35:12Z INFO SBOM generated sha256:def456...
|
||||
|
||||
# 5. Query for SBOM
|
||||
curl "https://myhold.example.com/xrpc/io.atcr.hold.getSBOM?..."
|
||||
```
|
||||
|
||||
### Consume SBOMs in CI/CD
|
||||
|
||||
```yaml
|
||||
# .github/workflows/security-scan.yml
|
||||
name: Security Scan
|
||||
on: push
|
||||
|
||||
jobs:
|
||||
scan:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Pull image
|
||||
run: docker pull atcr.io/alice/myapp:latest
|
||||
|
||||
- name: Get SBOM from hold
|
||||
run: |
|
||||
IMAGE_DIGEST=$(docker inspect atcr.io/alice/myapp:latest \
|
||||
--format='{{.RepoDigests}}')
|
||||
|
||||
curl "https://hold01.atcr.io/xrpc/io.atcr.hold.getSBOM?\
|
||||
digest=$IMAGE_DIGEST&\
|
||||
ownerDid=did:plc:alice123&\
|
||||
repository=alice/myapp" \
|
||||
-o sbom-manifest.json
|
||||
|
||||
SBOM_DIGEST=$(jq -r '.manifest.layers[0].digest' sbom-manifest.json)
|
||||
|
||||
curl "https://hold01.atcr.io/xrpc/com.atproto.sync.getBlob?\
|
||||
did=did:web:hold01.atcr.io&\
|
||||
cid=$SBOM_DIGEST" \
|
||||
| jq -r '.url' | xargs curl -o sbom.spdx.json
|
||||
|
||||
- name: Scan with Grype
|
||||
uses: anchore/scan-action@v3
|
||||
with:
|
||||
sbom: sbom.spdx.json
|
||||
fail-build: true
|
||||
severity-cutoff: high
|
||||
```
|
||||
|
||||
## References
|
||||
|
||||
- [ORAS Specification](https://oras.land/)
|
||||
- [OCI Artifacts](https://github.com/opencontainers/artifacts)
|
||||
- [SPDX Specification](https://spdx.dev/)
|
||||
- [CycloneDX Specification](https://cyclonedx.org/)
|
||||
- [Syft Documentation](https://github.com/anchore/syft)
|
||||
- [ATProto Specification](https://atproto.com/)
|
||||
639
docs/TEST_COVERAGE_GAPS.md
Normal file
639
docs/TEST_COVERAGE_GAPS.md
Normal file
@@ -0,0 +1,639 @@
|
||||
# Test Coverage Gaps
|
||||
|
||||
**Overall Coverage:** 39.0% (improved from 37.7%, +1.3%)
|
||||
|
||||
This document tracks files in the `pkg/` directory that need test coverage, organized by package. Data is based on actual `coverage.out` analysis.
|
||||
|
||||
**Last Updated:** After adding tests for atproto utilities, handlers improvements, and OAuth browser functionality.
|
||||
|
||||
## Recent Achievements 🎯
|
||||
|
||||
In this testing session, we achieved:
|
||||
|
||||
1. **pkg/appview/handlers** - 2.1% → 19.7% (**+17.6%** 🎉)
|
||||
- Significant improvement in web handler coverage
|
||||
- Better test coverage across handler functions
|
||||
|
||||
2. **pkg/atproto** - 26.1% → 27.8% (**+1.7%**)
|
||||
- New test files added:
|
||||
- directory_test.go (NEW)
|
||||
- endpoints_test.go (NEW)
|
||||
- utils_test.go (NEW)
|
||||
- Improved lexicon tests
|
||||
|
||||
3. **pkg/auth/oauth** - 48.3% → 50.7% (**+2.4%**)
|
||||
- browser_test.go improvements
|
||||
- Better OAuth flow coverage
|
||||
|
||||
4. **Overall improvement** - 37.7% → 39.0% (**+1.3%**)
|
||||
- Cumulative improvement from baseline: 31.2% → 39.0% (**+7.8%**)
|
||||
|
||||
**Note:** pkg/appview/db coverage decreased slightly from 44.8% → 41.2% (-3.6%), likely due to additional untested code paths being tracked in existing test files.
|
||||
|
||||
**Next Priority:** Continue with storage blob write operations (proxy_blob_store.go Put/Create/Writer methods)
|
||||
|
||||
---
|
||||
|
||||
Legend:
|
||||
- ⭐ **Critical Priority** - Core functionality that must be tested
|
||||
- 🔴 **High Priority** - Important functionality with security/data implications
|
||||
- 🟡 **Medium Priority** - Supporting functionality
|
||||
- 🟢 **Low Priority** - Nice-to-have, less critical features
|
||||
- ✅ **Good Coverage** - Package has >70% coverage
|
||||
- 📊 **Partial Coverage** - File has some coverage but needs more
|
||||
- 🎯 **Recently Improved** - Coverage significantly improved in latest update
|
||||
|
||||
---
|
||||
|
||||
## Package Coverage Summary
|
||||
|
||||
| Package | Coverage | Status | Priority | Change |
|
||||
|---------|----------|--------|----------|--------|
|
||||
| `pkg/hold` | 98.0% | ✅ Excellent | - | - |
|
||||
| `pkg/s3` | 97.4% | ✅ Excellent | - | - |
|
||||
| `pkg/appview/licenses` | 93.0% | ✅ Excellent | - | - |
|
||||
| `pkg/appview` | 81.9% | ✅ Excellent | - | +0.1% |
|
||||
| `pkg/logging` | 75.0% | ✅ Good | - | - |
|
||||
| `pkg/auth/token` | 68.8% | 🟡 Good | - | - |
|
||||
| `pkg/appview/middleware` | 57.8% | 🟡 Good | - | - |
|
||||
| `pkg/auth` | 55.7% | 🟡 Needs work | Medium | - |
|
||||
| `pkg/hold/oci` | 51.9% | 🟡 Needs work | Medium | - |
|
||||
| `pkg/appview/storage` | 51.4% | 🟡 Needs work | **High** | - |
|
||||
| `pkg/auth/oauth` | 50.7% | 🟡 Needs work | High | 🎯 **+2.4%** |
|
||||
| `pkg/hold/pds` | 47.2% | 🟡 Needs work | Low | - |
|
||||
| `pkg/appview/db` | 41.2% | 🟡 Needs work | Medium | 🔴 **-3.6%** |
|
||||
| `pkg/appview/holdhealth` | 41.0% | 🟡 Needs work | Low | - |
|
||||
| `pkg/atproto` | 27.8% | 🟡 Needs work | High | 🎯 **+1.7%** |
|
||||
| `pkg/appview/readme` | 27.2% | 🟡 Needs work | Low | - |
|
||||
| `pkg/appview/handlers` | 19.7% | 🟡 Needs work | Low | 🎯 **+17.6%** |
|
||||
| `pkg/appview/jetstream` | 11.6% | 🟡 Needs work | Medium | - |
|
||||
| `pkg/appview/routes` | 10.4% | 🟡 Needs work | Low | - |
|
||||
|
||||
**⚠️ Notes on Coverage Changes:**
|
||||
|
||||
Several packages show decreased percentages despite improvements. This is due to:
|
||||
1. **New test files added** - Coverage now tracks previously untested files
|
||||
2. **Statement weighting** - Large untested functions (like `Repository()` at 0% in middleware) lower overall package percentage
|
||||
3. **More comprehensive tracking** - Better coverage analysis reveals gaps that were previously invisible
|
||||
|
||||
**Specific file-level improvements (hidden by package averages):**
|
||||
- `pkg/appview/middleware/auth.go`: 98.8% average (excellent)
|
||||
- `pkg/appview/middleware/registry.go`: 90.8% average (excellent)
|
||||
- `pkg/appview/storage/manifest_store.go`: 0% → 85%+ (critical improvement)
|
||||
- `pkg/atproto/client.go`: 74.8% average (good)
|
||||
- `pkg/atproto/resolver.go`: 74.5% average (good)
|
||||
|
||||
**Key Insight:** Focus on file-level coverage for critical paths rather than package averages, as new comprehensive testing can paradoxically lower package percentages while improving actual test quality.
|
||||
|
||||
---
|
||||
|
||||
## Recently Completed ✅
|
||||
|
||||
### ✅ pkg/appview/storage/manifest_store.go (85%+ coverage) - **COMPLETED** 🎉
|
||||
|
||||
**Achievement:** Improved from 0% to 85%+ (Critical Priority #1 from previous plan)
|
||||
|
||||
**Well-covered functions:**
|
||||
- `NewManifestStore()` - 100% ✅
|
||||
- `Exists()` - 100% ✅
|
||||
- `Get()` - 85.7% ✅
|
||||
- `Put()` - 75.5% ✅
|
||||
- `Delete()` - 100% ✅
|
||||
- `digestToRKey()` - 100% ✅
|
||||
- `GetLastFetchedHoldDID()` - 100% ✅
|
||||
- `extractConfigLabels()` - 90.0% ✅
|
||||
- `resolveDIDToHTTPSEndpoint()` - 100% ✅
|
||||
|
||||
**Why This Was Critical:**
|
||||
- Core OCI manifest operations (store/retrieve/delete)
|
||||
- ATProto record conversion
|
||||
- Digest-based addressing
|
||||
- Essential for registry functionality
|
||||
|
||||
**Remaining gaps:**
|
||||
- `notifyHoldAboutManifest()` - 0% (background notification, less critical)
|
||||
- `refreshReadmeCache()` - 11.8% (UI feature, lower priority)
|
||||
|
||||
## Critical Priority: Core Registry Functionality
|
||||
|
||||
These components are essential to registry operation and still need coverage.
|
||||
|
||||
### ⭐ pkg/appview/storage (51.4% coverage) - **HIGHEST PRIORITY**
|
||||
|
||||
**Status:** Manifest operations completed ✅, blob write operations remain critical gap
|
||||
|
||||
#### proxy_blob_store.go (Partial coverage) - **HIGHEST PRIORITY** 🎯
|
||||
|
||||
**Why Critical:** Handles all blob upload/download operations for the registry
|
||||
|
||||
**Well-covered (blob reads and helpers):**
|
||||
- `NewProxyBlobStore()` - 100% ✅
|
||||
- `doAuthenticatedRequest()` - 100% ✅
|
||||
- `getPresignedURL()` - 70% ✅
|
||||
- `startMultipartUpload()` - 70% ✅
|
||||
- `getPartUploadInfo()` - 70% ✅
|
||||
- `completeMultipartUpload()` - 75% ✅
|
||||
- `abortMultipartUpload()` - 70.6% ✅
|
||||
- `Get()` - 68.8% ✅
|
||||
- `Open()` - 62.5% ✅
|
||||
|
||||
**Needs improvement:**
|
||||
- `Stat()` - 26.3% 📊
|
||||
- `checkReadAccess()` - 25.0% 📊
|
||||
|
||||
**Critical gaps (0% coverage):**
|
||||
- `Put()` - Main upload entry point (CRITICAL)
|
||||
- `Create()` - Blob creation (CRITICAL)
|
||||
- `Delete()` - Blob deletion
|
||||
- `ServeBlob()` - Blob serving
|
||||
- `Resume()` - Upload resumption
|
||||
- `checkWriteAccess()` - Write authorization
|
||||
|
||||
**Writer interface (0% coverage - CRITICAL for uploads):**
|
||||
- `Write()` - Write data to multipart upload
|
||||
- `flushPart()` - Flush buffered part
|
||||
- `ReadFrom()` - io.ReaderFrom implementation
|
||||
- `Commit()` - Finalize upload
|
||||
- `Cancel()` - Cancel upload
|
||||
- `Close()` - Close writer
|
||||
- `Size()` - Get written size
|
||||
- `ID()` - Get upload ID
|
||||
- `StartedAt()` - Get start time
|
||||
- `Seek()` - Seek in upload
|
||||
|
||||
**Test Scenarios Needed:**
|
||||
1. Full multipart upload flow: `Put()` → `Create()` → `Write()` → `Commit()`
|
||||
2. Large blob upload with multiple parts
|
||||
3. Upload cancellation and cleanup
|
||||
4. Error handling for failed uploads
|
||||
5. Upload resumption with `Resume()`
|
||||
6. Write authorization checks
|
||||
7. Delete operations
|
||||
|
||||
#### routing_repository.go (Partial coverage) - **HIGH PRIORITY**
|
||||
|
||||
**Current coverage:**
|
||||
- `Manifests()` - Returns manifest store (mostly tested via manifest_store tests)
|
||||
- `Blobs()` - 0% coverage (blob routing logic untested)
|
||||
- `Repository()` - 0% coverage (wrapper method, lower priority)
|
||||
|
||||
**Test Scenarios Needed:**
|
||||
- Blob routing using cached hold DID (pull scenario)
|
||||
- Blob routing using discovered hold DID (push scenario)
|
||||
- Error handling for missing hold
|
||||
- Hold cache integration
|
||||
|
||||
#### crew.go (11.1% coverage) - **MEDIUM PRIORITY**
|
||||
**Functions:**
|
||||
- `EnsureCrewMembership()` - 11.1%
|
||||
- `requestCrewMembership()` - 0%
|
||||
|
||||
**Test Scenarios Needed:**
|
||||
- Valid crew member with permissions
|
||||
- Crew member without required permission
|
||||
- Non-member access denial
|
||||
- Crew membership request flow
|
||||
|
||||
#### hold_cache.go (93% coverage) - **EXCELLENT** ✅
|
||||
|
||||
**Well-covered:**
|
||||
- `init()` - 80% ✅
|
||||
- `GetGlobalHoldCache()` - 100% ✅
|
||||
- `Set()` - 100% ✅
|
||||
- `Get()` - 100% ✅
|
||||
- `Cleanup()` - 100% ✅
|
||||
|
||||
---
|
||||
|
||||
## High Priority: Supporting Infrastructure
|
||||
|
||||
### 🔴 pkg/auth/oauth (48.3% coverage, improved from 40.4%)
|
||||
|
||||
OAuth implementation has test files but many functions remain untested.
|
||||
|
||||
#### refresher.go (Partial coverage)
|
||||
|
||||
**Well-covered:**
|
||||
- `NewRefresher()` - 100% ✅
|
||||
- `SetUISessionStore()` - 100% ✅
|
||||
|
||||
**Critical gaps (0% coverage):**
|
||||
- `GetSession()` - 0% (CRITICAL - main session retrieval)
|
||||
- `resumeSession()` - 0% (CRITICAL - session resumption)
|
||||
- `InvalidateSession()` - 0%
|
||||
- `GetSessionID()` - 0%
|
||||
|
||||
**Test Scenarios Needed:**
|
||||
- Session retrieval and caching
|
||||
- Token refresh flow
|
||||
- Concurrent refresh handling (per-DID locking)
|
||||
- Cache expiration
|
||||
- Error handling for failed refreshes
|
||||
|
||||
#### server.go (Partial coverage)
|
||||
|
||||
**Well-covered:**
|
||||
- `NewServer()` - 100% ✅
|
||||
- `SetRefresher()` - 100% ✅
|
||||
- `SetUISessionStore()` - 100% ✅
|
||||
- `SetPostAuthCallback()` - 100% ✅
|
||||
- `renderRedirectToSettings()` - 80.0% ✅
|
||||
- `renderError()` - 83.3% ✅
|
||||
|
||||
**Critical gaps:**
|
||||
- `ServeAuthorize()` - 36.8% (needs more coverage)
|
||||
- `ServeCallback()` - 16.3% (CRITICAL - main OAuth callback handler)
|
||||
|
||||
**Test Scenarios Needed:**
|
||||
- Authorization flow initiation
|
||||
- Callback handling with valid code
|
||||
- Error handling for invalid state/code
|
||||
- DPoP proof validation
|
||||
- State parameter validation
|
||||
|
||||
#### interactive.go (41.7% coverage)
|
||||
**Function:**
|
||||
- `InteractiveFlowWithCallback()` - 41.7%
|
||||
|
||||
**Test Scenarios Needed:**
|
||||
- Two-phase callback setup
|
||||
- Browser interaction flow
|
||||
- Callback server lifecycle
|
||||
|
||||
#### client.go (Excellent coverage) ✅
|
||||
|
||||
**Well-covered:**
|
||||
- `NewApp()` - 100% ✅
|
||||
- `NewAppWithScopes()` - 100% ✅
|
||||
- `NewClientConfigWithScopes()` - 80.0% ✅
|
||||
- `GetConfig()` - 100% ✅
|
||||
- `StartAuthFlow()` - 75.0% ✅
|
||||
- `ClientIDWithScopes()` - 75.0% ✅
|
||||
- `RedirectURI()` - 100% ✅
|
||||
- `GetDefaultScopes()` - 100% ✅
|
||||
- `ScopesMatch()` - 100% ✅
|
||||
|
||||
**Improved (from previous 0%):**
|
||||
- `ProcessCallback()` - Improved coverage
|
||||
- `ResumeSession()` - Improved coverage
|
||||
- `GetClientApp()` - Improved coverage
|
||||
- `Directory()` - Improved coverage (directory_test.go added)
|
||||
|
||||
#### store.go (Good coverage, some gaps)
|
||||
|
||||
**Well-covered:**
|
||||
- `NewFileStore()` - 100% ✅
|
||||
- `GetSession()` - 100% ✅
|
||||
- `SaveSession()` - 100% ✅
|
||||
|
||||
**Gaps:**
|
||||
- `GetDefaultStorePath()` - 30.0%
|
||||
|
||||
#### browser.go (Improved coverage) 🎯
|
||||
**Function:**
|
||||
- `OpenBrowser()` - Improved coverage (browser_test.go enhanced)
|
||||
|
||||
**Note:** Browser interaction testing improved, though full CI testing remains challenging
|
||||
|
||||
---
|
||||
|
||||
### 🔴 pkg/appview/db (41.2% coverage, decreased from 44.8%)
|
||||
|
||||
Database layer has test files but many functions remain untested. Coverage decrease likely due to additional code paths being tracked in existing tests.
|
||||
|
||||
#### queries.go (0% coverage for most functions)
|
||||
**Functions:**
|
||||
- Repository queries
|
||||
- Star counting
|
||||
- Pull counting
|
||||
- Search queries
|
||||
|
||||
**Test Scenarios Needed:**
|
||||
- Repository listing with pagination
|
||||
- Search functionality
|
||||
- Aggregation queries
|
||||
- Error handling
|
||||
|
||||
#### session_store.go (0% coverage)
|
||||
**Functions:**
|
||||
- Session creation and retrieval
|
||||
- Session expiration
|
||||
- Session deletion
|
||||
|
||||
**Test Scenarios Needed:**
|
||||
- Session lifecycle
|
||||
- Expiration handling
|
||||
- Cleanup of expired sessions
|
||||
- Concurrent session access
|
||||
|
||||
#### device_store.go (📊 Partial coverage)
|
||||
**Functions:**
|
||||
- OAuth device flow storage
|
||||
- Has test file but many functions still at 0%
|
||||
|
||||
**Test Scenarios Needed:**
|
||||
- User code lookups
|
||||
- Status updates (pending → approved)
|
||||
- Expiration handling
|
||||
- Delete operations
|
||||
|
||||
#### hold_store.go (📊 Partial coverage)
|
||||
**Needs integration tests for cache invalidation**
|
||||
|
||||
#### oauth_store.go (📊 Partial coverage)
|
||||
**Uncovered Functions:**
|
||||
- `GetAuthRequestInfo()` - 0%
|
||||
- `DeleteAuthRequestInfo()` - 0%
|
||||
- `SaveAuthRequestInfo()` - 0%
|
||||
|
||||
#### annotations.go (0% coverage)
|
||||
**Functions:**
|
||||
- Repository annotations and metadata
|
||||
|
||||
#### readonly.go (0% coverage)
|
||||
**Functions:**
|
||||
- Read-only database wrapper
|
||||
|
||||
---
|
||||
|
||||
## Medium Priority: Supporting Features
|
||||
|
||||
### 🟡 pkg/appview/jetstream (16.7% coverage)
|
||||
|
||||
Event processing for real-time updates.
|
||||
|
||||
#### worker.go (0% coverage)
|
||||
**Functions:**
|
||||
- Jetstream event consumption
|
||||
- Event routing to handlers
|
||||
- Repository indexing
|
||||
|
||||
#### backfill.go (0% coverage)
|
||||
**Functions:**
|
||||
- PDS repository backfilling
|
||||
- Batch processing
|
||||
|
||||
#### processor.go (📊 Partial coverage)
|
||||
**Needs more comprehensive testing**
|
||||
|
||||
---
|
||||
|
||||
### 🟡 pkg/hold/oci (69.9% coverage)
|
||||
|
||||
Multipart upload implementation for hold service. Has good coverage overall but some functions still need tests.
|
||||
|
||||
#### xrpc.go (📊 Partial coverage)
|
||||
**Functions:**
|
||||
- Multipart upload XRPC endpoints
|
||||
- Most functions tested, but edge cases need coverage
|
||||
|
||||
---
|
||||
|
||||
### 🟡 pkg/hold/pds (57.8% coverage)
|
||||
|
||||
Embedded PDS implementation. Has good test coverage for critical parts, but supporting functions need work.
|
||||
|
||||
#### repomgr.go (📊 Partial coverage)
|
||||
**Many functions still at 0% coverage**
|
||||
|
||||
#### profile.go (0% coverage)
|
||||
**Functions:**
|
||||
- Sailor profile management
|
||||
|
||||
#### layer.go (📊 Partial coverage)
|
||||
#### auth.go (0% coverage)
|
||||
#### events.go (📊 Partial coverage)
|
||||
|
||||
---
|
||||
|
||||
### 🟡 pkg/auth (55.8% coverage)
|
||||
|
||||
#### hold_local.go (0% coverage)
|
||||
**Functions:**
|
||||
- Local hold authorization
|
||||
|
||||
#### session.go (0% coverage)
|
||||
**Functions:**
|
||||
- Session management
|
||||
|
||||
#### hold_remote.go (📊 Partial coverage)
|
||||
**Needs more edge case testing**
|
||||
|
||||
---
|
||||
|
||||
### 🟡 pkg/appview/readme (16.7% coverage)
|
||||
|
||||
README fetching and caching. Less critical but still needs work.
|
||||
|
||||
#### cache.go (0% coverage)
|
||||
#### fetcher.go (📊 Partial coverage)
|
||||
|
||||
---
|
||||
|
||||
### 🟡 pkg/appview/routes (33.3% coverage)
|
||||
|
||||
#### routes.go (📊 Partial coverage)
|
||||
**Needs integration tests for route registration and middleware chains**
|
||||
|
||||
---
|
||||
|
||||
## Low Priority: Web UI and Supporting Features
|
||||
|
||||
### 🟢 pkg/appview/handlers (19.7% coverage, improved from 2.1%) 🎯
|
||||
|
||||
Web UI handlers. Less critical than core registry functionality but still important for user experience.
|
||||
|
||||
**Status:** Significant improvement (+17.6%)! Many handlers now have improved test coverage.
|
||||
|
||||
**Improved coverage:**
|
||||
- Multiple handler functions now have better test coverage
|
||||
- Common patterns across handlers now tested
|
||||
|
||||
**Files with partial coverage:**
|
||||
- `common.go` (📊)
|
||||
- `device.go` (📊)
|
||||
- `auth.go` (📊)
|
||||
- `repository.go` (📊)
|
||||
- `search.go` (📊)
|
||||
- `settings.go` (📊)
|
||||
- `user.go` (📊)
|
||||
- `images.go` (📊)
|
||||
- `home.go` (📊)
|
||||
- `install.go` (📊)
|
||||
- `logout.go` (📊)
|
||||
- `manifest_health.go` (📊)
|
||||
- `api.go` (📊)
|
||||
|
||||
**Note:** While individual files may still show gaps, overall handler package coverage has improved significantly.
|
||||
|
||||
---
|
||||
|
||||
### 🟢 pkg/appview/holdhealth (66.1% coverage)
|
||||
|
||||
Hold health checking. Adequate coverage overall.
|
||||
|
||||
#### worker.go (📊 Partial coverage)
|
||||
**Could use more edge case testing**
|
||||
|
||||
---
|
||||
|
||||
### 🟢 pkg/appview/ui.go (0% coverage)
|
||||
|
||||
UI initialization and setup. Low priority.
|
||||
|
||||
---
|
||||
|
||||
## Recommended Testing Order
|
||||
|
||||
### Phase 1: Critical Infrastructure ✅ **NEARLY COMPLETE** (Target: 45% overall)
|
||||
|
||||
**Completed:**
|
||||
1. ✅ `pkg/appview/middleware/auth.go` - Authentication (0% → 98.8% avg)
|
||||
2. ✅ `pkg/appview/middleware/registry.go` - Core routing (0% → 90.8% avg)
|
||||
3. ✅ `pkg/atproto/client.go` - PDS client (0% → 74.8%)
|
||||
4. ✅ `pkg/atproto/resolver.go` - Identity resolution (0% → 74.5%)
|
||||
5. ✅ `pkg/appview/storage/manifest_store.go` - Manifest operations (0% → 85%+) **🎉 COMPLETED**
|
||||
6. ✅ `pkg/appview/storage/profile.go` - Sailor profiles (NEW → 98%+) **🎉 COMPLETED**
|
||||
|
||||
**Remaining (HIGHEST PRIORITY):**
|
||||
7. ⭐⭐⭐ `pkg/appview/storage/proxy_blob_store.go` - Blob write operations **CRITICAL**
|
||||
- `Put()`, `Create()`, Writer interface (0% → 80%+)
|
||||
- Essential for docker push operations
|
||||
8. ⭐ `pkg/appview/storage/routing_repository.go` - Blob routing
|
||||
- `Blobs()` method (0% → 80%+)
|
||||
|
||||
**Current Status:** Overall coverage improved from 37.7% → 39.0% (+1.3%). On track for 45% with Phase 1 completion.
|
||||
|
||||
### Phase 2: Supporting Infrastructure (Target: 50% overall)
|
||||
|
||||
**In Progress:**
|
||||
9. 🔴 `pkg/appview/db/*` - Database layer (41.2%, needs improvement)
|
||||
- queries.go, session_store.go, device_store.go
|
||||
10. 🔴 `pkg/auth/oauth/refresher.go` - Token refresh (Partial → 70%+)
|
||||
- `GetSession()`, `resumeSession()` (currently 0%)
|
||||
11. 🔴 `pkg/auth/oauth/server.go` - OAuth endpoints (50.7%, continue improvements)
|
||||
- `ServeCallback()` at 16.3% needs major improvement
|
||||
12. 🔴 `pkg/appview/storage/crew.go` - Crew validation (11.1% → 80%+)
|
||||
13. 🔴 `pkg/auth/*` - Continue auth improvements (55.7% → 70%+)
|
||||
- hold_remote.go gaps, session.go
|
||||
14. 🎯 `pkg/atproto/*` - ATProto improvements (27.8%, continue adding tests)
|
||||
- directory_test.go, endpoints_test.go, utils_test.go added ✅
|
||||
|
||||
### Phase 3: Event Processing (Target: 55% overall)
|
||||
15. 🟡 `pkg/appview/jetstream/worker.go` - Event processing (0% → 70%+)
|
||||
16. 🟡 `pkg/appview/jetstream/backfill.go` - Backfill logic (0% → 70%+)
|
||||
17. 🟡 `pkg/hold/pds/*` - Fill in gaps in embedded PDS
|
||||
18. 🟡 `pkg/hold/oci/*` - OCI multipart upload improvements
|
||||
|
||||
### Phase 4: Web UI (Target: 60% overall)
|
||||
19. 🎯 `pkg/appview/handlers/*` - Web handlers (19.7%, greatly improved from 2.1%) **+17.6%** ✅
|
||||
- Continue adding handler tests to reach 50%+
|
||||
20. 🟢 `pkg/appview/routes/*` - Route registration (10.4% → 50%+)
|
||||
|
||||
---
|
||||
|
||||
## Testing Best Practices for This Codebase
|
||||
|
||||
### For Middleware Tests
|
||||
- Mock HTTP handlers to test middleware wrapping
|
||||
- Use `httptest.ResponseRecorder` for response inspection
|
||||
- Test context injection and extraction
|
||||
- Mock ATProto client for PDS interactions
|
||||
|
||||
### For Storage Tests
|
||||
- Mock `distribution` interfaces (BlobStore, ManifestService)
|
||||
- Use in-memory implementations where possible
|
||||
- Test error propagation from underlying storage
|
||||
- Mock hold XRPC endpoints
|
||||
|
||||
### For Database Tests
|
||||
- Use in-memory SQLite (`:memory:`)
|
||||
- Run migrations in test setup
|
||||
- Clean up after each test
|
||||
- Test concurrent operations where relevant
|
||||
|
||||
### For Authorization Tests
|
||||
- Mock ATProto client for crew lookups
|
||||
- Test both legacy and new hold models
|
||||
- Test permission combinations
|
||||
- Mock service token acquisition
|
||||
|
||||
### For OAuth Tests
|
||||
- Mock HTTP servers for PDS endpoints
|
||||
- Test DPoP proof generation/validation
|
||||
- Test PAR request flow
|
||||
- Mock browser interaction
|
||||
|
||||
### For ATProto Tests
|
||||
- Mock HTTP responses for resolver tests
|
||||
- Test DID document parsing
|
||||
- Mock XRPC endpoints
|
||||
- Test authentication flows
|
||||
|
||||
---
|
||||
|
||||
## Coverage Goals
|
||||
|
||||
**Current:** 39.0% (improved from 37.7%, +1.3%)
|
||||
**Previous:** 37.7% (improved from 33.5%, +4.2%)
|
||||
**Total improvement:** 39.0% vs 31.2% baseline = **+7.8%**
|
||||
|
||||
**Top Packages by Coverage:**
|
||||
- ✅ `pkg/hold`: 98.0% (excellent)
|
||||
- ✅ `pkg/s3`: 97.4% (excellent)
|
||||
- ✅ `pkg/appview/licenses`: 93.0% (excellent)
|
||||
- ✅ `pkg/appview`: 81.8% (excellent)
|
||||
- ✅ `pkg/logging`: 75.0% (good)
|
||||
|
||||
**Key File-Level Achievements:**
|
||||
- ✅ `pkg/appview/middleware/auth.go`: 98.8% avg (excellent)
|
||||
- ✅ `pkg/appview/middleware/registry.go`: 90.8% avg (excellent)
|
||||
- ✅ `pkg/appview/storage/manifest_store.go`: 85%+ (CRITICAL improvement from 0%)
|
||||
- ✅ `pkg/appview/storage/profile.go`: 98%+ (new file, excellent)
|
||||
- ✅ `pkg/atproto/client.go`: 74.8% (good)
|
||||
- ✅ `pkg/atproto/resolver.go`: 74.5% (good)
|
||||
|
||||
**Packages Needing Work:**
|
||||
- 🟡 `pkg/auth/token`: 68.8% (good)
|
||||
- 🟡 `pkg/appview/middleware`: 57.8% (package avg lowered by Repository())
|
||||
- 🟡 `pkg/auth`: 55.7% (stable)
|
||||
- 🟡 `pkg/hold/oci`: 51.9% (needs work)
|
||||
- 🟡 `pkg/appview/storage`: 51.4% (critical gaps remain)
|
||||
- 🟡 `pkg/auth/oauth`: 50.7% (improving, was 48.3%) 🎯 **+2.4%**
|
||||
- 🟡 `pkg/hold/pds`: 47.2% (needs work)
|
||||
- 🟡 `pkg/appview/db`: 41.2% (decreased from 44.8%, tracking more code paths) 🔴 **-3.6%**
|
||||
- 🟡 `pkg/atproto`: 27.8% (improving, was 26.1%) 🎯 **+1.7%**
|
||||
- 🟡 `pkg/appview/handlers`: 19.7% (greatly improved from 2.1%) 🎯 **+17.6%**
|
||||
|
||||
**Short-term Goal (Phase 1 completion):** 45%+
|
||||
- ✅ Cover all critical middleware (**COMPLETE**)
|
||||
- ✅ Cover ATProto client and resolver (**COMPLETE**)
|
||||
- ✅ Cover storage manifest operations (**COMPLETE** 🎉)
|
||||
- ⭐ Cover storage blob write operations (**HIGHEST PRIORITY** - Put/Create/Writer)
|
||||
- ⭐ Cover storage blob routing (**HIGH PRIORITY**)
|
||||
|
||||
**Medium-term Goal (Phase 2):** 50%+
|
||||
- Complete remaining storage layer (blob writes)
|
||||
- Improve database layer coverage (44.8% → 70%+)
|
||||
- Complete OAuth implementation (refresher.GetSession, server.ServeCallback)
|
||||
- Add storage crew validation
|
||||
|
||||
**Long-term Goal (Phase 3-4):** 55-60%
|
||||
- Event processing (jetstream)
|
||||
- Web UI handlers (currently 2.1%)
|
||||
- Comprehensive integration tests
|
||||
|
||||
**Realistic Target:** 55-60% (excluding some UI handlers and integration-heavy code)
|
||||
|
||||
**Note:** Package percentages may decrease as new files are added to coverage tracking, but this reflects improved test comprehensiveness, not regression. Focus on file-level coverage for critical paths.
|
||||
|
||||
---
|
||||
|
||||
## Notes
|
||||
|
||||
- **Test files exist:** Most files in `pkg/` now have corresponding `*_test.go` files, but many functions remain at 0% coverage
|
||||
- **SQLite vs PostgreSQL:** Current tests use SQLite. For production multi-instance deployments, consider PostgreSQL tests
|
||||
- **Concurrency:** Many components (cache, token refresher, OAuth) have concurrency concerns that need explicit testing
|
||||
- **Integration Tests:** Consider adding integration tests that spin up a real PDS + hold service for end-to-end validation
|
||||
- **Mock Strategy:** Use interfaces (like `atproto.Client`) to enable easy mocking. Consider a mock package in `pkg/testing/`
|
||||
- **Critical path first:** Focus on middleware and storage layers before web UI, as these are essential for core registry operations
|
||||
321
docs/appview.md
Normal file
321
docs/appview.md
Normal file
@@ -0,0 +1,321 @@
|
||||
# ATCR AppView
|
||||
|
||||
> The registry frontend component of ATCR (ATProto Container Registry)
|
||||
|
||||
## Overview
|
||||
|
||||
**AppView** is the frontend server component of ATCR. It serves as the OCI-compliant registry API endpoint and web interface that Docker clients interact with when pushing and pulling container images.
|
||||
|
||||
### What AppView Does
|
||||
|
||||
AppView is the orchestration layer that:
|
||||
|
||||
- **Serves the OCI Distribution API V2** - Compatible with Docker, containerd, podman, and all OCI clients
|
||||
- **Resolves ATProto identities** - Converts handles (`alice.bsky.social`) and DIDs (`did:plc:xyz123`) to PDS endpoints
|
||||
- **Routes manifests** - Stores container image manifests as ATProto records in users' Personal Data Servers
|
||||
- **Routes blobs** - Proxies blob (layer) operations to hold services for S3-compatible storage
|
||||
- **Provides web UI** - Browse repositories, search images, view tags, track pull counts, manage stars
|
||||
- **Manages authentication** - Validates OAuth tokens and issues registry JWTs to Docker clients
|
||||
|
||||
### The ATCR Ecosystem
|
||||
|
||||
AppView is the **frontend** of a multi-component architecture:
|
||||
|
||||
1. **AppView** (this component) - Registry API + web interface
|
||||
2. **[Hold Service](https://atcr.io/r/evan.jarrett.net/atcr-hold)** - Storage backend with embedded PDS for blob storage
|
||||
3. **Credential Helper** - Client-side tool for ATProto OAuth authentication
|
||||
|
||||
**Data flow:**
|
||||
```
|
||||
Docker Client → AppView (resolves identity) → User's PDS (stores manifest)
|
||||
↓
|
||||
Hold Service (stores blobs in S3/Storj/etc.)
|
||||
```
|
||||
|
||||
Manifests (small JSON metadata) live in users' ATProto PDS, while blobs (large binary layers) live in hold services. AppView orchestrates the routing between these components.
|
||||
|
||||
## When to Run Your Own AppView
|
||||
|
||||
Most users can simply use **https://atcr.io** - you don't need to run your own AppView.
|
||||
|
||||
**Run your own AppView if you want to:**
|
||||
- Host a private/organizational container registry with ATProto authentication
|
||||
- Run a public registry for a specific community
|
||||
- Customize the registry UI or policies
|
||||
- Maintain full control over registry infrastructure
|
||||
|
||||
**Prerequisites:**
|
||||
- A running [Hold service](https://atcr.io/r/evan.jarrett.net/atcr-hold) (required for blob storage)
|
||||
- (Optional) Domain name with SSL/TLS certificates for production
|
||||
- (Optional) Access to ATProto Jetstream for real-time indexing
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Using Docker Compose
|
||||
|
||||
The fastest way to run AppView alongside a Hold service:
|
||||
|
||||
```bash
|
||||
# Clone repository
|
||||
git clone https://tangled.org/@evan.jarrett.net/at-container-registry
|
||||
cd atcr
|
||||
|
||||
# Copy and configure environment
|
||||
cp .env.appview.example .env.appview
|
||||
# Edit .env.appview - set ATCR_DEFAULT_HOLD_DID (see Configuration below)
|
||||
|
||||
# Start services
|
||||
docker-compose up -d
|
||||
|
||||
# Verify
|
||||
curl http://localhost:5000/v2/
|
||||
```
|
||||
|
||||
### Minimal Configuration
|
||||
|
||||
At minimum, you must set:
|
||||
|
||||
```bash
|
||||
# Required: Default hold service for blob storage
|
||||
ATCR_DEFAULT_HOLD_DID=did:web:127.0.0.1:8080
|
||||
|
||||
# Recommended for production
|
||||
ATCR_BASE_URL=https://registry.example.com
|
||||
ATCR_HTTP_ADDR=:5000
|
||||
```
|
||||
|
||||
See **Configuration Reference** below for all options.
|
||||
|
||||
## Configuration Reference
|
||||
|
||||
AppView is configured entirely via environment variables. Load them with:
|
||||
```bash
|
||||
source .env.appview
|
||||
./bin/atcr-appview serve
|
||||
```
|
||||
|
||||
Or via Docker Compose (recommended).
|
||||
|
||||
### Server Configuration
|
||||
|
||||
#### `ATCR_HTTP_ADDR`
|
||||
- **Default:** `:5000`
|
||||
- **Description:** HTTP listen address for the registry API and web UI
|
||||
- **Example:** `:5000`, `:8080`, `0.0.0.0:5000`
|
||||
|
||||
#### `ATCR_BASE_URL`
|
||||
- **Default:** Auto-detected from `ATCR_HTTP_ADDR` (e.g., `http://127.0.0.1:5000`)
|
||||
- **Description:** Public URL for the AppView service. Used to generate OAuth redirect URIs and JWT realm claims.
|
||||
- **Development:** Auto-detection works fine (`http://127.0.0.1:5000`)
|
||||
- **Production:** Set to your public URL (e.g., `https://atcr.example.com`)
|
||||
- **Example:** `https://atcr.io`, `http://127.0.0.1:5000`
|
||||
|
||||
#### `ATCR_SERVICE_NAME`
|
||||
- **Default:** Derived from `ATCR_BASE_URL` hostname, or `atcr.io`
|
||||
- **Description:** Service name used for JWT `service` and `issuer` fields. Controls token scope.
|
||||
- **Example:** `atcr.io`, `registry.example.com`
|
||||
|
||||
#### `ATCR_DEBUG_ADDR`
|
||||
- **Default:** `:5001`
|
||||
- **Description:** Debug listen address for pprof debugging endpoints
|
||||
- **Example:** `:5001`, `:6060`
|
||||
|
||||
### Storage Configuration
|
||||
|
||||
#### `ATCR_DEFAULT_HOLD_DID` ⚠️ REQUIRED
|
||||
- **Default:** None (required)
|
||||
- **Description:** DID of the default hold service for blob storage. Used when users don't have their own hold configured in their sailor profile. AppView routes all blob operations to this hold.
|
||||
- **Format:** `did:web:hostname[:port]`
|
||||
- **Docker Compose:** `did:web:atcr-hold:8080` (internal Docker network)
|
||||
- **Local dev:** `did:web:127.0.0.1:8080`
|
||||
- **Production:** `did:web:hold01.atcr.io`
|
||||
- **Note:** This hold must be reachable from AppView. To find a hold's DID, visit `https://hold-url/.well-known/did.json`
|
||||
|
||||
### Authentication Configuration
|
||||
|
||||
#### `ATCR_AUTH_KEY_PATH`
|
||||
- **Default:** `/var/lib/atcr/auth/private-key.pem`
|
||||
- **Description:** Path to JWT signing private key (RSA). Auto-generated if missing.
|
||||
- **Note:** Keep this secure - it signs all registry JWTs issued to Docker clients
|
||||
|
||||
#### `ATCR_AUTH_CERT_PATH`
|
||||
- **Default:** `/var/lib/atcr/auth/private-key.crt`
|
||||
- **Description:** Path to JWT signing certificate. Auto-generated if missing.
|
||||
- **Note:** Paired with `ATCR_AUTH_KEY_PATH`
|
||||
|
||||
#### `ATCR_TOKEN_EXPIRATION`
|
||||
- **Default:** `300` (5 minutes)
|
||||
- **Description:** JWT token expiration in seconds. Registry JWTs are short-lived for security.
|
||||
- **Recommendation:** Keep between 300-900 seconds (5-15 minutes)
|
||||
|
||||
### Web UI Configuration
|
||||
|
||||
#### `ATCR_UI_ENABLED`
|
||||
- **Default:** `true`
|
||||
- **Description:** Enable the web interface. Set to `false` to run registry API only (no web UI, no database).
|
||||
- **Use case:** API-only deployments where you don't need the browsing interface
|
||||
|
||||
#### `ATCR_UI_DATABASE_PATH`
|
||||
- **Default:** `/var/lib/atcr/ui.db`
|
||||
- **Description:** SQLite database path for UI data (OAuth sessions, stars, pull counts, repository metadata)
|
||||
- **Note:** For multi-instance deployments, use PostgreSQL (see production docs)
|
||||
|
||||
### Logging Configuration
|
||||
|
||||
#### `ATCR_LOG_LEVEL`
|
||||
- **Default:** `info`
|
||||
- **Options:** `debug`, `info`, `warn`, `error`
|
||||
- **Description:** Log verbosity level
|
||||
- **Development:** Use `debug` for detailed troubleshooting
|
||||
- **Production:** Use `info` or `warn`
|
||||
|
||||
#### `ATCR_LOG_FORMATTER`
|
||||
- **Default:** `text`
|
||||
- **Options:** `text`, `json`
|
||||
- **Description:** Log output format
|
||||
- **Production:** Use `json` for structured logging (easier to parse with log aggregators)
|
||||
|
||||
### Hold Health Check Configuration
|
||||
|
||||
AppView periodically checks if hold services are reachable and caches results to display health indicators in the UI.
|
||||
|
||||
#### `ATCR_HEALTH_CHECK_INTERVAL`
|
||||
- **Default:** `15m`
|
||||
- **Description:** How often to check health of hold endpoints in the background
|
||||
- **Format:** Duration string (e.g., `5m`, `15m`, `30m`, `1h`)
|
||||
- **Recommendation:** 15-30 minutes for production
|
||||
|
||||
#### `ATCR_HEALTH_CACHE_TTL`
|
||||
- **Default:** `15m`
|
||||
- **Description:** How long to cache health check results before re-checking
|
||||
- **Format:** Duration string (e.g., `15m`, `30m`, `1h`)
|
||||
- **Note:** Should be >= `ATCR_HEALTH_CHECK_INTERVAL` for efficiency
|
||||
|
||||
### Jetstream Configuration (ATProto Event Streaming)
|
||||
|
||||
Jetstream provides real-time indexing of ATProto records (manifests, tags) into the AppView database for the web UI.
|
||||
|
||||
#### `JETSTREAM_URL`
|
||||
- **Default:** `wss://jetstream2.us-west.bsky.network/subscribe`
|
||||
- **Description:** Jetstream WebSocket URL for real-time ATProto events
|
||||
- **Note:** Connects to Bluesky's public Jetstream by default
|
||||
|
||||
#### `ATCR_BACKFILL_ENABLED`
|
||||
- **Default:** `false`
|
||||
- **Description:** Enable periodic sync of historical ATProto records. Set to `true` for production to ensure database completeness.
|
||||
- **Recommendation:** Enable for production AppView instances
|
||||
|
||||
#### `ATCR_RELAY_ENDPOINT`
|
||||
- **Default:** `https://relay1.us-east.bsky.network`
|
||||
- **Description:** ATProto relay endpoint for backfill sync API
|
||||
- **Note:** Used when `ATCR_BACKFILL_ENABLED=true`
|
||||
|
||||
#### `ATCR_BACKFILL_INTERVAL`
|
||||
- **Default:** `1h`
|
||||
- **Description:** How often to run backfill sync
|
||||
- **Format:** Duration string (e.g., `30m`, `1h`, `2h`, `24h`)
|
||||
|
||||
### Legacy Configuration
|
||||
|
||||
#### `TEST_MODE`
|
||||
- **Default:** `false`
|
||||
- **Description:** Enable test mode (skips some validations). Do not use in production.
|
||||
|
||||
## Web Interface Features
|
||||
|
||||
The AppView web UI provides:
|
||||
|
||||
- **Home page** - Featured repositories and recent pushes feed
|
||||
- **Repository pages** - View tags, manifests, pull instructions, health status
|
||||
- **Search** - Find repositories by owner handle or repository name
|
||||
- **User profiles** - View a user's repositories and activity
|
||||
- **Stars** - Favorite repositories (requires OAuth login)
|
||||
- **Pull counts** - Track image pull statistics
|
||||
- **Multi-arch support** - Display platform-specific manifests (linux/amd64, linux/arm64)
|
||||
- **Health indicators** - Real-time hold service reachability status
|
||||
- **Install scripts** - Host credential helper installation scripts at `/install.sh`
|
||||
|
||||
## Deployment Scenarios
|
||||
|
||||
### Public Registry (like atcr.io)
|
||||
|
||||
Open to all ATProto users:
|
||||
|
||||
```bash
|
||||
# AppView config
|
||||
ATCR_BASE_URL=https://registry.example.com
|
||||
ATCR_DEFAULT_HOLD_DID=did:web:hold01.example.com
|
||||
ATCR_UI_ENABLED=true
|
||||
ATCR_BACKFILL_ENABLED=true
|
||||
|
||||
# Hold config (linked hold service)
|
||||
HOLD_PUBLIC=true # Allow public pulls
|
||||
HOLD_ALLOW_ALL_CREW=true # Allow all authenticated users to push
|
||||
```
|
||||
|
||||
### Private Organizational Registry
|
||||
|
||||
Restricted to crew members only:
|
||||
|
||||
```bash
|
||||
# AppView config
|
||||
ATCR_BASE_URL=https://registry.internal.example.com
|
||||
ATCR_DEFAULT_HOLD_DID=did:web:hold.internal.example.com
|
||||
ATCR_UI_ENABLED=true
|
||||
|
||||
# Hold config (linked hold service)
|
||||
HOLD_PUBLIC=false # Require auth for pulls
|
||||
HOLD_ALLOW_ALL_CREW=false # Only owner + explicit crew can push
|
||||
HOLD_OWNER=did:plc:your-org-did # Organization DID
|
||||
```
|
||||
|
||||
### Development/Testing
|
||||
|
||||
Local Docker Compose setup:
|
||||
|
||||
```bash
|
||||
# AppView config
|
||||
ATCR_HTTP_ADDR=:5000
|
||||
ATCR_DEFAULT_HOLD_DID=did:web:atcr-hold:8080
|
||||
ATCR_LOG_LEVEL=debug
|
||||
|
||||
# Hold config (linked hold service)
|
||||
STORAGE_DRIVER=filesystem
|
||||
STORAGE_ROOT_DIR=/tmp/atcr-hold
|
||||
HOLD_PUBLIC=true
|
||||
HOLD_ALLOW_ALL_CREW=true
|
||||
```
|
||||
|
||||
## Production Deployment
|
||||
|
||||
For production deployments with:
|
||||
- Multiple AppView instances (load balancing)
|
||||
- PostgreSQL database (instead of SQLite)
|
||||
- SSL/TLS certificates
|
||||
- Systemd service files
|
||||
- Log rotation
|
||||
- Monitoring
|
||||
|
||||
See **[deploy/README.md](https://tangled.org/@evan.jarrett.net/at-container-registry/blob/main/deploy/README.md)** for comprehensive production deployment guide.
|
||||
|
||||
### Quick Production Checklist
|
||||
|
||||
Before going to production:
|
||||
|
||||
- [ ] Set `ATCR_BASE_URL` to your public HTTPS URL
|
||||
- [ ] Set `ATCR_DEFAULT_HOLD_DID` to a production hold service
|
||||
- [ ] Enable Jetstream backfill (`ATCR_BACKFILL_ENABLED=true`)
|
||||
- [ ] Use `ATCR_LOG_FORMATTER=json` for structured logging
|
||||
- [ ] Secure JWT keys (`ATCR_AUTH_KEY_PATH`, `ATCR_AUTH_CERT_PATH`)
|
||||
- [ ] Configure SSL/TLS termination (nginx/Caddy/Cloudflare)
|
||||
- [ ] Set up database backups (if using SQLite, consider PostgreSQL)
|
||||
- [ ] Monitor hold health checks
|
||||
- [ ] Test OAuth flow end-to-end
|
||||
- [ ] Verify Docker push/pull works
|
||||
|
||||
## Configuration Files Reference
|
||||
|
||||
- **[.env.appview.example](https://tangled.org/@evan.jarrett.net/at-container-registry/blob/main/.env.appview.example)** - All available environment variables with documentation
|
||||
- **[deploy/.env.prod.template](https://tangled.org/@evan.jarrett.net/at-container-registry/blob/main/deploy/.env.prod.template)** - Production configuration template
|
||||
- **[deploy/README.md](https://tangled.org/@evan.jarrett.net/at-container-registry/blob/main/deploy/README.md)** - Production deployment guide
|
||||
- **[Hold Service Documentation](https://atcr.io/r/evan.jarrett.net/atcr-hold)** - Storage backend setup
|
||||
403
docs/hold.md
Normal file
403
docs/hold.md
Normal file
@@ -0,0 +1,403 @@
|
||||
# ATCR Hold Service
|
||||
|
||||
> The storage backend component of ATCR (ATProto Container Registry)
|
||||
|
||||
## Overview
|
||||
|
||||
**Hold Service** is the storage backend component of ATCR. It enables BYOS (Bring Your Own Storage) - users can store their own container image layers in their own S3, Storj, Minio, or filesystem storage. Each hold runs as a full ATProto user with an embedded PDS, exposing both standard ATProto sync endpoints and custom XRPC endpoints for OCI multipart blob uploads.
|
||||
|
||||
### What Hold Service Does
|
||||
|
||||
Hold Service is the storage layer that:
|
||||
|
||||
- **Bring Your Own Storage (BYOS)** - Store your own container image layers in your own S3, Storj, Minio, or filesystem
|
||||
- **Embedded ATProto PDS** - Each hold is a full ATProto user with its own DID, repository, and identity
|
||||
- **Custom XRPC Endpoints** - OCI-compatible multipart upload endpoints (`io.atcr.hold.*`) for blob operations
|
||||
- **Presigned URL Generation** - Creates time-limited S3 URLs for direct client-to-storage transfers (~99% bandwidth reduction)
|
||||
- **Crew Management** - Controls access via captain and crew records stored in the hold's embedded PDS
|
||||
- **Standard ATProto Sync** - Exposes com.atproto.sync.* endpoints for repository synchronization and firehose
|
||||
- **Multi-Backend Support** - Works with S3, Storj, Minio, filesystem, Azure, GCS via distribution's driver system
|
||||
- **Bluesky Integration** - Optional: Posts container image push notifications from the hold's identity to Bluesky
|
||||
|
||||
### The ATCR Ecosystem
|
||||
|
||||
Hold Service is the **storage backend** of a multi-component architecture:
|
||||
|
||||
1. **[AppView](https://atcr.io/r/evan.jarrett.net/atcr-appview)** - Registry API + web interface
|
||||
2. **Hold Service** (this component) - Storage backend with embedded PDS
|
||||
3. **Credential Helper** - Client-side tool for ATProto OAuth authentication
|
||||
|
||||
**Data flow:**
|
||||
```
|
||||
Docker Client → AppView (resolves identity) → User's PDS (stores manifest)
|
||||
↓
|
||||
Hold Service (generates presigned URL)
|
||||
↓
|
||||
S3/Storj/etc. (client uploads/downloads blobs directly)
|
||||
```
|
||||
|
||||
Manifests (small JSON metadata) live in users' ATProto PDS, while blobs (large binary layers) live in hold services. AppView orchestrates the routing, and hold services provide presigned URLs to eliminate bandwidth bottlenecks.
|
||||
|
||||
## When to Run Your Own Hold
|
||||
|
||||
Most users can push to the default hold at **https://hold01.atcr.io** - you don't need to run your own hold.
|
||||
|
||||
**Run your own hold if you want to:**
|
||||
- Control where your container layer data is stored (own S3 bucket, Storj, etc.)
|
||||
- Manage access for a team or organization via crew membership
|
||||
- Reduce bandwidth costs by using presigned URLs for direct S3 transfers
|
||||
- Run a shared hold for a community or project
|
||||
- Maintain data sovereignty (keep blobs in specific geographic regions)
|
||||
|
||||
**Prerequisites:**
|
||||
- S3-compatible storage (AWS S3, Storj, Minio, UpCloud, etc.) OR filesystem storage
|
||||
- (Optional) Domain name with SSL/TLS certificates for production
|
||||
- ATProto DID for hold owner (get from: `https://bsky.social/xrpc/com.atproto.identity.resolveHandle?handle=yourhandle.bsky.social`)
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Using Docker Compose
|
||||
|
||||
The fastest way to run Hold service with S3 storage:
|
||||
|
||||
```bash
|
||||
# Clone repository
|
||||
git clone https://tangled.org/@evan.jarrett.net/at-container-registry
|
||||
cd atcr
|
||||
|
||||
# Copy and configure environment
|
||||
cp .env.hold.example .env.hold
|
||||
# Edit .env.hold - set HOLD_PUBLIC_URL, HOLD_OWNER, S3 credentials (see Configuration below)
|
||||
|
||||
# Start hold service
|
||||
docker-compose -f docker-compose.hold.yml up -d
|
||||
|
||||
# Verify
|
||||
curl http://localhost:8080/.well-known/did.json
|
||||
```
|
||||
|
||||
### Minimal Configuration
|
||||
|
||||
At minimum, you must set:
|
||||
|
||||
```bash
|
||||
# Required: Public URL (generates did:web identity)
|
||||
HOLD_PUBLIC_URL=https://hold.example.com
|
||||
|
||||
# Required: Your ATProto DID (for captain record)
|
||||
HOLD_OWNER=did:plc:your-did-here
|
||||
|
||||
# Required: Storage driver type
|
||||
STORAGE_DRIVER=s3
|
||||
|
||||
# Required for S3: Credentials and bucket
|
||||
AWS_ACCESS_KEY_ID=your-access-key
|
||||
AWS_SECRET_ACCESS_KEY=your-secret-key
|
||||
S3_BUCKET=your-bucket-name
|
||||
|
||||
# Recommended: Database directory for embedded PDS
|
||||
HOLD_DATABASE_DIR=/var/lib/atcr-hold
|
||||
```
|
||||
|
||||
See **Configuration Reference** below for all options.
|
||||
|
||||
## Configuration Reference
|
||||
|
||||
Hold Service is configured entirely via environment variables. Load them with:
|
||||
```bash
|
||||
source .env.hold
|
||||
./bin/atcr-hold
|
||||
```
|
||||
|
||||
Or via Docker Compose (recommended).
|
||||
|
||||
### Server Configuration
|
||||
|
||||
#### `HOLD_PUBLIC_URL` ⚠️ REQUIRED
|
||||
- **Default:** None (required)
|
||||
- **Description:** Public URL of this hold service. Used to generate the hold's did:web identity. The hostname becomes the hold's DID.
|
||||
- **Format:** `https://hold.example.com` or `http://127.0.0.1:8080` (development)
|
||||
- **Example:** `https://hold01.atcr.io` → DID is `did:web:hold01.atcr.io`
|
||||
- **Note:** This URL must be reachable by AppView and Docker clients
|
||||
|
||||
#### `HOLD_SERVER_ADDR`
|
||||
- **Default:** `:8080`
|
||||
- **Description:** HTTP listen address for XRPC endpoints
|
||||
- **Example:** `:8080`, `:9000`, `0.0.0.0:8080`
|
||||
|
||||
#### `HOLD_PUBLIC`
|
||||
- **Default:** `false`
|
||||
- **Description:** Allow public blob reads (pulls) without authentication. Writes always require crew membership.
|
||||
- **Use cases:**
|
||||
- `true`: Public registry (anyone can pull, authenticated users can push if crew)
|
||||
- `false`: Private registry (authentication required for both push and pull)
|
||||
|
||||
### Storage Configuration
|
||||
|
||||
#### `STORAGE_DRIVER`
|
||||
- **Default:** `s3`
|
||||
- **Options:** `s3`, `filesystem`
|
||||
- **Description:** Storage backend type. S3 enables presigned URLs for direct client-to-storage transfers (~99% bandwidth reduction). Filesystem stores blobs locally (development/testing).
|
||||
|
||||
#### S3 Storage (when `STORAGE_DRIVER=s3`)
|
||||
|
||||
##### `AWS_ACCESS_KEY_ID` ⚠️ REQUIRED for S3
|
||||
- **Description:** S3 access key ID for authentication
|
||||
- **Example:** `AKIAIOSFODNN7EXAMPLE`
|
||||
|
||||
##### `AWS_SECRET_ACCESS_KEY` ⚠️ REQUIRED for S3
|
||||
- **Description:** S3 secret access key for authentication
|
||||
- **Example:** `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY`
|
||||
|
||||
##### `AWS_REGION`
|
||||
- **Default:** `us-east-1`
|
||||
- **Description:** S3 region
|
||||
- **AWS regions:** `us-east-1`, `us-west-2`, `eu-west-1`, etc.
|
||||
- **UpCloud regions:** `us-chi1`, `us-nyc1`, `de-fra1`, `uk-lon1`, `sg-sin1`
|
||||
|
||||
##### `S3_BUCKET` ⚠️ REQUIRED for S3
|
||||
- **Description:** S3 bucket name where blobs will be stored
|
||||
- **Example:** `atcr-blobs`, `my-company-registry-blobs`
|
||||
- **Note:** Bucket must already exist
|
||||
|
||||
##### `S3_ENDPOINT`
|
||||
- **Default:** None (uses AWS S3)
|
||||
- **Description:** S3-compatible endpoint URL for non-AWS providers
|
||||
- **Storj:** `https://gateway.storjshare.io`
|
||||
- **UpCloud:** `https://[bucket-id].upcloudobjects.com`
|
||||
- **Minio:** `http://minio:9000`
|
||||
- **Note:** Leave empty for AWS S3
|
||||
|
||||
#### Filesystem Storage (when `STORAGE_DRIVER=filesystem`)
|
||||
|
||||
##### `STORAGE_ROOT_DIR`
|
||||
- **Default:** `/var/lib/atcr/hold`
|
||||
- **Description:** Directory path where blobs will be stored on local filesystem
|
||||
- **Use case:** Development, testing, or single-server deployments
|
||||
- **Note:** Presigned URLs are not available with filesystem driver (hold proxies all blob transfers)
|
||||
|
||||
### Embedded PDS Configuration
|
||||
|
||||
#### `HOLD_DATABASE_DIR`
|
||||
- **Default:** `/var/lib/atcr-hold`
|
||||
- **Description:** Directory path for embedded PDS carstore (SQLite database). Carstore creates `db.sqlite3` inside this directory.
|
||||
- **Note:** This must be a directory path, NOT a file path. If empty, embedded PDS is disabled (not recommended - hold authorization requires PDS).
|
||||
|
||||
#### `HOLD_KEY_PATH`
|
||||
- **Default:** `{HOLD_DATABASE_DIR}/signing.key`
|
||||
- **Description:** Path to hold's signing key (secp256k1). Auto-generated on first run if missing.
|
||||
- **Note:** Keep this secure - it's used to sign ATProto commits in the hold's repository
|
||||
|
||||
### Access Control
|
||||
|
||||
#### `HOLD_OWNER`
|
||||
- **Default:** None
|
||||
- **Description:** Your ATProto DID. Used to create the captain record and add you as the first crew member with admin role.
|
||||
- **Get your DID:** `https://bsky.social/xrpc/com.atproto.identity.resolveHandle?handle=yourhandle.bsky.social`
|
||||
- **Example:** `did:plc:abc123xyz789`
|
||||
- **Note:** If set, the hold will initialize with your DID as owner on first run
|
||||
|
||||
#### `HOLD_ALLOW_ALL_CREW`
|
||||
- **Default:** `false`
|
||||
- **Description:** Allow any authenticated ATCR user to write to this hold (treat all as crew)
|
||||
- **Security model:**
|
||||
- `true`: Any authenticated user can push images (useful for shared/community holds)
|
||||
- `false`: Only hold owner and explicit crew members can push (verified via crew records in hold's PDS)
|
||||
- **Use cases:**
|
||||
- Public registry: `HOLD_PUBLIC=true, HOLD_ALLOW_ALL_CREW=true`
|
||||
- ATProto users only: `HOLD_PUBLIC=false, HOLD_ALLOW_ALL_CREW=true`
|
||||
- Private hold: `HOLD_PUBLIC=false, HOLD_ALLOW_ALL_CREW=false` (default)
|
||||
|
||||
### Bluesky Integration
|
||||
|
||||
#### `HOLD_BLUESKY_POSTS_ENABLED`
|
||||
- **Default:** `false`
|
||||
- **Description:** Create Bluesky posts when users push container images. Posts include image name, tag, size, and layer count.
|
||||
- **Note:** Posts are created from the hold's embedded PDS identity (did:web). Requires hold to be crawled by Bluesky relay.
|
||||
- **Enable relay crawl:** `./deploy/request-crawl.sh hold.example.com`
|
||||
|
||||
#### `HOLD_PROFILE_AVATAR`
|
||||
- **Default:** `https://imgs.blue/evan.jarrett.net/1TpTOdtS60GdJWBYEqtK22y688jajbQ9a5kbYRFtwuqrkBAE`
|
||||
- **Description:** URL to download avatar image for hold's Bluesky profile. Downloaded and uploaded as blob during bootstrap.
|
||||
- **Note:** Avatar is stored in hold's PDS and displayed on Bluesky profile
|
||||
|
||||
### Advanced Configuration
|
||||
|
||||
#### `TEST_MODE`
|
||||
- **Default:** `false`
|
||||
- **Description:** Enable test mode (skips some validations). Do not use in production.
|
||||
|
||||
#### `DISABLE_PRESIGNED_URLS`
|
||||
- **Default:** `false`
|
||||
- **Description:** Force proxy mode even with S3 configured (for testing). Disables presigned URL generation and routes all blob transfers through the hold service.
|
||||
- **Use case:** Testing, debugging, or environments where presigned URLs don't work
|
||||
|
||||
## XRPC Endpoints
|
||||
|
||||
Hold Service exposes two types of XRPC endpoints:
|
||||
|
||||
### ATProto Sync Endpoints (Standard)
|
||||
- `GET /.well-known/did.json` - DID document (did:web resolution)
|
||||
- `GET /xrpc/com.atproto.sync.getRepo` - Download full repository as CAR file
|
||||
- `GET /xrpc/com.atproto.sync.getBlob` - Get blob or presigned download URL
|
||||
- `GET /xrpc/com.atproto.sync.subscribeRepos` - WebSocket firehose for real-time events
|
||||
- `GET /xrpc/com.atproto.sync.listRepos` - List all repositories (single-user PDS)
|
||||
- `GET /xrpc/com.atproto.repo.describeRepo` - Repository metadata
|
||||
- `GET /xrpc/com.atproto.repo.getRecord` - Get record by collection and rkey
|
||||
- `GET /xrpc/com.atproto.repo.listRecords` - List records in collection
|
||||
- `POST /xrpc/com.atproto.repo.deleteRecord` - Delete record (owner/crew admin only)
|
||||
|
||||
### OCI Multipart Upload Endpoints (Custom)
|
||||
- `POST /xrpc/io.atcr.hold.initiateUpload` - Start multipart upload session
|
||||
- `POST /xrpc/io.atcr.hold.getPartUploadUrl` - Get presigned URL for uploading a part
|
||||
- `PUT /xrpc/io.atcr.hold.uploadPart` - Direct buffered part upload (alternative to presigned URLs)
|
||||
- `POST /xrpc/io.atcr.hold.completeUpload` - Finalize multipart upload
|
||||
- `POST /xrpc/io.atcr.hold.abortUpload` - Cancel multipart upload
|
||||
- `POST /xrpc/io.atcr.hold.notifyManifest` - Notify hold of manifest upload (creates layer records, Bluesky posts)
|
||||
|
||||
## Authorization Model
|
||||
|
||||
Hold Service uses crew membership records in its embedded PDS for access control:
|
||||
|
||||
### Read Access (Blob Downloads)
|
||||
|
||||
**Public Hold** (`HOLD_PUBLIC=true`):
|
||||
- Anonymous users: ✅ Allowed
|
||||
- Authenticated users: ✅ Allowed
|
||||
|
||||
**Private Hold** (`HOLD_PUBLIC=false`):
|
||||
- Anonymous users: ❌ Forbidden
|
||||
- Authenticated users with crew membership: ✅ Allowed
|
||||
- Crew must have `blob:read` permission
|
||||
|
||||
### Write Access (Blob Uploads)
|
||||
|
||||
Regardless of `HOLD_PUBLIC` setting:
|
||||
- Hold owner (from captain record): ✅ Allowed
|
||||
- Crew members with `blob:write` permission: ✅ Allowed
|
||||
- Non-crew authenticated users: Depends on `HOLD_ALLOW_ALL_CREW`
|
||||
- `HOLD_ALLOW_ALL_CREW=true`: ✅ Allowed
|
||||
- `HOLD_ALLOW_ALL_CREW=false`: ❌ Forbidden
|
||||
|
||||
### Authentication Method
|
||||
|
||||
AppView uses **service tokens** from user's PDS to authenticate with hold service:
|
||||
1. AppView calls user's PDS: `com.atproto.server.getServiceAuth` with hold DID
|
||||
2. User's PDS returns a service token scoped to the hold DID
|
||||
3. AppView includes service token in XRPC requests to hold
|
||||
4. Hold validates token and checks crew membership in its embedded PDS
|
||||
|
||||
## Deployment Scenarios
|
||||
|
||||
### Personal Hold (Single User)
|
||||
|
||||
Your own storage for your images:
|
||||
|
||||
```bash
|
||||
# Hold config
|
||||
HOLD_PUBLIC_URL=https://hold.alice.com
|
||||
HOLD_OWNER=did:plc:alice-did
|
||||
HOLD_PUBLIC=false # Private (only you can pull)
|
||||
HOLD_ALLOW_ALL_CREW=false # Only you can push
|
||||
HOLD_DATABASE_DIR=/var/lib/atcr-hold
|
||||
|
||||
# S3 storage
|
||||
STORAGE_DRIVER=s3
|
||||
AWS_ACCESS_KEY_ID=your-key
|
||||
AWS_SECRET_ACCESS_KEY=your-secret
|
||||
S3_BUCKET=alice-container-registry
|
||||
S3_ENDPOINT=https://gateway.storjshare.io # Using Storj
|
||||
```
|
||||
|
||||
### Shared Hold (Team/Organization)
|
||||
|
||||
Shared storage for a team with crew members:
|
||||
|
||||
```bash
|
||||
# Hold config
|
||||
HOLD_PUBLIC_URL=https://hold.acme.corp
|
||||
HOLD_OWNER=did:plc:acme-org-did
|
||||
HOLD_PUBLIC=false # Private reads (crew only)
|
||||
HOLD_ALLOW_ALL_CREW=false # Explicit crew membership required
|
||||
HOLD_DATABASE_DIR=/var/lib/atcr-hold
|
||||
|
||||
# S3 storage
|
||||
STORAGE_DRIVER=s3
|
||||
AWS_ACCESS_KEY_ID=your-key
|
||||
AWS_SECRET_ACCESS_KEY=your-secret
|
||||
S3_BUCKET=acme-registry-blobs
|
||||
```
|
||||
|
||||
Then add crew members via XRPC or hold PDS records.
|
||||
|
||||
### Public Hold (Community Registry)
|
||||
|
||||
Open storage allowing anyone to push and pull:
|
||||
|
||||
```bash
|
||||
# Hold config
|
||||
HOLD_PUBLIC_URL=https://hold.community.io
|
||||
HOLD_OWNER=did:plc:community-did
|
||||
HOLD_PUBLIC=true # Public reads (anyone can pull)
|
||||
HOLD_ALLOW_ALL_CREW=true # Any authenticated user can push
|
||||
HOLD_DATABASE_DIR=/var/lib/atcr-hold
|
||||
|
||||
# S3 storage
|
||||
STORAGE_DRIVER=s3
|
||||
AWS_ACCESS_KEY_ID=your-key
|
||||
AWS_SECRET_ACCESS_KEY=your-secret
|
||||
S3_BUCKET=community-registry-blobs
|
||||
```
|
||||
|
||||
### Development/Testing
|
||||
|
||||
Local filesystem storage for testing:
|
||||
|
||||
```bash
|
||||
# Hold config
|
||||
HOLD_PUBLIC_URL=http://127.0.0.1:8080
|
||||
HOLD_OWNER=did:plc:your-test-did
|
||||
HOLD_PUBLIC=true
|
||||
HOLD_ALLOW_ALL_CREW=true
|
||||
HOLD_DATABASE_DIR=/tmp/atcr-hold
|
||||
|
||||
# Filesystem storage
|
||||
STORAGE_DRIVER=filesystem
|
||||
STORAGE_ROOT_DIR=/tmp/atcr-hold-blobs
|
||||
```
|
||||
|
||||
## Production Deployment
|
||||
|
||||
For production deployments with:
|
||||
- SSL/TLS certificates
|
||||
- S3 storage with presigned URLs
|
||||
- Proper access control
|
||||
- Systemd service files
|
||||
- Monitoring
|
||||
|
||||
See **[deploy/README.md](https://tangled.org/@evan.jarrett.net/at-container-registry/blob/main/deploy/README.md)** for comprehensive production deployment guide.
|
||||
|
||||
### Quick Production Checklist
|
||||
|
||||
Before going to production:
|
||||
|
||||
- [ ] Set `HOLD_PUBLIC_URL` to your public HTTPS URL
|
||||
- [ ] Set `HOLD_OWNER` to your ATProto DID
|
||||
- [ ] Configure S3 storage (`STORAGE_DRIVER=s3`)
|
||||
- [ ] Set `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `S3_BUCKET`, `S3_ENDPOINT`
|
||||
- [ ] Set `HOLD_DATABASE_DIR` to persistent directory
|
||||
- [ ] Configure `HOLD_PUBLIC` and `HOLD_ALLOW_ALL_CREW` for desired access model
|
||||
- [ ] Configure SSL/TLS termination (Caddy/nginx/Cloudflare)
|
||||
- [ ] Verify DID document: `curl https://hold.example.com/.well-known/did.json`
|
||||
- [ ] Test presigned URLs: Check logs for "presigned URL" messages during push
|
||||
- [ ] Monitor crew membership: `curl https://hold.example.com/xrpc/com.atproto.repo.listRecords?repo={holdDID}&collection=io.atcr.hold.crew`
|
||||
- [ ] (Optional) Enable Bluesky posts: `HOLD_BLUESKY_POSTS_ENABLED=true`
|
||||
- [ ] (Optional) Request relay crawl: `./deploy/request-crawl.sh hold.example.com`
|
||||
|
||||
## Configuration Files Reference
|
||||
|
||||
- **[.env.hold.example](https://tangled.org/@evan.jarrett.net/at-container-registry/blob/main/.env.hold.example)** - All available environment variables with documentation
|
||||
- **[deploy/.env.prod.template](https://tangled.org/@evan.jarrett.net/at-container-registry/blob/main/deploy/.env.prod.template)** - Production configuration template (includes both AppView and Hold)
|
||||
- **[deploy/README.md](https://tangled.org/@evan.jarrett.net/at-container-registry/blob/main/deploy/README.md)** - Production deployment guide
|
||||
- **[AppView Documentation](https://atcr.io/r/evan.jarrett.net/atcr-appview)** - Registry API server setup
|
||||
- **[BYOS Architecture](https://tangled.org/@evan.jarrett.net/at-container-registry/blob/main/docs/BYOS.md)** - Bring Your Own Storage technical design
|
||||
381
go.mod
381
go.mod
@@ -1,50 +1,300 @@
|
||||
module atcr.io
|
||||
|
||||
go 1.24.7
|
||||
go 1.24.9
|
||||
|
||||
require (
|
||||
github.com/anchore/grype v0.102.0
|
||||
github.com/anchore/syft v1.36.0
|
||||
github.com/aws/aws-sdk-go v1.55.5
|
||||
github.com/bluesky-social/indigo v0.0.0-20251003000214-3259b215110e
|
||||
github.com/bluesky-social/indigo v0.0.0-20251021193747-543ab1124beb
|
||||
github.com/distribution/distribution/v3 v3.0.0
|
||||
github.com/distribution/reference v0.6.0
|
||||
github.com/earthboundkid/versioninfo/v2 v2.24.1
|
||||
github.com/go-chi/chi/v5 v5.2.3
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/ipfs/go-block-format v0.2.0
|
||||
github.com/ipfs/go-cid v0.4.1
|
||||
github.com/ipfs/go-datastore v0.6.0
|
||||
github.com/ipfs/go-ipfs-blockstore v1.3.1
|
||||
github.com/ipfs/go-ipld-format v0.6.0
|
||||
github.com/ipld/go-car v0.6.1-0.20230509095817-92d28eb23ba4
|
||||
github.com/klauspost/compress v1.18.0
|
||||
github.com/mattn/go-sqlite3 v1.14.32
|
||||
github.com/microcosm-cc/bluemonday v1.0.27
|
||||
github.com/multiformats/go-multihash v0.2.3
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/spf13/cobra v1.8.0
|
||||
github.com/spf13/cobra v1.10.1
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/whyrusleeping/cbor-gen v0.3.1
|
||||
github.com/yuin/goldmark v1.7.13
|
||||
go.opentelemetry.io/otel v1.37.0
|
||||
go.yaml.in/yaml/v4 v4.0.0-rc.2
|
||||
golang.org/x/crypto v0.39.0
|
||||
golang.org/x/crypto v0.43.0
|
||||
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da
|
||||
gorm.io/gorm v1.31.0
|
||||
)
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.24.0 // indirect
|
||||
cloud.google.com/go v0.121.3 // indirect
|
||||
cloud.google.com/go/auth v0.16.2 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.7.0 // indirect
|
||||
cloud.google.com/go/iam v1.5.2 // indirect
|
||||
cloud.google.com/go/monitoring v1.24.2 // indirect
|
||||
cloud.google.com/go/storage v1.55.0 // indirect
|
||||
dario.cat/mergo v1.0.2 // indirect
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect
|
||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20250520111509-a70c2aa677fa // indirect
|
||||
github.com/BurntSushi/toml v1.5.0 // indirect
|
||||
github.com/CycloneDX/cyclonedx-go v0.9.3 // indirect
|
||||
github.com/DataDog/zstd v1.5.7 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect
|
||||
github.com/Intevation/gval v1.3.0 // indirect
|
||||
github.com/Intevation/jsonpath v0.2.1 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.4.0 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/Microsoft/hcsshim v0.13.0 // indirect
|
||||
github.com/OneOfOne/xxhash v1.2.8 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.3.0 // indirect
|
||||
github.com/RussellLuo/slidingwindow v0.0.0-20200528002341-535bb99d338b // indirect
|
||||
github.com/STARRY-S/zip v0.2.3 // indirect
|
||||
github.com/acobaugh/osrelease v0.1.0 // indirect
|
||||
github.com/adrg/xdg v0.5.3 // indirect
|
||||
github.com/agext/levenshtein v1.2.3 // indirect
|
||||
github.com/anchore/archiver/v3 v3.5.3-0.20241210171143-5b1d8d1c7c51 // indirect
|
||||
github.com/anchore/clio v0.0.0-20250715152405-a0fa658e5084 // indirect
|
||||
github.com/anchore/fangs v0.0.0-20250716230140-94c22408c232 // indirect
|
||||
github.com/anchore/go-collections v0.0.0-20251016125210-a3c352120e8c // indirect
|
||||
github.com/anchore/go-homedir v0.0.0-20250319154043-c29668562e4d // indirect
|
||||
github.com/anchore/go-logger v0.0.0-20250318195838-07ae343dd722 // indirect
|
||||
github.com/anchore/go-lzo v0.1.0 // indirect
|
||||
github.com/anchore/go-macholibre v0.0.0-20250320151634-807da7ad2331 // indirect
|
||||
github.com/anchore/go-rpmdb v0.0.0-20250516171929-f77691e1faec // indirect
|
||||
github.com/anchore/go-struct-converter v0.0.0-20250211213226-cce56d595160 // indirect
|
||||
github.com/anchore/go-sync v0.0.0-20250714163430-add63db73ad1 // indirect
|
||||
github.com/anchore/go-version v1.2.2-0.20210903204242-51efa5b487c4 // indirect
|
||||
github.com/anchore/packageurl-go v0.1.1-0.20250220190351-d62adb6e1115 // indirect
|
||||
github.com/anchore/stereoscope v0.1.11 // indirect
|
||||
github.com/andybalholm/brotli v1.2.0 // indirect
|
||||
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
|
||||
github.com/aquasecurity/go-pep440-version v0.0.1 // indirect
|
||||
github.com/aquasecurity/go-version v0.0.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.70 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.80.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 // indirect
|
||||
github.com/aws/smithy-go v1.22.4 // indirect
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
|
||||
github.com/aymerick/douceur v0.2.0 // indirect
|
||||
github.com/becheran/wildmatch-go v1.0.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
|
||||
github.com/bitnami/go-version v0.0.0-20250505154626-452e8c5ee607 // indirect
|
||||
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb // indirect
|
||||
github.com/bmatcuk/doublestar/v2 v2.0.4 // indirect
|
||||
github.com/bmatcuk/doublestar/v4 v4.9.1 // indirect
|
||||
github.com/bodgit/plumbing v1.3.0 // indirect
|
||||
github.com/bodgit/sevenzip v1.6.1 // indirect
|
||||
github.com/bodgit/windows v1.0.1 // indirect
|
||||
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect
|
||||
github.com/carlmjohnson/versioninfo v0.22.5 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/charmbracelet/colorprofile v0.3.1 // indirect
|
||||
github.com/charmbracelet/lipgloss v1.1.0 // indirect
|
||||
github.com/charmbracelet/x/ansi v0.10.1 // indirect
|
||||
github.com/charmbracelet/x/cellbuf v0.0.13 // indirect
|
||||
github.com/charmbracelet/x/term v0.2.1 // indirect
|
||||
github.com/cloudflare/circl v1.6.1 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
|
||||
github.com/containerd/cgroups/v3 v3.0.3 // indirect
|
||||
github.com/containerd/containerd v1.7.28 // indirect
|
||||
github.com/containerd/containerd/api v1.9.0 // indirect
|
||||
github.com/containerd/continuity v0.4.5 // indirect
|
||||
github.com/containerd/errdefs v1.0.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
github.com/containerd/fifo v1.1.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/platforms v0.2.1 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
|
||||
github.com/containerd/ttrpc v1.2.7 // indirect
|
||||
github.com/containerd/typeurl/v2 v2.2.3 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/deitch/magic v0.0.0-20240306090643-c67ab88f10cb // indirect
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
github.com/docker/docker-credential-helpers v0.8.2 // indirect
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
|
||||
github.com/diskfs/go-diskfs v1.7.0 // indirect
|
||||
github.com/docker/cli v28.5.1+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker v28.5.1+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.9.3 // indirect
|
||||
github.com/docker/go-connections v0.6.0 // indirect
|
||||
github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32 // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/elliotchance/phpserialize v1.4.0 // indirect
|
||||
github.com/emirpasic/gods v1.18.1 // indirect
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
|
||||
github.com/facebookincubator/nvdtools v0.1.5 // indirect
|
||||
github.com/fatih/color v1.18.0 // indirect
|
||||
github.com/felixge/fgprof v0.9.5 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.10 // indirect
|
||||
github.com/github/go-spdx/v2 v2.3.4 // indirect
|
||||
github.com/glebarez/go-sqlite v1.22.0 // indirect
|
||||
github.com/glebarez/sqlite v1.11.0 // indirect
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
|
||||
github.com/go-git/go-billy/v5 v5.6.2 // indirect
|
||||
github.com/go-git/go-git/v5 v5.16.3 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.2 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-restruct/restruct v1.2.0-alpha // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||
github.com/goccy/go-yaml v1.18.0 // indirect
|
||||
github.com/gocql/gocql v1.7.0 // indirect
|
||||
github.com/gocsaf/csaf/v3 v3.3.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/gohugoio/hashstructure v0.6.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
|
||||
github.com/golang/snappy v1.0.0 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/go-containerregistry v0.20.6 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/licensecheck v0.3.1 // indirect
|
||||
github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 // indirect
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.15.0 // indirect
|
||||
github.com/gookit/color v1.6.0 // indirect
|
||||
github.com/gorilla/css v1.0.1 // indirect
|
||||
github.com/gorilla/handlers v1.5.2 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect
|
||||
github.com/gorilla/mux v1.8.1 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
|
||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
|
||||
github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b // indirect
|
||||
github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.65 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-getter v1.8.2 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.5 // indirect
|
||||
github.com/hashicorp/go-version v1.7.0 // indirect
|
||||
github.com/hashicorp/golang-lru v1.0.2 // indirect
|
||||
github.com/hashicorp/golang-lru/arc/v2 v2.0.6 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||
github.com/hashicorp/hcl/v2 v2.24.0 // indirect
|
||||
github.com/huandu/xstrings v1.5.0 // indirect
|
||||
github.com/iancoleman/strcase v0.3.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/ipfs/bbloom v0.0.4 // indirect
|
||||
github.com/ipfs/go-blockservice v0.5.2 // indirect
|
||||
github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect
|
||||
github.com/ipfs/go-ipfs-exchange-interface v0.2.1 // indirect
|
||||
github.com/ipfs/go-ipfs-util v0.0.3 // indirect
|
||||
github.com/ipfs/go-ipld-cbor v0.1.0 // indirect
|
||||
github.com/ipfs/go-ipld-legacy v0.2.1 // indirect
|
||||
github.com/ipfs/go-libipfs v0.7.0 // indirect
|
||||
github.com/ipfs/go-log v1.0.5 // indirect
|
||||
github.com/ipfs/go-log/v2 v2.5.1 // indirect
|
||||
github.com/ipfs/go-merkledag v0.11.0 // indirect
|
||||
github.com/ipfs/go-metrics-interface v0.0.1 // indirect
|
||||
github.com/ipfs/go-verifcid v0.0.3 // indirect
|
||||
github.com/ipld/go-codec-dagpb v1.6.0 // indirect
|
||||
github.com/ipld/go-ipld-prime v0.21.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
|
||||
github.com/jackc/pgx/v5 v5.5.0 // indirect
|
||||
github.com/jackc/puddle/v2 v2.2.1 // indirect
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
|
||||
github.com/jbenet/goprocess v0.1.4 // indirect
|
||||
github.com/jinzhu/copier v0.4.0 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/kastenhq/goversion v0.0.0-20230811215019-93b2f8823953 // indirect
|
||||
github.com/kevinburke/ssh_config v1.2.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
|
||||
github.com/klauspost/pgzip v1.2.6 // indirect
|
||||
github.com/knqyf263/go-apk-version v0.0.0-20200609155635-041fdbb8563f // indirect
|
||||
github.com/knqyf263/go-deb-version v0.0.0-20241115132648-6f4aee6ccd23 // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
|
||||
github.com/masahiro331/go-mvn-version v0.0.0-20250131095131-f4974fa13b8a // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
||||
github.com/mholt/archives v0.1.5 // indirect
|
||||
github.com/mikelolasagasti/xz v1.0.1 // indirect
|
||||
github.com/minio/minlz v1.0.1 // indirect
|
||||
github.com/minio/sha256-simd v1.0.1 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/locker v1.0.1 // indirect
|
||||
github.com/moby/sys/mountinfo v0.7.2 // indirect
|
||||
github.com/moby/sys/sequential v0.6.0 // indirect
|
||||
github.com/moby/sys/signal v0.7.1 // indirect
|
||||
github.com/moby/sys/user v0.4.0 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/mr-tron/base58 v1.2.0 // indirect
|
||||
github.com/muesli/termenv v0.16.0 // indirect
|
||||
github.com/multiformats/go-base32 v0.1.0 // indirect
|
||||
github.com/multiformats/go-base36 v0.2.0 // indirect
|
||||
github.com/multiformats/go-multibase v0.2.0 // indirect
|
||||
github.com/multiformats/go-varint v0.0.7 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||
github.com/ncruces/go-strftime v0.1.9 // indirect
|
||||
github.com/nix-community/go-nix v0.0.0-20250101154619-4bdde671e0a1 // indirect
|
||||
github.com/nwaples/rardecode v1.1.3 // indirect
|
||||
github.com/nwaples/rardecode/v2 v2.2.0 // indirect
|
||||
github.com/olekukonko/errors v1.1.0 // indirect
|
||||
github.com/olekukonko/ll v0.0.9 // indirect
|
||||
github.com/olekukonko/tablewriter v1.1.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/opencontainers/runtime-spec v1.2.1 // indirect
|
||||
github.com/opencontainers/selinux v1.12.0 // indirect
|
||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||
github.com/openvex/go-vex v0.2.7 // indirect
|
||||
github.com/package-url/packageurl-go v0.1.3 // indirect
|
||||
github.com/pandatix/go-cvss v0.6.2 // indirect
|
||||
github.com/pborman/indent v1.2.1 // indirect
|
||||
github.com/pelletier/go-toml v1.9.5 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.22 // indirect
|
||||
github.com/pjbgf/sha1cd v0.4.0 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pkg/profile v1.7.0 // indirect
|
||||
github.com/pkg/xattr v0.4.12 // indirect
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f // indirect
|
||||
github.com/prometheus/client_golang v1.20.5 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.60.1 // indirect
|
||||
@@ -52,40 +302,107 @@ require (
|
||||
github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect
|
||||
github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect
|
||||
github.com/redis/go-redis/v9 v9.7.3 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/rust-secure-code/go-rustaudit v0.0.0-20250226111315-e20ec32e963c // indirect
|
||||
github.com/sagikazarmark/locafero v0.9.0 // indirect
|
||||
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d // indirect
|
||||
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
|
||||
github.com/sassoftware/go-rpmutils v0.4.0 // indirect
|
||||
github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e // indirect
|
||||
github.com/sergi/go-diff v1.4.0 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect
|
||||
github.com/skeema/knownhosts v1.3.1 // indirect
|
||||
github.com/sorairolake/lzip-go v0.3.8 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||
github.com/spdx/gordf v0.0.0-20250128162952-000978ccd6fb // indirect
|
||||
github.com/spdx/tools-golang v0.5.5 // indirect
|
||||
github.com/spf13/afero v1.15.0 // indirect
|
||||
github.com/spf13/cast v1.9.2 // indirect
|
||||
github.com/spf13/pflag v1.0.9 // indirect
|
||||
github.com/spf13/viper v1.20.1 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/sylabs/sif/v2 v2.22.0 // indirect
|
||||
github.com/sylabs/squashfs v1.0.6 // indirect
|
||||
github.com/therootcompany/xz v1.0.1 // indirect
|
||||
github.com/ulikunitz/xz v0.5.15 // indirect
|
||||
github.com/vbatts/go-mtree v0.6.0 // indirect
|
||||
github.com/vbatts/tar-split v0.12.1 // indirect
|
||||
github.com/vifraa/gopom v1.0.0 // indirect
|
||||
github.com/wagoodman/go-partybus v0.0.0-20230516145632-8ccac152c651 // indirect
|
||||
github.com/wagoodman/go-progress v0.0.0-20230925121702-07e42b3cdba0 // indirect
|
||||
github.com/xanzy/ssh-agent v0.3.3 // indirect
|
||||
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
github.com/zclconf/go-cty v1.16.3 // indirect
|
||||
github.com/zeebo/errs v1.4.0 // indirect
|
||||
gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b // indirect
|
||||
gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 // indirect
|
||||
go.etcd.io/bbolt v1.4.2 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.37.0 // indirect
|
||||
go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect
|
||||
go.opentelemetry.io/otel v1.32.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.54.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/log v0.8.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.32.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
||||
golang.org/x/net v0.37.0 // indirect
|
||||
golang.org/x/sync v0.15.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/text v0.26.0 // indirect
|
||||
golang.org/x/time v0.6.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
|
||||
google.golang.org/grpc v1.68.0 // indirect
|
||||
google.golang.org/protobuf v1.35.1 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.4.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.26.0 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
|
||||
golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc // indirect
|
||||
golang.org/x/mod v0.29.0 // indirect
|
||||
golang.org/x/net v0.46.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.17.0 // indirect
|
||||
golang.org/x/sys v0.37.0 // indirect
|
||||
golang.org/x/term v0.36.0 // indirect
|
||||
golang.org/x/text v0.30.0 // indirect
|
||||
golang.org/x/time v0.14.0 // indirect
|
||||
golang.org/x/tools v0.38.0 // indirect
|
||||
google.golang.org/api v0.242.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250715232539-7130f93afb79 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250715232539-7130f93afb79 // indirect
|
||||
google.golang.org/grpc v1.74.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/warnings.v0 v0.1.2 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
gorm.io/driver/postgres v1.5.7 // indirect
|
||||
lukechampine.com/blake3 v1.2.1 // indirect
|
||||
modernc.org/libc v1.66.10 // indirect
|
||||
modernc.org/mathutil v1.7.1 // indirect
|
||||
modernc.org/memory v1.11.0 // indirect
|
||||
modernc.org/sqlite v1.39.1 // indirect
|
||||
)
|
||||
|
||||
// Pin OpenTelemetry SDK to v1.32.0 for compatibility with distribution/distribution
|
||||
replace (
|
||||
go.opentelemetry.io/otel/sdk => go.opentelemetry.io/otel/sdk v1.32.0
|
||||
go.opentelemetry.io/otel/sdk/log => go.opentelemetry.io/otel/sdk/log v0.8.0
|
||||
go.opentelemetry.io/otel/sdk/metric => go.opentelemetry.io/otel/sdk/metric v1.32.0
|
||||
)
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
"key": "tid",
|
||||
"record": {
|
||||
"type": "object",
|
||||
"required": ["repository", "digest", "mediaType", "schemaVersion", "config", "layers", "holdEndpoint", "createdAt"],
|
||||
"required": ["repository", "digest", "mediaType", "schemaVersion", "holdEndpoint", "createdAt"],
|
||||
"properties": {
|
||||
"repository": {
|
||||
"type": "string",
|
||||
@@ -29,7 +29,9 @@
|
||||
"description": "OCI media type",
|
||||
"knownValues": [
|
||||
"application/vnd.oci.image.manifest.v1+json",
|
||||
"application/vnd.docker.distribution.manifest.v2+json"
|
||||
"application/vnd.docker.distribution.manifest.v2+json",
|
||||
"application/vnd.oci.image.index.v1+json",
|
||||
"application/vnd.docker.distribution.manifest.list.v2+json"
|
||||
]
|
||||
},
|
||||
"schemaVersion": {
|
||||
@@ -47,7 +49,15 @@
|
||||
"type": "ref",
|
||||
"ref": "#blobReference"
|
||||
},
|
||||
"description": "Filesystem layers"
|
||||
"description": "Filesystem layers (for image manifests)"
|
||||
},
|
||||
"manifests": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "ref",
|
||||
"ref": "#manifestReference"
|
||||
},
|
||||
"description": "Referenced manifests (for manifest lists/indexes)"
|
||||
},
|
||||
"annotations": {
|
||||
"type": "object",
|
||||
@@ -100,6 +110,64 @@
|
||||
"description": "Optional metadata"
|
||||
}
|
||||
}
|
||||
},
|
||||
"manifestReference": {
|
||||
"type": "object",
|
||||
"description": "Reference to a manifest in a manifest list/index",
|
||||
"required": ["mediaType", "size", "digest"],
|
||||
"properties": {
|
||||
"mediaType": {
|
||||
"type": "string",
|
||||
"description": "Media type of the referenced manifest"
|
||||
},
|
||||
"size": {
|
||||
"type": "integer",
|
||||
"description": "Size in bytes"
|
||||
},
|
||||
"digest": {
|
||||
"type": "string",
|
||||
"description": "Content digest (e.g., 'sha256:...')"
|
||||
},
|
||||
"platform": {
|
||||
"type": "ref",
|
||||
"ref": "#platform",
|
||||
"description": "Platform information for this manifest"
|
||||
},
|
||||
"annotations": {
|
||||
"type": "object",
|
||||
"description": "Optional metadata"
|
||||
}
|
||||
}
|
||||
},
|
||||
"platform": {
|
||||
"type": "object",
|
||||
"description": "Platform information describing OS and architecture",
|
||||
"required": ["architecture", "os"],
|
||||
"properties": {
|
||||
"architecture": {
|
||||
"type": "string",
|
||||
"description": "CPU architecture (e.g., 'amd64', 'arm64', 'arm')"
|
||||
},
|
||||
"os": {
|
||||
"type": "string",
|
||||
"description": "Operating system (e.g., 'linux', 'windows', 'darwin')"
|
||||
},
|
||||
"osVersion": {
|
||||
"type": "string",
|
||||
"description": "Optional OS version"
|
||||
},
|
||||
"osFeatures": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"description": "Optional OS features"
|
||||
},
|
||||
"variant": {
|
||||
"type": "string",
|
||||
"description": "Optional CPU variant (e.g., 'v7' for ARM)"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
"key": "any",
|
||||
"record": {
|
||||
"type": "object",
|
||||
"required": ["repository", "tag", "manifestDigest", "createdAt"],
|
||||
"required": ["repository", "tag", "createdAt"],
|
||||
"properties": {
|
||||
"repository": {
|
||||
"type": "string",
|
||||
@@ -20,9 +20,14 @@
|
||||
"description": "Tag name (e.g., 'latest', 'v1.0.0', '12-slim')",
|
||||
"maxLength": 128
|
||||
},
|
||||
"manifest": {
|
||||
"type": "string",
|
||||
"format": "at-uri",
|
||||
"description": "AT-URI of the manifest this tag points to (e.g., 'at://did:plc:xyz/io.atcr.manifest/abc123'). Preferred over manifestDigest for new records."
|
||||
},
|
||||
"manifestDigest": {
|
||||
"type": "string",
|
||||
"description": "Digest of the manifest this tag points to (e.g., 'sha256:...')"
|
||||
"description": "DEPRECATED: Digest of the manifest (e.g., 'sha256:...'). Kept for backward compatibility with old records. New records should use 'manifest' field instead."
|
||||
},
|
||||
"createdAt": {
|
||||
"type": "string",
|
||||
|
||||
363
pkg/appview/config.go
Normal file
363
pkg/appview/config.go
Normal file
@@ -0,0 +1,363 @@
|
||||
// Package appview implements the ATCR AppView component, which serves as the main
|
||||
// OCI Distribution API server. It resolves identities (handle/DID to PDS endpoint),
|
||||
// routes manifests to user's PDS, routes blobs to hold services, validates OAuth tokens,
|
||||
// and issues registry JWTs. This package provides environment-based configuration,
|
||||
// middleware registration, and HTTP server setup for the AppView service.
|
||||
package appview
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/distribution/distribution/v3/configuration"
|
||||
)
|
||||
|
||||
// Config represents the AppView service configuration
|
||||
type Config struct {
|
||||
Version string `yaml:"version"`
|
||||
LogLevel string `yaml:"log_level"`
|
||||
Server ServerConfig `yaml:"server"`
|
||||
UI UIConfig `yaml:"ui"`
|
||||
Health HealthConfig `yaml:"health"`
|
||||
Jetstream JetstreamConfig `yaml:"jetstream"`
|
||||
Auth AuthConfig `yaml:"auth"`
|
||||
Distribution *configuration.Configuration `yaml:"-"` // Wrapped distribution config for compatibility
|
||||
}
|
||||
|
||||
// ServerConfig defines server settings
|
||||
type ServerConfig struct {
|
||||
// Addr is the HTTP listen address (from env: ATCR_HTTP_ADDR, default: ":5000")
|
||||
Addr string `yaml:"addr"`
|
||||
|
||||
// BaseURL is the public URL for OAuth/JWT realm (from env: ATCR_BASE_URL)
|
||||
// Auto-detected from Addr if not set
|
||||
BaseURL string `yaml:"base_url"`
|
||||
|
||||
// DefaultHoldDID is the default hold DID for blob storage (from env: ATCR_DEFAULT_HOLD_DID)
|
||||
// REQUIRED - e.g., "did:web:hold01.atcr.io"
|
||||
DefaultHoldDID string `yaml:"default_hold_did"`
|
||||
|
||||
// TestMode enables HTTP for local DID resolution and transition:generic scope (from env: TEST_MODE)
|
||||
TestMode bool `yaml:"test_mode"`
|
||||
|
||||
// DebugAddr is the debug/pprof HTTP listen address (from env: ATCR_DEBUG_ADDR, default: ":5001")
|
||||
DebugAddr string `yaml:"debug_addr"`
|
||||
|
||||
// OAuthKeyPath is the path to the OAuth client P-256 signing key (from env: ATCR_OAUTH_KEY_PATH, default: "/var/lib/atcr/oauth/client.key")
|
||||
// Auto-generated on first run for production (non-localhost) deployments
|
||||
OAuthKeyPath string `yaml:"oauth_key_path"`
|
||||
|
||||
// ClientName is the OAuth client display name (from env: ATCR_CLIENT_NAME, default: "AT Container Registry")
|
||||
// Shown in OAuth authorization screens
|
||||
ClientName string `yaml:"client_name"`
|
||||
}
|
||||
|
||||
// UIConfig defines web UI settings
|
||||
type UIConfig struct {
|
||||
// Enabled controls whether the web UI is enabled (from env: ATCR_UI_ENABLED, default: true)
|
||||
Enabled bool `yaml:"enabled"`
|
||||
|
||||
// DatabasePath is the path to the UI SQLite database (from env: ATCR_UI_DATABASE_PATH, default: "/var/lib/atcr/ui.db")
|
||||
DatabasePath string `yaml:"database_path"`
|
||||
|
||||
// SkipDBMigrations controls whether to skip running database migrations (from env: SKIP_DB_MIGRATIONS, default: false)
|
||||
SkipDBMigrations bool `yaml:"skip_db_migrations"`
|
||||
}
|
||||
|
||||
// HealthConfig defines health check and cache settings
|
||||
type HealthConfig struct {
|
||||
// CacheTTL is the hold health check cache TTL (from env: ATCR_HEALTH_CACHE_TTL, default: 15m)
|
||||
CacheTTL time.Duration `yaml:"cache_ttl"`
|
||||
|
||||
// CheckInterval is the hold health check refresh interval (from env: ATCR_HEALTH_CHECK_INTERVAL, default: 15m)
|
||||
CheckInterval time.Duration `yaml:"check_interval"`
|
||||
|
||||
// ReadmeCacheTTL is the README cache TTL (from env: ATCR_README_CACHE_TTL, default: 1h)
|
||||
ReadmeCacheTTL time.Duration `yaml:"readme_cache_ttl"`
|
||||
}
|
||||
|
||||
// JetstreamConfig defines ATProto Jetstream settings
|
||||
type JetstreamConfig struct {
|
||||
// URL is the Jetstream WebSocket URL (from env: JETSTREAM_URL, default: wss://jetstream2.us-west.bsky.network/subscribe)
|
||||
URL string `yaml:"url"`
|
||||
|
||||
// BackfillEnabled controls whether backfill is enabled (from env: ATCR_BACKFILL_ENABLED, default: true)
|
||||
BackfillEnabled bool `yaml:"backfill_enabled"`
|
||||
|
||||
// BackfillInterval is the backfill interval (from env: ATCR_BACKFILL_INTERVAL, default: 1h)
|
||||
BackfillInterval time.Duration `yaml:"backfill_interval"`
|
||||
|
||||
// RelayEndpoint is the relay endpoint for sync API (from env: ATCR_RELAY_ENDPOINT, default: https://relay1.us-east.bsky.network)
|
||||
RelayEndpoint string `yaml:"relay_endpoint"`
|
||||
}
|
||||
|
||||
// AuthConfig defines authentication settings
|
||||
type AuthConfig struct {
|
||||
// KeyPath is the JWT signing key path (from env: ATCR_AUTH_KEY_PATH, default: "/var/lib/atcr/auth/private-key.pem")
|
||||
KeyPath string `yaml:"key_path"`
|
||||
|
||||
// CertPath is the JWT certificate path (from env: ATCR_AUTH_CERT_PATH, default: "/var/lib/atcr/auth/private-key.crt")
|
||||
CertPath string `yaml:"cert_path"`
|
||||
|
||||
// TokenExpiration is the JWT expiration duration (from env: ATCR_TOKEN_EXPIRATION, default: 300s)
|
||||
TokenExpiration time.Duration `yaml:"token_expiration"`
|
||||
|
||||
// ServiceName is the service name used for JWT issuer and service fields
|
||||
// Derived from ATCR_SERVICE_NAME env var or extracted from base URL (e.g., "atcr.io")
|
||||
ServiceName string `yaml:"service_name"`
|
||||
}
|
||||
|
||||
// LoadConfigFromEnv builds a complete configuration from environment variables
|
||||
// This follows the same pattern as the hold service (no config files, only env vars)
|
||||
func LoadConfigFromEnv() (*Config, error) {
|
||||
cfg := &Config{
|
||||
Version: "0.1",
|
||||
}
|
||||
|
||||
// Logging configuration
|
||||
cfg.LogLevel = getEnvOrDefault("ATCR_LOG_LEVEL", "info")
|
||||
|
||||
// Server configuration
|
||||
cfg.Server.Addr = getEnvOrDefault("ATCR_HTTP_ADDR", ":5000")
|
||||
cfg.Server.DebugAddr = getEnvOrDefault("ATCR_DEBUG_ADDR", ":5001")
|
||||
cfg.Server.DefaultHoldDID = os.Getenv("ATCR_DEFAULT_HOLD_DID")
|
||||
if cfg.Server.DefaultHoldDID == "" {
|
||||
return nil, fmt.Errorf("ATCR_DEFAULT_HOLD_DID is required")
|
||||
}
|
||||
cfg.Server.TestMode = os.Getenv("TEST_MODE") == "true"
|
||||
cfg.Server.OAuthKeyPath = getEnvOrDefault("ATCR_OAUTH_KEY_PATH", "/var/lib/atcr/oauth/client.key")
|
||||
cfg.Server.ClientName = getEnvOrDefault("ATCR_CLIENT_NAME", "AT Container Registry")
|
||||
|
||||
// Auto-detect base URL if not explicitly set
|
||||
cfg.Server.BaseURL = os.Getenv("ATCR_BASE_URL")
|
||||
if cfg.Server.BaseURL == "" {
|
||||
cfg.Server.BaseURL = autoDetectBaseURL(cfg.Server.Addr)
|
||||
}
|
||||
|
||||
// UI configuration
|
||||
cfg.UI.Enabled = os.Getenv("ATCR_UI_ENABLED") != "false"
|
||||
cfg.UI.DatabasePath = getEnvOrDefault("ATCR_UI_DATABASE_PATH", "/var/lib/atcr/ui.db")
|
||||
cfg.UI.SkipDBMigrations = os.Getenv("SKIP_DB_MIGRATIONS") == "true"
|
||||
|
||||
// Health and cache configuration
|
||||
cfg.Health.CacheTTL = getDurationOrDefault("ATCR_HEALTH_CACHE_TTL", 15*time.Minute)
|
||||
cfg.Health.CheckInterval = getDurationOrDefault("ATCR_HEALTH_CHECK_INTERVAL", 15*time.Minute)
|
||||
cfg.Health.ReadmeCacheTTL = getDurationOrDefault("ATCR_README_CACHE_TTL", 1*time.Hour)
|
||||
|
||||
// Jetstream configuration
|
||||
cfg.Jetstream.URL = getEnvOrDefault("JETSTREAM_URL", "wss://jetstream2.us-west.bsky.network/subscribe")
|
||||
cfg.Jetstream.BackfillEnabled = os.Getenv("ATCR_BACKFILL_ENABLED") != "false"
|
||||
cfg.Jetstream.BackfillInterval = getDurationOrDefault("ATCR_BACKFILL_INTERVAL", 1*time.Hour)
|
||||
cfg.Jetstream.RelayEndpoint = getEnvOrDefault("ATCR_RELAY_ENDPOINT", "https://relay1.us-east.bsky.network")
|
||||
|
||||
// Auth configuration
|
||||
cfg.Auth.KeyPath = getEnvOrDefault("ATCR_AUTH_KEY_PATH", "/var/lib/atcr/auth/private-key.pem")
|
||||
cfg.Auth.CertPath = getEnvOrDefault("ATCR_AUTH_CERT_PATH", "/var/lib/atcr/auth/private-key.crt")
|
||||
|
||||
// Parse token expiration (default: 300 seconds = 5 minutes)
|
||||
expirationStr := getEnvOrDefault("ATCR_TOKEN_EXPIRATION", "300")
|
||||
expirationSecs, err := strconv.Atoi(expirationStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid ATCR_TOKEN_EXPIRATION: %w", err)
|
||||
}
|
||||
cfg.Auth.TokenExpiration = time.Duration(expirationSecs) * time.Second
|
||||
|
||||
// Derive service name from base URL or env var (used for JWT issuer and service)
|
||||
cfg.Auth.ServiceName = getServiceName(cfg.Server.BaseURL)
|
||||
|
||||
// Build distribution configuration for compatibility with distribution library
|
||||
distConfig, err := buildDistributionConfig(cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build distribution config: %w", err)
|
||||
}
|
||||
cfg.Distribution = distConfig
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// buildDistributionConfig creates a distribution Configuration from our Config
|
||||
// This maintains compatibility with the distribution library
|
||||
func buildDistributionConfig(cfg *Config) (*configuration.Configuration, error) {
|
||||
distConfig := &configuration.Configuration{}
|
||||
|
||||
// Version
|
||||
distConfig.Version = configuration.MajorMinorVersion(0, 1)
|
||||
|
||||
// Logging
|
||||
distConfig.Log = configuration.Log{
|
||||
Level: configuration.Loglevel(cfg.LogLevel),
|
||||
Formatter: getEnvOrDefault("ATCR_LOG_FORMATTER", "text"),
|
||||
Fields: map[string]any{
|
||||
"service": "atcr-appview",
|
||||
},
|
||||
}
|
||||
|
||||
// HTTP server
|
||||
httpSecret := os.Getenv("REGISTRY_HTTP_SECRET")
|
||||
if httpSecret == "" {
|
||||
// Generate a random 32-byte secret
|
||||
randomBytes := make([]byte, 32)
|
||||
if _, err := rand.Read(randomBytes); err != nil {
|
||||
return nil, fmt.Errorf("failed to generate random secret: %w", err)
|
||||
}
|
||||
httpSecret = hex.EncodeToString(randomBytes)
|
||||
}
|
||||
|
||||
distConfig.HTTP = configuration.HTTP{
|
||||
Addr: cfg.Server.Addr,
|
||||
Secret: httpSecret,
|
||||
Headers: map[string][]string{
|
||||
"X-Content-Type-Options": {"nosniff"},
|
||||
},
|
||||
Debug: configuration.Debug{
|
||||
Addr: cfg.Server.DebugAddr,
|
||||
},
|
||||
}
|
||||
|
||||
// Storage (fake in-memory placeholder - all real storage is proxied)
|
||||
distConfig.Storage = buildStorageConfig()
|
||||
|
||||
// Middleware (ATProto resolver)
|
||||
distConfig.Middleware = buildMiddlewareConfig(cfg.Server.DefaultHoldDID, cfg.Server.BaseURL)
|
||||
|
||||
// Auth (use values from cfg.Auth)
|
||||
realm := cfg.Server.BaseURL + "/auth/token"
|
||||
|
||||
distConfig.Auth = configuration.Auth{
|
||||
"token": configuration.Parameters{
|
||||
"realm": realm,
|
||||
"service": cfg.Auth.ServiceName,
|
||||
"issuer": cfg.Auth.ServiceName,
|
||||
"rootcertbundle": cfg.Auth.CertPath,
|
||||
"privatekey": cfg.Auth.KeyPath,
|
||||
"expiration": int(cfg.Auth.TokenExpiration.Seconds()),
|
||||
},
|
||||
}
|
||||
|
||||
// Health checks
|
||||
distConfig.Health = buildHealthConfig()
|
||||
|
||||
return distConfig, nil
|
||||
}
|
||||
|
||||
// autoDetectBaseURL determines the base URL for the service from the HTTP address
|
||||
func autoDetectBaseURL(httpAddr string) string {
|
||||
// Auto-detect from HTTP addr
|
||||
if httpAddr[0] == ':' {
|
||||
// Just a port, assume localhost
|
||||
// Use "127.0.0.1" per RFC 8252 (OAuth servers reject "localhost")
|
||||
return fmt.Sprintf("http://127.0.0.1%s", httpAddr)
|
||||
}
|
||||
|
||||
// Full address provided
|
||||
return fmt.Sprintf("http://%s", httpAddr)
|
||||
}
|
||||
|
||||
// buildStorageConfig creates a fake in-memory storage config
|
||||
// This is required for distribution validation but is never actually used
|
||||
// All storage is routed through middleware to ATProto (manifests) and hold services (blobs)
|
||||
func buildStorageConfig() configuration.Storage {
|
||||
storage := configuration.Storage{}
|
||||
|
||||
// Use in-memory storage as a placeholder
|
||||
storage["inmemory"] = configuration.Parameters{}
|
||||
|
||||
// Disable upload purging
|
||||
// NOTE: Must use map[any]any for uploadpurging (not configuration.Parameters)
|
||||
// because distribution's validation code does a type assertion to map[any]any
|
||||
storage["maintenance"] = configuration.Parameters{
|
||||
"uploadpurging": map[any]any{
|
||||
"enabled": false,
|
||||
"age": 7 * 24 * time.Hour, // 168h
|
||||
"interval": 24 * time.Hour, // 24h
|
||||
"dryrun": false,
|
||||
},
|
||||
}
|
||||
|
||||
return storage
|
||||
}
|
||||
|
||||
// buildMiddlewareConfig creates middleware configuration
|
||||
func buildMiddlewareConfig(defaultHoldDID string, baseURL string) map[string][]configuration.Middleware {
|
||||
// Check test mode
|
||||
testMode := os.Getenv("TEST_MODE") == "true"
|
||||
|
||||
return map[string][]configuration.Middleware{
|
||||
"registry": {
|
||||
{
|
||||
Name: "atproto-resolver",
|
||||
Options: configuration.Parameters{
|
||||
"default_hold_did": defaultHoldDID,
|
||||
"test_mode": testMode,
|
||||
"base_url": baseURL,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// buildHealthConfig creates health check configuration
|
||||
func buildHealthConfig() configuration.Health {
|
||||
return configuration.Health{
|
||||
StorageDriver: configuration.StorageDriver{
|
||||
Enabled: true,
|
||||
Interval: 10 * time.Second,
|
||||
Threshold: 3,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// getServiceName extracts service name from base URL or uses env var
|
||||
func getServiceName(baseURL string) string {
|
||||
// Check env var first
|
||||
if serviceName := os.Getenv("ATCR_SERVICE_NAME"); serviceName != "" {
|
||||
return serviceName
|
||||
}
|
||||
|
||||
// Try to extract from base URL
|
||||
parsed, err := url.Parse(baseURL)
|
||||
if err == nil && parsed.Hostname() != "" {
|
||||
hostname := parsed.Hostname()
|
||||
|
||||
// Strip localhost/127.0.0.1 and use default
|
||||
if hostname == "localhost" || hostname == "127.0.0.1" {
|
||||
return "atcr.io"
|
||||
}
|
||||
|
||||
return hostname
|
||||
}
|
||||
|
||||
// Default fallback
|
||||
return "atcr.io"
|
||||
}
|
||||
|
||||
// getEnvOrDefault gets an environment variable or returns a default value
|
||||
func getEnvOrDefault(key, defaultValue string) string {
|
||||
if val := os.Getenv(key); val != "" {
|
||||
return val
|
||||
}
|
||||
return defaultValue
|
||||
}
|
||||
|
||||
// getDurationOrDefault parses a duration from environment variable or returns default
|
||||
// Logs a warning if parsing fails
|
||||
func getDurationOrDefault(envKey string, defaultValue time.Duration) time.Duration {
|
||||
envVal := os.Getenv(envKey)
|
||||
if envVal == "" {
|
||||
return defaultValue
|
||||
}
|
||||
|
||||
parsed, err := time.ParseDuration(envVal)
|
||||
if err != nil {
|
||||
slog.Warn("Invalid duration, using default", "env_key", envKey, "env_value", envVal, "default", defaultValue)
|
||||
return defaultValue
|
||||
}
|
||||
|
||||
return parsed
|
||||
}
|
||||
276
pkg/appview/config_test.go
Normal file
276
pkg/appview/config_test.go
Normal file
@@ -0,0 +1,276 @@
|
||||
package appview
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func Test_getServiceName(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
baseURL string
|
||||
envService string
|
||||
setEnv bool
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "env var set",
|
||||
baseURL: "http://127.0.0.1:5000",
|
||||
envService: "custom.registry.io",
|
||||
setEnv: true,
|
||||
want: "custom.registry.io",
|
||||
},
|
||||
{
|
||||
name: "localhost - use default",
|
||||
baseURL: "http://localhost:5000",
|
||||
setEnv: false,
|
||||
want: "atcr.io",
|
||||
},
|
||||
{
|
||||
name: "127.0.0.1 - use default",
|
||||
baseURL: "http://127.0.0.1:5000",
|
||||
setEnv: false,
|
||||
want: "atcr.io",
|
||||
},
|
||||
{
|
||||
name: "custom domain",
|
||||
baseURL: "https://registry.example.com",
|
||||
setEnv: false,
|
||||
want: "registry.example.com",
|
||||
},
|
||||
{
|
||||
name: "domain with port",
|
||||
baseURL: "https://registry.example.com:443",
|
||||
setEnv: false,
|
||||
want: "registry.example.com",
|
||||
},
|
||||
{
|
||||
name: "invalid URL - use default",
|
||||
baseURL: "://invalid",
|
||||
setEnv: false,
|
||||
want: "atcr.io",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.setEnv {
|
||||
t.Setenv("ATCR_SERVICE_NAME", tt.envService)
|
||||
} else {
|
||||
os.Unsetenv("ATCR_SERVICE_NAME")
|
||||
}
|
||||
|
||||
got := getServiceName(tt.baseURL)
|
||||
if got != tt.want {
|
||||
t.Errorf("getServiceName() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestBuildLogConfig removed - buildLogConfig is now an internal function
|
||||
|
||||
// TestBuildHTTPConfig removed - buildHTTPConfig is now an internal function
|
||||
|
||||
func TestBuildStorageConfig(t *testing.T) {
|
||||
got := buildStorageConfig()
|
||||
|
||||
// Verify inmemory driver exists
|
||||
if _, ok := got["inmemory"]; !ok {
|
||||
t.Error("buildStorageConfig() missing inmemory driver")
|
||||
}
|
||||
|
||||
// Verify maintenance config
|
||||
maintenance, ok := got["maintenance"]
|
||||
if !ok {
|
||||
t.Fatal("buildStorageConfig() missing maintenance config")
|
||||
}
|
||||
|
||||
uploadPurging, ok := maintenance["uploadpurging"]
|
||||
if !ok {
|
||||
t.Fatal("buildStorageConfig() missing uploadpurging config")
|
||||
}
|
||||
|
||||
// Verify uploadpurging is map[any]any (for distribution validation)
|
||||
purging, ok := uploadPurging.(map[any]any)
|
||||
if !ok {
|
||||
t.Fatalf("uploadpurging is %T, want map[any]any", uploadPurging)
|
||||
}
|
||||
|
||||
if purging["enabled"] != false {
|
||||
t.Error("uploadpurging enabled should be false")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildMiddlewareConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
defaultHoldDID string
|
||||
baseURL string
|
||||
testMode bool
|
||||
setTestMode bool
|
||||
wantTestMode bool
|
||||
}{
|
||||
{
|
||||
name: "normal mode",
|
||||
defaultHoldDID: "did:web:hold01.atcr.io",
|
||||
baseURL: "https://atcr.io",
|
||||
setTestMode: false,
|
||||
wantTestMode: false,
|
||||
},
|
||||
{
|
||||
name: "test mode enabled",
|
||||
defaultHoldDID: "did:web:hold01.atcr.io",
|
||||
baseURL: "https://atcr.io",
|
||||
testMode: true,
|
||||
setTestMode: true,
|
||||
wantTestMode: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.setTestMode {
|
||||
t.Setenv("TEST_MODE", "true")
|
||||
} else {
|
||||
os.Unsetenv("TEST_MODE")
|
||||
}
|
||||
|
||||
got := buildMiddlewareConfig(tt.defaultHoldDID, tt.baseURL)
|
||||
|
||||
registryMW, ok := got["registry"]
|
||||
if !ok {
|
||||
t.Fatal("buildMiddlewareConfig() missing registry middleware")
|
||||
}
|
||||
|
||||
if len(registryMW) != 1 {
|
||||
t.Fatalf("buildMiddlewareConfig() registry middleware count = %v, want 1", len(registryMW))
|
||||
}
|
||||
|
||||
mw := registryMW[0]
|
||||
if mw.Name != "atproto-resolver" {
|
||||
t.Errorf("middleware name = %v, want atproto-resolver", mw.Name)
|
||||
}
|
||||
|
||||
if mw.Options["default_hold_did"] != tt.defaultHoldDID {
|
||||
t.Errorf("default_hold_did = %v, want %v", mw.Options["default_hold_did"], tt.defaultHoldDID)
|
||||
}
|
||||
|
||||
if mw.Options["base_url"] != tt.baseURL {
|
||||
t.Errorf("base_url = %v, want %v", mw.Options["base_url"], tt.baseURL)
|
||||
}
|
||||
|
||||
if mw.Options["test_mode"] != tt.wantTestMode {
|
||||
t.Errorf("test_mode = %v, want %v", mw.Options["test_mode"], tt.wantTestMode)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildHealthConfig(t *testing.T) {
|
||||
got := buildHealthConfig()
|
||||
|
||||
if !got.StorageDriver.Enabled {
|
||||
t.Error("buildHealthConfig().StorageDriver.Enabled = false, want true")
|
||||
}
|
||||
|
||||
if got.StorageDriver.Interval.Seconds() != 10 {
|
||||
t.Errorf("buildHealthConfig().StorageDriver.Interval = %v, want 10s", got.StorageDriver.Interval)
|
||||
}
|
||||
|
||||
if got.StorageDriver.Threshold != 3 {
|
||||
t.Errorf("buildHealthConfig().StorageDriver.Threshold = %v, want 3", got.StorageDriver.Threshold)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadConfigFromEnv(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
envHoldDID string
|
||||
setHoldDID bool
|
||||
wantError bool
|
||||
}{
|
||||
{
|
||||
name: "valid config",
|
||||
envHoldDID: "did:web:hold01.atcr.io",
|
||||
setHoldDID: true,
|
||||
wantError: false,
|
||||
},
|
||||
{
|
||||
name: "missing default hold DID",
|
||||
setHoldDID: false,
|
||||
wantError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.setHoldDID {
|
||||
t.Setenv("ATCR_DEFAULT_HOLD_DID", tt.envHoldDID)
|
||||
} else {
|
||||
os.Unsetenv("ATCR_DEFAULT_HOLD_DID")
|
||||
}
|
||||
|
||||
// Clear other env vars to use defaults
|
||||
os.Unsetenv("ATCR_BASE_URL")
|
||||
os.Unsetenv("ATCR_SERVICE_NAME")
|
||||
|
||||
got, err := LoadConfigFromEnv()
|
||||
if (err != nil) != tt.wantError {
|
||||
t.Errorf("LoadConfigFromEnv() error = %v, wantError %v", err, tt.wantError)
|
||||
return
|
||||
}
|
||||
|
||||
if tt.wantError {
|
||||
return
|
||||
}
|
||||
|
||||
// Verify config structure
|
||||
if got.Version != "0.1" {
|
||||
t.Errorf("version = %v, want 0.1", got.Version)
|
||||
}
|
||||
|
||||
if got.LogLevel != "info" {
|
||||
t.Errorf("log level = %v, want info", got.LogLevel)
|
||||
}
|
||||
|
||||
if got.Server.Addr != ":5000" {
|
||||
t.Errorf("HTTP addr = %v, want :5000", got.Server.Addr)
|
||||
}
|
||||
|
||||
if got.Server.DefaultHoldDID != tt.envHoldDID {
|
||||
t.Errorf("default hold DID = %v, want %v", got.Server.DefaultHoldDID, tt.envHoldDID)
|
||||
}
|
||||
|
||||
if got.UI.DatabasePath != "/var/lib/atcr/ui.db" {
|
||||
t.Errorf("UI database path = %v, want /var/lib/atcr/ui.db", got.UI.DatabasePath)
|
||||
}
|
||||
|
||||
if got.Health.CacheTTL != 15*time.Minute {
|
||||
t.Errorf("health cache TTL = %v, want 15m", got.Health.CacheTTL)
|
||||
}
|
||||
|
||||
if got.Jetstream.URL != "wss://jetstream2.us-west.bsky.network/subscribe" {
|
||||
t.Errorf("jetstream URL = %v, want default", got.Jetstream.URL)
|
||||
}
|
||||
|
||||
// Verify distribution config was built
|
||||
if got.Distribution == nil {
|
||||
t.Error("distribution config is nil")
|
||||
}
|
||||
|
||||
if _, ok := got.Distribution.Storage["inmemory"]; !ok {
|
||||
t.Error("distribution storage missing inmemory driver")
|
||||
}
|
||||
|
||||
if _, ok := got.Distribution.Middleware["registry"]; !ok {
|
||||
t.Error("distribution middleware missing registry")
|
||||
}
|
||||
|
||||
if _, ok := got.Distribution.Auth["token"]; !ok {
|
||||
t.Error("distribution auth missing token config")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
78
pkg/appview/db/annotations.go
Normal file
78
pkg/appview/db/annotations.go
Normal file
@@ -0,0 +1,78 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"time"
|
||||
)
|
||||
|
||||
// GetRepositoryAnnotations retrieves all annotations for a repository
|
||||
func GetRepositoryAnnotations(db *sql.DB, did, repository string) (map[string]string, error) {
|
||||
rows, err := db.Query(`
|
||||
SELECT key, value
|
||||
FROM repository_annotations
|
||||
WHERE did = ? AND repository = ?
|
||||
`, did, repository)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
annotations := make(map[string]string)
|
||||
for rows.Next() {
|
||||
var key, value string
|
||||
if err := rows.Scan(&key, &value); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
annotations[key] = value
|
||||
}
|
||||
|
||||
return annotations, rows.Err()
|
||||
}
|
||||
|
||||
// UpsertRepositoryAnnotations replaces all annotations for a repository
|
||||
// Only called when manifest has at least one non-empty annotation
|
||||
func UpsertRepositoryAnnotations(db *sql.DB, did, repository string, annotations map[string]string) error {
|
||||
tx, err := db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
// Delete existing annotations
|
||||
_, err = tx.Exec(`
|
||||
DELETE FROM repository_annotations
|
||||
WHERE did = ? AND repository = ?
|
||||
`, did, repository)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Insert new annotations
|
||||
stmt, err := tx.Prepare(`
|
||||
INSERT INTO repository_annotations (did, repository, key, value, updated_at)
|
||||
VALUES (?, ?, ?, ?, ?)
|
||||
`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer stmt.Close()
|
||||
|
||||
now := time.Now()
|
||||
for key, value := range annotations {
|
||||
_, err = stmt.Exec(did, repository, key, value, now)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
// DeleteRepositoryAnnotations removes all annotations for a repository
|
||||
func DeleteRepositoryAnnotations(db *sql.DB, did, repository string) error {
|
||||
_, err := db.Exec(`
|
||||
DELETE FROM repository_annotations
|
||||
WHERE did = ? AND repository = ?
|
||||
`, did, repository)
|
||||
return err
|
||||
}
|
||||
361
pkg/appview/db/annotations_test.go
Normal file
361
pkg/appview/db/annotations_test.go
Normal file
@@ -0,0 +1,361 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAnnotations_Placeholder(t *testing.T) {
|
||||
// Placeholder test for annotations package
|
||||
// GetRepositoryAnnotations returns map[string]string
|
||||
annotations := make(map[string]string)
|
||||
annotations["test"] = "value"
|
||||
|
||||
if annotations["test"] != "value" {
|
||||
t.Error("Expected annotation value to be stored")
|
||||
}
|
||||
}
|
||||
|
||||
// Integration tests
|
||||
|
||||
func setupAnnotationsTestDB(t *testing.T) *sql.DB {
|
||||
t.Helper()
|
||||
// Use file::memory: with cache=shared to ensure all connections share the same in-memory DB
|
||||
db, err := InitDB("file::memory:?cache=shared", true)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to initialize test database: %v", err)
|
||||
}
|
||||
// Limit to single connection to avoid race conditions in tests
|
||||
db.SetMaxOpenConns(1)
|
||||
t.Cleanup(func() { db.Close() })
|
||||
return db
|
||||
}
|
||||
|
||||
func createAnnotationTestUser(t *testing.T, db *sql.DB, did, handle string) {
|
||||
t.Helper()
|
||||
_, err := db.Exec(`
|
||||
INSERT OR IGNORE INTO users (did, handle, pds_endpoint, last_seen)
|
||||
VALUES (?, ?, ?, datetime('now'))
|
||||
`, did, handle, "https://pds.example.com")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create test user: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetRepositoryAnnotations_Empty tests retrieving from empty repository
|
||||
func TestGetRepositoryAnnotations_Empty(t *testing.T) {
|
||||
db := setupAnnotationsTestDB(t)
|
||||
|
||||
annotations, err := GetRepositoryAnnotations(db, "did:plc:alice123", "myapp")
|
||||
if err != nil {
|
||||
t.Fatalf("GetRepositoryAnnotations() error = %v", err)
|
||||
}
|
||||
|
||||
if len(annotations) != 0 {
|
||||
t.Errorf("Expected empty annotations, got %d entries", len(annotations))
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetRepositoryAnnotations_WithData tests retrieving existing annotations
|
||||
func TestGetRepositoryAnnotations_WithData(t *testing.T) {
|
||||
db := setupAnnotationsTestDB(t)
|
||||
createAnnotationTestUser(t, db, "did:plc:alice123", "alice.bsky.social")
|
||||
|
||||
// Insert test annotations
|
||||
testAnnotations := map[string]string{
|
||||
"org.opencontainers.image.title": "My App",
|
||||
"org.opencontainers.image.description": "A test application",
|
||||
"org.opencontainers.image.version": "1.0.0",
|
||||
}
|
||||
|
||||
err := UpsertRepositoryAnnotations(db, "did:plc:alice123", "myapp", testAnnotations)
|
||||
if err != nil {
|
||||
t.Fatalf("UpsertRepositoryAnnotations() error = %v", err)
|
||||
}
|
||||
|
||||
// Retrieve annotations
|
||||
annotations, err := GetRepositoryAnnotations(db, "did:plc:alice123", "myapp")
|
||||
if err != nil {
|
||||
t.Fatalf("GetRepositoryAnnotations() error = %v", err)
|
||||
}
|
||||
|
||||
if len(annotations) != len(testAnnotations) {
|
||||
t.Errorf("Expected %d annotations, got %d", len(testAnnotations), len(annotations))
|
||||
}
|
||||
|
||||
for key, expectedValue := range testAnnotations {
|
||||
if actualValue, ok := annotations[key]; !ok {
|
||||
t.Errorf("Missing annotation key: %s", key)
|
||||
} else if actualValue != expectedValue {
|
||||
t.Errorf("Annotation[%s] = %v, want %v", key, actualValue, expectedValue)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestUpsertRepositoryAnnotations_Insert tests inserting new annotations
|
||||
func TestUpsertRepositoryAnnotations_Insert(t *testing.T) {
|
||||
db := setupAnnotationsTestDB(t)
|
||||
createAnnotationTestUser(t, db, "did:plc:bob456", "bob.bsky.social")
|
||||
|
||||
annotations := map[string]string{
|
||||
"key1": "value1",
|
||||
"key2": "value2",
|
||||
}
|
||||
|
||||
err := UpsertRepositoryAnnotations(db, "did:plc:bob456", "testapp", annotations)
|
||||
if err != nil {
|
||||
t.Fatalf("UpsertRepositoryAnnotations() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify annotations were inserted
|
||||
retrieved, err := GetRepositoryAnnotations(db, "did:plc:bob456", "testapp")
|
||||
if err != nil {
|
||||
t.Fatalf("GetRepositoryAnnotations() error = %v", err)
|
||||
}
|
||||
|
||||
if len(retrieved) != len(annotations) {
|
||||
t.Errorf("Expected %d annotations, got %d", len(annotations), len(retrieved))
|
||||
}
|
||||
|
||||
for key, expectedValue := range annotations {
|
||||
if actualValue := retrieved[key]; actualValue != expectedValue {
|
||||
t.Errorf("Annotation[%s] = %v, want %v", key, actualValue, expectedValue)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestUpsertRepositoryAnnotations_Update tests updating existing annotations
|
||||
func TestUpsertRepositoryAnnotations_Update(t *testing.T) {
|
||||
db := setupAnnotationsTestDB(t)
|
||||
createAnnotationTestUser(t, db, "did:plc:charlie789", "charlie.bsky.social")
|
||||
|
||||
// Insert initial annotations
|
||||
initial := map[string]string{
|
||||
"key1": "oldvalue1",
|
||||
"key2": "oldvalue2",
|
||||
"key3": "oldvalue3",
|
||||
}
|
||||
|
||||
err := UpsertRepositoryAnnotations(db, "did:plc:charlie789", "updateapp", initial)
|
||||
if err != nil {
|
||||
t.Fatalf("Initial UpsertRepositoryAnnotations() error = %v", err)
|
||||
}
|
||||
|
||||
// Update with new annotations (completely replaces old ones)
|
||||
updated := map[string]string{
|
||||
"key1": "newvalue1", // Updated
|
||||
"key4": "newvalue4", // New key (key2 and key3 removed)
|
||||
}
|
||||
|
||||
err = UpsertRepositoryAnnotations(db, "did:plc:charlie789", "updateapp", updated)
|
||||
if err != nil {
|
||||
t.Fatalf("Update UpsertRepositoryAnnotations() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify annotations were replaced
|
||||
retrieved, err := GetRepositoryAnnotations(db, "did:plc:charlie789", "updateapp")
|
||||
if err != nil {
|
||||
t.Fatalf("GetRepositoryAnnotations() error = %v", err)
|
||||
}
|
||||
|
||||
if len(retrieved) != len(updated) {
|
||||
t.Errorf("Expected %d annotations, got %d", len(updated), len(retrieved))
|
||||
}
|
||||
|
||||
// Verify new values
|
||||
if retrieved["key1"] != "newvalue1" {
|
||||
t.Errorf("key1 = %v, want newvalue1", retrieved["key1"])
|
||||
}
|
||||
if retrieved["key4"] != "newvalue4" {
|
||||
t.Errorf("key4 = %v, want newvalue4", retrieved["key4"])
|
||||
}
|
||||
|
||||
// Verify old keys were removed
|
||||
if _, exists := retrieved["key2"]; exists {
|
||||
t.Error("key2 should have been removed")
|
||||
}
|
||||
if _, exists := retrieved["key3"]; exists {
|
||||
t.Error("key3 should have been removed")
|
||||
}
|
||||
}
|
||||
|
||||
// TestUpsertRepositoryAnnotations_EmptyMap tests upserting with empty map
|
||||
func TestUpsertRepositoryAnnotations_EmptyMap(t *testing.T) {
|
||||
db := setupAnnotationsTestDB(t)
|
||||
createAnnotationTestUser(t, db, "did:plc:dave111", "dave.bsky.social")
|
||||
|
||||
// Insert initial annotations
|
||||
initial := map[string]string{
|
||||
"key1": "value1",
|
||||
"key2": "value2",
|
||||
}
|
||||
|
||||
err := UpsertRepositoryAnnotations(db, "did:plc:dave111", "emptyapp", initial)
|
||||
if err != nil {
|
||||
t.Fatalf("Initial UpsertRepositoryAnnotations() error = %v", err)
|
||||
}
|
||||
|
||||
// Upsert with empty map (should delete all)
|
||||
empty := make(map[string]string)
|
||||
|
||||
err = UpsertRepositoryAnnotations(db, "did:plc:dave111", "emptyapp", empty)
|
||||
if err != nil {
|
||||
t.Fatalf("Empty UpsertRepositoryAnnotations() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify all annotations were deleted
|
||||
retrieved, err := GetRepositoryAnnotations(db, "did:plc:dave111", "emptyapp")
|
||||
if err != nil {
|
||||
t.Fatalf("GetRepositoryAnnotations() error = %v", err)
|
||||
}
|
||||
|
||||
if len(retrieved) != 0 {
|
||||
t.Errorf("Expected 0 annotations after empty upsert, got %d", len(retrieved))
|
||||
}
|
||||
}
|
||||
|
||||
// TestUpsertRepositoryAnnotations_MultipleRepos tests isolation between repositories
|
||||
func TestUpsertRepositoryAnnotations_MultipleRepos(t *testing.T) {
|
||||
db := setupAnnotationsTestDB(t)
|
||||
createAnnotationTestUser(t, db, "did:plc:eve222", "eve.bsky.social")
|
||||
|
||||
// Insert annotations for repo1
|
||||
repo1Annotations := map[string]string{
|
||||
"repo": "repo1",
|
||||
"key1": "value1",
|
||||
}
|
||||
err := UpsertRepositoryAnnotations(db, "did:plc:eve222", "repo1", repo1Annotations)
|
||||
if err != nil {
|
||||
t.Fatalf("UpsertRepositoryAnnotations(repo1) error = %v", err)
|
||||
}
|
||||
|
||||
// Insert annotations for repo2 (same DID, different repo)
|
||||
repo2Annotations := map[string]string{
|
||||
"repo": "repo2",
|
||||
"key2": "value2",
|
||||
}
|
||||
err = UpsertRepositoryAnnotations(db, "did:plc:eve222", "repo2", repo2Annotations)
|
||||
if err != nil {
|
||||
t.Fatalf("UpsertRepositoryAnnotations(repo2) error = %v", err)
|
||||
}
|
||||
|
||||
// Verify repo1 annotations unchanged
|
||||
retrieved1, err := GetRepositoryAnnotations(db, "did:plc:eve222", "repo1")
|
||||
if err != nil {
|
||||
t.Fatalf("GetRepositoryAnnotations(repo1) error = %v", err)
|
||||
}
|
||||
if len(retrieved1) != len(repo1Annotations) {
|
||||
t.Errorf("repo1: Expected %d annotations, got %d", len(repo1Annotations), len(retrieved1))
|
||||
}
|
||||
if retrieved1["repo"] != "repo1" {
|
||||
t.Errorf("repo1: Expected repo=repo1, got %v", retrieved1["repo"])
|
||||
}
|
||||
|
||||
// Verify repo2 annotations
|
||||
retrieved2, err := GetRepositoryAnnotations(db, "did:plc:eve222", "repo2")
|
||||
if err != nil {
|
||||
t.Fatalf("GetRepositoryAnnotations(repo2) error = %v", err)
|
||||
}
|
||||
if len(retrieved2) != len(repo2Annotations) {
|
||||
t.Errorf("repo2: Expected %d annotations, got %d", len(repo2Annotations), len(retrieved2))
|
||||
}
|
||||
if retrieved2["repo"] != "repo2" {
|
||||
t.Errorf("repo2: Expected repo=repo2, got %v", retrieved2["repo"])
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeleteRepositoryAnnotations tests deleting annotations
|
||||
func TestDeleteRepositoryAnnotations(t *testing.T) {
|
||||
db := setupAnnotationsTestDB(t)
|
||||
createAnnotationTestUser(t, db, "did:plc:frank333", "frank.bsky.social")
|
||||
|
||||
// Insert annotations
|
||||
annotations := map[string]string{
|
||||
"key1": "value1",
|
||||
"key2": "value2",
|
||||
}
|
||||
err := UpsertRepositoryAnnotations(db, "did:plc:frank333", "deleteapp", annotations)
|
||||
if err != nil {
|
||||
t.Fatalf("UpsertRepositoryAnnotations() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify annotations exist
|
||||
retrieved, err := GetRepositoryAnnotations(db, "did:plc:frank333", "deleteapp")
|
||||
if err != nil {
|
||||
t.Fatalf("GetRepositoryAnnotations() error = %v", err)
|
||||
}
|
||||
if len(retrieved) != 2 {
|
||||
t.Fatalf("Expected 2 annotations before delete, got %d", len(retrieved))
|
||||
}
|
||||
|
||||
// Delete annotations
|
||||
err = DeleteRepositoryAnnotations(db, "did:plc:frank333", "deleteapp")
|
||||
if err != nil {
|
||||
t.Fatalf("DeleteRepositoryAnnotations() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify annotations were deleted
|
||||
retrieved, err = GetRepositoryAnnotations(db, "did:plc:frank333", "deleteapp")
|
||||
if err != nil {
|
||||
t.Fatalf("GetRepositoryAnnotations() after delete error = %v", err)
|
||||
}
|
||||
if len(retrieved) != 0 {
|
||||
t.Errorf("Expected 0 annotations after delete, got %d", len(retrieved))
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeleteRepositoryAnnotations_NonExistent tests deleting non-existent annotations
|
||||
func TestDeleteRepositoryAnnotations_NonExistent(t *testing.T) {
|
||||
db := setupAnnotationsTestDB(t)
|
||||
|
||||
// Delete from non-existent repository (should not error)
|
||||
err := DeleteRepositoryAnnotations(db, "did:plc:ghost999", "nonexistent")
|
||||
if err != nil {
|
||||
t.Errorf("DeleteRepositoryAnnotations() for non-existent repo should not error, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAnnotations_DifferentDIDs tests isolation between different DIDs
|
||||
func TestAnnotations_DifferentDIDs(t *testing.T) {
|
||||
db := setupAnnotationsTestDB(t)
|
||||
createAnnotationTestUser(t, db, "did:plc:alice123", "alice.bsky.social")
|
||||
createAnnotationTestUser(t, db, "did:plc:bob456", "bob.bsky.social")
|
||||
|
||||
// Insert annotations for alice
|
||||
aliceAnnotations := map[string]string{
|
||||
"owner": "alice",
|
||||
"key1": "alice-value1",
|
||||
}
|
||||
err := UpsertRepositoryAnnotations(db, "did:plc:alice123", "sharedname", aliceAnnotations)
|
||||
if err != nil {
|
||||
t.Fatalf("UpsertRepositoryAnnotations(alice) error = %v", err)
|
||||
}
|
||||
|
||||
// Insert annotations for bob (same repo name, different DID)
|
||||
bobAnnotations := map[string]string{
|
||||
"owner": "bob",
|
||||
"key1": "bob-value1",
|
||||
}
|
||||
err = UpsertRepositoryAnnotations(db, "did:plc:bob456", "sharedname", bobAnnotations)
|
||||
if err != nil {
|
||||
t.Fatalf("UpsertRepositoryAnnotations(bob) error = %v", err)
|
||||
}
|
||||
|
||||
// Verify alice's annotations unchanged
|
||||
aliceRetrieved, err := GetRepositoryAnnotations(db, "did:plc:alice123", "sharedname")
|
||||
if err != nil {
|
||||
t.Fatalf("GetRepositoryAnnotations(alice) error = %v", err)
|
||||
}
|
||||
if aliceRetrieved["owner"] != "alice" {
|
||||
t.Errorf("alice: Expected owner=alice, got %v", aliceRetrieved["owner"])
|
||||
}
|
||||
|
||||
// Verify bob's annotations
|
||||
bobRetrieved, err := GetRepositoryAnnotations(db, "did:plc:bob456", "sharedname")
|
||||
if err != nil {
|
||||
t.Fatalf("GetRepositoryAnnotations(bob) error = %v", err)
|
||||
}
|
||||
if bobRetrieved["owner"] != "bob" {
|
||||
t.Errorf("bob: Expected owner=bob, got %v", bobRetrieved["owner"])
|
||||
}
|
||||
}
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"database/sql"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -108,7 +109,7 @@ func (s *DeviceStore) GetPendingByUserCode(userCode string) (*PendingAuthorizati
|
||||
return nil, false
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Printf("Warning: Failed to query pending auth: %v\n", err)
|
||||
slog.Warn("Failed to query pending auth", "component", "device_store", "error", err)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
@@ -144,7 +145,7 @@ func (s *DeviceStore) GetPendingByDeviceCode(deviceCode string) (*PendingAuthori
|
||||
return nil, false
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Printf("Warning: Failed to query pending auth: %v\n", err)
|
||||
slog.Warn("Failed to query pending auth", "component", "device_store", "error", err)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
@@ -382,13 +383,13 @@ func (s *DeviceStore) CleanupExpired() {
|
||||
`)
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("Warning: Failed to cleanup expired pending auths: %v\n", err)
|
||||
slog.Warn("Failed to cleanup expired pending auths", "component", "device_store", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
deleted, _ := result.RowsAffected()
|
||||
if deleted > 0 {
|
||||
fmt.Printf("Cleaned up %d expired pending device auths\n", deleted)
|
||||
slog.Info("Cleaned up expired pending device auths", "count", deleted)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -405,7 +406,7 @@ func (s *DeviceStore) CleanupExpiredContext(ctx context.Context) error {
|
||||
|
||||
deleted, _ := result.RowsAffected()
|
||||
if deleted > 0 {
|
||||
fmt.Printf("Cleaned up %d expired pending device auths\n", deleted)
|
||||
slog.Info("Cleaned up expired pending device auths", "count", deleted)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -415,9 +416,15 @@ func (s *DeviceStore) CleanupExpiredContext(ctx context.Context) error {
|
||||
// Format: XXXX-XXXX (e.g., "WDJB-MJHT")
|
||||
// Character set: A-Z excluding ambiguous chars (0, O, I, 1, L)
|
||||
func generateUserCode() string {
|
||||
chars := "ABCDEFGHJKLMNPQRSTUVWXYZ23456789"
|
||||
chars := "ABCDEFGHJKMNPQRSTUVWXYZ23456789"
|
||||
code := make([]byte, 8)
|
||||
rand.Read(code)
|
||||
if _, err := rand.Read(code); err != nil {
|
||||
// Fallback to timestamp-based generation if crypto rand fails
|
||||
now := time.Now().UnixNano()
|
||||
for i := range code {
|
||||
code[i] = byte(now >> (i * 8))
|
||||
}
|
||||
}
|
||||
for i := range code {
|
||||
code[i] = chars[int(code[i])%len(chars)]
|
||||
}
|
||||
|
||||
635
pkg/appview/db/device_store_test.go
Normal file
635
pkg/appview/db/device_store_test.go
Normal file
@@ -0,0 +1,635 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
)
|
||||
|
||||
// setupTestDB creates an in-memory SQLite database for testing
|
||||
func setupTestDB(t *testing.T) *DeviceStore {
|
||||
t.Helper()
|
||||
// Use file::memory: with cache=shared to ensure all connections share the same in-memory DB
|
||||
// This prevents race conditions where different connections see different databases
|
||||
db, err := InitDB("file::memory:?cache=shared", true)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to initialize test database: %v", err)
|
||||
}
|
||||
|
||||
// Limit to single connection to avoid race conditions in tests
|
||||
db.SetMaxOpenConns(1)
|
||||
|
||||
t.Cleanup(func() {
|
||||
db.Close()
|
||||
})
|
||||
return NewDeviceStore(db)
|
||||
}
|
||||
|
||||
// createTestUser creates a test user in the database
|
||||
func createTestUser(t *testing.T, store *DeviceStore, did, handle string) {
|
||||
t.Helper()
|
||||
_, err := store.db.Exec(`
|
||||
INSERT OR IGNORE INTO users (did, handle, pds_endpoint, last_seen)
|
||||
VALUES (?, ?, ?, datetime('now'))
|
||||
`, did, handle, "https://pds.example.com")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create test user: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDevice_Struct(t *testing.T) {
|
||||
device := &Device{
|
||||
DID: "did:plc:test",
|
||||
Handle: "alice.bsky.social",
|
||||
Name: "My Device",
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
|
||||
if device.DID != "did:plc:test" {
|
||||
t.Errorf("Expected DID, got %q", device.DID)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateUserCode(t *testing.T) {
|
||||
// Generate multiple codes to test
|
||||
codes := make(map[string]bool)
|
||||
for i := 0; i < 100; i++ {
|
||||
code := generateUserCode()
|
||||
|
||||
// Test format: XXXX-XXXX
|
||||
if len(code) != 9 {
|
||||
t.Errorf("Expected code length 9, got %d for code %q", len(code), code)
|
||||
}
|
||||
|
||||
if code[4] != '-' {
|
||||
t.Errorf("Expected hyphen at position 4, got %q", string(code[4]))
|
||||
}
|
||||
|
||||
// Test valid characters (A-Z, 2-9, no ambiguous chars)
|
||||
validChars := "ABCDEFGHJKLMNPQRSTUVWXYZ23456789"
|
||||
parts := strings.Split(code, "-")
|
||||
if len(parts) != 2 {
|
||||
t.Errorf("Expected 2 parts separated by hyphen, got %d", len(parts))
|
||||
}
|
||||
|
||||
for _, part := range parts {
|
||||
for _, ch := range part {
|
||||
if !strings.ContainsRune(validChars, ch) {
|
||||
t.Errorf("Invalid character %q in code %q", ch, code)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test uniqueness (should be very rare to get duplicates)
|
||||
if codes[code] {
|
||||
t.Logf("Warning: duplicate code generated: %q (rare but possible)", code)
|
||||
}
|
||||
codes[code] = true
|
||||
}
|
||||
|
||||
// Verify we got mostly unique codes (at least 95%)
|
||||
if len(codes) < 95 {
|
||||
t.Errorf("Expected at least 95 unique codes out of 100, got %d", len(codes))
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateUserCode_Format(t *testing.T) {
|
||||
code := generateUserCode()
|
||||
|
||||
// Test exact format
|
||||
if len(code) != 9 {
|
||||
t.Fatal("Code must be exactly 9 characters")
|
||||
}
|
||||
|
||||
if code[4] != '-' {
|
||||
t.Fatal("Character at index 4 must be hyphen")
|
||||
}
|
||||
|
||||
// Test no ambiguous characters (O, 0, I, 1, L)
|
||||
ambiguous := "O01IL"
|
||||
for _, ch := range code {
|
||||
if strings.ContainsRune(ambiguous, ch) {
|
||||
t.Errorf("Code contains ambiguous character %q: %s", ch, code)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeviceStore_CreatePendingAuth tests creating pending authorization
|
||||
func TestDeviceStore_CreatePendingAuth(t *testing.T) {
|
||||
store := setupTestDB(t)
|
||||
|
||||
pending, err := store.CreatePendingAuth("My Device", "192.168.1.1", "Test Agent")
|
||||
if err != nil {
|
||||
t.Fatalf("CreatePendingAuth() error = %v", err)
|
||||
}
|
||||
|
||||
if pending.DeviceCode == "" {
|
||||
t.Error("DeviceCode should not be empty")
|
||||
}
|
||||
if pending.UserCode == "" {
|
||||
t.Error("UserCode should not be empty")
|
||||
}
|
||||
if pending.DeviceName != "My Device" {
|
||||
t.Errorf("DeviceName = %v, want My Device", pending.DeviceName)
|
||||
}
|
||||
if pending.IPAddress != "192.168.1.1" {
|
||||
t.Errorf("IPAddress = %v, want 192.168.1.1", pending.IPAddress)
|
||||
}
|
||||
if pending.UserAgent != "Test Agent" {
|
||||
t.Errorf("UserAgent = %v, want Test Agent", pending.UserAgent)
|
||||
}
|
||||
if pending.ExpiresAt.Before(time.Now()) {
|
||||
t.Error("ExpiresAt should be in the future")
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeviceStore_GetPendingByUserCode tests retrieving pending auth by user code
|
||||
func TestDeviceStore_GetPendingByUserCode(t *testing.T) {
|
||||
store := setupTestDB(t)
|
||||
|
||||
// Create pending auth
|
||||
created, err := store.CreatePendingAuth("My Device", "192.168.1.1", "Test Agent")
|
||||
if err != nil {
|
||||
t.Fatalf("CreatePendingAuth() error = %v", err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
userCode string
|
||||
wantFound bool
|
||||
}{
|
||||
{
|
||||
name: "existing user code",
|
||||
userCode: created.UserCode,
|
||||
wantFound: true,
|
||||
},
|
||||
{
|
||||
name: "non-existent user code",
|
||||
userCode: "AAAA-BBBB",
|
||||
wantFound: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
pending, found := store.GetPendingByUserCode(tt.userCode)
|
||||
if found != tt.wantFound {
|
||||
t.Errorf("GetPendingByUserCode() found = %v, want %v", found, tt.wantFound)
|
||||
}
|
||||
if tt.wantFound && pending == nil {
|
||||
t.Error("Expected pending auth, got nil")
|
||||
}
|
||||
if tt.wantFound && pending != nil {
|
||||
if pending.DeviceName != "My Device" {
|
||||
t.Errorf("DeviceName = %v, want My Device", pending.DeviceName)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeviceStore_GetPendingByDeviceCode tests retrieving pending auth by device code
|
||||
func TestDeviceStore_GetPendingByDeviceCode(t *testing.T) {
|
||||
store := setupTestDB(t)
|
||||
|
||||
// Create pending auth
|
||||
created, err := store.CreatePendingAuth("My Device", "192.168.1.1", "Test Agent")
|
||||
if err != nil {
|
||||
t.Fatalf("CreatePendingAuth() error = %v", err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
deviceCode string
|
||||
wantFound bool
|
||||
}{
|
||||
{
|
||||
name: "existing device code",
|
||||
deviceCode: created.DeviceCode,
|
||||
wantFound: true,
|
||||
},
|
||||
{
|
||||
name: "non-existent device code",
|
||||
deviceCode: "invalidcode",
|
||||
wantFound: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
pending, found := store.GetPendingByDeviceCode(tt.deviceCode)
|
||||
if found != tt.wantFound {
|
||||
t.Errorf("GetPendingByDeviceCode() found = %v, want %v", found, tt.wantFound)
|
||||
}
|
||||
if tt.wantFound && pending == nil {
|
||||
t.Error("Expected pending auth, got nil")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeviceStore_ApprovePending tests approving pending authorization
|
||||
func TestDeviceStore_ApprovePending(t *testing.T) {
|
||||
store := setupTestDB(t)
|
||||
|
||||
// Create test users
|
||||
createTestUser(t, store, "did:plc:alice123", "alice.bsky.social")
|
||||
createTestUser(t, store, "did:plc:bob123", "bob.bsky.social")
|
||||
|
||||
// Create pending auth
|
||||
pending, err := store.CreatePendingAuth("My Device", "192.168.1.1", "Test Agent")
|
||||
if err != nil {
|
||||
t.Fatalf("CreatePendingAuth() error = %v", err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
userCode string
|
||||
did string
|
||||
handle string
|
||||
wantErr bool
|
||||
errString string
|
||||
}{
|
||||
{
|
||||
name: "successful approval",
|
||||
userCode: pending.UserCode,
|
||||
did: "did:plc:alice123",
|
||||
handle: "alice.bsky.social",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "non-existent user code",
|
||||
userCode: "AAAA-BBBB",
|
||||
did: "did:plc:bob123",
|
||||
handle: "bob.bsky.social",
|
||||
wantErr: true,
|
||||
errString: "not found",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
secret, err := store.ApprovePending(tt.userCode, tt.did, tt.handle)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ApprovePending() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !tt.wantErr {
|
||||
if secret == "" {
|
||||
t.Error("Expected device secret, got empty string")
|
||||
}
|
||||
if !strings.HasPrefix(secret, "atcr_device_") {
|
||||
t.Errorf("Secret should start with atcr_device_, got %v", secret)
|
||||
}
|
||||
|
||||
// Verify device was created
|
||||
devices := store.ListDevices(tt.did)
|
||||
if len(devices) != 1 {
|
||||
t.Errorf("Expected 1 device, got %d", len(devices))
|
||||
}
|
||||
}
|
||||
if tt.wantErr && tt.errString != "" && err != nil {
|
||||
if !strings.Contains(err.Error(), tt.errString) {
|
||||
t.Errorf("Error should contain %q, got %v", tt.errString, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeviceStore_ApprovePending_AlreadyApproved tests double approval
|
||||
func TestDeviceStore_ApprovePending_AlreadyApproved(t *testing.T) {
|
||||
store := setupTestDB(t)
|
||||
createTestUser(t, store, "did:plc:alice123", "alice.bsky.social")
|
||||
|
||||
pending, err := store.CreatePendingAuth("My Device", "192.168.1.1", "Test Agent")
|
||||
if err != nil {
|
||||
t.Fatalf("CreatePendingAuth() error = %v", err)
|
||||
}
|
||||
|
||||
// First approval
|
||||
_, err = store.ApprovePending(pending.UserCode, "did:plc:alice123", "alice.bsky.social")
|
||||
if err != nil {
|
||||
t.Fatalf("First ApprovePending() error = %v", err)
|
||||
}
|
||||
|
||||
// Second approval should fail
|
||||
_, err = store.ApprovePending(pending.UserCode, "did:plc:alice123", "alice.bsky.social")
|
||||
if err == nil {
|
||||
t.Error("Expected error for double approval, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "already approved") {
|
||||
t.Errorf("Error should contain 'already approved', got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeviceStore_ValidateDeviceSecret tests device secret validation
|
||||
func TestDeviceStore_ValidateDeviceSecret(t *testing.T) {
|
||||
store := setupTestDB(t)
|
||||
createTestUser(t, store, "did:plc:alice123", "alice.bsky.social")
|
||||
|
||||
// Create and approve a device
|
||||
pending, err := store.CreatePendingAuth("My Device", "192.168.1.1", "Test Agent")
|
||||
if err != nil {
|
||||
t.Fatalf("CreatePendingAuth() error = %v", err)
|
||||
}
|
||||
|
||||
secret, err := store.ApprovePending(pending.UserCode, "did:plc:alice123", "alice.bsky.social")
|
||||
if err != nil {
|
||||
t.Fatalf("ApprovePending() error = %v", err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
secret string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "valid secret",
|
||||
secret: secret,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid secret",
|
||||
secret: "atcr_device_invalid",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "empty secret",
|
||||
secret: "",
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
device, err := store.ValidateDeviceSecret(tt.secret)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ValidateDeviceSecret() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !tt.wantErr {
|
||||
if device == nil {
|
||||
t.Error("Expected device, got nil")
|
||||
}
|
||||
if device.DID != "did:plc:alice123" {
|
||||
t.Errorf("DID = %v, want did:plc:alice123", device.DID)
|
||||
}
|
||||
if device.Name != "My Device" {
|
||||
t.Errorf("Name = %v, want My Device", device.Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeviceStore_ListDevices tests listing devices
|
||||
func TestDeviceStore_ListDevices(t *testing.T) {
|
||||
store := setupTestDB(t)
|
||||
did := "did:plc:alice123"
|
||||
createTestUser(t, store, did, "alice.bsky.social")
|
||||
|
||||
// Initially empty
|
||||
devices := store.ListDevices(did)
|
||||
if len(devices) != 0 {
|
||||
t.Errorf("Expected 0 devices initially, got %d", len(devices))
|
||||
}
|
||||
|
||||
// Create 3 devices
|
||||
for i := 0; i < 3; i++ {
|
||||
pending, err := store.CreatePendingAuth("Device "+string(rune('A'+i)), "192.168.1.1", "Agent")
|
||||
if err != nil {
|
||||
t.Fatalf("CreatePendingAuth() error = %v", err)
|
||||
}
|
||||
_, err = store.ApprovePending(pending.UserCode, did, "alice.bsky.social")
|
||||
if err != nil {
|
||||
t.Fatalf("ApprovePending() error = %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// List devices
|
||||
devices = store.ListDevices(did)
|
||||
if len(devices) != 3 {
|
||||
t.Errorf("Expected 3 devices, got %d", len(devices))
|
||||
}
|
||||
|
||||
// Verify they're sorted by created_at DESC (newest first)
|
||||
for i := 0; i < len(devices)-1; i++ {
|
||||
if devices[i].CreatedAt.Before(devices[i+1].CreatedAt) {
|
||||
t.Error("Devices should be sorted by created_at DESC")
|
||||
}
|
||||
}
|
||||
|
||||
// List devices for different DID
|
||||
otherDevices := store.ListDevices("did:plc:bob123")
|
||||
if len(otherDevices) != 0 {
|
||||
t.Errorf("Expected 0 devices for different DID, got %d", len(otherDevices))
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeviceStore_RevokeDevice tests revoking a device
|
||||
func TestDeviceStore_RevokeDevice(t *testing.T) {
|
||||
store := setupTestDB(t)
|
||||
did := "did:plc:alice123"
|
||||
createTestUser(t, store, did, "alice.bsky.social")
|
||||
|
||||
// Create device
|
||||
pending, err := store.CreatePendingAuth("My Device", "192.168.1.1", "Test Agent")
|
||||
if err != nil {
|
||||
t.Fatalf("CreatePendingAuth() error = %v", err)
|
||||
}
|
||||
_, err = store.ApprovePending(pending.UserCode, did, "alice.bsky.social")
|
||||
if err != nil {
|
||||
t.Fatalf("ApprovePending() error = %v", err)
|
||||
}
|
||||
|
||||
devices := store.ListDevices(did)
|
||||
if len(devices) != 1 {
|
||||
t.Fatalf("Expected 1 device, got %d", len(devices))
|
||||
}
|
||||
deviceID := devices[0].ID
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
did string
|
||||
deviceID string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "successful revocation",
|
||||
did: did,
|
||||
deviceID: deviceID,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "non-existent device",
|
||||
did: did,
|
||||
deviceID: "non-existent-id",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "wrong DID",
|
||||
did: "did:plc:bob123",
|
||||
deviceID: deviceID,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := store.RevokeDevice(tt.did, tt.deviceID)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("RevokeDevice() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Verify device was removed (after first successful test)
|
||||
devices = store.ListDevices(did)
|
||||
if len(devices) != 0 {
|
||||
t.Errorf("Expected 0 devices after revocation, got %d", len(devices))
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeviceStore_UpdateLastUsed tests updating last used timestamp
|
||||
func TestDeviceStore_UpdateLastUsed(t *testing.T) {
|
||||
store := setupTestDB(t)
|
||||
createTestUser(t, store, "did:plc:alice123", "alice.bsky.social")
|
||||
|
||||
// Create device
|
||||
pending, err := store.CreatePendingAuth("My Device", "192.168.1.1", "Test Agent")
|
||||
if err != nil {
|
||||
t.Fatalf("CreatePendingAuth() error = %v", err)
|
||||
}
|
||||
secret, err := store.ApprovePending(pending.UserCode, "did:plc:alice123", "alice.bsky.social")
|
||||
if err != nil {
|
||||
t.Fatalf("ApprovePending() error = %v", err)
|
||||
}
|
||||
|
||||
// Get device to get secret hash
|
||||
device, err := store.ValidateDeviceSecret(secret)
|
||||
if err != nil {
|
||||
t.Fatalf("ValidateDeviceSecret() error = %v", err)
|
||||
}
|
||||
|
||||
initialLastUsed := device.LastUsed
|
||||
|
||||
// Wait a bit to ensure timestamp difference
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Update last used
|
||||
err = store.UpdateLastUsed(device.SecretHash)
|
||||
if err != nil {
|
||||
t.Errorf("UpdateLastUsed() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify it was updated
|
||||
device2, err := store.ValidateDeviceSecret(secret)
|
||||
if err != nil {
|
||||
t.Fatalf("ValidateDeviceSecret() error = %v", err)
|
||||
}
|
||||
|
||||
if !device2.LastUsed.After(initialLastUsed) {
|
||||
t.Error("LastUsed should be updated to later time")
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeviceStore_CleanupExpired tests cleanup of expired pending auths
|
||||
func TestDeviceStore_CleanupExpired(t *testing.T) {
|
||||
store := setupTestDB(t)
|
||||
|
||||
// Create pending auth with manual expiration time
|
||||
pending, err := store.CreatePendingAuth("My Device", "192.168.1.1", "Test Agent")
|
||||
if err != nil {
|
||||
t.Fatalf("CreatePendingAuth() error = %v", err)
|
||||
}
|
||||
|
||||
// Manually update expiration to the past
|
||||
_, err = store.db.Exec(`
|
||||
UPDATE pending_device_auth
|
||||
SET expires_at = datetime('now', '-1 hour')
|
||||
WHERE device_code = ?
|
||||
`, pending.DeviceCode)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to update expiration: %v", err)
|
||||
}
|
||||
|
||||
// Run cleanup
|
||||
store.CleanupExpired()
|
||||
|
||||
// Verify it was deleted
|
||||
_, found := store.GetPendingByDeviceCode(pending.DeviceCode)
|
||||
if found {
|
||||
t.Error("Expired pending auth should have been cleaned up")
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeviceStore_CleanupExpiredContext tests context-aware cleanup
|
||||
func TestDeviceStore_CleanupExpiredContext(t *testing.T) {
|
||||
store := setupTestDB(t)
|
||||
|
||||
// Create and expire pending auth
|
||||
pending, err := store.CreatePendingAuth("My Device", "192.168.1.1", "Test Agent")
|
||||
if err != nil {
|
||||
t.Fatalf("CreatePendingAuth() error = %v", err)
|
||||
}
|
||||
|
||||
_, err = store.db.Exec(`
|
||||
UPDATE pending_device_auth
|
||||
SET expires_at = datetime('now', '-1 hour')
|
||||
WHERE device_code = ?
|
||||
`, pending.DeviceCode)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to update expiration: %v", err)
|
||||
}
|
||||
|
||||
// Run context-aware cleanup
|
||||
ctx := context.Background()
|
||||
err = store.CleanupExpiredContext(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("CleanupExpiredContext() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify it was deleted
|
||||
_, found := store.GetPendingByDeviceCode(pending.DeviceCode)
|
||||
if found {
|
||||
t.Error("Expired pending auth should have been cleaned up")
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeviceStore_SecretHashing tests bcrypt hashing
|
||||
func TestDeviceStore_SecretHashing(t *testing.T) {
|
||||
store := setupTestDB(t)
|
||||
createTestUser(t, store, "did:plc:alice123", "alice.bsky.social")
|
||||
|
||||
pending, err := store.CreatePendingAuth("My Device", "192.168.1.1", "Test Agent")
|
||||
if err != nil {
|
||||
t.Fatalf("CreatePendingAuth() error = %v", err)
|
||||
}
|
||||
|
||||
secret, err := store.ApprovePending(pending.UserCode, "did:plc:alice123", "alice.bsky.social")
|
||||
if err != nil {
|
||||
t.Fatalf("ApprovePending() error = %v", err)
|
||||
}
|
||||
|
||||
// Get device via ValidateDeviceSecret to access secret hash
|
||||
device, err := store.ValidateDeviceSecret(secret)
|
||||
if err != nil {
|
||||
t.Fatalf("ValidateDeviceSecret() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify bcrypt hash is valid
|
||||
err = bcrypt.CompareHashAndPassword([]byte(device.SecretHash), []byte(secret))
|
||||
if err != nil {
|
||||
t.Error("Secret hash should match secret")
|
||||
}
|
||||
|
||||
// Verify wrong secret doesn't match
|
||||
err = bcrypt.CompareHashAndPassword([]byte(device.SecretHash), []byte("wrong_secret"))
|
||||
if err == nil {
|
||||
t.Error("Wrong secret should not match hash")
|
||||
}
|
||||
}
|
||||
138
pkg/appview/db/hold_store.go
Normal file
138
pkg/appview/db/hold_store.go
Normal file
@@ -0,0 +1,138 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HoldCaptainRecord represents a cached captain record from a hold's PDS
|
||||
type HoldCaptainRecord struct {
|
||||
HoldDID string `json:"-"` // Set manually, not from JSON
|
||||
OwnerDID string `json:"owner"`
|
||||
Public bool `json:"public"`
|
||||
AllowAllCrew bool `json:"allowAllCrew"`
|
||||
DeployedAt string `json:"deployedAt"`
|
||||
Region string `json:"region"`
|
||||
Provider string `json:"provider"`
|
||||
UpdatedAt time.Time `json:"-"` // Set manually, not from JSON
|
||||
}
|
||||
|
||||
// GetCaptainRecord retrieves a captain record from the cache
|
||||
// Returns nil if not found (cache miss)
|
||||
func GetCaptainRecord(db *sql.DB, holdDID string) (*HoldCaptainRecord, error) {
|
||||
query := `
|
||||
SELECT hold_did, owner_did, public, allow_all_crew,
|
||||
deployed_at, region, provider, updated_at
|
||||
FROM hold_captain_records
|
||||
WHERE hold_did = ?
|
||||
`
|
||||
|
||||
var record HoldCaptainRecord
|
||||
var deployedAt, region, provider sql.NullString
|
||||
|
||||
err := db.QueryRow(query, holdDID).Scan(
|
||||
&record.HoldDID,
|
||||
&record.OwnerDID,
|
||||
&record.Public,
|
||||
&record.AllowAllCrew,
|
||||
&deployedAt,
|
||||
®ion,
|
||||
&provider,
|
||||
&record.UpdatedAt,
|
||||
)
|
||||
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil // Cache miss - not an error
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query captain record: %w", err)
|
||||
}
|
||||
|
||||
// Handle nullable fields
|
||||
if deployedAt.Valid {
|
||||
record.DeployedAt = deployedAt.String
|
||||
}
|
||||
if region.Valid {
|
||||
record.Region = region.String
|
||||
}
|
||||
if provider.Valid {
|
||||
record.Provider = provider.String
|
||||
}
|
||||
|
||||
return &record, nil
|
||||
}
|
||||
|
||||
// UpsertCaptainRecord inserts or updates a captain record in the cache
|
||||
func UpsertCaptainRecord(db *sql.DB, record *HoldCaptainRecord) error {
|
||||
query := `
|
||||
INSERT INTO hold_captain_records (
|
||||
hold_did, owner_did, public, allow_all_crew,
|
||||
deployed_at, region, provider, updated_at
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
||||
ON CONFLICT(hold_did) DO UPDATE SET
|
||||
owner_did = excluded.owner_did,
|
||||
public = excluded.public,
|
||||
allow_all_crew = excluded.allow_all_crew,
|
||||
deployed_at = excluded.deployed_at,
|
||||
region = excluded.region,
|
||||
provider = excluded.provider,
|
||||
updated_at = excluded.updated_at
|
||||
`
|
||||
|
||||
_, err := db.Exec(query,
|
||||
record.HoldDID,
|
||||
record.OwnerDID,
|
||||
record.Public,
|
||||
record.AllowAllCrew,
|
||||
nullString(record.DeployedAt),
|
||||
nullString(record.Region),
|
||||
nullString(record.Provider),
|
||||
record.UpdatedAt,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to upsert captain record: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListHoldDIDs returns all known hold DIDs from the cache
|
||||
func ListHoldDIDs(db *sql.DB) ([]string, error) {
|
||||
query := `
|
||||
SELECT hold_did
|
||||
FROM hold_captain_records
|
||||
ORDER BY updated_at DESC
|
||||
`
|
||||
|
||||
rows, err := db.Query(query)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query hold DIDs: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var holdDIDs []string
|
||||
for rows.Next() {
|
||||
var holdDID string
|
||||
if err := rows.Scan(&holdDID); err != nil {
|
||||
return nil, fmt.Errorf("failed to scan hold DID: %w", err)
|
||||
}
|
||||
holdDIDs = append(holdDIDs, holdDID)
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("error iterating hold DIDs: %w", err)
|
||||
}
|
||||
|
||||
return holdDIDs, nil
|
||||
}
|
||||
|
||||
// nullString converts a string to sql.NullString
|
||||
func nullString(s string) sql.NullString {
|
||||
if s == "" {
|
||||
return sql.NullString{Valid: false}
|
||||
}
|
||||
return sql.NullString{String: s, Valid: true}
|
||||
}
|
||||
477
pkg/appview/db/hold_store_test.go
Normal file
477
pkg/appview/db/hold_store_test.go
Normal file
@@ -0,0 +1,477 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestNullString(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expectedValid bool
|
||||
expectedStr string
|
||||
}{
|
||||
{
|
||||
name: "empty string",
|
||||
input: "",
|
||||
expectedValid: false,
|
||||
expectedStr: "",
|
||||
},
|
||||
{
|
||||
name: "non-empty string",
|
||||
input: "hello",
|
||||
expectedValid: true,
|
||||
expectedStr: "hello",
|
||||
},
|
||||
{
|
||||
name: "whitespace string",
|
||||
input: " ",
|
||||
expectedValid: true,
|
||||
expectedStr: " ",
|
||||
},
|
||||
{
|
||||
name: "single character",
|
||||
input: "a",
|
||||
expectedValid: true,
|
||||
expectedStr: "a",
|
||||
},
|
||||
{
|
||||
name: "newline string",
|
||||
input: "\n",
|
||||
expectedValid: true,
|
||||
expectedStr: "\n",
|
||||
},
|
||||
{
|
||||
name: "tab string",
|
||||
input: "\t",
|
||||
expectedValid: true,
|
||||
expectedStr: "\t",
|
||||
},
|
||||
{
|
||||
name: "DID string",
|
||||
input: "did:plc:abc123",
|
||||
expectedValid: true,
|
||||
expectedStr: "did:plc:abc123",
|
||||
},
|
||||
{
|
||||
name: "URL string",
|
||||
input: "https://example.com",
|
||||
expectedValid: true,
|
||||
expectedStr: "https://example.com",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := nullString(tt.input)
|
||||
if result.Valid != tt.expectedValid {
|
||||
t.Errorf("nullString(%q).Valid = %v, want %v", tt.input, result.Valid, tt.expectedValid)
|
||||
}
|
||||
if result.String != tt.expectedStr {
|
||||
t.Errorf("nullString(%q).String = %q, want %q", tt.input, result.String, tt.expectedStr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Integration tests
|
||||
|
||||
func setupHoldTestDB(t *testing.T) *sql.DB {
|
||||
t.Helper()
|
||||
// Use file::memory: with cache=shared to ensure all connections share the same in-memory DB
|
||||
db, err := InitDB("file::memory:?cache=shared", true)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to initialize test database: %v", err)
|
||||
}
|
||||
// Limit to single connection to avoid race conditions in tests
|
||||
db.SetMaxOpenConns(1)
|
||||
t.Cleanup(func() { db.Close() })
|
||||
return db
|
||||
}
|
||||
|
||||
// TestGetCaptainRecord tests retrieving captain records
|
||||
func TestGetCaptainRecord(t *testing.T) {
|
||||
db := setupHoldTestDB(t)
|
||||
|
||||
// Insert a test record
|
||||
testRecord := &HoldCaptainRecord{
|
||||
HoldDID: "did:web:hold01.atcr.io",
|
||||
OwnerDID: "did:plc:alice123",
|
||||
Public: true,
|
||||
AllowAllCrew: false,
|
||||
DeployedAt: "2025-01-15",
|
||||
Region: "us-west-2",
|
||||
Provider: "aws",
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
|
||||
err := UpsertCaptainRecord(db, testRecord)
|
||||
if err != nil {
|
||||
t.Fatalf("UpsertCaptainRecord() error = %v", err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
holdDID string
|
||||
wantFound bool
|
||||
}{
|
||||
{
|
||||
name: "existing record",
|
||||
holdDID: "did:web:hold01.atcr.io",
|
||||
wantFound: true,
|
||||
},
|
||||
{
|
||||
name: "non-existent record",
|
||||
holdDID: "did:web:unknown.atcr.io",
|
||||
wantFound: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
record, err := GetCaptainRecord(db, tt.holdDID)
|
||||
if err != nil {
|
||||
t.Fatalf("GetCaptainRecord() error = %v", err)
|
||||
}
|
||||
|
||||
if tt.wantFound {
|
||||
if record == nil {
|
||||
t.Error("Expected record, got nil")
|
||||
return
|
||||
}
|
||||
if record.HoldDID != tt.holdDID {
|
||||
t.Errorf("HoldDID = %v, want %v", record.HoldDID, tt.holdDID)
|
||||
}
|
||||
if record.OwnerDID != testRecord.OwnerDID {
|
||||
t.Errorf("OwnerDID = %v, want %v", record.OwnerDID, testRecord.OwnerDID)
|
||||
}
|
||||
if record.Public != testRecord.Public {
|
||||
t.Errorf("Public = %v, want %v", record.Public, testRecord.Public)
|
||||
}
|
||||
if record.AllowAllCrew != testRecord.AllowAllCrew {
|
||||
t.Errorf("AllowAllCrew = %v, want %v", record.AllowAllCrew, testRecord.AllowAllCrew)
|
||||
}
|
||||
if record.DeployedAt != testRecord.DeployedAt {
|
||||
t.Errorf("DeployedAt = %v, want %v", record.DeployedAt, testRecord.DeployedAt)
|
||||
}
|
||||
if record.Region != testRecord.Region {
|
||||
t.Errorf("Region = %v, want %v", record.Region, testRecord.Region)
|
||||
}
|
||||
if record.Provider != testRecord.Provider {
|
||||
t.Errorf("Provider = %v, want %v", record.Provider, testRecord.Provider)
|
||||
}
|
||||
} else {
|
||||
if record != nil {
|
||||
t.Errorf("Expected nil, got record: %+v", record)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetCaptainRecord_NullableFields tests handling of NULL fields
|
||||
func TestGetCaptainRecord_NullableFields(t *testing.T) {
|
||||
db := setupHoldTestDB(t)
|
||||
|
||||
// Insert record with empty nullable fields
|
||||
testRecord := &HoldCaptainRecord{
|
||||
HoldDID: "did:web:hold02.atcr.io",
|
||||
OwnerDID: "did:plc:bob456",
|
||||
Public: false,
|
||||
AllowAllCrew: true,
|
||||
DeployedAt: "", // Empty - should be NULL
|
||||
Region: "", // Empty - should be NULL
|
||||
Provider: "", // Empty - should be NULL
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
|
||||
err := UpsertCaptainRecord(db, testRecord)
|
||||
if err != nil {
|
||||
t.Fatalf("UpsertCaptainRecord() error = %v", err)
|
||||
}
|
||||
|
||||
record, err := GetCaptainRecord(db, testRecord.HoldDID)
|
||||
if err != nil {
|
||||
t.Fatalf("GetCaptainRecord() error = %v", err)
|
||||
}
|
||||
|
||||
if record == nil {
|
||||
t.Fatal("Expected record, got nil")
|
||||
}
|
||||
|
||||
if record.DeployedAt != "" {
|
||||
t.Errorf("DeployedAt = %v, want empty string", record.DeployedAt)
|
||||
}
|
||||
if record.Region != "" {
|
||||
t.Errorf("Region = %v, want empty string", record.Region)
|
||||
}
|
||||
if record.Provider != "" {
|
||||
t.Errorf("Provider = %v, want empty string", record.Provider)
|
||||
}
|
||||
}
|
||||
|
||||
// TestUpsertCaptainRecord_Insert tests inserting new records
|
||||
func TestUpsertCaptainRecord_Insert(t *testing.T) {
|
||||
db := setupHoldTestDB(t)
|
||||
|
||||
record := &HoldCaptainRecord{
|
||||
HoldDID: "did:web:hold03.atcr.io",
|
||||
OwnerDID: "did:plc:charlie789",
|
||||
Public: true,
|
||||
AllowAllCrew: true,
|
||||
DeployedAt: "2025-02-01",
|
||||
Region: "eu-west-1",
|
||||
Provider: "gcp",
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
|
||||
err := UpsertCaptainRecord(db, record)
|
||||
if err != nil {
|
||||
t.Fatalf("UpsertCaptainRecord() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify it was inserted
|
||||
retrieved, err := GetCaptainRecord(db, record.HoldDID)
|
||||
if err != nil {
|
||||
t.Fatalf("GetCaptainRecord() error = %v", err)
|
||||
}
|
||||
|
||||
if retrieved == nil {
|
||||
t.Fatal("Expected record to be inserted")
|
||||
}
|
||||
|
||||
if retrieved.HoldDID != record.HoldDID {
|
||||
t.Errorf("HoldDID = %v, want %v", retrieved.HoldDID, record.HoldDID)
|
||||
}
|
||||
if retrieved.OwnerDID != record.OwnerDID {
|
||||
t.Errorf("OwnerDID = %v, want %v", retrieved.OwnerDID, record.OwnerDID)
|
||||
}
|
||||
}
|
||||
|
||||
// TestUpsertCaptainRecord_Update tests updating existing records
|
||||
func TestUpsertCaptainRecord_Update(t *testing.T) {
|
||||
db := setupHoldTestDB(t)
|
||||
|
||||
// Insert initial record
|
||||
initialRecord := &HoldCaptainRecord{
|
||||
HoldDID: "did:web:hold04.atcr.io",
|
||||
OwnerDID: "did:plc:dave111",
|
||||
Public: false,
|
||||
AllowAllCrew: false,
|
||||
DeployedAt: "2025-01-01",
|
||||
Region: "us-east-1",
|
||||
Provider: "aws",
|
||||
UpdatedAt: time.Now().Add(-1 * time.Hour),
|
||||
}
|
||||
|
||||
err := UpsertCaptainRecord(db, initialRecord)
|
||||
if err != nil {
|
||||
t.Fatalf("Initial UpsertCaptainRecord() error = %v", err)
|
||||
}
|
||||
|
||||
// Update the record
|
||||
updatedRecord := &HoldCaptainRecord{
|
||||
HoldDID: "did:web:hold04.atcr.io", // Same DID
|
||||
OwnerDID: "did:plc:eve222", // Changed owner
|
||||
Public: true, // Changed to public
|
||||
AllowAllCrew: true, // Changed allow all crew
|
||||
DeployedAt: "2025-03-01", // Changed date
|
||||
Region: "ap-south-1", // Changed region
|
||||
Provider: "azure", // Changed provider
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
|
||||
err = UpsertCaptainRecord(db, updatedRecord)
|
||||
if err != nil {
|
||||
t.Fatalf("Update UpsertCaptainRecord() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify it was updated
|
||||
retrieved, err := GetCaptainRecord(db, updatedRecord.HoldDID)
|
||||
if err != nil {
|
||||
t.Fatalf("GetCaptainRecord() error = %v", err)
|
||||
}
|
||||
|
||||
if retrieved == nil {
|
||||
t.Fatal("Expected record to exist")
|
||||
}
|
||||
|
||||
if retrieved.OwnerDID != updatedRecord.OwnerDID {
|
||||
t.Errorf("OwnerDID = %v, want %v", retrieved.OwnerDID, updatedRecord.OwnerDID)
|
||||
}
|
||||
if retrieved.Public != updatedRecord.Public {
|
||||
t.Errorf("Public = %v, want %v", retrieved.Public, updatedRecord.Public)
|
||||
}
|
||||
if retrieved.AllowAllCrew != updatedRecord.AllowAllCrew {
|
||||
t.Errorf("AllowAllCrew = %v, want %v", retrieved.AllowAllCrew, updatedRecord.AllowAllCrew)
|
||||
}
|
||||
if retrieved.DeployedAt != updatedRecord.DeployedAt {
|
||||
t.Errorf("DeployedAt = %v, want %v", retrieved.DeployedAt, updatedRecord.DeployedAt)
|
||||
}
|
||||
if retrieved.Region != updatedRecord.Region {
|
||||
t.Errorf("Region = %v, want %v", retrieved.Region, updatedRecord.Region)
|
||||
}
|
||||
if retrieved.Provider != updatedRecord.Provider {
|
||||
t.Errorf("Provider = %v, want %v", retrieved.Provider, updatedRecord.Provider)
|
||||
}
|
||||
|
||||
// Verify there's still only one record in the database
|
||||
holds, err := ListHoldDIDs(db)
|
||||
if err != nil {
|
||||
t.Fatalf("ListHoldDIDs() error = %v", err)
|
||||
}
|
||||
if len(holds) != 1 {
|
||||
t.Errorf("Expected 1 record, got %d", len(holds))
|
||||
}
|
||||
}
|
||||
|
||||
// TestListHoldDIDs tests listing all hold DIDs
|
||||
func TestListHoldDIDs(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
records []*HoldCaptainRecord
|
||||
wantCount int
|
||||
}{
|
||||
{
|
||||
name: "empty database",
|
||||
records: []*HoldCaptainRecord{},
|
||||
wantCount: 0,
|
||||
},
|
||||
{
|
||||
name: "single record",
|
||||
records: []*HoldCaptainRecord{
|
||||
{
|
||||
HoldDID: "did:web:hold05.atcr.io",
|
||||
OwnerDID: "did:plc:alice123",
|
||||
Public: true,
|
||||
AllowAllCrew: false,
|
||||
UpdatedAt: time.Now(),
|
||||
},
|
||||
},
|
||||
wantCount: 1,
|
||||
},
|
||||
{
|
||||
name: "multiple records",
|
||||
records: []*HoldCaptainRecord{
|
||||
{
|
||||
HoldDID: "did:web:hold06.atcr.io",
|
||||
OwnerDID: "did:plc:alice123",
|
||||
Public: true,
|
||||
AllowAllCrew: false,
|
||||
UpdatedAt: time.Now().Add(-2 * time.Hour),
|
||||
},
|
||||
{
|
||||
HoldDID: "did:web:hold07.atcr.io",
|
||||
OwnerDID: "did:plc:bob456",
|
||||
Public: false,
|
||||
AllowAllCrew: true,
|
||||
UpdatedAt: time.Now().Add(-1 * time.Hour),
|
||||
},
|
||||
{
|
||||
HoldDID: "did:web:hold08.atcr.io",
|
||||
OwnerDID: "did:plc:charlie789",
|
||||
Public: true,
|
||||
AllowAllCrew: true,
|
||||
UpdatedAt: time.Now(), // Most recent
|
||||
},
|
||||
},
|
||||
wantCount: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Fresh database for each test
|
||||
db := setupHoldTestDB(t)
|
||||
|
||||
// Insert test records
|
||||
for _, record := range tt.records {
|
||||
err := UpsertCaptainRecord(db, record)
|
||||
if err != nil {
|
||||
t.Fatalf("UpsertCaptainRecord() error = %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// List holds
|
||||
holds, err := ListHoldDIDs(db)
|
||||
if err != nil {
|
||||
t.Fatalf("ListHoldDIDs() error = %v", err)
|
||||
}
|
||||
|
||||
if len(holds) != tt.wantCount {
|
||||
t.Errorf("ListHoldDIDs() count = %d, want %d", len(holds), tt.wantCount)
|
||||
}
|
||||
|
||||
// Verify order (most recent first)
|
||||
if len(tt.records) > 1 {
|
||||
// Most recent should be first (hold08)
|
||||
if holds[0] != "did:web:hold08.atcr.io" {
|
||||
t.Errorf("First hold = %v, want did:web:hold08.atcr.io", holds[0])
|
||||
}
|
||||
// Oldest should be last (hold06)
|
||||
if holds[len(holds)-1] != "did:web:hold06.atcr.io" {
|
||||
t.Errorf("Last hold = %v, want did:web:hold06.atcr.io", holds[len(holds)-1])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestListHoldDIDs_OrderByUpdatedAt tests that holds are ordered correctly
|
||||
func TestListHoldDIDs_OrderByUpdatedAt(t *testing.T) {
|
||||
db := setupHoldTestDB(t)
|
||||
|
||||
// Insert records with specific update times
|
||||
now := time.Now()
|
||||
records := []*HoldCaptainRecord{
|
||||
{
|
||||
HoldDID: "did:web:oldest.atcr.io",
|
||||
OwnerDID: "did:plc:test1",
|
||||
Public: true,
|
||||
UpdatedAt: now.Add(-3 * time.Hour),
|
||||
},
|
||||
{
|
||||
HoldDID: "did:web:newest.atcr.io",
|
||||
OwnerDID: "did:plc:test2",
|
||||
Public: true,
|
||||
UpdatedAt: now,
|
||||
},
|
||||
{
|
||||
HoldDID: "did:web:middle.atcr.io",
|
||||
OwnerDID: "did:plc:test3",
|
||||
Public: true,
|
||||
UpdatedAt: now.Add(-1 * time.Hour),
|
||||
},
|
||||
}
|
||||
|
||||
for _, record := range records {
|
||||
err := UpsertCaptainRecord(db, record)
|
||||
if err != nil {
|
||||
t.Fatalf("UpsertCaptainRecord() error = %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
holds, err := ListHoldDIDs(db)
|
||||
if err != nil {
|
||||
t.Fatalf("ListHoldDIDs() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify order: newest first, oldest last
|
||||
expectedOrder := []string{
|
||||
"did:web:newest.atcr.io",
|
||||
"did:web:middle.atcr.io",
|
||||
"did:web:oldest.atcr.io",
|
||||
}
|
||||
|
||||
if len(holds) != len(expectedOrder) {
|
||||
t.Fatalf("Expected %d holds, got %d", len(expectedOrder), len(holds))
|
||||
}
|
||||
|
||||
for i, expected := range expectedOrder {
|
||||
if holds[i] != expected {
|
||||
t.Errorf("holds[%d] = %v, want %v", i, holds[i], expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,3 +1,3 @@
|
||||
description: Example migrarion query
|
||||
description: Example migration query
|
||||
query: |
|
||||
SELECT COUNT(*) FROM schema_migrations;
|
||||
@@ -0,0 +1,19 @@
|
||||
description: Normalize hold_endpoint column to store DIDs instead of URLs
|
||||
query: |
|
||||
-- Convert any URL-formatted hold_endpoint values to DID format
|
||||
-- This ensures all hold identifiers are stored consistently as did:web:hostname
|
||||
|
||||
-- Convert HTTPS URLs to did:web: format
|
||||
-- https://hold.example.com → did:web:hold.example.com
|
||||
UPDATE manifests
|
||||
SET hold_endpoint = 'did:web:' || substr(hold_endpoint, 9)
|
||||
WHERE hold_endpoint LIKE 'https://%';
|
||||
|
||||
-- Convert HTTP URLs to did:web: format
|
||||
-- http://172.28.0.3:8080 → did:web:172.28.0.3:8080
|
||||
UPDATE manifests
|
||||
SET hold_endpoint = 'did:web:' || substr(hold_endpoint, 8)
|
||||
WHERE hold_endpoint LIKE 'http://%';
|
||||
|
||||
-- Entries already in did:web: format are left unchanged
|
||||
-- did:web:hold.example.com → did:web:hold.example.com (no change)
|
||||
@@ -0,0 +1,7 @@
|
||||
description: Add readme_url to manifests (obsolete - kept for migration history)
|
||||
query: |
|
||||
-- This migration is obsolete. The readme_url and other annotations
|
||||
-- are now stored in the repository_annotations table (see schema.sql).
|
||||
-- Backfill will populate annotation data from PDS records.
|
||||
-- This migration is kept as a no-op to maintain migration history.
|
||||
SELECT 1;
|
||||
@@ -0,0 +1,35 @@
|
||||
description: Remove annotation columns from manifests table
|
||||
query: |
|
||||
-- Drop annotation columns from manifests table (if they exist)
|
||||
-- Annotations are now stored in repository_annotations table
|
||||
-- SQLite doesn't support DROP COLUMN IF EXISTS, so we recreate the table
|
||||
|
||||
-- Create new manifests table without annotation columns
|
||||
CREATE TABLE IF NOT EXISTS manifests_new (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
did TEXT NOT NULL,
|
||||
repository TEXT NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
hold_endpoint TEXT NOT NULL,
|
||||
schema_version INTEGER NOT NULL,
|
||||
media_type TEXT NOT NULL,
|
||||
config_digest TEXT,
|
||||
config_size INTEGER,
|
||||
created_at TIMESTAMP NOT NULL,
|
||||
UNIQUE(did, repository, digest),
|
||||
FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
-- Copy data (only core fields, annotation columns are dropped)
|
||||
INSERT INTO manifests_new (id, did, repository, digest, hold_endpoint, schema_version, media_type, config_digest, config_size, created_at)
|
||||
SELECT id, did, repository, digest, hold_endpoint, schema_version, media_type, config_digest, config_size, created_at
|
||||
FROM manifests;
|
||||
|
||||
-- Swap tables
|
||||
DROP TABLE manifests;
|
||||
ALTER TABLE manifests_new RENAME TO manifests;
|
||||
|
||||
-- Recreate indexes
|
||||
CREATE INDEX IF NOT EXISTS idx_manifests_did_repo ON manifests(did, repository);
|
||||
CREATE INDEX IF NOT EXISTS idx_manifests_created_at ON manifests(created_at DESC);
|
||||
CREATE INDEX IF NOT EXISTS idx_manifests_digest ON manifests(digest);
|
||||
@@ -2,6 +2,18 @@
|
||||
|
||||
This directory contains database migrations for the ATCR AppView database.
|
||||
|
||||
## Schema vs Migrations
|
||||
|
||||
**`schema.sql`** (in parent directory) contains the **complete base schema** for fresh database installations. It includes all tables, indexes, and constraints.
|
||||
|
||||
**Migrations** (this directory) handle **changes to existing databases**. They are only for:
|
||||
- `ALTER TABLE` statements (add/modify/drop columns)
|
||||
- `UPDATE` statements (data transformations)
|
||||
- `DELETE` statements (data cleanup)
|
||||
- Creating/modifying indexes on existing tables
|
||||
|
||||
**NEW TABLES go in `schema.sql`, NOT in migrations.**
|
||||
|
||||
## Migration Format
|
||||
|
||||
Each migration is a YAML file with the following structure:
|
||||
@@ -33,13 +45,43 @@ Examples:
|
||||
2. **Create a new YAML file** with format `000N_descriptive_name.yaml`
|
||||
3. **Add description** (optional) - Explain what the migration does
|
||||
4. **Write your SQL in `query`** - Use the `|` block scalar for clean multi-line SQL
|
||||
5. **Use `IF EXISTS` / `IF NOT EXISTS`** where possible for idempotency (note: not supported for `DROP COLUMN`)
|
||||
5. **Use `IF EXISTS` / `IF NOT EXISTS`** where possible for idempotency
|
||||
|
||||
## Examples
|
||||
|
||||
### Simple single-statement migration:
|
||||
### Adding a column to existing table:
|
||||
|
||||
Filename: `0002_add_repository_description_index.yaml`
|
||||
Filename: `0007_add_readme_url_to_manifests.yaml`
|
||||
|
||||
```yaml
|
||||
description: Add readme_url column to manifests table for storing io.atcr.readme annotation
|
||||
query: |
|
||||
ALTER TABLE manifests ADD COLUMN readme_url TEXT;
|
||||
```
|
||||
|
||||
**IMPORTANT:** After creating this migration, also add the column to `schema.sql` so fresh installations include it!
|
||||
|
||||
### Data transformation migration:
|
||||
|
||||
Filename: `0005_normalize_hold_endpoint_to_did.yaml`
|
||||
|
||||
```yaml
|
||||
description: Normalize hold_endpoint column to store DIDs instead of URLs
|
||||
query: |
|
||||
-- Convert HTTPS URLs to did:web: format
|
||||
UPDATE manifests
|
||||
SET hold_endpoint = 'did:web:' || substr(hold_endpoint, 9)
|
||||
WHERE hold_endpoint LIKE 'https://%';
|
||||
|
||||
-- Convert HTTP URLs to did:web: format
|
||||
UPDATE manifests
|
||||
SET hold_endpoint = 'did:web:' || substr(hold_endpoint, 8)
|
||||
WHERE hold_endpoint LIKE 'http://%';
|
||||
```
|
||||
|
||||
### Adding an index to existing table:
|
||||
|
||||
Filename: `0008_add_repository_description_index.yaml`
|
||||
|
||||
```yaml
|
||||
description: Add index on manifests description field for faster searches
|
||||
@@ -47,28 +89,6 @@ query: |
|
||||
CREATE INDEX IF NOT EXISTS idx_manifests_description ON manifests(description);
|
||||
```
|
||||
|
||||
### Complex multi-statement migration:
|
||||
|
||||
Filename: `0003_create_webhooks_table.yaml`
|
||||
|
||||
```yaml
|
||||
description: Create webhooks table for repository event notifications
|
||||
query: |
|
||||
-- Create webhooks table
|
||||
CREATE TABLE IF NOT EXISTS webhooks (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
url TEXT NOT NULL,
|
||||
events TEXT NOT NULL,
|
||||
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
-- Create index on URL for faster lookups
|
||||
CREATE INDEX IF NOT EXISTS idx_webhooks_url ON webhooks(url);
|
||||
|
||||
-- Create index on events for filtering
|
||||
CREATE INDEX IF NOT EXISTS idx_webhooks_events ON webhooks(events);
|
||||
```
|
||||
|
||||
## How Migrations Run
|
||||
|
||||
1. Migrations are loaded from this directory on startup
|
||||
@@ -82,4 +102,6 @@ query: |
|
||||
- **Never modify existing migrations** - Once applied, they're immutable
|
||||
- **Test migrations** before committing - Ensure they work on existing databases
|
||||
- **Version numbers must be unique** - The migration system will fail if duplicates exist
|
||||
- **Migrations are run automatically** on `InitDB()` - No manual intervention needed
|
||||
- **Migrations run automatically** on `InitDB()` - Schema first, then migrations
|
||||
- **CRITICAL: Update `schema.sql` for structural changes** - When you ALTER a table or add columns, update both the migration AND `schema.sql` so fresh installations have the same structure
|
||||
- **New tables go in `schema.sql` only** - Don't create migration files for new tables
|
||||
|
||||
@@ -13,22 +13,17 @@ type User struct {
|
||||
|
||||
// Manifest represents an OCI manifest stored in the cache
|
||||
type Manifest struct {
|
||||
ID int64
|
||||
DID string
|
||||
Repository string
|
||||
Digest string
|
||||
HoldEndpoint string
|
||||
SchemaVersion int
|
||||
MediaType string
|
||||
ConfigDigest string
|
||||
ConfigSize int64
|
||||
CreatedAt time.Time
|
||||
Title string
|
||||
Description string
|
||||
SourceURL string
|
||||
DocumentationURL string
|
||||
Licenses string
|
||||
IconURL string
|
||||
ID int64
|
||||
DID string
|
||||
Repository string
|
||||
Digest string
|
||||
HoldEndpoint string
|
||||
SchemaVersion int
|
||||
MediaType string
|
||||
ConfigDigest string
|
||||
ConfigSize int64
|
||||
CreatedAt time.Time
|
||||
// Annotations removed - now stored in repository_annotations table
|
||||
}
|
||||
|
||||
// Layer represents a layer in a manifest
|
||||
@@ -40,6 +35,19 @@ type Layer struct {
|
||||
LayerIndex int
|
||||
}
|
||||
|
||||
// ManifestReference represents a reference to a manifest in a manifest list/index
|
||||
type ManifestReference struct {
|
||||
ManifestID int64
|
||||
Digest string
|
||||
Size int64
|
||||
MediaType string
|
||||
PlatformArchitecture string
|
||||
PlatformOS string
|
||||
PlatformVariant string
|
||||
PlatformOSVersion string
|
||||
ReferenceIndex int
|
||||
}
|
||||
|
||||
// Tag represents a tag pointing to a manifest
|
||||
type Tag struct {
|
||||
ID int64
|
||||
@@ -52,17 +60,20 @@ type Tag struct {
|
||||
|
||||
// Push represents a combined tag and manifest for the recent pushes view
|
||||
type Push struct {
|
||||
DID string
|
||||
Handle string
|
||||
Repository string
|
||||
Tag string
|
||||
Digest string
|
||||
Title string
|
||||
Description string
|
||||
IconURL string
|
||||
StarCount int
|
||||
PullCount int
|
||||
CreatedAt time.Time
|
||||
DID string
|
||||
Handle string
|
||||
Repository string
|
||||
Tag string
|
||||
Digest string
|
||||
Title string
|
||||
Description string
|
||||
IconURL string
|
||||
StarCount int
|
||||
PullCount int
|
||||
IsStarred bool // Whether the current user has starred this repository
|
||||
CreatedAt time.Time
|
||||
HoldEndpoint string // Hold endpoint for health checking
|
||||
Reachable bool // Whether the hold endpoint is reachable
|
||||
}
|
||||
|
||||
// Repository represents an aggregated view of a user's repository
|
||||
@@ -79,6 +90,8 @@ type Repository struct {
|
||||
DocumentationURL string
|
||||
Licenses string
|
||||
IconURL string
|
||||
ReadmeURL string
|
||||
Version string
|
||||
}
|
||||
|
||||
// RepositoryStats represents statistics for a repository
|
||||
@@ -102,6 +115,7 @@ type FeaturedRepository struct {
|
||||
IconURL string
|
||||
StarCount int
|
||||
PullCount int
|
||||
IsStarred bool // Whether the current user has starred this repository
|
||||
}
|
||||
|
||||
// RepositoryWithStats combines repository data with statistics
|
||||
@@ -119,4 +133,42 @@ type RepoCardData struct {
|
||||
IconURL string
|
||||
StarCount int
|
||||
PullCount int
|
||||
IsStarred bool // Whether the current user has starred this repository
|
||||
}
|
||||
|
||||
// PlatformInfo represents platform information (OS/Architecture)
|
||||
type PlatformInfo struct {
|
||||
OS string
|
||||
Architecture string
|
||||
Variant string
|
||||
OSVersion string
|
||||
}
|
||||
|
||||
// TagWithPlatforms extends Tag with platform information
|
||||
type TagWithPlatforms struct {
|
||||
Tag
|
||||
Platforms []PlatformInfo
|
||||
IsMultiArch bool
|
||||
}
|
||||
|
||||
// VulnerabilitySummary represents vulnerability counts by severity
|
||||
type VulnerabilitySummary struct {
|
||||
Critical int
|
||||
High int
|
||||
Medium int
|
||||
Low int
|
||||
Total int
|
||||
}
|
||||
|
||||
// ManifestWithMetadata extends Manifest with tags and platform information
|
||||
type ManifestWithMetadata struct {
|
||||
Manifest
|
||||
Tags []string
|
||||
Platforms []PlatformInfo
|
||||
PlatformCount int
|
||||
IsManifestList bool
|
||||
Reachable bool // Whether the hold endpoint is reachable
|
||||
Pending bool // Whether health check is still in progress
|
||||
Vulnerabilities *VulnerabilitySummary
|
||||
HasVulnerabilities bool
|
||||
}
|
||||
|
||||
27
pkg/appview/db/models_test.go
Normal file
27
pkg/appview/db/models_test.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package db
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestUser_Struct(t *testing.T) {
|
||||
user := &User{
|
||||
DID: "did:plc:test",
|
||||
Handle: "alice.bsky.social",
|
||||
PDSEndpoint: "https://bsky.social",
|
||||
}
|
||||
|
||||
if user.DID != "did:plc:test" {
|
||||
t.Errorf("Expected DID %q, got %q", "did:plc:test", user.DID)
|
||||
}
|
||||
|
||||
if user.Handle != "alice.bsky.social" {
|
||||
t.Errorf("Expected handle %q, got %q", "alice.bsky.social", user.Handle)
|
||||
}
|
||||
|
||||
if user.PDSEndpoint != "https://bsky.social" {
|
||||
t.Errorf("Expected PDS endpoint %q, got %q", "https://bsky.social", user.PDSEndpoint)
|
||||
}
|
||||
}
|
||||
|
||||
// RepositoryInfo tests removed - struct definition may vary
|
||||
|
||||
// TODO: Add tests for all model structs
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
"github.com/bluesky-social/indigo/atproto/auth/oauth"
|
||||
@@ -105,7 +106,7 @@ func (s *OAuthStore) DeleteSessionsForDID(ctx context.Context, did string) error
|
||||
|
||||
deleted, _ := result.RowsAffected()
|
||||
if deleted > 0 {
|
||||
fmt.Printf("Deleted %d OAuth session(s) for DID %s\n", deleted, did)
|
||||
slog.Info("Deleted OAuth sessions for DID", "count", deleted, "did", did)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -207,7 +208,7 @@ func (s *OAuthStore) CleanupOldSessions(ctx context.Context, olderThan time.Dura
|
||||
|
||||
deleted, _ := result.RowsAffected()
|
||||
if deleted > 0 {
|
||||
fmt.Printf("Cleaned up %d old OAuth sessions (older than %v)\n", deleted, olderThan)
|
||||
slog.Info("Cleaned up old OAuth sessions", "count", deleted, "older_than", olderThan)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -228,12 +229,95 @@ func (s *OAuthStore) CleanupExpiredAuthRequests(ctx context.Context) error {
|
||||
|
||||
deleted, _ := result.RowsAffected()
|
||||
if deleted > 0 {
|
||||
fmt.Printf("Cleaned up %d expired auth requests\n", deleted)
|
||||
slog.Info("Cleaned up expired auth requests", "count", deleted)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// InvalidateSessionsWithMismatchedScopes removes all sessions whose scopes don't match the desired scopes
|
||||
// This is called on AppView startup to ensure all sessions have current scopes
|
||||
// Returns the count of invalidated sessions
|
||||
func (s *OAuthStore) InvalidateSessionsWithMismatchedScopes(ctx context.Context, desiredScopes []string) (int, error) {
|
||||
// Query all sessions
|
||||
rows, err := s.db.QueryContext(ctx, `
|
||||
SELECT session_key, account_did, session_id, session_data
|
||||
FROM oauth_sessions
|
||||
`)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to query sessions: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var sessionsToDelete []string
|
||||
for rows.Next() {
|
||||
var sessionKey, accountDID, sessionID, sessionDataJSON string
|
||||
if err := rows.Scan(&sessionKey, &accountDID, &sessionID, &sessionDataJSON); err != nil {
|
||||
slog.Warn("Failed to scan session row", "component", "oauth/store", "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse session data
|
||||
var sessionData oauth.ClientSessionData
|
||||
if err := json.Unmarshal([]byte(sessionDataJSON), &sessionData); err != nil {
|
||||
slog.Warn("Failed to parse session data", "component", "oauth/store", "session_key", sessionKey, "error", err)
|
||||
// Delete malformed sessions
|
||||
sessionsToDelete = append(sessionsToDelete, sessionKey)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if scopes match (need to import oauth package for ScopesMatch)
|
||||
// Since we're in db package, we can't import oauth (circular dependency)
|
||||
// So we'll implement a simple scope comparison here
|
||||
if !scopesMatch(sessionData.Scopes, desiredScopes) {
|
||||
sessionsToDelete = append(sessionsToDelete, sessionKey)
|
||||
}
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
return 0, fmt.Errorf("error iterating sessions: %w", err)
|
||||
}
|
||||
|
||||
// Delete sessions with mismatched scopes
|
||||
if len(sessionsToDelete) > 0 {
|
||||
for _, key := range sessionsToDelete {
|
||||
_, err := s.db.ExecContext(ctx, `
|
||||
DELETE FROM oauth_sessions WHERE session_key = ?
|
||||
`, key)
|
||||
if err != nil {
|
||||
slog.Warn("Failed to delete session", "component", "oauth/store", "session_key", key, "error", err)
|
||||
}
|
||||
}
|
||||
slog.Info("Invalidated OAuth sessions with mismatched scopes", "count", len(sessionsToDelete))
|
||||
}
|
||||
|
||||
return len(sessionsToDelete), nil
|
||||
}
|
||||
|
||||
// scopesMatch checks if two scope lists are equivalent (order-independent)
|
||||
// Local implementation to avoid circular dependency with oauth package
|
||||
func scopesMatch(stored, desired []string) bool {
|
||||
if len(stored) == 0 && len(desired) == 0 {
|
||||
return true
|
||||
}
|
||||
if len(stored) != len(desired) {
|
||||
return false
|
||||
}
|
||||
|
||||
desiredMap := make(map[string]bool, len(desired))
|
||||
for _, scope := range desired {
|
||||
desiredMap[scope] = true
|
||||
}
|
||||
|
||||
for _, scope := range stored {
|
||||
if !desiredMap[scope] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// makeSessionKey creates a composite key for session storage
|
||||
func makeSessionKey(did, sessionID string) string {
|
||||
return fmt.Sprintf("%s:%s", did, sessionID)
|
||||
|
||||
421
pkg/appview/db/oauth_store_test.go
Normal file
421
pkg/appview/db/oauth_store_test.go
Normal file
@@ -0,0 +1,421 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/bluesky-social/indigo/atproto/auth/oauth"
|
||||
"github.com/bluesky-social/indigo/atproto/syntax"
|
||||
)
|
||||
|
||||
func TestInvalidateSessionsWithMismatchedScopes(t *testing.T) {
|
||||
// Create in-memory test database
|
||||
db, err := InitDB(":memory:", true)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to init database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
store := NewOAuthStore(db)
|
||||
ctx := context.Background()
|
||||
|
||||
// Test 1: Empty database - should return 0
|
||||
count, err := store.InvalidateSessionsWithMismatchedScopes(ctx, []string{"atproto", "blob:image/png"})
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error with empty DB, got: %v", err)
|
||||
}
|
||||
if count != 0 {
|
||||
t.Errorf("Expected 0 invalidated sessions in empty DB, got %d", count)
|
||||
}
|
||||
|
||||
// Helper to create session data
|
||||
createSession := func(did, sessionID string, scopes []string) oauth.ClientSessionData {
|
||||
parsedDID, _ := syntax.ParseDID(did)
|
||||
return oauth.ClientSessionData{
|
||||
AccountDID: parsedDID,
|
||||
SessionID: sessionID,
|
||||
HostURL: "https://bsky.social",
|
||||
AuthServerURL: "https://bsky.social",
|
||||
AuthServerTokenEndpoint: "https://bsky.social/oauth/token",
|
||||
Scopes: scopes,
|
||||
AccessToken: "test_access_token",
|
||||
RefreshToken: "test_refresh_token",
|
||||
DPoPAuthServerNonce: "test_nonce",
|
||||
DPoPHostNonce: "test_host_nonce",
|
||||
DPoPPrivateKeyMultibase: "test_key",
|
||||
}
|
||||
}
|
||||
|
||||
// Test 2: Session with matching scopes - should not be invalidated
|
||||
matchingSession := createSession("did:plc:test1", "session1", []string{"atproto", "blob:image/png"})
|
||||
if err := store.SaveSession(ctx, matchingSession); err != nil {
|
||||
t.Fatalf("Failed to save matching session: %v", err)
|
||||
}
|
||||
|
||||
count, err = store.InvalidateSessionsWithMismatchedScopes(ctx, []string{"atproto", "blob:image/png"})
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got: %v", err)
|
||||
}
|
||||
if count != 0 {
|
||||
t.Errorf("Expected 0 invalidated sessions (all match), got %d", count)
|
||||
}
|
||||
|
||||
// Verify session still exists
|
||||
retrieved, err := store.GetSession(ctx, matchingSession.AccountDID, matchingSession.SessionID)
|
||||
if err != nil {
|
||||
t.Errorf("Expected session to still exist, got error: %v", err)
|
||||
}
|
||||
if retrieved == nil {
|
||||
t.Error("Expected session to still exist, got nil")
|
||||
}
|
||||
|
||||
// Test 3: Session with mismatched scopes (missing scope) - should be invalidated
|
||||
mismatchedSession := createSession("did:plc:test2", "session2", []string{"atproto"}) // Missing blob scope
|
||||
if err := store.SaveSession(ctx, mismatchedSession); err != nil {
|
||||
t.Fatalf("Failed to save mismatched session: %v", err)
|
||||
}
|
||||
|
||||
count, err = store.InvalidateSessionsWithMismatchedScopes(ctx, []string{"atproto", "blob:image/png"})
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got: %v", err)
|
||||
}
|
||||
if count != 1 {
|
||||
t.Errorf("Expected 1 invalidated session, got %d", count)
|
||||
}
|
||||
|
||||
// Verify mismatched session was deleted
|
||||
_, err = store.GetSession(ctx, mismatchedSession.AccountDID, mismatchedSession.SessionID)
|
||||
if err == nil {
|
||||
t.Error("Expected session to be deleted (should error), but got no error")
|
||||
}
|
||||
|
||||
// Test 4: Session with extra scopes - should be invalidated
|
||||
extraScopeSession := createSession("did:plc:test3", "session3", []string{"atproto", "blob:image/png", "extra:scope"})
|
||||
if err := store.SaveSession(ctx, extraScopeSession); err != nil {
|
||||
t.Fatalf("Failed to save extra scope session: %v", err)
|
||||
}
|
||||
|
||||
count, err = store.InvalidateSessionsWithMismatchedScopes(ctx, []string{"atproto", "blob:image/png"})
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got: %v", err)
|
||||
}
|
||||
if count != 1 {
|
||||
t.Errorf("Expected 1 invalidated session (extra scope), got %d", count)
|
||||
}
|
||||
|
||||
// Test 5: Multiple sessions with mixed matches - only mismatch should be invalidated
|
||||
matching1 := createSession("did:plc:test4", "session4", []string{"atproto", "blob:image/png"})
|
||||
matching2 := createSession("did:plc:test5", "session5", []string{"blob:image/png", "atproto"}) // Different order
|
||||
mismatched1 := createSession("did:plc:test6", "session6", []string{"atproto"})
|
||||
mismatched2 := createSession("did:plc:test7", "session7", []string{"wrong", "scopes"})
|
||||
|
||||
for _, sess := range []oauth.ClientSessionData{matching1, matching2, mismatched1, mismatched2} {
|
||||
if err := store.SaveSession(ctx, sess); err != nil {
|
||||
t.Fatalf("Failed to save session: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
count, err = store.InvalidateSessionsWithMismatchedScopes(ctx, []string{"atproto", "blob:image/png"})
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got: %v", err)
|
||||
}
|
||||
if count != 2 {
|
||||
t.Errorf("Expected 2 invalidated sessions, got %d", count)
|
||||
}
|
||||
|
||||
// Verify matching sessions still exist
|
||||
for _, sess := range []oauth.ClientSessionData{matching1, matching2} {
|
||||
retrieved, err := store.GetSession(ctx, sess.AccountDID, sess.SessionID)
|
||||
if err != nil {
|
||||
t.Errorf("Expected matching session %s to exist, got error: %v", sess.SessionID, err)
|
||||
}
|
||||
if retrieved == nil {
|
||||
t.Errorf("Expected matching session %s to exist, got nil", sess.SessionID)
|
||||
}
|
||||
}
|
||||
|
||||
// Test 6: Malformed session data - should be deleted
|
||||
parsedDID, _ := syntax.ParseDID("did:plc:test8")
|
||||
_, err = db.ExecContext(ctx, `
|
||||
INSERT INTO oauth_sessions (session_key, account_did, session_id, session_data, created_at, updated_at)
|
||||
VALUES (?, ?, ?, ?, datetime('now'), datetime('now'))
|
||||
`, makeSessionKey("did:plc:test8", "malformed"), "did:plc:test8", "malformed", "invalid json data")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to insert malformed session: %v", err)
|
||||
}
|
||||
|
||||
count, err = store.InvalidateSessionsWithMismatchedScopes(ctx, []string{"atproto", "blob:image/png"})
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error handling malformed data, got: %v", err)
|
||||
}
|
||||
if count != 1 {
|
||||
t.Errorf("Expected 1 invalidated session (malformed), got %d", count)
|
||||
}
|
||||
|
||||
// Verify malformed session was deleted
|
||||
_, err = store.GetSession(ctx, parsedDID, "malformed")
|
||||
if err == nil {
|
||||
t.Error("Expected malformed session to be deleted, but got no error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestScopesMatch(t *testing.T) {
|
||||
// Test the local scopesMatch function to ensure it matches the oauth.ScopesMatch behavior
|
||||
tests := []struct {
|
||||
name string
|
||||
stored []string
|
||||
desired []string
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "exact match",
|
||||
stored: []string{"atproto", "blob:image/png"},
|
||||
desired: []string{"atproto", "blob:image/png"},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "different order",
|
||||
stored: []string{"blob:image/png", "atproto"},
|
||||
desired: []string{"atproto", "blob:image/png"},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "missing scope",
|
||||
stored: []string{"atproto"},
|
||||
desired: []string{"atproto", "blob:image/png"},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "extra scope",
|
||||
stored: []string{"atproto", "blob:image/png", "extra"},
|
||||
desired: []string{"atproto", "blob:image/png"},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "both empty",
|
||||
stored: []string{},
|
||||
desired: []string{},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "nil vs empty",
|
||||
stored: nil,
|
||||
desired: []string{},
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := scopesMatch(tt.stored, tt.desired)
|
||||
if result != tt.expected {
|
||||
t.Errorf("scopesMatch(%v, %v) = %v, want %v",
|
||||
tt.stored, tt.desired, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestOAuthStoreSessionLifecycle(t *testing.T) {
|
||||
// Basic test to ensure SaveSession, GetSession, DeleteSession work correctly
|
||||
db, err := InitDB(":memory:", true)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to init database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
store := NewOAuthStore(db)
|
||||
ctx := context.Background()
|
||||
|
||||
// Create test session
|
||||
did, _ := syntax.ParseDID("did:plc:testuser")
|
||||
sessionData := oauth.ClientSessionData{
|
||||
AccountDID: did,
|
||||
SessionID: "test_session_id",
|
||||
HostURL: "https://bsky.social",
|
||||
AuthServerURL: "https://bsky.social",
|
||||
AuthServerTokenEndpoint: "https://bsky.social/oauth/token",
|
||||
Scopes: []string{"atproto", "blob:image/png"},
|
||||
AccessToken: "test_access_token",
|
||||
RefreshToken: "test_refresh_token",
|
||||
DPoPAuthServerNonce: "test_nonce",
|
||||
DPoPHostNonce: "test_host_nonce",
|
||||
DPoPPrivateKeyMultibase: "test_key",
|
||||
}
|
||||
|
||||
// Test SaveSession
|
||||
if err := store.SaveSession(ctx, sessionData); err != nil {
|
||||
t.Fatalf("Failed to save session: %v", err)
|
||||
}
|
||||
|
||||
// Test GetSession
|
||||
retrieved, err := store.GetSession(ctx, did, "test_session_id")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get session: %v", err)
|
||||
}
|
||||
if retrieved == nil {
|
||||
t.Fatal("Retrieved session is nil")
|
||||
}
|
||||
if retrieved.SessionID != sessionData.SessionID {
|
||||
t.Errorf("Expected session ID %s, got %s", sessionData.SessionID, retrieved.SessionID)
|
||||
}
|
||||
if len(retrieved.Scopes) != len(sessionData.Scopes) {
|
||||
t.Errorf("Expected %d scopes, got %d", len(sessionData.Scopes), len(retrieved.Scopes))
|
||||
}
|
||||
|
||||
// Test UpdateSession (upsert)
|
||||
sessionData.AccessToken = "new_access_token"
|
||||
if err := store.SaveSession(ctx, sessionData); err != nil {
|
||||
t.Fatalf("Failed to update session: %v", err)
|
||||
}
|
||||
|
||||
retrieved, err = store.GetSession(ctx, did, "test_session_id")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get updated session: %v", err)
|
||||
}
|
||||
if retrieved.AccessToken != "new_access_token" {
|
||||
t.Errorf("Expected updated access token, got %s", retrieved.AccessToken)
|
||||
}
|
||||
|
||||
// Test DeleteSession
|
||||
if err := store.DeleteSession(ctx, did, "test_session_id"); err != nil {
|
||||
t.Fatalf("Failed to delete session: %v", err)
|
||||
}
|
||||
|
||||
// Verify deletion
|
||||
_, err = store.GetSession(ctx, did, "test_session_id")
|
||||
if err == nil {
|
||||
t.Error("Expected error after deletion, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanupOldSessions(t *testing.T) {
|
||||
db, err := InitDB(":memory:", true)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to init database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
store := NewOAuthStore(db)
|
||||
ctx := context.Background()
|
||||
|
||||
// Insert old session (31 days ago)
|
||||
did1, _ := syntax.ParseDID("did:plc:old")
|
||||
oldSessionData := oauth.ClientSessionData{
|
||||
AccountDID: did1,
|
||||
SessionID: "old_session",
|
||||
HostURL: "https://bsky.social",
|
||||
AuthServerURL: "https://bsky.social",
|
||||
AuthServerTokenEndpoint: "https://bsky.social/oauth/token",
|
||||
Scopes: []string{"atproto"},
|
||||
AccessToken: "old_token",
|
||||
RefreshToken: "old_refresh",
|
||||
DPoPAuthServerNonce: "old_nonce",
|
||||
DPoPHostNonce: "old_host_nonce",
|
||||
DPoPPrivateKeyMultibase: "old_key",
|
||||
}
|
||||
|
||||
// Save and manually update timestamp to be old
|
||||
if err := store.SaveSession(ctx, oldSessionData); err != nil {
|
||||
t.Fatalf("Failed to save old session: %v", err)
|
||||
}
|
||||
|
||||
// Update timestamp to 31 days ago
|
||||
oldTime := time.Now().Add(-31 * 24 * time.Hour)
|
||||
_, err = db.ExecContext(ctx, `
|
||||
UPDATE oauth_sessions
|
||||
SET updated_at = ?
|
||||
WHERE session_key = ?
|
||||
`, oldTime, makeSessionKey(did1.String(), "old_session"))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to update session timestamp: %v", err)
|
||||
}
|
||||
|
||||
// Insert recent session (1 day ago)
|
||||
did2, _ := syntax.ParseDID("did:plc:recent")
|
||||
recentSessionData := oauth.ClientSessionData{
|
||||
AccountDID: did2,
|
||||
SessionID: "recent_session",
|
||||
HostURL: "https://bsky.social",
|
||||
AuthServerURL: "https://bsky.social",
|
||||
AuthServerTokenEndpoint: "https://bsky.social/oauth/token",
|
||||
Scopes: []string{"atproto"},
|
||||
AccessToken: "recent_token",
|
||||
RefreshToken: "recent_refresh",
|
||||
DPoPAuthServerNonce: "recent_nonce",
|
||||
DPoPHostNonce: "recent_host_nonce",
|
||||
DPoPPrivateKeyMultibase: "recent_key",
|
||||
}
|
||||
|
||||
if err := store.SaveSession(ctx, recentSessionData); err != nil {
|
||||
t.Fatalf("Failed to save recent session: %v", err)
|
||||
}
|
||||
|
||||
// Run cleanup (remove sessions older than 30 days)
|
||||
if err := store.CleanupOldSessions(ctx, 30*24*time.Hour); err != nil {
|
||||
t.Fatalf("Failed to cleanup old sessions: %v", err)
|
||||
}
|
||||
|
||||
// Verify old session was deleted
|
||||
_, err = store.GetSession(ctx, did1, "old_session")
|
||||
if err == nil {
|
||||
t.Error("Expected old session to be deleted")
|
||||
}
|
||||
|
||||
// Verify recent session still exists
|
||||
_, err = store.GetSession(ctx, did2, "recent_session")
|
||||
if err != nil {
|
||||
t.Errorf("Expected recent session to exist, got error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMakeSessionKey tests the session key generation function
|
||||
func TestMakeSessionKey(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
did string
|
||||
sessionID string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "normal case",
|
||||
did: "did:plc:abc123",
|
||||
sessionID: "session_xyz789",
|
||||
expected: "did:plc:abc123:session_xyz789",
|
||||
},
|
||||
{
|
||||
name: "empty did",
|
||||
did: "",
|
||||
sessionID: "session123",
|
||||
expected: ":session123",
|
||||
},
|
||||
{
|
||||
name: "empty session",
|
||||
did: "did:plc:test",
|
||||
sessionID: "",
|
||||
expected: "did:plc:test:",
|
||||
},
|
||||
{
|
||||
name: "both empty",
|
||||
did: "",
|
||||
sessionID: "",
|
||||
expected: ":",
|
||||
},
|
||||
{
|
||||
name: "with colon in did",
|
||||
did: "did:web:example.com",
|
||||
sessionID: "session123",
|
||||
expected: "did:web:example.com:session123",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := makeSessionKey(tt.did, tt.sessionID)
|
||||
if result != tt.expected {
|
||||
t.Errorf("makeSessionKey(%q, %q) = %q, want %q", tt.did, tt.sessionID, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -31,7 +31,7 @@ func escapeLikePattern(s string) string {
|
||||
}
|
||||
|
||||
// GetRecentPushes fetches recent pushes with pagination
|
||||
func GetRecentPushes(db *sql.DB, limit, offset int, userFilter string) ([]Push, int, error) {
|
||||
func GetRecentPushes(db *sql.DB, limit, offset int, userFilter string, currentUserDID string) ([]Push, int, error) {
|
||||
query := `
|
||||
SELECT
|
||||
u.did,
|
||||
@@ -39,19 +39,21 @@ func GetRecentPushes(db *sql.DB, limit, offset int, userFilter string) ([]Push,
|
||||
t.repository,
|
||||
t.tag,
|
||||
t.digest,
|
||||
COALESCE(m.title, ''),
|
||||
COALESCE(m.description, ''),
|
||||
COALESCE(m.icon_url, ''),
|
||||
COALESCE((SELECT value FROM repository_annotations WHERE did = u.did AND repository = t.repository AND key = 'org.opencontainers.image.title'), ''),
|
||||
COALESCE((SELECT value FROM repository_annotations WHERE did = u.did AND repository = t.repository AND key = 'org.opencontainers.image.description'), ''),
|
||||
COALESCE((SELECT value FROM repository_annotations WHERE did = u.did AND repository = t.repository AND key = 'io.atcr.icon'), ''),
|
||||
COALESCE(rs.pull_count, 0),
|
||||
COALESCE((SELECT COUNT(*) FROM stars WHERE owner_did = u.did AND repository = t.repository), 0),
|
||||
t.created_at
|
||||
COALESCE((SELECT COUNT(*) FROM stars WHERE starrer_did = ? AND owner_did = u.did AND repository = t.repository), 0),
|
||||
t.created_at,
|
||||
m.hold_endpoint
|
||||
FROM tags t
|
||||
JOIN users u ON t.did = u.did
|
||||
JOIN manifests m ON t.did = m.did AND t.repository = m.repository AND t.digest = m.digest
|
||||
LEFT JOIN repository_stats rs ON t.did = rs.did AND t.repository = rs.repository
|
||||
`
|
||||
|
||||
args := []any{}
|
||||
args := []any{currentUserDID}
|
||||
|
||||
if userFilter != "" {
|
||||
query += " WHERE u.handle = ? OR u.did = ?"
|
||||
@@ -70,9 +72,11 @@ func GetRecentPushes(db *sql.DB, limit, offset int, userFilter string) ([]Push,
|
||||
var pushes []Push
|
||||
for rows.Next() {
|
||||
var p Push
|
||||
if err := rows.Scan(&p.DID, &p.Handle, &p.Repository, &p.Tag, &p.Digest, &p.Title, &p.Description, &p.IconURL, &p.PullCount, &p.StarCount, &p.CreatedAt); err != nil {
|
||||
var isStarredInt int
|
||||
if err := rows.Scan(&p.DID, &p.Handle, &p.Repository, &p.Tag, &p.Digest, &p.Title, &p.Description, &p.IconURL, &p.PullCount, &p.StarCount, &isStarredInt, &p.CreatedAt, &p.HoldEndpoint); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
p.IsStarred = isStarredInt > 0
|
||||
pushes = append(pushes, p)
|
||||
}
|
||||
|
||||
@@ -93,8 +97,8 @@ func GetRecentPushes(db *sql.DB, limit, offset int, userFilter string) ([]Push,
|
||||
return pushes, total, nil
|
||||
}
|
||||
|
||||
// SearchPushes searches for pushes matching the query across handles, DIDs, repositories, and manifest annotations
|
||||
func SearchPushes(db *sql.DB, query string, limit, offset int) ([]Push, int, error) {
|
||||
// SearchPushes searches for pushes matching the query across handles, DIDs, repositories, and annotations
|
||||
func SearchPushes(db *sql.DB, query string, limit, offset int, currentUserDID string) ([]Push, int, error) {
|
||||
// Escape LIKE wildcards so they're treated literally
|
||||
query = escapeLikePattern(query)
|
||||
|
||||
@@ -108,12 +112,14 @@ func SearchPushes(db *sql.DB, query string, limit, offset int) ([]Push, int, err
|
||||
t.repository,
|
||||
t.tag,
|
||||
t.digest,
|
||||
COALESCE(m.title, ''),
|
||||
COALESCE(m.description, ''),
|
||||
COALESCE(m.icon_url, ''),
|
||||
COALESCE((SELECT value FROM repository_annotations WHERE did = u.did AND repository = t.repository AND key = 'org.opencontainers.image.title'), ''),
|
||||
COALESCE((SELECT value FROM repository_annotations WHERE did = u.did AND repository = t.repository AND key = 'org.opencontainers.image.description'), ''),
|
||||
COALESCE((SELECT value FROM repository_annotations WHERE did = u.did AND repository = t.repository AND key = 'io.atcr.icon'), ''),
|
||||
COALESCE(rs.pull_count, 0),
|
||||
COALESCE((SELECT COUNT(*) FROM stars WHERE owner_did = u.did AND repository = t.repository), 0),
|
||||
t.created_at
|
||||
COALESCE((SELECT COUNT(*) FROM stars WHERE starrer_did = ? AND owner_did = u.did AND repository = t.repository), 0),
|
||||
t.created_at,
|
||||
m.hold_endpoint
|
||||
FROM tags t
|
||||
JOIN users u ON t.did = u.did
|
||||
JOIN manifests m ON t.did = m.did AND t.repository = m.repository AND t.digest = m.digest
|
||||
@@ -121,13 +127,16 @@ func SearchPushes(db *sql.DB, query string, limit, offset int) ([]Push, int, err
|
||||
WHERE u.handle LIKE ? ESCAPE '\'
|
||||
OR u.did = ?
|
||||
OR t.repository LIKE ? ESCAPE '\'
|
||||
OR m.title LIKE ? ESCAPE '\'
|
||||
OR m.description LIKE ? ESCAPE '\'
|
||||
OR EXISTS (
|
||||
SELECT 1 FROM repository_annotations ra
|
||||
WHERE ra.did = u.did AND ra.repository = t.repository
|
||||
AND ra.value LIKE ? ESCAPE '\'
|
||||
)
|
||||
ORDER BY t.created_at DESC
|
||||
LIMIT ? OFFSET ?
|
||||
`
|
||||
|
||||
rows, err := db.Query(sqlQuery, searchPattern, query, searchPattern, searchPattern, searchPattern, limit, offset)
|
||||
rows, err := db.Query(sqlQuery, currentUserDID, searchPattern, query, searchPattern, searchPattern, limit, offset)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
@@ -136,9 +145,11 @@ func SearchPushes(db *sql.DB, query string, limit, offset int) ([]Push, int, err
|
||||
var pushes []Push
|
||||
for rows.Next() {
|
||||
var p Push
|
||||
if err := rows.Scan(&p.DID, &p.Handle, &p.Repository, &p.Tag, &p.Digest, &p.Title, &p.Description, &p.IconURL, &p.PullCount, &p.StarCount, &p.CreatedAt); err != nil {
|
||||
var isStarredInt int
|
||||
if err := rows.Scan(&p.DID, &p.Handle, &p.Repository, &p.Tag, &p.Digest, &p.Title, &p.Description, &p.IconURL, &p.PullCount, &p.StarCount, &isStarredInt, &p.CreatedAt, &p.HoldEndpoint); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
p.IsStarred = isStarredInt > 0
|
||||
pushes = append(pushes, p)
|
||||
}
|
||||
|
||||
@@ -151,12 +162,15 @@ func SearchPushes(db *sql.DB, query string, limit, offset int) ([]Push, int, err
|
||||
WHERE u.handle LIKE ? ESCAPE '\'
|
||||
OR u.did = ?
|
||||
OR t.repository LIKE ? ESCAPE '\'
|
||||
OR m.title LIKE ? ESCAPE '\'
|
||||
OR m.description LIKE ? ESCAPE '\'
|
||||
OR EXISTS (
|
||||
SELECT 1 FROM repository_annotations ra
|
||||
WHERE ra.did = u.did AND ra.repository = t.repository
|
||||
AND ra.value LIKE ? ESCAPE '\'
|
||||
)
|
||||
`
|
||||
|
||||
var total int
|
||||
if err := db.QueryRow(countQuery, searchPattern, query, searchPattern, searchPattern, searchPattern).Scan(&total); err != nil {
|
||||
if err := db.QueryRow(countQuery, searchPattern, query, searchPattern, searchPattern).Scan(&total); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
@@ -240,8 +254,7 @@ func GetUserRepositories(db *sql.DB, did string) ([]Repository, error) {
|
||||
// Get manifests for this repo
|
||||
manifestRows, err := db.Query(`
|
||||
SELECT id, digest, hold_endpoint, schema_version, media_type,
|
||||
config_digest, config_size, created_at,
|
||||
title, description, source_url, documentation_url, licenses, icon_url
|
||||
config_digest, config_size, created_at
|
||||
FROM manifests
|
||||
WHERE did = ? AND repository = ?
|
||||
ORDER BY created_at DESC
|
||||
@@ -256,65 +269,51 @@ func GetUserRepositories(db *sql.DB, did string) ([]Repository, error) {
|
||||
m.DID = did
|
||||
m.Repository = r.Name
|
||||
|
||||
// Use sql.NullString for nullable annotation fields
|
||||
var title, description, sourceURL, documentationURL, licenses, iconURL sql.NullString
|
||||
|
||||
if err := manifestRows.Scan(&m.ID, &m.Digest, &m.HoldEndpoint, &m.SchemaVersion,
|
||||
&m.MediaType, &m.ConfigDigest, &m.ConfigSize, &m.CreatedAt,
|
||||
&title, &description, &sourceURL, &documentationURL, &licenses, &iconURL); err != nil {
|
||||
&m.MediaType, &m.ConfigDigest, &m.ConfigSize, &m.CreatedAt); err != nil {
|
||||
manifestRows.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert NullString to string
|
||||
if title.Valid {
|
||||
m.Title = title.String
|
||||
}
|
||||
if description.Valid {
|
||||
m.Description = description.String
|
||||
}
|
||||
if sourceURL.Valid {
|
||||
m.SourceURL = sourceURL.String
|
||||
}
|
||||
if documentationURL.Valid {
|
||||
m.DocumentationURL = documentationURL.String
|
||||
}
|
||||
if licenses.Valid {
|
||||
m.Licenses = licenses.String
|
||||
}
|
||||
if iconURL.Valid {
|
||||
m.IconURL = iconURL.String
|
||||
}
|
||||
|
||||
r.Manifests = append(r.Manifests, m)
|
||||
}
|
||||
manifestRows.Close()
|
||||
|
||||
// Aggregate repository-level annotations from most recent manifest
|
||||
if len(r.Manifests) > 0 {
|
||||
latest := r.Manifests[0]
|
||||
r.Title = latest.Title
|
||||
r.Description = latest.Description
|
||||
r.SourceURL = latest.SourceURL
|
||||
r.DocumentationURL = latest.DocumentationURL
|
||||
r.Licenses = latest.Licenses
|
||||
r.IconURL = latest.IconURL
|
||||
// Fetch repository-level annotations from annotations table
|
||||
annotations, err := GetRepositoryAnnotations(db, did, r.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r.Title = annotations["org.opencontainers.image.title"]
|
||||
r.Description = annotations["org.opencontainers.image.description"]
|
||||
r.SourceURL = annotations["org.opencontainers.image.source"]
|
||||
r.DocumentationURL = annotations["org.opencontainers.image.documentation"]
|
||||
r.Licenses = annotations["org.opencontainers.image.licenses"]
|
||||
r.IconURL = annotations["io.atcr.icon"]
|
||||
r.ReadmeURL = annotations["io.atcr.readme"]
|
||||
|
||||
repos = append(repos, r)
|
||||
}
|
||||
|
||||
return repos, nil
|
||||
}
|
||||
|
||||
// GetRepositoryMetadata retrieves metadata for a repository from annotations table
|
||||
// Returns a map of annotation key -> value for easy access in templates and handlers
|
||||
func GetRepositoryMetadata(db *sql.DB, did string, repository string) (map[string]string, error) {
|
||||
return GetRepositoryAnnotations(db, did, repository)
|
||||
}
|
||||
|
||||
// GetUserByDID retrieves a user by DID
|
||||
func GetUserByDID(db *sql.DB, did string) (*User, error) {
|
||||
var user User
|
||||
var avatar sql.NullString
|
||||
err := db.QueryRow(`
|
||||
SELECT did, handle, pds_endpoint, avatar, last_seen
|
||||
FROM users
|
||||
WHERE did = ?
|
||||
`, did).Scan(&user.DID, &user.Handle, &user.PDSEndpoint, &user.Avatar, &user.LastSeen)
|
||||
`, did).Scan(&user.DID, &user.Handle, &user.PDSEndpoint, &avatar, &user.LastSeen)
|
||||
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
@@ -323,17 +322,23 @@ func GetUserByDID(db *sql.DB, did string) (*User, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Handle NULL avatar
|
||||
if avatar.Valid {
|
||||
user.Avatar = avatar.String
|
||||
}
|
||||
|
||||
return &user, nil
|
||||
}
|
||||
|
||||
// GetUserByHandle retrieves a user by handle
|
||||
func GetUserByHandle(db *sql.DB, handle string) (*User, error) {
|
||||
var user User
|
||||
var avatar sql.NullString
|
||||
err := db.QueryRow(`
|
||||
SELECT did, handle, pds_endpoint, avatar, last_seen
|
||||
FROM users
|
||||
WHERE handle = ?
|
||||
`, handle).Scan(&user.DID, &user.Handle, &user.PDSEndpoint, &user.Avatar, &user.LastSeen)
|
||||
`, handle).Scan(&user.DID, &user.Handle, &user.PDSEndpoint, &avatar, &user.LastSeen)
|
||||
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
@@ -342,6 +347,11 @@ func GetUserByHandle(db *sql.DB, handle string) (*User, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Handle NULL avatar
|
||||
if avatar.Valid {
|
||||
user.Avatar = avatar.String
|
||||
}
|
||||
|
||||
return &user, nil
|
||||
}
|
||||
|
||||
@@ -359,6 +369,38 @@ func UpsertUser(db *sql.DB, user *User) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// UpsertUserIgnoreAvatar inserts or updates a user record, but preserves existing avatar on update
|
||||
// This is useful when avatar fetch fails, and we don't want to overwrite an existing avatar with empty string
|
||||
func UpsertUserIgnoreAvatar(db *sql.DB, user *User) error {
|
||||
_, err := db.Exec(`
|
||||
INSERT INTO users (did, handle, pds_endpoint, avatar, last_seen)
|
||||
VALUES (?, ?, ?, ?, ?)
|
||||
ON CONFLICT(did) DO UPDATE SET
|
||||
handle = excluded.handle,
|
||||
pds_endpoint = excluded.pds_endpoint,
|
||||
last_seen = excluded.last_seen
|
||||
`, user.DID, user.Handle, user.PDSEndpoint, user.Avatar, user.LastSeen)
|
||||
return err
|
||||
}
|
||||
|
||||
// UpdateUserLastSeen updates only the last_seen timestamp for a user
|
||||
// This is more efficient than UpsertUser when only updating activity timestamp
|
||||
func UpdateUserLastSeen(db *sql.DB, did string) error {
|
||||
_, err := db.Exec(`
|
||||
UPDATE users SET last_seen = ? WHERE did = ?
|
||||
`, time.Now(), did)
|
||||
return err
|
||||
}
|
||||
|
||||
// UpdateUserHandle updates a user's handle when an identity change event is received
|
||||
// This is called when Jetstream receives an identity event indicating a handle change
|
||||
func UpdateUserHandle(db *sql.DB, did string, newHandle string) error {
|
||||
_, err := db.Exec(`
|
||||
UPDATE users SET handle = ?, last_seen = ? WHERE did = ?
|
||||
`, newHandle, time.Now(), did)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetManifestDigestsForDID returns all manifest digests for a DID
|
||||
func GetManifestDigestsForDID(db *sql.DB, did string) ([]string, error) {
|
||||
rows, err := db.Query(`
|
||||
@@ -484,25 +526,42 @@ func DeleteTagsNotInList(db *sql.DB, did string, keepTags []struct{ Repository,
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
// InsertManifest inserts a new manifest record
|
||||
// InsertManifest inserts or updates a manifest record
|
||||
// Uses UPSERT to update core metadata if manifest already exists
|
||||
// Returns the manifest ID (works correctly for both insert and update)
|
||||
// Note: Annotations are stored separately in repository_annotations table
|
||||
func InsertManifest(db *sql.DB, manifest *Manifest) (int64, error) {
|
||||
result, err := db.Exec(`
|
||||
INSERT OR IGNORE INTO manifests
|
||||
_, err := db.Exec(`
|
||||
INSERT INTO manifests
|
||||
(did, repository, digest, hold_endpoint, schema_version, media_type,
|
||||
config_digest, config_size, created_at,
|
||||
title, description, source_url, documentation_url, licenses, icon_url)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
config_digest, config_size, created_at)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
ON CONFLICT(did, repository, digest) DO UPDATE SET
|
||||
hold_endpoint = excluded.hold_endpoint,
|
||||
schema_version = excluded.schema_version,
|
||||
media_type = excluded.media_type,
|
||||
config_digest = excluded.config_digest,
|
||||
config_size = excluded.config_size
|
||||
`, manifest.DID, manifest.Repository, manifest.Digest, manifest.HoldEndpoint,
|
||||
manifest.SchemaVersion, manifest.MediaType, manifest.ConfigDigest,
|
||||
manifest.ConfigSize, manifest.CreatedAt,
|
||||
manifest.Title, manifest.Description, manifest.SourceURL,
|
||||
manifest.DocumentationURL, manifest.Licenses, manifest.IconURL)
|
||||
manifest.ConfigSize, manifest.CreatedAt)
|
||||
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return result.LastInsertId()
|
||||
// Query for the ID (works for both insert and update)
|
||||
var id int64
|
||||
err = db.QueryRow(`
|
||||
SELECT id FROM manifests
|
||||
WHERE did = ? AND repository = ? AND digest = ?
|
||||
`, manifest.DID, manifest.Repository, manifest.Digest).Scan(&id)
|
||||
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to get manifest ID after upsert: %w", err)
|
||||
}
|
||||
|
||||
return id, nil
|
||||
}
|
||||
|
||||
// InsertLayer inserts a new layer record
|
||||
@@ -534,6 +593,80 @@ func DeleteTag(db *sql.DB, did, repository, tag string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// GetTagsWithPlatforms returns all tags for a repository with platform information
|
||||
// Only multi-arch tags (manifest lists) have platform info in manifest_references
|
||||
// Single-arch tags will have empty Platforms slice (platform is obvious for single-arch)
|
||||
func GetTagsWithPlatforms(db *sql.DB, did, repository string) ([]TagWithPlatforms, error) {
|
||||
rows, err := db.Query(`
|
||||
SELECT
|
||||
t.id,
|
||||
t.did,
|
||||
t.repository,
|
||||
t.tag,
|
||||
t.digest,
|
||||
t.created_at,
|
||||
m.media_type,
|
||||
COALESCE(mr.platform_os, '') as platform_os,
|
||||
COALESCE(mr.platform_architecture, '') as platform_architecture,
|
||||
COALESCE(mr.platform_variant, '') as platform_variant,
|
||||
COALESCE(mr.platform_os_version, '') as platform_os_version
|
||||
FROM tags t
|
||||
JOIN manifests m ON t.digest = m.digest AND t.did = m.did AND t.repository = m.repository
|
||||
LEFT JOIN manifest_references mr ON m.id = mr.manifest_id
|
||||
WHERE t.did = ? AND t.repository = ?
|
||||
ORDER BY t.created_at DESC, mr.reference_index
|
||||
`, did, repository)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
// Group platforms by tag
|
||||
tagMap := make(map[string]*TagWithPlatforms)
|
||||
var tagOrder []string // Preserve order
|
||||
|
||||
for rows.Next() {
|
||||
var t Tag
|
||||
var mediaType, platformOS, platformArch, platformVariant, platformOSVersion string
|
||||
|
||||
if err := rows.Scan(&t.ID, &t.DID, &t.Repository, &t.Tag, &t.Digest, &t.CreatedAt,
|
||||
&mediaType, &platformOS, &platformArch, &platformVariant, &platformOSVersion); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get or create TagWithPlatforms
|
||||
tagKey := t.Tag
|
||||
if _, exists := tagMap[tagKey]; !exists {
|
||||
tagMap[tagKey] = &TagWithPlatforms{
|
||||
Tag: t,
|
||||
Platforms: []PlatformInfo{},
|
||||
}
|
||||
tagOrder = append(tagOrder, tagKey)
|
||||
}
|
||||
|
||||
// Add platform info if present (only for multi-arch manifest lists)
|
||||
if platformOS != "" || platformArch != "" {
|
||||
tagMap[tagKey].Platforms = append(tagMap[tagKey].Platforms, PlatformInfo{
|
||||
OS: platformOS,
|
||||
Architecture: platformArch,
|
||||
Variant: platformVariant,
|
||||
OSVersion: platformOSVersion,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Convert map to slice, preserving order and setting IsMultiArch
|
||||
result := make([]TagWithPlatforms, 0, len(tagMap))
|
||||
for _, tagKey := range tagOrder {
|
||||
tag := tagMap[tagKey]
|
||||
tag.IsMultiArch = len(tag.Platforms) > 1
|
||||
result = append(result, *tag)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// DeleteManifest deletes a manifest and its associated layers
|
||||
// If repository is empty, deletes all manifests matching did and digest
|
||||
func DeleteManifest(db *sql.DB, did, repository, digest string) error {
|
||||
@@ -549,50 +682,72 @@ func DeleteManifest(db *sql.DB, did, repository, digest string) error {
|
||||
}
|
||||
|
||||
// GetManifest fetches a single manifest by digest
|
||||
// Note: Annotations are stored separately in repository_annotations table
|
||||
func GetManifest(db *sql.DB, digest string) (*Manifest, error) {
|
||||
var m Manifest
|
||||
|
||||
// Use sql.NullString for nullable annotation fields
|
||||
var title, description, sourceURL, documentationURL, licenses, iconURL sql.NullString
|
||||
|
||||
err := db.QueryRow(`
|
||||
SELECT id, did, repository, digest, hold_endpoint, schema_version,
|
||||
media_type, config_digest, config_size, created_at,
|
||||
title, description, source_url, documentation_url, licenses, icon_url
|
||||
media_type, config_digest, config_size, created_at
|
||||
FROM manifests
|
||||
WHERE digest = ?
|
||||
`, digest).Scan(&m.ID, &m.DID, &m.Repository, &m.Digest, &m.HoldEndpoint,
|
||||
&m.SchemaVersion, &m.MediaType, &m.ConfigDigest, &m.ConfigSize,
|
||||
&m.CreatedAt,
|
||||
&title, &description, &sourceURL, &documentationURL, &licenses, &iconURL)
|
||||
&m.CreatedAt)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert NullString to string
|
||||
if title.Valid {
|
||||
m.Title = title.String
|
||||
}
|
||||
if description.Valid {
|
||||
m.Description = description.String
|
||||
}
|
||||
if sourceURL.Valid {
|
||||
m.SourceURL = sourceURL.String
|
||||
}
|
||||
if documentationURL.Valid {
|
||||
m.DocumentationURL = documentationURL.String
|
||||
}
|
||||
if licenses.Valid {
|
||||
m.Licenses = licenses.String
|
||||
}
|
||||
if iconURL.Valid {
|
||||
m.IconURL = iconURL.String
|
||||
}
|
||||
|
||||
return &m, nil
|
||||
}
|
||||
|
||||
// GetNewestManifestForRepo returns the newest manifest for a specific repository
|
||||
// Used by backfill to ensure annotations come from the most recent manifest
|
||||
func GetNewestManifestForRepo(db *sql.DB, did, repository string) (*Manifest, error) {
|
||||
var m Manifest
|
||||
err := db.QueryRow(`
|
||||
SELECT id, did, repository, digest, hold_endpoint, schema_version, media_type,
|
||||
config_digest, config_size, created_at
|
||||
FROM manifests
|
||||
WHERE did = ? AND repository = ?
|
||||
ORDER BY created_at DESC
|
||||
LIMIT 1
|
||||
`, did, repository).Scan(
|
||||
&m.ID, &m.DID, &m.Repository, &m.Digest,
|
||||
&m.HoldEndpoint, &m.SchemaVersion, &m.MediaType,
|
||||
&m.ConfigDigest, &m.ConfigSize, &m.CreatedAt,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &m, nil
|
||||
}
|
||||
|
||||
// GetRepositoriesForDID returns all unique repository names for a DID
|
||||
// Used by backfill to reconcile annotations for all repositories
|
||||
func GetRepositoriesForDID(db *sql.DB, did string) ([]string, error) {
|
||||
rows, err := db.Query(`
|
||||
SELECT DISTINCT repository
|
||||
FROM manifests
|
||||
WHERE did = ?
|
||||
`, did)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var repositories []string
|
||||
for rows.Next() {
|
||||
var repo string
|
||||
if err := rows.Scan(&repo); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
repositories = append(repositories, repo)
|
||||
}
|
||||
return repositories, rows.Err()
|
||||
}
|
||||
|
||||
// GetLayersForManifest fetches all layers for a manifest
|
||||
func GetLayersForManifest(db *sql.DB, manifestID int64) ([]Layer, error) {
|
||||
rows, err := db.Query(`
|
||||
@@ -619,6 +774,289 @@ func GetLayersForManifest(db *sql.DB, manifestID int64) ([]Layer, error) {
|
||||
return layers, nil
|
||||
}
|
||||
|
||||
// InsertManifestReference inserts a new manifest reference record (for manifest lists/indexes)
|
||||
func InsertManifestReference(db *sql.DB, ref *ManifestReference) error {
|
||||
_, err := db.Exec(`
|
||||
INSERT INTO manifest_references (manifest_id, digest, size, media_type,
|
||||
platform_architecture, platform_os,
|
||||
platform_variant, platform_os_version,
|
||||
reference_index)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
`, ref.ManifestID, ref.Digest, ref.Size, ref.MediaType,
|
||||
ref.PlatformArchitecture, ref.PlatformOS,
|
||||
ref.PlatformVariant, ref.PlatformOSVersion,
|
||||
ref.ReferenceIndex)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetManifestReferencesForManifest fetches all manifest references for a manifest list/index
|
||||
func GetManifestReferencesForManifest(db *sql.DB, manifestID int64) ([]ManifestReference, error) {
|
||||
rows, err := db.Query(`
|
||||
SELECT manifest_id, digest, size, media_type,
|
||||
platform_architecture, platform_os, platform_variant, platform_os_version,
|
||||
reference_index
|
||||
FROM manifest_references
|
||||
WHERE manifest_id = ?
|
||||
ORDER BY reference_index
|
||||
`, manifestID)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var refs []ManifestReference
|
||||
for rows.Next() {
|
||||
var r ManifestReference
|
||||
var arch, os, variant, osVersion sql.NullString
|
||||
if err := rows.Scan(&r.ManifestID, &r.Digest, &r.Size, &r.MediaType,
|
||||
&arch, &os, &variant, &osVersion,
|
||||
&r.ReferenceIndex); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert nullable strings
|
||||
if arch.Valid {
|
||||
r.PlatformArchitecture = arch.String
|
||||
}
|
||||
if os.Valid {
|
||||
r.PlatformOS = os.String
|
||||
}
|
||||
if variant.Valid {
|
||||
r.PlatformVariant = variant.String
|
||||
}
|
||||
if osVersion.Valid {
|
||||
r.PlatformOSVersion = osVersion.String
|
||||
}
|
||||
|
||||
refs = append(refs, r)
|
||||
}
|
||||
|
||||
return refs, nil
|
||||
}
|
||||
|
||||
// GetTopLevelManifests returns only manifest lists and orphaned single-arch manifests
|
||||
// Filters out platform-specific manifests that are referenced by manifest lists
|
||||
// Note: Annotations are stored separately in repository_annotations table - use GetRepositoryMetadata to fetch them
|
||||
func GetTopLevelManifests(db *sql.DB, did, repository string, limit, offset int) ([]ManifestWithMetadata, error) {
|
||||
rows, err := db.Query(`
|
||||
WITH manifest_list_children AS (
|
||||
-- Get all digests that are children of manifest lists
|
||||
SELECT DISTINCT mr.digest
|
||||
FROM manifest_references mr
|
||||
JOIN manifests m ON mr.manifest_id = m.id
|
||||
WHERE m.did = ? AND m.repository = ?
|
||||
)
|
||||
SELECT
|
||||
m.id, m.did, m.repository, m.digest, m.media_type,
|
||||
m.schema_version, m.created_at,
|
||||
m.config_digest, m.config_size, m.hold_endpoint,
|
||||
GROUP_CONCAT(DISTINCT t.tag) as tags,
|
||||
COUNT(DISTINCT mr.digest) as platform_count
|
||||
FROM manifests m
|
||||
LEFT JOIN tags t ON m.digest = t.digest AND m.did = t.did AND m.repository = t.repository
|
||||
LEFT JOIN manifest_references mr ON m.id = mr.manifest_id
|
||||
WHERE m.did = ? AND m.repository = ?
|
||||
AND (
|
||||
-- Include manifest lists
|
||||
m.media_type LIKE '%index%' OR m.media_type LIKE '%manifest.list%'
|
||||
OR
|
||||
-- Include single-arch NOT referenced by any list
|
||||
m.digest NOT IN (SELECT digest FROM manifest_list_children WHERE digest IS NOT NULL)
|
||||
)
|
||||
GROUP BY m.id
|
||||
ORDER BY m.created_at DESC
|
||||
LIMIT ? OFFSET ?
|
||||
`, did, repository, did, repository, limit, offset)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var manifests []ManifestWithMetadata
|
||||
for rows.Next() {
|
||||
var m ManifestWithMetadata
|
||||
var tags, configDigest sql.NullString
|
||||
var configSize sql.NullInt64
|
||||
|
||||
if err := rows.Scan(
|
||||
&m.ID, &m.DID, &m.Repository, &m.Digest, &m.MediaType,
|
||||
&m.SchemaVersion, &m.CreatedAt,
|
||||
&configDigest, &configSize, &m.HoldEndpoint,
|
||||
&tags, &m.PlatformCount,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set nullable fields
|
||||
if configDigest.Valid {
|
||||
m.ConfigDigest = configDigest.String
|
||||
}
|
||||
if configSize.Valid {
|
||||
m.ConfigSize = configSize.Int64
|
||||
}
|
||||
|
||||
// Parse tags
|
||||
if tags.Valid && tags.String != "" {
|
||||
m.Tags = strings.Split(tags.String, ",")
|
||||
}
|
||||
|
||||
// Determine if manifest list
|
||||
m.IsManifestList = strings.Contains(m.MediaType, "index") || strings.Contains(m.MediaType, "manifest.list")
|
||||
|
||||
manifests = append(manifests, m)
|
||||
}
|
||||
|
||||
// Fetch platform details for multi-arch manifests AFTER closing the main query
|
||||
for i := range manifests {
|
||||
if manifests[i].IsManifestList {
|
||||
platformRows, err := db.Query(`
|
||||
SELECT
|
||||
mr.platform_os,
|
||||
mr.platform_architecture,
|
||||
mr.platform_variant,
|
||||
mr.platform_os_version
|
||||
FROM manifest_references mr
|
||||
WHERE mr.manifest_id = ?
|
||||
ORDER BY mr.reference_index
|
||||
`, manifests[i].ID)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
manifests[i].Platforms = []PlatformInfo{}
|
||||
for platformRows.Next() {
|
||||
var p PlatformInfo
|
||||
var os, arch, variant, osVersion sql.NullString
|
||||
|
||||
if err := platformRows.Scan(&os, &arch, &variant, &osVersion); err != nil {
|
||||
platformRows.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if os.Valid {
|
||||
p.OS = os.String
|
||||
}
|
||||
if arch.Valid {
|
||||
p.Architecture = arch.String
|
||||
}
|
||||
if variant.Valid {
|
||||
p.Variant = variant.String
|
||||
}
|
||||
if osVersion.Valid {
|
||||
p.OSVersion = osVersion.String
|
||||
}
|
||||
|
||||
manifests[i].Platforms = append(manifests[i].Platforms, p)
|
||||
}
|
||||
platformRows.Close()
|
||||
|
||||
manifests[i].PlatformCount = len(manifests[i].Platforms)
|
||||
}
|
||||
}
|
||||
|
||||
return manifests, nil
|
||||
}
|
||||
|
||||
// GetManifestDetail returns a manifest with full platform details and tags
|
||||
// Note: Annotations are stored separately in repository_annotations table - use GetRepositoryMetadata to fetch them
|
||||
func GetManifestDetail(db *sql.DB, did, repository, digest string) (*ManifestWithMetadata, error) {
|
||||
// First, get the manifest and its tags
|
||||
var m ManifestWithMetadata
|
||||
var tags, configDigest sql.NullString
|
||||
var configSize sql.NullInt64
|
||||
|
||||
err := db.QueryRow(`
|
||||
SELECT
|
||||
m.id, m.did, m.repository, m.digest, m.media_type,
|
||||
m.schema_version, m.created_at,
|
||||
m.config_digest, m.config_size, m.hold_endpoint,
|
||||
GROUP_CONCAT(DISTINCT t.tag) as tags
|
||||
FROM manifests m
|
||||
LEFT JOIN tags t ON m.digest = t.digest AND m.did = t.did AND m.repository = t.repository
|
||||
WHERE m.did = ? AND m.repository = ? AND m.digest = ?
|
||||
GROUP BY m.id
|
||||
`, did, repository, digest).Scan(
|
||||
&m.ID, &m.DID, &m.Repository, &m.Digest, &m.MediaType,
|
||||
&m.SchemaVersion, &m.CreatedAt,
|
||||
&configDigest, &configSize, &m.HoldEndpoint,
|
||||
&tags,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, fmt.Errorf("manifest not found")
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set nullable fields
|
||||
if configDigest.Valid {
|
||||
m.ConfigDigest = configDigest.String
|
||||
}
|
||||
if configSize.Valid {
|
||||
m.ConfigSize = configSize.Int64
|
||||
}
|
||||
|
||||
// Parse tags
|
||||
if tags.Valid && tags.String != "" {
|
||||
m.Tags = strings.Split(tags.String, ",")
|
||||
}
|
||||
|
||||
// Determine if manifest list
|
||||
m.IsManifestList = strings.Contains(m.MediaType, "index") || strings.Contains(m.MediaType, "manifest.list")
|
||||
|
||||
// If this is a manifest list, get platform details
|
||||
if m.IsManifestList {
|
||||
platforms, err := db.Query(`
|
||||
SELECT
|
||||
mr.platform_os,
|
||||
mr.platform_architecture,
|
||||
mr.platform_variant,
|
||||
mr.platform_os_version
|
||||
FROM manifest_references mr
|
||||
WHERE mr.manifest_id = ?
|
||||
ORDER BY mr.reference_index
|
||||
`, m.ID)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer platforms.Close()
|
||||
|
||||
m.Platforms = []PlatformInfo{}
|
||||
for platforms.Next() {
|
||||
var p PlatformInfo
|
||||
var os, arch, variant, osVersion sql.NullString
|
||||
|
||||
if err := platforms.Scan(&os, &arch, &variant, &osVersion); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if os.Valid {
|
||||
p.OS = os.String
|
||||
}
|
||||
if arch.Valid {
|
||||
p.Architecture = arch.String
|
||||
}
|
||||
if variant.Valid {
|
||||
p.Variant = variant.String
|
||||
}
|
||||
if osVersion.Valid {
|
||||
p.OSVersion = osVersion.String
|
||||
}
|
||||
|
||||
m.Platforms = append(m.Platforms, p)
|
||||
}
|
||||
|
||||
m.PlatformCount = len(m.Platforms)
|
||||
}
|
||||
|
||||
return &m, nil
|
||||
}
|
||||
|
||||
// GetFirehoseCursor retrieves the current firehose cursor
|
||||
func GetFirehoseCursor(db *sql.DB) (int64, error) {
|
||||
var cursor int64
|
||||
@@ -656,6 +1094,34 @@ func IsManifestTagged(db *sql.DB, did, repository, digest string) (bool, error)
|
||||
return count > 0, nil
|
||||
}
|
||||
|
||||
// GetManifestTags retrieves all tags for a manifest
|
||||
func GetManifestTags(db *sql.DB, did, repository, digest string) ([]string, error) {
|
||||
rows, err := db.Query(`
|
||||
SELECT tag FROM tags
|
||||
WHERE did = ? AND repository = ? AND digest = ?
|
||||
ORDER BY tag
|
||||
`, did, repository, digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var tags []string
|
||||
for rows.Next() {
|
||||
var tag string
|
||||
if err := rows.Scan(&tag); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tags = append(tags, tag)
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return tags, nil
|
||||
}
|
||||
|
||||
// BackfillState represents the backfill progress
|
||||
type BackfillState struct {
|
||||
StartCursor int64
|
||||
@@ -809,8 +1275,7 @@ func GetRepository(db *sql.DB, did, repository string) (*Repository, error) {
|
||||
// Get manifests for this repo
|
||||
manifestRows, err := db.Query(`
|
||||
SELECT id, digest, hold_endpoint, schema_version, media_type,
|
||||
config_digest, config_size, created_at,
|
||||
title, description, source_url, documentation_url, licenses, icon_url
|
||||
config_digest, config_size, created_at
|
||||
FROM manifests
|
||||
WHERE did = ? AND repository = ?
|
||||
ORDER BY created_at DESC
|
||||
@@ -825,51 +1290,30 @@ func GetRepository(db *sql.DB, did, repository string) (*Repository, error) {
|
||||
m.DID = did
|
||||
m.Repository = repository
|
||||
|
||||
// Use sql.NullString for nullable annotation fields
|
||||
var title, description, sourceURL, documentationURL, licenses, iconURL sql.NullString
|
||||
|
||||
if err := manifestRows.Scan(&m.ID, &m.Digest, &m.HoldEndpoint, &m.SchemaVersion,
|
||||
&m.MediaType, &m.ConfigDigest, &m.ConfigSize, &m.CreatedAt,
|
||||
&title, &description, &sourceURL, &documentationURL, &licenses, &iconURL); err != nil {
|
||||
&m.MediaType, &m.ConfigDigest, &m.ConfigSize, &m.CreatedAt); err != nil {
|
||||
manifestRows.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert NullString to string
|
||||
if title.Valid {
|
||||
m.Title = title.String
|
||||
}
|
||||
if description.Valid {
|
||||
m.Description = description.String
|
||||
}
|
||||
if sourceURL.Valid {
|
||||
m.SourceURL = sourceURL.String
|
||||
}
|
||||
if documentationURL.Valid {
|
||||
m.DocumentationURL = documentationURL.String
|
||||
}
|
||||
if licenses.Valid {
|
||||
m.Licenses = licenses.String
|
||||
}
|
||||
if iconURL.Valid {
|
||||
m.IconURL = iconURL.String
|
||||
}
|
||||
|
||||
r.Manifests = append(r.Manifests, m)
|
||||
}
|
||||
manifestRows.Close()
|
||||
|
||||
// Aggregate repository-level annotations from most recent manifest
|
||||
if len(r.Manifests) > 0 {
|
||||
latest := r.Manifests[0]
|
||||
r.Title = latest.Title
|
||||
r.Description = latest.Description
|
||||
r.SourceURL = latest.SourceURL
|
||||
r.DocumentationURL = latest.DocumentationURL
|
||||
r.Licenses = latest.Licenses
|
||||
r.IconURL = latest.IconURL
|
||||
// Fetch repository-level annotations from annotations table
|
||||
annotations, err := GetRepositoryAnnotations(db, did, repository)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r.Title = annotations["org.opencontainers.image.title"]
|
||||
r.Description = annotations["org.opencontainers.image.description"]
|
||||
r.SourceURL = annotations["org.opencontainers.image.source"]
|
||||
r.DocumentationURL = annotations["org.opencontainers.image.documentation"]
|
||||
r.Licenses = annotations["org.opencontainers.image.licenses"]
|
||||
r.IconURL = annotations["io.atcr.icon"]
|
||||
r.ReadmeURL = annotations["io.atcr.readme"]
|
||||
|
||||
return &r, nil
|
||||
}
|
||||
|
||||
@@ -1133,7 +1577,7 @@ func (m *MetricsDB) IncrementPushCount(did, repository string) error {
|
||||
}
|
||||
|
||||
// GetFeaturedRepositories fetches top repositories sorted by stars and pulls
|
||||
func GetFeaturedRepositories(db *sql.DB, limit int) ([]FeaturedRepository, error) {
|
||||
func GetFeaturedRepositories(db *sql.DB, limit int, currentUserDID string) ([]FeaturedRepository, error) {
|
||||
query := `
|
||||
WITH latest_manifests AS (
|
||||
SELECT did, repository, MAX(id) as latest_id
|
||||
@@ -1154,11 +1598,12 @@ func GetFeaturedRepositories(db *sql.DB, limit int) ([]FeaturedRepository, error
|
||||
m.did,
|
||||
u.handle,
|
||||
m.repository,
|
||||
m.title,
|
||||
m.description,
|
||||
m.icon_url,
|
||||
COALESCE((SELECT value FROM repository_annotations WHERE did = m.did AND repository = m.repository AND key = 'org.opencontainers.image.title'), ''),
|
||||
COALESCE((SELECT value FROM repository_annotations WHERE did = m.did AND repository = m.repository AND key = 'org.opencontainers.image.description'), ''),
|
||||
COALESCE((SELECT value FROM repository_annotations WHERE did = m.did AND repository = m.repository AND key = 'io.atcr.icon'), ''),
|
||||
rs.pull_count,
|
||||
rs.star_count
|
||||
rs.star_count,
|
||||
COALESCE((SELECT COUNT(*) FROM stars WHERE starrer_did = ? AND owner_did = m.did AND repository = m.repository), 0)
|
||||
FROM latest_manifests lm
|
||||
JOIN manifests m ON lm.latest_id = m.id
|
||||
JOIN users u ON m.did = u.did
|
||||
@@ -1167,7 +1612,7 @@ func GetFeaturedRepositories(db *sql.DB, limit int) ([]FeaturedRepository, error
|
||||
LIMIT ?
|
||||
`
|
||||
|
||||
rows, err := db.Query(query, limit)
|
||||
rows, err := db.Query(query, currentUserDID, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1176,23 +1621,13 @@ func GetFeaturedRepositories(db *sql.DB, limit int) ([]FeaturedRepository, error
|
||||
var featured []FeaturedRepository
|
||||
for rows.Next() {
|
||||
var f FeaturedRepository
|
||||
var title, description, iconURL sql.NullString
|
||||
var isStarredInt int
|
||||
|
||||
if err := rows.Scan(&f.OwnerDID, &f.OwnerHandle, &f.Repository,
|
||||
&title, &description, &iconURL, &f.PullCount, &f.StarCount); err != nil {
|
||||
&f.Title, &f.Description, &f.IconURL, &f.PullCount, &f.StarCount, &isStarredInt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert NullString to string
|
||||
if title.Valid {
|
||||
f.Title = title.String
|
||||
}
|
||||
if description.Valid {
|
||||
f.Description = description.String
|
||||
}
|
||||
if iconURL.Valid {
|
||||
f.IconURL = iconURL.String
|
||||
}
|
||||
f.IsStarred = isStarredInt > 0
|
||||
|
||||
featured = append(featured, f)
|
||||
}
|
||||
|
||||
1201
pkg/appview/db/queries_test.go
Normal file
1201
pkg/appview/db/queries_test.go
Normal file
File diff suppressed because it is too large
Load Diff
115
pkg/appview/db/readonly.go
Normal file
115
pkg/appview/db/readonly.go
Normal file
@@ -0,0 +1,115 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
sqlite3 "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
const (
|
||||
// ReadOnlyDriverName is the name of the custom SQLite driver with table authorization
|
||||
ReadOnlyDriverName = "sqlite3_readonly_public"
|
||||
)
|
||||
|
||||
// sensitiveTables defines tables that should never be accessible from public queries
|
||||
var sensitiveTables = map[string]bool{
|
||||
"oauth_sessions": true, // OAuth tokens
|
||||
"ui_sessions": true, // Session IDs
|
||||
"oauth_auth_requests": true, // OAuth state
|
||||
"devices": true, // Device secret hashes
|
||||
"pending_device_auth": true, // Pending device secrets
|
||||
}
|
||||
|
||||
// readOnlyAuthorizerCallback blocks access to sensitive tables
|
||||
func readOnlyAuthorizerCallback(action int, arg1, arg2, dbName string) int {
|
||||
// arg1 contains the table name for most operations
|
||||
tableName := arg1
|
||||
|
||||
// Block any access to sensitive tables
|
||||
if action == sqlite3.SQLITE_READ || action == sqlite3.SQLITE_UPDATE ||
|
||||
action == sqlite3.SQLITE_INSERT || action == sqlite3.SQLITE_DELETE ||
|
||||
action == sqlite3.SQLITE_SELECT {
|
||||
if sensitiveTables[tableName] {
|
||||
slog.Warn("Blocked access to sensitive table", "component", "SECURITY", "table", tableName, "action", action)
|
||||
return sqlite3.SQLITE_DENY
|
||||
}
|
||||
}
|
||||
|
||||
// Allow everything else
|
||||
return sqlite3.SQLITE_OK
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Register a custom SQLite driver with authorizer for read-only public queries
|
||||
sql.Register(ReadOnlyDriverName,
|
||||
&sqlite3.SQLiteDriver{
|
||||
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
|
||||
conn.RegisterAuthorizer(readOnlyAuthorizerCallback)
|
||||
return nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// InitializeDatabase initializes the SQLite database and session store
|
||||
// Returns: (read-write DB, read-only DB, session store)
|
||||
func InitializeDatabase(uiEnabled bool, dbPath string, skipMigrations bool) (*sql.DB, *sql.DB, *SessionStore) {
|
||||
if !uiEnabled {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
// Ensure directory exists
|
||||
dbDir := filepath.Dir(dbPath)
|
||||
if err := os.MkdirAll(dbDir, 0700); err != nil {
|
||||
slog.Warn("Failed to create UI database directory", "error", err)
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
// Initialize read-write database (for writes and auth operations)
|
||||
database, err := InitDB(dbPath, skipMigrations)
|
||||
if err != nil {
|
||||
slog.Warn("Failed to initialize UI database", "error", err)
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
// Open read-only connection for public queries (search, user pages, etc.)
|
||||
// Uses custom driver with SQLite authorizer that blocks sensitive tables
|
||||
// This prevents accidental writes and blocks access to sensitive tables even if SQL injection occurs
|
||||
readOnlyDB, err := sql.Open(ReadOnlyDriverName, "file:"+dbPath+"?mode=ro")
|
||||
if err != nil {
|
||||
slog.Warn("Failed to open read-only database connection", "error", err)
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
slog.Info("UI database initialized", "mode", "readonly", "path", dbPath)
|
||||
|
||||
// Create SQLite-backed session store
|
||||
sessionStore := NewSessionStore(database)
|
||||
|
||||
// Start cleanup goroutines for all SQLite stores
|
||||
go func() {
|
||||
ticker := time.NewTicker(1 * time.Hour)
|
||||
defer ticker.Stop()
|
||||
for range ticker.C {
|
||||
ctx := context.Background()
|
||||
|
||||
// Cleanup UI sessions
|
||||
sessionStore.Cleanup()
|
||||
|
||||
// Cleanup OAuth sessions (older than 30 days)
|
||||
oauthStore := NewOAuthStore(database)
|
||||
oauthStore.CleanupOldSessions(ctx, 30*24*time.Hour)
|
||||
oauthStore.CleanupExpiredAuthRequests(ctx)
|
||||
|
||||
// Cleanup device pending auths
|
||||
deviceStore := NewDeviceStore(database)
|
||||
deviceStore.CleanupExpired()
|
||||
}
|
||||
}()
|
||||
|
||||
return database, readOnlyDB, sessionStore
|
||||
}
|
||||
@@ -1,12 +1,10 @@
|
||||
package main
|
||||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"atcr.io/pkg/appview/db"
|
||||
)
|
||||
|
||||
func TestAuthorizerBlocksSensitiveTables(t *testing.T) {
|
||||
@@ -15,11 +13,13 @@ func TestAuthorizerBlocksSensitiveTables(t *testing.T) {
|
||||
dbPath := filepath.Join(tmpDir, "test.db")
|
||||
|
||||
// Set environment for database path
|
||||
os.Setenv("ATCR_UI_DATABASE_PATH", dbPath)
|
||||
if err := os.Setenv("ATCR_UI_DATABASE_PATH", dbPath); err != nil {
|
||||
t.Fatalf("Failed to set environment variable: %v", err)
|
||||
}
|
||||
defer os.Unsetenv("ATCR_UI_DATABASE_PATH")
|
||||
|
||||
// Initialize database (creates schema)
|
||||
database, err := db.InitDB(dbPath)
|
||||
database, err := InitDB(dbPath, true)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to initialize database: %v", err)
|
||||
}
|
||||
@@ -43,7 +43,7 @@ func TestAuthorizerBlocksSensitiveTables(t *testing.T) {
|
||||
}
|
||||
|
||||
// Open read-only connection with authorizer (using our custom driver)
|
||||
readOnlyDB, err := sql.Open("sqlite3_readonly_public", "file:"+dbPath+"?mode=ro")
|
||||
readOnlyDB, err := sql.Open(ReadOnlyDriverName, "file:"+dbPath+"?mode=ro")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to open read-only database: %v", err)
|
||||
}
|
||||
@@ -1,3 +1,7 @@
|
||||
// Package db provides the database layer for the AppView web UI, including
|
||||
// SQLite schema initialization, migrations, and query functions for OAuth
|
||||
// sessions, device flows, repository metadata, stars, pull counts, and
|
||||
// user profiles.
|
||||
package db
|
||||
|
||||
import (
|
||||
@@ -5,6 +9,7 @@ import (
|
||||
"embed"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"log/slog"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
@@ -17,160 +22,11 @@ import (
|
||||
//go:embed migrations/*.yaml
|
||||
var migrationsFS embed.FS
|
||||
|
||||
const schema = `
|
||||
CREATE TABLE IF NOT EXISTS schema_migrations (
|
||||
version INTEGER PRIMARY KEY,
|
||||
applied_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS users (
|
||||
did TEXT PRIMARY KEY,
|
||||
handle TEXT NOT NULL,
|
||||
pds_endpoint TEXT NOT NULL,
|
||||
avatar TEXT,
|
||||
last_seen TIMESTAMP NOT NULL,
|
||||
UNIQUE(handle)
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_users_handle ON users(handle);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS manifests (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
did TEXT NOT NULL,
|
||||
repository TEXT NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
hold_endpoint TEXT NOT NULL,
|
||||
schema_version INTEGER NOT NULL,
|
||||
media_type TEXT NOT NULL,
|
||||
config_digest TEXT,
|
||||
config_size INTEGER,
|
||||
created_at TIMESTAMP NOT NULL,
|
||||
title TEXT,
|
||||
description TEXT,
|
||||
source_url TEXT,
|
||||
documentation_url TEXT,
|
||||
licenses TEXT,
|
||||
icon_url TEXT,
|
||||
UNIQUE(did, repository, digest),
|
||||
FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_manifests_did_repo ON manifests(did, repository);
|
||||
CREATE INDEX IF NOT EXISTS idx_manifests_created_at ON manifests(created_at DESC);
|
||||
CREATE INDEX IF NOT EXISTS idx_manifests_digest ON manifests(digest);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS layers (
|
||||
manifest_id INTEGER NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
size INTEGER NOT NULL,
|
||||
media_type TEXT NOT NULL,
|
||||
layer_index INTEGER NOT NULL,
|
||||
PRIMARY KEY(manifest_id, layer_index),
|
||||
FOREIGN KEY(manifest_id) REFERENCES manifests(id) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_layers_digest ON layers(digest);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS tags (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
did TEXT NOT NULL,
|
||||
repository TEXT NOT NULL,
|
||||
tag TEXT NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
created_at TIMESTAMP NOT NULL,
|
||||
UNIQUE(did, repository, tag),
|
||||
FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_tags_did_repo ON tags(did, repository);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS oauth_sessions (
|
||||
session_key TEXT PRIMARY KEY,
|
||||
account_did TEXT NOT NULL,
|
||||
session_id TEXT NOT NULL,
|
||||
session_data TEXT NOT NULL,
|
||||
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
UNIQUE(account_did, session_id)
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_oauth_sessions_did ON oauth_sessions(account_did);
|
||||
CREATE INDEX IF NOT EXISTS idx_oauth_sessions_updated ON oauth_sessions(updated_at DESC);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS oauth_auth_requests (
|
||||
state TEXT PRIMARY KEY,
|
||||
request_data TEXT NOT NULL,
|
||||
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_oauth_auth_requests_created ON oauth_auth_requests(created_at);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS ui_sessions (
|
||||
id TEXT PRIMARY KEY,
|
||||
did TEXT NOT NULL,
|
||||
handle TEXT NOT NULL,
|
||||
pds_endpoint TEXT NOT NULL,
|
||||
oauth_session_id TEXT,
|
||||
expires_at TIMESTAMP NOT NULL,
|
||||
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_ui_sessions_did ON ui_sessions(did);
|
||||
CREATE INDEX IF NOT EXISTS idx_ui_sessions_expires ON ui_sessions(expires_at);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS devices (
|
||||
id TEXT PRIMARY KEY,
|
||||
did TEXT NOT NULL,
|
||||
handle TEXT NOT NULL,
|
||||
name TEXT NOT NULL,
|
||||
secret_hash TEXT NOT NULL UNIQUE,
|
||||
ip_address TEXT,
|
||||
location TEXT,
|
||||
user_agent TEXT,
|
||||
created_at TIMESTAMP NOT NULL,
|
||||
last_used TIMESTAMP,
|
||||
FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_devices_did ON devices(did);
|
||||
CREATE INDEX IF NOT EXISTS idx_devices_hash ON devices(secret_hash);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS pending_device_auth (
|
||||
device_code TEXT PRIMARY KEY,
|
||||
user_code TEXT NOT NULL UNIQUE,
|
||||
device_name TEXT NOT NULL,
|
||||
ip_address TEXT,
|
||||
user_agent TEXT,
|
||||
expires_at TIMESTAMP NOT NULL,
|
||||
approved_did TEXT,
|
||||
approved_at TIMESTAMP,
|
||||
device_secret TEXT,
|
||||
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_pending_device_auth_user_code ON pending_device_auth(user_code);
|
||||
CREATE INDEX IF NOT EXISTS idx_pending_device_auth_expires ON pending_device_auth(expires_at);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS repository_stats (
|
||||
did TEXT NOT NULL,
|
||||
repository TEXT NOT NULL,
|
||||
pull_count INTEGER NOT NULL DEFAULT 0,
|
||||
last_pull TIMESTAMP,
|
||||
push_count INTEGER NOT NULL DEFAULT 0,
|
||||
last_push TIMESTAMP,
|
||||
PRIMARY KEY(did, repository),
|
||||
FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_repository_stats_did ON repository_stats(did);
|
||||
CREATE INDEX IF NOT EXISTS idx_repository_stats_pull_count ON repository_stats(pull_count DESC);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS stars (
|
||||
starrer_did TEXT NOT NULL,
|
||||
owner_did TEXT NOT NULL,
|
||||
repository TEXT NOT NULL,
|
||||
created_at TIMESTAMP NOT NULL,
|
||||
PRIMARY KEY(starrer_did, owner_did, repository),
|
||||
FOREIGN KEY(starrer_did) REFERENCES users(did) ON DELETE CASCADE,
|
||||
FOREIGN KEY(owner_did) REFERENCES users(did) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_stars_owner_repo ON stars(owner_did, repository);
|
||||
CREATE INDEX IF NOT EXISTS idx_stars_starrer ON stars(starrer_did);
|
||||
`
|
||||
//go:embed schema.sql
|
||||
var schemaSQL string
|
||||
|
||||
// InitDB initializes the SQLite database with the schema
|
||||
func InitDB(path string) (*sql.DB, error) {
|
||||
func InitDB(path string, skipMigrations bool) (*sql.DB, error) {
|
||||
db, err := sql.Open("sqlite3", path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -181,14 +37,16 @@ func InitDB(path string) (*sql.DB, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create schema
|
||||
if _, err := db.Exec(schema); err != nil {
|
||||
// Create schema from embedded SQL file
|
||||
if _, err := db.Exec(schemaSQL); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Run migrations
|
||||
if err := runMigrations(db); err != nil {
|
||||
return nil, err
|
||||
// Run migrations unless skipped
|
||||
if !skipMigrations {
|
||||
if err := runMigrations(db); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return db, nil
|
||||
@@ -229,7 +87,7 @@ func runMigrations(db *sql.DB) error {
|
||||
}
|
||||
|
||||
// Apply migration
|
||||
fmt.Printf("Applying migration %d: %s\n%s\n", m.Version, m.Name, m.Description)
|
||||
slog.Info("Applying migration", "version", m.Version, "name", m.Name, "description", m.Description)
|
||||
if _, err := db.Exec(m.Query); err != nil {
|
||||
return fmt.Errorf("failed to apply migration %d (%s): %w", m.Version, m.Name, err)
|
||||
}
|
||||
@@ -239,7 +97,7 @@ func runMigrations(db *sql.DB) error {
|
||||
return fmt.Errorf("failed to record migration %d: %w", m.Version, err)
|
||||
}
|
||||
|
||||
fmt.Printf("Migration %d applied successfully\n", m.Version)
|
||||
slog.Info("Migration applied successfully", "version", m.Version)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
212
pkg/appview/db/schema.sql
Normal file
212
pkg/appview/db/schema.sql
Normal file
@@ -0,0 +1,212 @@
|
||||
-- ATCR AppView Database Schema
|
||||
-- This file contains the complete base schema for fresh database installations.
|
||||
-- Migrations (in migrations/*.yaml) handle changes to existing databases.
|
||||
|
||||
CREATE TABLE IF NOT EXISTS schema_migrations (
|
||||
version INTEGER PRIMARY KEY,
|
||||
applied_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS users (
|
||||
did TEXT PRIMARY KEY,
|
||||
handle TEXT NOT NULL,
|
||||
pds_endpoint TEXT NOT NULL,
|
||||
avatar TEXT,
|
||||
last_seen TIMESTAMP NOT NULL,
|
||||
UNIQUE(handle)
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_users_handle ON users(handle);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS manifests (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
did TEXT NOT NULL,
|
||||
repository TEXT NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
hold_endpoint TEXT NOT NULL, -- Stored as DID (e.g., did:web:hold.example.com)
|
||||
schema_version INTEGER NOT NULL,
|
||||
media_type TEXT NOT NULL,
|
||||
config_digest TEXT,
|
||||
config_size INTEGER,
|
||||
created_at TIMESTAMP NOT NULL,
|
||||
UNIQUE(did, repository, digest),
|
||||
FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_manifests_did_repo ON manifests(did, repository);
|
||||
CREATE INDEX IF NOT EXISTS idx_manifests_created_at ON manifests(created_at DESC);
|
||||
CREATE INDEX IF NOT EXISTS idx_manifests_digest ON manifests(digest);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS repository_annotations (
|
||||
did TEXT NOT NULL,
|
||||
repository TEXT NOT NULL,
|
||||
key TEXT NOT NULL,
|
||||
value TEXT NOT NULL,
|
||||
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
PRIMARY KEY(did, repository, key),
|
||||
FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_repository_annotations_did_repo ON repository_annotations(did, repository);
|
||||
CREATE INDEX IF NOT EXISTS idx_repository_annotations_key ON repository_annotations(key);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS layers (
|
||||
manifest_id INTEGER NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
size INTEGER NOT NULL,
|
||||
media_type TEXT NOT NULL,
|
||||
layer_index INTEGER NOT NULL,
|
||||
PRIMARY KEY(manifest_id, layer_index),
|
||||
FOREIGN KEY(manifest_id) REFERENCES manifests(id) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_layers_digest ON layers(digest);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS manifest_references (
|
||||
manifest_id INTEGER NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
media_type TEXT NOT NULL,
|
||||
size INTEGER NOT NULL,
|
||||
platform_architecture TEXT,
|
||||
platform_os TEXT,
|
||||
platform_variant TEXT,
|
||||
platform_os_version TEXT,
|
||||
reference_index INTEGER NOT NULL,
|
||||
PRIMARY KEY(manifest_id, reference_index),
|
||||
FOREIGN KEY(manifest_id) REFERENCES manifests(id) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_manifest_references_digest ON manifest_references(digest);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS tags (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
did TEXT NOT NULL,
|
||||
repository TEXT NOT NULL,
|
||||
tag TEXT NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
created_at TIMESTAMP NOT NULL,
|
||||
UNIQUE(did, repository, tag),
|
||||
FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_tags_did_repo ON tags(did, repository);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS oauth_sessions (
|
||||
session_key TEXT PRIMARY KEY,
|
||||
account_did TEXT NOT NULL,
|
||||
session_id TEXT NOT NULL,
|
||||
session_data TEXT NOT NULL,
|
||||
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
UNIQUE(account_did, session_id)
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_oauth_sessions_did ON oauth_sessions(account_did);
|
||||
CREATE INDEX IF NOT EXISTS idx_oauth_sessions_updated ON oauth_sessions(updated_at DESC);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS oauth_auth_requests (
|
||||
state TEXT PRIMARY KEY,
|
||||
request_data TEXT NOT NULL,
|
||||
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_oauth_auth_requests_created ON oauth_auth_requests(created_at);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS ui_sessions (
|
||||
id TEXT PRIMARY KEY,
|
||||
did TEXT NOT NULL,
|
||||
handle TEXT NOT NULL,
|
||||
pds_endpoint TEXT NOT NULL,
|
||||
oauth_session_id TEXT,
|
||||
expires_at TIMESTAMP NOT NULL,
|
||||
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_ui_sessions_did ON ui_sessions(did);
|
||||
CREATE INDEX IF NOT EXISTS idx_ui_sessions_expires ON ui_sessions(expires_at);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS devices (
|
||||
id TEXT PRIMARY KEY,
|
||||
did TEXT NOT NULL,
|
||||
handle TEXT NOT NULL,
|
||||
name TEXT NOT NULL,
|
||||
secret_hash TEXT NOT NULL UNIQUE,
|
||||
ip_address TEXT,
|
||||
location TEXT,
|
||||
user_agent TEXT,
|
||||
created_at TIMESTAMP NOT NULL,
|
||||
last_used TIMESTAMP,
|
||||
FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_devices_did ON devices(did);
|
||||
CREATE INDEX IF NOT EXISTS idx_devices_hash ON devices(secret_hash);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS pending_device_auth (
|
||||
device_code TEXT PRIMARY KEY,
|
||||
user_code TEXT NOT NULL UNIQUE,
|
||||
device_name TEXT NOT NULL,
|
||||
ip_address TEXT,
|
||||
user_agent TEXT,
|
||||
expires_at TIMESTAMP NOT NULL,
|
||||
approved_did TEXT,
|
||||
approved_at TIMESTAMP,
|
||||
device_secret TEXT,
|
||||
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_pending_device_auth_user_code ON pending_device_auth(user_code);
|
||||
CREATE INDEX IF NOT EXISTS idx_pending_device_auth_expires ON pending_device_auth(expires_at);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS repository_stats (
|
||||
did TEXT NOT NULL,
|
||||
repository TEXT NOT NULL,
|
||||
pull_count INTEGER NOT NULL DEFAULT 0,
|
||||
last_pull TIMESTAMP,
|
||||
push_count INTEGER NOT NULL DEFAULT 0,
|
||||
last_push TIMESTAMP,
|
||||
PRIMARY KEY(did, repository),
|
||||
FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_repository_stats_did ON repository_stats(did);
|
||||
CREATE INDEX IF NOT EXISTS idx_repository_stats_pull_count ON repository_stats(pull_count DESC);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS stars (
|
||||
starrer_did TEXT NOT NULL,
|
||||
owner_did TEXT NOT NULL,
|
||||
repository TEXT NOT NULL,
|
||||
created_at TIMESTAMP NOT NULL,
|
||||
PRIMARY KEY(starrer_did, owner_did, repository),
|
||||
FOREIGN KEY(starrer_did) REFERENCES users(did) ON DELETE CASCADE,
|
||||
FOREIGN KEY(owner_did) REFERENCES users(did) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_stars_owner_repo ON stars(owner_did, repository);
|
||||
CREATE INDEX IF NOT EXISTS idx_stars_starrer ON stars(starrer_did);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS hold_captain_records (
|
||||
hold_did TEXT PRIMARY KEY,
|
||||
owner_did TEXT NOT NULL,
|
||||
public BOOLEAN NOT NULL,
|
||||
allow_all_crew BOOLEAN NOT NULL,
|
||||
deployed_at TEXT,
|
||||
region TEXT,
|
||||
provider TEXT,
|
||||
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_hold_captain_updated ON hold_captain_records(updated_at);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS hold_crew_approvals (
|
||||
hold_did TEXT NOT NULL,
|
||||
user_did TEXT NOT NULL,
|
||||
approved_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
expires_at TIMESTAMP NOT NULL,
|
||||
PRIMARY KEY(hold_did, user_did)
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_crew_approvals_expires ON hold_crew_approvals(expires_at);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS hold_crew_denials (
|
||||
hold_did TEXT NOT NULL,
|
||||
user_did TEXT NOT NULL,
|
||||
denial_count INTEGER NOT NULL DEFAULT 1,
|
||||
next_retry_at TIMESTAMP NOT NULL,
|
||||
last_denied_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
PRIMARY KEY(hold_did, user_did)
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_crew_denials_retry ON hold_crew_denials(next_retry_at);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS readme_cache (
|
||||
url TEXT PRIMARY KEY,
|
||||
html TEXT NOT NULL,
|
||||
fetched_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_readme_cache_fetched ON readme_cache(fetched_at);
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"database/sql"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
@@ -83,7 +84,7 @@ func (s *SessionStore) Get(id string) (*Session, bool) {
|
||||
return nil, false
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Printf("Warning: Failed to query session: %v\n", err)
|
||||
slog.Warn("Failed to query session", "error", err)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
@@ -124,7 +125,25 @@ func (s *SessionStore) Delete(id string) {
|
||||
`, id)
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("Warning: Failed to delete session: %v\n", err)
|
||||
slog.Warn("Failed to delete session", "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteByDID removes all sessions for a given DID
|
||||
// This is useful when OAuth refresh fails and we need to force re-authentication
|
||||
func (s *SessionStore) DeleteByDID(did string) {
|
||||
result, err := s.db.Exec(`
|
||||
DELETE FROM ui_sessions WHERE did = ?
|
||||
`, did)
|
||||
|
||||
if err != nil {
|
||||
slog.Warn("Failed to delete sessions for DID", "did", did, "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
deleted, _ := result.RowsAffected()
|
||||
if deleted > 0 {
|
||||
slog.Info("Deleted UI sessions for DID due to OAuth failure", "count", deleted, "did", did)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -136,13 +155,13 @@ func (s *SessionStore) Cleanup() {
|
||||
`)
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("Warning: Failed to cleanup sessions: %v\n", err)
|
||||
slog.Warn("Failed to cleanup sessions", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
deleted, _ := result.RowsAffected()
|
||||
if deleted > 0 {
|
||||
fmt.Printf("Cleaned up %d expired UI sessions\n", deleted)
|
||||
slog.Info("Cleaned up expired UI sessions", "count", deleted)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -159,7 +178,7 @@ func (s *SessionStore) CleanupContext(ctx context.Context) error {
|
||||
|
||||
deleted, _ := result.RowsAffected()
|
||||
if deleted > 0 {
|
||||
fmt.Printf("Cleaned up %d expired UI sessions\n", deleted)
|
||||
slog.Info("Cleaned up expired UI sessions", "count", deleted)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
533
pkg/appview/db/session_store_test.go
Normal file
533
pkg/appview/db/session_store_test.go
Normal file
@@ -0,0 +1,533 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// setupSessionTestDB creates an in-memory SQLite database for testing
|
||||
func setupSessionTestDB(t *testing.T) *SessionStore {
|
||||
t.Helper()
|
||||
// Use file::memory: with cache=shared to ensure all connections share the same in-memory DB
|
||||
db, err := InitDB("file::memory:?cache=shared", true)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to initialize test database: %v", err)
|
||||
}
|
||||
// Limit to single connection to avoid race conditions in tests
|
||||
db.SetMaxOpenConns(1)
|
||||
t.Cleanup(func() {
|
||||
db.Close()
|
||||
})
|
||||
return NewSessionStore(db)
|
||||
}
|
||||
|
||||
// createSessionTestUser creates a test user in the database
|
||||
func createSessionTestUser(t *testing.T, store *SessionStore, did, handle string) {
|
||||
t.Helper()
|
||||
_, err := store.db.Exec(`
|
||||
INSERT OR IGNORE INTO users (did, handle, pds_endpoint, last_seen)
|
||||
VALUES (?, ?, ?, datetime('now'))
|
||||
`, did, handle, "https://pds.example.com")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create test user: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSession_Struct(t *testing.T) {
|
||||
sess := &Session{
|
||||
ID: "test-session",
|
||||
DID: "did:plc:test",
|
||||
Handle: "alice.bsky.social",
|
||||
PDSEndpoint: "https://bsky.social",
|
||||
OAuthSessionID: "oauth-123",
|
||||
ExpiresAt: time.Now().Add(1 * time.Hour),
|
||||
}
|
||||
|
||||
if sess.DID != "did:plc:test" {
|
||||
t.Errorf("Expected DID, got %q", sess.DID)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSessionStore_Create tests session creation without OAuth
|
||||
func TestSessionStore_Create(t *testing.T) {
|
||||
store := setupSessionTestDB(t)
|
||||
createSessionTestUser(t, store, "did:plc:alice123", "alice.bsky.social")
|
||||
|
||||
sessionID, err := store.Create("did:plc:alice123", "alice.bsky.social", "https://pds.example.com", 1*time.Hour)
|
||||
if err != nil {
|
||||
t.Fatalf("Create() error = %v", err)
|
||||
}
|
||||
|
||||
if sessionID == "" {
|
||||
t.Error("Create() returned empty session ID")
|
||||
}
|
||||
|
||||
// Verify session can be retrieved
|
||||
sess, found := store.Get(sessionID)
|
||||
if !found {
|
||||
t.Error("Created session not found")
|
||||
}
|
||||
if sess == nil {
|
||||
t.Fatal("Session is nil")
|
||||
}
|
||||
if sess.DID != "did:plc:alice123" {
|
||||
t.Errorf("DID = %v, want did:plc:alice123", sess.DID)
|
||||
}
|
||||
if sess.Handle != "alice.bsky.social" {
|
||||
t.Errorf("Handle = %v, want alice.bsky.social", sess.Handle)
|
||||
}
|
||||
if sess.OAuthSessionID != "" {
|
||||
t.Errorf("OAuthSessionID should be empty, got %v", sess.OAuthSessionID)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSessionStore_CreateWithOAuth tests session creation with OAuth
|
||||
func TestSessionStore_CreateWithOAuth(t *testing.T) {
|
||||
store := setupSessionTestDB(t)
|
||||
createSessionTestUser(t, store, "did:plc:alice123", "alice.bsky.social")
|
||||
|
||||
oauthSessionID := "oauth-123"
|
||||
sessionID, err := store.CreateWithOAuth("did:plc:alice123", "alice.bsky.social", "https://pds.example.com", oauthSessionID, 1*time.Hour)
|
||||
if err != nil {
|
||||
t.Fatalf("CreateWithOAuth() error = %v", err)
|
||||
}
|
||||
|
||||
if sessionID == "" {
|
||||
t.Error("CreateWithOAuth() returned empty session ID")
|
||||
}
|
||||
|
||||
// Verify session has OAuth session ID
|
||||
sess, found := store.Get(sessionID)
|
||||
if !found {
|
||||
t.Error("Created session not found")
|
||||
}
|
||||
if sess.OAuthSessionID != oauthSessionID {
|
||||
t.Errorf("OAuthSessionID = %v, want %v", sess.OAuthSessionID, oauthSessionID)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSessionStore_Get tests retrieving sessions
|
||||
func TestSessionStore_Get(t *testing.T) {
|
||||
store := setupSessionTestDB(t)
|
||||
createSessionTestUser(t, store, "did:plc:alice123", "alice.bsky.social")
|
||||
|
||||
// Create a valid session
|
||||
validID, err := store.Create("did:plc:alice123", "alice.bsky.social", "https://pds.example.com", 1*time.Hour)
|
||||
if err != nil {
|
||||
t.Fatalf("Create() error = %v", err)
|
||||
}
|
||||
|
||||
// Create a session and manually expire it
|
||||
expiredID, err := store.Create("did:plc:alice123", "alice.bsky.social", "https://pds.example.com", 1*time.Hour)
|
||||
if err != nil {
|
||||
t.Fatalf("Create() error = %v", err)
|
||||
}
|
||||
|
||||
// Manually update expiration to the past
|
||||
_, err = store.db.Exec(`
|
||||
UPDATE ui_sessions
|
||||
SET expires_at = datetime('now', '-1 hour')
|
||||
WHERE id = ?
|
||||
`, expiredID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to update expiration: %v", err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
sessionID string
|
||||
wantFound bool
|
||||
}{
|
||||
{
|
||||
name: "valid session",
|
||||
sessionID: validID,
|
||||
wantFound: true,
|
||||
},
|
||||
{
|
||||
name: "expired session",
|
||||
sessionID: expiredID,
|
||||
wantFound: false,
|
||||
},
|
||||
{
|
||||
name: "non-existent session",
|
||||
sessionID: "non-existent-id",
|
||||
wantFound: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
sess, found := store.Get(tt.sessionID)
|
||||
if found != tt.wantFound {
|
||||
t.Errorf("Get() found = %v, want %v", found, tt.wantFound)
|
||||
}
|
||||
if tt.wantFound && sess == nil {
|
||||
t.Error("Expected session, got nil")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestSessionStore_Extend tests extending session expiration
|
||||
func TestSessionStore_Extend(t *testing.T) {
|
||||
store := setupSessionTestDB(t)
|
||||
createSessionTestUser(t, store, "did:plc:alice123", "alice.bsky.social")
|
||||
|
||||
sessionID, err := store.Create("did:plc:alice123", "alice.bsky.social", "https://pds.example.com", 1*time.Hour)
|
||||
if err != nil {
|
||||
t.Fatalf("Create() error = %v", err)
|
||||
}
|
||||
|
||||
// Get initial expiration
|
||||
sess1, _ := store.Get(sessionID)
|
||||
initialExpiry := sess1.ExpiresAt
|
||||
|
||||
// Wait a bit to ensure time difference
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Extend session
|
||||
err = store.Extend(sessionID, 2*time.Hour)
|
||||
if err != nil {
|
||||
t.Errorf("Extend() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify expiration was updated
|
||||
sess2, found := store.Get(sessionID)
|
||||
if !found {
|
||||
t.Fatal("Session not found after extend")
|
||||
}
|
||||
if !sess2.ExpiresAt.After(initialExpiry) {
|
||||
t.Error("ExpiresAt should be later after extend")
|
||||
}
|
||||
|
||||
// Test extending non-existent session
|
||||
err = store.Extend("non-existent-id", 1*time.Hour)
|
||||
if err == nil {
|
||||
t.Error("Expected error when extending non-existent session")
|
||||
}
|
||||
if err != nil && !strings.Contains(err.Error(), "not found") {
|
||||
t.Errorf("Expected 'not found' error, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSessionStore_Delete tests deleting a session
|
||||
func TestSessionStore_Delete(t *testing.T) {
|
||||
store := setupSessionTestDB(t)
|
||||
createSessionTestUser(t, store, "did:plc:alice123", "alice.bsky.social")
|
||||
|
||||
sessionID, err := store.Create("did:plc:alice123", "alice.bsky.social", "https://pds.example.com", 1*time.Hour)
|
||||
if err != nil {
|
||||
t.Fatalf("Create() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify session exists
|
||||
_, found := store.Get(sessionID)
|
||||
if !found {
|
||||
t.Fatal("Session should exist before delete")
|
||||
}
|
||||
|
||||
// Delete session
|
||||
store.Delete(sessionID)
|
||||
|
||||
// Verify session is gone
|
||||
_, found = store.Get(sessionID)
|
||||
if found {
|
||||
t.Error("Session should not exist after delete")
|
||||
}
|
||||
|
||||
// Deleting non-existent session should not error
|
||||
store.Delete("non-existent-id")
|
||||
}
|
||||
|
||||
// TestSessionStore_DeleteByDID tests deleting all sessions for a DID
|
||||
func TestSessionStore_DeleteByDID(t *testing.T) {
|
||||
store := setupSessionTestDB(t)
|
||||
did := "did:plc:alice123"
|
||||
createSessionTestUser(t, store, did, "alice.bsky.social")
|
||||
createSessionTestUser(t, store, "did:plc:bob123", "bob.bsky.social")
|
||||
|
||||
// Create multiple sessions for alice
|
||||
sessionIDs := make([]string, 3)
|
||||
for i := 0; i < 3; i++ {
|
||||
id, err := store.Create(did, "alice.bsky.social", "https://pds.example.com", 1*time.Hour)
|
||||
if err != nil {
|
||||
t.Fatalf("Create() error = %v", err)
|
||||
}
|
||||
sessionIDs[i] = id
|
||||
}
|
||||
|
||||
// Create a session for bob
|
||||
bobSessionID, err := store.Create("did:plc:bob123", "bob.bsky.social", "https://pds.example.com", 1*time.Hour)
|
||||
if err != nil {
|
||||
t.Fatalf("Create() error = %v", err)
|
||||
}
|
||||
|
||||
// Delete all sessions for alice
|
||||
store.DeleteByDID(did)
|
||||
|
||||
// Verify alice's sessions are gone
|
||||
for _, id := range sessionIDs {
|
||||
_, found := store.Get(id)
|
||||
if found {
|
||||
t.Errorf("Session %v should have been deleted", id)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify bob's session still exists
|
||||
_, found := store.Get(bobSessionID)
|
||||
if !found {
|
||||
t.Error("Bob's session should still exist")
|
||||
}
|
||||
|
||||
// Deleting sessions for non-existent DID should not error
|
||||
store.DeleteByDID("did:plc:nonexistent")
|
||||
}
|
||||
|
||||
// TestSessionStore_Cleanup tests removing expired sessions
|
||||
func TestSessionStore_Cleanup(t *testing.T) {
|
||||
store := setupSessionTestDB(t)
|
||||
createSessionTestUser(t, store, "did:plc:alice123", "alice.bsky.social")
|
||||
|
||||
// Create valid session by inserting directly with SQLite datetime format
|
||||
validID := "valid-session-id"
|
||||
_, err := store.db.Exec(`
|
||||
INSERT INTO ui_sessions (id, did, handle, pds_endpoint, oauth_session_id, expires_at, created_at)
|
||||
VALUES (?, ?, ?, ?, ?, datetime('now', '+1 hour'), datetime('now'))
|
||||
`, validID, "did:plc:alice123", "alice.bsky.social", "https://pds.example.com", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create valid session: %v", err)
|
||||
}
|
||||
|
||||
// Create expired session
|
||||
expiredID := "expired-session-id"
|
||||
_, err = store.db.Exec(`
|
||||
INSERT INTO ui_sessions (id, did, handle, pds_endpoint, oauth_session_id, expires_at, created_at)
|
||||
VALUES (?, ?, ?, ?, ?, datetime('now', '-1 hour'), datetime('now'))
|
||||
`, expiredID, "did:plc:alice123", "alice.bsky.social", "https://pds.example.com", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create expired session: %v", err)
|
||||
}
|
||||
|
||||
// Verify we have 2 sessions before cleanup
|
||||
var countBefore int
|
||||
err = store.db.QueryRow("SELECT COUNT(*) FROM ui_sessions").Scan(&countBefore)
|
||||
if err != nil {
|
||||
t.Fatalf("Query error: %v", err)
|
||||
}
|
||||
if countBefore != 2 {
|
||||
t.Fatalf("Expected 2 sessions before cleanup, got %d", countBefore)
|
||||
}
|
||||
|
||||
// Run cleanup
|
||||
store.Cleanup()
|
||||
|
||||
// Verify valid session still exists in database
|
||||
var countValid int
|
||||
err = store.db.QueryRow("SELECT COUNT(*) FROM ui_sessions WHERE id = ?", validID).Scan(&countValid)
|
||||
if err != nil {
|
||||
t.Fatalf("Query error: %v", err)
|
||||
}
|
||||
if countValid != 1 {
|
||||
t.Errorf("Valid session should still exist in database, count = %d", countValid)
|
||||
}
|
||||
|
||||
// Verify expired session was cleaned up
|
||||
var countExpired int
|
||||
err = store.db.QueryRow("SELECT COUNT(*) FROM ui_sessions WHERE id = ?", expiredID).Scan(&countExpired)
|
||||
if err != nil {
|
||||
t.Fatalf("Query error: %v", err)
|
||||
}
|
||||
if countExpired != 0 {
|
||||
t.Error("Expired session should have been deleted from database")
|
||||
}
|
||||
|
||||
// Verify we can still get the valid session
|
||||
_, found := store.Get(validID)
|
||||
if !found {
|
||||
t.Error("Valid session should be retrievable after cleanup")
|
||||
}
|
||||
}
|
||||
|
||||
// TestSessionStore_CleanupContext tests context-aware cleanup
|
||||
func TestSessionStore_CleanupContext(t *testing.T) {
|
||||
store := setupSessionTestDB(t)
|
||||
createSessionTestUser(t, store, "did:plc:alice123", "alice.bsky.social")
|
||||
|
||||
// Create a session and manually expire it
|
||||
expiredID, err := store.Create("did:plc:alice123", "alice.bsky.social", "https://pds.example.com", 1*time.Hour)
|
||||
if err != nil {
|
||||
t.Fatalf("Create() error = %v", err)
|
||||
}
|
||||
|
||||
// Manually update expiration to the past
|
||||
_, err = store.db.Exec(`
|
||||
UPDATE ui_sessions
|
||||
SET expires_at = datetime('now', '-1 hour')
|
||||
WHERE id = ?
|
||||
`, expiredID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to update expiration: %v", err)
|
||||
}
|
||||
|
||||
// Run context-aware cleanup
|
||||
ctx := context.Background()
|
||||
err = store.CleanupContext(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("CleanupContext() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify expired session was cleaned up
|
||||
var count int
|
||||
err = store.db.QueryRow("SELECT COUNT(*) FROM ui_sessions WHERE id = ?", expiredID).Scan(&count)
|
||||
if err != nil {
|
||||
t.Fatalf("Query error: %v", err)
|
||||
}
|
||||
if count != 0 {
|
||||
t.Error("Expired session should have been deleted from database")
|
||||
}
|
||||
}
|
||||
|
||||
// TestSetCookie tests setting session cookie
|
||||
func TestSetCookie(t *testing.T) {
|
||||
w := httptest.NewRecorder()
|
||||
sessionID := "test-session-id"
|
||||
maxAge := 3600
|
||||
|
||||
SetCookie(w, sessionID, maxAge)
|
||||
|
||||
cookies := w.Result().Cookies()
|
||||
if len(cookies) != 1 {
|
||||
t.Fatalf("Expected 1 cookie, got %d", len(cookies))
|
||||
}
|
||||
|
||||
cookie := cookies[0]
|
||||
if cookie.Name != "atcr_session" {
|
||||
t.Errorf("Name = %v, want atcr_session", cookie.Name)
|
||||
}
|
||||
if cookie.Value != sessionID {
|
||||
t.Errorf("Value = %v, want %v", cookie.Value, sessionID)
|
||||
}
|
||||
if cookie.MaxAge != maxAge {
|
||||
t.Errorf("MaxAge = %v, want %v", cookie.MaxAge, maxAge)
|
||||
}
|
||||
if !cookie.HttpOnly {
|
||||
t.Error("HttpOnly should be true")
|
||||
}
|
||||
if !cookie.Secure {
|
||||
t.Error("Secure should be true")
|
||||
}
|
||||
if cookie.SameSite != http.SameSiteLaxMode {
|
||||
t.Errorf("SameSite = %v, want Lax", cookie.SameSite)
|
||||
}
|
||||
if cookie.Path != "/" {
|
||||
t.Errorf("Path = %v, want /", cookie.Path)
|
||||
}
|
||||
}
|
||||
|
||||
// TestClearCookie tests clearing session cookie
|
||||
func TestClearCookie(t *testing.T) {
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
ClearCookie(w)
|
||||
|
||||
cookies := w.Result().Cookies()
|
||||
if len(cookies) != 1 {
|
||||
t.Fatalf("Expected 1 cookie, got %d", len(cookies))
|
||||
}
|
||||
|
||||
cookie := cookies[0]
|
||||
if cookie.Name != "atcr_session" {
|
||||
t.Errorf("Name = %v, want atcr_session", cookie.Name)
|
||||
}
|
||||
if cookie.Value != "" {
|
||||
t.Errorf("Value should be empty, got %v", cookie.Value)
|
||||
}
|
||||
if cookie.MaxAge != -1 {
|
||||
t.Errorf("MaxAge = %v, want -1", cookie.MaxAge)
|
||||
}
|
||||
if !cookie.HttpOnly {
|
||||
t.Error("HttpOnly should be true")
|
||||
}
|
||||
if !cookie.Secure {
|
||||
t.Error("Secure should be true")
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetSessionID tests retrieving session ID from cookie
|
||||
func TestGetSessionID(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
cookie *http.Cookie
|
||||
wantID string
|
||||
wantFound bool
|
||||
}{
|
||||
{
|
||||
name: "valid cookie",
|
||||
cookie: &http.Cookie{
|
||||
Name: "atcr_session",
|
||||
Value: "test-session-id",
|
||||
},
|
||||
wantID: "test-session-id",
|
||||
wantFound: true,
|
||||
},
|
||||
{
|
||||
name: "no cookie",
|
||||
cookie: nil,
|
||||
wantID: "",
|
||||
wantFound: false,
|
||||
},
|
||||
{
|
||||
name: "wrong cookie name",
|
||||
cookie: &http.Cookie{
|
||||
Name: "other_cookie",
|
||||
Value: "test-value",
|
||||
},
|
||||
wantID: "",
|
||||
wantFound: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
req := httptest.NewRequest("GET", "/", nil)
|
||||
if tt.cookie != nil {
|
||||
req.AddCookie(tt.cookie)
|
||||
}
|
||||
|
||||
id, found := GetSessionID(req)
|
||||
if found != tt.wantFound {
|
||||
t.Errorf("GetSessionID() found = %v, want %v", found, tt.wantFound)
|
||||
}
|
||||
if id != tt.wantID {
|
||||
t.Errorf("GetSessionID() id = %v, want %v", id, tt.wantID)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestSessionStore_SessionIDUniqueness tests that generated session IDs are unique
|
||||
func TestSessionStore_SessionIDUniqueness(t *testing.T) {
|
||||
store := setupSessionTestDB(t)
|
||||
createSessionTestUser(t, store, "did:plc:alice123", "alice.bsky.social")
|
||||
|
||||
// Generate multiple session IDs
|
||||
ids := make(map[string]bool)
|
||||
for i := 0; i < 100; i++ {
|
||||
id, err := store.Create("did:plc:alice123", "alice.bsky.social", "https://pds.example.com", 1*time.Hour)
|
||||
if err != nil {
|
||||
t.Fatalf("Create() error = %v", err)
|
||||
}
|
||||
if ids[id] {
|
||||
t.Errorf("Duplicate session ID generated: %v", id)
|
||||
}
|
||||
ids[id] = true
|
||||
}
|
||||
|
||||
if len(ids) != 100 {
|
||||
t.Errorf("Expected 100 unique IDs, got %d", len(ids))
|
||||
}
|
||||
}
|
||||
171
pkg/appview/db/tag_delete_test.go
Normal file
171
pkg/appview/db/tag_delete_test.go
Normal file
@@ -0,0 +1,171 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/atproto"
|
||||
)
|
||||
|
||||
// TestTagDeleteRoundTrip tests the full flow of creating and deleting tags
|
||||
// This simulates what Jetstream does: encode repo/tag to rkey, then decode and delete
|
||||
func TestTagDeleteRoundTrip(t *testing.T) {
|
||||
// Create in-memory test database
|
||||
db, err := InitDB(":memory:", true)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to init database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Insert test user
|
||||
testUser := &User{
|
||||
DID: "did:plc:test123",
|
||||
Handle: "testuser.bsky.social",
|
||||
PDSEndpoint: "https://test.pds.example.com",
|
||||
Avatar: "",
|
||||
LastSeen: time.Now(),
|
||||
}
|
||||
if err := UpsertUser(db, testUser); err != nil {
|
||||
t.Fatalf("Failed to insert user: %v", err)
|
||||
}
|
||||
|
||||
// Test cases covering different tag patterns
|
||||
testCases := []struct {
|
||||
name string
|
||||
repository string
|
||||
tag string
|
||||
expectRoundTrip bool // Some cases can't round-trip due to encoding limitations
|
||||
}{
|
||||
{
|
||||
name: "simple tag",
|
||||
repository: "test-image",
|
||||
tag: "latest",
|
||||
expectRoundTrip: true,
|
||||
},
|
||||
{
|
||||
name: "tag with hyphen (like latest-amd64)",
|
||||
repository: "test-image",
|
||||
tag: "latest-amd64",
|
||||
expectRoundTrip: true,
|
||||
},
|
||||
{
|
||||
name: "tag with hyphen (like latest-arm64)",
|
||||
repository: "test-image",
|
||||
tag: "latest-arm64",
|
||||
expectRoundTrip: true,
|
||||
},
|
||||
{
|
||||
name: "tag with version",
|
||||
repository: "myapp",
|
||||
tag: "v1.0.0",
|
||||
expectRoundTrip: true,
|
||||
},
|
||||
{
|
||||
name: "repository with underscore",
|
||||
repository: "my_repo",
|
||||
tag: "latest",
|
||||
expectRoundTrip: true,
|
||||
},
|
||||
{
|
||||
name: "both with underscores (known limitation)",
|
||||
repository: "my_repo",
|
||||
tag: "my_tag",
|
||||
expectRoundTrip: false, // Cannot round-trip: underscore is the separator
|
||||
},
|
||||
{
|
||||
name: "repository with multiple hyphens",
|
||||
repository: "multi-part-name",
|
||||
tag: "test-build",
|
||||
expectRoundTrip: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Step 1: Insert tag using UpsertTag (simulates tag creation)
|
||||
tag := &Tag{
|
||||
DID: testUser.DID,
|
||||
Repository: tc.repository,
|
||||
Tag: tc.tag,
|
||||
Digest: "sha256:abc123def456",
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
if err := UpsertTag(db, tag); err != nil {
|
||||
t.Fatalf("Failed to upsert tag: %v", err)
|
||||
}
|
||||
|
||||
// Step 2: Verify tag was created
|
||||
var count int
|
||||
err := db.QueryRow(`
|
||||
SELECT COUNT(*) FROM tags
|
||||
WHERE did = ? AND repository = ? AND tag = ?
|
||||
`, testUser.DID, tc.repository, tc.tag).Scan(&count)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to count tags: %v", err)
|
||||
}
|
||||
if count != 1 {
|
||||
t.Fatalf("Expected 1 tag after insert, got %d", count)
|
||||
}
|
||||
|
||||
// Step 3: Simulate Jetstream delete flow
|
||||
// This is what happens in processTag when operation == "delete"
|
||||
// The rkey comes from ATProto, we need to parse it back to repo/tag
|
||||
|
||||
// First, let's see what the rkey would be (this is how tags are stored in ATProto)
|
||||
rkey := atproto.RepositoryTagToRKey(tc.repository, tc.tag)
|
||||
t.Logf("RKey for %s:%s = %s", tc.repository, tc.tag, rkey)
|
||||
|
||||
// Then parse it back (this is what Jetstream does)
|
||||
parsedRepo, parsedTag := atproto.RKeyToRepositoryTag(rkey)
|
||||
t.Logf("Parsed back: repository=%s, tag=%s", parsedRepo, parsedTag)
|
||||
|
||||
// Verify round-trip (skip for known limitations)
|
||||
if tc.expectRoundTrip {
|
||||
if parsedRepo != tc.repository {
|
||||
t.Errorf("Repository round-trip failed: stored=%s, parsed=%s", tc.repository, parsedRepo)
|
||||
}
|
||||
if parsedTag != tc.tag {
|
||||
t.Errorf("Tag round-trip failed: stored=%s, parsed=%s", tc.tag, parsedTag)
|
||||
}
|
||||
|
||||
// Step 4: Delete using parsed values (like Jetstream does)
|
||||
if err := DeleteTag(db, testUser.DID, parsedRepo, parsedTag); err != nil {
|
||||
t.Fatalf("Failed to delete tag: %v", err)
|
||||
}
|
||||
|
||||
// Step 5: Verify tag was deleted
|
||||
err = db.QueryRow(`
|
||||
SELECT COUNT(*) FROM tags
|
||||
WHERE did = ? AND repository = ? AND tag = ?
|
||||
`, testUser.DID, tc.repository, tc.tag).Scan(&count)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to count tags after delete: %v", err)
|
||||
}
|
||||
if count != 0 {
|
||||
// This is the bug! Tag wasn't deleted
|
||||
t.Errorf("Expected 0 tags after delete, got %d (tag still exists!)", count)
|
||||
|
||||
// Debug: show what's actually in the database
|
||||
rows, err := db.Query(`
|
||||
SELECT repository, tag FROM tags WHERE did = ?
|
||||
`, testUser.DID)
|
||||
if err != nil {
|
||||
t.Logf("Failed to query remaining tags: %v", err)
|
||||
} else {
|
||||
t.Logf("Remaining tags in database:")
|
||||
for rows.Next() {
|
||||
var repo, tag string
|
||||
rows.Scan(&repo, &tag)
|
||||
t.Logf(" - repository=%s, tag=%s", repo, tag)
|
||||
}
|
||||
rows.Close()
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Known limitation: skip delete test for non-round-trippable cases
|
||||
t.Logf("Skipping delete test - known limitation: %s != %s or %s != %s",
|
||||
tc.repository, parsedRepo, tc.tag, parsedTag)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,12 +1,11 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
|
||||
"atcr.io/pkg/appview/db"
|
||||
@@ -14,8 +13,7 @@ import (
|
||||
"atcr.io/pkg/atproto"
|
||||
"atcr.io/pkg/auth/oauth"
|
||||
"github.com/bluesky-social/indigo/atproto/identity"
|
||||
"github.com/bluesky-social/indigo/atproto/syntax"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/go-chi/chi/v5"
|
||||
)
|
||||
|
||||
// StarRepositoryHandler handles starring a repository
|
||||
@@ -34,23 +32,22 @@ func (h *StarRepositoryHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
|
||||
}
|
||||
|
||||
// Extract parameters
|
||||
vars := mux.Vars(r)
|
||||
handle := vars["handle"]
|
||||
repository := vars["repository"]
|
||||
handle := chi.URLParam(r, "handle")
|
||||
repository := chi.URLParam(r, "repository")
|
||||
|
||||
// Resolve owner's handle to DID
|
||||
ownerDID, err := resolveIdentityToDID(r.Context(), h.Directory, handle)
|
||||
ownerDID, err := atproto.ResolveHandleToDID(r.Context(), handle)
|
||||
if err != nil {
|
||||
log.Printf("StarRepository: Failed to resolve handle %s: %v", handle, err)
|
||||
slog.Warn("Failed to resolve handle for star", "handle", handle, "error", err)
|
||||
http.Error(w, fmt.Sprintf("Failed to resolve handle: %v", err), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Get OAuth session for the authenticated user
|
||||
log.Printf("StarRepository: Getting OAuth session for user DID %s", user.DID)
|
||||
slog.Debug("Getting OAuth session for star", "user_did", user.DID)
|
||||
session, err := h.Refresher.GetSession(r.Context(), user.DID)
|
||||
if err != nil {
|
||||
log.Printf("StarRepository: Failed to get OAuth session for %s: %v", user.DID, err)
|
||||
slog.Warn("Failed to get OAuth session for star", "user_did", user.DID, "error", err)
|
||||
http.Error(w, fmt.Sprintf("Failed to get OAuth session: %v", err), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
@@ -66,7 +63,7 @@ func (h *StarRepositoryHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
|
||||
// Write star record to user's PDS
|
||||
_, err = pdsClient.PutRecord(r.Context(), atproto.StarCollection, rkey, starRecord)
|
||||
if err != nil {
|
||||
log.Printf("StarRepository: Failed to create star record: %v", err)
|
||||
slog.Error("Failed to create star record", "error", err)
|
||||
http.Error(w, fmt.Sprintf("Failed to create star: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
@@ -93,23 +90,22 @@ func (h *UnstarRepositoryHandler) ServeHTTP(w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
|
||||
// Extract parameters
|
||||
vars := mux.Vars(r)
|
||||
handle := vars["handle"]
|
||||
repository := vars["repository"]
|
||||
handle := chi.URLParam(r, "handle")
|
||||
repository := chi.URLParam(r, "repository")
|
||||
|
||||
// Resolve owner's handle to DID
|
||||
ownerDID, err := resolveIdentityToDID(r.Context(), h.Directory, handle)
|
||||
ownerDID, err := atproto.ResolveHandleToDID(r.Context(), handle)
|
||||
if err != nil {
|
||||
log.Printf("UnstarRepository: Failed to resolve handle %s: %v", handle, err)
|
||||
slog.Warn("Failed to resolve handle for unstar", "handle", handle, "error", err)
|
||||
http.Error(w, fmt.Sprintf("Failed to resolve handle: %v", err), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Get OAuth session for the authenticated user
|
||||
log.Printf("UnstarRepository: Getting OAuth session for user DID %s", user.DID)
|
||||
slog.Debug("Getting OAuth session for unstar", "user_did", user.DID)
|
||||
session, err := h.Refresher.GetSession(r.Context(), user.DID)
|
||||
if err != nil {
|
||||
log.Printf("UnstarRepository: Failed to get OAuth session for %s: %v", user.DID, err)
|
||||
slog.Warn("Failed to get OAuth session for unstar", "user_did", user.DID, "error", err)
|
||||
http.Error(w, fmt.Sprintf("Failed to get OAuth session: %v", err), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
@@ -120,16 +116,16 @@ func (h *UnstarRepositoryHandler) ServeHTTP(w http.ResponseWriter, r *http.Reque
|
||||
|
||||
// Delete star record from user's PDS
|
||||
rkey := atproto.StarRecordKey(ownerDID, repository)
|
||||
log.Printf("UnstarRepository: Deleting star record for %s/%s (rkey: %s)", handle, repository, rkey)
|
||||
slog.Debug("Deleting star record", "handle", handle, "repository", repository, "rkey", rkey)
|
||||
err = pdsClient.DeleteRecord(r.Context(), atproto.StarCollection, rkey)
|
||||
if err != nil {
|
||||
// If record doesn't exist, still return success (idempotent)
|
||||
if !errors.Is(err, atproto.ErrRecordNotFound) {
|
||||
log.Printf("UnstarRepository: Failed to delete star record: %v", err)
|
||||
slog.Error("Failed to delete star record", "error", err)
|
||||
http.Error(w, fmt.Sprintf("Failed to delete star: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
log.Printf("UnstarRepository: Star record not found (already unstarred)")
|
||||
slog.Debug("Star record not found, already unstarred")
|
||||
}
|
||||
|
||||
// Return success
|
||||
@@ -155,14 +151,13 @@ func (h *CheckStarHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// Extract parameters
|
||||
vars := mux.Vars(r)
|
||||
handle := vars["handle"]
|
||||
repository := vars["repository"]
|
||||
handle := chi.URLParam(r, "handle")
|
||||
repository := chi.URLParam(r, "repository")
|
||||
|
||||
// Resolve owner's handle to DID
|
||||
ownerDID, err := resolveIdentityToDID(r.Context(), h.Directory, handle)
|
||||
ownerDID, err := atproto.ResolveHandleToDID(r.Context(), handle)
|
||||
if err != nil {
|
||||
log.Printf("CheckStar: Failed to resolve handle %s: %v", handle, err)
|
||||
slog.Warn("Failed to resolve handle for check star", "handle", handle, "error", err)
|
||||
http.Error(w, fmt.Sprintf("Failed to resolve handle: %v", err), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
@@ -170,7 +165,7 @@ func (h *CheckStarHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
// Get OAuth session for the authenticated user
|
||||
session, err := h.Refresher.GetSession(r.Context(), user.DID)
|
||||
if err != nil {
|
||||
log.Printf("CheckStar: Failed to get OAuth session for %s: %v", user.DID, err)
|
||||
slog.Debug("Failed to get OAuth session for check star", "user_did", user.DID, "error", err)
|
||||
// No OAuth session - return not starred
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(map[string]bool{"starred": false})
|
||||
@@ -200,12 +195,11 @@ type GetStatsHandler struct {
|
||||
|
||||
func (h *GetStatsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
// Extract parameters
|
||||
vars := mux.Vars(r)
|
||||
handle := vars["handle"]
|
||||
repository := vars["repository"]
|
||||
handle := chi.URLParam(r, "handle")
|
||||
repository := chi.URLParam(r, "repository")
|
||||
|
||||
// Resolve owner's handle to DID
|
||||
ownerDID, err := resolveIdentityToDID(r.Context(), h.Directory, handle)
|
||||
ownerDID, err := atproto.ResolveHandleToDID(r.Context(), handle)
|
||||
if err != nil {
|
||||
http.Error(w, "Failed to resolve handle", http.StatusBadRequest)
|
||||
return
|
||||
@@ -223,19 +217,38 @@ func (h *GetStatsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
json.NewEncoder(w).Encode(stats)
|
||||
}
|
||||
|
||||
// resolveIdentityToDID is a helper function that resolves a handle or DID to a DID
|
||||
func resolveIdentityToDID(ctx context.Context, directory identity.Directory, identityStr string) (string, error) {
|
||||
// Parse as AT identifier (handle or DID)
|
||||
atID, err := syntax.ParseAtIdentifier(identityStr)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Resolve to DID via directory
|
||||
ident, err := directory.Lookup(ctx, *atID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return ident.DID.String(), nil
|
||||
// ManifestDetailHandler returns detailed manifest information including platforms
|
||||
type ManifestDetailHandler struct {
|
||||
DB *sql.DB
|
||||
Directory identity.Directory
|
||||
}
|
||||
|
||||
func (h *ManifestDetailHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
// Extract parameters
|
||||
handle := chi.URLParam(r, "handle")
|
||||
repository := chi.URLParam(r, "repository")
|
||||
digest := chi.URLParam(r, "digest")
|
||||
|
||||
// Resolve owner's handle to DID
|
||||
ownerDID, err := atproto.ResolveHandleToDID(r.Context(), handle)
|
||||
if err != nil {
|
||||
http.Error(w, "Failed to resolve handle", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Get manifest detail from database
|
||||
manifest, err := db.GetManifestDetail(h.DB, ownerDID, repository, digest)
|
||||
if err != nil {
|
||||
if err.Error() == "manifest not found" {
|
||||
http.Error(w, "Manifest not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
slog.Error("Failed to get manifest detail", "error", err)
|
||||
http.Error(w, "Failed to fetch manifest", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Return manifest as JSON
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(manifest)
|
||||
}
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"html/template"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
@@ -13,7 +13,7 @@ type LoginHandler struct {
|
||||
|
||||
func (h *LoginHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
returnTo := r.URL.Query().Get("return_to")
|
||||
fmt.Printf("DEBUG [login]: GET request. return_to param=%s, full query=%s\n", returnTo, r.URL.RawQuery)
|
||||
slog.Debug("Login GET request", "return_to", returnTo, "query", r.URL.RawQuery)
|
||||
if returnTo == "" {
|
||||
returnTo = "/"
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package handlers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"atcr.io/pkg/appview/db"
|
||||
"atcr.io/pkg/appview/middleware"
|
||||
@@ -22,3 +23,11 @@ func NewPageData(r *http.Request, registryURL string) PageData {
|
||||
RegistryURL: registryURL,
|
||||
}
|
||||
}
|
||||
|
||||
// TrimRegistryURL removes http:// or https:// prefix from a URL
|
||||
// for use in Docker commands where only the host:port is needed
|
||||
func TrimRegistryURL(url string) string {
|
||||
url = strings.TrimPrefix(url, "https://")
|
||||
url = strings.TrimPrefix(url, "http://")
|
||||
return url
|
||||
}
|
||||
|
||||
76
pkg/appview/handlers/common_test.go
Normal file
76
pkg/appview/handlers/common_test.go
Normal file
@@ -0,0 +1,76 @@
|
||||
package handlers
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestTrimRegistryURL(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "https prefix",
|
||||
input: "https://atcr.io",
|
||||
expected: "atcr.io",
|
||||
},
|
||||
{
|
||||
name: "http prefix",
|
||||
input: "http://atcr.io",
|
||||
expected: "atcr.io",
|
||||
},
|
||||
{
|
||||
name: "no prefix",
|
||||
input: "atcr.io",
|
||||
expected: "atcr.io",
|
||||
},
|
||||
{
|
||||
name: "with port https",
|
||||
input: "https://localhost:5000",
|
||||
expected: "localhost:5000",
|
||||
},
|
||||
{
|
||||
name: "with port http",
|
||||
input: "http://registry.example.com:443",
|
||||
expected: "registry.example.com:443",
|
||||
},
|
||||
{
|
||||
name: "empty string",
|
||||
input: "",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "with path",
|
||||
input: "https://atcr.io/v2/",
|
||||
expected: "atcr.io/v2/",
|
||||
},
|
||||
{
|
||||
name: "IP address https",
|
||||
input: "https://127.0.0.1:5000",
|
||||
expected: "127.0.0.1:5000",
|
||||
},
|
||||
{
|
||||
name: "IP address http",
|
||||
input: "http://192.168.1.1",
|
||||
expected: "192.168.1.1",
|
||||
},
|
||||
{
|
||||
name: "only http://",
|
||||
input: "http://",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "only https://",
|
||||
input: "https://",
|
||||
expected: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := TrimRegistryURL(tt.input)
|
||||
if result != tt.expected {
|
||||
t.Errorf("TrimRegistryURL(%q) = %q, want %q", tt.input, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -4,11 +4,12 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/go-chi/chi/v5"
|
||||
|
||||
"atcr.io/pkg/appview/db"
|
||||
)
|
||||
@@ -271,7 +272,7 @@ func (h *DeviceApproveHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
|
||||
// Approve the device
|
||||
_, err := h.Store.ApprovePending(req.UserCode, sess.DID, sess.Handle)
|
||||
if err != nil {
|
||||
fmt.Printf("ERROR [device/approve]: Failed to approve: %v\n", err)
|
||||
slog.Error("Failed to approve device", "component", "device/approve", "error", err)
|
||||
http.Error(w, fmt.Sprintf("failed to approve: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
@@ -337,8 +338,7 @@ func (h *RevokeDeviceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
|
||||
// Get device ID from URL
|
||||
vars := mux.Vars(r)
|
||||
deviceID := vars["id"]
|
||||
deviceID := chi.URLParam(r, "id")
|
||||
if deviceID == "" {
|
||||
http.Error(w, "device ID required", http.StatusBadRequest)
|
||||
return
|
||||
|
||||
703
pkg/appview/handlers/device_test.go
Normal file
703
pkg/appview/handlers/device_test.go
Normal file
@@ -0,0 +1,703 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/appview/db"
|
||||
"github.com/go-chi/chi/v5"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
// setupTestDB creates an in-memory SQLite database with full schema for testing
|
||||
func setupTestDB(t *testing.T) *sql.DB {
|
||||
database, err := db.InitDB(":memory:", true)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to initialize test database: %v", err)
|
||||
}
|
||||
return database
|
||||
}
|
||||
|
||||
// Test getClientIP function (existing test, expanded)
|
||||
func TestGetClientIP(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
remoteAddr string
|
||||
xForwardedFor string
|
||||
xRealIP string
|
||||
expectedIP string
|
||||
}{
|
||||
{
|
||||
name: "X-Forwarded-For single IP",
|
||||
remoteAddr: "192.168.1.1:1234",
|
||||
xForwardedFor: "10.0.0.1",
|
||||
xRealIP: "",
|
||||
expectedIP: "10.0.0.1",
|
||||
},
|
||||
{
|
||||
name: "X-Forwarded-For multiple IPs",
|
||||
remoteAddr: "192.168.1.1:1234",
|
||||
xForwardedFor: "10.0.0.1, 10.0.0.2, 10.0.0.3",
|
||||
xRealIP: "",
|
||||
expectedIP: "10.0.0.1",
|
||||
},
|
||||
{
|
||||
name: "X-Forwarded-For with whitespace",
|
||||
remoteAddr: "192.168.1.1:1234",
|
||||
xForwardedFor: " 10.0.0.1 ",
|
||||
xRealIP: "",
|
||||
expectedIP: "10.0.0.1",
|
||||
},
|
||||
{
|
||||
name: "X-Real-IP when no X-Forwarded-For",
|
||||
remoteAddr: "192.168.1.1:1234",
|
||||
xForwardedFor: "",
|
||||
xRealIP: "10.0.0.2",
|
||||
expectedIP: "10.0.0.2",
|
||||
},
|
||||
{
|
||||
name: "X-Forwarded-For takes priority over X-Real-IP",
|
||||
remoteAddr: "192.168.1.1:1234",
|
||||
xForwardedFor: "10.0.0.1",
|
||||
xRealIP: "10.0.0.2",
|
||||
expectedIP: "10.0.0.1",
|
||||
},
|
||||
{
|
||||
name: "RemoteAddr fallback with port",
|
||||
remoteAddr: "192.168.1.1:1234",
|
||||
xForwardedFor: "",
|
||||
xRealIP: "",
|
||||
expectedIP: "192.168.1.1",
|
||||
},
|
||||
{
|
||||
name: "RemoteAddr fallback without port",
|
||||
remoteAddr: "192.168.1.1",
|
||||
xForwardedFor: "",
|
||||
xRealIP: "",
|
||||
expectedIP: "192.168.1.1",
|
||||
},
|
||||
{
|
||||
name: "IPv6 RemoteAddr",
|
||||
remoteAddr: "[::1]:1234",
|
||||
xForwardedFor: "",
|
||||
xRealIP: "",
|
||||
expectedIP: "[",
|
||||
},
|
||||
{
|
||||
name: "IPv6 in X-Forwarded-For",
|
||||
remoteAddr: "192.168.1.1:1234",
|
||||
xForwardedFor: "2001:db8::1",
|
||||
xRealIP: "",
|
||||
expectedIP: "2001:db8::1",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
req := httptest.NewRequest("GET", "http://example.com/test", nil)
|
||||
req.RemoteAddr = tt.remoteAddr
|
||||
|
||||
if tt.xForwardedFor != "" {
|
||||
req.Header.Set("X-Forwarded-For", tt.xForwardedFor)
|
||||
}
|
||||
|
||||
if tt.xRealIP != "" {
|
||||
req.Header.Set("X-Real-IP", tt.xRealIP)
|
||||
}
|
||||
|
||||
result := getClientIP(req)
|
||||
if result != tt.expectedIP {
|
||||
t.Errorf("getClientIP() = %q, want %q", result, tt.expectedIP)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeviceCodeHandler_Success(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
store := db.NewDeviceStore(database)
|
||||
handler := &DeviceCodeHandler{
|
||||
Store: store,
|
||||
AppViewBaseURL: "http://localhost:5000",
|
||||
}
|
||||
|
||||
reqBody := DeviceCodeRequest{
|
||||
DeviceName: "My Test Device",
|
||||
}
|
||||
body, _ := json.Marshal(reqBody)
|
||||
req := httptest.NewRequest("POST", "/auth/device/code", bytes.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
var response DeviceCodeResponse
|
||||
if err := json.NewDecoder(rr.Body).Decode(&response); err != nil {
|
||||
t.Fatalf("Failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if response.DeviceCode == "" {
|
||||
t.Error("Expected device_code to be set")
|
||||
}
|
||||
if response.UserCode == "" {
|
||||
t.Error("Expected user_code to be set")
|
||||
}
|
||||
if !strings.HasPrefix(response.VerificationURI, "http://localhost:5000") {
|
||||
t.Errorf("Expected verification_uri to start with base URL, got %s", response.VerificationURI)
|
||||
}
|
||||
if response.ExpiresIn != 600 {
|
||||
t.Errorf("Expected expires_in to be 600, got %d", response.ExpiresIn)
|
||||
}
|
||||
if response.Interval != 5 {
|
||||
t.Errorf("Expected interval to be 5, got %d", response.Interval)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeviceCodeHandler_DefaultDeviceName(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
store := db.NewDeviceStore(database)
|
||||
handler := &DeviceCodeHandler{
|
||||
Store: store,
|
||||
AppViewBaseURL: "http://localhost:5000",
|
||||
}
|
||||
|
||||
// Empty device name should get default
|
||||
reqBody := DeviceCodeRequest{
|
||||
DeviceName: "",
|
||||
}
|
||||
body, _ := json.Marshal(reqBody)
|
||||
req := httptest.NewRequest("POST", "/auth/device/code", bytes.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
var response DeviceCodeResponse
|
||||
if err := json.NewDecoder(rr.Body).Decode(&response); err != nil {
|
||||
t.Fatalf("Failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if response.UserCode == "" {
|
||||
t.Error("Expected user_code to be set even with default device name")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeviceCodeHandler_MethodNotAllowed(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
store := db.NewDeviceStore(database)
|
||||
handler := &DeviceCodeHandler{
|
||||
Store: store,
|
||||
AppViewBaseURL: "http://localhost:5000",
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("GET", "/auth/device/code", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusMethodNotAllowed {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusMethodNotAllowed, rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeviceTokenHandler_AuthorizationPending(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
store := db.NewDeviceStore(database)
|
||||
handler := &DeviceTokenHandler{
|
||||
Store: store,
|
||||
}
|
||||
|
||||
// Create a pending authorization
|
||||
pending, err := store.CreatePendingAuth("Test Device", "127.0.0.1", "TestAgent/1.0")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create pending auth: %v", err)
|
||||
}
|
||||
|
||||
// Poll before approval
|
||||
reqBody := DeviceTokenRequest{
|
||||
DeviceCode: pending.DeviceCode,
|
||||
}
|
||||
body, _ := json.Marshal(reqBody)
|
||||
req := httptest.NewRequest("POST", "/auth/device/token", bytes.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
var response DeviceTokenResponse
|
||||
if err := json.NewDecoder(rr.Body).Decode(&response); err != nil {
|
||||
t.Fatalf("Failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if response.Error != "authorization_pending" {
|
||||
t.Errorf("Expected error 'authorization_pending', got %s", response.Error)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeviceTokenHandler_ExpiredToken(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
store := db.NewDeviceStore(database)
|
||||
handler := &DeviceTokenHandler{
|
||||
Store: store,
|
||||
}
|
||||
|
||||
// Try to poll with invalid device code
|
||||
reqBody := DeviceTokenRequest{
|
||||
DeviceCode: "invalid_code_12345",
|
||||
}
|
||||
body, _ := json.Marshal(reqBody)
|
||||
req := httptest.NewRequest("POST", "/auth/device/token", bytes.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
var response DeviceTokenResponse
|
||||
if err := json.NewDecoder(rr.Body).Decode(&response); err != nil {
|
||||
t.Fatalf("Failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if response.Error != "expired_token" {
|
||||
t.Errorf("Expected error 'expired_token', got %s", response.Error)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeviceTokenHandler_Approved(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
store := db.NewDeviceStore(database)
|
||||
handler := &DeviceTokenHandler{
|
||||
Store: store,
|
||||
}
|
||||
|
||||
// Create a pending authorization
|
||||
pending, err := store.CreatePendingAuth("Test Device", "127.0.0.1", "TestAgent/1.0")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create pending auth: %v", err)
|
||||
}
|
||||
|
||||
// Create user first (required for foreign key)
|
||||
_, err = database.Exec(`
|
||||
INSERT INTO users (did, handle, pds_endpoint, last_seen)
|
||||
VALUES (?, ?, ?, ?)
|
||||
`, "did:plc:test123", "test.bsky.social", "https://bsky.social", time.Now())
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create user: %v", err)
|
||||
}
|
||||
|
||||
// Approve it
|
||||
_, err = store.ApprovePending(pending.UserCode, "did:plc:test123", "test.bsky.social")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to approve pending: %v", err)
|
||||
}
|
||||
|
||||
// Poll after approval
|
||||
reqBody := DeviceTokenRequest{
|
||||
DeviceCode: pending.DeviceCode,
|
||||
}
|
||||
body, _ := json.Marshal(reqBody)
|
||||
req := httptest.NewRequest("POST", "/auth/device/token", bytes.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
var response DeviceTokenResponse
|
||||
if err := json.NewDecoder(rr.Body).Decode(&response); err != nil {
|
||||
t.Fatalf("Failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if response.Error != "" {
|
||||
t.Errorf("Expected no error, got %s", response.Error)
|
||||
}
|
||||
if response.DeviceSecret == "" {
|
||||
t.Error("Expected device_secret to be set")
|
||||
}
|
||||
if response.DID != "did:plc:test123" {
|
||||
t.Errorf("Expected DID 'did:plc:test123', got %s", response.DID)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeviceTokenHandler_MethodNotAllowed(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
store := db.NewDeviceStore(database)
|
||||
handler := &DeviceTokenHandler{
|
||||
Store: store,
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("GET", "/auth/device/token", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusMethodNotAllowed {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusMethodNotAllowed, rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeviceApprovalPageHandler_NotLoggedIn(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
store := db.NewDeviceStore(database)
|
||||
sessionStore := db.NewSessionStore(database)
|
||||
|
||||
handler := &DeviceApprovalPageHandler{
|
||||
Store: store,
|
||||
SessionStore: sessionStore,
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("GET", "/device?user_code=ABC123", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
// Should redirect to login
|
||||
if rr.Code != http.StatusFound {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusFound, rr.Code)
|
||||
}
|
||||
|
||||
location := rr.Header().Get("Location")
|
||||
if !strings.Contains(location, "/auth/oauth/login") {
|
||||
t.Errorf("Expected redirect to login, got %s", location)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeviceApprovalPageHandler_MissingUserCode(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
store := db.NewDeviceStore(database)
|
||||
sessionStore := db.NewSessionStore(database)
|
||||
|
||||
// Create user first (required for foreign key)
|
||||
_, err := database.Exec(`
|
||||
INSERT INTO users (did, handle, pds_endpoint, last_seen)
|
||||
VALUES (?, ?, ?, ?)
|
||||
`, "did:plc:test123", "test.bsky.social", "https://bsky.social", time.Now())
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create user: %v", err)
|
||||
}
|
||||
|
||||
// Create a session
|
||||
sessionID, _ := sessionStore.Create("did:plc:test123", "test.bsky.social", "https://pds.example.com", 24*time.Hour)
|
||||
|
||||
handler := &DeviceApprovalPageHandler{
|
||||
Store: store,
|
||||
SessionStore: sessionStore,
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("GET", "/device", nil) // No user_code parameter
|
||||
req.AddCookie(&http.Cookie{
|
||||
Name: "atcr_session",
|
||||
Value: sessionID,
|
||||
})
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusBadRequest {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusBadRequest, rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeviceApprovalPageHandler_MethodNotAllowed(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
store := db.NewDeviceStore(database)
|
||||
sessionStore := db.NewSessionStore(database)
|
||||
|
||||
handler := &DeviceApprovalPageHandler{
|
||||
Store: store,
|
||||
SessionStore: sessionStore,
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("POST", "/device?user_code=ABC123", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusMethodNotAllowed {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusMethodNotAllowed, rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeviceApproveHandler_Unauthorized(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
store := db.NewDeviceStore(database)
|
||||
sessionStore := db.NewSessionStore(database)
|
||||
|
||||
handler := &DeviceApproveHandler{
|
||||
Store: store,
|
||||
SessionStore: sessionStore,
|
||||
}
|
||||
|
||||
reqBody := DeviceApproveRequest{
|
||||
UserCode: "ABC123",
|
||||
Approve: true,
|
||||
}
|
||||
body, _ := json.Marshal(reqBody)
|
||||
req := httptest.NewRequest("POST", "/device/approve", bytes.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusUnauthorized {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusUnauthorized, rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeviceApproveHandler_Deny(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
store := db.NewDeviceStore(database)
|
||||
sessionStore := db.NewSessionStore(database)
|
||||
|
||||
// Create user first (required for foreign key)
|
||||
_, err := database.Exec(`
|
||||
INSERT INTO users (did, handle, pds_endpoint, last_seen)
|
||||
VALUES (?, ?, ?, ?)
|
||||
`, "did:plc:test123", "test.bsky.social", "https://bsky.social", time.Now())
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create user: %v", err)
|
||||
}
|
||||
|
||||
// Create a session
|
||||
sessionID, _ := sessionStore.Create("did:plc:test123", "test.bsky.social", "https://pds.example.com", 24*time.Hour)
|
||||
|
||||
handler := &DeviceApproveHandler{
|
||||
Store: store,
|
||||
SessionStore: sessionStore,
|
||||
}
|
||||
|
||||
reqBody := DeviceApproveRequest{
|
||||
UserCode: "ABC123",
|
||||
Approve: false,
|
||||
}
|
||||
body, _ := json.Marshal(reqBody)
|
||||
req := httptest.NewRequest("POST", "/device/approve", bytes.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.AddCookie(&http.Cookie{
|
||||
Name: "atcr_session",
|
||||
Value: sessionID,
|
||||
})
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
var response map[string]string
|
||||
if err := json.NewDecoder(rr.Body).Decode(&response); err != nil {
|
||||
t.Fatalf("Failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if response["status"] != "denied" {
|
||||
t.Errorf("Expected status 'denied', got %s", response["status"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeviceApproveHandler_MethodNotAllowed(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
store := db.NewDeviceStore(database)
|
||||
sessionStore := db.NewSessionStore(database)
|
||||
|
||||
handler := &DeviceApproveHandler{
|
||||
Store: store,
|
||||
SessionStore: sessionStore,
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("GET", "/device/approve", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusMethodNotAllowed {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusMethodNotAllowed, rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestListDevicesHandler_Unauthorized(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
store := db.NewDeviceStore(database)
|
||||
sessionStore := db.NewSessionStore(database)
|
||||
|
||||
handler := &ListDevicesHandler{
|
||||
Store: store,
|
||||
SessionStore: sessionStore,
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/devices", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusUnauthorized {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusUnauthorized, rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestListDevicesHandler_Success(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
store := db.NewDeviceStore(database)
|
||||
sessionStore := db.NewSessionStore(database)
|
||||
|
||||
// Create user first (required for foreign key)
|
||||
_, err := database.Exec(`
|
||||
INSERT INTO users (did, handle, pds_endpoint, last_seen)
|
||||
VALUES (?, ?, ?, ?)
|
||||
`, "did:plc:test123", "test.bsky.social", "https://bsky.social", time.Now())
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create user: %v", err)
|
||||
}
|
||||
|
||||
// Create a session
|
||||
sessionID, _ := sessionStore.Create("did:plc:test123", "test.bsky.social", "https://pds.example.com", 24*time.Hour)
|
||||
|
||||
// Create some devices
|
||||
pending, _ := store.CreatePendingAuth("Device 1", "127.0.0.1", "TestAgent/1.0")
|
||||
store.ApprovePending(pending.UserCode, "did:plc:test123", "test.bsky.social")
|
||||
|
||||
handler := &ListDevicesHandler{
|
||||
Store: store,
|
||||
SessionStore: sessionStore,
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/devices", nil)
|
||||
req.AddCookie(&http.Cookie{
|
||||
Name: "atcr_session",
|
||||
Value: sessionID,
|
||||
})
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
var devices []db.Device
|
||||
if err := json.NewDecoder(rr.Body).Decode(&devices); err != nil {
|
||||
t.Fatalf("Failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if len(devices) != 1 {
|
||||
t.Errorf("Expected 1 device, got %d", len(devices))
|
||||
}
|
||||
}
|
||||
|
||||
func TestListDevicesHandler_MethodNotAllowed(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
store := db.NewDeviceStore(database)
|
||||
sessionStore := db.NewSessionStore(database)
|
||||
|
||||
handler := &ListDevicesHandler{
|
||||
Store: store,
|
||||
SessionStore: sessionStore,
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("POST", "/api/devices", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusMethodNotAllowed {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusMethodNotAllowed, rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRevokeDeviceHandler_Unauthorized(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
store := db.NewDeviceStore(database)
|
||||
sessionStore := db.NewSessionStore(database)
|
||||
|
||||
handler := &RevokeDeviceHandler{
|
||||
Store: store,
|
||||
SessionStore: sessionStore,
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("DELETE", "/api/devices/device123", nil)
|
||||
|
||||
// Add chi URL parameter
|
||||
rctx := chi.NewRouteContext()
|
||||
rctx.URLParams.Add("id", "device123")
|
||||
req = req.WithContext(context.WithValue(req.Context(), chi.RouteCtxKey, rctx))
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusUnauthorized {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusUnauthorized, rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRevokeDeviceHandler_MethodNotAllowed(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
store := db.NewDeviceStore(database)
|
||||
sessionStore := db.NewSessionStore(database)
|
||||
|
||||
handler := &RevokeDeviceHandler{
|
||||
Store: store,
|
||||
SessionStore: sessionStore,
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/devices/device123", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusMethodNotAllowed {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusMethodNotAllowed, rr.Code)
|
||||
}
|
||||
}
|
||||
@@ -1,3 +1,6 @@
|
||||
// Package handlers provides HTTP handlers for the AppView web UI, including
|
||||
// home page, repository browsing, search, user authentication, settings,
|
||||
// device management, and API endpoints for the web interface.
|
||||
package handlers
|
||||
|
||||
import (
|
||||
@@ -7,6 +10,8 @@ import (
|
||||
"strconv"
|
||||
|
||||
"atcr.io/pkg/appview/db"
|
||||
"atcr.io/pkg/appview/holdhealth"
|
||||
"atcr.io/pkg/appview/middleware"
|
||||
)
|
||||
|
||||
// HomeHandler handles the home page
|
||||
@@ -17,8 +22,14 @@ type HomeHandler struct {
|
||||
}
|
||||
|
||||
func (h *HomeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
// Get current user DID (empty string if not logged in)
|
||||
var currentUserDID string
|
||||
if user := middleware.GetUser(r); user != nil {
|
||||
currentUserDID = user.DID
|
||||
}
|
||||
|
||||
// Fetch featured repositories (top 6)
|
||||
featured, err := db.GetFeaturedRepositories(h.DB, 6)
|
||||
featured, err := db.GetFeaturedRepositories(h.DB, 6, currentUserDID)
|
||||
if err != nil {
|
||||
// Log error but continue - featured section will be empty
|
||||
featured = []db.FeaturedRepository{}
|
||||
@@ -35,6 +46,7 @@ func (h *HomeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
IconURL: repo.IconURL,
|
||||
StarCount: repo.StarCount,
|
||||
PullCount: repo.PullCount,
|
||||
IsStarred: repo.IsStarred,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -54,9 +66,10 @@ func (h *HomeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// RecentPushesHandler handles the HTMX request for recent pushes
|
||||
type RecentPushesHandler struct {
|
||||
DB *sql.DB
|
||||
Templates *template.Template
|
||||
RegistryURL string
|
||||
DB *sql.DB
|
||||
Templates *template.Template
|
||||
RegistryURL string
|
||||
HealthChecker *holdhealth.Checker
|
||||
}
|
||||
|
||||
func (h *RecentPushesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -72,12 +85,47 @@ func (h *RecentPushesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
|
||||
userFilter = r.URL.Query().Get("q")
|
||||
}
|
||||
|
||||
pushes, total, err := db.GetRecentPushes(h.DB, limit, offset, userFilter)
|
||||
// Get current user DID (empty string if not logged in)
|
||||
var currentUserDID string
|
||||
if user := middleware.GetUser(r); user != nil {
|
||||
currentUserDID = user.DID
|
||||
}
|
||||
|
||||
pushes, total, err := db.GetRecentPushes(h.DB, limit, offset, userFilter, currentUserDID)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Check health status and filter out unreachable manifests for home page
|
||||
// Use GetCachedStatus only (no blocking) - background worker keeps cache fresh
|
||||
if h.HealthChecker != nil {
|
||||
reachablePushes := []db.Push{}
|
||||
for i := range pushes {
|
||||
if pushes[i].HoldEndpoint != "" {
|
||||
// Use cached status only - don't block on health checks
|
||||
cached := h.HealthChecker.GetCachedStatus(pushes[i].HoldEndpoint)
|
||||
if cached != nil {
|
||||
pushes[i].Reachable = cached.Reachable
|
||||
// Only show reachable pushes on home page
|
||||
if cached.Reachable {
|
||||
reachablePushes = append(reachablePushes, pushes[i])
|
||||
}
|
||||
} else {
|
||||
// No cached status - optimistically show it (background worker will check)
|
||||
pushes[i].Reachable = true
|
||||
reachablePushes = append(reachablePushes, pushes[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
pushes = reachablePushes
|
||||
} else {
|
||||
// If no health checker, assume all are reachable (backward compatibility)
|
||||
for i := range pushes {
|
||||
pushes[i].Reachable = true
|
||||
}
|
||||
}
|
||||
|
||||
data := struct {
|
||||
PageData
|
||||
Pushes []db.Push
|
||||
|
||||
@@ -2,17 +2,22 @@ package handlers
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"atcr.io/pkg/appview/db"
|
||||
"atcr.io/pkg/appview/middleware"
|
||||
"github.com/gorilla/mux"
|
||||
"atcr.io/pkg/atproto"
|
||||
"atcr.io/pkg/auth/oauth"
|
||||
"github.com/go-chi/chi/v5"
|
||||
)
|
||||
|
||||
// DeleteTagHandler handles deleting a tag
|
||||
type DeleteTagHandler struct {
|
||||
DB *sql.DB
|
||||
// TODO: Add ATProto client for deleting from PDS
|
||||
DB *sql.DB
|
||||
Refresher *oauth.Refresher
|
||||
}
|
||||
|
||||
func (h *DeleteTagHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -22,11 +27,29 @@ func (h *DeleteTagHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
repo := vars["repository"]
|
||||
tag := vars["tag"]
|
||||
repo := chi.URLParam(r, "repository")
|
||||
tag := chi.URLParam(r, "tag")
|
||||
|
||||
// TODO: Delete from PDS via ATProto client
|
||||
// Get OAuth session for the authenticated user
|
||||
session, err := h.Refresher.GetSession(r.Context(), user.DID)
|
||||
if err != nil {
|
||||
http.Error(w, fmt.Sprintf("Failed to get OAuth session: %v", err), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
// Create ATProto client with OAuth credentials
|
||||
apiClient := session.APIClient()
|
||||
pdsClient := atproto.NewClientWithIndigoClient(user.PDSEndpoint, user.DID, apiClient)
|
||||
|
||||
// Compute rkey for tag record (repository_tag with slashes replaced)
|
||||
rkey := fmt.Sprintf("%s_%s", repo, tag)
|
||||
rkey = strings.ReplaceAll(rkey, "/", "-")
|
||||
|
||||
// Delete from PDS first
|
||||
if err := pdsClient.DeleteRecord(r.Context(), atproto.TagCollection, rkey); err != nil {
|
||||
http.Error(w, fmt.Sprintf("Failed to delete tag from PDS: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete from cache
|
||||
if err := db.DeleteTag(h.DB, user.DID, repo, tag); err != nil {
|
||||
@@ -40,8 +63,8 @@ func (h *DeleteTagHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// DeleteManifestHandler handles deleting a manifest
|
||||
type DeleteManifestHandler struct {
|
||||
DB *sql.DB
|
||||
// TODO: Add ATProto client for deleting from PDS
|
||||
DB *sql.DB
|
||||
Refresher *oauth.Refresher
|
||||
}
|
||||
|
||||
func (h *DeleteManifestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -51,9 +74,9 @@ func (h *DeleteManifestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
repo := vars["repository"]
|
||||
digest := vars["digest"]
|
||||
repo := chi.URLParam(r, "repository")
|
||||
digest := chi.URLParam(r, "digest")
|
||||
confirmed := r.URL.Query().Get("confirm") == "true"
|
||||
|
||||
// Check if manifest is tagged
|
||||
tagged, err := db.IsManifestTagged(h.DB, user.DID, repo, digest)
|
||||
@@ -62,12 +85,68 @@ func (h *DeleteManifestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
|
||||
return
|
||||
}
|
||||
|
||||
if tagged {
|
||||
http.Error(w, "Cannot delete tagged manifest", http.StatusBadRequest)
|
||||
// If tagged and not confirmed, return tag list and require confirmation
|
||||
if tagged && !confirmed {
|
||||
tags, err := db.GetManifestTags(h.DB, user.DID, repo, digest)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusConflict)
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{
|
||||
"error": "confirmation_required",
|
||||
"message": "This manifest has associated tags that will also be deleted",
|
||||
"tags": tags,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: Delete from PDS via ATProto client
|
||||
// Get OAuth session for the authenticated user
|
||||
session, err := h.Refresher.GetSession(r.Context(), user.DID)
|
||||
if err != nil {
|
||||
http.Error(w, fmt.Sprintf("Failed to get OAuth session: %v", err), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
// Create ATProto client with OAuth credentials
|
||||
apiClient := session.APIClient()
|
||||
pdsClient := atproto.NewClientWithIndigoClient(user.PDSEndpoint, user.DID, apiClient)
|
||||
|
||||
// If tagged and confirmed, delete all tags first
|
||||
if tagged && confirmed {
|
||||
tags, err := db.GetManifestTags(h.DB, user.DID, repo, digest)
|
||||
if err != nil {
|
||||
http.Error(w, fmt.Sprintf("Failed to get tags: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete each tag from PDS and database
|
||||
for _, tag := range tags {
|
||||
// Delete from PDS
|
||||
tagRKey := fmt.Sprintf("%s:%s", repo, tag)
|
||||
if err := pdsClient.DeleteRecord(r.Context(), atproto.TagCollection, tagRKey); err != nil {
|
||||
http.Error(w, fmt.Sprintf("Failed to delete tag '%s' from PDS: %v", tag, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete from cache
|
||||
if err := db.DeleteTag(h.DB, user.DID, repo, tag); err != nil {
|
||||
http.Error(w, fmt.Sprintf("Failed to delete tag '%s' from cache: %v", tag, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Compute rkey for manifest record (digest without "sha256:" prefix)
|
||||
rkey := strings.TrimPrefix(digest, "sha256:")
|
||||
|
||||
// Delete from PDS first
|
||||
if err := pdsClient.DeleteRecord(r.Context(), atproto.ManifestCollection, rkey); err != nil {
|
||||
http.Error(w, fmt.Sprintf("Failed to delete manifest from PDS: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete from cache
|
||||
if err := db.DeleteManifest(h.DB, user.DID, repo, digest); err != nil {
|
||||
|
||||
68
pkg/appview/handlers/images_test.go
Normal file
68
pkg/appview/handlers/images_test.go
Normal file
@@ -0,0 +1,68 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
)
|
||||
|
||||
func TestDeleteTagHandler_Unauthorized(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
handler := &DeleteTagHandler{
|
||||
DB: database,
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("DELETE", "/alice/myapp/tags/latest", nil)
|
||||
|
||||
// Add chi URL parameters
|
||||
rctx := chi.NewRouteContext()
|
||||
rctx.URLParams.Add("handle", "alice")
|
||||
rctx.URLParams.Add("repository", "myapp")
|
||||
rctx.URLParams.Add("tag", "latest")
|
||||
req = req.WithContext(context.WithValue(req.Context(), chi.RouteCtxKey, rctx))
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
// Should return unauthorized without user in context
|
||||
if rr.Code != http.StatusUnauthorized {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusUnauthorized, rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteManifestHandler_Unauthorized(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
handler := &DeleteManifestHandler{
|
||||
DB: database,
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("DELETE", "/alice/myapp/manifests/sha256:abc123", nil)
|
||||
|
||||
// Add chi URL parameters
|
||||
rctx := chi.NewRouteContext()
|
||||
rctx.URLParams.Add("handle", "alice")
|
||||
rctx.URLParams.Add("repository", "myapp")
|
||||
rctx.URLParams.Add("digest", "sha256:abc123")
|
||||
req = req.WithContext(context.WithValue(req.Context(), chi.RouteCtxKey, rctx))
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
// Should return unauthorized without user in context
|
||||
if rr.Code != http.StatusUnauthorized {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusUnauthorized, rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Add comprehensive tests with authentication
|
||||
// - Test tag deletion with proper auth
|
||||
// - Test manifest deletion with proper auth
|
||||
// - Test deletion of non-existent tags
|
||||
// - Test unauthorized deletion attempts (wrong user)
|
||||
67
pkg/appview/handlers/logout.go
Normal file
67
pkg/appview/handlers/logout.go
Normal file
@@ -0,0 +1,67 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
"net/http"
|
||||
|
||||
"atcr.io/pkg/appview/db"
|
||||
"atcr.io/pkg/auth/oauth"
|
||||
"github.com/bluesky-social/indigo/atproto/syntax"
|
||||
)
|
||||
|
||||
// LogoutHandler handles user logout with proper OAuth token revocation
|
||||
type LogoutHandler struct {
|
||||
OAuthApp *oauth.App
|
||||
Refresher *oauth.Refresher
|
||||
SessionStore *db.SessionStore
|
||||
OAuthStore *db.OAuthStore
|
||||
}
|
||||
|
||||
func (h *LogoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
// Get UI session ID from cookie
|
||||
uiSessionID, hasSession := db.GetSessionID(r)
|
||||
if !hasSession {
|
||||
// No session to logout from, just redirect
|
||||
http.Redirect(w, r, "/", http.StatusFound)
|
||||
return
|
||||
}
|
||||
|
||||
// Get UI session to extract OAuth session ID and user info
|
||||
uiSession, ok := h.SessionStore.Get(uiSessionID)
|
||||
if ok && uiSession != nil && uiSession.DID != "" {
|
||||
// Parse DID for OAuth logout
|
||||
did, err := syntax.ParseDID(uiSession.DID)
|
||||
if err != nil {
|
||||
slog.Warn("Failed to parse DID for logout", "component", "logout", "did", uiSession.DID, "error", err)
|
||||
} else {
|
||||
// Attempt to revoke OAuth tokens on PDS side
|
||||
if uiSession.OAuthSessionID != "" {
|
||||
// Call indigo's Logout to revoke tokens on PDS
|
||||
if err := h.OAuthApp.GetClientApp().Logout(r.Context(), did, uiSession.OAuthSessionID); err != nil {
|
||||
// Log error but don't block logout - best effort revocation
|
||||
slog.Warn("Failed to revoke OAuth tokens on PDS", "component", "logout", "did", uiSession.DID, "error", err)
|
||||
} else {
|
||||
slog.Info("Successfully revoked OAuth tokens on PDS", "component", "logout", "did", uiSession.DID)
|
||||
}
|
||||
|
||||
// Invalidate refresher cache to clear local access tokens
|
||||
h.Refresher.InvalidateSession(uiSession.DID)
|
||||
slog.Info("Invalidated local OAuth cache", "component", "logout", "did", uiSession.DID)
|
||||
|
||||
// Delete OAuth session from database (cleanup, might already be done by Logout)
|
||||
if err := h.OAuthStore.DeleteSession(r.Context(), did, uiSession.OAuthSessionID); err != nil {
|
||||
slog.Warn("Failed to delete OAuth session from database", "component", "logout", "error", err)
|
||||
}
|
||||
} else {
|
||||
slog.Warn("No OAuth session ID found for user", "component", "logout", "did", uiSession.DID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Always delete UI session and clear cookie, even if OAuth revocation failed
|
||||
h.SessionStore.Delete(uiSessionID)
|
||||
db.ClearCookie(w)
|
||||
|
||||
// Redirect to home page
|
||||
http.Redirect(w, r, "/", http.StatusFound)
|
||||
}
|
||||
97
pkg/appview/handlers/logout_test.go
Normal file
97
pkg/appview/handlers/logout_test.go
Normal file
@@ -0,0 +1,97 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/appview/db"
|
||||
)
|
||||
|
||||
func TestLogoutHandler_NoSession(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
sessionStore := db.NewSessionStore(database)
|
||||
|
||||
handler := &LogoutHandler{
|
||||
SessionStore: sessionStore,
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("GET", "/auth/logout", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
// Should redirect even with no session
|
||||
if rr.Code != http.StatusFound {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusFound, rr.Code)
|
||||
}
|
||||
|
||||
location := rr.Header().Get("Location")
|
||||
if location != "/" {
|
||||
t.Errorf("Expected redirect to /, got %s", location)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLogoutHandler_WithSession(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
sessionStore := db.NewSessionStore(database)
|
||||
|
||||
// Create a user first (required for foreign key)
|
||||
_, err := database.Exec(`
|
||||
INSERT INTO users (did, handle, pds_endpoint, last_seen)
|
||||
VALUES (?, ?, ?, ?)
|
||||
`, "did:plc:test123", "test.bsky.social", "https://bsky.social", time.Now())
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create user: %v", err)
|
||||
}
|
||||
|
||||
// Create a session
|
||||
sessionID, err := sessionStore.Create("did:plc:test123", "test.bsky.social", "https://bsky.social", 24*time.Hour)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create session: %v", err)
|
||||
}
|
||||
|
||||
handler := &LogoutHandler{
|
||||
SessionStore: sessionStore,
|
||||
OAuthStore: db.NewOAuthStore(database),
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("GET", "/auth/logout", nil)
|
||||
req.AddCookie(&http.Cookie{
|
||||
Name: "atcr_session",
|
||||
Value: sessionID,
|
||||
})
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
// Should redirect
|
||||
if rr.Code != http.StatusFound {
|
||||
t.Errorf("Expected status %d, got %d", http.StatusFound, rr.Code)
|
||||
}
|
||||
|
||||
// Should clear cookie
|
||||
cookies := rr.Result().Cookies()
|
||||
found := false
|
||||
for _, cookie := range cookies {
|
||||
if cookie.Name == "atcr_session" {
|
||||
found = true
|
||||
if cookie.MaxAge != -1 {
|
||||
t.Errorf("Expected cookie MaxAge=-1, got %d", cookie.MaxAge)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Error("Expected atcr_session cookie to be cleared")
|
||||
}
|
||||
|
||||
// Session should be deleted
|
||||
_, exists := sessionStore.Get(sessionID)
|
||||
if exists {
|
||||
t.Error("Expected session to be deleted")
|
||||
}
|
||||
}
|
||||
76
pkg/appview/handlers/manifest_health.go
Normal file
76
pkg/appview/handlers/manifest_health.go
Normal file
@@ -0,0 +1,76 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/appview/holdhealth"
|
||||
)
|
||||
|
||||
// ManifestHealthHandler handles HTMX polling for manifest health status
|
||||
type ManifestHealthHandler struct {
|
||||
HealthChecker *holdhealth.Checker
|
||||
}
|
||||
|
||||
func (h *ManifestHealthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
// Get endpoint from query parameter
|
||||
endpoint := r.URL.Query().Get("endpoint")
|
||||
if endpoint == "" {
|
||||
http.Error(w, "endpoint parameter required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Decode URL-encoded endpoint
|
||||
endpoint, err := url.QueryUnescape(endpoint)
|
||||
if err != nil {
|
||||
http.Error(w, "invalid endpoint parameter", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Try to get cached status first (instant if background worker has checked it)
|
||||
cached := h.HealthChecker.GetCachedStatus(endpoint)
|
||||
if cached != nil {
|
||||
// Cache hit - return final status
|
||||
h.renderBadge(w, endpoint, cached.Reachable, false)
|
||||
return
|
||||
}
|
||||
|
||||
// Cache miss - perform quick check with 2 second timeout
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
reachable, err := h.HealthChecker.CheckHealth(ctx, endpoint)
|
||||
|
||||
if ctx.Err() == context.DeadlineExceeded {
|
||||
// Still pending - render "Checking..." badge with HTMX retry
|
||||
h.renderBadge(w, endpoint, false, true)
|
||||
} else if err != nil {
|
||||
// Error - mark as unreachable
|
||||
h.renderBadge(w, endpoint, false, false)
|
||||
} else {
|
||||
// Success
|
||||
h.renderBadge(w, endpoint, reachable, false)
|
||||
}
|
||||
}
|
||||
|
||||
// renderBadge renders the appropriate badge HTML snippet
|
||||
func (h *ManifestHealthHandler) renderBadge(w http.ResponseWriter, endpoint string, reachable, pending bool) {
|
||||
w.Header().Set("Content-Type", "text/html")
|
||||
|
||||
if pending {
|
||||
// Still checking - render badge with HTMX retry after 3 seconds
|
||||
retryURL := "/api/manifest-health?endpoint=" + url.QueryEscape(endpoint)
|
||||
w.Write([]byte(`<span class="checking-badge"
|
||||
hx-get="` + retryURL + `"
|
||||
hx-trigger="load delay:3s"
|
||||
hx-swap="outerHTML">🔄 Checking...</span>`))
|
||||
} else if !reachable {
|
||||
// Unreachable - render offline badge
|
||||
w.Write([]byte(`<span class="offline-badge">⚠️ Offline</span>`))
|
||||
} else {
|
||||
// Reachable - no badge (empty response)
|
||||
w.Write([]byte(``))
|
||||
}
|
||||
}
|
||||
@@ -1,32 +1,213 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"log"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/appview/db"
|
||||
"atcr.io/pkg/appview/holdhealth"
|
||||
"atcr.io/pkg/appview/middleware"
|
||||
"atcr.io/pkg/appview/readme"
|
||||
"atcr.io/pkg/atproto"
|
||||
"atcr.io/pkg/auth/oauth"
|
||||
"github.com/bluesky-social/indigo/atproto/identity"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/go-chi/chi/v5"
|
||||
)
|
||||
|
||||
// RepositoryPageHandler handles the public repository page
|
||||
type RepositoryPageHandler struct {
|
||||
DB *sql.DB
|
||||
Templates *template.Template
|
||||
RegistryURL string
|
||||
Directory identity.Directory
|
||||
Refresher *oauth.Refresher
|
||||
DB *sql.DB
|
||||
Templates *template.Template
|
||||
RegistryURL string
|
||||
Directory identity.Directory
|
||||
Refresher *oauth.Refresher
|
||||
HealthChecker *holdhealth.Checker
|
||||
ReadmeCache *readme.Cache
|
||||
}
|
||||
|
||||
// queryVulnerabilities queries the hold service for vulnerability scan results
|
||||
func (h *RepositoryPageHandler) queryVulnerabilities(ctx context.Context, holdEndpoint string, digest string) (*db.VulnerabilitySummary, error) {
|
||||
// Skip if no hold endpoint
|
||||
if holdEndpoint == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Query referrers endpoint for vulnerability scan results
|
||||
// Match the artifactType used by the scanner in pkg/hold/scanner/storage.go
|
||||
artifactType := "application/vnd.atcr.vulnerabilities+json"
|
||||
|
||||
// Properly encode query parameters (especially the + in the media type)
|
||||
queryParams := url.Values{}
|
||||
queryParams.Set("digest", digest)
|
||||
queryParams.Set("artifactType", artifactType)
|
||||
requestURL := fmt.Sprintf("%s/xrpc/io.atcr.hold.getReferrers?%s", holdEndpoint, queryParams.Encode())
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", requestURL, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
// No scan results found
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return nil, fmt.Errorf("failed to query referrers: %s - %s", resp.Status, string(body))
|
||||
}
|
||||
|
||||
// Parse response
|
||||
var result struct {
|
||||
Referrers []struct {
|
||||
Annotations map[string]string `json:"annotations"`
|
||||
} `json:"referrers"`
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode referrers response: %w", err)
|
||||
}
|
||||
|
||||
slog.Debug("Received referrers response",
|
||||
"digest", digest,
|
||||
"referrerCount", len(result.Referrers))
|
||||
|
||||
// Find the most recent vulnerability scan result
|
||||
if len(result.Referrers) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Parse vulnerability counts from annotations
|
||||
// Match the annotation keys used by the scanner in pkg/hold/scanner/storage.go
|
||||
annotations := result.Referrers[0].Annotations
|
||||
slog.Debug("First referrer annotations",
|
||||
"digest", digest,
|
||||
"annotations", annotations,
|
||||
"annotationsLen", len(annotations))
|
||||
|
||||
summary := &db.VulnerabilitySummary{}
|
||||
|
||||
if critical, ok := annotations["io.atcr.vuln.critical"]; ok {
|
||||
summary.Critical, _ = strconv.Atoi(critical)
|
||||
}
|
||||
if high, ok := annotations["io.atcr.vuln.high"]; ok {
|
||||
summary.High, _ = strconv.Atoi(high)
|
||||
}
|
||||
if medium, ok := annotations["io.atcr.vuln.medium"]; ok {
|
||||
summary.Medium, _ = strconv.Atoi(medium)
|
||||
}
|
||||
if low, ok := annotations["io.atcr.vuln.low"]; ok {
|
||||
summary.Low, _ = strconv.Atoi(low)
|
||||
}
|
||||
if total, ok := annotations["io.atcr.vuln.total"]; ok {
|
||||
summary.Total, _ = strconv.Atoi(total)
|
||||
}
|
||||
|
||||
// If Total is missing or 0, calculate from individual counts
|
||||
if summary.Total == 0 {
|
||||
summary.Total = summary.Critical + summary.High + summary.Medium + summary.Low
|
||||
}
|
||||
|
||||
slog.Debug("Parsed vulnerability summary",
|
||||
"digest", digest,
|
||||
"critical", summary.Critical,
|
||||
"high", summary.High,
|
||||
"medium", summary.Medium,
|
||||
"low", summary.Low,
|
||||
"total", summary.Total)
|
||||
|
||||
return summary, nil
|
||||
}
|
||||
|
||||
// HandleVulnerabilityDetails returns the full vulnerability report for a manifest
|
||||
func (h *RepositoryPageHandler) HandleVulnerabilityDetails(w http.ResponseWriter, r *http.Request) {
|
||||
digest := r.URL.Query().Get("digest")
|
||||
holdEndpoint := r.URL.Query().Get("holdEndpoint")
|
||||
|
||||
if digest == "" || holdEndpoint == "" {
|
||||
http.Error(w, "digest and holdEndpoint required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Query referrers to get the vulnerability report digest
|
||||
artifactType := "application/vnd.atcr.vulnerabilities+json"
|
||||
queryParams := url.Values{}
|
||||
queryParams.Set("digest", digest)
|
||||
queryParams.Set("artifactType", artifactType)
|
||||
requestURL := fmt.Sprintf("%s/xrpc/io.atcr.hold.getReferrers?%s", holdEndpoint, queryParams.Encode())
|
||||
|
||||
req, err := http.NewRequestWithContext(r.Context(), "GET", requestURL, nil)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
http.Error(w, "No vulnerability scan found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
http.Error(w, "Failed to query referrers", resp.StatusCode)
|
||||
return
|
||||
}
|
||||
|
||||
// Parse response - now includes the vulnerability report data directly
|
||||
var result struct {
|
||||
Referrers []struct {
|
||||
Digest string `json:"digest"`
|
||||
Annotations map[string]string `json:"annotations"`
|
||||
ReportData map[string]interface{} `json:"reportData"` // The actual vulnerability report
|
||||
} `json:"referrers"`
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
http.Error(w, "Failed to decode referrers response", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if len(result.Referrers) == 0 {
|
||||
http.Error(w, "No vulnerability scan found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if reportData is included
|
||||
if result.Referrers[0].ReportData == nil {
|
||||
http.Error(w, "Vulnerability report data not available", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
// Return the vulnerability report JSON directly
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(result.Referrers[0].ReportData)
|
||||
}
|
||||
|
||||
func (h *RepositoryPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
handle := vars["handle"]
|
||||
repository := vars["repository"]
|
||||
handle := chi.URLParam(r, "handle")
|
||||
repository := chi.URLParam(r, "repository")
|
||||
|
||||
// Look up user by handle
|
||||
owner, err := db.GetUserByHandle(h.DB, handle)
|
||||
@@ -40,22 +221,153 @@ func (h *RepositoryPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
|
||||
return
|
||||
}
|
||||
|
||||
// Fetch repository data
|
||||
repo, err := db.GetRepository(h.DB, owner.DID, repository)
|
||||
// Fetch tags with platform information
|
||||
tagsWithPlatforms, err := db.GetTagsWithPlatforms(h.DB, owner.DID, repository)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if repo == nil || len(repo.Manifests) == 0 {
|
||||
// Fetch top-level manifests (filters out platform-specific manifests)
|
||||
manifests, err := db.GetTopLevelManifests(h.DB, owner.DID, repository, 50, 0)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Query vulnerability scan results for each manifest (concurrent with 2s timeout)
|
||||
{
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
var mu sync.Mutex
|
||||
|
||||
for i := range manifests {
|
||||
// Skip manifest lists - only query for image manifests
|
||||
if manifests[i].IsManifestList {
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func(idx int) {
|
||||
defer wg.Done()
|
||||
|
||||
vulnerabilities, err := h.queryVulnerabilities(ctx, manifests[idx].Manifest.HoldEndpoint, manifests[idx].Manifest.Digest)
|
||||
if err != nil {
|
||||
slog.Warn("Failed to query vulnerabilities",
|
||||
"digest", manifests[idx].Manifest.Digest,
|
||||
"error", err)
|
||||
return
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
if vulnerabilities != nil && vulnerabilities.Total > 0 {
|
||||
manifests[idx].Vulnerabilities = vulnerabilities
|
||||
manifests[idx].HasVulnerabilities = true
|
||||
}
|
||||
mu.Unlock()
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// Check health status for each manifest's hold endpoint (concurrent with 1s timeout)
|
||||
if h.HealthChecker != nil {
|
||||
// Create context with 1 second deadline for fast-fail
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
var mu sync.Mutex
|
||||
|
||||
for i := range manifests {
|
||||
if manifests[i].HoldEndpoint == "" {
|
||||
// No hold endpoint, mark as unreachable
|
||||
manifests[i].Reachable = false
|
||||
manifests[i].Pending = false
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func(idx int) {
|
||||
defer wg.Done()
|
||||
|
||||
endpoint := manifests[idx].HoldEndpoint
|
||||
|
||||
// Try to get cached status first (instant)
|
||||
if cached := h.HealthChecker.GetCachedStatus(endpoint); cached != nil {
|
||||
mu.Lock()
|
||||
manifests[idx].Reachable = cached.Reachable
|
||||
manifests[idx].Pending = false
|
||||
mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Perform health check with timeout context
|
||||
reachable, err := h.HealthChecker.CheckHealth(ctx, endpoint)
|
||||
|
||||
mu.Lock()
|
||||
if ctx.Err() == context.DeadlineExceeded {
|
||||
// Timeout - mark as pending for HTMX polling
|
||||
manifests[idx].Reachable = false
|
||||
manifests[idx].Pending = true
|
||||
} else if err != nil {
|
||||
// Error - mark as unreachable
|
||||
manifests[idx].Reachable = false
|
||||
manifests[idx].Pending = false
|
||||
} else {
|
||||
// Success
|
||||
manifests[idx].Reachable = reachable
|
||||
manifests[idx].Pending = false
|
||||
}
|
||||
mu.Unlock()
|
||||
}(i)
|
||||
}
|
||||
|
||||
// Wait for all checks to complete or timeout
|
||||
wg.Wait()
|
||||
} else {
|
||||
// If no health checker, assume all are reachable (backward compatibility)
|
||||
for i := range manifests {
|
||||
manifests[i].Reachable = true
|
||||
manifests[i].Pending = false
|
||||
}
|
||||
}
|
||||
|
||||
if len(tagsWithPlatforms) == 0 && len(manifests) == 0 {
|
||||
http.Error(w, "Repository not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
// Create repository summary
|
||||
repo := &db.Repository{
|
||||
Name: repository,
|
||||
TagCount: len(tagsWithPlatforms),
|
||||
ManifestCount: len(manifests),
|
||||
}
|
||||
|
||||
// Fetch repository metadata from annotations table
|
||||
metadata, err := db.GetRepositoryMetadata(h.DB, owner.DID, repository)
|
||||
if err != nil {
|
||||
slog.Warn("Failed to fetch repository metadata", "error", err)
|
||||
// Continue without metadata on error
|
||||
} else {
|
||||
repo.Title = metadata["org.opencontainers.image.title"]
|
||||
repo.Description = metadata["org.opencontainers.image.description"]
|
||||
repo.SourceURL = metadata["org.opencontainers.image.source"]
|
||||
repo.DocumentationURL = metadata["org.opencontainers.image.documentation"]
|
||||
repo.Licenses = metadata["org.opencontainers.image.licenses"]
|
||||
repo.IconURL = metadata["io.atcr.icon"]
|
||||
repo.ReadmeURL = metadata["io.atcr.readme"]
|
||||
repo.Version = metadata["org.opencontainers.image.version"]
|
||||
}
|
||||
|
||||
// Fetch star count
|
||||
stats, err := db.GetRepositoryStats(h.DB, owner.DID, repository)
|
||||
if err != nil {
|
||||
log.Printf("Failed to fetch repository stats: %v", err)
|
||||
slog.Warn("Failed to fetch repository stats", "error", err)
|
||||
// Continue with zero stats on error
|
||||
stats = &db.RepositoryStats{StarCount: 0}
|
||||
}
|
||||
@@ -84,20 +396,42 @@ func (h *RepositoryPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
|
||||
isOwner = (user.DID == owner.DID)
|
||||
}
|
||||
|
||||
// Fetch README content if available
|
||||
var readmeHTML template.HTML
|
||||
if repo.ReadmeURL != "" && h.ReadmeCache != nil {
|
||||
// Fetch with timeout
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
html, err := h.ReadmeCache.Get(ctx, repo.ReadmeURL)
|
||||
if err != nil {
|
||||
slog.Warn("Failed to fetch README", "url", repo.ReadmeURL, "error", err)
|
||||
// Continue without README on error
|
||||
} else {
|
||||
readmeHTML = template.HTML(html)
|
||||
}
|
||||
}
|
||||
|
||||
data := struct {
|
||||
PageData
|
||||
Owner *db.User // Repository owner
|
||||
Repository *db.Repository
|
||||
Owner *db.User // Repository owner
|
||||
Repository *db.Repository // Repository summary
|
||||
Tags []db.TagWithPlatforms // Tags with platform info
|
||||
Manifests []db.ManifestWithMetadata // Top-level manifests only
|
||||
StarCount int
|
||||
IsStarred bool
|
||||
IsOwner bool // Whether current user owns this repository
|
||||
ReadmeHTML template.HTML
|
||||
}{
|
||||
PageData: NewPageData(r, h.RegistryURL),
|
||||
Owner: owner,
|
||||
Repository: repo,
|
||||
Tags: tagsWithPlatforms,
|
||||
Manifests: manifests,
|
||||
StarCount: stats.StarCount,
|
||||
IsStarred: isStarred,
|
||||
IsOwner: isOwner,
|
||||
ReadmeHTML: readmeHTML,
|
||||
}
|
||||
|
||||
if err := h.Templates.ExecuteTemplate(w, "repository", data); err != nil {
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"atcr.io/pkg/appview/db"
|
||||
"atcr.io/pkg/appview/middleware"
|
||||
)
|
||||
|
||||
// SearchHandler handles the search page
|
||||
@@ -77,7 +78,13 @@ func (h *SearchResultsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
|
||||
offset, _ = strconv.Atoi(o)
|
||||
}
|
||||
|
||||
pushes, total, err := db.SearchPushes(h.DB, query, limit, offset)
|
||||
// Get current user DID (empty string if not logged in)
|
||||
var currentUserDID string
|
||||
if user := middleware.GetUser(r); user != nil {
|
||||
currentUserDID = user.DID
|
||||
}
|
||||
|
||||
pushes, total, err := db.SearchPushes(h.DB, query, limit, offset, currentUserDID)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"html/template"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/appview/middleware"
|
||||
"atcr.io/pkg/appview/storage"
|
||||
"atcr.io/pkg/atproto"
|
||||
"atcr.io/pkg/auth/oauth"
|
||||
)
|
||||
@@ -29,7 +30,7 @@ func (h *SettingsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
session, err := h.Refresher.GetSession(r.Context(), user.DID)
|
||||
if err != nil {
|
||||
// OAuth session not found or expired - redirect to re-authenticate
|
||||
fmt.Printf("WARNING [settings]: OAuth session not found for %s: %v - redirecting to login\n", user.DID, err)
|
||||
slog.Warn("OAuth session not found, redirecting to login", "component", "settings", "did", user.DID, "error", err)
|
||||
http.Redirect(w, r, "/auth/oauth/login?return_to=/settings", http.StatusFound)
|
||||
return
|
||||
}
|
||||
@@ -41,22 +42,22 @@ func (h *SettingsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
client := atproto.NewClientWithIndigoClient(user.PDSEndpoint, user.DID, apiClient)
|
||||
|
||||
// Fetch sailor profile
|
||||
profile, err := atproto.GetProfile(r.Context(), client)
|
||||
profile, err := storage.GetProfile(r.Context(), client)
|
||||
if err != nil {
|
||||
// Error fetching profile - log out user
|
||||
fmt.Printf("WARNING [settings]: Failed to fetch profile for %s: %v - logging out\n", user.DID, err)
|
||||
slog.Warn("Failed to fetch profile, logging out", "component", "settings", "did", user.DID, "error", err)
|
||||
http.Redirect(w, r, "/auth/logout", http.StatusFound)
|
||||
return
|
||||
}
|
||||
|
||||
if profile == nil {
|
||||
// Profile doesn't exist yet (404) - user needs to log out and back in to create it
|
||||
fmt.Printf("WARNING [settings]: Profile doesn't exist for %s - logging out\n", user.DID)
|
||||
slog.Warn("Profile doesn't exist, logging out", "component", "settings", "did", user.DID)
|
||||
http.Redirect(w, r, "/auth/logout", http.StatusFound)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("DEBUG [settings]: Fetched profile for %s: defaultHold=%s\n", user.DID, profile.DefaultHold)
|
||||
slog.Debug("Fetched profile", "component", "settings", "did", user.DID, "default_hold", profile.DefaultHold)
|
||||
|
||||
data := struct {
|
||||
PageData
|
||||
@@ -99,7 +100,7 @@ func (h *UpdateDefaultHoldHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
|
||||
session, err := h.Refresher.GetSession(r.Context(), user.DID)
|
||||
if err != nil {
|
||||
// OAuth session not found or expired - redirect to re-authenticate
|
||||
fmt.Printf("WARNING [settings]: OAuth session not found for %s: %v - redirecting to login\n", user.DID, err)
|
||||
slog.Warn("OAuth session not found, redirecting to login", "component", "settings", "did", user.DID, "error", err)
|
||||
http.Redirect(w, r, "/auth/oauth/login?return_to=/settings", http.StatusFound)
|
||||
return
|
||||
}
|
||||
@@ -111,7 +112,7 @@ func (h *UpdateDefaultHoldHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
|
||||
client := atproto.NewClientWithIndigoClient(user.PDSEndpoint, user.DID, apiClient)
|
||||
|
||||
// Fetch existing profile or create new one
|
||||
profile, err := atproto.GetProfile(r.Context(), client)
|
||||
profile, err := storage.GetProfile(r.Context(), client)
|
||||
if err != nil || profile == nil {
|
||||
// Profile doesn't exist, create new one
|
||||
profile = atproto.NewSailorProfileRecord(holdEndpoint)
|
||||
@@ -122,11 +123,11 @@ func (h *UpdateDefaultHoldHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
|
||||
}
|
||||
|
||||
// Save profile
|
||||
if err := atproto.UpdateProfile(r.Context(), client, profile); err != nil {
|
||||
if err := storage.UpdateProfile(r.Context(), client, profile); err != nil {
|
||||
http.Error(w, "Failed to update profile: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "text/html")
|
||||
w.Write([]byte(`<div class="success">✓ Default hold updated successfully!</div>`))
|
||||
w.Write([]byte(`<div class="success"><i data-lucide="check"></i> Default hold updated successfully!</div>`))
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"net/http"
|
||||
|
||||
"atcr.io/pkg/appview/db"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/go-chi/chi/v5"
|
||||
)
|
||||
|
||||
// UserPageHandler handles the public user page showing all images for a user
|
||||
@@ -17,8 +17,7 @@ type UserPageHandler struct {
|
||||
}
|
||||
|
||||
func (h *UserPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
handle := vars["handle"]
|
||||
handle := chi.URLParam(r, "handle")
|
||||
|
||||
// Look up user by handle
|
||||
viewedUser, err := db.GetUserByHandle(h.DB, handle)
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import "strings"
|
||||
|
||||
// TrimRegistryURL removes http:// or https:// prefix from a URL
|
||||
// for use in Docker commands where only the host:port is needed
|
||||
func TrimRegistryURL(url string) string {
|
||||
url = strings.TrimPrefix(url, "https://")
|
||||
url = strings.TrimPrefix(url, "http://")
|
||||
return url
|
||||
}
|
||||
182
pkg/appview/holdhealth/checker.go
Normal file
182
pkg/appview/holdhealth/checker.go
Normal file
@@ -0,0 +1,182 @@
|
||||
// Package holdhealth provides health checking for hold service endpoints.
|
||||
// It periodically checks hold availability and caches health status with
|
||||
// configurable TTL to avoid excessive health check requests.
|
||||
package holdhealth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/atproto"
|
||||
)
|
||||
|
||||
// HealthStatus represents the health status of a hold endpoint
|
||||
type HealthStatus struct {
|
||||
Reachable bool
|
||||
LastChecked time.Time
|
||||
LastError error
|
||||
}
|
||||
|
||||
// Checker manages health checking for hold endpoints
|
||||
type Checker struct {
|
||||
client *http.Client
|
||||
cache map[string]*HealthStatus
|
||||
cacheMu sync.RWMutex
|
||||
cacheTTL time.Duration
|
||||
cleanupMu sync.Mutex
|
||||
}
|
||||
|
||||
// NewChecker creates a new health checker with the specified cache TTL
|
||||
func NewChecker(cacheTTL time.Duration) *Checker {
|
||||
return NewCheckerWithTimeout(cacheTTL, 2*time.Second)
|
||||
}
|
||||
|
||||
// NewCheckerWithTimeout creates a new health checker with custom timeout
|
||||
// Useful for testing with shorter timeouts
|
||||
func NewCheckerWithTimeout(cacheTTL, httpTimeout time.Duration) *Checker {
|
||||
return &Checker{
|
||||
client: &http.Client{
|
||||
Timeout: httpTimeout,
|
||||
},
|
||||
cache: make(map[string]*HealthStatus),
|
||||
cacheTTL: cacheTTL,
|
||||
}
|
||||
}
|
||||
|
||||
// CheckHealth performs an HTTP health check on the hold endpoint
|
||||
// Accepts either DID (did:web:host) or URL (https://host) format
|
||||
// Checks {endpoint}/xrpc/_health and returns true if reachable
|
||||
func (c *Checker) CheckHealth(ctx context.Context, endpoint string) (bool, error) {
|
||||
// Convert DID to HTTP URL if needed
|
||||
// did:web:hold.example.com → https://hold.example.com
|
||||
// https://hold.example.com → https://hold.example.com (passthrough)
|
||||
httpURL := atproto.ResolveHoldURL(endpoint)
|
||||
|
||||
// Build health check URL
|
||||
healthURL := httpURL + "/xrpc/_health"
|
||||
|
||||
// Create request with context
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", healthURL, nil)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
// Perform request
|
||||
resp, err := c.client.Do(req)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("request failed: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Check status code
|
||||
if resp.StatusCode >= 200 && resp.StatusCode < 300 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// GetStatus returns the cached health status for an endpoint
|
||||
// If the cache is expired or missing, it performs an on-demand check
|
||||
func (c *Checker) GetStatus(ctx context.Context, endpoint string) *HealthStatus {
|
||||
// Check cache first
|
||||
c.cacheMu.RLock()
|
||||
status, exists := c.cache[endpoint]
|
||||
c.cacheMu.RUnlock()
|
||||
|
||||
// If cached and not expired, return it
|
||||
if exists && time.Since(status.LastChecked) < c.cacheTTL {
|
||||
return status
|
||||
}
|
||||
|
||||
// On-demand check
|
||||
reachable, err := c.CheckHealth(ctx, endpoint)
|
||||
|
||||
// Update cache
|
||||
newStatus := &HealthStatus{
|
||||
Reachable: reachable,
|
||||
LastChecked: time.Now(),
|
||||
LastError: err,
|
||||
}
|
||||
|
||||
c.cacheMu.Lock()
|
||||
c.cache[endpoint] = newStatus
|
||||
c.cacheMu.Unlock()
|
||||
|
||||
return newStatus
|
||||
}
|
||||
|
||||
// GetCachedStatus returns the cached status without performing a check
|
||||
// Returns nil if no cached status exists
|
||||
func (c *Checker) GetCachedStatus(endpoint string) *HealthStatus {
|
||||
c.cacheMu.RLock()
|
||||
defer c.cacheMu.RUnlock()
|
||||
|
||||
status, exists := c.cache[endpoint]
|
||||
if !exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Return nil if expired
|
||||
if time.Since(status.LastChecked) > c.cacheTTL {
|
||||
return nil
|
||||
}
|
||||
|
||||
return status
|
||||
}
|
||||
|
||||
// SetStatus manually sets the health status for an endpoint
|
||||
// Used by the background worker to update cache
|
||||
func (c *Checker) SetStatus(endpoint string, reachable bool, err error) {
|
||||
status := &HealthStatus{
|
||||
Reachable: reachable,
|
||||
LastChecked: time.Now(),
|
||||
LastError: err,
|
||||
}
|
||||
|
||||
c.cacheMu.Lock()
|
||||
c.cache[endpoint] = status
|
||||
c.cacheMu.Unlock()
|
||||
}
|
||||
|
||||
// Cleanup removes stale cache entries (older than 30 minutes)
|
||||
func (c *Checker) Cleanup() {
|
||||
c.cleanupMu.Lock()
|
||||
defer c.cleanupMu.Unlock()
|
||||
|
||||
c.cacheMu.Lock()
|
||||
defer c.cacheMu.Unlock()
|
||||
|
||||
cutoff := time.Now().Add(-30 * time.Minute)
|
||||
for endpoint, status := range c.cache {
|
||||
if status.LastChecked.Before(cutoff) {
|
||||
delete(c.cache, endpoint)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetCacheStats returns cache statistics for debugging
|
||||
func (c *Checker) GetCacheStats() map[string]any {
|
||||
c.cacheMu.RLock()
|
||||
defer c.cacheMu.RUnlock()
|
||||
|
||||
reachable := 0
|
||||
unreachable := 0
|
||||
|
||||
for _, status := range c.cache {
|
||||
if status.Reachable {
|
||||
reachable++
|
||||
} else {
|
||||
unreachable++
|
||||
}
|
||||
}
|
||||
|
||||
return map[string]any{
|
||||
"total": len(c.cache),
|
||||
"reachable": reachable,
|
||||
"unreachable": unreachable,
|
||||
}
|
||||
}
|
||||
328
pkg/appview/holdhealth/checker_test.go
Normal file
328
pkg/appview/holdhealth/checker_test.go
Normal file
@@ -0,0 +1,328 @@
|
||||
package holdhealth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/atproto"
|
||||
)
|
||||
|
||||
func TestNewChecker(t *testing.T) {
|
||||
cacheTTL := 15 * time.Minute
|
||||
checker := NewChecker(cacheTTL)
|
||||
|
||||
if checker == nil {
|
||||
t.Fatal("NewChecker returned nil")
|
||||
}
|
||||
|
||||
if checker.cacheTTL != cacheTTL {
|
||||
t.Errorf("Expected cache TTL %v, got %v", cacheTTL, checker.cacheTTL)
|
||||
}
|
||||
|
||||
if checker.cache == nil {
|
||||
t.Error("Cache map not initialized")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckHealth_Success(t *testing.T) {
|
||||
// Create test server that returns 200
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path != "/xrpc/_health" {
|
||||
t.Errorf("Expected path /xrpc/_health, got %s", r.URL.Path)
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(`{"version": "1.0.0"}`))
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
checker := NewChecker(15 * time.Minute)
|
||||
ctx := context.Background()
|
||||
|
||||
reachable, err := checker.CheckHealth(ctx, server.URL)
|
||||
if err != nil {
|
||||
t.Errorf("CheckHealth returned error: %v", err)
|
||||
}
|
||||
|
||||
if !reachable {
|
||||
t.Error("Expected hold to be reachable")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckHealth_WithDID(t *testing.T) {
|
||||
// Create test server that returns 200
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path != "/xrpc/_health" {
|
||||
t.Errorf("Expected path /xrpc/_health, got %s", r.URL.Path)
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(`{"version": "1.0.0"}`))
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
checker := NewChecker(15 * time.Minute)
|
||||
ctx := context.Background()
|
||||
|
||||
// Test with DID format (did:web:host)
|
||||
// Extract host:port from test server URL
|
||||
// http://127.0.0.1:12345 → did:web:127.0.0.1:12345
|
||||
serverURL := server.URL
|
||||
didFormat := "did:web:" + serverURL[7:] // Remove "http://"
|
||||
|
||||
reachable, err := checker.CheckHealth(ctx, didFormat)
|
||||
if err != nil {
|
||||
t.Errorf("CheckHealth with DID returned error: %v", err)
|
||||
}
|
||||
|
||||
if !reachable {
|
||||
t.Error("Expected hold to be reachable with DID format")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckHealth_Failure(t *testing.T) {
|
||||
// Create test server that returns 500
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
checker := NewChecker(15 * time.Minute)
|
||||
ctx := context.Background()
|
||||
|
||||
reachable, err := checker.CheckHealth(ctx, server.URL)
|
||||
if err == nil {
|
||||
t.Error("Expected error for 500 status code")
|
||||
}
|
||||
|
||||
if reachable {
|
||||
t.Error("Expected hold to be unreachable")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckHealth_Timeout(t *testing.T) {
|
||||
// Create test server that delays longer than client timeout
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
time.Sleep(200 * time.Millisecond) // Longer than 100ms test timeout
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
// Use custom timeout of 100ms for faster test
|
||||
checker := NewCheckerWithTimeout(15*time.Minute, 100*time.Millisecond)
|
||||
ctx := context.Background()
|
||||
|
||||
reachable, err := checker.CheckHealth(ctx, server.URL)
|
||||
if err == nil {
|
||||
t.Error("Expected timeout error")
|
||||
}
|
||||
|
||||
if reachable {
|
||||
t.Error("Expected hold to be unreachable due to timeout")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetStatus_CacheHit(t *testing.T) {
|
||||
checker := NewChecker(15 * time.Minute)
|
||||
endpoint := "https://example.com"
|
||||
|
||||
// Manually set cached status
|
||||
checker.SetStatus(endpoint, true, nil)
|
||||
|
||||
// Get status should return cached value
|
||||
status := checker.GetStatus(context.Background(), endpoint)
|
||||
if status == nil {
|
||||
t.Fatal("GetStatus returned nil")
|
||||
return
|
||||
}
|
||||
|
||||
if !status.Reachable {
|
||||
t.Error("Expected cached status to be reachable")
|
||||
}
|
||||
|
||||
if status.LastError != nil {
|
||||
t.Errorf("Expected no error, got %v", status.LastError)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetStatus_CacheMiss(t *testing.T) {
|
||||
// Create test server
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
checker := NewChecker(15 * time.Minute)
|
||||
|
||||
// Get status should perform check on cache miss
|
||||
status := checker.GetStatus(context.Background(), server.URL)
|
||||
if status == nil {
|
||||
t.Fatal("GetStatus returned nil")
|
||||
return
|
||||
}
|
||||
|
||||
if !status.Reachable {
|
||||
t.Error("Expected status to be reachable")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetStatus_CacheExpiry(t *testing.T) {
|
||||
// Create checker with very short TTL
|
||||
checker := NewChecker(100 * time.Millisecond)
|
||||
endpoint := "https://example.com"
|
||||
|
||||
// Set cached status
|
||||
checker.SetStatus(endpoint, true, nil)
|
||||
|
||||
// Wait for cache to expire
|
||||
time.Sleep(150 * time.Millisecond)
|
||||
|
||||
// GetCachedStatus should return nil for expired entry
|
||||
status := checker.GetCachedStatus(endpoint)
|
||||
if status != nil {
|
||||
t.Error("Expected nil for expired cache entry")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetStatus(t *testing.T) {
|
||||
checker := NewChecker(15 * time.Minute)
|
||||
endpoint := "https://example.com"
|
||||
|
||||
// Set status
|
||||
checker.SetStatus(endpoint, true, nil)
|
||||
|
||||
// Verify it was set
|
||||
status := checker.GetCachedStatus(endpoint)
|
||||
if status == nil {
|
||||
t.Fatal("Status not found in cache")
|
||||
return
|
||||
}
|
||||
|
||||
if !status.Reachable {
|
||||
t.Error("Expected status to be reachable")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanup(t *testing.T) {
|
||||
checker := NewChecker(1 * time.Minute)
|
||||
|
||||
// Add old entry (simulate old timestamp by manually setting it)
|
||||
endpoint := "https://example.com"
|
||||
checker.cache[endpoint] = &HealthStatus{
|
||||
Reachable: true,
|
||||
LastChecked: time.Now().Add(-31 * time.Minute), // 31 minutes ago
|
||||
}
|
||||
|
||||
// Add recent entry
|
||||
recentEndpoint := "https://recent.com"
|
||||
checker.SetStatus(recentEndpoint, true, nil)
|
||||
|
||||
// Run cleanup
|
||||
checker.Cleanup()
|
||||
|
||||
// Old entry should be removed
|
||||
if checker.GetCachedStatus(endpoint) != nil {
|
||||
t.Error("Expected old entry to be cleaned up")
|
||||
}
|
||||
|
||||
// Recent entry should remain
|
||||
if checker.GetCachedStatus(recentEndpoint) == nil {
|
||||
t.Error("Expected recent entry to remain after cleanup")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCacheStats(t *testing.T) {
|
||||
checker := NewChecker(15 * time.Minute)
|
||||
|
||||
// Add some entries
|
||||
checker.SetStatus("https://reachable1.com", true, nil)
|
||||
checker.SetStatus("https://reachable2.com", true, nil)
|
||||
checker.SetStatus("https://unreachable1.com", false, nil)
|
||||
|
||||
stats := checker.GetCacheStats()
|
||||
|
||||
total, ok := stats["total"].(int)
|
||||
if !ok || total != 3 {
|
||||
t.Errorf("Expected total=3, got %v", stats["total"])
|
||||
}
|
||||
|
||||
reachable, ok := stats["reachable"].(int)
|
||||
if !ok || reachable != 2 {
|
||||
t.Errorf("Expected reachable=2, got %v", stats["reachable"])
|
||||
}
|
||||
|
||||
unreachable, ok := stats["unreachable"].(int)
|
||||
if !ok || unreachable != 1 {
|
||||
t.Errorf("Expected unreachable=1, got %v", stats["unreachable"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewWorkerWithStartupDelay(t *testing.T) {
|
||||
checker := NewChecker(15 * time.Minute)
|
||||
|
||||
// Test NewWorker (no delay)
|
||||
worker := NewWorker(checker, nil, 5*time.Minute)
|
||||
if worker.startupDelay != 0 {
|
||||
t.Errorf("Expected startupDelay=0 for NewWorker, got %v", worker.startupDelay)
|
||||
}
|
||||
|
||||
// Test NewWorkerWithStartupDelay
|
||||
startupDelay := 5 * time.Second
|
||||
workerWithDelay := NewWorkerWithStartupDelay(checker, nil, 5*time.Minute, startupDelay)
|
||||
if workerWithDelay.startupDelay != startupDelay {
|
||||
t.Errorf("Expected startupDelay=%v, got %v", startupDelay, workerWithDelay.startupDelay)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalizeHoldEndpoint(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "HTTP URL",
|
||||
input: "http://hold01.atcr.io",
|
||||
expected: "did:web:hold01.atcr.io",
|
||||
},
|
||||
{
|
||||
name: "HTTPS URL",
|
||||
input: "https://hold01.atcr.io",
|
||||
expected: "did:web:hold01.atcr.io",
|
||||
},
|
||||
{
|
||||
name: "HTTP URL with port",
|
||||
input: "http://172.28.0.3:8080",
|
||||
expected: "did:web:172.28.0.3:8080",
|
||||
},
|
||||
{
|
||||
name: "HTTP URL with trailing slash",
|
||||
input: "http://hold01.atcr.io/",
|
||||
expected: "did:web:hold01.atcr.io",
|
||||
},
|
||||
{
|
||||
name: "HTTP URL with path",
|
||||
input: "http://hold01.atcr.io/some/path",
|
||||
expected: "did:web:hold01.atcr.io",
|
||||
},
|
||||
{
|
||||
name: "Already a DID",
|
||||
input: "did:web:hold01.atcr.io",
|
||||
expected: "did:web:hold01.atcr.io",
|
||||
},
|
||||
{
|
||||
name: "DID with port",
|
||||
input: "did:web:172.28.0.3:8080",
|
||||
expected: "did:web:172.28.0.3:8080",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := atproto.ResolveHoldDIDFromURL(tt.input)
|
||||
if result != tt.expected {
|
||||
t.Errorf("normalizeHoldEndpoint(%q) = %q, want %q", tt.input, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
222
pkg/appview/holdhealth/worker.go
Normal file
222
pkg/appview/holdhealth/worker.go
Normal file
@@ -0,0 +1,222 @@
|
||||
package holdhealth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/atproto"
|
||||
)
|
||||
|
||||
// DBQuerier interface for database queries (allows mocking in tests)
|
||||
type DBQuerier interface {
|
||||
GetUniqueHoldEndpoints() ([]string, error)
|
||||
}
|
||||
|
||||
// Worker runs background health checks for hold endpoints
|
||||
type Worker struct {
|
||||
checker *Checker
|
||||
db DBQuerier
|
||||
refreshTicker *time.Ticker
|
||||
cleanupTicker *time.Ticker
|
||||
stopChan chan struct{}
|
||||
wg sync.WaitGroup
|
||||
startupDelay time.Duration
|
||||
}
|
||||
|
||||
// NewWorker creates a new background worker
|
||||
func NewWorker(checker *Checker, db DBQuerier, refreshInterval time.Duration) *Worker {
|
||||
return &Worker{
|
||||
checker: checker,
|
||||
db: db,
|
||||
refreshTicker: time.NewTicker(refreshInterval),
|
||||
cleanupTicker: time.NewTicker(30 * time.Minute), // Cleanup every 30 minutes
|
||||
stopChan: make(chan struct{}),
|
||||
startupDelay: 0, // No delay by default for backward compatibility
|
||||
}
|
||||
}
|
||||
|
||||
// NewWorkerWithStartupDelay creates a new background worker with a startup delay
|
||||
func NewWorkerWithStartupDelay(checker *Checker, db DBQuerier, refreshInterval, startupDelay time.Duration) *Worker {
|
||||
return &Worker{
|
||||
checker: checker,
|
||||
db: db,
|
||||
refreshTicker: time.NewTicker(refreshInterval),
|
||||
cleanupTicker: time.NewTicker(30 * time.Minute), // Cleanup every 30 minutes
|
||||
stopChan: make(chan struct{}),
|
||||
startupDelay: startupDelay,
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins the background worker
|
||||
func (w *Worker) Start(ctx context.Context) {
|
||||
w.wg.Add(1)
|
||||
go func() {
|
||||
defer w.wg.Done()
|
||||
|
||||
slog.Info("Hold health worker starting background health checks")
|
||||
|
||||
// Wait for services to be ready (Docker startup race condition)
|
||||
if w.startupDelay > 0 {
|
||||
slog.Info("Hold health worker waiting for services to be ready", "delay", w.startupDelay)
|
||||
select {
|
||||
case <-time.After(w.startupDelay):
|
||||
// Continue with initial check
|
||||
case <-ctx.Done():
|
||||
slog.Info("Hold health worker context cancelled during startup delay")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Perform initial check
|
||||
w.refreshAllHolds(ctx)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
slog.Info("Hold health worker context cancelled, stopping")
|
||||
return
|
||||
case <-w.stopChan:
|
||||
slog.Info("Hold health worker stop signal received")
|
||||
return
|
||||
case <-w.refreshTicker.C:
|
||||
w.refreshAllHolds(ctx)
|
||||
case <-w.cleanupTicker.C:
|
||||
slog.Info("Hold health worker running cache cleanup")
|
||||
w.checker.Cleanup()
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Stop gracefully stops the worker
|
||||
func (w *Worker) Stop() {
|
||||
close(w.stopChan)
|
||||
w.refreshTicker.Stop()
|
||||
w.cleanupTicker.Stop()
|
||||
w.wg.Wait()
|
||||
slog.Info("Hold health worker stopped")
|
||||
}
|
||||
|
||||
// refreshAllHolds queries the database for unique hold endpoints and refreshes their health status
|
||||
func (w *Worker) refreshAllHolds(ctx context.Context) {
|
||||
slog.Info("Hold health worker starting refresh cycle")
|
||||
|
||||
// Get unique hold endpoints from database
|
||||
endpoints, err := w.db.GetUniqueHoldEndpoints()
|
||||
if err != nil {
|
||||
slog.Error("Hold health worker failed to fetch hold endpoints", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
if len(endpoints) == 0 {
|
||||
slog.Info("Hold health worker no hold endpoints to check")
|
||||
return
|
||||
}
|
||||
|
||||
slog.Info("Hold health worker fetched hold endpoint entries from database", "count", len(endpoints))
|
||||
|
||||
// Deduplicate endpoints by normalizing to canonical DID format
|
||||
// This handles cases where the same hold is stored with different representations:
|
||||
// - http://172.28.0.3:8080 (internal IP)
|
||||
// - http://hold01.atcr.io (external hostname)
|
||||
// - did:web:hold01.atcr.io (DID format)
|
||||
// All normalize to the same DID: did:web:hold01.atcr.io (or did:web:172.28.0.3:8080)
|
||||
seen := make(map[string]bool)
|
||||
uniqueEndpoints := make([]string, 0, len(endpoints))
|
||||
|
||||
for _, endpoint := range endpoints {
|
||||
// Normalize to canonical DID format
|
||||
normalizedDID := atproto.ResolveHoldDIDFromURL(endpoint)
|
||||
|
||||
// Skip if we've already seen this normalized DID
|
||||
if seen[normalizedDID] {
|
||||
continue
|
||||
}
|
||||
|
||||
seen[normalizedDID] = true
|
||||
// Use the normalized DID for health checks
|
||||
uniqueEndpoints = append(uniqueEndpoints, normalizedDID)
|
||||
}
|
||||
|
||||
slog.Info("Hold health worker checking unique hold endpoints", "unique_count", len(uniqueEndpoints), "total_count", len(endpoints))
|
||||
|
||||
// Check health concurrently with rate limiting
|
||||
// Use a semaphore to limit concurrent requests (max 10 at a time)
|
||||
sem := make(chan struct{}, 10)
|
||||
var wg sync.WaitGroup
|
||||
|
||||
reachable := 0
|
||||
unreachable := 0
|
||||
var statsMu sync.Mutex
|
||||
|
||||
for _, endpoint := range uniqueEndpoints {
|
||||
wg.Add(1)
|
||||
|
||||
go func(ep string) {
|
||||
defer wg.Done()
|
||||
|
||||
// Acquire semaphore
|
||||
sem <- struct{}{}
|
||||
defer func() { <-sem }()
|
||||
|
||||
// Check health
|
||||
isReachable, err := w.checker.CheckHealth(ctx, ep)
|
||||
|
||||
// Update cache
|
||||
w.checker.SetStatus(ep, isReachable, err)
|
||||
|
||||
// Update stats
|
||||
statsMu.Lock()
|
||||
if isReachable {
|
||||
reachable++
|
||||
} else {
|
||||
unreachable++
|
||||
slog.Warn("Hold health worker hold unreachable", "endpoint", ep, "error", err)
|
||||
}
|
||||
statsMu.Unlock()
|
||||
}(endpoint)
|
||||
}
|
||||
|
||||
// Wait for all checks to complete
|
||||
wg.Wait()
|
||||
|
||||
slog.Info("Hold health worker refresh complete", "reachable", reachable, "unreachable", unreachable)
|
||||
}
|
||||
|
||||
// DBAdapter wraps sql.DB to implement DBQuerier interface
|
||||
type DBAdapter struct {
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
// NewDBAdapter creates a new database adapter
|
||||
func NewDBAdapter(db *sql.DB) *DBAdapter {
|
||||
return &DBAdapter{db: db}
|
||||
}
|
||||
|
||||
// GetUniqueHoldEndpoints queries the database for unique hold endpoints
|
||||
func (a *DBAdapter) GetUniqueHoldEndpoints() ([]string, error) {
|
||||
rows, err := a.db.Query(`SELECT DISTINCT hold_endpoint FROM manifests WHERE hold_endpoint != ''`)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query hold endpoints: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var endpoints []string
|
||||
for rows.Next() {
|
||||
var endpoint string
|
||||
if err := rows.Scan(&endpoint); err != nil {
|
||||
return nil, fmt.Errorf("failed to scan endpoint: %w", err)
|
||||
}
|
||||
endpoints = append(endpoints, endpoint)
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("error iterating rows: %w", err)
|
||||
}
|
||||
|
||||
return endpoints, nil
|
||||
}
|
||||
13
pkg/appview/holdhealth/worker_test.go
Normal file
13
pkg/appview/holdhealth/worker_test.go
Normal file
@@ -0,0 +1,13 @@
|
||||
package holdhealth
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestWorker_Struct(t *testing.T) {
|
||||
// Simple struct test
|
||||
worker := &Worker{}
|
||||
if worker == nil {
|
||||
t.Error("Expected non-nil worker")
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Add background health check tests
|
||||
@@ -5,21 +5,21 @@ import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/bluesky-social/indigo/atproto/identity"
|
||||
"github.com/bluesky-social/indigo/atproto/syntax"
|
||||
|
||||
"atcr.io/pkg/appview/db"
|
||||
"atcr.io/pkg/atproto"
|
||||
)
|
||||
|
||||
// BackfillWorker uses com.atproto.sync.listReposByCollection to backfill historical data
|
||||
type BackfillWorker struct {
|
||||
db *sql.DB
|
||||
client *atproto.Client
|
||||
directory identity.Directory
|
||||
db *sql.DB
|
||||
client *atproto.Client
|
||||
processor *Processor // Shared processor for DB operations
|
||||
defaultHoldDID string // Default hold DID from AppView config (e.g., "did:web:hold01.atcr.io")
|
||||
testMode bool // If true, suppress warnings for external holds
|
||||
}
|
||||
|
||||
// BackfillState tracks backfill progress
|
||||
@@ -34,38 +34,52 @@ type BackfillState struct {
|
||||
}
|
||||
|
||||
// NewBackfillWorker creates a backfill worker using sync API
|
||||
func NewBackfillWorker(database *sql.DB, relayEndpoint string) (*BackfillWorker, error) {
|
||||
// defaultHoldDID should be in format "did:web:hold01.atcr.io"
|
||||
// To find a hold's DID, visit: https://hold-url/.well-known/did.json
|
||||
func NewBackfillWorker(database *sql.DB, relayEndpoint, defaultHoldDID string, testMode bool) (*BackfillWorker, error) {
|
||||
// Create client for relay - used only for listReposByCollection
|
||||
client := atproto.NewClient(relayEndpoint, "", "")
|
||||
|
||||
return &BackfillWorker{
|
||||
db: database,
|
||||
client: client, // This points to the relay
|
||||
directory: identity.DefaultDirectory(),
|
||||
db: database,
|
||||
client: client, // This points to the relay
|
||||
processor: NewProcessor(database, false), // No cache for batch processing
|
||||
defaultHoldDID: defaultHoldDID,
|
||||
testMode: testMode,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start runs the backfill for all ATCR collections
|
||||
func (b *BackfillWorker) Start(ctx context.Context) error {
|
||||
fmt.Println("Backfill: Starting sync-based backfill...")
|
||||
slog.Info("Backfill: Starting sync-based backfill...")
|
||||
|
||||
// First, query and cache the default hold's captain record
|
||||
if b.defaultHoldDID != "" {
|
||||
slog.Info("Backfill querying default hold captain record", "hold_did", b.defaultHoldDID)
|
||||
if err := b.queryCaptainRecord(ctx, b.defaultHoldDID); err != nil {
|
||||
slog.Warn("Backfill failed to query default hold captain record", "error", err)
|
||||
// Don't fail the whole backfill - just warn
|
||||
}
|
||||
}
|
||||
|
||||
collections := []string{
|
||||
atproto.ManifestCollection, // io.atcr.manifest
|
||||
atproto.TagCollection, // io.atcr.tag
|
||||
atproto.StarCollection, // io.atcr.sailor.star
|
||||
atproto.ManifestCollection, // io.atcr.manifest
|
||||
atproto.TagCollection, // io.atcr.tag
|
||||
atproto.StarCollection, // io.atcr.sailor.star
|
||||
atproto.SailorProfileCollection, // io.atcr.sailor.profile
|
||||
}
|
||||
|
||||
for _, collection := range collections {
|
||||
fmt.Printf("Backfill: Processing collection: %s\n", collection)
|
||||
slog.Info("Backfill processing collection", "collection", collection)
|
||||
|
||||
if err := b.backfillCollection(ctx, collection); err != nil {
|
||||
return fmt.Errorf("failed to backfill collection %s: %w", collection, err)
|
||||
}
|
||||
|
||||
fmt.Printf("Backfill: Completed collection: %s\n", collection)
|
||||
slog.Info("Backfill completed collection", "collection", collection)
|
||||
}
|
||||
|
||||
fmt.Println("Backfill: All collections completed!")
|
||||
slog.Info("Backfill: All collections completed!")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -83,13 +97,13 @@ func (b *BackfillWorker) backfillCollection(ctx context.Context, collection stri
|
||||
return fmt.Errorf("failed to list repos: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Backfill: Found %d repos with %s (cursor: %s)\n", len(result.Repos), collection, repoCursor)
|
||||
slog.Info("Backfill found repos with collection", "count", len(result.Repos), "collection", collection, "cursor", repoCursor)
|
||||
|
||||
// Process each repo (DID)
|
||||
for _, repo := range result.Repos {
|
||||
recordCount, err := b.backfillRepo(ctx, repo.DID, collection)
|
||||
if err != nil {
|
||||
fmt.Printf("WARNING: Failed to backfill repo %s: %v\n", repo.DID, err)
|
||||
slog.Warn("Backfill failed to backfill repo", "did", repo.DID, "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -97,7 +111,7 @@ func (b *BackfillWorker) backfillCollection(ctx context.Context, collection stri
|
||||
processedRecords += recordCount
|
||||
|
||||
if processedRepos%10 == 0 {
|
||||
fmt.Printf("Backfill: Progress - %d repos, %d records\n", processedRepos, processedRecords)
|
||||
slog.Info("Backfill progress", "repos", processedRepos, "records", processedRecords)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -109,35 +123,26 @@ func (b *BackfillWorker) backfillCollection(ctx context.Context, collection stri
|
||||
repoCursor = result.Cursor
|
||||
}
|
||||
|
||||
fmt.Printf("Backfill: Collection %s complete - %d repos, %d records\n", collection, processedRepos, processedRecords)
|
||||
slog.Info("Backfill collection complete", "collection", collection, "repos", processedRepos, "records", processedRecords)
|
||||
return nil
|
||||
}
|
||||
|
||||
// backfillRepo backfills all records for a single repo/DID
|
||||
func (b *BackfillWorker) backfillRepo(ctx context.Context, did, collection string) (int, error) {
|
||||
// Ensure user exists in database and get their PDS endpoint
|
||||
if err := b.ensureUser(ctx, did); err != nil {
|
||||
if err := b.processor.EnsureUser(ctx, did); err != nil {
|
||||
return 0, fmt.Errorf("failed to ensure user: %w", err)
|
||||
}
|
||||
|
||||
// Resolve DID to get user's PDS endpoint
|
||||
didParsed, err := syntax.ParseDID(did)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("invalid DID %s: %w", did, err)
|
||||
}
|
||||
|
||||
ident, err := b.directory.LookupDID(ctx, didParsed)
|
||||
pdsEndpoint, err := atproto.ResolveDIDToPDS(ctx, did)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to resolve DID to PDS: %w", err)
|
||||
}
|
||||
|
||||
pdsEndpoint := ident.PDSEndpoint()
|
||||
if pdsEndpoint == "" {
|
||||
return 0, fmt.Errorf("no PDS endpoint found for DID %s", did)
|
||||
}
|
||||
|
||||
// Create a client for this user's PDS
|
||||
pdsClient := atproto.NewClient(pdsEndpoint, "", "")
|
||||
// Create a client for this user's PDS with the user's DID
|
||||
// This allows GetRecord to work properly with the repo parameter
|
||||
pdsClient := atproto.NewClient(pdsEndpoint, did, "")
|
||||
|
||||
var recordCursor string
|
||||
recordCount := 0
|
||||
@@ -157,12 +162,13 @@ func (b *BackfillWorker) backfillRepo(ctx context.Context, did, collection strin
|
||||
// Process each record
|
||||
for _, record := range records {
|
||||
// Track what we found for deletion reconciliation
|
||||
if collection == atproto.ManifestCollection {
|
||||
switch collection {
|
||||
case atproto.ManifestCollection:
|
||||
var manifestRecord atproto.ManifestRecord
|
||||
if err := json.Unmarshal(record.Value, &manifestRecord); err == nil {
|
||||
foundManifestDigests = append(foundManifestDigests, manifestRecord.Digest)
|
||||
}
|
||||
} else if collection == atproto.TagCollection {
|
||||
case atproto.TagCollection:
|
||||
var tagRecord atproto.TagRecord
|
||||
if err := json.Unmarshal(record.Value, &tagRecord); err == nil {
|
||||
foundTags = append(foundTags, struct{ Repository, Tag string }{
|
||||
@@ -170,7 +176,7 @@ func (b *BackfillWorker) backfillRepo(ctx context.Context, did, collection strin
|
||||
Tag: tagRecord.Tag,
|
||||
})
|
||||
}
|
||||
} else if collection == atproto.StarCollection {
|
||||
case atproto.StarCollection:
|
||||
var starRecord atproto.StarRecord
|
||||
if err := json.Unmarshal(record.Value, &starRecord); err == nil {
|
||||
key := fmt.Sprintf("%s/%s", starRecord.Subject.DID, starRecord.Subject.Repository)
|
||||
@@ -179,7 +185,7 @@ func (b *BackfillWorker) backfillRepo(ctx context.Context, did, collection strin
|
||||
}
|
||||
|
||||
if err := b.processRecord(ctx, did, collection, &record); err != nil {
|
||||
fmt.Printf("WARNING: Failed to process record %s: %v\n", record.URI, err)
|
||||
slog.Warn("Backfill failed to process record", "uri", record.URI, "error", err)
|
||||
continue
|
||||
}
|
||||
recordCount++
|
||||
@@ -195,13 +201,19 @@ func (b *BackfillWorker) backfillRepo(ctx context.Context, did, collection strin
|
||||
|
||||
// Reconcile deletions - remove records from DB that no longer exist on PDS
|
||||
if err := b.reconcileDeletions(did, collection, foundManifestDigests, foundTags, foundStars); err != nil {
|
||||
fmt.Printf("WARNING: Failed to reconcile deletions for %s: %v\n", did, err)
|
||||
slog.Warn("Backfill failed to reconcile deletions", "did", did, "error", err)
|
||||
}
|
||||
|
||||
// After processing manifests, clean up orphaned tags (tags pointing to non-existent manifests)
|
||||
if collection == atproto.ManifestCollection {
|
||||
if err := db.CleanupOrphanedTags(b.db, did); err != nil {
|
||||
fmt.Printf("WARNING: Failed to cleanup orphaned tags for %s: %v\n", did, err)
|
||||
slog.Warn("Backfill failed to cleanup orphaned tags", "did", did, "error", err)
|
||||
}
|
||||
|
||||
// Reconcile annotations - ensure they come from newest manifest per repository
|
||||
// This fixes out-of-order backfill where older manifests can overwrite newer annotations
|
||||
if err := b.reconcileAnnotations(ctx, did, pdsClient); err != nil {
|
||||
slog.Warn("Backfill failed to reconcile annotations", "did", did, "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -226,7 +238,7 @@ func (b *BackfillWorker) reconcileDeletions(did, collection string, foundManifes
|
||||
// Log deletions
|
||||
deleted := len(dbDigests) - len(foundManifestDigests)
|
||||
if deleted > 0 {
|
||||
fmt.Printf("Backfill: Deleted %d orphaned manifests for %s\n", deleted, did)
|
||||
slog.Info("Backfill deleted orphaned manifests", "count", deleted, "did", did)
|
||||
}
|
||||
|
||||
case atproto.TagCollection:
|
||||
@@ -244,7 +256,7 @@ func (b *BackfillWorker) reconcileDeletions(did, collection string, foundManifes
|
||||
// Log deletions
|
||||
deleted := len(dbTags) - len(foundTags)
|
||||
if deleted > 0 {
|
||||
fmt.Printf("Backfill: Deleted %d orphaned tags for %s\n", deleted, did)
|
||||
slog.Info("Backfill deleted orphaned tags", "count", deleted, "did", did)
|
||||
}
|
||||
|
||||
case atproto.StarCollection:
|
||||
@@ -262,175 +274,142 @@ func (b *BackfillWorker) reconcileDeletions(did, collection string, foundManifes
|
||||
func (b *BackfillWorker) processRecord(ctx context.Context, did, collection string, record *atproto.Record) error {
|
||||
switch collection {
|
||||
case atproto.ManifestCollection:
|
||||
return b.processManifestRecord(did, record)
|
||||
_, err := b.processor.ProcessManifest(context.Background(), did, record.Value)
|
||||
return err
|
||||
case atproto.TagCollection:
|
||||
return b.processTagRecord(did, record)
|
||||
return b.processor.ProcessTag(context.Background(), did, record.Value)
|
||||
case atproto.StarCollection:
|
||||
return b.processStarRecord(did, record)
|
||||
return b.processor.ProcessStar(context.Background(), did, record.Value)
|
||||
case atproto.SailorProfileCollection:
|
||||
return b.processor.ProcessSailorProfile(ctx, did, record.Value, b.queryCaptainRecordWrapper)
|
||||
default:
|
||||
return fmt.Errorf("unsupported collection: %s", collection)
|
||||
}
|
||||
}
|
||||
|
||||
// processManifestRecord processes a manifest record
|
||||
func (b *BackfillWorker) processManifestRecord(did string, record *atproto.Record) error {
|
||||
var manifestRecord atproto.ManifestRecord
|
||||
if err := json.Unmarshal(record.Value, &manifestRecord); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal manifest: %w", err)
|
||||
}
|
||||
|
||||
// Extract OCI annotations from manifest
|
||||
var title, description, sourceURL, documentationURL, licenses, iconURL string
|
||||
if manifestRecord.Annotations != nil {
|
||||
title = manifestRecord.Annotations["org.opencontainers.image.title"]
|
||||
description = manifestRecord.Annotations["org.opencontainers.image.description"]
|
||||
sourceURL = manifestRecord.Annotations["org.opencontainers.image.source"]
|
||||
documentationURL = manifestRecord.Annotations["org.opencontainers.image.documentation"]
|
||||
licenses = manifestRecord.Annotations["org.opencontainers.image.licenses"]
|
||||
iconURL = manifestRecord.Annotations["io.atcr.icon"]
|
||||
}
|
||||
|
||||
// Insert manifest
|
||||
manifestID, err := db.InsertManifest(b.db, &db.Manifest{
|
||||
DID: did,
|
||||
Repository: manifestRecord.Repository,
|
||||
Digest: manifestRecord.Digest,
|
||||
MediaType: manifestRecord.MediaType,
|
||||
SchemaVersion: manifestRecord.SchemaVersion,
|
||||
ConfigDigest: manifestRecord.Config.Digest,
|
||||
ConfigSize: manifestRecord.Config.Size,
|
||||
HoldEndpoint: manifestRecord.HoldEndpoint,
|
||||
CreatedAt: manifestRecord.CreatedAt,
|
||||
Title: title,
|
||||
Description: description,
|
||||
SourceURL: sourceURL,
|
||||
DocumentationURL: documentationURL,
|
||||
Licenses: licenses,
|
||||
IconURL: iconURL,
|
||||
})
|
||||
if err != nil {
|
||||
// Skip if already exists
|
||||
if strings.Contains(err.Error(), "UNIQUE constraint failed") {
|
||||
// queryCaptainRecordWrapper wraps queryCaptainRecord with backfill-specific logic
|
||||
func (b *BackfillWorker) queryCaptainRecordWrapper(ctx context.Context, holdDID string) error {
|
||||
if err := b.queryCaptainRecord(ctx, holdDID); err != nil {
|
||||
// In test mode, only warn about default hold (local hold)
|
||||
// External/production holds may not have captain records yet (dev ahead of prod)
|
||||
if b.testMode && holdDID != b.defaultHoldDID {
|
||||
// Suppress warning for external holds in test mode
|
||||
return nil
|
||||
}
|
||||
slog.Warn("Backfill failed to query captain record for hold", "hold_did", holdDID, "error", err)
|
||||
// Don't fail the whole backfill - just skip this hold
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// queryCaptainRecord queries a hold's captain record and caches it in the database
|
||||
func (b *BackfillWorker) queryCaptainRecord(ctx context.Context, holdDID string) error {
|
||||
// Check if we already have it cached (skip if recently updated)
|
||||
existing, err := db.GetCaptainRecord(b.db, holdDID)
|
||||
if err == nil && existing != nil {
|
||||
// If cached within last hour, skip refresh
|
||||
if time.Since(existing.UpdatedAt) < 1*time.Hour {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("failed to insert manifest: %w", err)
|
||||
}
|
||||
|
||||
// Insert layers
|
||||
for i, layer := range manifestRecord.Layers {
|
||||
if err := db.InsertLayer(b.db, &db.Layer{
|
||||
ManifestID: manifestID,
|
||||
Digest: layer.Digest,
|
||||
MediaType: layer.MediaType,
|
||||
Size: layer.Size,
|
||||
LayerIndex: i,
|
||||
}); err != nil {
|
||||
// Continue on error - layer might already exist
|
||||
// Resolve hold DID to URL
|
||||
holdURL := atproto.ResolveHoldURL(holdDID)
|
||||
|
||||
// Create client for hold's PDS
|
||||
holdClient := atproto.NewClient(holdURL, holdDID, "")
|
||||
|
||||
// Query captain record with retries (for Docker startup timing)
|
||||
var record *atproto.Record
|
||||
maxRetries := 3
|
||||
for attempt := 1; attempt <= maxRetries; attempt++ {
|
||||
record, err = holdClient.GetRecord(ctx, "io.atcr.hold.captain", "self")
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Retry on connection errors (hold service might still be starting)
|
||||
if attempt < maxRetries && strings.Contains(err.Error(), "connection refused") {
|
||||
slog.Info("Backfill hold not ready, retrying", "attempt", attempt, "max_retries", maxRetries)
|
||||
time.Sleep(2 * time.Second)
|
||||
continue
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to get captain record: %w", err)
|
||||
}
|
||||
|
||||
// Parse captain record directly into db struct
|
||||
var captainRecord db.HoldCaptainRecord
|
||||
if err := json.Unmarshal(record.Value, &captainRecord); err != nil {
|
||||
return fmt.Errorf("failed to parse captain record: %w", err)
|
||||
}
|
||||
|
||||
// Set fields not from JSON
|
||||
captainRecord.HoldDID = holdDID
|
||||
captainRecord.UpdatedAt = time.Now()
|
||||
|
||||
if err := db.UpsertCaptainRecord(b.db, &captainRecord); err != nil {
|
||||
return fmt.Errorf("failed to cache captain record: %w", err)
|
||||
}
|
||||
|
||||
slog.Info("Backfill cached captain record for hold", "hold_did", holdDID, "owner_did", captainRecord.OwnerDID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// reconcileAnnotations ensures annotations come from the newest manifest in each repository
|
||||
// This fixes the out-of-order backfill issue where older manifests can overwrite newer annotations
|
||||
func (b *BackfillWorker) reconcileAnnotations(ctx context.Context, did string, pdsClient *atproto.Client) error {
|
||||
// Get all repositories for this DID
|
||||
repositories, err := db.GetRepositoriesForDID(b.db, did)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get repositories: %w", err)
|
||||
}
|
||||
|
||||
for _, repo := range repositories {
|
||||
// Find newest manifest for this repository
|
||||
newestManifest, err := db.GetNewestManifestForRepo(b.db, did, repo)
|
||||
if err != nil {
|
||||
slog.Warn("Backfill failed to get newest manifest for repo", "did", did, "repository", repo, "error", err)
|
||||
continue // Skip on error
|
||||
}
|
||||
|
||||
// Fetch the full manifest record from PDS using the digest as rkey
|
||||
rkey := strings.TrimPrefix(newestManifest.Digest, "sha256:")
|
||||
record, err := pdsClient.GetRecord(ctx, atproto.ManifestCollection, rkey)
|
||||
if err != nil {
|
||||
slog.Warn("Backfill failed to fetch manifest record for repo", "did", did, "repository", repo, "error", err)
|
||||
continue // Skip on error
|
||||
}
|
||||
|
||||
// Parse manifest record
|
||||
var manifestRecord atproto.ManifestRecord
|
||||
if err := json.Unmarshal(record.Value, &manifestRecord); err != nil {
|
||||
slog.Warn("Backfill failed to parse manifest record for repo", "did", did, "repository", repo, "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Update annotations from newest manifest only
|
||||
if len(manifestRecord.Annotations) > 0 {
|
||||
// Filter out empty annotations
|
||||
hasData := false
|
||||
for _, value := range manifestRecord.Annotations {
|
||||
if value != "" {
|
||||
hasData = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if hasData {
|
||||
err = db.UpsertRepositoryAnnotations(b.db, did, repo, manifestRecord.Annotations)
|
||||
if err != nil {
|
||||
slog.Warn("Backfill failed to reconcile annotations for repo", "did", did, "repository", repo, "error", err)
|
||||
} else {
|
||||
slog.Info("Backfill reconciled annotations for repo from newest manifest", "did", did, "repository", repo, "digest", newestManifest.Digest)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// processTagRecord processes a tag record
|
||||
func (b *BackfillWorker) processTagRecord(did string, record *atproto.Record) error {
|
||||
var tagRecord atproto.TagRecord
|
||||
if err := json.Unmarshal(record.Value, &tagRecord); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal tag: %w", err)
|
||||
}
|
||||
|
||||
// Insert or update tag
|
||||
return db.UpsertTag(b.db, &db.Tag{
|
||||
DID: did,
|
||||
Repository: tagRecord.Repository,
|
||||
Tag: tagRecord.Tag,
|
||||
Digest: tagRecord.ManifestDigest,
|
||||
CreatedAt: tagRecord.UpdatedAt,
|
||||
})
|
||||
}
|
||||
|
||||
// processStarRecord processes a star record
|
||||
func (b *BackfillWorker) processStarRecord(did string, record *atproto.Record) error {
|
||||
var starRecord atproto.StarRecord
|
||||
if err := json.Unmarshal(record.Value, &starRecord); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal star: %w", err)
|
||||
}
|
||||
|
||||
// Upsert the star record (idempotent - won't duplicate)
|
||||
// The DID here is the starrer (user who starred)
|
||||
// The subject contains the owner DID and repository
|
||||
// Star count will be calculated on demand from the stars table
|
||||
return db.UpsertStar(b.db, did, starRecord.Subject.DID, starRecord.Subject.Repository, starRecord.CreatedAt)
|
||||
}
|
||||
|
||||
// ensureUser resolves and upserts a user by DID
|
||||
func (b *BackfillWorker) ensureUser(ctx context.Context, did string) error {
|
||||
// Check if user already exists
|
||||
existingUser, err := db.GetUserByDID(b.db, did)
|
||||
if err == nil && existingUser != nil {
|
||||
// Update last seen
|
||||
existingUser.LastSeen = time.Now()
|
||||
return db.UpsertUser(b.db, existingUser)
|
||||
}
|
||||
|
||||
// Resolve DID to get handle and PDS endpoint
|
||||
didParsed, err := syntax.ParseDID(did)
|
||||
if err != nil {
|
||||
// Fallback: use DID as handle
|
||||
user := &db.User{
|
||||
DID: did,
|
||||
Handle: did,
|
||||
PDSEndpoint: "https://bsky.social",
|
||||
LastSeen: time.Now(),
|
||||
}
|
||||
return db.UpsertUser(b.db, user)
|
||||
}
|
||||
|
||||
ident, err := b.directory.LookupDID(ctx, didParsed)
|
||||
if err != nil {
|
||||
// Fallback: use DID as handle
|
||||
user := &db.User{
|
||||
DID: did,
|
||||
Handle: did,
|
||||
PDSEndpoint: "https://bsky.social",
|
||||
LastSeen: time.Now(),
|
||||
}
|
||||
return db.UpsertUser(b.db, user)
|
||||
}
|
||||
|
||||
resolvedDID := ident.DID.String()
|
||||
handle := ident.Handle.String()
|
||||
pdsEndpoint := ident.PDSEndpoint()
|
||||
|
||||
// If handle is invalid or PDS is missing, use defaults
|
||||
if handle == "handle.invalid" || handle == "" {
|
||||
handle = resolvedDID
|
||||
}
|
||||
if pdsEndpoint == "" {
|
||||
pdsEndpoint = "https://bsky.social"
|
||||
}
|
||||
|
||||
// Fetch user's Bluesky profile (including avatar)
|
||||
// Use public Bluesky AppView API (doesn't require auth for public profiles)
|
||||
avatar := ""
|
||||
publicClient := atproto.NewClient("https://public.api.bsky.app", "", "")
|
||||
profile, err := publicClient.GetActorProfile(ctx, resolvedDID)
|
||||
if err != nil {
|
||||
fmt.Printf("WARNING [backfill]: Failed to fetch profile for DID %s: %v\n", resolvedDID, err)
|
||||
// Continue without avatar
|
||||
} else {
|
||||
avatar = profile.Avatar
|
||||
}
|
||||
|
||||
// Upsert to database
|
||||
user := &db.User{
|
||||
DID: resolvedDID,
|
||||
Handle: handle,
|
||||
PDSEndpoint: pdsEndpoint,
|
||||
Avatar: avatar,
|
||||
LastSeen: time.Now(),
|
||||
}
|
||||
|
||||
return db.UpsertUser(b.db, user)
|
||||
}
|
||||
|
||||
386
pkg/appview/jetstream/processor.go
Normal file
386
pkg/appview/jetstream/processor.go
Normal file
@@ -0,0 +1,386 @@
|
||||
package jetstream
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/appview/db"
|
||||
"atcr.io/pkg/atproto"
|
||||
)
|
||||
|
||||
// Processor handles shared database operations for both Worker (live) and Backfill (sync)
|
||||
// This eliminates code duplication between the two data ingestion paths
|
||||
type Processor struct {
|
||||
db *sql.DB
|
||||
userCache *UserCache // Optional - enabled for Worker, disabled for Backfill
|
||||
useCache bool
|
||||
}
|
||||
|
||||
// NewProcessor creates a new shared processor
|
||||
// useCache: true for Worker (live streaming), false for Backfill (batch processing)
|
||||
func NewProcessor(database *sql.DB, useCache bool) *Processor {
|
||||
p := &Processor{
|
||||
db: database,
|
||||
useCache: useCache,
|
||||
}
|
||||
|
||||
if useCache {
|
||||
p.userCache = &UserCache{
|
||||
cache: make(map[string]*db.User),
|
||||
}
|
||||
}
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
// EnsureUser resolves and upserts a user by DID
|
||||
// Uses cache if enabled (Worker), queries DB if cache disabled (Backfill)
|
||||
func (p *Processor) EnsureUser(ctx context.Context, did string) error {
|
||||
// Check cache first (if enabled)
|
||||
if p.useCache && p.userCache != nil {
|
||||
if _, ok := p.userCache.cache[did]; ok {
|
||||
// User in cache - just update last seen timestamp
|
||||
return db.UpdateUserLastSeen(p.db, did)
|
||||
}
|
||||
} else if !p.useCache {
|
||||
// No cache - check if user already exists in DB
|
||||
existingUser, err := db.GetUserByDID(p.db, did)
|
||||
if err == nil && existingUser != nil {
|
||||
// User exists - just update last seen timestamp
|
||||
return db.UpdateUserLastSeen(p.db, did)
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve DID to get handle and PDS endpoint
|
||||
resolvedDID, handle, pdsEndpoint, err := atproto.ResolveIdentity(ctx, did)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Fetch user's Bluesky profile record from their PDS (including avatar)
|
||||
avatarURL := ""
|
||||
client := atproto.NewClient(pdsEndpoint, "", "")
|
||||
profileRecord, err := client.GetProfileRecord(ctx, resolvedDID)
|
||||
if err != nil {
|
||||
slog.Warn("Failed to fetch profile record", "component", "processor", "did", resolvedDID, "error", err)
|
||||
// Continue without avatar
|
||||
} else if profileRecord.Avatar != nil && profileRecord.Avatar.Ref.Link != "" {
|
||||
avatarURL = atproto.BlobCDNURL(resolvedDID, profileRecord.Avatar.Ref.Link)
|
||||
}
|
||||
|
||||
// Create user record
|
||||
user := &db.User{
|
||||
DID: resolvedDID,
|
||||
Handle: handle,
|
||||
PDSEndpoint: pdsEndpoint,
|
||||
Avatar: avatarURL,
|
||||
LastSeen: time.Now(),
|
||||
}
|
||||
|
||||
// Cache if enabled
|
||||
if p.useCache {
|
||||
p.userCache.cache[did] = user
|
||||
}
|
||||
|
||||
// Upsert to database
|
||||
// Use UpsertUser if we successfully fetched an avatar (to update existing users)
|
||||
// Use UpsertUserIgnoreAvatar if fetch failed (to preserve existing avatars)
|
||||
if avatarURL != "" {
|
||||
return db.UpsertUser(p.db, user)
|
||||
}
|
||||
return db.UpsertUserIgnoreAvatar(p.db, user)
|
||||
}
|
||||
|
||||
// ProcessManifest processes a manifest record and stores it in the database
|
||||
// Returns the manifest ID for further processing (layers/references)
|
||||
func (p *Processor) ProcessManifest(ctx context.Context, did string, recordData []byte) (int64, error) {
|
||||
// Unmarshal manifest record
|
||||
var manifestRecord atproto.ManifestRecord
|
||||
if err := json.Unmarshal(recordData, &manifestRecord); err != nil {
|
||||
return 0, fmt.Errorf("failed to unmarshal manifest: %w", err)
|
||||
}
|
||||
// Detect manifest type
|
||||
isManifestList := len(manifestRecord.Manifests) > 0
|
||||
|
||||
// Parse CreatedAt timestamp
|
||||
createdAt, err := time.Parse(time.RFC3339, manifestRecord.CreatedAt)
|
||||
if err != nil {
|
||||
// If parsing fails, use current time
|
||||
createdAt = time.Now()
|
||||
}
|
||||
|
||||
// Prepare manifest for insertion (WITHOUT annotation fields)
|
||||
manifest := &db.Manifest{
|
||||
DID: did,
|
||||
Repository: manifestRecord.Repository,
|
||||
Digest: manifestRecord.Digest,
|
||||
MediaType: manifestRecord.MediaType,
|
||||
SchemaVersion: int(manifestRecord.SchemaVersion),
|
||||
HoldEndpoint: manifestRecord.HoldEndpoint,
|
||||
CreatedAt: createdAt,
|
||||
// Annotations removed - stored separately in repository_annotations table
|
||||
}
|
||||
|
||||
// Set config fields only for image manifests (not manifest lists)
|
||||
if !isManifestList && manifestRecord.Config != nil {
|
||||
manifest.ConfigDigest = manifestRecord.Config.Digest
|
||||
manifest.ConfigSize = manifestRecord.Config.Size
|
||||
}
|
||||
|
||||
// Insert manifest
|
||||
manifestID, err := db.InsertManifest(p.db, manifest)
|
||||
if err != nil {
|
||||
// For backfill: if manifest already exists, get its ID
|
||||
if strings.Contains(err.Error(), "UNIQUE constraint failed") {
|
||||
var existingID int64
|
||||
err := p.db.QueryRow(`
|
||||
SELECT id FROM manifests
|
||||
WHERE did = ? AND repository = ? AND digest = ?
|
||||
`, manifest.DID, manifest.Repository, manifest.Digest).Scan(&existingID)
|
||||
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to get existing manifest ID: %w", err)
|
||||
}
|
||||
manifestID = existingID
|
||||
} else {
|
||||
return 0, fmt.Errorf("failed to insert manifest: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Update repository annotations ONLY if manifest has at least one non-empty annotation
|
||||
if manifestRecord.Annotations != nil {
|
||||
hasData := false
|
||||
for _, value := range manifestRecord.Annotations {
|
||||
if value != "" {
|
||||
hasData = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if hasData {
|
||||
// Replace all annotations for this repository
|
||||
err = db.UpsertRepositoryAnnotations(p.db, did, manifestRecord.Repository, manifestRecord.Annotations)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to upsert annotations: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Insert manifest references or layers
|
||||
if isManifestList {
|
||||
// Insert manifest references (for manifest lists/indexes)
|
||||
for i, ref := range manifestRecord.Manifests {
|
||||
platformArch := ""
|
||||
platformOS := ""
|
||||
platformVariant := ""
|
||||
platformOSVersion := ""
|
||||
|
||||
if ref.Platform != nil {
|
||||
platformArch = ref.Platform.Architecture
|
||||
platformOS = ref.Platform.OS
|
||||
platformVariant = ref.Platform.Variant
|
||||
platformOSVersion = ref.Platform.OSVersion
|
||||
}
|
||||
|
||||
if err := db.InsertManifestReference(p.db, &db.ManifestReference{
|
||||
ManifestID: manifestID,
|
||||
Digest: ref.Digest,
|
||||
MediaType: ref.MediaType,
|
||||
Size: ref.Size,
|
||||
PlatformArchitecture: platformArch,
|
||||
PlatformOS: platformOS,
|
||||
PlatformVariant: platformVariant,
|
||||
PlatformOSVersion: platformOSVersion,
|
||||
ReferenceIndex: i,
|
||||
}); err != nil {
|
||||
// Continue on error - reference might already exist
|
||||
continue
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Insert layers (for image manifests)
|
||||
for i, layer := range manifestRecord.Layers {
|
||||
if err := db.InsertLayer(p.db, &db.Layer{
|
||||
ManifestID: manifestID,
|
||||
Digest: layer.Digest,
|
||||
MediaType: layer.MediaType,
|
||||
Size: layer.Size,
|
||||
LayerIndex: i,
|
||||
}); err != nil {
|
||||
// Continue on error - layer might already exist
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return manifestID, nil
|
||||
}
|
||||
|
||||
// ProcessTag processes a tag record and stores it in the database
|
||||
func (p *Processor) ProcessTag(ctx context.Context, did string, recordData []byte) error {
|
||||
// Unmarshal tag record
|
||||
var tagRecord atproto.TagRecord
|
||||
if err := json.Unmarshal(recordData, &tagRecord); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal tag: %w", err)
|
||||
}
|
||||
// Extract digest from tag record (tries manifest field first, falls back to manifestDigest)
|
||||
manifestDigest, err := tagRecord.GetManifestDigest()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get manifest digest from tag record: %w", err)
|
||||
}
|
||||
|
||||
// Insert or update tag
|
||||
return db.UpsertTag(p.db, &db.Tag{
|
||||
DID: did,
|
||||
Repository: tagRecord.Repository,
|
||||
Tag: tagRecord.Tag,
|
||||
Digest: manifestDigest,
|
||||
CreatedAt: tagRecord.UpdatedAt,
|
||||
})
|
||||
}
|
||||
|
||||
// ProcessStar processes a star record and stores it in the database
|
||||
func (p *Processor) ProcessStar(ctx context.Context, did string, recordData []byte) error {
|
||||
// Unmarshal star record
|
||||
var starRecord atproto.StarRecord
|
||||
if err := json.Unmarshal(recordData, &starRecord); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal star: %w", err)
|
||||
}
|
||||
// Upsert the star record (idempotent - won't duplicate)
|
||||
// The DID here is the starrer (user who starred)
|
||||
// The subject contains the owner DID and repository
|
||||
// Star count will be calculated on demand from the stars table
|
||||
return db.UpsertStar(p.db, did, starRecord.Subject.DID, starRecord.Subject.Repository, starRecord.CreatedAt)
|
||||
}
|
||||
|
||||
// ProcessSailorProfile processes a sailor profile record
|
||||
// This is primarily used by backfill to cache captain records for holds
|
||||
func (p *Processor) ProcessSailorProfile(ctx context.Context, did string, recordData []byte, queryCaptainFn func(context.Context, string) error) error {
|
||||
// Unmarshal sailor profile record
|
||||
var profileRecord atproto.SailorProfileRecord
|
||||
if err := json.Unmarshal(recordData, &profileRecord); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal sailor profile: %w", err)
|
||||
}
|
||||
|
||||
// Skip if no default hold set
|
||||
if profileRecord.DefaultHold == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert hold URL/DID to canonical DID
|
||||
holdDID := atproto.ResolveHoldDIDFromURL(profileRecord.DefaultHold)
|
||||
if holdDID == "" {
|
||||
slog.Warn("Invalid hold reference in profile", "component", "processor", "did", did, "default_hold", profileRecord.DefaultHold)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Query and cache the captain record using provided function
|
||||
// This allows backfill-specific logic (retries, test mode handling) without duplicating it here
|
||||
if queryCaptainFn != nil {
|
||||
return queryCaptainFn(ctx, holdDID)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessIdentity handles identity change events (handle updates)
|
||||
// This is called when Jetstream receives an identity event indicating a handle change.
|
||||
// The identity cache is invalidated to ensure the next lookup uses the new handle,
|
||||
// and the database is updated to reflect the change in the UI.
|
||||
//
|
||||
// Only processes events for users who already exist in our database (have ATCR activity).
|
||||
func (p *Processor) ProcessIdentity(ctx context.Context, did string, newHandle string) error {
|
||||
// Check if user exists in our database - only update if they're an ATCR user
|
||||
user, err := db.GetUserByDID(p.db, did)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check user existence: %w", err)
|
||||
}
|
||||
|
||||
// Skip if user doesn't exist - they don't have any ATCR activity (manifests, profiles, etc.)
|
||||
if user == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update handle in database
|
||||
if err := db.UpdateUserHandle(p.db, did, newHandle); err != nil {
|
||||
slog.Warn("Failed to update user handle in database",
|
||||
"component", "processor",
|
||||
"did", did,
|
||||
"handle", newHandle,
|
||||
"error", err)
|
||||
// Continue to invalidate cache even if DB update fails
|
||||
}
|
||||
|
||||
// Invalidate cached identity data to force re-resolution on next lookup
|
||||
if err := atproto.InvalidateIdentity(ctx, did); err != nil {
|
||||
slog.Warn("Failed to invalidate identity cache",
|
||||
"component", "processor",
|
||||
"did", did,
|
||||
"error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
slog.Info("Processed identity change event",
|
||||
"component", "processor",
|
||||
"did", did,
|
||||
"old_handle", user.Handle,
|
||||
"new_handle", newHandle)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessAccount handles account status events (deactivation/reactivation)
|
||||
// This is called when Jetstream receives an account event indicating status changes.
|
||||
//
|
||||
// IMPORTANT: Deactivation events are ambiguous - they could indicate:
|
||||
// 1. Permanent account deactivation (user deleted account)
|
||||
// 2. PDS migration (account deactivated at old PDS, reactivated at new PDS)
|
||||
//
|
||||
// We DO NOT delete user data on deactivation events. Instead, we invalidate the
|
||||
// identity cache. On the next resolution attempt:
|
||||
// - If migrated: Resolution finds the new PDS and updates the database automatically
|
||||
// - If truly deactivated: Resolution fails and user won't appear in new queries
|
||||
//
|
||||
// This approach prevents data loss from PDS migrations while still handling deactivations.
|
||||
//
|
||||
// Only processes events for users who already exist in our database (have ATCR activity).
|
||||
func (p *Processor) ProcessAccount(ctx context.Context, did string, active bool, status string) error {
|
||||
// Only process deactivation events
|
||||
if active || status != "deactivated" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check if user exists in our database - only update if they're an ATCR user
|
||||
user, err := db.GetUserByDID(p.db, did)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check user existence: %w", err)
|
||||
}
|
||||
|
||||
// Skip if user doesn't exist - they don't have any ATCR activity
|
||||
if user == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Invalidate cached identity data to force re-resolution on next lookup
|
||||
// This will discover if the account was migrated (new PDS) or truly deactivated (resolution fails)
|
||||
if err := atproto.InvalidateIdentity(ctx, did); err != nil {
|
||||
slog.Warn("Failed to invalidate identity cache for deactivated account",
|
||||
"component", "processor",
|
||||
"did", did,
|
||||
"error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
slog.Info("Processed account deactivation event - cache invalidated",
|
||||
"component", "processor",
|
||||
"did", did,
|
||||
"handle", user.Handle,
|
||||
"status", status)
|
||||
|
||||
return nil
|
||||
}
|
||||
693
pkg/appview/jetstream/processor_test.go
Normal file
693
pkg/appview/jetstream/processor_test.go
Normal file
@@ -0,0 +1,693 @@
|
||||
package jetstream
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/atproto"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
// setupTestDB creates an in-memory SQLite database for testing
|
||||
func setupTestDB(t *testing.T) *sql.DB {
|
||||
database, err := sql.Open("sqlite3", ":memory:")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to open test database: %v", err)
|
||||
}
|
||||
|
||||
// Create schema
|
||||
schema := `
|
||||
CREATE TABLE users (
|
||||
did TEXT PRIMARY KEY,
|
||||
handle TEXT NOT NULL,
|
||||
pds_endpoint TEXT NOT NULL,
|
||||
avatar TEXT,
|
||||
last_seen TIMESTAMP NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE manifests (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
did TEXT NOT NULL,
|
||||
repository TEXT NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
hold_endpoint TEXT NOT NULL,
|
||||
schema_version INTEGER NOT NULL,
|
||||
media_type TEXT NOT NULL,
|
||||
config_digest TEXT,
|
||||
config_size INTEGER,
|
||||
created_at TIMESTAMP NOT NULL,
|
||||
UNIQUE(did, repository, digest)
|
||||
);
|
||||
|
||||
CREATE TABLE repository_annotations (
|
||||
did TEXT NOT NULL,
|
||||
repository TEXT NOT NULL,
|
||||
key TEXT NOT NULL,
|
||||
value TEXT NOT NULL,
|
||||
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
PRIMARY KEY(did, repository, key),
|
||||
FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
CREATE TABLE layers (
|
||||
manifest_id INTEGER NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
size INTEGER NOT NULL,
|
||||
media_type TEXT NOT NULL,
|
||||
layer_index INTEGER NOT NULL,
|
||||
PRIMARY KEY(manifest_id, layer_index)
|
||||
);
|
||||
|
||||
CREATE TABLE manifest_references (
|
||||
manifest_id INTEGER NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
media_type TEXT NOT NULL,
|
||||
size INTEGER NOT NULL,
|
||||
platform_architecture TEXT,
|
||||
platform_os TEXT,
|
||||
platform_variant TEXT,
|
||||
platform_os_version TEXT,
|
||||
reference_index INTEGER NOT NULL,
|
||||
PRIMARY KEY(manifest_id, reference_index)
|
||||
);
|
||||
|
||||
CREATE TABLE tags (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
did TEXT NOT NULL,
|
||||
repository TEXT NOT NULL,
|
||||
tag TEXT NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
created_at TIMESTAMP NOT NULL,
|
||||
UNIQUE(did, repository, tag)
|
||||
);
|
||||
|
||||
CREATE TABLE stars (
|
||||
starrer_did TEXT NOT NULL,
|
||||
owner_did TEXT NOT NULL,
|
||||
repository TEXT NOT NULL,
|
||||
created_at TIMESTAMP NOT NULL,
|
||||
PRIMARY KEY(starrer_did, owner_did, repository)
|
||||
);
|
||||
`
|
||||
|
||||
if _, err := database.Exec(schema); err != nil {
|
||||
t.Fatalf("Failed to create schema: %v", err)
|
||||
}
|
||||
|
||||
return database
|
||||
}
|
||||
|
||||
func TestNewProcessor(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
useCache bool
|
||||
}{
|
||||
{"with cache", true},
|
||||
{"without cache", false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
p := NewProcessor(database, tt.useCache)
|
||||
if p == nil {
|
||||
t.Fatal("NewProcessor returned nil")
|
||||
}
|
||||
if p.db != database {
|
||||
t.Error("Processor database not set correctly")
|
||||
}
|
||||
if p.useCache != tt.useCache {
|
||||
t.Errorf("useCache = %v, want %v", p.useCache, tt.useCache)
|
||||
}
|
||||
if tt.useCache && p.userCache == nil {
|
||||
t.Error("Cache enabled but userCache is nil")
|
||||
}
|
||||
if !tt.useCache && p.userCache != nil {
|
||||
t.Error("Cache disabled but userCache is not nil")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessManifest_ImageManifest(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
p := NewProcessor(database, false)
|
||||
ctx := context.Background()
|
||||
|
||||
// Create test manifest record
|
||||
manifestRecord := &atproto.ManifestRecord{
|
||||
Repository: "test-app",
|
||||
Digest: "sha256:abc123",
|
||||
MediaType: "application/vnd.oci.image.manifest.v1+json",
|
||||
SchemaVersion: 2,
|
||||
HoldEndpoint: "did:web:hold01.atcr.io",
|
||||
CreatedAt: time.Now().String(),
|
||||
Config: &atproto.BlobReference{
|
||||
Digest: "sha256:config123",
|
||||
Size: 1234,
|
||||
},
|
||||
Layers: []atproto.BlobReference{
|
||||
{Digest: "sha256:layer1", Size: 5000, MediaType: "application/vnd.oci.image.layer.v1.tar+gzip"},
|
||||
{Digest: "sha256:layer2", Size: 3000, MediaType: "application/vnd.oci.image.layer.v1.tar+gzip"},
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"org.opencontainers.image.title": "Test App",
|
||||
"org.opencontainers.image.description": "A test application",
|
||||
"org.opencontainers.image.source": "https://github.com/test/app",
|
||||
"org.opencontainers.image.licenses": "MIT",
|
||||
"io.atcr.icon": "https://example.com/icon.png",
|
||||
},
|
||||
}
|
||||
|
||||
// Marshal to bytes for ProcessManifest
|
||||
recordBytes, err := json.Marshal(manifestRecord)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to marshal manifest: %v", err)
|
||||
}
|
||||
|
||||
// Process manifest
|
||||
manifestID, err := p.ProcessManifest(ctx, "did:plc:test123", recordBytes)
|
||||
if err != nil {
|
||||
t.Fatalf("ProcessManifest failed: %v", err)
|
||||
}
|
||||
if manifestID == 0 {
|
||||
t.Error("Expected non-zero manifest ID")
|
||||
}
|
||||
|
||||
// Verify manifest was inserted
|
||||
var count int
|
||||
err = database.QueryRow("SELECT COUNT(*) FROM manifests WHERE did = ? AND repository = ? AND digest = ?",
|
||||
"did:plc:test123", "test-app", "sha256:abc123").Scan(&count)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query manifests: %v", err)
|
||||
}
|
||||
if count != 1 {
|
||||
t.Errorf("Expected 1 manifest, got %d", count)
|
||||
}
|
||||
|
||||
// Verify annotations were stored in repository_annotations table
|
||||
var title, source string
|
||||
err = database.QueryRow("SELECT value FROM repository_annotations WHERE did = ? AND repository = ? AND key = ?",
|
||||
"did:plc:test123", "test-app", "org.opencontainers.image.title").Scan(&title)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query title annotation: %v", err)
|
||||
}
|
||||
if title != "Test App" {
|
||||
t.Errorf("title = %q, want %q", title, "Test App")
|
||||
}
|
||||
|
||||
err = database.QueryRow("SELECT value FROM repository_annotations WHERE did = ? AND repository = ? AND key = ?",
|
||||
"did:plc:test123", "test-app", "org.opencontainers.image.source").Scan(&source)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query source annotation: %v", err)
|
||||
}
|
||||
if source != "https://github.com/test/app" {
|
||||
t.Errorf("source = %q, want %q", source, "https://github.com/test/app")
|
||||
}
|
||||
|
||||
// Verify layers were inserted
|
||||
var layerCount int
|
||||
err = database.QueryRow("SELECT COUNT(*) FROM layers WHERE manifest_id = ?", manifestID).Scan(&layerCount)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query layers: %v", err)
|
||||
}
|
||||
if layerCount != 2 {
|
||||
t.Errorf("Expected 2 layers, got %d", layerCount)
|
||||
}
|
||||
|
||||
// Verify no manifest references (this is an image, not a list)
|
||||
var refCount int
|
||||
err = database.QueryRow("SELECT COUNT(*) FROM manifest_references WHERE manifest_id = ?", manifestID).Scan(&refCount)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query manifest_references: %v", err)
|
||||
}
|
||||
if refCount != 0 {
|
||||
t.Errorf("Expected 0 manifest references, got %d", refCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessManifest_ManifestList(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
p := NewProcessor(database, false)
|
||||
ctx := context.Background()
|
||||
|
||||
// Create test manifest list record
|
||||
manifestRecord := &atproto.ManifestRecord{
|
||||
Repository: "test-app",
|
||||
Digest: "sha256:list123",
|
||||
MediaType: "application/vnd.oci.image.index.v1+json",
|
||||
SchemaVersion: 2,
|
||||
HoldEndpoint: "did:web:hold01.atcr.io",
|
||||
CreatedAt: time.Now().String(),
|
||||
Manifests: []atproto.ManifestReference{
|
||||
{
|
||||
Digest: "sha256:amd64manifest",
|
||||
MediaType: "application/vnd.oci.image.manifest.v1+json",
|
||||
Size: 1000,
|
||||
Platform: &atproto.Platform{
|
||||
Architecture: "amd64",
|
||||
OS: "linux",
|
||||
},
|
||||
},
|
||||
{
|
||||
Digest: "sha256:arm64manifest",
|
||||
MediaType: "application/vnd.oci.image.manifest.v1+json",
|
||||
Size: 1100,
|
||||
Platform: &atproto.Platform{
|
||||
Architecture: "arm64",
|
||||
OS: "linux",
|
||||
Variant: "v8",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Marshal to bytes for ProcessManifest
|
||||
recordBytes, err := json.Marshal(manifestRecord)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to marshal manifest: %v", err)
|
||||
}
|
||||
|
||||
// Process manifest list
|
||||
manifestID, err := p.ProcessManifest(ctx, "did:plc:test123", recordBytes)
|
||||
if err != nil {
|
||||
t.Fatalf("ProcessManifest failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify manifest references were inserted
|
||||
var refCount int
|
||||
err = database.QueryRow("SELECT COUNT(*) FROM manifest_references WHERE manifest_id = ?", manifestID).Scan(&refCount)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query manifest_references: %v", err)
|
||||
}
|
||||
if refCount != 2 {
|
||||
t.Errorf("Expected 2 manifest references, got %d", refCount)
|
||||
}
|
||||
|
||||
// Verify platform info was stored
|
||||
var arch, os string
|
||||
err = database.QueryRow("SELECT platform_architecture, platform_os FROM manifest_references WHERE manifest_id = ? AND reference_index = 0", manifestID).Scan(&arch, &os)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query platform info: %v", err)
|
||||
}
|
||||
if arch != "amd64" {
|
||||
t.Errorf("platform_architecture = %q, want %q", arch, "amd64")
|
||||
}
|
||||
if os != "linux" {
|
||||
t.Errorf("platform_os = %q, want %q", os, "linux")
|
||||
}
|
||||
|
||||
// Verify no layers (this is a list, not an image)
|
||||
var layerCount int
|
||||
err = database.QueryRow("SELECT COUNT(*) FROM layers WHERE manifest_id = ?", manifestID).Scan(&layerCount)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query layers: %v", err)
|
||||
}
|
||||
if layerCount != 0 {
|
||||
t.Errorf("Expected 0 layers, got %d", layerCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessTag(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
p := NewProcessor(database, false)
|
||||
ctx := context.Background()
|
||||
|
||||
// Create test tag record (using ManifestDigest field for simplicity)
|
||||
tagRecord := &atproto.TagRecord{
|
||||
Repository: "test-app",
|
||||
Tag: "latest",
|
||||
ManifestDigest: "sha256:abc123",
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
|
||||
// Marshal to bytes for ProcessTag
|
||||
recordBytes, err := json.Marshal(tagRecord)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to marshal tag: %v", err)
|
||||
}
|
||||
|
||||
// Process tag
|
||||
err = p.ProcessTag(ctx, "did:plc:test123", recordBytes)
|
||||
if err != nil {
|
||||
t.Fatalf("ProcessTag failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify tag was inserted
|
||||
var count int
|
||||
err = database.QueryRow("SELECT COUNT(*) FROM tags WHERE did = ? AND repository = ? AND tag = ?",
|
||||
"did:plc:test123", "test-app", "latest").Scan(&count)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query tags: %v", err)
|
||||
}
|
||||
if count != 1 {
|
||||
t.Errorf("Expected 1 tag, got %d", count)
|
||||
}
|
||||
|
||||
// Verify digest was stored
|
||||
var digest string
|
||||
err = database.QueryRow("SELECT digest FROM tags WHERE did = ? AND repository = ? AND tag = ?",
|
||||
"did:plc:test123", "test-app", "latest").Scan(&digest)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query tag digest: %v", err)
|
||||
}
|
||||
if digest != "sha256:abc123" {
|
||||
t.Errorf("digest = %q, want %q", digest, "sha256:abc123")
|
||||
}
|
||||
|
||||
// Test upserting same tag with new digest
|
||||
tagRecord.ManifestDigest = "sha256:newdigest"
|
||||
recordBytes, err = json.Marshal(tagRecord)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to marshal tag: %v", err)
|
||||
}
|
||||
err = p.ProcessTag(ctx, "did:plc:test123", recordBytes)
|
||||
if err != nil {
|
||||
t.Fatalf("ProcessTag (upsert) failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify tag was updated
|
||||
err = database.QueryRow("SELECT digest FROM tags WHERE did = ? AND repository = ? AND tag = ?",
|
||||
"did:plc:test123", "test-app", "latest").Scan(&digest)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query updated tag: %v", err)
|
||||
}
|
||||
if digest != "sha256:newdigest" {
|
||||
t.Errorf("digest = %q, want %q", digest, "sha256:newdigest")
|
||||
}
|
||||
|
||||
// Verify still only one tag (upsert, not insert)
|
||||
err = database.QueryRow("SELECT COUNT(*) FROM tags WHERE did = ? AND repository = ? AND tag = ?",
|
||||
"did:plc:test123", "test-app", "latest").Scan(&count)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query tags after upsert: %v", err)
|
||||
}
|
||||
if count != 1 {
|
||||
t.Errorf("Expected 1 tag after upsert, got %d", count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessStar(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
p := NewProcessor(database, false)
|
||||
ctx := context.Background()
|
||||
|
||||
// Create test star record
|
||||
starRecord := &atproto.StarRecord{
|
||||
Subject: atproto.StarSubject{
|
||||
DID: "did:plc:owner123",
|
||||
Repository: "test-app",
|
||||
},
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
|
||||
// Marshal to bytes for ProcessStar
|
||||
recordBytes, err := json.Marshal(starRecord)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to marshal star: %v", err)
|
||||
}
|
||||
|
||||
// Process star
|
||||
err = p.ProcessStar(ctx, "did:plc:starrer123", recordBytes)
|
||||
if err != nil {
|
||||
t.Fatalf("ProcessStar failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify star was inserted
|
||||
var count int
|
||||
err = database.QueryRow("SELECT COUNT(*) FROM stars WHERE starrer_did = ? AND owner_did = ? AND repository = ?",
|
||||
"did:plc:starrer123", "did:plc:owner123", "test-app").Scan(&count)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query stars: %v", err)
|
||||
}
|
||||
if count != 1 {
|
||||
t.Errorf("Expected 1 star, got %d", count)
|
||||
}
|
||||
|
||||
// Test upserting same star (should be idempotent)
|
||||
recordBytes, err = json.Marshal(starRecord)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to marshal star: %v", err)
|
||||
}
|
||||
err = p.ProcessStar(ctx, "did:plc:starrer123", recordBytes)
|
||||
if err != nil {
|
||||
t.Fatalf("ProcessStar (upsert) failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify still only one star
|
||||
err = database.QueryRow("SELECT COUNT(*) FROM stars WHERE starrer_did = ? AND owner_did = ? AND repository = ?",
|
||||
"did:plc:starrer123", "did:plc:owner123", "test-app").Scan(&count)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query stars after upsert: %v", err)
|
||||
}
|
||||
if count != 1 {
|
||||
t.Errorf("Expected 1 star after upsert, got %d", count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessManifest_Duplicate(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
p := NewProcessor(database, false)
|
||||
ctx := context.Background()
|
||||
|
||||
manifestRecord := &atproto.ManifestRecord{
|
||||
Repository: "test-app",
|
||||
Digest: "sha256:abc123",
|
||||
MediaType: "application/vnd.oci.image.manifest.v1+json",
|
||||
SchemaVersion: 2,
|
||||
HoldEndpoint: "did:web:hold01.atcr.io",
|
||||
CreatedAt: time.Now().String(),
|
||||
}
|
||||
|
||||
// Marshal to bytes for ProcessManifest
|
||||
recordBytes, err := json.Marshal(manifestRecord)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to marshal manifest: %v", err)
|
||||
}
|
||||
|
||||
// Insert first time
|
||||
id1, err := p.ProcessManifest(ctx, "did:plc:test123", recordBytes)
|
||||
if err != nil {
|
||||
t.Fatalf("First ProcessManifest failed: %v", err)
|
||||
}
|
||||
|
||||
// Insert duplicate
|
||||
id2, err := p.ProcessManifest(ctx, "did:plc:test123", recordBytes)
|
||||
if err != nil {
|
||||
t.Fatalf("Duplicate ProcessManifest failed: %v", err)
|
||||
}
|
||||
|
||||
// Should return existing ID
|
||||
if id1 != id2 {
|
||||
t.Errorf("Duplicate manifest got different ID: %d vs %d", id1, id2)
|
||||
}
|
||||
|
||||
// Verify only one manifest exists
|
||||
var count int
|
||||
err = database.QueryRow("SELECT COUNT(*) FROM manifests WHERE did = ? AND digest = ?",
|
||||
"did:plc:test123", "sha256:abc123").Scan(&count)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query manifests: %v", err)
|
||||
}
|
||||
if count != 1 {
|
||||
t.Errorf("Expected 1 manifest, got %d", count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessManifest_EmptyAnnotations(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
defer database.Close()
|
||||
|
||||
p := NewProcessor(database, false)
|
||||
ctx := context.Background()
|
||||
|
||||
// Manifest with nil annotations
|
||||
manifestRecord := &atproto.ManifestRecord{
|
||||
Repository: "test-app",
|
||||
Digest: "sha256:abc123",
|
||||
MediaType: "application/vnd.oci.image.manifest.v1+json",
|
||||
SchemaVersion: 2,
|
||||
HoldEndpoint: "did:web:hold01.atcr.io",
|
||||
CreatedAt: time.Now().String(),
|
||||
Annotations: nil,
|
||||
}
|
||||
|
||||
// Marshal to bytes for ProcessManifest
|
||||
recordBytes, err := json.Marshal(manifestRecord)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to marshal manifest: %v", err)
|
||||
}
|
||||
|
||||
_, err = p.ProcessManifest(ctx, "did:plc:test123", recordBytes)
|
||||
if err != nil {
|
||||
t.Fatalf("ProcessManifest failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify no annotations were stored (nil annotations should not create entries)
|
||||
var annotationCount int
|
||||
err = database.QueryRow("SELECT COUNT(*) FROM repository_annotations WHERE did = ? AND repository = ?",
|
||||
"did:plc:test123", "test-app").Scan(&annotationCount)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query annotations: %v", err)
|
||||
}
|
||||
if annotationCount != 0 {
|
||||
t.Errorf("Expected 0 annotations for nil annotations, got %d", annotationCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessIdentity(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
|
||||
processor := NewProcessor(db, false)
|
||||
|
||||
// Setup: Create test user
|
||||
testDID := "did:plc:alice123"
|
||||
testHandle := "alice.bsky.social"
|
||||
testPDS := "https://bsky.social"
|
||||
_, err := db.Exec(`
|
||||
INSERT INTO users (did, handle, pds_endpoint, last_seen)
|
||||
VALUES (?, ?, ?, ?)
|
||||
`, testDID, testHandle, testPDS, time.Now())
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to insert test user: %v", err)
|
||||
}
|
||||
|
||||
// Test 1: Process identity change event
|
||||
newHandle := "alice-new.bsky.social"
|
||||
err = processor.ProcessIdentity(context.Background(), testDID, newHandle)
|
||||
// Note: This will fail to invalidate cache since we don't have a real identity directory,
|
||||
// but we can still verify the database update happened
|
||||
if err != nil {
|
||||
t.Logf("Expected cache invalidation error (no real directory): %v", err)
|
||||
}
|
||||
|
||||
// Verify handle was updated in database
|
||||
var retrievedHandle string
|
||||
err = db.QueryRow(`
|
||||
SELECT handle FROM users WHERE did = ?
|
||||
`, testDID).Scan(&retrievedHandle)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query updated user: %v", err)
|
||||
}
|
||||
if retrievedHandle != newHandle {
|
||||
t.Errorf("Expected handle '%s', got '%s'", newHandle, retrievedHandle)
|
||||
}
|
||||
|
||||
// Test 2: Process identity change for non-existent user
|
||||
// Should not error (UPDATE just affects 0 rows)
|
||||
err = processor.ProcessIdentity(context.Background(), "did:plc:nonexistent", "new.handle")
|
||||
if err != nil {
|
||||
t.Logf("Expected cache invalidation error: %v", err)
|
||||
}
|
||||
|
||||
// Test 3: Process multiple identity changes
|
||||
handles := []string{"alice1.bsky.social", "alice2.bsky.social", "alice3.bsky.social"}
|
||||
for _, handle := range handles {
|
||||
err = processor.ProcessIdentity(context.Background(), testDID, handle)
|
||||
if err != nil {
|
||||
t.Logf("Expected cache invalidation error: %v", err)
|
||||
}
|
||||
|
||||
err = db.QueryRow(`
|
||||
SELECT handle FROM users WHERE did = ?
|
||||
`, testDID).Scan(&retrievedHandle)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query user after handle update: %v", err)
|
||||
}
|
||||
if retrievedHandle != handle {
|
||||
t.Errorf("Expected handle '%s', got '%s'", handle, retrievedHandle)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessAccount(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
|
||||
processor := NewProcessor(db, false)
|
||||
|
||||
// Setup: Create test user
|
||||
testDID := "did:plc:bob456"
|
||||
testHandle := "bob.bsky.social"
|
||||
testPDS := "https://bsky.social"
|
||||
_, err := db.Exec(`
|
||||
INSERT INTO users (did, handle, pds_endpoint, last_seen)
|
||||
VALUES (?, ?, ?, ?)
|
||||
`, testDID, testHandle, testPDS, time.Now())
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to insert test user: %v", err)
|
||||
}
|
||||
|
||||
// Test 1: Process account deactivation event
|
||||
err = processor.ProcessAccount(context.Background(), testDID, false, "deactivated")
|
||||
// Note: Cache invalidation will fail without real directory, but that's expected
|
||||
if err != nil {
|
||||
t.Logf("Expected cache invalidation error (no real directory): %v", err)
|
||||
}
|
||||
|
||||
// Verify user still exists in database (we don't delete on deactivation)
|
||||
var exists bool
|
||||
err = db.QueryRow(`
|
||||
SELECT EXISTS(SELECT 1 FROM users WHERE did = ?)
|
||||
`, testDID).Scan(&exists)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to check if user exists: %v", err)
|
||||
}
|
||||
if !exists {
|
||||
t.Error("User should still exist after deactivation event (no deletion)")
|
||||
}
|
||||
|
||||
// Test 2: Process account with active=true (should be ignored)
|
||||
err = processor.ProcessAccount(context.Background(), testDID, true, "active")
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error for active account, got: %v", err)
|
||||
}
|
||||
|
||||
// Test 3: Process account with status != "deactivated" (should be ignored)
|
||||
err = processor.ProcessAccount(context.Background(), testDID, false, "suspended")
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error for non-deactivated status, got: %v", err)
|
||||
}
|
||||
|
||||
// Test 4: Process account deactivation for non-existent user
|
||||
err = processor.ProcessAccount(context.Background(), "did:plc:nonexistent", false, "deactivated")
|
||||
// Cache invalidation will fail, but that's expected
|
||||
if err != nil {
|
||||
t.Logf("Expected cache invalidation error: %v", err)
|
||||
}
|
||||
|
||||
// Test 5: Process multiple deactivation events (idempotent)
|
||||
for i := 0; i < 3; i++ {
|
||||
err = processor.ProcessAccount(context.Background(), testDID, false, "deactivated")
|
||||
if err != nil {
|
||||
t.Logf("Expected cache invalidation error on iteration %d: %v", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
// User should still exist after multiple deactivations
|
||||
err = db.QueryRow(`
|
||||
SELECT EXISTS(SELECT 1 FROM users WHERE did = ?)
|
||||
`, testDID).Scan(&exists)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to check if user exists after multiple deactivations: %v", err)
|
||||
}
|
||||
if !exists {
|
||||
t.Error("User should still exist after multiple deactivation events")
|
||||
}
|
||||
}
|
||||
@@ -1,3 +1,6 @@
|
||||
// Package jetstream provides an ATProto Jetstream consumer for real-time updates.
|
||||
// It connects to the Bluesky Jetstream WebSocket, processes repository events,
|
||||
// indexes manifests and tags, and populates the AppView database for the web UI.
|
||||
package jetstream
|
||||
|
||||
import (
|
||||
@@ -5,13 +8,11 @@ import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/url"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/bluesky-social/indigo/atproto/identity"
|
||||
"github.com/bluesky-social/indigo/atproto/syntax"
|
||||
|
||||
"atcr.io/pkg/appview/db"
|
||||
"atcr.io/pkg/atproto"
|
||||
"github.com/gorilla/websocket"
|
||||
@@ -33,8 +34,7 @@ type Worker struct {
|
||||
startCursor int64
|
||||
wantedCollections []string
|
||||
debugCollectionCount int
|
||||
userCache *UserCache
|
||||
directory identity.Directory
|
||||
processor *Processor // Shared processor for DB operations
|
||||
eventCallback EventCallback
|
||||
connStartTime time.Time // Track when connection started for debugging
|
||||
|
||||
@@ -65,10 +65,7 @@ func NewWorker(database *sql.DB, jetstreamURL string, startCursor int64) *Worker
|
||||
atproto.TagCollection, // io.atcr.tag
|
||||
atproto.StarCollection, // io.atcr.sailor.star
|
||||
},
|
||||
userCache: &UserCache{
|
||||
cache: make(map[string]*db.User),
|
||||
},
|
||||
directory: identity.DefaultDirectory(),
|
||||
processor: NewProcessor(database, true), // Use cache for live streaming
|
||||
}
|
||||
}
|
||||
|
||||
@@ -93,14 +90,14 @@ func (w *Worker) Start(ctx context.Context) error {
|
||||
// Calculate lag (cursor is in microseconds)
|
||||
now := time.Now().UnixMicro()
|
||||
lagSeconds := float64(now-w.startCursor) / 1_000_000.0
|
||||
fmt.Printf("Jetstream: Starting from cursor %d (%.1f seconds behind live)\n", w.startCursor, lagSeconds)
|
||||
slog.Info("Jetstream starting from cursor", "cursor", w.startCursor, "lag_seconds", lagSeconds)
|
||||
}
|
||||
|
||||
// Disable compression for now to debug
|
||||
// q.Set("compress", "true")
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
fmt.Printf("Connecting to Jetstream: %s\n", u.String())
|
||||
slog.Info("Connecting to Jetstream", "url", u.String())
|
||||
|
||||
// Connect to Jetstream
|
||||
conn, _, err := websocket.DefaultDialer.DialContext(ctx, u.String(), nil)
|
||||
@@ -142,7 +139,7 @@ func (w *Worker) Start(ctx context.Context) error {
|
||||
}
|
||||
defer decoder.Close()
|
||||
|
||||
fmt.Println("Connected to Jetstream, listening for events...")
|
||||
slog.Info("Connected to Jetstream, listening for events...")
|
||||
|
||||
// Start heartbeat ticker to show Jetstream is alive
|
||||
heartbeatTicker := time.NewTicker(30 * time.Second)
|
||||
@@ -173,8 +170,7 @@ func (w *Worker) Start(ctx context.Context) error {
|
||||
|
||||
// If no pong for 60 seconds, connection is likely dead
|
||||
if timeSinceLastPong > 60*time.Second {
|
||||
fmt.Printf("Jetstream: No pong received for %s (sent %d pings, got %d pongs), closing connection\n",
|
||||
timeSinceLastPong, pingsTotal, pongsTotal)
|
||||
slog.Info("Jetstream no pong received, closing connection", "time_since_last_pong", timeSinceLastPong, "pings_sent", pingsTotal, "pongs_received", pongsTotal)
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
@@ -182,7 +178,7 @@ func (w *Worker) Start(ctx context.Context) error {
|
||||
// Send ping with write deadline
|
||||
conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
|
||||
if err := conn.WriteMessage(websocket.PingMessage, nil); err != nil {
|
||||
fmt.Printf("Jetstream: Failed to send ping: %v\n", err)
|
||||
slog.Warn("Jetstream failed to send ping", "error", err)
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
@@ -204,7 +200,7 @@ func (w *Worker) Start(ctx context.Context) error {
|
||||
return ctx.Err()
|
||||
case <-heartbeatTicker.C:
|
||||
elapsed := time.Since(lastHeartbeat)
|
||||
fmt.Printf("Jetstream: Alive (processed %d events in last %.0fs)\n", eventCount, elapsed.Seconds())
|
||||
slog.Info("Jetstream alive", "events_processed", eventCount, "elapsed_seconds", elapsed.Seconds())
|
||||
eventCount = 0
|
||||
lastHeartbeat = time.Now()
|
||||
default:
|
||||
@@ -240,13 +236,7 @@ func (w *Worker) Start(ctx context.Context) error {
|
||||
}
|
||||
|
||||
// Log detailed context about the failure
|
||||
fmt.Printf("Jetstream: Connection closed after %s\n", connDuration)
|
||||
fmt.Printf(" - Events in last 30s: %d\n", eventCount)
|
||||
fmt.Printf(" - Time since last event: %s\n", timeSinceLastEvent)
|
||||
fmt.Printf(" - Ping/Pong: %d/%d (%.1f%% success)\n", pongsTotal, pingsTotal, pongRate)
|
||||
fmt.Printf(" - Last pong: %s ago\n", timeSinceLastPong)
|
||||
fmt.Printf(" - Error: %v\n", err)
|
||||
fmt.Printf(" - Diagnosis: %s\n", diagnosis)
|
||||
slog.Info("Jetstream connection closed", "duration", connDuration, "events_in_last_30s", eventCount, "time_since_last_event", timeSinceLastEvent, "pongs_received", pongsTotal, "pings_sent", pingsTotal, "pong_rate_pct", pongRate, "time_since_last_pong", timeSinceLastPong, "error", err, "diagnosis", diagnosis)
|
||||
|
||||
return fmt.Errorf("failed to read message: %w", err)
|
||||
}
|
||||
@@ -256,7 +246,7 @@ func (w *Worker) Start(ctx context.Context) error {
|
||||
_ = decoder // Keep decoder to avoid unused variable error
|
||||
|
||||
if err := w.processMessage(message); err != nil {
|
||||
fmt.Printf("ERROR processing message: %v\n", err)
|
||||
slog.Error("ERROR processing message", "error", err)
|
||||
// Continue processing other messages
|
||||
} else {
|
||||
eventCount++
|
||||
@@ -294,125 +284,61 @@ func (w *Worker) processMessage(message []byte) error {
|
||||
w.eventCallback(event.TimeUS)
|
||||
}
|
||||
|
||||
// Only process commit events
|
||||
if event.Kind != "commit" {
|
||||
return nil
|
||||
}
|
||||
// Process based on event kind
|
||||
switch event.Kind {
|
||||
case "commit":
|
||||
commit := event.Commit
|
||||
if commit == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
commit := event.Commit
|
||||
if commit == nil {
|
||||
return nil
|
||||
}
|
||||
// Set DID on commit from parent event
|
||||
commit.DID = event.DID
|
||||
|
||||
// Set DID on commit from parent event
|
||||
commit.DID = event.DID
|
||||
// Debug: log first few collections we see to understand what's coming through
|
||||
if w.debugCollectionCount < 5 {
|
||||
slog.Debug("Jetstream received collection", "collection", commit.Collection, "did", commit.DID)
|
||||
w.debugCollectionCount++
|
||||
}
|
||||
|
||||
// Debug: log first few collections we see to understand what's coming through
|
||||
if w.debugCollectionCount < 5 {
|
||||
fmt.Printf("Jetstream DEBUG: Received collection=%s, did=%s\n", commit.Collection, commit.DID)
|
||||
w.debugCollectionCount++
|
||||
}
|
||||
// Process based on collection
|
||||
switch commit.Collection {
|
||||
case atproto.ManifestCollection:
|
||||
slog.Info("Jetstream processing manifest event", "did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey)
|
||||
return w.processManifest(commit)
|
||||
case atproto.TagCollection:
|
||||
slog.Info("Jetstream processing tag event", "did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey)
|
||||
return w.processTag(commit)
|
||||
case atproto.StarCollection:
|
||||
slog.Info("Jetstream processing star event", "did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey)
|
||||
return w.processStar(commit)
|
||||
default:
|
||||
// Ignore other collections
|
||||
return nil
|
||||
}
|
||||
|
||||
case "identity":
|
||||
if event.Identity == nil {
|
||||
return nil
|
||||
}
|
||||
return w.processIdentity(&event)
|
||||
|
||||
case "account":
|
||||
if event.Account == nil {
|
||||
return nil
|
||||
}
|
||||
return w.processAccount(&event)
|
||||
|
||||
// Process based on collection
|
||||
switch commit.Collection {
|
||||
case atproto.ManifestCollection:
|
||||
fmt.Printf("Jetstream: Processing manifest event: did=%s, operation=%s, rkey=%s\n",
|
||||
commit.DID, commit.Operation, commit.RKey)
|
||||
return w.processManifest(commit)
|
||||
case atproto.TagCollection:
|
||||
fmt.Printf("Jetstream: Processing tag event: did=%s, operation=%s, rkey=%s\n",
|
||||
commit.DID, commit.Operation, commit.RKey)
|
||||
return w.processTag(commit)
|
||||
case atproto.StarCollection:
|
||||
fmt.Printf("Jetstream: Processing star event: did=%s, operation=%s, rkey=%s\n",
|
||||
commit.DID, commit.Operation, commit.RKey)
|
||||
return w.processStar(commit)
|
||||
default:
|
||||
// Ignore other collections
|
||||
// Ignore unknown event kinds
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ensureUser resolves and upserts a user by DID
|
||||
func (w *Worker) ensureUser(ctx context.Context, did string) error {
|
||||
// Check cache first
|
||||
if user, ok := w.userCache.cache[did]; ok {
|
||||
// Update last seen
|
||||
user.LastSeen = time.Now()
|
||||
return db.UpsertUser(w.db, user)
|
||||
}
|
||||
|
||||
// Resolve DID to get handle and PDS endpoint
|
||||
didParsed, err := syntax.ParseDID(did)
|
||||
if err != nil {
|
||||
fmt.Printf("WARNING: Invalid DID %s: %v (using DID as handle)\n", did, err)
|
||||
// Fallback: use DID as handle
|
||||
user := &db.User{
|
||||
DID: did,
|
||||
Handle: did,
|
||||
PDSEndpoint: "https://bsky.social", // Default PDS endpoint as fallback
|
||||
LastSeen: time.Now(),
|
||||
}
|
||||
w.userCache.cache[did] = user
|
||||
return db.UpsertUser(w.db, user)
|
||||
}
|
||||
|
||||
ident, err := w.directory.LookupDID(ctx, didParsed)
|
||||
if err != nil {
|
||||
fmt.Printf("WARNING: Failed to resolve DID %s: %v (using DID as handle)\n", did, err)
|
||||
// Fallback: use DID as handle
|
||||
user := &db.User{
|
||||
DID: did,
|
||||
Handle: did,
|
||||
PDSEndpoint: "https://bsky.social", // Default PDS endpoint as fallback
|
||||
LastSeen: time.Now(),
|
||||
}
|
||||
w.userCache.cache[did] = user
|
||||
return db.UpsertUser(w.db, user)
|
||||
}
|
||||
|
||||
resolvedDID := ident.DID.String()
|
||||
handle := ident.Handle.String()
|
||||
pdsEndpoint := ident.PDSEndpoint()
|
||||
|
||||
// If handle is invalid or PDS is missing, use defaults
|
||||
if handle == "handle.invalid" || handle == "" {
|
||||
handle = resolvedDID
|
||||
}
|
||||
if pdsEndpoint == "" {
|
||||
pdsEndpoint = "https://bsky.social"
|
||||
}
|
||||
|
||||
// Fetch user's Bluesky profile (including avatar)
|
||||
// Use public Bluesky AppView API (doesn't require auth for public profiles)
|
||||
avatar := ""
|
||||
publicClient := atproto.NewClient("https://public.api.bsky.app", "", "")
|
||||
profile, err := publicClient.GetActorProfile(ctx, resolvedDID)
|
||||
if err != nil {
|
||||
fmt.Printf("WARNING [worker]: Failed to fetch profile for DID %s: %v\n", resolvedDID, err)
|
||||
// Continue without avatar
|
||||
} else {
|
||||
avatar = profile.Avatar
|
||||
}
|
||||
|
||||
// Cache the user
|
||||
user := &db.User{
|
||||
DID: resolvedDID,
|
||||
Handle: handle,
|
||||
PDSEndpoint: pdsEndpoint,
|
||||
Avatar: avatar,
|
||||
LastSeen: time.Now(),
|
||||
}
|
||||
w.userCache.cache[did] = user
|
||||
|
||||
// Upsert to database
|
||||
return db.UpsertUser(w.db, user)
|
||||
}
|
||||
|
||||
// processManifest processes a manifest commit event
|
||||
func (w *Worker) processManifest(commit *CommitEvent) error {
|
||||
// Resolve and upsert user with handle/PDS endpoint
|
||||
if err := w.ensureUser(context.Background(), commit.DID); err != nil {
|
||||
if err := w.processor.EnsureUser(context.Background(), commit.DID); err != nil {
|
||||
return fmt.Errorf("failed to ensure user: %w", err)
|
||||
}
|
||||
|
||||
@@ -427,111 +353,59 @@ func (w *Worker) processManifest(commit *CommitEvent) error {
|
||||
}
|
||||
|
||||
// Parse manifest record
|
||||
var manifestRecord atproto.ManifestRecord
|
||||
if commit.Record != nil {
|
||||
recordBytes, err := json.Marshal(commit.Record)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal record: %w", err)
|
||||
}
|
||||
if err := json.Unmarshal(recordBytes, &manifestRecord); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal manifest: %w", err)
|
||||
}
|
||||
} else {
|
||||
// No record data, can't process
|
||||
return nil
|
||||
if commit.Record == nil {
|
||||
return nil // No record data, can't process
|
||||
}
|
||||
|
||||
// Extract OCI annotations from manifest
|
||||
var title, description, sourceURL, documentationURL, licenses, iconURL string
|
||||
if manifestRecord.Annotations != nil {
|
||||
title = manifestRecord.Annotations["org.opencontainers.image.title"]
|
||||
description = manifestRecord.Annotations["org.opencontainers.image.description"]
|
||||
sourceURL = manifestRecord.Annotations["org.opencontainers.image.source"]
|
||||
documentationURL = manifestRecord.Annotations["org.opencontainers.image.documentation"]
|
||||
licenses = manifestRecord.Annotations["org.opencontainers.image.licenses"]
|
||||
iconURL = manifestRecord.Annotations["io.atcr.icon"]
|
||||
}
|
||||
|
||||
// Insert manifest
|
||||
manifestID, err := db.InsertManifest(w.db, &db.Manifest{
|
||||
DID: commit.DID,
|
||||
Repository: manifestRecord.Repository,
|
||||
Digest: manifestRecord.Digest,
|
||||
MediaType: manifestRecord.MediaType,
|
||||
SchemaVersion: manifestRecord.SchemaVersion,
|
||||
ConfigDigest: manifestRecord.Config.Digest,
|
||||
ConfigSize: manifestRecord.Config.Size,
|
||||
HoldEndpoint: manifestRecord.HoldEndpoint,
|
||||
CreatedAt: manifestRecord.CreatedAt,
|
||||
Title: title,
|
||||
Description: description,
|
||||
SourceURL: sourceURL,
|
||||
DocumentationURL: documentationURL,
|
||||
Licenses: licenses,
|
||||
IconURL: iconURL,
|
||||
})
|
||||
// Marshal map to bytes for processing
|
||||
recordBytes, err := json.Marshal(commit.Record)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to insert manifest: %w", err)
|
||||
return fmt.Errorf("failed to marshal record: %w", err)
|
||||
}
|
||||
|
||||
// Insert layers
|
||||
for i, layer := range manifestRecord.Layers {
|
||||
if err := db.InsertLayer(w.db, &db.Layer{
|
||||
ManifestID: manifestID,
|
||||
Digest: layer.Digest,
|
||||
MediaType: layer.MediaType,
|
||||
Size: layer.Size,
|
||||
LayerIndex: i,
|
||||
}); err != nil {
|
||||
// Continue on error - layer might already exist
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
// Use shared processor for DB operations
|
||||
_, err = w.processor.ProcessManifest(context.Background(), commit.DID, recordBytes)
|
||||
return err
|
||||
}
|
||||
|
||||
// processTag processes a tag commit event
|
||||
func (w *Worker) processTag(commit *CommitEvent) error {
|
||||
// Resolve and upsert user with handle/PDS endpoint
|
||||
if err := w.ensureUser(context.Background(), commit.DID); err != nil {
|
||||
if err := w.processor.EnsureUser(context.Background(), commit.DID); err != nil {
|
||||
return fmt.Errorf("failed to ensure user: %w", err)
|
||||
}
|
||||
|
||||
if commit.Operation == "delete" {
|
||||
// Delete tag - decode rkey back to repository and tag
|
||||
repo, tag := atproto.RKeyToRepositoryTag(commit.RKey)
|
||||
return db.DeleteTag(w.db, commit.DID, repo, tag)
|
||||
}
|
||||
|
||||
// Parse tag record
|
||||
var tagRecord atproto.TagRecord
|
||||
if commit.Record != nil {
|
||||
recordBytes, err := json.Marshal(commit.Record)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal record: %w", err)
|
||||
slog.Info("Jetstream deleting tag", "did", commit.DID, "repository", repo, "tag", tag, "rkey", commit.RKey)
|
||||
if err := db.DeleteTag(w.db, commit.DID, repo, tag); err != nil {
|
||||
slog.Error("Jetstream ERROR deleting tag", "error", err)
|
||||
return err
|
||||
}
|
||||
if err := json.Unmarshal(recordBytes, &tagRecord); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal tag: %w", err)
|
||||
}
|
||||
} else {
|
||||
slog.Info("Jetstream successfully deleted tag", "did", commit.DID, "repository", repo, "tag", tag)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Insert or update tag
|
||||
return db.UpsertTag(w.db, &db.Tag{
|
||||
DID: commit.DID,
|
||||
Repository: tagRecord.Repository,
|
||||
Tag: tagRecord.Tag,
|
||||
Digest: tagRecord.ManifestDigest,
|
||||
CreatedAt: tagRecord.UpdatedAt,
|
||||
})
|
||||
// Parse tag record
|
||||
if commit.Record == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Marshal map to bytes for processing
|
||||
recordBytes, err := json.Marshal(commit.Record)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal record: %w", err)
|
||||
}
|
||||
|
||||
// Use shared processor for DB operations
|
||||
return w.processor.ProcessTag(context.Background(), commit.DID, recordBytes)
|
||||
}
|
||||
|
||||
// processStar processes a star commit event
|
||||
func (w *Worker) processStar(commit *CommitEvent) error {
|
||||
// Resolve and upsert the user who starred (starrer)
|
||||
if err := w.ensureUser(context.Background(), commit.DID); err != nil {
|
||||
if err := w.processor.EnsureUser(context.Background(), commit.DID); err != nil {
|
||||
return fmt.Errorf("failed to ensure user: %w", err)
|
||||
}
|
||||
|
||||
@@ -548,21 +422,40 @@ func (w *Worker) processStar(commit *CommitEvent) error {
|
||||
}
|
||||
|
||||
// Parse star record
|
||||
var starRecord atproto.StarRecord
|
||||
if commit.Record != nil {
|
||||
recordBytes, err := json.Marshal(commit.Record)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal record: %w", err)
|
||||
}
|
||||
if err := json.Unmarshal(recordBytes, &starRecord); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal star: %w", err)
|
||||
}
|
||||
} else {
|
||||
if commit.Record == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Upsert the star record (idempotent - star count will be calculated on demand)
|
||||
return db.UpsertStar(w.db, commit.DID, starRecord.Subject.DID, starRecord.Subject.Repository, starRecord.CreatedAt)
|
||||
// Marshal map to bytes for processing
|
||||
recordBytes, err := json.Marshal(commit.Record)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal record: %w", err)
|
||||
}
|
||||
|
||||
// Use shared processor for DB operations
|
||||
return w.processor.ProcessStar(context.Background(), commit.DID, recordBytes)
|
||||
}
|
||||
|
||||
// processIdentity processes an identity event (handle change)
|
||||
func (w *Worker) processIdentity(event *JetstreamEvent) error {
|
||||
if event.Identity == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
identity := event.Identity
|
||||
// Process via shared processor (only ATCR users will be logged at Info level)
|
||||
return w.processor.ProcessIdentity(context.Background(), identity.DID, identity.Handle)
|
||||
}
|
||||
|
||||
// processAccount processes an account event (status change)
|
||||
func (w *Worker) processAccount(event *JetstreamEvent) error {
|
||||
if event.Account == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
account := event.Account
|
||||
// Process via shared processor (only ATCR users will be logged at Info level)
|
||||
return w.processor.ProcessAccount(context.Background(), account.DID, account.Active, account.Status)
|
||||
}
|
||||
|
||||
// JetstreamEvent represents a Jetstream event
|
||||
|
||||
2
pkg/appview/licenses/.gitignore
vendored
Normal file
2
pkg/appview/licenses/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
# Generated SPDX license data
|
||||
spdx-licenses.json
|
||||
64
pkg/appview/licenses/integration_test.go
Normal file
64
pkg/appview/licenses/integration_test.go
Normal file
@@ -0,0 +1,64 @@
|
||||
package licenses_test
|
||||
|
||||
import (
|
||||
"html/template"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"atcr.io/pkg/appview/licenses"
|
||||
)
|
||||
|
||||
// Test template integration with parseLicenses
|
||||
func TestTemplateIntegration(t *testing.T) {
|
||||
funcMap := template.FuncMap{
|
||||
"parseLicenses": func(licensesStr string) []licenses.LicenseInfo {
|
||||
return licenses.ParseLicenses(licensesStr)
|
||||
},
|
||||
}
|
||||
|
||||
tmplStr := `{{ range parseLicenses . }}{{ if .IsValid }}[VALID:{{ .SPDXID }}:{{ .URL }}]{{ else }}[INVALID:{{ .Name }}]{{ end }}{{ end }}`
|
||||
|
||||
tmpl := template.Must(template.New("test").Funcs(funcMap).Parse(tmplStr))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
wantText string
|
||||
}{
|
||||
{
|
||||
name: "MIT license",
|
||||
input: "MIT",
|
||||
wantText: "[VALID:MIT:https://spdx.org/licenses/MIT.html]",
|
||||
},
|
||||
{
|
||||
name: "Multiple licenses",
|
||||
input: "MIT, Apache-2.0",
|
||||
wantText: "[VALID:MIT:https://spdx.org/licenses/MIT.html][VALID:Apache-2.0:https://spdx.org/licenses/Apache-2.0.html]",
|
||||
},
|
||||
{
|
||||
name: "Unknown license",
|
||||
input: "CustomProprietary",
|
||||
wantText: "[INVALID:CustomProprietary]",
|
||||
},
|
||||
{
|
||||
name: "Mixed valid and invalid",
|
||||
input: "MIT, CustomLicense, Apache-2.0",
|
||||
wantText: "[VALID:MIT:https://spdx.org/licenses/MIT.html][INVALID:CustomLicense][VALID:Apache-2.0:https://spdx.org/licenses/Apache-2.0.html]",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var buf strings.Builder
|
||||
err := tmpl.Execute(&buf, tt.input)
|
||||
if err != nil {
|
||||
t.Fatalf("Template execution failed: %v", err)
|
||||
}
|
||||
|
||||
got := buf.String()
|
||||
if got != tt.wantText {
|
||||
t.Errorf("Template output mismatch:\nGot: %s\nWant: %s", got, tt.wantText)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
170
pkg/appview/licenses/licenses.go
Normal file
170
pkg/appview/licenses/licenses.go
Normal file
@@ -0,0 +1,170 @@
|
||||
// Package licenses provides SPDX license validation and parsing for container
|
||||
// image annotations. It embeds the official SPDX license list and provides
|
||||
// functions to look up license identifiers, validate them, and parse
|
||||
// multi-license strings with fuzzy matching support.
|
||||
package licenses
|
||||
|
||||
//go:generate curl -fsSL -o spdx-licenses.json https://spdx.org/licenses/licenses.json
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
)
|
||||
|
||||
//go:embed spdx-licenses.json
|
||||
var spdxLicensesJSON []byte
|
||||
|
||||
// SPDXLicense represents a license from the SPDX license list
|
||||
type SPDXLicense struct {
|
||||
LicenseID string `json:"licenseId"`
|
||||
Name string `json:"name"`
|
||||
Reference string `json:"reference"`
|
||||
IsOsiApproved bool `json:"isOsiApproved"`
|
||||
IsDeprecated bool `json:"isDeprecatedLicenseId"`
|
||||
DetailsURL string `json:"detailsUrl"`
|
||||
SeeAlso []string `json:"seeAlso"`
|
||||
IsFsfLibre bool `json:"isFsfLibre,omitempty"`
|
||||
}
|
||||
|
||||
// SPDXLicenseList represents the complete SPDX license list JSON structure
|
||||
type SPDXLicenseList struct {
|
||||
LicenseListVersion string `json:"licenseListVersion"`
|
||||
Licenses []SPDXLicense `json:"licenses"`
|
||||
ReleaseDate string `json:"releaseDate"`
|
||||
}
|
||||
|
||||
// LicenseInfo represents parsed license information for template rendering
|
||||
type LicenseInfo struct {
|
||||
Name string // Original name from annotation
|
||||
SPDXID string // Normalized SPDX identifier
|
||||
URL string // Link to SPDX license page
|
||||
IsValid bool // Whether this is a recognized SPDX license
|
||||
}
|
||||
|
||||
var spdxLicenses map[string]SPDXLicense
|
||||
var spdxLicenseListVersion string
|
||||
|
||||
// init parses the embedded SPDX license list JSON and builds a lookup map
|
||||
func init() {
|
||||
var list SPDXLicenseList
|
||||
if err := json.Unmarshal(spdxLicensesJSON, &list); err != nil {
|
||||
// If parsing fails, just use an empty map
|
||||
spdxLicenses = make(map[string]SPDXLicense)
|
||||
return
|
||||
}
|
||||
|
||||
spdxLicenseListVersion = list.LicenseListVersion
|
||||
|
||||
// Build lookup map: licenseId -> SPDXLicense
|
||||
spdxLicenses = make(map[string]SPDXLicense, len(list.Licenses))
|
||||
for _, lic := range list.Licenses {
|
||||
// Store with original ID
|
||||
spdxLicenses[lic.LicenseID] = lic
|
||||
|
||||
// Also store normalized version (lowercase, no spaces/dashes)
|
||||
normalized := normalizeID(lic.LicenseID)
|
||||
spdxLicenses[normalized] = lic
|
||||
}
|
||||
}
|
||||
|
||||
// normalizeID converts a license ID to a normalized form for fuzzy matching
|
||||
// Examples: "Apache-2.0" -> "apache20", "GPL-3.0-only" -> "gpl30only"
|
||||
func normalizeID(id string) string {
|
||||
id = strings.ToLower(id)
|
||||
id = strings.ReplaceAll(id, "-", "")
|
||||
id = strings.ReplaceAll(id, "_", "")
|
||||
id = strings.ReplaceAll(id, ".", "")
|
||||
id = strings.ReplaceAll(id, " ", "")
|
||||
return id
|
||||
}
|
||||
|
||||
// GetLicenseInfo looks up a license by SPDX ID with fuzzy matching
|
||||
func GetLicenseInfo(licenseID string) (LicenseInfo, bool) {
|
||||
// Try exact match first
|
||||
if lic, ok := spdxLicenses[licenseID]; ok {
|
||||
return LicenseInfo{
|
||||
Name: lic.Name,
|
||||
SPDXID: lic.LicenseID,
|
||||
URL: lic.Reference,
|
||||
IsValid: true,
|
||||
}, true
|
||||
}
|
||||
|
||||
// Try normalized match
|
||||
normalized := normalizeID(licenseID)
|
||||
if lic, ok := spdxLicenses[normalized]; ok {
|
||||
return LicenseInfo{
|
||||
Name: lic.Name,
|
||||
SPDXID: lic.LicenseID,
|
||||
URL: lic.Reference,
|
||||
IsValid: true,
|
||||
}, true
|
||||
}
|
||||
|
||||
// Not found - return invalid license info
|
||||
return LicenseInfo{
|
||||
Name: licenseID,
|
||||
SPDXID: licenseID,
|
||||
URL: "",
|
||||
IsValid: false,
|
||||
}, false
|
||||
}
|
||||
|
||||
// ParseLicenses parses a license string (possibly containing multiple licenses)
|
||||
// and returns a slice of LicenseInfo structs.
|
||||
//
|
||||
// Supported separators: comma, semicolon, " AND ", " OR "
|
||||
// Examples:
|
||||
// - "MIT" -> [{MIT}]
|
||||
// - "MIT, Apache-2.0" -> [{MIT}, {Apache-2.0}]
|
||||
// - "MIT AND Apache-2.0" -> [{MIT}, {Apache-2.0}]
|
||||
func ParseLicenses(licensesStr string) []LicenseInfo {
|
||||
if licensesStr == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Split on various separators
|
||||
licensesStr = strings.ReplaceAll(licensesStr, " AND ", ",")
|
||||
licensesStr = strings.ReplaceAll(licensesStr, " OR ", ",")
|
||||
licensesStr = strings.ReplaceAll(licensesStr, ";", ",")
|
||||
|
||||
parts := strings.Split(licensesStr, ",")
|
||||
|
||||
var result []LicenseInfo
|
||||
seen := make(map[string]bool) // Deduplicate
|
||||
|
||||
for _, part := range parts {
|
||||
part = strings.TrimSpace(part)
|
||||
if part == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip if we've already seen this license
|
||||
if seen[part] {
|
||||
continue
|
||||
}
|
||||
seen[part] = true
|
||||
|
||||
// Look up license info
|
||||
info, found := GetLicenseInfo(part)
|
||||
if !found {
|
||||
// Unknown license - still include it as invalid
|
||||
info = LicenseInfo{
|
||||
Name: part,
|
||||
SPDXID: part,
|
||||
URL: "",
|
||||
IsValid: false,
|
||||
}
|
||||
}
|
||||
|
||||
result = append(result, info)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// GetVersion returns the SPDX License List version
|
||||
func GetVersion() string {
|
||||
return spdxLicenseListVersion
|
||||
}
|
||||
125
pkg/appview/licenses/licenses_test.go
Normal file
125
pkg/appview/licenses/licenses_test.go
Normal file
@@ -0,0 +1,125 @@
|
||||
package licenses
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGetLicenseInfo(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
wantValid bool
|
||||
wantSPDX string
|
||||
}{
|
||||
{"MIT exact", "MIT", true, "MIT"},
|
||||
{"Apache-2.0 exact", "Apache-2.0", true, "Apache-2.0"},
|
||||
{"Apache 2.0 fuzzy", "Apache 2.0", true, "Apache-2.0"},
|
||||
{"GPL-3.0 exact", "GPL-3.0-only", true, "GPL-3.0-only"},
|
||||
{"Unknown license", "CustomProprietary", false, "CustomProprietary"},
|
||||
{"BSD-3-Clause", "BSD-3-Clause", true, "BSD-3-Clause"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
info, found := GetLicenseInfo(tt.input)
|
||||
|
||||
if info.IsValid != tt.wantValid {
|
||||
t.Errorf("GetLicenseInfo(%q).IsValid = %v, want %v", tt.input, info.IsValid, tt.wantValid)
|
||||
}
|
||||
|
||||
if tt.wantValid && !found {
|
||||
t.Errorf("GetLicenseInfo(%q) not found, want found", tt.input)
|
||||
}
|
||||
|
||||
if info.SPDXID != tt.wantSPDX {
|
||||
t.Errorf("GetLicenseInfo(%q).SPDXID = %q, want %q", tt.input, info.SPDXID, tt.wantSPDX)
|
||||
}
|
||||
|
||||
if info.IsValid && info.URL == "" {
|
||||
t.Errorf("GetLicenseInfo(%q).URL is empty for valid license", tt.input)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseLicenses(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
wantCount int
|
||||
wantFirst string
|
||||
wantSecond string
|
||||
}{
|
||||
{"Single license", "MIT", 1, "MIT", ""},
|
||||
{"Two licenses comma", "MIT, Apache-2.0", 2, "MIT", "Apache-2.0"},
|
||||
{"Two licenses AND", "MIT AND Apache-2.0", 2, "MIT", "Apache-2.0"},
|
||||
{"Three licenses", "MIT, Apache-2.0, GPL-3.0-only", 3, "MIT", "Apache-2.0"},
|
||||
{"Empty string", "", 0, "", ""},
|
||||
{"Whitespace", " MIT ", 1, "MIT", ""},
|
||||
{"Duplicate licenses", "MIT, MIT, Apache-2.0", 2, "MIT", "Apache-2.0"},
|
||||
{"Mixed separators", "MIT; Apache-2.0, BSD-3-Clause", 3, "MIT", "Apache-2.0"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := ParseLicenses(tt.input)
|
||||
|
||||
if len(result) != tt.wantCount {
|
||||
t.Errorf("ParseLicenses(%q) returned %d licenses, want %d", tt.input, len(result), tt.wantCount)
|
||||
}
|
||||
|
||||
if tt.wantCount > 0 && result[0].SPDXID != tt.wantFirst {
|
||||
t.Errorf("ParseLicenses(%q)[0].SPDXID = %q, want %q", tt.input, result[0].SPDXID, tt.wantFirst)
|
||||
}
|
||||
|
||||
if tt.wantCount > 1 && result[1].SPDXID != tt.wantSecond {
|
||||
t.Errorf("ParseLicenses(%q)[1].SPDXID = %q, want %q", tt.input, result[1].SPDXID, tt.wantSecond)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalizeID(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
want string
|
||||
}{
|
||||
{"MIT", "mit"},
|
||||
{"Apache-2.0", "apache20"},
|
||||
{"GPL-3.0-only", "gpl30only"},
|
||||
{"BSD-3-Clause", "bsd3clause"},
|
||||
{"CC-BY-4.0", "ccby40"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.input, func(t *testing.T) {
|
||||
got := normalizeID(tt.input)
|
||||
if got != tt.want {
|
||||
t.Errorf("normalizeID(%q) = %q, want %q", tt.input, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetVersion(t *testing.T) {
|
||||
version := GetVersion()
|
||||
if version == "" {
|
||||
t.Error("GetVersion() returned empty string")
|
||||
}
|
||||
t.Logf("SPDX License List version: %s", version)
|
||||
}
|
||||
|
||||
func TestSPDXDataLoaded(t *testing.T) {
|
||||
if len(spdxLicenses) == 0 {
|
||||
t.Fatal("SPDX license data not loaded")
|
||||
}
|
||||
t.Logf("Loaded %d SPDX licenses", len(spdxLicenses))
|
||||
|
||||
// Verify some common licenses exist
|
||||
commonLicenses := []string{"MIT", "Apache-2.0", "GPL-3.0-only", "BSD-3-Clause"}
|
||||
for _, lic := range commonLicenses {
|
||||
if _, ok := spdxLicenses[lic]; !ok {
|
||||
t.Errorf("Common license %q not found in SPDX data", lic)
|
||||
}
|
||||
}
|
||||
}
|
||||
134
pkg/appview/licenses/template_example_test.go
Normal file
134
pkg/appview/licenses/template_example_test.go
Normal file
@@ -0,0 +1,134 @@
|
||||
package licenses_test
|
||||
|
||||
import (
|
||||
"html/template"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"atcr.io/pkg/appview/licenses"
|
||||
)
|
||||
|
||||
// TestRepositoryPageTemplate demonstrates how the license badges will render
|
||||
// in the actual repository.html template
|
||||
func TestRepositoryPageTemplate(t *testing.T) {
|
||||
funcMap := template.FuncMap{
|
||||
"parseLicenses": func(licensesStr string) []licenses.LicenseInfo {
|
||||
return licenses.ParseLicenses(licensesStr)
|
||||
},
|
||||
}
|
||||
|
||||
// This is the exact template structure from repository.html
|
||||
tmplStr := `{{ if .Licenses }}` +
|
||||
`{{ range parseLicenses .Licenses }}` +
|
||||
`{{ if .IsValid }}` +
|
||||
`<a href="{{ .URL }}" target="_blank" rel="noopener noreferrer" class="metadata-badge license-badge" title="{{ .Name }}">{{ .SPDXID }}</a>` +
|
||||
`{{ else }}` +
|
||||
`<span class="metadata-badge license-badge" title="Custom license: {{ .Name }}">{{ .Name }}</span>` +
|
||||
`{{ end }}` +
|
||||
`{{ end }}` +
|
||||
`{{ end }}`
|
||||
|
||||
tmpl := template.Must(template.New("test").Funcs(funcMap).Parse(tmplStr))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
licenses string
|
||||
wantContain []string
|
||||
wantNotContain []string
|
||||
}{
|
||||
{
|
||||
name: "MIT license",
|
||||
licenses: "MIT",
|
||||
wantContain: []string{
|
||||
`<a href="https://spdx.org/licenses/MIT.html"`,
|
||||
`class="metadata-badge license-badge"`,
|
||||
`title="MIT License"`,
|
||||
`>MIT</a>`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Multiple valid licenses",
|
||||
licenses: "MIT, Apache-2.0, GPL-3.0-only",
|
||||
wantContain: []string{
|
||||
`https://spdx.org/licenses/MIT.html`,
|
||||
`https://spdx.org/licenses/Apache-2.0.html`,
|
||||
`https://spdx.org/licenses/GPL-3.0-only.html`,
|
||||
`>MIT</a>`,
|
||||
`>Apache-2.0</a>`,
|
||||
`>GPL-3.0-only</a>`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Custom license",
|
||||
licenses: "CustomProprietary",
|
||||
wantContain: []string{
|
||||
`<span class="metadata-badge license-badge"`,
|
||||
`title="Custom license: CustomProprietary"`,
|
||||
`>CustomProprietary</span>`,
|
||||
},
|
||||
wantNotContain: []string{
|
||||
`<a href=`,
|
||||
`https://spdx.org`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Mixed valid and custom",
|
||||
licenses: "MIT, MyCustomLicense",
|
||||
wantContain: []string{
|
||||
// Valid license (MIT) should be a link
|
||||
`<a href="https://spdx.org/licenses/MIT.html"`,
|
||||
`>MIT</a>`,
|
||||
// Custom license should be a span
|
||||
`<span class="metadata-badge license-badge"`,
|
||||
`title="Custom license: MyCustomLicense"`,
|
||||
`>MyCustomLicense</span>`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Apache fuzzy match",
|
||||
licenses: "Apache 2.0",
|
||||
wantContain: []string{
|
||||
`https://spdx.org/licenses/Apache-2.0.html`,
|
||||
`>Apache-2.0</a>`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Empty licenses",
|
||||
licenses: "",
|
||||
wantNotContain: []string{
|
||||
`<a `,
|
||||
`<span`,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
data := struct{ Licenses string }{Licenses: tt.licenses}
|
||||
|
||||
var buf strings.Builder
|
||||
err := tmpl.Execute(&buf, data)
|
||||
if err != nil {
|
||||
t.Fatalf("Template execution failed: %v", err)
|
||||
}
|
||||
|
||||
output := buf.String()
|
||||
|
||||
// Check for expected content
|
||||
for _, want := range tt.wantContain {
|
||||
if !strings.Contains(output, want) {
|
||||
t.Errorf("Output missing expected content:\nWant: %s\nGot: %s", want, output)
|
||||
}
|
||||
}
|
||||
|
||||
// Check for unexpected content
|
||||
for _, notWant := range tt.wantNotContain {
|
||||
if strings.Contains(output, notWant) {
|
||||
t.Errorf("Output contains unexpected content:\nDon't want: %s\nGot: %s", notWant, output)
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("Template output:\n%s", output)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,3 +1,7 @@
|
||||
// Package middleware provides HTTP middleware for AppView, including
|
||||
// authentication (session-based for web UI, token-based for registry),
|
||||
// identity resolution (handle/DID to PDS endpoint), and hold discovery
|
||||
// for routing blobs to storage endpoints.
|
||||
package middleware
|
||||
|
||||
import (
|
||||
|
||||
395
pkg/appview/middleware/auth_test.go
Normal file
395
pkg/appview/middleware/auth_test.go
Normal file
@@ -0,0 +1,395 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"atcr.io/pkg/appview/db"
|
||||
)
|
||||
|
||||
func TestGetUser_NoContext(t *testing.T) {
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
user := GetUser(req)
|
||||
if user != nil {
|
||||
t.Error("Expected nil user when no context is set")
|
||||
}
|
||||
}
|
||||
|
||||
// setupTestDB creates an in-memory SQLite database for testing
|
||||
func setupTestDB(t *testing.T) *sql.DB {
|
||||
database, err := db.InitDB(":memory:", true)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() {
|
||||
database.Close()
|
||||
})
|
||||
|
||||
return database
|
||||
}
|
||||
|
||||
// TestRequireAuth_ValidSession tests RequireAuth with a valid session
|
||||
func TestRequireAuth_ValidSession(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
store := db.NewSessionStore(database)
|
||||
|
||||
// Create a user first (required by foreign key)
|
||||
_, err := database.Exec(
|
||||
"INSERT INTO users (did, handle, pds_endpoint, last_seen) VALUES (?, ?, ?, ?)",
|
||||
"did:plc:test123", "alice.bsky.social", "https://pds.example.com", time.Now(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a session
|
||||
sessionID, err := store.Create("did:plc:test123", "alice.bsky.social", "https://pds.example.com", 24*time.Hour)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a test handler that checks user context
|
||||
handlerCalled := false
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
handlerCalled = true
|
||||
user := GetUser(r)
|
||||
assert.NotNil(t, user)
|
||||
assert.Equal(t, "did:plc:test123", user.DID)
|
||||
assert.Equal(t, "alice.bsky.social", user.Handle)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
// Wrap with RequireAuth middleware
|
||||
middleware := RequireAuth(store, database)
|
||||
wrappedHandler := middleware(handler)
|
||||
|
||||
// Create request with session cookie
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
req.AddCookie(&http.Cookie{
|
||||
Name: "atcr_session",
|
||||
Value: sessionID,
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
wrappedHandler.ServeHTTP(w, req)
|
||||
|
||||
assert.True(t, handlerCalled, "handler should have been called")
|
||||
assert.Equal(t, http.StatusOK, w.Code)
|
||||
}
|
||||
|
||||
// TestRequireAuth_MissingSession tests RequireAuth redirects when no session
|
||||
func TestRequireAuth_MissingSession(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
store := db.NewSessionStore(database)
|
||||
|
||||
handlerCalled := false
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
handlerCalled = true
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
middleware := RequireAuth(store, database)
|
||||
wrappedHandler := middleware(handler)
|
||||
|
||||
// Request without session cookie
|
||||
req := httptest.NewRequest("GET", "/protected", nil)
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
wrappedHandler.ServeHTTP(w, req)
|
||||
|
||||
assert.False(t, handlerCalled, "handler should not have been called")
|
||||
assert.Equal(t, http.StatusFound, w.Code)
|
||||
assert.Contains(t, w.Header().Get("Location"), "/auth/oauth/login")
|
||||
assert.Contains(t, w.Header().Get("Location"), "return_to=%2Fprotected")
|
||||
}
|
||||
|
||||
// TestRequireAuth_InvalidSession tests RequireAuth redirects when session is invalid
|
||||
func TestRequireAuth_InvalidSession(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
store := db.NewSessionStore(database)
|
||||
|
||||
handlerCalled := false
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
handlerCalled = true
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
middleware := RequireAuth(store, database)
|
||||
wrappedHandler := middleware(handler)
|
||||
|
||||
// Request with invalid session ID
|
||||
req := httptest.NewRequest("GET", "/protected", nil)
|
||||
req.AddCookie(&http.Cookie{
|
||||
Name: "atcr_session",
|
||||
Value: "invalid-session-id",
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
wrappedHandler.ServeHTTP(w, req)
|
||||
|
||||
assert.False(t, handlerCalled, "handler should not have been called")
|
||||
assert.Equal(t, http.StatusFound, w.Code)
|
||||
assert.Contains(t, w.Header().Get("Location"), "/auth/oauth/login")
|
||||
}
|
||||
|
||||
// TestRequireAuth_WithQueryParams tests RequireAuth preserves query parameters in return_to
|
||||
func TestRequireAuth_WithQueryParams(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
store := db.NewSessionStore(database)
|
||||
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
middleware := RequireAuth(store, database)
|
||||
wrappedHandler := middleware(handler)
|
||||
|
||||
// Request without session but with query parameters
|
||||
req := httptest.NewRequest("GET", "/protected?foo=bar&baz=qux", nil)
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
wrappedHandler.ServeHTTP(w, req)
|
||||
|
||||
assert.Equal(t, http.StatusFound, w.Code)
|
||||
location := w.Header().Get("Location")
|
||||
assert.Contains(t, location, "/auth/oauth/login")
|
||||
assert.Contains(t, location, "return_to=")
|
||||
// Query parameters should be preserved in return_to
|
||||
assert.Contains(t, location, "foo%3Dbar")
|
||||
}
|
||||
|
||||
// TestRequireAuth_DatabaseFallback tests fallback to session data when DB lookup has no avatar
|
||||
func TestRequireAuth_DatabaseFallback(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
store := db.NewSessionStore(database)
|
||||
|
||||
// Create a user without avatar (required by foreign key)
|
||||
_, err := database.Exec(
|
||||
"INSERT INTO users (did, handle, pds_endpoint, last_seen, avatar) VALUES (?, ?, ?, ?, ?)",
|
||||
"did:plc:test123", "alice.bsky.social", "https://pds.example.com", time.Now(), "",
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a session
|
||||
sessionID, err := store.Create("did:plc:test123", "alice.bsky.social", "https://pds.example.com", 24*time.Hour)
|
||||
require.NoError(t, err)
|
||||
|
||||
handlerCalled := false
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
handlerCalled = true
|
||||
user := GetUser(r)
|
||||
assert.NotNil(t, user)
|
||||
assert.Equal(t, "did:plc:test123", user.DID)
|
||||
assert.Equal(t, "alice.bsky.social", user.Handle)
|
||||
// User exists in DB but has no avatar - should use DB version
|
||||
assert.Empty(t, user.Avatar, "avatar should be empty when not set in DB")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
middleware := RequireAuth(store, database)
|
||||
wrappedHandler := middleware(handler)
|
||||
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
req.AddCookie(&http.Cookie{
|
||||
Name: "atcr_session",
|
||||
Value: sessionID,
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
wrappedHandler.ServeHTTP(w, req)
|
||||
|
||||
assert.True(t, handlerCalled)
|
||||
assert.Equal(t, http.StatusOK, w.Code)
|
||||
}
|
||||
|
||||
// TestOptionalAuth_ValidSession tests OptionalAuth with valid session
|
||||
func TestOptionalAuth_ValidSession(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
store := db.NewSessionStore(database)
|
||||
|
||||
// Create a user first (required by foreign key)
|
||||
_, err := database.Exec(
|
||||
"INSERT INTO users (did, handle, pds_endpoint, last_seen) VALUES (?, ?, ?, ?)",
|
||||
"did:plc:test123", "alice.bsky.social", "https://pds.example.com", time.Now(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a session
|
||||
sessionID, err := store.Create("did:plc:test123", "alice.bsky.social", "https://pds.example.com", 24*time.Hour)
|
||||
require.NoError(t, err)
|
||||
|
||||
handlerCalled := false
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
handlerCalled = true
|
||||
user := GetUser(r)
|
||||
assert.NotNil(t, user, "user should be set when session is valid")
|
||||
assert.Equal(t, "did:plc:test123", user.DID)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
middleware := OptionalAuth(store, database)
|
||||
wrappedHandler := middleware(handler)
|
||||
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
req.AddCookie(&http.Cookie{
|
||||
Name: "atcr_session",
|
||||
Value: sessionID,
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
wrappedHandler.ServeHTTP(w, req)
|
||||
|
||||
assert.True(t, handlerCalled)
|
||||
assert.Equal(t, http.StatusOK, w.Code)
|
||||
}
|
||||
|
||||
// TestOptionalAuth_NoSession tests OptionalAuth continues without user when no session
|
||||
func TestOptionalAuth_NoSession(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
store := db.NewSessionStore(database)
|
||||
|
||||
handlerCalled := false
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
handlerCalled = true
|
||||
user := GetUser(r)
|
||||
assert.Nil(t, user, "user should be nil when no session")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
middleware := OptionalAuth(store, database)
|
||||
wrappedHandler := middleware(handler)
|
||||
|
||||
// Request without session cookie
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
wrappedHandler.ServeHTTP(w, req)
|
||||
|
||||
assert.True(t, handlerCalled, "handler should still be called")
|
||||
assert.Equal(t, http.StatusOK, w.Code)
|
||||
}
|
||||
|
||||
// TestOptionalAuth_InvalidSession tests OptionalAuth continues without user when session invalid
|
||||
func TestOptionalAuth_InvalidSession(t *testing.T) {
|
||||
database := setupTestDB(t)
|
||||
store := db.NewSessionStore(database)
|
||||
|
||||
handlerCalled := false
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
handlerCalled = true
|
||||
user := GetUser(r)
|
||||
assert.Nil(t, user, "user should be nil when session is invalid")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
middleware := OptionalAuth(store, database)
|
||||
wrappedHandler := middleware(handler)
|
||||
|
||||
// Request with invalid session ID
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
req.AddCookie(&http.Cookie{
|
||||
Name: "atcr_session",
|
||||
Value: "invalid-session-id",
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
wrappedHandler.ServeHTTP(w, req)
|
||||
|
||||
assert.True(t, handlerCalled, "handler should still be called")
|
||||
assert.Equal(t, http.StatusOK, w.Code)
|
||||
}
|
||||
|
||||
// TestMiddleware_ConcurrentAccess tests concurrent requests through middleware
|
||||
func TestMiddleware_ConcurrentAccess(t *testing.T) {
|
||||
// Use a shared in-memory database for concurrent access
|
||||
// (SQLite's default :memory: creates separate DBs per connection)
|
||||
database, err := db.InitDB("file::memory:?cache=shared", true)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
database.Close()
|
||||
})
|
||||
|
||||
store := db.NewSessionStore(database)
|
||||
|
||||
// Pre-create all users and sessions before concurrent access
|
||||
// This ensures database is fully initialized before goroutines start
|
||||
sessionIDs := make([]string, 10)
|
||||
for i := 0; i < 10; i++ {
|
||||
did := fmt.Sprintf("did:plc:user%d", i)
|
||||
handle := fmt.Sprintf("user%d.bsky.social", i)
|
||||
|
||||
// Create user first
|
||||
_, err := database.Exec(
|
||||
"INSERT INTO users (did, handle, pds_endpoint, last_seen) VALUES (?, ?, ?, ?)",
|
||||
did, handle, "https://pds.example.com", time.Now(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create session
|
||||
sessionID, err := store.Create(
|
||||
did,
|
||||
handle,
|
||||
"https://pds.example.com",
|
||||
24*time.Hour,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
sessionIDs[i] = sessionID
|
||||
}
|
||||
|
||||
// All setup complete - now test concurrent access
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
user := GetUser(r)
|
||||
if user != nil {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
} else {
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
}
|
||||
})
|
||||
|
||||
middleware := RequireAuth(store, database)
|
||||
wrappedHandler := middleware(handler)
|
||||
|
||||
// Collect results from all goroutines
|
||||
results := make([]int, 10)
|
||||
var wg sync.WaitGroup
|
||||
var mu sync.Mutex // Protect results map
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
wg.Add(1)
|
||||
go func(index int, sessionID string) {
|
||||
defer wg.Done()
|
||||
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
req.AddCookie(&http.Cookie{
|
||||
Name: "atcr_session",
|
||||
Value: sessionID,
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
wrappedHandler.ServeHTTP(w, req)
|
||||
|
||||
mu.Lock()
|
||||
results[index] = w.Code
|
||||
mu.Unlock()
|
||||
}(i, sessionIDs[i])
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Check all results after concurrent execution
|
||||
// Note: Some failures are expected with in-memory SQLite under high concurrency
|
||||
// We consider the test successful if most requests succeed
|
||||
successCount := 0
|
||||
for _, code := range results {
|
||||
if code == http.StatusOK {
|
||||
successCount++
|
||||
}
|
||||
}
|
||||
|
||||
// At least 7 out of 10 should succeed (70%)
|
||||
assert.GreaterOrEqual(t, successCount, 7, "Most concurrent requests should succeed")
|
||||
}
|
||||
@@ -4,12 +4,12 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/bluesky-social/indigo/atproto/identity"
|
||||
"github.com/bluesky-social/indigo/atproto/syntax"
|
||||
"github.com/distribution/distribution/v3"
|
||||
"github.com/distribution/distribution/v3/registry/api/errcode"
|
||||
registrymw "github.com/distribution/distribution/v3/registry/middleware/registry"
|
||||
"github.com/distribution/distribution/v3/registry/storage/driver"
|
||||
"github.com/distribution/reference"
|
||||
@@ -18,30 +18,46 @@ import (
|
||||
"atcr.io/pkg/atproto"
|
||||
"atcr.io/pkg/auth"
|
||||
"atcr.io/pkg/auth/oauth"
|
||||
"atcr.io/pkg/auth/token"
|
||||
)
|
||||
|
||||
// Global refresher instance (set by main.go)
|
||||
var globalRefresher *oauth.Refresher
|
||||
// holdDIDKey is the context key for storing hold DID
|
||||
const holdDIDKey contextKey = "hold.did"
|
||||
|
||||
// Global database instance (set by main.go for pull tracking)
|
||||
var globalDatabase interface {
|
||||
IncrementPullCount(did, repository string) error
|
||||
IncrementPushCount(did, repository string) error
|
||||
}
|
||||
// Global variables for initialization only
|
||||
// These are set by main.go during startup and copied into NamespaceResolver instances.
|
||||
// After initialization, request handling uses the NamespaceResolver's instance fields.
|
||||
var (
|
||||
globalRefresher *oauth.Refresher
|
||||
globalDatabase storage.DatabaseMetrics
|
||||
globalAuthorizer auth.HoldAuthorizer
|
||||
globalReadmeCache storage.ReadmeCache
|
||||
)
|
||||
|
||||
// SetGlobalRefresher sets the global OAuth refresher instance
|
||||
// SetGlobalRefresher sets the OAuth refresher instance during initialization
|
||||
// Must be called before the registry starts serving requests
|
||||
func SetGlobalRefresher(refresher *oauth.Refresher) {
|
||||
globalRefresher = refresher
|
||||
}
|
||||
|
||||
// SetGlobalDatabase sets the global database instance for metrics tracking
|
||||
func SetGlobalDatabase(database interface {
|
||||
IncrementPullCount(did, repository string) error
|
||||
IncrementPushCount(did, repository string) error
|
||||
}) {
|
||||
// SetGlobalDatabase sets the database instance during initialization
|
||||
// Must be called before the registry starts serving requests
|
||||
func SetGlobalDatabase(database storage.DatabaseMetrics) {
|
||||
globalDatabase = database
|
||||
}
|
||||
|
||||
// SetGlobalAuthorizer sets the authorizer instance during initialization
|
||||
// Must be called before the registry starts serving requests
|
||||
func SetGlobalAuthorizer(authorizer auth.HoldAuthorizer) {
|
||||
globalAuthorizer = authorizer
|
||||
}
|
||||
|
||||
// SetGlobalReadmeCache sets the readme cache instance during initialization
|
||||
// Must be called before the registry starts serving requests
|
||||
func SetGlobalReadmeCache(readmeCache storage.ReadmeCache) {
|
||||
globalReadmeCache = readmeCache
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Register the name resolution middleware
|
||||
registrymw.Register("atproto-resolver", initATProtoResolver)
|
||||
@@ -50,29 +66,58 @@ func init() {
|
||||
// NamespaceResolver wraps a namespace and resolves names
|
||||
type NamespaceResolver struct {
|
||||
distribution.Namespace
|
||||
directory identity.Directory
|
||||
defaultStorageEndpoint string
|
||||
repositories sync.Map // Cache of RoutingRepository instances by key (did:reponame)
|
||||
defaultHoldDID string // Default hold DID (e.g., "did:web:hold01.atcr.io")
|
||||
baseURL string // Base URL for error messages (e.g., "https://atcr.io")
|
||||
testMode bool // If true, fallback to default hold when user's hold is unreachable
|
||||
repositories sync.Map // Cache of RoutingRepository instances by key (did:reponame)
|
||||
refresher *oauth.Refresher // OAuth session manager (copied from global on init)
|
||||
database storage.DatabaseMetrics // Metrics database (copied from global on init)
|
||||
authorizer auth.HoldAuthorizer // Hold authorization (copied from global on init)
|
||||
readmeCache storage.ReadmeCache // README cache (copied from global on init)
|
||||
}
|
||||
|
||||
// initATProtoResolver initializes the name resolution middleware
|
||||
func initATProtoResolver(ctx context.Context, ns distribution.Namespace, _ driver.StorageDriver, options map[string]any) (distribution.Namespace, error) {
|
||||
// Use indigo's default directory (includes caching)
|
||||
directory := identity.DefaultDirectory()
|
||||
|
||||
// Get default storage endpoint from config (optional)
|
||||
defaultStorageEndpoint := ""
|
||||
if endpoint, ok := options["default_storage_endpoint"].(string); ok {
|
||||
defaultStorageEndpoint = endpoint
|
||||
// Get default hold DID from config (required)
|
||||
// Expected format: "did:web:hold01.atcr.io"
|
||||
defaultHoldDID := ""
|
||||
if holdDID, ok := options["default_hold_did"].(string); ok {
|
||||
defaultHoldDID = holdDID
|
||||
}
|
||||
|
||||
// Get base URL from config (for error messages)
|
||||
baseURL := ""
|
||||
if url, ok := options["base_url"].(string); ok {
|
||||
baseURL = url
|
||||
}
|
||||
|
||||
// Check test mode from options (passed via env var)
|
||||
testMode := false
|
||||
if tm, ok := options["test_mode"].(bool); ok {
|
||||
testMode = tm
|
||||
}
|
||||
|
||||
// Copy shared services from globals into the instance
|
||||
// This avoids accessing globals during request handling
|
||||
return &NamespaceResolver{
|
||||
Namespace: ns,
|
||||
directory: directory,
|
||||
defaultStorageEndpoint: defaultStorageEndpoint,
|
||||
Namespace: ns,
|
||||
defaultHoldDID: defaultHoldDID,
|
||||
baseURL: baseURL,
|
||||
testMode: testMode,
|
||||
refresher: globalRefresher,
|
||||
database: globalDatabase,
|
||||
authorizer: globalAuthorizer,
|
||||
readmeCache: globalReadmeCache,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// authErrorMessage creates a user-friendly auth error with login URL
|
||||
func (nr *NamespaceResolver) authErrorMessage(message string) error {
|
||||
loginURL := fmt.Sprintf("%s/auth/oauth/login", nr.baseURL)
|
||||
fullMessage := fmt.Sprintf("%s - please re-authenticate at %s", message, loginURL)
|
||||
return errcode.ErrorCodeUnauthorized.WithMessage(fullMessage)
|
||||
}
|
||||
|
||||
// Repository resolves the repository name and delegates to underlying namespace
|
||||
// Handles names like:
|
||||
// - atcr.io/alice/myimage → resolve alice to DID
|
||||
@@ -90,33 +135,42 @@ func (nr *NamespaceResolver) Repository(ctx context.Context, name reference.Name
|
||||
identityStr := parts[0]
|
||||
imageName := parts[1]
|
||||
|
||||
// Parse identity (handle or DID)
|
||||
atID, err := syntax.ParseAtIdentifier(identityStr)
|
||||
// Resolve identity to DID, handle, and PDS endpoint
|
||||
did, handle, pdsEndpoint, err := atproto.ResolveIdentity(ctx, identityStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid identity %s: %w", identityStr, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Resolve identity to DID and PDS using indigo's directory
|
||||
ident, err := nr.directory.Lookup(ctx, *atID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to resolve identity %s: %w", identityStr, err)
|
||||
}
|
||||
slog.Debug("Resolved identity", "component", "registry/middleware", "did", did, "pds", pdsEndpoint, "handle", handle)
|
||||
|
||||
did := ident.DID.String()
|
||||
pdsEndpoint := ident.PDSEndpoint()
|
||||
if pdsEndpoint == "" {
|
||||
return nil, fmt.Errorf("no PDS endpoint found for %s", identityStr)
|
||||
}
|
||||
|
||||
fmt.Printf("DEBUG [registry/middleware]: Resolved identity: did=%s, pds=%s, handle=%s\n", did, pdsEndpoint, ident.Handle.String())
|
||||
|
||||
// Query for storage endpoint - either user's hold or default hold service
|
||||
storageEndpoint := nr.findStorageEndpoint(ctx, did, pdsEndpoint)
|
||||
if storageEndpoint == "" {
|
||||
// Query for hold DID - either user's hold or default hold service
|
||||
holdDID := nr.findHoldDID(ctx, did, pdsEndpoint)
|
||||
if holdDID == "" {
|
||||
// This is a fatal configuration error - registry cannot function without a hold service
|
||||
return nil, fmt.Errorf("no storage endpoint configured: ensure default_storage_endpoint is set in middleware config")
|
||||
return nil, fmt.Errorf("no hold DID configured: ensure default_hold_did is set in middleware config")
|
||||
}
|
||||
ctx = context.WithValue(ctx, holdDIDKey, holdDID)
|
||||
|
||||
// Auto-reconcile crew membership on first push/pull
|
||||
// This ensures users can push immediately after docker login without web sign-in
|
||||
// EnsureCrewMembership is best-effort and logs errors without failing the request
|
||||
if holdDID != "" && nr.refresher != nil {
|
||||
slog.Debug("Auto-reconciling crew membership", "component", "registry/middleware", "did", did, "hold_did", holdDID)
|
||||
client := atproto.NewClient(pdsEndpoint, did, "")
|
||||
storage.EnsureCrewMembership(ctx, client, nr.refresher, holdDID)
|
||||
}
|
||||
|
||||
// Get service token for hold authentication
|
||||
var serviceToken string
|
||||
if nr.refresher != nil {
|
||||
var err error
|
||||
serviceToken, err = token.GetOrFetchServiceToken(ctx, nr.refresher, did, holdDID, pdsEndpoint)
|
||||
if err != nil {
|
||||
slog.Error("Failed to get service token", "component", "registry/middleware", "did", did, "error", err)
|
||||
slog.Error("User needs to re-authenticate via credential helper", "component", "registry/middleware")
|
||||
return nil, nr.authErrorMessage("OAuth session expired")
|
||||
}
|
||||
}
|
||||
ctx = context.WithValue(ctx, "storage.endpoint", storageEndpoint)
|
||||
|
||||
// Create a new reference with identity/image format
|
||||
// Use the identity (or DID) as the namespace to ensure canonical format
|
||||
@@ -138,15 +192,15 @@ func (nr *NamespaceResolver) Repository(ctx context.Context, name reference.Name
|
||||
// Fall back to Basic Auth token cache (for users who used app passwords)
|
||||
var atprotoClient *atproto.Client
|
||||
|
||||
if globalRefresher != nil {
|
||||
if nr.refresher != nil {
|
||||
// Try OAuth flow first
|
||||
session, err := globalRefresher.GetSession(ctx, did)
|
||||
session, err := nr.refresher.GetSession(ctx, did)
|
||||
if err == nil {
|
||||
// OAuth session available - use indigo's API client (handles DPoP automatically)
|
||||
apiClient := session.APIClient()
|
||||
atprotoClient = atproto.NewClientWithIndigoClient(pdsEndpoint, did, apiClient)
|
||||
} else {
|
||||
fmt.Printf("DEBUG [registry/middleware]: OAuth refresh failed for DID=%s: %v, falling back to Basic Auth\n", did, err)
|
||||
slog.Debug("OAuth refresh failed, falling back to Basic Auth", "component", "registry/middleware", "did", did, "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -154,10 +208,10 @@ func (nr *NamespaceResolver) Repository(ctx context.Context, name reference.Name
|
||||
if atprotoClient == nil {
|
||||
accessToken, ok := auth.GetGlobalTokenCache().Get(did)
|
||||
if !ok {
|
||||
fmt.Printf("DEBUG [registry/middleware]: No cached access token found for DID=%s (neither OAuth nor Basic Auth)\n", did)
|
||||
slog.Debug("No cached access token found (neither OAuth nor Basic Auth)", "component", "registry/middleware", "did", did)
|
||||
accessToken = "" // Will fail on manifest push, but let it try
|
||||
} else {
|
||||
fmt.Printf("DEBUG [registry/middleware]: Using Basic Auth access token for DID=%s (length=%d)\n", did, len(accessToken))
|
||||
slog.Debug("Using Basic Auth access token", "component", "registry/middleware", "did", did, "token_length", len(accessToken))
|
||||
}
|
||||
atprotoClient = atproto.NewClient(pdsEndpoint, did, accessToken)
|
||||
}
|
||||
@@ -170,15 +224,31 @@ func (nr *NamespaceResolver) Repository(ctx context.Context, name reference.Name
|
||||
// Cache key is DID + repository name
|
||||
cacheKey := did + ":" + repositoryName
|
||||
|
||||
// Check cache first
|
||||
// Check cache first and update service token
|
||||
if cached, ok := nr.repositories.Load(cacheKey); ok {
|
||||
return cached.(*storage.RoutingRepository), nil
|
||||
cachedRepo := cached.(*storage.RoutingRepository)
|
||||
// Always update the service token even for cached repos (token may have been renewed)
|
||||
cachedRepo.Ctx.ServiceToken = serviceToken
|
||||
return cachedRepo, nil
|
||||
}
|
||||
|
||||
// Create routing repository - routes manifests to ATProto, blobs to hold service
|
||||
// The registry is stateless - no local storage is used
|
||||
// Pass storage endpoint and DID as parameters (can't use context as it gets lost)
|
||||
routingRepo := storage.NewRoutingRepository(repo, atprotoClient, repositoryName, storageEndpoint, did, globalDatabase)
|
||||
// Bundle all context into a single RegistryContext struct
|
||||
registryCtx := &storage.RegistryContext{
|
||||
DID: did,
|
||||
Handle: handle,
|
||||
HoldDID: holdDID,
|
||||
PDSEndpoint: pdsEndpoint,
|
||||
Repository: repositoryName,
|
||||
ServiceToken: serviceToken, // Cached service token from middleware validation
|
||||
ATProtoClient: atprotoClient,
|
||||
Database: nr.database,
|
||||
Authorizer: nr.authorizer,
|
||||
Refresher: nr.refresher,
|
||||
ReadmeCache: nr.readmeCache,
|
||||
}
|
||||
routingRepo := storage.NewRoutingRepository(repo, registryCtx)
|
||||
|
||||
// Cache the repository
|
||||
nr.repositories.Store(cacheKey, routingRepo)
|
||||
@@ -201,34 +271,42 @@ func (nr *NamespaceResolver) BlobStatter() distribution.BlobStatter {
|
||||
return nr.Namespace.BlobStatter()
|
||||
}
|
||||
|
||||
// findStorageEndpoint determines which hold endpoint to use for blob storage
|
||||
// findHoldDID determines which hold DID to use for blob storage
|
||||
// Priority order:
|
||||
// 1. User's sailor profile defaultHold (if set)
|
||||
// 2. User's own hold record (io.atcr.hold)
|
||||
// 3. AppView's default hold endpoint
|
||||
// Returns the storage endpoint URL, or empty string if none configured
|
||||
func (nr *NamespaceResolver) findStorageEndpoint(ctx context.Context, did, pdsEndpoint string) string {
|
||||
// 3. AppView's default hold DID
|
||||
// Returns a hold DID (e.g., "did:web:hold01.atcr.io"), or empty string if none configured
|
||||
func (nr *NamespaceResolver) findHoldDID(ctx context.Context, did, pdsEndpoint string) string {
|
||||
// Create ATProto client (without auth - reading public records)
|
||||
client := atproto.NewClient(pdsEndpoint, did, "")
|
||||
|
||||
// 1. Check for sailor profile
|
||||
profile, err := atproto.GetProfile(ctx, client)
|
||||
// Check for sailor profile
|
||||
profile, err := storage.GetProfile(ctx, client)
|
||||
if err != nil {
|
||||
// Error reading profile (not a 404) - log and continue
|
||||
fmt.Printf("WARNING: failed to read profile for %s: %v\n", did, err)
|
||||
slog.Warn("Failed to read profile", "did", did, "error", err)
|
||||
}
|
||||
|
||||
if profile != nil && profile.DefaultHold != "" {
|
||||
// Profile exists with defaultHold set - use it
|
||||
// Profile exists with defaultHold set
|
||||
// In test mode, verify it's reachable before using it
|
||||
if nr.testMode {
|
||||
if nr.isHoldReachable(ctx, profile.DefaultHold) {
|
||||
return profile.DefaultHold
|
||||
}
|
||||
slog.Debug("User's defaultHold unreachable, falling back to default", "component", "registry/middleware/testmode", "default_hold", profile.DefaultHold)
|
||||
return nr.defaultHoldDID
|
||||
}
|
||||
return profile.DefaultHold
|
||||
}
|
||||
|
||||
// 2. Profile doesn't exist or defaultHold is null/empty
|
||||
// Profile doesn't exist or defaultHold is null/empty
|
||||
// Check for user's own hold records
|
||||
records, err := client.ListRecords(ctx, atproto.HoldCollection, 10)
|
||||
if err != nil {
|
||||
// Failed to query holds, use default
|
||||
return nr.defaultStorageEndpoint
|
||||
return nr.defaultHoldDID
|
||||
}
|
||||
|
||||
// Find the first hold record
|
||||
@@ -238,12 +316,31 @@ func (nr *NamespaceResolver) findStorageEndpoint(ctx context.Context, did, pdsEn
|
||||
continue
|
||||
}
|
||||
|
||||
// Return the endpoint from the first hold
|
||||
// Return the endpoint from the first hold (normalize to DID if URL)
|
||||
if holdRecord.Endpoint != "" {
|
||||
return holdRecord.Endpoint
|
||||
return atproto.ResolveHoldDIDFromURL(holdRecord.Endpoint)
|
||||
}
|
||||
}
|
||||
|
||||
// 3. No profile defaultHold and no own hold records - use AppView default
|
||||
return nr.defaultStorageEndpoint
|
||||
// No profile defaultHold and no own hold records - use AppView default
|
||||
return nr.defaultHoldDID
|
||||
}
|
||||
|
||||
// isHoldReachable checks if a hold service is reachable
|
||||
// Used in test mode to fallback to default hold when user's hold is unavailable
|
||||
func (nr *NamespaceResolver) isHoldReachable(ctx context.Context, holdDID string) bool {
|
||||
// Try to fetch the DID document
|
||||
hostname := strings.TrimPrefix(holdDID, "did:web:")
|
||||
|
||||
// Try HTTP first (local), then HTTPS
|
||||
for _, scheme := range []string{"http", "https"} {
|
||||
testURL := fmt.Sprintf("%s://%s/.well-known/did.json", scheme, hostname)
|
||||
client := atproto.NewClient("", "", "")
|
||||
_, err := client.FetchDIDDocument(ctx, testURL)
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
401
pkg/appview/middleware/registry_test.go
Normal file
401
pkg/appview/middleware/registry_test.go
Normal file
@@ -0,0 +1,401 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/distribution/distribution/v3"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"atcr.io/pkg/atproto"
|
||||
)
|
||||
|
||||
// mockNamespace is a mock implementation of distribution.Namespace
|
||||
type mockNamespace struct {
|
||||
distribution.Namespace
|
||||
repositories map[string]distribution.Repository
|
||||
}
|
||||
|
||||
func (m *mockNamespace) Repository(ctx context.Context, name reference.Named) (distribution.Repository, error) {
|
||||
if m.repositories == nil {
|
||||
return nil, fmt.Errorf("repository not found: %s", name.Name())
|
||||
}
|
||||
if repo, ok := m.repositories[name.Name()]; ok {
|
||||
return repo, nil
|
||||
}
|
||||
return nil, fmt.Errorf("repository not found: %s", name.Name())
|
||||
}
|
||||
|
||||
func (m *mockNamespace) Repositories(ctx context.Context, repos []string, last string) (int, error) {
|
||||
// Return empty result for mock
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (m *mockNamespace) Blobs() distribution.BlobEnumerator {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockNamespace) BlobStatter() distribution.BlobStatter {
|
||||
return nil
|
||||
}
|
||||
|
||||
// mockRepository is a minimal mock implementation
|
||||
type mockRepository struct {
|
||||
distribution.Repository
|
||||
name string
|
||||
}
|
||||
|
||||
func TestSetGlobalRefresher(t *testing.T) {
|
||||
// Test that SetGlobalRefresher doesn't panic
|
||||
SetGlobalRefresher(nil)
|
||||
// If we get here without panic, test passes
|
||||
}
|
||||
|
||||
func TestSetGlobalDatabase(t *testing.T) {
|
||||
SetGlobalDatabase(nil)
|
||||
// If we get here without panic, test passes
|
||||
}
|
||||
|
||||
func TestSetGlobalAuthorizer(t *testing.T) {
|
||||
SetGlobalAuthorizer(nil)
|
||||
// If we get here without panic, test passes
|
||||
}
|
||||
|
||||
func TestSetGlobalReadmeCache(t *testing.T) {
|
||||
SetGlobalReadmeCache(nil)
|
||||
// If we get here without panic, test passes
|
||||
}
|
||||
|
||||
// TestInitATProtoResolver tests the initialization function
|
||||
func TestInitATProtoResolver(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockNS := &mockNamespace{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
options map[string]any
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "with default hold DID",
|
||||
options: map[string]any{
|
||||
"default_hold_did": "did:web:hold01.atcr.io",
|
||||
"base_url": "https://atcr.io",
|
||||
"test_mode": false,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "with test mode enabled",
|
||||
options: map[string]any{
|
||||
"default_hold_did": "did:web:hold01.atcr.io",
|
||||
"base_url": "https://atcr.io",
|
||||
"test_mode": true,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "without options",
|
||||
options: map[string]any{},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ns, err := initATProtoResolver(ctx, mockNS, nil, tt.options)
|
||||
if tt.wantErr {
|
||||
assert.Error(t, err)
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, ns)
|
||||
|
||||
resolver, ok := ns.(*NamespaceResolver)
|
||||
require.True(t, ok, "expected NamespaceResolver type")
|
||||
|
||||
if holdDID, ok := tt.options["default_hold_did"].(string); ok {
|
||||
assert.Equal(t, holdDID, resolver.defaultHoldDID)
|
||||
}
|
||||
if baseURL, ok := tt.options["base_url"].(string); ok {
|
||||
assert.Equal(t, baseURL, resolver.baseURL)
|
||||
}
|
||||
if testMode, ok := tt.options["test_mode"].(bool); ok {
|
||||
assert.Equal(t, testMode, resolver.testMode)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestAuthErrorMessage tests the error message formatting
|
||||
func TestAuthErrorMessage(t *testing.T) {
|
||||
resolver := &NamespaceResolver{
|
||||
baseURL: "https://atcr.io",
|
||||
}
|
||||
|
||||
err := resolver.authErrorMessage("OAuth session expired")
|
||||
assert.Contains(t, err.Error(), "OAuth session expired")
|
||||
assert.Contains(t, err.Error(), "https://atcr.io/auth/oauth/login")
|
||||
}
|
||||
|
||||
// TestFindHoldDID_DefaultFallback tests default hold DID fallback
|
||||
func TestFindHoldDID_DefaultFallback(t *testing.T) {
|
||||
// Start a mock PDS server that returns 404 for profile and empty list for holds
|
||||
mockPDS := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/xrpc/com.atproto.repo.getRecord" {
|
||||
// Profile not found
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
if r.URL.Path == "/xrpc/com.atproto.repo.listRecords" {
|
||||
// Empty hold records
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(map[string]any{
|
||||
"records": []any{},
|
||||
})
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}))
|
||||
defer mockPDS.Close()
|
||||
|
||||
resolver := &NamespaceResolver{
|
||||
defaultHoldDID: "did:web:default.atcr.io",
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
holdDID := resolver.findHoldDID(ctx, "did:plc:test123", mockPDS.URL)
|
||||
|
||||
assert.Equal(t, "did:web:default.atcr.io", holdDID, "should fall back to default hold DID")
|
||||
}
|
||||
|
||||
// TestFindHoldDID_SailorProfile tests hold discovery from sailor profile
|
||||
func TestFindHoldDID_SailorProfile(t *testing.T) {
|
||||
// Start a mock PDS server that returns a sailor profile
|
||||
mockPDS := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/xrpc/com.atproto.repo.getRecord" {
|
||||
// Return sailor profile with defaultHold
|
||||
profile := atproto.NewSailorProfileRecord("did:web:user.hold.io")
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(map[string]any{
|
||||
"value": profile,
|
||||
})
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}))
|
||||
defer mockPDS.Close()
|
||||
|
||||
resolver := &NamespaceResolver{
|
||||
defaultHoldDID: "did:web:default.atcr.io",
|
||||
testMode: false,
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
holdDID := resolver.findHoldDID(ctx, "did:plc:test123", mockPDS.URL)
|
||||
|
||||
assert.Equal(t, "did:web:user.hold.io", holdDID, "should use sailor profile's defaultHold")
|
||||
}
|
||||
|
||||
// TestFindHoldDID_LegacyHoldRecords tests legacy hold record discovery
|
||||
func TestFindHoldDID_LegacyHoldRecords(t *testing.T) {
|
||||
// Start a mock PDS server that returns hold records
|
||||
mockPDS := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/xrpc/com.atproto.repo.getRecord" {
|
||||
// Profile not found
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
if r.URL.Path == "/xrpc/com.atproto.repo.listRecords" {
|
||||
// Return hold record
|
||||
holdRecord := atproto.NewHoldRecord("https://legacy.hold.io", "alice", true)
|
||||
recordJSON, _ := json.Marshal(holdRecord)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(map[string]any{
|
||||
"records": []any{
|
||||
map[string]any{
|
||||
"uri": "at://did:plc:test123/io.atcr.hold/abc123",
|
||||
"value": json.RawMessage(recordJSON),
|
||||
},
|
||||
},
|
||||
})
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}))
|
||||
defer mockPDS.Close()
|
||||
|
||||
resolver := &NamespaceResolver{
|
||||
defaultHoldDID: "did:web:default.atcr.io",
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
holdDID := resolver.findHoldDID(ctx, "did:plc:test123", mockPDS.URL)
|
||||
|
||||
// Legacy URL should be converted to DID
|
||||
assert.Equal(t, "did:web:legacy.hold.io", holdDID, "should use legacy hold record and convert to DID")
|
||||
}
|
||||
|
||||
// TestFindHoldDID_Priority tests the priority order
|
||||
func TestFindHoldDID_Priority(t *testing.T) {
|
||||
// Start a mock PDS server that returns both profile and hold records
|
||||
mockPDS := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/xrpc/com.atproto.repo.getRecord" {
|
||||
// Return sailor profile with defaultHold (highest priority)
|
||||
profile := atproto.NewSailorProfileRecord("did:web:profile.hold.io")
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(map[string]any{
|
||||
"value": profile,
|
||||
})
|
||||
return
|
||||
}
|
||||
if r.URL.Path == "/xrpc/com.atproto.repo.listRecords" {
|
||||
// Return hold record (should be ignored since profile exists)
|
||||
holdRecord := atproto.NewHoldRecord("https://legacy.hold.io", "alice", true)
|
||||
recordJSON, _ := json.Marshal(holdRecord)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(map[string]any{
|
||||
"records": []any{
|
||||
map[string]any{
|
||||
"uri": "at://did:plc:test123/io.atcr.hold/abc123",
|
||||
"value": json.RawMessage(recordJSON),
|
||||
},
|
||||
},
|
||||
})
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}))
|
||||
defer mockPDS.Close()
|
||||
|
||||
resolver := &NamespaceResolver{
|
||||
defaultHoldDID: "did:web:default.atcr.io",
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
holdDID := resolver.findHoldDID(ctx, "did:plc:test123", mockPDS.URL)
|
||||
|
||||
// Profile should take priority over hold records and default
|
||||
assert.Equal(t, "did:web:profile.hold.io", holdDID, "should prioritize sailor profile over hold records")
|
||||
}
|
||||
|
||||
// TestFindHoldDID_TestModeFallback tests test mode fallback when hold unreachable
|
||||
func TestFindHoldDID_TestModeFallback(t *testing.T) {
|
||||
// Start a mock PDS server that returns a profile with unreachable hold
|
||||
mockPDS := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/xrpc/com.atproto.repo.getRecord" {
|
||||
// Return sailor profile with an unreachable hold
|
||||
profile := atproto.NewSailorProfileRecord("did:web:unreachable.hold.io")
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(map[string]any{
|
||||
"value": profile,
|
||||
})
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}))
|
||||
defer mockPDS.Close()
|
||||
|
||||
resolver := &NamespaceResolver{
|
||||
defaultHoldDID: "did:web:default.atcr.io",
|
||||
testMode: true, // Test mode enabled
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
holdDID := resolver.findHoldDID(ctx, "did:plc:test123", mockPDS.URL)
|
||||
|
||||
// In test mode with unreachable hold, should fall back to default
|
||||
assert.Equal(t, "did:web:default.atcr.io", holdDID, "should fall back to default in test mode when hold unreachable")
|
||||
}
|
||||
|
||||
// TestIsHoldReachable tests the hold reachability check
|
||||
func TestIsHoldReachable(t *testing.T) {
|
||||
// Mock hold server with DID document
|
||||
mockHold := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/.well-known/did.json" {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(map[string]any{
|
||||
"id": "did:web:reachable.hold.io",
|
||||
})
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}))
|
||||
defer mockHold.Close()
|
||||
|
||||
resolver := &NamespaceResolver{}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("reachable hold", func(t *testing.T) {
|
||||
// Extract hostname from test server URL
|
||||
// The mock server URL is like http://127.0.0.1:port, so we use the host part
|
||||
holdDID := fmt.Sprintf("did:web:%s", mockHold.Listener.Addr().String())
|
||||
reachable := resolver.isHoldReachable(ctx, holdDID)
|
||||
assert.True(t, reachable, "should detect reachable hold")
|
||||
})
|
||||
|
||||
t.Run("unreachable hold", func(t *testing.T) {
|
||||
reachable := resolver.isHoldReachable(ctx, "did:web:nonexistent.example.com")
|
||||
assert.False(t, reachable, "should detect unreachable hold")
|
||||
})
|
||||
}
|
||||
|
||||
// TestRepositoryCaching tests that repositories are cached by DID+name
|
||||
func TestRepositoryCaching(t *testing.T) {
|
||||
// This test requires integration with actual repository resolution
|
||||
// For now, we test that the cache key format is correct
|
||||
did := "did:plc:test123"
|
||||
repoName := "myapp"
|
||||
expectedKey := "did:plc:test123:myapp"
|
||||
|
||||
cacheKey := did + ":" + repoName
|
||||
assert.Equal(t, expectedKey, cacheKey, "cache key should be DID:reponame")
|
||||
}
|
||||
|
||||
// TestNamespaceResolver_Repositories tests delegation to underlying namespace
|
||||
func TestNamespaceResolver_Repositories(t *testing.T) {
|
||||
mockNS := &mockNamespace{}
|
||||
resolver := &NamespaceResolver{
|
||||
Namespace: mockNS,
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
repos := []string{}
|
||||
|
||||
// Test delegation (mockNamespace doesn't implement this, so it will return 0, nil)
|
||||
n, err := resolver.Repositories(ctx, repos, "")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 0, n)
|
||||
}
|
||||
|
||||
// TestNamespaceResolver_Blobs tests delegation to underlying namespace
|
||||
func TestNamespaceResolver_Blobs(t *testing.T) {
|
||||
mockNS := &mockNamespace{}
|
||||
resolver := &NamespaceResolver{
|
||||
Namespace: mockNS,
|
||||
}
|
||||
|
||||
// Should not panic
|
||||
blobs := resolver.Blobs()
|
||||
assert.Nil(t, blobs, "mockNamespace returns nil")
|
||||
}
|
||||
|
||||
// TestNamespaceResolver_BlobStatter tests delegation to underlying namespace
|
||||
func TestNamespaceResolver_BlobStatter(t *testing.T) {
|
||||
mockNS := &mockNamespace{}
|
||||
resolver := &NamespaceResolver{
|
||||
Namespace: mockNS,
|
||||
}
|
||||
|
||||
// Should not panic
|
||||
statter := resolver.BlobStatter()
|
||||
assert.Nil(t, statter, "mockNamespace returns nil")
|
||||
}
|
||||
111
pkg/appview/readme/cache.go
Normal file
111
pkg/appview/readme/cache.go
Normal file
@@ -0,0 +1,111 @@
|
||||
// Package readme provides README fetching, rendering, and caching functionality
|
||||
// for container repositories. It fetches markdown content from URLs, renders it
|
||||
// to sanitized HTML using GitHub-flavored markdown, and caches the results in
|
||||
// a database with configurable TTL.
|
||||
package readme
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"log/slog"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Cache stores rendered README HTML in the database
|
||||
type Cache struct {
|
||||
db *sql.DB
|
||||
fetcher *Fetcher
|
||||
ttl time.Duration
|
||||
}
|
||||
|
||||
// NewCache creates a new README cache
|
||||
func NewCache(db *sql.DB, ttl time.Duration) *Cache {
|
||||
if ttl == 0 {
|
||||
ttl = 1 * time.Hour // Default TTL
|
||||
}
|
||||
return &Cache{
|
||||
db: db,
|
||||
fetcher: NewFetcher(),
|
||||
ttl: ttl,
|
||||
}
|
||||
}
|
||||
|
||||
// Get retrieves a README from cache or fetches it
|
||||
func (c *Cache) Get(ctx context.Context, readmeURL string) (string, error) {
|
||||
// Try to get from cache
|
||||
html, fetchedAt, err := c.getFromDB(readmeURL)
|
||||
if err == nil {
|
||||
// Check if cache is still valid
|
||||
if time.Since(fetchedAt) < c.ttl {
|
||||
return html, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Cache miss or expired, fetch fresh content
|
||||
html, err = c.fetcher.FetchAndRender(ctx, readmeURL)
|
||||
if err != nil {
|
||||
// If fetch fails but we have stale cache, return it
|
||||
if html != "" {
|
||||
return html, nil
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Store in cache
|
||||
if err := c.storeInDB(readmeURL, html); err != nil {
|
||||
// Log error but don't fail - we have the content
|
||||
slog.Warn("Failed to cache README", "error", err)
|
||||
}
|
||||
|
||||
return html, nil
|
||||
}
|
||||
|
||||
// getFromDB retrieves cached README from database
|
||||
func (c *Cache) getFromDB(readmeURL string) (string, time.Time, error) {
|
||||
var html string
|
||||
var fetchedAt time.Time
|
||||
|
||||
err := c.db.QueryRow(`
|
||||
SELECT html, fetched_at
|
||||
FROM readme_cache
|
||||
WHERE url = ?
|
||||
`, readmeURL).Scan(&html, &fetchedAt)
|
||||
|
||||
if err != nil {
|
||||
return "", time.Time{}, err
|
||||
}
|
||||
|
||||
return html, fetchedAt, nil
|
||||
}
|
||||
|
||||
// storeInDB stores rendered README in database
|
||||
func (c *Cache) storeInDB(readmeURL, html string) error {
|
||||
_, err := c.db.Exec(`
|
||||
INSERT INTO readme_cache (url, html, fetched_at)
|
||||
VALUES (?, ?, ?)
|
||||
ON CONFLICT(url) DO UPDATE SET
|
||||
html = excluded.html,
|
||||
fetched_at = excluded.fetched_at
|
||||
`, readmeURL, html, time.Now())
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Invalidate removes a README from the cache
|
||||
func (c *Cache) Invalidate(readmeURL string) error {
|
||||
_, err := c.db.Exec(`
|
||||
DELETE FROM readme_cache
|
||||
WHERE url = ?
|
||||
`, readmeURL)
|
||||
return err
|
||||
}
|
||||
|
||||
// Cleanup removes expired entries from the cache
|
||||
func (c *Cache) Cleanup() error {
|
||||
cutoff := time.Now().Add(-c.ttl * 2) // Keep for 2x TTL
|
||||
_, err := c.db.Exec(`
|
||||
DELETE FROM readme_cache
|
||||
WHERE fetched_at < ?
|
||||
`, cutoff)
|
||||
return err
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user