Compare commits
157 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ca69c0d1c2 | ||
|
|
183d89530d | ||
|
|
f20170f595 | ||
|
|
22cf108a5f | ||
|
|
23db9be665 | ||
|
|
d6816fd00e | ||
|
|
385f8987fe | ||
|
|
8adbc7505f | ||
|
|
cdca30f346 | ||
|
|
29ef8138aa | ||
|
|
7d8e195189 | ||
|
|
e886192aeb | ||
|
|
8fb69497e3 | ||
|
|
347e7ac80b | ||
|
|
11a8be1413 | ||
|
|
fcc5fa78bc | ||
|
|
b235e4a7dc | ||
|
|
7d74e76772 | ||
|
|
0827219716 | ||
|
|
7c064ba8b0 | ||
|
|
136c0a0ecc | ||
|
|
dc31ca2f35 | ||
|
|
1e04c91507 | ||
|
|
e6c2099a0f | ||
|
|
5249c9eaab | ||
|
|
2b9ea997ac | ||
|
|
356f9d529a | ||
|
|
f90a46e0a4 | ||
|
|
33548ecf32 | ||
|
|
76383ec764 | ||
|
|
200d8a7bb9 | ||
|
|
5b722b3c73 | ||
|
|
0d00de76c6 | ||
|
|
22b2d69cb3 | ||
|
|
5615dd4132 | ||
|
|
27cf78158b | ||
|
|
dba201998e | ||
|
|
cd4986c0c8 | ||
|
|
6b87539ef8 | ||
|
|
2df5377541 | ||
|
|
10b35642a5 | ||
|
|
abefcfd1ed | ||
|
|
0d723cb708 | ||
|
|
f307d6ea85 | ||
|
|
3085fc726b | ||
|
|
cecf6d4b7c | ||
|
|
f340158a79 | ||
|
|
e3843db9d8 | ||
|
|
83e5c82ca4 | ||
|
|
ec2063ef52 | ||
|
|
8048921f5e | ||
|
|
de02e1f046 | ||
|
|
434a5f1eee | ||
|
|
07bc924a60 | ||
|
|
24c7b03ce5 | ||
|
|
c0cf3fb94f | ||
|
|
92c31835e2 | ||
|
|
8d39daa09d | ||
|
|
ac32a98104 | ||
|
|
150975a9fa | ||
|
|
22d5396589 | ||
|
|
8e45b2eee5 | ||
|
|
9723de0bcd | ||
|
|
914328dbf1 | ||
|
|
b251c8857f | ||
|
|
4ac2b97c33 | ||
|
|
53de92e5d3 | ||
|
|
aad9ebfc8b | ||
|
|
7ba42080c5 | ||
|
|
fbe7338492 | ||
|
|
bc034e3465 | ||
|
|
4d9452bb75 | ||
|
|
cd47945301 | ||
|
|
ef0161fb0e | ||
|
|
834bb8d36c | ||
|
|
2c39a78ac2 | ||
|
|
73109641e8 | ||
|
|
d6114cf549 | ||
|
|
9c9c808eea | ||
|
|
35f7a47af3 | ||
|
|
5d3b6c2047 | ||
|
|
6a52175d70 | ||
|
|
34f342f637 | ||
|
|
ca56a7c309 | ||
|
|
57593a8683 | ||
|
|
3b7455a299 | ||
|
|
865c597188 | ||
|
|
536fa416d4 | ||
|
|
d8b0305ce8 | ||
|
|
f79d6027ad | ||
|
|
0358e2e5ad | ||
|
|
faf63d8344 | ||
|
|
26f049fcbe | ||
|
|
ebb107ebec | ||
|
|
d0843323fe | ||
|
|
b7ed0e7d5b | ||
|
|
dbe0efd949 | ||
|
|
2d7d2fd5ca | ||
|
|
c48a763529 | ||
|
|
a7d3292624 | ||
|
|
b99ae53755 | ||
|
|
57d44389b9 | ||
|
|
8f3d992ce4 | ||
|
|
6272273588 | ||
|
|
950b1f94d0 | ||
|
|
908e124917 | ||
|
|
eb3eed5f7a | ||
|
|
055b34af71 | ||
|
|
23a9b52619 | ||
|
|
4c0f20a32e | ||
|
|
b1767cfb6b | ||
|
|
ac5821593f | ||
|
|
fa9abc28b9 | ||
|
|
3155f91e3a | ||
|
|
9e600649a6 | ||
|
|
64cdb66957 | ||
|
|
51f6917444 | ||
|
|
f27e2e0d93 | ||
|
|
263ec4b7af | ||
|
|
ab7e7c7abc | ||
|
|
3409af6c67 | ||
|
|
d4b88b5105 | ||
|
|
56dd522218 | ||
|
|
9704fe091d | ||
|
|
c82dad81f7 | ||
|
|
2d5039d33c | ||
|
|
e0a2dda1af | ||
|
|
482d921cc8 | ||
|
|
c80b5b2941 | ||
|
|
f5979b8f08 | ||
|
|
f35bf2bcde | ||
|
|
a448e8257b | ||
|
|
487fc8a47e | ||
|
|
e5e59fdcbf | ||
|
|
af815fbc7d | ||
|
|
efef46b15a | ||
|
|
fbcaf56fce | ||
|
|
680e4bdfe2 | ||
|
|
a7175f9e3e | ||
|
|
aa4b32bbd6 | ||
|
|
53e196a261 | ||
|
|
f74bc3018a | ||
|
|
6dd612e157 | ||
|
|
84866f5e74 | ||
|
|
e6bd4c122e | ||
|
|
7dcef54d28 | ||
|
|
506d8b002b | ||
|
|
647c33e164 | ||
|
|
1f0705a218 | ||
|
|
347db5c391 | ||
|
|
e97e51a59c | ||
|
|
045aeb2de5 | ||
|
|
74c90697a7 | ||
|
|
cd6928ec4a | ||
|
|
88998904d6 | ||
|
|
1df1bb57a4 | ||
|
|
f19dfa2716 |
26
.air.hold.toml
Normal file
26
.air.hold.toml
Normal file
@@ -0,0 +1,26 @@
|
||||
root = "."
|
||||
tmp_dir = "tmp"
|
||||
|
||||
[build]
|
||||
pre_cmd = ["go generate ./pkg/hold/..."]
|
||||
cmd = "go build -buildvcs=false -o ./tmp/atcr-hold ./cmd/hold"
|
||||
entrypoint = ["./tmp/atcr-hold", "serve", "--config", "config-hold.example.yaml"]
|
||||
include_ext = ["go", "html", "css", "js"]
|
||||
exclude_dir = ["bin", "tmp", "vendor", "deploy", "docs", ".git", "dist", "pkg/appview", "node_modules"]
|
||||
exclude_regex = ["_test\\.go$", "cbor_gen\\.go$", "\\.min\\.js$", "public/css/style\\.css$", "public/icons\\.svg$"]
|
||||
delay = 3000
|
||||
stop_on_error = true
|
||||
send_interrupt = true
|
||||
kill_delay = 500
|
||||
|
||||
[log]
|
||||
time = false
|
||||
|
||||
[color]
|
||||
main = "blue"
|
||||
watcher = "magenta"
|
||||
build = "yellow"
|
||||
runner = "green"
|
||||
|
||||
[misc]
|
||||
clean_on_exit = true
|
||||
17
.air.toml
17
.air.toml
@@ -2,17 +2,20 @@ root = "."
|
||||
tmp_dir = "tmp"
|
||||
|
||||
[build]
|
||||
# Use polling for Docker volume mounts (inotify doesn't work across mounts)
|
||||
poll = true
|
||||
poll_interval = 500
|
||||
# Pre-build: generate assets if missing (each string is a shell command)
|
||||
pre_cmd = ["[ -f pkg/appview/static/js/htmx.min.js ] || go generate ./..."]
|
||||
cmd = "go build -buildvcs=false -o ./tmp/atcr-appview ./cmd/appview"
|
||||
entrypoint = ["./tmp/atcr-appview", "serve"]
|
||||
pre_cmd = ["go generate ./pkg/appview/..."]
|
||||
cmd = "go build -tags billing -buildvcs=false -o ./tmp/atcr-appview ./cmd/appview"
|
||||
entrypoint = ["./tmp/atcr-appview", "serve", "--config", "config-appview.example.yaml"]
|
||||
include_ext = ["go", "html", "css", "js"]
|
||||
exclude_dir = ["bin", "tmp", "vendor", "deploy", "docs", ".git", "dist"]
|
||||
exclude_regex = ["_test\\.go$"]
|
||||
delay = 1000
|
||||
exclude_dir = ["bin", "tmp", "vendor", "deploy", "docs", ".git", "dist", "node_modules", "pkg/hold"]
|
||||
exclude_regex = ["_test\\.go$", "cbor_gen\\.go$", "\\.min\\.js$", "public/css/style\\.css$", "public/icons\\.svg$"]
|
||||
delay = 3000
|
||||
stop_on_error = true
|
||||
send_interrupt = true
|
||||
kill_delay = 500
|
||||
kill_delay = 3000
|
||||
|
||||
[log]
|
||||
time = false
|
||||
|
||||
3
.claudeignore
Normal file
3
.claudeignore
Normal file
@@ -0,0 +1,3 @@
|
||||
# Generated files
|
||||
pkg/appview/public/css/style.css
|
||||
pkg/appview/public/js/bundle.min.js
|
||||
@@ -1,122 +0,0 @@
|
||||
# ATCR AppView Configuration
|
||||
# Copy this file to .env.appview and fill in your values
|
||||
# Load with: source .env.appview && ./bin/atcr-appview serve
|
||||
|
||||
# ==============================================================================
|
||||
# Server Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# HTTP listen address (default: :5000)
|
||||
ATCR_HTTP_ADDR=:5000
|
||||
|
||||
# Debug listen address (default: :5001)
|
||||
# ATCR_DEBUG_ADDR=:5001
|
||||
|
||||
# Base URL for the AppView service (REQUIRED for production)
|
||||
# Used to generate OAuth redirect URIs and JWT realms
|
||||
# Development: Auto-detected from ATCR_HTTP_ADDR (e.g., http://127.0.0.1:5000)
|
||||
# Production: Set to your public URL (e.g., https://atcr.io)
|
||||
# ATCR_BASE_URL=http://127.0.0.1:5000
|
||||
|
||||
# Service name (used for JWT service/issuer fields)
|
||||
# Default: Derived from base URL hostname, or "atcr.io"
|
||||
# ATCR_SERVICE_NAME=atcr.io
|
||||
|
||||
# ==============================================================================
|
||||
# Storage Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Default hold service DID for users without their own storage (REQUIRED)
|
||||
# Users with a sailor profile defaultHold setting will override this
|
||||
# Format: did:web:hostname[:port]
|
||||
# Docker: did:web:atcr-hold:8080
|
||||
# Local dev: did:web:127.0.0.1:8080
|
||||
# Production: did:web:hold01.atcr.io
|
||||
ATCR_DEFAULT_HOLD_DID=did:web:127.0.0.1:8080
|
||||
|
||||
# ==============================================================================
|
||||
# Authentication Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Path to JWT signing private key (auto-generated if missing)
|
||||
# Default: /var/lib/atcr/auth/private-key.pem
|
||||
# ATCR_AUTH_KEY_PATH=/var/lib/atcr/auth/private-key.pem
|
||||
|
||||
# Path to JWT signing certificate (auto-generated if missing)
|
||||
# Default: /var/lib/atcr/auth/private-key.crt
|
||||
# ATCR_AUTH_CERT_PATH=/var/lib/atcr/auth/private-key.crt
|
||||
|
||||
# JWT token expiration in seconds (default: 300 = 5 minutes)
|
||||
# ATCR_TOKEN_EXPIRATION=300
|
||||
|
||||
# Path to OAuth client P-256 signing key (auto-generated on first run)
|
||||
# Used for confidential OAuth client authentication (production only)
|
||||
# Localhost deployments always use public OAuth clients (no key needed)
|
||||
# Default: /var/lib/atcr/oauth/client.key
|
||||
# ATCR_OAUTH_KEY_PATH=/var/lib/atcr/oauth/client.key
|
||||
|
||||
# OAuth client display name (shown in authorization screens)
|
||||
# Default: AT Container Registry
|
||||
# ATCR_CLIENT_NAME=AT Container Registry
|
||||
|
||||
# ==============================================================================
|
||||
# UI Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Enable web UI (default: true)
|
||||
# Set to "false" to disable web interface and run registry-only
|
||||
ATCR_UI_ENABLED=true
|
||||
|
||||
# SQLite database path for UI data (sessions, stars, pull counts, etc.)
|
||||
# Default: /var/lib/atcr/ui.db
|
||||
# ATCR_UI_DATABASE_PATH=/var/lib/atcr/ui.db
|
||||
|
||||
# Skip database migrations on startup (default: false)
|
||||
# Set to "true" to skip running migrations (useful for tests or fresh databases)
|
||||
# Production: Keep as "false" to ensure migrations are applied
|
||||
SKIP_DB_MIGRATIONS=false
|
||||
|
||||
# ==============================================================================
|
||||
# Logging Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Log level: debug, info, warn, error (default: info)
|
||||
ATCR_LOG_LEVEL=debug
|
||||
|
||||
# Log formatter: text, json (default: text)
|
||||
# ATCR_LOG_FORMATTER=text
|
||||
|
||||
# ==============================================================================
|
||||
# Hold Health Check Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# How often to check health of hold endpoints in the background (default: 15m)
|
||||
# Queries database for unique hold endpoints and checks if they're reachable
|
||||
# Examples: 5m, 15m, 30m, 1h
|
||||
# ATCR_HEALTH_CHECK_INTERVAL=15m
|
||||
|
||||
# How long to cache health check results (default: 15m)
|
||||
# Cached results avoid redundant health checks on page renders
|
||||
# Should be >= ATCR_HEALTH_CHECK_INTERVAL for efficiency
|
||||
# Examples: 15m, 30m, 1h
|
||||
# ATCR_HEALTH_CACHE_TTL=15m
|
||||
|
||||
# ==============================================================================
|
||||
# Jetstream Configuration (ATProto event streaming)
|
||||
# ==============================================================================
|
||||
|
||||
# Jetstream WebSocket URL for real-time ATProto events
|
||||
# Default: wss://jetstream2.us-west.bsky.network/subscribe
|
||||
# JETSTREAM_URL=wss://jetstream2.us-west.bsky.network/subscribe
|
||||
|
||||
# Enable backfill worker to sync historical records (default: false)
|
||||
# Set to "true" to enable periodic syncing of ATProto records
|
||||
# ATCR_BACKFILL_ENABLED=true
|
||||
|
||||
# ATProto relay endpoint for backfill sync API
|
||||
# Default: https://relay1.us-east.bsky.network
|
||||
# ATCR_RELAY_ENDPOINT=https://relay1.us-east.bsky.network
|
||||
|
||||
# Backfill interval (default: 1h)
|
||||
# Examples: 30m, 1h, 2h, 24h
|
||||
# ATCR_BACKFILL_INTERVAL=1h
|
||||
@@ -1,127 +0,0 @@
|
||||
# ATCR Hold Service Configuration
|
||||
# Copy this file to .env and fill in your values
|
||||
|
||||
# ==============================================================================
|
||||
# Required Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Hold service public URL (REQUIRED)
|
||||
# The hostname becomes the hold name/record key
|
||||
# Examples: https://hold1.atcr.io, http://127.0.0.1:8080
|
||||
HOLD_PUBLIC_URL=http://127.0.0.1:8080
|
||||
|
||||
# ==============================================================================
|
||||
# Storage Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Storage driver type (s3, filesystem)
|
||||
# Default: s3
|
||||
#
|
||||
# S3 Presigned URLs:
|
||||
# When using S3 storage, presigned URLs are automatically enabled for direct
|
||||
# client ↔ S3 transfers. This eliminates the hold service as a bandwidth
|
||||
# bottleneck, reducing hold bandwidth by ~99% for push/pull operations.
|
||||
# Falls back to proxy mode automatically for non-S3 drivers.
|
||||
STORAGE_DRIVER=filesystem
|
||||
|
||||
# S3 Access Credentials
|
||||
AWS_ACCESS_KEY_ID=your_access_key
|
||||
AWS_SECRET_ACCESS_KEY=your_secret_key
|
||||
|
||||
# S3 Region
|
||||
# For third-party S3 providers, this is ignored when S3_ENDPOINT is set,
|
||||
# but must be a valid AWS region (e.g., us-east-1) to pass validation.
|
||||
# Default: us-east-1
|
||||
AWS_REGION=us-east-1
|
||||
|
||||
# S3 Bucket Name
|
||||
S3_BUCKET=atcr-blobs
|
||||
|
||||
# S3 Endpoint (for S3-compatible services like Storj, Minio, UpCloud)
|
||||
# Examples:
|
||||
# - Storj: https://gateway.storjshare.io
|
||||
# - UpCloud: https://[bucket-id].upcloudobjects.com
|
||||
# - Minio: http://minio:9000
|
||||
# Leave empty for AWS S3
|
||||
# S3_ENDPOINT=https://gateway.storjshare.io
|
||||
|
||||
# For filesystem driver:
|
||||
# STORAGE_DRIVER=filesystem
|
||||
# STORAGE_ROOT_DIR=/var/lib/atcr/hold
|
||||
|
||||
# ==============================================================================
|
||||
# Server Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Server listen address (default: :8080)
|
||||
# HOLD_SERVER_ADDR=:8080
|
||||
|
||||
# Allow public blob reads (pulls) without authentication
|
||||
# Writes (pushes) always require crew membership via PDS
|
||||
# Default: false
|
||||
HOLD_PUBLIC=false
|
||||
|
||||
# ATProto relay endpoint for requesting crawl on startup
|
||||
# This makes the hold's embedded PDS discoverable by the relay network
|
||||
# Default: https://bsky.network (set to empty string to disable)
|
||||
# HOLD_RELAY_ENDPOINT=https://bsky.network
|
||||
|
||||
# ==============================================================================
|
||||
# Embedded PDS Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Directory path for embedded PDS carstore (SQLite database)
|
||||
# Default: /var/lib/atcr-hold
|
||||
# If empty, embedded PDS is disabled
|
||||
#
|
||||
# Note: This should be a directory path, NOT a file path
|
||||
# Carstore creates db.sqlite3 inside this directory
|
||||
#
|
||||
# The embedded PDS makes the hold a proper ATProto user with:
|
||||
# - did:web identity (derived from HOLD_PUBLIC_URL hostname)
|
||||
# - DID document at /.well-known/did.json
|
||||
# - XRPC endpoints for crew management
|
||||
# - ATProto blob endpoints (wraps existing presigned URL logic)
|
||||
HOLD_DATABASE_DIR=/var/lib/atcr-hold
|
||||
|
||||
# Path to signing key (auto-generated on first run if missing)
|
||||
# Default: {HOLD_DATABASE_DIR}/signing.key
|
||||
# HOLD_KEY_PATH=/var/lib/atcr-hold/signing.key
|
||||
|
||||
# ==============================================================================
|
||||
# Bluesky Integration
|
||||
# ==============================================================================
|
||||
|
||||
# Enable Bluesky posts when users push container images (default: false)
|
||||
# When enabled, the hold's embedded PDS will create posts announcing image pushes
|
||||
# Synced to captain record's enableBlueskyPosts field on startup
|
||||
# HOLD_BLUESKY_POSTS_ENABLED=false
|
||||
|
||||
# ==============================================================================
|
||||
# Registration (REQUIRED)
|
||||
# ==============================================================================
|
||||
|
||||
# Your ATProto DID (REQUIRED for registration)
|
||||
# Get your DID: https://bsky.social/xrpc/com.atproto.identity.resolveHandle?handle=yourhandle.bsky.social
|
||||
#
|
||||
# On first run with HOLD_OWNER set:
|
||||
# 1. Hold service will print an OAuth URL to the logs
|
||||
# 2. Visit the URL in your browser to authorize
|
||||
# 3. Hold service creates hold + crew records in your PDS
|
||||
# 4. Registration complete!
|
||||
#
|
||||
# On subsequent runs:
|
||||
# - Hold service checks if already registered
|
||||
# - Skips OAuth if records exist
|
||||
#
|
||||
HOLD_OWNER=did:plc:your-did-here
|
||||
|
||||
# ==============================================================================
|
||||
# Logging Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Log level: debug, info, warn, error (default: info)
|
||||
ATCR_LOG_LEVEL=debug
|
||||
|
||||
# Log formatter: text, json (default: text)
|
||||
# ATCR_LOG_FORMATTER=text
|
||||
13
.gitignore
vendored
13
.gitignore
vendored
@@ -2,6 +2,8 @@
|
||||
bin/
|
||||
dist/
|
||||
tmp/
|
||||
./appview
|
||||
./hold
|
||||
|
||||
# Test artifacts
|
||||
.atcr-pids
|
||||
@@ -12,12 +14,18 @@ tmp/
|
||||
# Environment configuration
|
||||
.env
|
||||
|
||||
# Deploy state (contains server UUIDs and IPs)
|
||||
deploy/upcloud/state.json
|
||||
|
||||
# Generated assets (run go generate to rebuild)
|
||||
pkg/appview/licenses/spdx-licenses.json
|
||||
pkg/appview/static/js/htmx.min.js
|
||||
pkg/appview/static/js/lucide.min.js
|
||||
pkg/appview/public/css/style.css
|
||||
pkg/appview/public/js/htmx.min.js
|
||||
pkg/appview/public/js/lucide.min.js
|
||||
pkg/hold/admin/public/css/style.css
|
||||
|
||||
# IDE
|
||||
.zed/
|
||||
.claude/
|
||||
.vscode/
|
||||
.idea/
|
||||
@@ -27,3 +35,4 @@ pkg/appview/static/js/lucide.min.js
|
||||
# OS
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
node_modules
|
||||
|
||||
@@ -1,7 +1,11 @@
|
||||
# golangci-lint configuration for ATCR
|
||||
# See: https://golangci-lint.run/usage/configuration/
|
||||
version: "2"
|
||||
linters:
|
||||
|
||||
issues:
|
||||
fix: true
|
||||
|
||||
linters:
|
||||
settings:
|
||||
staticcheck:
|
||||
checks:
|
||||
@@ -20,7 +24,17 @@ linters:
|
||||
exclusions:
|
||||
presets:
|
||||
- std-error-handling
|
||||
rules:
|
||||
- path: _test\.go
|
||||
linters:
|
||||
- errcheck
|
||||
|
||||
formatters:
|
||||
enable:
|
||||
- gofmt
|
||||
- goimports
|
||||
- goimports
|
||||
settings:
|
||||
gofmt:
|
||||
rewrite-rules:
|
||||
- pattern: 'interface{}'
|
||||
replacement: 'any'
|
||||
|
||||
24
.tangled/workflows/lint.yaml
Normal file
24
.tangled/workflows/lint.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
when:
|
||||
- event: ["push"]
|
||||
branch: ["*"]
|
||||
- event: ["pull_request"]
|
||||
branch: ["main"]
|
||||
|
||||
engine: kubernetes
|
||||
image: golang:1.25-trixie
|
||||
architecture: amd64
|
||||
|
||||
steps:
|
||||
- name: Download and Generate
|
||||
environment:
|
||||
CGO_ENABLED: 1
|
||||
command: |
|
||||
go mod download
|
||||
go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.7.2
|
||||
go generate ./...
|
||||
|
||||
- name: Run Linter
|
||||
environment:
|
||||
CGO_ENABLED: 1
|
||||
command: |
|
||||
golangci-lint run ./...
|
||||
915
CLAUDE.md
915
CLAUDE.md
@@ -4,799 +4,260 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
|
||||
|
||||
## Project Overview
|
||||
|
||||
ATCR (ATProto Container Registry) is an OCI-compliant container registry that uses the AT Protocol for manifest storage and S3 for blob storage. This creates a decentralized container registry where manifests are stored in users' Personal Data Servers (PDS) while layers are stored in S3.
|
||||
ATCR (ATProto Container Registry) is an OCI-compliant container registry that uses the AT Protocol for manifest storage and S3 for blob storage. Manifests are stored in users' Personal Data Servers (PDS) while layers are stored in S3.
|
||||
|
||||
## Go Workspace
|
||||
|
||||
The project uses a Go workspace (`go.work`) with two modules:
|
||||
- `atcr.io` — Main module (appview, hold, credential-helper, oauth-helper)
|
||||
- `atcr.io/scanner` — Scanner module (separate to isolate heavy Syft/Grype dependencies)
|
||||
|
||||
## Build Commands
|
||||
|
||||
Always build into the `bin/` directory (`-o bin/...`), not the project root.
|
||||
|
||||
```bash
|
||||
# Build all binaries
|
||||
# create go builds in the bin/ directory
|
||||
# Build main binaries
|
||||
go build -o bin/atcr-appview ./cmd/appview
|
||||
go build -o bin/atcr-hold ./cmd/hold
|
||||
go build -o bin/docker-credential-atcr ./cmd/credential-helper
|
||||
go build -o bin/oauth-helper ./cmd/oauth-helper
|
||||
|
||||
# Run tests
|
||||
go test ./...
|
||||
# Build scanner (separate module)
|
||||
cd scanner && go build -o ../bin/atcr-scanner ./cmd/scanner && cd ..
|
||||
|
||||
# Run tests for specific package
|
||||
go test ./pkg/atproto/...
|
||||
go test ./pkg/appview/storage/...
|
||||
# Build hold with billing support (optional build tag)
|
||||
go build -tags billing -o bin/atcr-hold ./cmd/hold
|
||||
|
||||
# Run specific test
|
||||
go test -run TestManifestStore ./pkg/atproto/...
|
||||
# Tests
|
||||
go test ./... # all tests
|
||||
go test ./pkg/atproto/... # specific package
|
||||
go test -run TestManifestStore ./pkg/atproto/... # specific test
|
||||
go test -race ./... # race detector
|
||||
|
||||
# Run with race detector
|
||||
go test -race ./...
|
||||
|
||||
# Run tests with verbose output
|
||||
go test -v ./...
|
||||
|
||||
# Update dependencies
|
||||
go mod tidy
|
||||
|
||||
# Build Docker images
|
||||
docker build -t atcr.io/appview:latest .
|
||||
# Docker
|
||||
docker build -f Dockerfile.appview -t atcr.io/appview:latest .
|
||||
docker build -f Dockerfile.hold -t atcr.io/hold:latest .
|
||||
|
||||
# Or use docker-compose
|
||||
docker build -f Dockerfile.scanner -t atcr.io/scanner:latest .
|
||||
docker-compose up -d
|
||||
|
||||
# Run locally (AppView) - configure via env vars (see .env.appview.example)
|
||||
export ATCR_HTTP_ADDR=:5000
|
||||
export ATCR_DEFAULT_HOLD=http://127.0.0.1:8080
|
||||
./bin/atcr-appview serve
|
||||
# Generate & run with config
|
||||
./bin/atcr-appview config init config-appview.yaml
|
||||
./bin/atcr-hold config init config-hold.yaml
|
||||
./bin/atcr-appview serve --config config-appview.yaml
|
||||
./bin/atcr-hold serve --config config-hold.yaml
|
||||
|
||||
# Or use .env file:
|
||||
cp .env.appview.example .env.appview
|
||||
# Edit .env.appview with your settings
|
||||
source .env.appview
|
||||
./bin/atcr-appview serve
|
||||
# Scanner (env vars only, no YAML)
|
||||
SCANNER_HOLD_URL=ws://localhost:8080 SCANNER_SHARED_SECRET=secret ./bin/atcr-scanner serve
|
||||
|
||||
# Legacy mode (still supported):
|
||||
# ./bin/atcr-appview serve config/config.yml
|
||||
# Usage report
|
||||
go run ./cmd/usage-report --hold https://hold01.atcr.io
|
||||
go run ./cmd/usage-report --hold https://hold01.atcr.io --from-manifests
|
||||
|
||||
# Run hold service (configure via env vars - see .env.hold.example)
|
||||
export HOLD_PUBLIC_URL=http://127.0.0.1:8080
|
||||
export STORAGE_DRIVER=filesystem
|
||||
export STORAGE_ROOT_DIR=/tmp/atcr-hold
|
||||
export HOLD_OWNER=did:plc:your-did-here
|
||||
./bin/atcr-hold
|
||||
# Hold starts immediately with embedded PDS
|
||||
|
||||
# Request Bluesky relay crawl (makes your PDS discoverable)
|
||||
./deploy/request-crawl.sh hold01.atcr.io
|
||||
# Or specify a different relay:
|
||||
./deploy/request-crawl.sh hold01.atcr.io https://custom-relay.example.com/xrpc/com.atproto.sync.requestCrawl
|
||||
# Utilities
|
||||
go run ./cmd/db-migrate --help # SQLite → libsql migration
|
||||
go run ./cmd/record-query --help # Query ATProto relay by collection
|
||||
go run ./cmd/s3-test # S3 connectivity test
|
||||
go run ./cmd/healthcheck <url> # HTTP health check (for Docker)
|
||||
```
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
### Core Design
|
||||
ATCR uses **distribution/distribution** as a library, extending it via middleware to route content to different backends:
|
||||
|
||||
ATCR uses **distribution/distribution** as a library and extends it through middleware to route different types of content to different storage backends:
|
||||
|
||||
- **Manifests** → ATProto PDS (small JSON metadata, stored as `io.atcr.manifest` records)
|
||||
- **Blobs/Layers** → S3 or user-deployed storage (large binary data)
|
||||
- **Manifests** → ATProto PDS (small JSON, stored as `io.atcr.manifest` records)
|
||||
- **Blobs/Layers** → S3 via hold service (presigned URLs for direct client-to-S3 transfers)
|
||||
- **Authentication** → ATProto OAuth with DPoP + Docker credential helpers
|
||||
|
||||
### Three-Component Architecture
|
||||
### Four Components
|
||||
|
||||
1. **AppView** (`cmd/appview`) - OCI Distribution API server
|
||||
- Resolves identities (handle/DID → PDS endpoint)
|
||||
- Routes manifests to user's PDS
|
||||
- Routes blobs to storage endpoint (default or BYOS)
|
||||
- Validates OAuth tokens via PDS
|
||||
- Issues registry JWTs
|
||||
1. **AppView** (`cmd/appview`) — OCI Distribution API server. Resolves identities, routes manifests to PDS, routes blobs to hold service, validates OAuth, issues registry JWTs. Includes web UI for browsing.
|
||||
2. **Hold Service** (`cmd/hold`) — BYOS blob storage. Embedded PDS with captain/crew/stats/scan records (all ATProto records in CAR store), S3-compatible storage, presigned URLs. Supports did:web (default) or did:plc identity with auto-recovery. Optional subsystems: admin UI, quotas, billing (Stripe), GC, scan dispatch, Bluesky status posts.
|
||||
3. **Scanner** (`scanner/cmd/scanner`) — Vulnerability scanning. Connects to hold via WebSocket, generates SBOMs (Syft), scans vulnerabilities (Grype). Priority queue with tier-based scheduling.
|
||||
4. **Credential Helper** (`cmd/credential-helper`) — Docker credential helper implementing ATProto OAuth flow, exchanges OAuth token for registry JWT.
|
||||
|
||||
2. **Hold Service** (`cmd/hold`) - Optional BYOS component
|
||||
- Lightweight HTTP server for presigned URLs
|
||||
- Embedded PDS with captain + crew records
|
||||
- Supports S3, Storj, Minio, filesystem, etc.
|
||||
- Authorization based on captain record (public, allowAllCrew)
|
||||
- Self-describing via DID resolution
|
||||
- Configured entirely via environment variables
|
||||
### Request Flow Summary
|
||||
|
||||
3. **Credential Helper** (`cmd/credential-helper`) - Client-side OAuth
|
||||
- Implements Docker credential helper protocol
|
||||
- ATProto OAuth flow with DPoP
|
||||
- Token caching and refresh
|
||||
- Exchanges OAuth token for registry JWT
|
||||
**Push:** Client pushes to `atcr.io/<identity>/<image>:<tag>`. Registry middleware resolves identity → DID → PDS, discovers hold DID (from sailor profile `defaultHold` → legacy `io.atcr.hold` records → AppView default). Blobs go to hold via XRPC multipart upload (presigned S3 URLs). Manifests stored in user's PDS as `io.atcr.manifest` records with `holdDid` reference.
|
||||
|
||||
### Request Flow
|
||||
**Pull:** AppView fetches manifest from user's PDS. The manifest's `holdDid` field tells where blobs were stored. Blobs fetched from that hold via presigned download URLs. Pull always uses the historical hold from the manifest, even if the user changed their default since pushing.
|
||||
|
||||
#### Push with Default Storage
|
||||
```
|
||||
1. Client: docker push atcr.io/alice/myapp:latest
|
||||
2. HTTP Request → /v2/alice/myapp/manifests/latest
|
||||
3. Registry Middleware (pkg/appview/middleware/registry.go)
|
||||
→ Resolves "alice" to DID and PDS endpoint
|
||||
→ Queries alice's sailor profile for defaultHold (returns DID if set)
|
||||
→ If not set, checks alice's io.atcr.hold records
|
||||
→ Falls back to AppView's default_hold_did
|
||||
→ Stores DID/PDS/hold DID in RegistryContext
|
||||
4. Routing Repository (pkg/appview/storage/routing_repository.go)
|
||||
→ Creates RoutingRepository
|
||||
→ Returns ATProto ManifestStore for manifests
|
||||
→ Returns ProxyBlobStore for blobs (routes to hold DID)
|
||||
5. Blob PUT → ProxyBlobStore calls hold's XRPC multipart upload endpoints:
|
||||
a. POST /xrpc/io.atcr.hold.initiateUpload (gets uploadID)
|
||||
b. POST /xrpc/io.atcr.hold.getPartUploadUrl (gets presigned URL for each part)
|
||||
c. PUT to S3 presigned URL (or PUT /xrpc/io.atcr.hold.uploadPart for buffered mode)
|
||||
d. POST /xrpc/io.atcr.hold.completeUpload (finalizes upload)
|
||||
6. Manifest PUT → alice's PDS as io.atcr.manifest record (includes holdDid + holdEndpoint)
|
||||
→ Manifest also uploaded to PDS blob storage (ATProto CID format)
|
||||
```
|
||||
|
||||
#### Push with BYOS (Bring Your Own Storage)
|
||||
```
|
||||
1. Client: docker push atcr.io/alice/myapp:latest
|
||||
2. Registry Middleware resolves alice → did:plc:alice123
|
||||
3. Hold discovery via findHoldDID():
|
||||
a. Check alice's sailor profile for defaultHold (returns DID if set)
|
||||
b. If not set, check alice's io.atcr.hold records (legacy)
|
||||
c. Fall back to AppView's default_hold_did
|
||||
4. Found: alice's profile has defaultHold = "did:web:alice-storage.fly.dev"
|
||||
5. Routing Repository returns ProxyBlobStore(did:web:alice-storage.fly.dev)
|
||||
6. ProxyBlobStore:
|
||||
a. Resolves hold DID → https://alice-storage.fly.dev (did:web resolution)
|
||||
b. Gets service token from alice's PDS via com.atproto.server.getServiceAuth
|
||||
c. Calls hold XRPC endpoints with service token authentication:
|
||||
- POST /xrpc/io.atcr.hold.initiateUpload
|
||||
- POST /xrpc/io.atcr.hold.getPartUploadUrl (returns presigned S3 URL)
|
||||
- PUT to S3 presigned URL (direct upload to alice's S3/Storj)
|
||||
- POST /xrpc/io.atcr.hold.completeUpload
|
||||
7. Hold service validates service token, checks crew membership, generates presigned URLs
|
||||
8. Manifest stored in alice's PDS with:
|
||||
- holdDid = "did:web:alice-storage.fly.dev" (primary)
|
||||
- holdEndpoint = "https://alice-storage.fly.dev" (backward compat)
|
||||
```
|
||||
|
||||
#### Pull Flow
|
||||
```
|
||||
1. Client: docker pull atcr.io/alice/myapp:latest
|
||||
2. GET /v2/alice/myapp/manifests/latest
|
||||
3. AppView fetches manifest from alice's PDS
|
||||
4. Manifest contains:
|
||||
- holdDid = "did:web:alice-storage.fly.dev" (primary reference)
|
||||
- holdEndpoint = "https://alice-storage.fly.dev" (legacy fallback)
|
||||
5. Hold DID cached: (alice's DID, "myapp") → "did:web:alice-storage.fly.dev"
|
||||
TTL: 10 minutes (covers typical pull operations)
|
||||
6. Client requests blobs: GET /v2/alice/myapp/blobs/sha256:abc123
|
||||
7. AppView checks cache, routes to hold DID from manifest (not re-discovered)
|
||||
8. ProxyBlobStore:
|
||||
a. Resolves hold DID → https://alice-storage.fly.dev
|
||||
b. Gets service token from alice's PDS via com.atproto.server.getServiceAuth
|
||||
c. Calls GET /xrpc/com.atproto.sync.getBlob?did={userDID}&cid=sha256:abc123&method=GET
|
||||
d. Hold returns presigned download URL in JSON response
|
||||
9. Client redirected to download blob directly from alice's S3 via presigned URL
|
||||
```
|
||||
|
||||
**Key insight:** Pull uses the historical `holdDid` from the manifest, ensuring blobs are fetched from the hold where they were originally pushed, even if alice later changes her default hold. Hold cache (10min TTL) avoids re-querying PDS for each blob during the same pull operation.
|
||||
**Hold discovery priority** (in `findHoldDID()`, `pkg/appview/middleware/registry.go`):
|
||||
1. Sailor profile's `defaultHold` (user preference)
|
||||
2. User's `io.atcr.hold` records (legacy)
|
||||
3. AppView's `default_hold_did` (fallback)
|
||||
|
||||
### Name Resolution
|
||||
|
||||
Names follow the pattern: `atcr.io/<identity>/<image>:<tag>`
|
||||
Pattern: `atcr.io/<identity>/<image>:<tag>` where identity is a handle or DID.
|
||||
|
||||
Where `<identity>` can be:
|
||||
- **Handle**: `alice.bsky.social` → resolved via .well-known/atproto-did
|
||||
- **DID**: `did:plc:xyz123` → resolved via PLC directory
|
||||
Resolution in `pkg/atproto/resolver.go`: Handle → DID (DNS/HTTPS) → PDS endpoint (DID document).
|
||||
|
||||
Resolution happens in `pkg/atproto/resolver.go`:
|
||||
1. Handle → DID (via DNS/HTTPS)
|
||||
2. DID → PDS endpoint (via DID document)
|
||||
### Nautical Terminology
|
||||
|
||||
### Middleware System
|
||||
- **Sailors** = registry users, **Captains** = hold owners, **Crew** = hold members
|
||||
- **Holds** = storage endpoints (BYOS), **Quartermaster/Bosun/Deckhand** = crew tiers
|
||||
|
||||
ATCR uses middleware and routing to handle requests:
|
||||
### Hold Embedded PDS Records
|
||||
|
||||
#### 1. Registry Middleware (`pkg/appview/middleware/registry.go`)
|
||||
- Wraps `distribution.Namespace`
|
||||
- Intercepts `Repository(name)` calls
|
||||
- Performs name resolution (alice → did:plc:xyz → pds.example.com)
|
||||
- Queries PDS for `io.atcr.hold` records to find storage endpoint
|
||||
- Stores resolved identity and storage endpoint in context
|
||||
The hold's embedded PDS stores all operational data as ATProto records in a CAR store (not SQLite). SQLite holds only the records index and events.
|
||||
|
||||
#### 2. Auth Middleware (`pkg/appview/middleware/auth.go`)
|
||||
- Validates JWT tokens from Docker clients
|
||||
- Extracts DID from token claims
|
||||
- Injects authenticated identity into context
|
||||
| Collection | Cardinality | Description |
|
||||
|---|---|---|
|
||||
| `io.atcr.hold.captain` | Singleton | Hold identity, owner DID, settings |
|
||||
| `io.atcr.hold.crew` | Per-member | Crew membership + permissions |
|
||||
| `io.atcr.hold.layer` | Per-layer | Layer metadata (digest, size, media type) |
|
||||
| `io.atcr.hold.stats` | Per-repo | Push/pull counts per owner+repository |
|
||||
| `io.atcr.hold.scan` | Per-scan | Vulnerability scan results |
|
||||
| `io.atcr.hold.image.config` | Per-manifest | OCI image config (history, env, entrypoint, labels) |
|
||||
| `app.bsky.feed.post` | Status posts | Online/offline status, push notifications |
|
||||
| `sh.tangled.actor.profile` | Singleton | Hold profile (name, description, avatar) |
|
||||
|
||||
#### 3. Routing Repository (`pkg/appview/storage/routing_repository.go`)
|
||||
- Implements `distribution.Repository`
|
||||
- Returns custom `Manifests()` and `Blobs()` implementations
|
||||
- Routes manifests to ATProto, blobs to S3 or BYOS
|
||||
- **IMPORTANT**: RoutingRepository is created fresh on EVERY request (no caching)
|
||||
- Each Docker layer upload is a separate HTTP request (possibly different process)
|
||||
- OAuth sessions can be refreshed/invalidated between requests
|
||||
- The OAuth refresher already caches sessions efficiently (in-memory + DB)
|
||||
- Previous caching of repositories with stale ATProtoClient caused "invalid refresh token" errors
|
||||
## Authentication
|
||||
|
||||
### Authentication Architecture
|
||||
Three token types flow through the system:
|
||||
|
||||
#### Token Types and Flows
|
||||
| Token | Issued By | Used For | Lifetime |
|
||||
|-------|-----------|----------|----------|
|
||||
| OAuth (access+refresh) | User's PDS | AppView → PDS communication | ~2h / ~90d |
|
||||
| Registry JWT | AppView | Docker client → AppView | 5 min |
|
||||
| Service Token | User's PDS | AppView → Hold service | 60s (cached 50s) |
|
||||
|
||||
ATCR uses three distinct token types in its authentication flow:
|
||||
|
||||
**1. OAuth Tokens (Access + Refresh)**
|
||||
- **Issued by:** User's PDS via OAuth flow
|
||||
- **Stored in:** AppView database (`oauth_sessions` table)
|
||||
- **Cached in:** Refresher's in-memory map (per-DID)
|
||||
- **Used for:** AppView → User's PDS communication (write manifests, read profiles)
|
||||
- **Managed by:** Indigo library with DPoP (automatic refresh)
|
||||
- **Lifetime:** Access ~2 hours, Refresh ~90 days (PDS controlled)
|
||||
|
||||
**2. Registry JWTs**
|
||||
- **Issued by:** AppView after OAuth login
|
||||
- **Stored in:** Docker credential helper (`~/.atcr/credential-helper-token.json`)
|
||||
- **Used for:** Docker client → AppView authentication
|
||||
- **Lifetime:** 15 minutes (configurable via `ATCR_TOKEN_EXPIRATION`)
|
||||
- **Format:** JWT with DID claim
|
||||
|
||||
**3. Service Tokens**
|
||||
- **Issued by:** User's PDS via `com.atproto.server.getServiceAuth`
|
||||
- **Stored in:** AppView memory (in-memory cache with ~50s TTL)
|
||||
- **Used for:** AppView → Hold service authentication (acting on behalf of user)
|
||||
- **Lifetime:** 60 seconds (PDS controlled), cached for 50s
|
||||
- **Required:** OAuth session to obtain (catch-22 solved by Refresher)
|
||||
|
||||
**Token Flow Diagram:**
|
||||
```
|
||||
┌─────────────┐ ┌──────────────┐
|
||||
│ Docker │ ─── Registry JWT ──────────────→ │ AppView │
|
||||
│ Client │ │ │
|
||||
└─────────────┘ └──────┬───────┘
|
||||
│
|
||||
│ OAuth tokens
|
||||
│ (access + refresh)
|
||||
↓
|
||||
┌──────────────┐
|
||||
│ User's PDS │
|
||||
└──────┬───────┘
|
||||
│
|
||||
│ Service token
|
||||
│ (via getServiceAuth)
|
||||
↓
|
||||
┌──────────────┐
|
||||
│ Hold Service │
|
||||
└──────────────┘
|
||||
Docker Client ──Registry JWT──→ AppView ──OAuth──→ User's PDS ──Service Token──→ Hold
|
||||
```
|
||||
|
||||
#### ATProto OAuth with DPoP
|
||||
|
||||
ATCR implements the full ATProto OAuth specification with mandatory security features:
|
||||
|
||||
**Required Components:**
|
||||
- **DPoP** (RFC 9449) - Cryptographic proof-of-possession for every request
|
||||
- **PAR** (RFC 9126) - Pushed Authorization Requests for server-to-server parameter exchange
|
||||
- **PKCE** (RFC 7636) - Proof Key for Code Exchange to prevent authorization code interception
|
||||
|
||||
**Key Components** (`pkg/auth/oauth/`):
|
||||
|
||||
1. **Client** (`client.go`) - OAuth client configuration and session management
|
||||
- **ClientApp setup:**
|
||||
- `NewClientApp()` - Creates configured `*oauth.ClientApp` (uses indigo directly, no wrapper)
|
||||
- Uses `NewLocalhostConfig()` for localhost (public client)
|
||||
- Uses `NewPublicConfig()` for production (upgraded to confidential with P-256 key)
|
||||
- `GetDefaultScopes()` - Returns ATCR-specific OAuth scopes
|
||||
- `ScopesMatch()` - Compares scope lists (order-independent)
|
||||
- **Session management (Refresher):**
|
||||
- `NewRefresher()` - Creates session cache manager for AppView
|
||||
- **Purpose:** In-memory cache for `*oauth.ClientSession` objects (performance optimization)
|
||||
- **Why needed:** Saves 1-2 DB queries per request (~2ms) with minimal code complexity
|
||||
- Per-DID locking prevents concurrent database loads
|
||||
- Calls `ClientApp.ResumeSession()` on cache miss
|
||||
- Indigo handles token refresh automatically (transparent to ATCR)
|
||||
- **Performance:** Essential for high-traffic deployments, negligible for low-traffic
|
||||
- **Architecture:** Single file containing both ClientApp helpers and Refresher (combined from previous two-file structure)
|
||||
|
||||
2. **Keys** (`keys.go`) - P-256 key management for confidential clients
|
||||
- `GenerateOrLoadClientKey()` - generates or loads P-256 key from disk
|
||||
- Follows hold service pattern: auto-generation, 0600 permissions, /var/lib/atcr/oauth/
|
||||
- `GenerateKeyID()` - derives key ID from public key hash
|
||||
- `PrivateKeyToMultibase()` - converts key for `SetClientSecret()` API
|
||||
- **Key type:** P-256 (ES256) for OAuth standard compatibility (not K-256 like PDS keys)
|
||||
|
||||
3. **Storage** - Persists OAuth sessions
|
||||
- `db/oauth_store.go` - SQLite-backed storage for AppView (in UI database)
|
||||
- `store.go` - File-based storage for CLI tools (`~/.atcr/oauth-sessions.json`)
|
||||
- Implements indigo's `ClientAuthStore` interface
|
||||
|
||||
4. **Server** (`server.go`) - OAuth authorization endpoints for AppView
|
||||
- `GET /auth/oauth/authorize` - starts OAuth flow
|
||||
- `GET /auth/oauth/callback` - handles OAuth callback
|
||||
- Uses `ClientApp` methods directly (no wrapper)
|
||||
|
||||
5. **Interactive Flow** (`interactive.go`) - Reusable OAuth flow for CLI tools
|
||||
- Used by credential helper and hold service registration
|
||||
- Two-phase callback setup ensures PAR metadata availability
|
||||
|
||||
**Client Configuration:**
|
||||
- **Localhost:** Always public client (no client authentication)
|
||||
- Client ID: `http://localhost?redirect_uri=...&scope=...` (query-based)
|
||||
- No P-256 key generation
|
||||
- **Production:** Confidential client with P-256 private key (if key exists)
|
||||
- Client ID: `{baseURL}/client-metadata.json` (metadata endpoint)
|
||||
- Key path: `/var/lib/atcr/oauth/client.key` (auto-generated on first run)
|
||||
- Key algorithm: ES256 (P-256, not K-256)
|
||||
- Upgraded via `config.SetClientSecret(key, keyID)`
|
||||
|
||||
**Authentication Flow:**
|
||||
```
|
||||
1. User configures Docker to use the credential helper (adds to config.json)
|
||||
2. On first docker push/pull, Docker calls credential helper
|
||||
3. Credential helper opens browser → AppView OAuth page
|
||||
4. AppView handles OAuth flow:
|
||||
- Resolves handle → DID → PDS endpoint
|
||||
- Discovers OAuth server metadata from PDS
|
||||
- PAR request with DPoP header → get request_uri
|
||||
- User authorizes in browser
|
||||
- AppView exchanges code for OAuth token with DPoP proof
|
||||
- AppView stores: OAuth session (tokens managed by indigo library with DPoP), DID, handle
|
||||
5. AppView shows device approval page: "Can [device] push to your account?"
|
||||
6. User approves device
|
||||
7. AppView issues registry JWT with validated DID
|
||||
8. AppView returns JSON token to credential helper (via callback or browser display)
|
||||
9. Credential helper saves registry JWT locally
|
||||
10. Helper returns registry JWT to Docker
|
||||
|
||||
Later (subsequent docker push):
|
||||
11. Docker calls credential helper
|
||||
12. Helper returns cached registry JWT (or re-authenticates if expired)
|
||||
```
|
||||
|
||||
**Key distinction:** The credential helper never manages OAuth tokens directly. AppView owns the OAuth session (including DPoP handling via indigo library) and issues registry JWTs to the credential helper. AppView needs the OAuth session for:
|
||||
- Writing manifests to user's PDS (with DPoP authentication)
|
||||
- Getting service tokens from user's PDS (with DPoP authentication)
|
||||
- Service tokens are then used to authenticate to hold services (Bearer tokens, not DPoP)
|
||||
|
||||
**Security:**
|
||||
- Tokens validated against authoritative source (user's PDS)
|
||||
- No trust in client-provided identity information
|
||||
- DPoP binds tokens to specific client key
|
||||
- 15-minute token expiry for registry JWTs
|
||||
- **Confidential clients** (production): Client authentication via P-256 private key JWT assertion
|
||||
- Prevents client impersonation attacks
|
||||
- Key stored in `/var/lib/atcr/oauth/client.key` with 0600 permissions
|
||||
- Automatically generated on first run
|
||||
- **Public clients** (localhost): No client authentication (development only)
|
||||
|
||||
### Key Components
|
||||
|
||||
#### ATProto Integration (`pkg/atproto/`)
|
||||
|
||||
**resolver.go**: DID and handle resolution
|
||||
- `ResolveIdentity()`: alice → did:plc:xyz → pds.example.com
|
||||
- `ResolveHandle()`: Uses .well-known/atproto-did
|
||||
- `ResolvePDS()`: Parses DID document for PDS endpoint
|
||||
|
||||
**client.go**: ATProto PDS client
|
||||
- `PutRecord()`: Store manifest as ATProto record
|
||||
- `GetRecord()`: Retrieve manifest from PDS
|
||||
- `DeleteRecord()`: Remove manifest
|
||||
- Uses XRPC protocol (com.atproto.repo.*)
|
||||
|
||||
**lexicon.go**: ATProto record schemas
|
||||
- `ManifestRecord`: OCI manifest stored as ATProto record (includes `holdDid` + `holdEndpoint` fields)
|
||||
- `TagRecord`: Tag pointing to manifest digest
|
||||
- `HoldRecord`: Storage hold definition (LEGACY - for old BYOS model)
|
||||
- `HoldCrewRecord`: Hold crew membership (LEGACY - stored in owner's PDS)
|
||||
- `CaptainRecord`: Hold ownership record (NEW - stored in hold's embedded PDS at rkey "self")
|
||||
- `CrewRecord`: Hold crew membership (NEW - stored in hold's embedded PDS, one record per member)
|
||||
- `SailorProfileRecord`: User profile with `defaultHold` preference (can be DID or URL)
|
||||
- Collections: `io.atcr.manifest`, `io.atcr.tag`, `io.atcr.hold` (legacy), `io.atcr.hold.crew` (used by both legacy and new models), `io.atcr.hold.captain` (new), `io.atcr.sailor.profile`
|
||||
|
||||
**profile.go**: Sailor profile management
|
||||
- `EnsureProfile()`: Creates profile with default hold on first authentication
|
||||
- `GetProfile()`: Retrieves user's profile from PDS
|
||||
- `UpdateProfile()`: Updates user's profile
|
||||
|
||||
**manifest_store.go**: Implements `distribution.ManifestService`
|
||||
- Stores OCI manifests as ATProto records
|
||||
- Digest-based addressing (sha256:abc123 → record key)
|
||||
- Converts between OCI and ATProto formats
|
||||
|
||||
#### Storage Layer (`pkg/appview/storage/`)
|
||||
|
||||
**routing_repository.go**: Routes content by type
|
||||
- `Manifests()` → returns ATProto ManifestStore (caches instance for hold DID extraction)
|
||||
- `Blobs()` → checks hold cache for pull, uses discovery for push
|
||||
- Pull: Uses cached `holdDid` from manifest (historical reference)
|
||||
- Push: Uses discovery-based DID from `findHoldDID()` in middleware
|
||||
- Always returns ProxyBlobStore (routes to hold service via DID)
|
||||
- Implements `distribution.Repository` interface
|
||||
- Uses RegistryContext to pass DID, PDS endpoint, hold DID, OAuth refresher, etc.
|
||||
|
||||
**Database-based hold DID lookups**:
|
||||
- Queries SQLite `manifests` table for hold DID (indexed, fast)
|
||||
- No in-memory caching needed - database IS the cache
|
||||
- Persistent across restarts, multi-instance safe
|
||||
- Pull operations use hold DID from latest manifest (historical reference)
|
||||
- Push operations use fresh discovery from profile/default
|
||||
- Function: `db.GetLatestHoldDIDForRepo(did, repository)` in `pkg/appview/db/queries.go`
|
||||
|
||||
**proxy_blob_store.go**: External storage proxy (routes to hold via XRPC)
|
||||
- Resolves hold DID → HTTP URL for XRPC requests (did:web resolution)
|
||||
- Gets service tokens from user's PDS (`com.atproto.server.getServiceAuth`)
|
||||
- Calls hold XRPC endpoints with service token authentication:
|
||||
- Multipart upload: initiateUpload, getPartUploadUrl, uploadPart, completeUpload, abortUpload
|
||||
- Blob read: com.atproto.sync.getBlob (returns presigned download URL)
|
||||
- Implements full `distribution.BlobStore` interface
|
||||
- Supports both presigned URL mode (S3 direct) and buffered mode (proxy via hold)
|
||||
|
||||
#### AppView Web UI (`pkg/appview/`)
|
||||
|
||||
The AppView includes a web interface for browsing the registry:
|
||||
|
||||
**Features:**
|
||||
- Repository browsing and search
|
||||
- Star/favorite repositories
|
||||
- Pull count tracking
|
||||
- User profiles and settings
|
||||
- OAuth-based authentication for web users
|
||||
|
||||
**Database Layer** (`pkg/appview/db/`):
|
||||
- SQLite database for metadata (stars, pulls, repository info)
|
||||
- Schema migrations via SQL files in `pkg/appview/db/schema.go`
|
||||
- Stores: OAuth sessions, device flows, repository metadata
|
||||
- **NOTE:** Simple SQLite for MVP. For production multi-instance: use PostgreSQL
|
||||
|
||||
**Jetstream Integration** (`pkg/appview/jetstream/`):
|
||||
- Consumes ATProto Jetstream for real-time updates
|
||||
- Backfills repository records from PDS
|
||||
- Indexes manifests, tags, and repository metadata
|
||||
- Worker processes incoming events
|
||||
|
||||
**Web Handlers** (`pkg/appview/handlers/`):
|
||||
- `home.go` - Landing page
|
||||
- `repository.go` - Repository detail pages
|
||||
- `search.go` - Search functionality
|
||||
- `auth.go` - OAuth login/logout for web
|
||||
- `settings.go` - User settings management
|
||||
- `api.go` - JSON API endpoints
|
||||
|
||||
**Static Assets** (`pkg/appview/static/`, `pkg/appview/templates/`):
|
||||
- Templates use Go html/template
|
||||
- JavaScript in `static/js/app.js`
|
||||
- Minimal CSS for clean UI
|
||||
|
||||
#### Hold Service (`cmd/hold/`)
|
||||
|
||||
Lightweight standalone service for BYOS (Bring Your Own Storage) with embedded PDS:
|
||||
|
||||
**Architecture:**
|
||||
- **Embedded PDS**: Each hold has a full ATProto PDS for storing captain + crew records
|
||||
- **DID**: Hold identified by did:web (e.g., `did:web:hold01.atcr.io`)
|
||||
- **Storage**: Reuses distribution's storage driver factory (S3, Storj, Minio, Azure, GCS, filesystem)
|
||||
- **Authorization**: Based on captain + crew records in embedded PDS
|
||||
- **Blob operations**: Generates presigned URLs (15min expiry) or proxies uploads/downloads via XRPC
|
||||
|
||||
**Authorization Model:**
|
||||
|
||||
Read access:
|
||||
- **Public hold** (`HOLD_PUBLIC=true`): Anonymous + all authenticated users
|
||||
- **Private hold** (`HOLD_PUBLIC=false`): Requires authentication + crew membership with blob:read OR blob:write permission
|
||||
- **Note:** `blob:write` implicitly grants `blob:read` access (can't push without pulling)
|
||||
|
||||
Write access:
|
||||
- Hold owner OR crew members with blob:write permission
|
||||
- Verified via `io.atcr.hold.crew` records in hold's embedded PDS
|
||||
|
||||
**Permission Matrix:**
|
||||
|
||||
| User Type | Public Read | Private Read | Write | Crew Admin |
|
||||
|-----------|-------------|--------------|-------|------------|
|
||||
| Anonymous | Yes | No | No | No |
|
||||
| Owner (captain) | Yes | Yes | Yes | Yes (implied) |
|
||||
| Crew (blob:read only) | Yes | Yes | No | No |
|
||||
| Crew (blob:write only) | Yes | Yes* | Yes | No |
|
||||
| Crew (blob:read + blob:write) | Yes | Yes | Yes | No |
|
||||
| Crew (crew:admin) | Yes | Yes | Yes | Yes |
|
||||
| Authenticated non-crew | Yes | No | No | No |
|
||||
|
||||
*`blob:write` implicitly grants `blob:read` access
|
||||
|
||||
**Authorization Error Format:**
|
||||
|
||||
All authorization failures use consistent structured errors (`pkg/hold/pds/auth.go`):
|
||||
```
|
||||
access denied for [action]: [reason] (required: [permission(s)])
|
||||
```
|
||||
|
||||
Examples:
|
||||
- `access denied for blob:read: user is not a crew member (required: blob:read or blob:write)`
|
||||
- `access denied for blob:write: crew member lacks permission (required: blob:write)`
|
||||
- `access denied for crew:admin: user is not a crew member (required: crew:admin)`
|
||||
|
||||
**Shared Error Constants** (`pkg/hold/pds/auth.go`):
|
||||
- `ErrMissingAuthHeader` - Missing Authorization header
|
||||
- `ErrInvalidAuthFormat` - Invalid Authorization header format
|
||||
- `ErrInvalidAuthScheme` - Invalid scheme (expected Bearer or DPoP)
|
||||
- `ErrInvalidJWTFormat` - Malformed JWT
|
||||
- `ErrMissingISSClaim` / `ErrMissingSubClaim` - Missing JWT claims
|
||||
- `ErrTokenExpired` - Token has expired
|
||||
|
||||
**Embedded PDS Endpoints** (`pkg/hold/pds/xrpc.go`):
|
||||
|
||||
Standard ATProto sync endpoints:
|
||||
- `GET /xrpc/com.atproto.sync.getRepo?did={did}` - Download full repository as CAR file
|
||||
- `GET /xrpc/com.atproto.sync.getRepo?did={did}&since={rev}` - Download repository diff since revision
|
||||
- `GET /xrpc/com.atproto.sync.getRepoStatus?did={did}` - Get repository hosting status and current revision
|
||||
- `GET /xrpc/com.atproto.sync.subscribeRepos` - WebSocket firehose for real-time events
|
||||
- `GET /xrpc/com.atproto.sync.listRepos` - List all repositories (single-user PDS)
|
||||
- `GET /xrpc/com.atproto.sync.getBlob?did={did}&cid={digest}` - Get blob or presigned download URL
|
||||
|
||||
Repository management:
|
||||
- `GET /xrpc/com.atproto.repo.describeRepo?repo={did}` - Repository metadata
|
||||
- `GET /xrpc/com.atproto.repo.getRecord?repo={did}&collection={col}&rkey={key}` - Get record
|
||||
- `GET /xrpc/com.atproto.repo.listRecords?repo={did}&collection={col}` - List records (supports pagination)
|
||||
- `POST /xrpc/com.atproto.repo.deleteRecord` - Delete record (owner/crew admin only)
|
||||
- `POST /xrpc/com.atproto.repo.uploadBlob` - Upload ATProto blob (owner/crew admin only)
|
||||
|
||||
DID resolution:
|
||||
- `GET /.well-known/did.json` - DID document (did:web resolution)
|
||||
- `GET /.well-known/atproto-did` - DID for handle resolution
|
||||
|
||||
Crew management:
|
||||
- `POST /xrpc/io.atcr.hold.requestCrew` - Request crew membership (authenticated users)
|
||||
|
||||
**OCI Multipart Upload Endpoints** (`pkg/hold/oci/xrpc.go`):
|
||||
|
||||
All require blob:write permission via service token authentication:
|
||||
- `POST /xrpc/io.atcr.hold.initiateUpload` - Start multipart upload session
|
||||
- `POST /xrpc/io.atcr.hold.getPartUploadUrl` - Get presigned URL for uploading a part
|
||||
- `PUT /xrpc/io.atcr.hold.uploadPart` - Direct buffered part upload (alternative to presigned URLs)
|
||||
- `POST /xrpc/io.atcr.hold.completeUpload` - Finalize multipart upload and move to final location
|
||||
- `POST /xrpc/io.atcr.hold.abortUpload` - Cancel multipart upload and cleanup temp data
|
||||
|
||||
**AppView-to-Hold Authentication:**
|
||||
- AppView uses service tokens from user's PDS (`com.atproto.server.getServiceAuth`)
|
||||
- Service tokens are scoped to specific hold DIDs and include the user's DID
|
||||
- Hold validates tokens and checks crew membership for authorization
|
||||
- Tokens cached for 50 seconds (valid for 60 seconds from PDS)
|
||||
|
||||
**Configuration:** Environment variables (see `.env.hold.example`)
|
||||
- `HOLD_PUBLIC_URL` - Public URL of hold service (required, used for did:web generation)
|
||||
- `STORAGE_DRIVER` - Storage driver type (s3, filesystem)
|
||||
- `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` - S3 credentials
|
||||
- `S3_BUCKET`, `S3_ENDPOINT` - S3 configuration
|
||||
- `HOLD_PUBLIC` - Allow public reads (default: false)
|
||||
- `HOLD_OWNER` - DID for captain record creation (optional)
|
||||
- `HOLD_ALLOW_ALL_CREW` - Allow any authenticated user to register as crew (default: false)
|
||||
- `HOLD_DATABASE_PATH` - Path for embedded PDS database (required)
|
||||
- `HOLD_DATABASE_KEY_PATH` - Path for PDS signing keys (optional, generated if missing)
|
||||
|
||||
**Deployment:** Can run on Fly.io, Railway, Docker, Kubernetes, etc.
|
||||
|
||||
### ATProto Storage Model
|
||||
|
||||
Manifests are stored as records with this structure:
|
||||
```json
|
||||
{
|
||||
"$type": "io.atcr.manifest",
|
||||
"repository": "myapp",
|
||||
"digest": "sha256:abc123...",
|
||||
"holdDid": "did:web:hold01.atcr.io",
|
||||
"holdEndpoint": "https://hold1.atcr.io",
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"config": { "digest": "sha256:...", "size": 1234 },
|
||||
"layers": [
|
||||
{ "digest": "sha256:...", "size": 5678 }
|
||||
],
|
||||
"manifestBlob": {
|
||||
"$type": "blob",
|
||||
"ref": { "$link": "bafyrei..." },
|
||||
"mimeType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"size": 1234
|
||||
},
|
||||
"createdAt": "2025-09-30T..."
|
||||
}
|
||||
```
|
||||
|
||||
**Key fields:**
|
||||
- `holdDid` - DID of the hold service where blobs are stored (PRIMARY reference, new)
|
||||
- `holdEndpoint` - HTTP URL of hold service (DEPRECATED, kept for backward compatibility)
|
||||
- `manifestBlob` - Reference to manifest blob in ATProto blob storage (CID format)
|
||||
|
||||
Record key = manifest digest (without algorithm prefix)
|
||||
Collection = `io.atcr.manifest`
|
||||
|
||||
### Sailor Profile System
|
||||
|
||||
ATCR uses a "sailor profile" to manage user preferences for hold (storage) selection. The nautical theme reflects the architecture:
|
||||
- **Sailors** = Registry users
|
||||
- **Captains** = Hold owners
|
||||
- **Crew** = Hold members with access
|
||||
- **Holds** = Storage endpoints (BYOS)
|
||||
|
||||
**Profile Record** (`io.atcr.sailor.profile`):
|
||||
```json
|
||||
{
|
||||
"$type": "io.atcr.sailor.profile",
|
||||
"defaultHold": "did:web:hold1.alice.com",
|
||||
"createdAt": "2025-10-02T...",
|
||||
"updatedAt": "2025-10-02T..."
|
||||
}
|
||||
```
|
||||
|
||||
**Profile Management:**
|
||||
- Created automatically on first authentication (OAuth or Basic Auth)
|
||||
- `defaultHold` can be a DID (preferred, e.g., `did:web:hold01.atcr.io`) or legacy URL
|
||||
- If AppView has `default_hold_did` configured, profile gets that as `defaultHold`
|
||||
- Users can update their profile to change default hold (future: via UI)
|
||||
- Setting `defaultHold` to null opts out of defaults (use own holds or AppView default)
|
||||
|
||||
**Hold Resolution Priority** (in `findHoldDID()` in middleware):
|
||||
1. **Profile's `defaultHold`** - User's explicit preference (DID or URL)
|
||||
2. **User's `io.atcr.hold` records** - User's own holds (legacy BYOS model)
|
||||
3. **AppView's `default_hold_did`** - Fallback default (configured in middleware)
|
||||
|
||||
This ensures:
|
||||
- Users can join shared holds by setting their profile's `defaultHold`
|
||||
- Users can opt out of defaults (set `defaultHold` to null)
|
||||
- URL structure remains `atcr.io/<owner>/<image>` (ownership-based, not hold-based)
|
||||
- Hold choice is transparent infrastructure (like choosing an S3 region)
|
||||
|
||||
### Key Design Decisions
|
||||
|
||||
1. **No fork of distribution**: Uses distribution as library, extends via middleware
|
||||
2. **Hybrid storage**: Manifests in ATProto (small), blobs in S3 or BYOS (cheap, scalable)
|
||||
3. **Content addressing**: Manifests stored by digest, blobs deduplicated globally
|
||||
4. **ATProto-native**: Manifests are first-class ATProto records, discoverable via AT Protocol
|
||||
5. **OCI compliant**: Fully compatible with Docker/containerd/podman
|
||||
6. **Account-agnostic AppView**: Server validates any user's token, queries their PDS for config
|
||||
7. **BYOS architecture**: Users can deploy their own storage service, AppView just routes
|
||||
8. **OAuth with DPoP**: Full ATProto OAuth implementation with mandatory DPoP proofs
|
||||
9. **Sailor profile system**: User preferences for hold selection, transparent to image ownership
|
||||
10. **Historical hold references**: Manifests store `holdEndpoint` for immutable blob location tracking
|
||||
|
||||
### Configuration
|
||||
|
||||
**AppView configuration** (environment variables):
|
||||
|
||||
Both AppView and Hold service follow the same pattern: **zero config files, all configuration via environment variables**.
|
||||
|
||||
See `.env.appview.example` for all available options. Key environment variables:
|
||||
|
||||
**Server:**
|
||||
- `ATCR_HTTP_ADDR` - HTTP listen address (default: `:5000`)
|
||||
- `ATCR_BASE_URL` - Public URL for OAuth/JWT realm (auto-detected in dev)
|
||||
- `ATCR_DEFAULT_HOLD_DID` - Default hold DID for blob storage (REQUIRED, e.g., `did:web:hold01.atcr.io`)
|
||||
|
||||
**Authentication:**
|
||||
- `ATCR_AUTH_KEY_PATH` - JWT signing key path (default: `/var/lib/atcr/auth/private-key.pem`)
|
||||
- `ATCR_TOKEN_EXPIRATION` - JWT expiration in seconds (default: 300)
|
||||
|
||||
**UI:**
|
||||
- `ATCR_UI_ENABLED` - Enable web interface (default: true)
|
||||
- `ATCR_UI_DATABASE_PATH` - SQLite database path (default: `/var/lib/atcr/ui.db`)
|
||||
|
||||
**Jetstream:**
|
||||
- `JETSTREAM_URL` - ATProto event stream URL
|
||||
- `ATCR_BACKFILL_ENABLED` - Enable periodic sync (default: false)
|
||||
|
||||
**Legacy:** `config/config.yml` is still supported but deprecated. Use environment variables instead.
|
||||
|
||||
**Hold Service configuration** (environment variables):
|
||||
|
||||
See `.env.hold.example` for all available options. Key environment variables:
|
||||
- `HOLD_PUBLIC_URL` - Public URL of hold service (REQUIRED)
|
||||
- `STORAGE_DRIVER` - Storage backend (s3, filesystem)
|
||||
- `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` - S3 credentials
|
||||
- `S3_BUCKET`, `S3_ENDPOINT` - S3 configuration
|
||||
- `HOLD_PUBLIC` - Allow public reads (default: false)
|
||||
- `HOLD_OWNER` - DID for captain record creation (optional)
|
||||
- `HOLD_ALLOW_ALL_CREW` - Allow any authenticated user to register as crew (default: false)
|
||||
|
||||
**Credential Helper**:
|
||||
- Token storage: `~/.atcr/credential-helper-token.json` (or Docker's credential store)
|
||||
- Contains: Registry JWT issued by AppView (NOT OAuth tokens)
|
||||
- OAuth session managed entirely by AppView
|
||||
|
||||
### Development Notes
|
||||
|
||||
**General:**
|
||||
- Middleware is in `pkg/appview/middleware/` (auth.go, registry.go)
|
||||
- Storage routing is in `pkg/appview/storage/` (routing_repository.go, proxy_blob_store.go)
|
||||
- Hold DID lookups use database queries (no in-memory caching)
|
||||
- Storage drivers imported as `_ "github.com/distribution/distribution/v3/registry/storage/driver/s3-aws"`
|
||||
- Hold service reuses distribution's driver factory for multi-backend support
|
||||
|
||||
**OAuth implementation:**
|
||||
- Client (`pkg/auth/oauth/client.go`) encapsulates all OAuth configuration
|
||||
- Token validation via `com.atproto.server.getSession` ensures no trust in client-provided identity
|
||||
- All ATCR components use standardized `/auth/oauth/callback` path
|
||||
- Client ID generation (localhost query-based vs production metadata URL) handled internally
|
||||
|
||||
### Testing Strategy
|
||||
|
||||
When writing tests:
|
||||
- Mock ATProto client for manifest operations
|
||||
- Mock S3 driver for blob operations
|
||||
- Test name resolution independently
|
||||
- Integration tests require real PDS + S3
|
||||
|
||||
### Common Tasks
|
||||
|
||||
**Adding a new ATProto record type**:
|
||||
The credential helper never manages OAuth tokens directly — AppView owns the OAuth session and issues registry JWTs. See `docs/OAUTH.md` for full OAuth/DPoP implementation details.
|
||||
|
||||
## Hold Authorization
|
||||
|
||||
- **Public hold**: Anonymous reads allowed. Writes require captain or crew with `blob:write`.
|
||||
- **Private hold**: Reads require crew with `blob:read` or `blob:write`. Writes require `blob:write`.
|
||||
- `blob:write` implicitly grants `blob:read`.
|
||||
- Captain has all permissions implicitly.
|
||||
- See `docs/BYOS.md` for full authorization model and permission matrix.
|
||||
|
||||
## Key File Locations
|
||||
|
||||
| Responsibility | Files |
|
||||
|---|---|
|
||||
| ATProto records & collections | `pkg/atproto/lexicon.go` |
|
||||
| DID/handle resolution | `pkg/atproto/resolver.go` |
|
||||
| PDS client (XRPC) | `pkg/atproto/client.go` |
|
||||
| Manifest ↔ ATProto storage | `pkg/atproto/manifest_store.go` |
|
||||
| Sailor profiles | `pkg/atproto/profile.go` |
|
||||
| Registry middleware (identity resolution, hold discovery) | `pkg/appview/middleware/registry.go` |
|
||||
| Auth middleware (JWT validation) | `pkg/appview/middleware/auth.go` |
|
||||
| Content routing (manifests vs blobs) | `pkg/appview/storage/routing_repository.go` |
|
||||
| Blob proxy to hold (presigned URLs) | `pkg/appview/storage/proxy_blob_store.go` |
|
||||
| Request context struct | `pkg/appview/storage/context.go` |
|
||||
| Database queries | `pkg/appview/db/queries.go` |
|
||||
| Database schema | `pkg/appview/db/schema.sql` |
|
||||
| OAuth client & session refresher | `pkg/auth/oauth/client.go` |
|
||||
| OAuth P-256 key management | `pkg/auth/oauth/keys.go` |
|
||||
| Hold PDS endpoints & auth | `pkg/hold/pds/xrpc.go`, `pkg/hold/pds/auth.go` |
|
||||
| Hold DID management (did:web, did:plc, PLC recovery) | `pkg/hold/pds/did.go` |
|
||||
| Hold captain records | `pkg/hold/pds/captain.go` |
|
||||
| Hold crew management | `pkg/hold/pds/crew.go` |
|
||||
| Hold push/pull stats (ATProto records in CAR store) | `pkg/hold/pds/stats.go` |
|
||||
| Hold layer records | `pkg/hold/pds/layer.go` |
|
||||
| Hold scan records & scanner integration | `pkg/hold/pds/scan.go`, `pkg/hold/pds/scan_broadcaster.go` |
|
||||
| Hold Bluesky status posts | `pkg/hold/pds/status.go` |
|
||||
| Hold OCI upload endpoints | `pkg/hold/oci/xrpc.go` |
|
||||
| Hold config | `pkg/hold/config.go` |
|
||||
| AppView config | `pkg/appview/config.go` |
|
||||
| Config marshaling (commented YAML) | `pkg/config/marshal.go` |
|
||||
| Scanner config (env-only) | `scanner/internal/config/config.go` |
|
||||
|
||||
## Configuration
|
||||
|
||||
ATCR uses **Viper** for config. YAML primary, env vars override. Generate defaults with `config init`.
|
||||
|
||||
**Env var convention:** Prefix + YAML path with `_` separators:
|
||||
- AppView: `ATCR_` (e.g., `ATCR_SERVER_DEFAULT_HOLD_DID`)
|
||||
- Hold: `HOLD_` (e.g., `HOLD_SERVER_PUBLIC_URL`)
|
||||
- S3: standard AWS names (`AWS_ACCESS_KEY_ID`, `S3_BUCKET`, `S3_ENDPOINT`)
|
||||
- Scanner: `SCANNER_` prefix (env-only, no Viper)
|
||||
|
||||
See `config-appview.example.yaml` and `config-hold.example.yaml` for all options. Config structs use `comment` struct tags for auto-generating commented YAML via `MarshalCommentedYAML()` in `pkg/config/marshal.go`.
|
||||
|
||||
## Development Gotchas
|
||||
|
||||
- **Do NOT run `npm run css:build` or `npm run js:build` manually** — Air handles these on file change
|
||||
- **Do NOT edit `icons.svg` directly** — SVG icon sprite sheets (`pkg/appview/public/icons.svg`, `pkg/hold/admin/public/icons.svg`) are auto-generated from template icon references during build. Just reference icons by name in templates and the build will include them.
|
||||
- **RoutingRepository is created fresh on EVERY request** (no caching). Previous caching caused stale OAuth sessions and "invalid refresh token" errors. The OAuth refresher caches efficiently already (in-memory + DB).
|
||||
- **Storage driver import**: `_ "github.com/distribution/distribution/v3/registry/storage/driver/s3-aws"` — blank import required
|
||||
- **Hold DID lookups use database** (`manifests` table), not in-memory cache — persistent across restarts
|
||||
- **Context keys** (`auth.method`, `puller.did`) exist because `Repository()` receives `context.Context` from the distribution library interface — context values are the only way to pass data from HTTP middleware into the distribution middleware layer. Both are copied into `RegistryContext` inside `Repository()`.
|
||||
- **OAuth key types**: AppView uses P-256 (ES256) for OAuth, not K-256 like PDS keys
|
||||
- **Confidential vs public clients**: Production uses P-256 key at `/var/lib/atcr/oauth/client.key` (auto-generated); localhost is always public client
|
||||
- **Hold stats are ATProto records in CAR store** — `io.atcr.hold.stats` records are stored via `repomgr.PutRecord()`, not in SQLite. Lost if CAR store is lost without backup.
|
||||
- **PLC auto-update on boot** — When using did:plc, `LoadOrCreateDID()` calls `EnsurePLCCurrent()` every startup. If local signing key or URL doesn't match plc.directory, it auto-updates (requires rotation key on disk).
|
||||
- **Hold CAR store is the source of truth** — Captain, crew, layer, stats, scan records, Bluesky posts, profiles are all ATProto records in the CAR store. SQLite holds only the records index and events.
|
||||
|
||||
## Common Tasks
|
||||
|
||||
**Adding a new ATProto record type:**
|
||||
1. Define schema in `pkg/atproto/lexicon.go`
|
||||
2. Add collection constant (e.g., `MyCollection = "io.atcr.my-type"`)
|
||||
3. Add constructor function (e.g., `NewMyRecord()`)
|
||||
4. Update client methods if needed
|
||||
|
||||
**Modifying storage routing**:
|
||||
**Modifying storage routing:**
|
||||
1. Edit `pkg/appview/storage/routing_repository.go`
|
||||
2. Update `Blobs()` method to change routing logic
|
||||
3. Context is passed via RegistryContext struct (holds DID, PDS endpoint, hold DID, OAuth refresher, etc.)
|
||||
2. Update `Blobs()` or `Manifests()` method
|
||||
3. Context passed via `RegistryContext` struct (`pkg/appview/storage/context.go`)
|
||||
|
||||
**Changing name resolution**:
|
||||
**Changing name resolution:**
|
||||
1. Modify `pkg/atproto/resolver.go` for DID/handle resolution
|
||||
2. Update `pkg/appview/middleware/registry.go` if changing routing logic
|
||||
3. Remember: `findHoldDID()` checks sailor profile, then `io.atcr.hold` records (legacy), then default hold DID
|
||||
2. Update `pkg/appview/middleware/registry.go` if changing routing
|
||||
3. `findHoldDID()` checks: sailor profile → `io.atcr.hold` records (legacy) → default hold DID
|
||||
|
||||
**Working with OAuth client**:
|
||||
- Client is self-contained: pass `baseURL`, it handles client ID/redirect URI/scopes
|
||||
- For AppView server/refresher: use `NewClient(baseURL)` or `NewClientWithKey(baseURL, storedKey)`
|
||||
- For custom scopes: call `client.SetScopes(customScopes)` after initialization
|
||||
- Standard callback path: `/auth/oauth/callback` (used by all ATCR components)
|
||||
- Client methods are consistent across authorization, token exchange, and refresh flows
|
||||
**Working with OAuth client:**
|
||||
- Self-contained: pass `baseURL`, handles client ID/redirect URI/scopes
|
||||
- Standard callback path: `/auth/oauth/callback` (all ATCR components)
|
||||
- See `pkg/auth/oauth/client.go` for `NewClientApp()`, refresher setup
|
||||
|
||||
**Adding BYOS support for a user**:
|
||||
1. User sets environment variables (storage credentials, public URL, HOLD_OWNER)
|
||||
2. User runs hold service - creates captain + crew records in embedded PDS
|
||||
3. Hold creates `io.atcr.hold.captain` + `io.atcr.hold.crew` records
|
||||
4. User sets sailor profile `defaultHold` to point to their hold
|
||||
5. AppView automatically queries hold's PDS and routes blobs to user's storage
|
||||
6. No AppView changes needed - fully decentralized
|
||||
**Adding BYOS support for a user:**
|
||||
1. User configures hold YAML (storage credentials, public URL, owner DID)
|
||||
2. User runs hold service — creates captain + crew records in embedded PDS
|
||||
3. User sets sailor profile `defaultHold` to their hold's DID
|
||||
4. AppView automatically routes blobs to user's storage — no AppView changes needed
|
||||
|
||||
**Supporting a new storage backend**:
|
||||
1. Ensure driver is registered in `cmd/hold/main.go` imports
|
||||
2. Distribution supports: S3, Azure, GCS, Swift, filesystem, OSS
|
||||
3. For custom drivers: implement `storagedriver.StorageDriver` interface
|
||||
4. Add case to `buildStorageConfig()` in `cmd/hold/main.go`
|
||||
5. Update `.env.example` with new driver's env vars
|
||||
|
||||
**Working with the database**:
|
||||
- **Base schema** defined in `pkg/appview/db/schema.sql` - source of truth for fresh installations
|
||||
- **Migrations** in `pkg/appview/db/migrations/*.yaml` - only for ALTER/UPDATE/DELETE on existing databases
|
||||
- **Queries** in `pkg/appview/db/queries.go`
|
||||
- **Stores** for OAuth, devices, sessions in separate files
|
||||
- **Execution order**: schema.sql first, then migrations (automatically on startup)
|
||||
- **Database path** configurable via `ATCR_UI_DATABASE_PATH` env var
|
||||
**Working with the database:**
|
||||
- **Base schema**: `pkg/appview/db/schema.sql` — source of truth for fresh installs
|
||||
- **Migrations**: `pkg/appview/db/migrations/*.yaml` — only for ALTER/UPDATE/DELETE on existing DBs
|
||||
- **Adding new tables**: Add to `schema.sql` only (no migration needed)
|
||||
- **Altering tables**: Create migration AND update `schema.sql` to keep them in sync
|
||||
|
||||
**Adding web UI features**:
|
||||
**Hold DID recovery/migration (did:plc):**
|
||||
1. Back up `rotation.key` and DID string (from `did.txt` or plc.directory)
|
||||
2. Set `database.did_method: plc` and `database.did: "did:plc:..."` in config
|
||||
3. Provide `rotation_key` (multibase K-256 private key) — signing key auto-generates if missing
|
||||
4. On boot: `LoadOrCreateDID()` adopts the DID, `EnsurePLCCurrent()` auto-updates PLC directory if keys/URL changed
|
||||
5. Without rotation key: hold boots but logs warning about PLC mismatch
|
||||
|
||||
**Adding web UI features:**
|
||||
- Add handler in `pkg/appview/handlers/`
|
||||
- Register route in `cmd/appview/serve.go`
|
||||
- Register route in `pkg/appview/routes/routes.go`
|
||||
- Create template in `pkg/appview/templates/pages/`
|
||||
- Use existing auth middleware for protected routes
|
||||
- API endpoints return JSON, pages return HTML
|
||||
|
||||
## Important Context Values
|
||||
## Testing Strategy
|
||||
|
||||
When working with the codebase, routing information is passed via the `RegistryContext` struct (`pkg/appview/storage/context.go`):
|
||||
|
||||
- `DID` - User's DID (e.g., `did:plc:alice123`)
|
||||
- `PDSEndpoint` - User's PDS endpoint (e.g., `https://bsky.social`)
|
||||
- `HoldDID` - Hold service DID (e.g., `did:web:hold01.atcr.io`)
|
||||
- `Repository` - Image repository name (e.g., `myapp`)
|
||||
- `ATProtoClient` - Client for calling user's PDS with OAuth/Basic Auth
|
||||
- `Refresher` - OAuth token refresher for service token requests
|
||||
- `Database` - Database for metrics tracking
|
||||
- `Authorizer` - Hold authorizer for access control
|
||||
|
||||
Legacy context keys (deprecated):
|
||||
- `hold.did` - Hold DID (now in RegistryContext)
|
||||
- `auth.did` - Authenticated DID from validated token (now in auth middleware)
|
||||
- Mock ATProto client for manifest operations
|
||||
- Mock S3 driver for blob operations
|
||||
- Test name resolution independently
|
||||
- Integration tests require real PDS + S3
|
||||
|
||||
## Documentation References
|
||||
|
||||
- **BYOS Architecture**: See `docs/BYOS.md` for complete BYOS documentation
|
||||
- **OAuth Implementation**: See `docs/OAUTH.md` for OAuth/DPoP flow details
|
||||
- **BYOS Architecture**: `docs/BYOS.md`
|
||||
- **OAuth Implementation**: `docs/OAUTH.md`
|
||||
- **Hold Service**: `docs/hold.md`
|
||||
- **AppView**: `docs/appview.md`
|
||||
- **Hold XRPC Endpoints**: `docs/HOLD_XRPC_ENDPOINTS.md`
|
||||
- **Development Guide**: `docs/DEVELOPMENT.md`
|
||||
- **Billing/Quotas**: `docs/BILLING.md`, `docs/QUOTAS.md`
|
||||
- **Scanning**: `docs/SBOM_SCANNING.md`
|
||||
- **ATProto Spec**: https://atproto.com/specs/oauth
|
||||
- **OCI Distribution Spec**: https://github.com/opencontainers/distribution-spec
|
||||
- **DPoP RFC**: https://datatracker.ietf.org/doc/html/rfc9449
|
||||
- **PAR RFC**: https://datatracker.ietf.org/doc/html/rfc9126
|
||||
- **PKCE RFC**: https://datatracker.ietf.org/doc/html/rfc7636
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
# Production build for ATCR AppView
|
||||
# Result: ~30MB scratch image with static binary
|
||||
FROM docker.io/golang:1.25.4-trixie AS builder
|
||||
FROM docker.io/golang:1.25.7-trixie AS builder
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends libsqlite3-dev && \
|
||||
apt-get install -y --no-install-recommends libsqlite3-dev nodejs npm && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app
|
||||
@@ -15,19 +15,27 @@ RUN go mod download
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN npm ci
|
||||
RUN go generate ./...
|
||||
|
||||
RUN CGO_ENABLED=1 go build \
|
||||
-ldflags="-s -w -linkmode external -extldflags '-static'" \
|
||||
-tags sqlite_omit_load_extension \
|
||||
-trimpath \
|
||||
-o atcr-appview ./cmd/appview
|
||||
|
||||
RUN CGO_ENABLED=0 go build \
|
||||
-ldflags="-s -w" \
|
||||
-trimpath \
|
||||
-o healthcheck ./cmd/healthcheck
|
||||
|
||||
# Minimal runtime
|
||||
FROM scratch
|
||||
|
||||
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||
COPY --from=builder /usr/share/zoneinfo /usr/share/zoneinfo
|
||||
COPY --from=builder /app/atcr-appview /atcr-appview
|
||||
COPY --from=builder /app/healthcheck /healthcheck
|
||||
|
||||
EXPOSE 5000
|
||||
|
||||
|
||||
@@ -1,12 +1,15 @@
|
||||
# Development image with Air hot reload
|
||||
# Build: docker build -f Dockerfile.dev -t atcr-appview-dev .
|
||||
# Run: docker run -v $(pwd):/app -p 5000:5000 atcr-appview-dev
|
||||
FROM docker.io/golang:1.25.4-trixie
|
||||
# Build: docker build -f Dockerfile.dev -t atcr-dev .
|
||||
# Run: docker run -v $(pwd):/app -p 5000:5000 atcr-dev
|
||||
FROM docker.io/golang:1.25.7-trixie
|
||||
|
||||
ARG AIR_CONFIG=.air.toml
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ENV AIR_CONFIG=${AIR_CONFIG}
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends sqlite3 libsqlite3-dev curl && \
|
||||
apt-get install -y --no-install-recommends sqlite3 libsqlite3-dev curl nodejs npm && \
|
||||
rm -rf /var/lib/apt/lists/* && \
|
||||
go install github.com/air-verse/air@latest
|
||||
|
||||
@@ -17,5 +20,4 @@ COPY go.mod go.sum ./
|
||||
RUN go mod download
|
||||
|
||||
# For development: source mounted as volume, Air handles builds
|
||||
EXPOSE 5000
|
||||
CMD ["air", "-c", ".air.toml"]
|
||||
CMD ["sh", "-c", "air -c ${AIR_CONFIG}"]
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
FROM docker.io/golang:1.25.4-trixie AS builder
|
||||
FROM docker.io/golang:1.25.7-trixie AS builder
|
||||
|
||||
# Build argument to enable Stripe billing integration
|
||||
# Usage: docker build --build-arg BILLING_ENABLED=true -f Dockerfile.hold .
|
||||
ARG BILLING_ENABLED=false
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends sqlite3 libsqlite3-dev && \
|
||||
apt-get install -y --no-install-recommends sqlite3 libsqlite3-dev nodejs npm && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /build
|
||||
@@ -13,11 +17,31 @@ RUN go mod download
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN CGO_ENABLED=1 go build \
|
||||
-ldflags="-s -w -linkmode external -extldflags '-static'" \
|
||||
-tags sqlite_omit_load_extension \
|
||||
# Build frontend assets (Tailwind CSS, JS bundle, SVG icons)
|
||||
RUN npm ci
|
||||
RUN go generate ./...
|
||||
|
||||
# Conditionally add billing tag based on build arg
|
||||
RUN if [ "$BILLING_ENABLED" = "true" ]; then \
|
||||
echo "Building with Stripe billing support"; \
|
||||
CGO_ENABLED=1 go build \
|
||||
-ldflags="-s -w -linkmode external -extldflags '-static'" \
|
||||
-tags "sqlite_omit_load_extension,billing" \
|
||||
-trimpath \
|
||||
-o atcr-hold ./cmd/hold; \
|
||||
else \
|
||||
echo "Building without billing support"; \
|
||||
CGO_ENABLED=1 go build \
|
||||
-ldflags="-s -w -linkmode external -extldflags '-static'" \
|
||||
-tags sqlite_omit_load_extension \
|
||||
-trimpath \
|
||||
-o atcr-hold ./cmd/hold; \
|
||||
fi
|
||||
|
||||
RUN CGO_ENABLED=0 go build \
|
||||
-ldflags="-s -w" \
|
||||
-trimpath \
|
||||
-o atcr-hold ./cmd/hold
|
||||
-o healthcheck ./cmd/healthcheck
|
||||
|
||||
# ==========================================
|
||||
# Stage 2: Minimal FROM scratch runtime
|
||||
@@ -30,6 +54,7 @@ COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||
COPY --from=builder /usr/share/zoneinfo /usr/share/zoneinfo
|
||||
# Copy optimized binary (SQLite embedded)
|
||||
COPY --from=builder /build/atcr-hold /atcr-hold
|
||||
COPY --from=builder /build/healthcheck /healthcheck
|
||||
|
||||
# Expose default port
|
||||
EXPOSE 8080
|
||||
@@ -46,3 +71,4 @@ LABEL org.opencontainers.image.title="ATCR Hold Service" \
|
||||
io.atcr.readme="https://tangled.org/evan.jarrett.net/at-container-registry/raw/main/docs/hold.md"
|
||||
|
||||
ENTRYPOINT ["/atcr-hold"]
|
||||
CMD ["serve"]
|
||||
|
||||
53
Dockerfile.scanner
Normal file
53
Dockerfile.scanner
Normal file
@@ -0,0 +1,53 @@
|
||||
FROM docker.io/golang:1.25.7-trixie AS builder
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends sqlite3 libsqlite3-dev && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
# Disable workspace mode — go.work references modules not in the Docker context
|
||||
ENV GOWORK=off
|
||||
|
||||
# Copy module definitions first for layer caching
|
||||
COPY go.mod go.sum ./
|
||||
COPY scanner/go.mod scanner/go.sum ./scanner/
|
||||
|
||||
RUN cd scanner && go mod download
|
||||
|
||||
# Copy full source
|
||||
COPY . .
|
||||
|
||||
RUN cd scanner && CGO_ENABLED=1 go build \
|
||||
-ldflags="-s -w -linkmode external -extldflags '-static'" \
|
||||
-trimpath \
|
||||
-o /build/atcr-scanner ./cmd/scanner
|
||||
|
||||
# ==========================================
|
||||
# Stage 2: Minimal FROM scratch runtime
|
||||
# ==========================================
|
||||
FROM scratch
|
||||
|
||||
# Copy CA certificates for HTTPS (presigned URL downloads)
|
||||
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||
# Copy timezone data for timestamp formatting
|
||||
COPY --from=builder /usr/share/zoneinfo /usr/share/zoneinfo
|
||||
# Copy binary
|
||||
COPY --from=builder /build/atcr-scanner /atcr-scanner
|
||||
|
||||
# Expose health endpoint port
|
||||
EXPOSE 9090
|
||||
|
||||
# OCI image annotations
|
||||
LABEL org.opencontainers.image.title="ATCR Scanner" \
|
||||
org.opencontainers.image.description="ATCR Scanner - container image vulnerability scanner with Syft and Grype" \
|
||||
org.opencontainers.image.authors="ATCR Contributors" \
|
||||
org.opencontainers.image.source="https://tangled.org/evan.jarrett.net/at-container-registry" \
|
||||
org.opencontainers.image.documentation="https://tangled.org/evan.jarrett.net/at-container-registry" \
|
||||
org.opencontainers.image.licenses="MIT" \
|
||||
org.opencontainers.image.version="0.1.0"
|
||||
|
||||
ENTRYPOINT ["/atcr-scanner"]
|
||||
CMD ["serve"]
|
||||
@@ -4,28 +4,36 @@
|
||||
class DockerCredentialAtcr < Formula
|
||||
desc "Docker credential helper for ATCR (ATProto Container Registry)"
|
||||
homepage "https://atcr.io"
|
||||
url "https://github.com/atcr-io/atcr/archive/refs/tags/v0.0.1.tar.gz"
|
||||
sha256 "REPLACE_WITH_TARBALL_SHA256"
|
||||
version "0.0.1"
|
||||
license "MIT"
|
||||
head "https://github.com/atcr-io/atcr.git", branch: "main"
|
||||
|
||||
depends_on "go" => :build
|
||||
on_macos do
|
||||
on_arm do
|
||||
url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v0.0.1/download/docker-credential-atcr_0.0.1_Darwin_arm64.tar.gz"
|
||||
sha256 "REPLACE_WITH_SHA256"
|
||||
end
|
||||
on_intel do
|
||||
url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v0.0.1/download/docker-credential-atcr_0.0.1_Darwin_x86_64.tar.gz"
|
||||
sha256 "REPLACE_WITH_SHA256"
|
||||
end
|
||||
end
|
||||
|
||||
on_linux do
|
||||
on_arm do
|
||||
url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v0.0.1/download/docker-credential-atcr_0.0.1_Linux_arm64.tar.gz"
|
||||
sha256 "REPLACE_WITH_SHA256"
|
||||
end
|
||||
on_intel do
|
||||
url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v0.0.1/download/docker-credential-atcr_0.0.1_Linux_x86_64.tar.gz"
|
||||
sha256 "REPLACE_WITH_SHA256"
|
||||
end
|
||||
end
|
||||
|
||||
def install
|
||||
# Build the credential helper binary
|
||||
# Use ldflags to inject version information
|
||||
ldflags = %W[
|
||||
-s -w
|
||||
-X main.version=#{version}
|
||||
-X main.commit=#{tap.user}
|
||||
-X main.date=#{time.iso8601}
|
||||
]
|
||||
|
||||
system "go", "build", *std_go_args(ldflags:, output: bin/"docker-credential-atcr"), "./cmd/credential-helper"
|
||||
bin.install "docker-credential-atcr"
|
||||
end
|
||||
|
||||
test do
|
||||
# Test that the binary exists and is executable
|
||||
assert_match version.to_s, shell_output("#{bin}/docker-credential-atcr version 2>&1")
|
||||
end
|
||||
|
||||
@@ -34,21 +42,18 @@ class DockerCredentialAtcr < Formula
|
||||
To configure Docker to use ATCR credential helper, add the following
|
||||
to your ~/.docker/config.json:
|
||||
|
||||
{
|
||||
"credHelpers": {
|
||||
"atcr.io": "atcr"
|
||||
{
|
||||
"credHelpers": {
|
||||
"atcr.io": "atcr"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Note: The credential helper name is "atcr" (Docker automatically prefixes
|
||||
with "docker-credential-" when looking for the binary).
|
||||
Or run: docker-credential-atcr configure-docker
|
||||
|
||||
To authenticate with ATCR:
|
||||
docker push atcr.io/<your-handle>/<image>:latest
|
||||
|
||||
This will open your browser to complete the OAuth device flow.
|
||||
|
||||
Configuration is stored in: ~/.atcr/device.json
|
||||
Configuration is stored in: ~/.atcr/config.json
|
||||
EOS
|
||||
end
|
||||
end
|
||||
|
||||
32
Makefile
32
Makefile
@@ -2,8 +2,9 @@
|
||||
# Build targets for the ATProto Container Registry
|
||||
|
||||
.PHONY: all build build-appview build-hold build-credential-helper build-oauth-helper \
|
||||
generate test test-race test-verbose lint clean help install-credential-helper \
|
||||
develop develop-detached develop-down dev
|
||||
generate test test-race test-verbose lint lex-lint clean help install-credential-helper \
|
||||
develop develop-detached develop-down dev \
|
||||
docker docker-appview docker-hold docker-scanner
|
||||
|
||||
.DEFAULT_GOAL := help
|
||||
|
||||
@@ -16,8 +17,8 @@ all: generate build ## Generate assets and build all binaries (default)
|
||||
|
||||
# Generated asset files
|
||||
GENERATED_ASSETS = \
|
||||
pkg/appview/static/js/htmx.min.js \
|
||||
pkg/appview/static/js/lucide.min.js \
|
||||
pkg/appview/public/js/htmx.min.js \
|
||||
pkg/appview/public/js/lucide.min.js \
|
||||
pkg/appview/licenses/spdx-licenses.json
|
||||
|
||||
generate: $(GENERATED_ASSETS) ## Run go generate to download vendor assets
|
||||
@@ -40,12 +41,12 @@ build-hold: $(GENERATED_ASSETS) ## Build hold binary only
|
||||
@mkdir -p bin
|
||||
go build -o bin/atcr-hold ./cmd/hold
|
||||
|
||||
build-credential-helper: $(GENERATED_ASSETS) ## Build credential helper only
|
||||
build-credential-helper: ## Build credential helper only
|
||||
@echo "→ Building credential helper..."
|
||||
@mkdir -p bin
|
||||
go build -o bin/docker-credential-atcr ./cmd/credential-helper
|
||||
|
||||
build-oauth-helper: $(GENERATED_ASSETS) ## Build OAuth helper only
|
||||
build-oauth-helper: ## Build OAuth helper only
|
||||
@echo "→ Building OAuth helper..."
|
||||
@mkdir -p bin
|
||||
go build -o bin/oauth-helper ./cmd/oauth-helper
|
||||
@@ -74,6 +75,9 @@ lint: check-golangci-lint ## Run golangci-lint
|
||||
@echo "→ Running golangci-lint..."
|
||||
golangci-lint run ./...
|
||||
|
||||
lex-lint: ## Lint ATProto lexicon schemas
|
||||
goat lex lint ./lexicons/
|
||||
|
||||
##@ Install Targets
|
||||
|
||||
install-credential-helper: build-credential-helper ## Install credential helper to /usr/local/sbin
|
||||
@@ -89,6 +93,20 @@ dev: $(GENERATED_ASSETS) ## Run AppView locally with Air hot reload
|
||||
|
||||
##@ Docker Targets
|
||||
|
||||
docker: docker-appview docker-hold docker-scanner ## Build all Docker images
|
||||
|
||||
docker-appview: ## Build appview Docker image
|
||||
@echo "→ Building appview Docker image..."
|
||||
docker build -f Dockerfile.appview -t atcr.io/atcr.io/appview:latest .
|
||||
|
||||
docker-hold: ## Build hold Docker image
|
||||
@echo "→ Building hold Docker image..."
|
||||
docker build -f Dockerfile.hold -t atcr.io/atcr.io/hold:latest .
|
||||
|
||||
docker-scanner: ## Build scanner Docker image
|
||||
@echo "→ Building scanner Docker image..."
|
||||
docker build -f Dockerfile.scanner -t atcr.io/atcr.io/scanner:latest .
|
||||
|
||||
develop: ## Build and start docker-compose with Air hot reload
|
||||
@echo "→ Building Docker images..."
|
||||
docker-compose build
|
||||
@@ -113,7 +131,5 @@ develop-down: ## Stop docker-compose services
|
||||
clean: ## Remove built binaries and generated assets
|
||||
@echo "→ Cleaning build artifacts..."
|
||||
rm -rf bin/
|
||||
rm -f pkg/appview/static/js/htmx.min.js
|
||||
rm -f pkg/appview/static/js/lucide.min.js
|
||||
rm -f pkg/appview/licenses/spdx-licenses.json
|
||||
@echo "✓ Clean complete"
|
||||
|
||||
63
README.md
63
README.md
@@ -77,30 +77,33 @@ See **[INSTALLATION.md](./INSTALLATION.md)** for detailed installation instructi
|
||||
|
||||
### Running Your Own AppView
|
||||
|
||||
**Using Docker Compose:**
|
||||
```bash
|
||||
cp .env.appview.example .env.appview
|
||||
# Edit .env.appview with your configuration
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
**Local development:**
|
||||
```bash
|
||||
# Build
|
||||
go build -o bin/atcr-appview ./cmd/appview
|
||||
go build -o bin/atcr-hold ./cmd/hold
|
||||
|
||||
# Configure
|
||||
cp .env.appview.example .env.appview
|
||||
# Edit .env.appview - set ATCR_DEFAULT_HOLD
|
||||
source .env.appview
|
||||
# Generate a config file with all defaults
|
||||
./bin/atcr-appview config init config-appview.yaml
|
||||
# Edit config-appview.yaml — set server.default_hold_did at minimum
|
||||
|
||||
# Run
|
||||
./bin/atcr-appview serve
|
||||
./bin/atcr-appview serve --config config-appview.yaml
|
||||
```
|
||||
|
||||
**Using Docker:**
|
||||
```bash
|
||||
docker build -f Dockerfile.appview -t atcr-appview:latest .
|
||||
docker run -d -p 5000:5000 \
|
||||
-v ./config-appview.yaml:/config.yaml:ro \
|
||||
-v atcr-data:/var/lib/atcr \
|
||||
atcr-appview:latest serve --config /config.yaml
|
||||
```
|
||||
|
||||
See **[deploy/README.md](./deploy/README.md)** for production deployment.
|
||||
|
||||
### Running Your Own Hold (BYOS Storage)
|
||||
|
||||
See **[docs/hold.md](./docs/hold.md)** for deploying your own storage backend.
|
||||
|
||||
## Development
|
||||
|
||||
### Building from Source
|
||||
@@ -122,23 +125,43 @@ go test -race ./...
|
||||
cmd/
|
||||
├── appview/ # Registry server + web UI
|
||||
├── hold/ # Storage service (BYOS)
|
||||
└── credential-helper/ # Docker credential helper
|
||||
├── credential-helper/ # Docker credential helper
|
||||
├── oauth-helper/ # OAuth debug tool
|
||||
├── healthcheck/ # HTTP health check (for Docker)
|
||||
├── db-migrate/ # SQLite → libsql migration
|
||||
├── usage-report/ # Hold storage usage report
|
||||
├── record-query/ # Query ATProto relay by collection
|
||||
└── s3-test/ # S3 connectivity test
|
||||
|
||||
pkg/
|
||||
├── appview/
|
||||
│ ├── db/ # SQLite database (migrations, queries, stores)
|
||||
│ ├── handlers/ # HTTP handlers (home, repo, search, auth, settings)
|
||||
│ ├── holdhealth/ # Hold service health checker
|
||||
│ ├── jetstream/ # ATProto Jetstream consumer
|
||||
│ ├── middleware/ # Auth & registry middleware
|
||||
│ ├── storage/ # Storage routing (hold cache, blob proxy, repository)
|
||||
│ ├── static/ # Static assets (JS, CSS, install scripts)
|
||||
│ ├── ogcard/ # OpenGraph image generation
|
||||
│ ├── readme/ # Repository README fetcher
|
||||
│ ├── routes/ # HTTP route registration
|
||||
│ ├── storage/ # Storage routing (blob proxy, manifest store)
|
||||
│ ├── public/ # Static assets (JS, CSS, install scripts)
|
||||
│ └── templates/ # HTML templates
|
||||
├── atproto/ # ATProto client, records, manifest/tag stores
|
||||
├── auth/
|
||||
│ ├── oauth/ # OAuth client, server, refresher, storage
|
||||
│ ├── oauth/ # OAuth client, refresher, storage
|
||||
│ ├── token/ # JWT issuer, validator, claims
|
||||
│ └── atproto/ # Session validation
|
||||
└── hold/ # Hold service (authorization, storage, multipart, S3)
|
||||
│ └── holdlocal/ # Local hold authorization
|
||||
├── config/ # Config marshaling (commented YAML)
|
||||
├── hold/
|
||||
│ ├── admin/ # Admin web UI
|
||||
│ ├── billing/ # Stripe billing integration
|
||||
│ ├── db/ # Vendored carstore (go-libsql)
|
||||
│ ├── gc/ # Garbage collection
|
||||
│ ├── oci/ # OCI upload endpoints
|
||||
│ ├── pds/ # Embedded PDS (DID, captain, crew, stats, scans)
|
||||
│ └── quota/ # Storage quotas
|
||||
├── logging/ # Structured logging + remote shipping
|
||||
└── s3/ # S3 client utilities
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
@@ -1,18 +1,102 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/distribution/distribution/v3/registry"
|
||||
_ "github.com/distribution/distribution/v3/registry/auth/token"
|
||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"atcr.io/pkg/appview"
|
||||
|
||||
// Register our custom middleware
|
||||
_ "atcr.io/pkg/appview/middleware"
|
||||
|
||||
// Register built-in themes
|
||||
_ "atcr.io/themes/seamark"
|
||||
)
|
||||
|
||||
var configFile string
|
||||
|
||||
var serveCmd = &cobra.Command{
|
||||
Use: "serve",
|
||||
Short: "Start the ATCR registry server",
|
||||
Long: `Start the ATCR registry server with authentication endpoints.
|
||||
|
||||
Configuration is loaded in layers: defaults -> YAML file -> environment variables.
|
||||
Use --config to specify a YAML configuration file.
|
||||
Environment variables always override file values.`,
|
||||
Args: cobra.NoArgs,
|
||||
RunE: serveRegistry,
|
||||
}
|
||||
|
||||
var configCmd = &cobra.Command{
|
||||
Use: "config",
|
||||
Short: "Configuration management commands",
|
||||
}
|
||||
|
||||
var configInitCmd = &cobra.Command{
|
||||
Use: "init [path]",
|
||||
Short: "Generate an example configuration file",
|
||||
Long: `Generate an example YAML configuration file with all available options.
|
||||
If path is provided, writes to that file. Otherwise writes to stdout.`,
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
yamlBytes, err := appview.ExampleYAML()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate example config: %w", err)
|
||||
}
|
||||
if len(args) == 1 {
|
||||
if err := os.WriteFile(args[0], yamlBytes, 0644); err != nil {
|
||||
return fmt.Errorf("failed to write config file: %w", err)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "Wrote example config to %s\n", args[0])
|
||||
return nil
|
||||
}
|
||||
fmt.Print(string(yamlBytes))
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
serveCmd.Flags().StringVarP(&configFile, "config", "c", "", "path to YAML configuration file")
|
||||
|
||||
configCmd.AddCommand(configInitCmd)
|
||||
|
||||
// Replace the default serve command with our custom one
|
||||
for i, cmd := range registry.RootCmd.Commands() {
|
||||
if cmd.Name() == "serve" {
|
||||
registry.RootCmd.Commands()[i] = serveCmd
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
registry.RootCmd.AddCommand(configCmd)
|
||||
}
|
||||
|
||||
func serveRegistry(cmd *cobra.Command, args []string) error {
|
||||
cfg, err := appview.LoadConfig(configFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load config: %w", err)
|
||||
}
|
||||
|
||||
branding, err := appview.LookupTheme(cfg.UI.Theme)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
server, err := appview.NewAppViewServer(cfg, branding)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize server: %w", err)
|
||||
}
|
||||
|
||||
return server.Serve()
|
||||
}
|
||||
|
||||
func main() {
|
||||
// The serve command is registered in serve.go via init()
|
||||
// The serve command is registered above via init()
|
||||
// Just execute the root command
|
||||
if err := registry.RootCmd.Execute(); err != nil {
|
||||
os.Exit(1)
|
||||
|
||||
@@ -1,560 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/distribution/distribution/v3/registry"
|
||||
"github.com/distribution/distribution/v3/registry/handlers"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"atcr.io/pkg/appview/middleware"
|
||||
"atcr.io/pkg/appview/storage"
|
||||
"atcr.io/pkg/atproto"
|
||||
"atcr.io/pkg/auth"
|
||||
"atcr.io/pkg/auth/oauth"
|
||||
"atcr.io/pkg/auth/token"
|
||||
"atcr.io/pkg/logging"
|
||||
|
||||
// UI components
|
||||
"atcr.io/pkg/appview"
|
||||
"atcr.io/pkg/appview/db"
|
||||
uihandlers "atcr.io/pkg/appview/handlers"
|
||||
"atcr.io/pkg/appview/holdhealth"
|
||||
"atcr.io/pkg/appview/jetstream"
|
||||
"atcr.io/pkg/appview/readme"
|
||||
"atcr.io/pkg/appview/routes"
|
||||
"github.com/go-chi/chi/v5"
|
||||
chimiddleware "github.com/go-chi/chi/v5/middleware"
|
||||
)
|
||||
|
||||
var serveCmd = &cobra.Command{
|
||||
Use: "serve",
|
||||
Short: "Start the ATCR registry server",
|
||||
Long: `Start the ATCR registry server with authentication endpoints.
|
||||
|
||||
Configuration is loaded from environment variables.
|
||||
See .env.appview.example for available environment variables.`,
|
||||
Args: cobra.NoArgs,
|
||||
RunE: serveRegistry,
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Replace the default serve command with our custom one
|
||||
for i, cmd := range registry.RootCmd.Commands() {
|
||||
if cmd.Name() == "serve" {
|
||||
registry.RootCmd.Commands()[i] = serveCmd
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func serveRegistry(cmd *cobra.Command, args []string) error {
|
||||
// Load configuration from environment variables
|
||||
cfg, err := appview.LoadConfigFromEnv()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load config from environment: %w", err)
|
||||
}
|
||||
|
||||
// Initialize structured logging
|
||||
logging.InitLogger(cfg.LogLevel)
|
||||
|
||||
slog.Info("Configuration loaded successfully from environment")
|
||||
|
||||
// Initialize UI database first (required for all stores)
|
||||
slog.Info("Initializing UI database", "path", cfg.UI.DatabasePath)
|
||||
uiDatabase, uiReadOnlyDB, uiSessionStore := db.InitializeDatabase(cfg.UI.Enabled, cfg.UI.DatabasePath, cfg.UI.SkipDBMigrations)
|
||||
if uiDatabase == nil {
|
||||
return fmt.Errorf("failed to initialize UI database - required for session storage")
|
||||
}
|
||||
|
||||
// Initialize hold health checker
|
||||
slog.Info("Initializing hold health checker", "cache_ttl", cfg.Health.CacheTTL)
|
||||
healthChecker := holdhealth.NewChecker(cfg.Health.CacheTTL)
|
||||
|
||||
// Initialize README fetcher for rendering repo page descriptions
|
||||
readmeFetcher := readme.NewFetcher()
|
||||
|
||||
// Start background health check worker
|
||||
startupDelay := 5 * time.Second // Wait for hold services to start (Docker compose)
|
||||
dbAdapter := holdhealth.NewDBAdapter(uiDatabase)
|
||||
healthWorker := holdhealth.NewWorkerWithStartupDelay(healthChecker, dbAdapter, cfg.Health.CheckInterval, startupDelay)
|
||||
|
||||
// Create context for worker lifecycle management
|
||||
workerCtx, workerCancel := context.WithCancel(context.Background())
|
||||
defer workerCancel() // Ensure context is cancelled on all exit paths
|
||||
healthWorker.Start(workerCtx)
|
||||
slog.Info("Hold health worker started", "startup_delay", startupDelay, "refresh_interval", cfg.Health.CheckInterval, "cache_ttl", cfg.Health.CacheTTL)
|
||||
|
||||
// Initialize OAuth components
|
||||
slog.Info("Initializing OAuth components")
|
||||
|
||||
// Create OAuth session storage (SQLite-backed)
|
||||
oauthStore := db.NewOAuthStore(uiDatabase)
|
||||
slog.Info("Using SQLite for OAuth session storage")
|
||||
|
||||
// Create device store (SQLite-backed)
|
||||
deviceStore := db.NewDeviceStore(uiDatabase)
|
||||
slog.Info("Using SQLite for device storage")
|
||||
|
||||
// Get base URL and default hold DID from config
|
||||
baseURL := cfg.Server.BaseURL
|
||||
defaultHoldDID := cfg.Server.DefaultHoldDID
|
||||
testMode := cfg.Server.TestMode
|
||||
|
||||
slog.Debug("Base URL for OAuth", "base_url", baseURL)
|
||||
if testMode {
|
||||
slog.Info("TEST_MODE enabled - will use HTTP for local DID resolution and transition:generic scope")
|
||||
}
|
||||
|
||||
// Create OAuth client app (automatically configures confidential client for production)
|
||||
desiredScopes := oauth.GetDefaultScopes(defaultHoldDID)
|
||||
oauthClientApp, err := oauth.NewClientApp(baseURL, oauthStore, desiredScopes, cfg.Server.OAuthKeyPath, cfg.Server.ClientName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create OAuth client app: %w", err)
|
||||
}
|
||||
if testMode {
|
||||
slog.Info("Using OAuth scopes with transition:generic (test mode)")
|
||||
} else {
|
||||
slog.Info("Using OAuth scopes with RPC scope (production mode)")
|
||||
}
|
||||
|
||||
// Invalidate sessions with mismatched scopes on startup
|
||||
// This ensures all users have the latest required scopes after deployment
|
||||
invalidatedCount, err := oauthStore.InvalidateSessionsWithMismatchedScopes(context.Background(), desiredScopes)
|
||||
if err != nil {
|
||||
slog.Warn("Failed to invalidate sessions with mismatched scopes", "error", err)
|
||||
} else if invalidatedCount > 0 {
|
||||
slog.Info("Invalidated OAuth sessions due to scope changes", "count", invalidatedCount)
|
||||
}
|
||||
|
||||
// Create oauth token refresher
|
||||
refresher := oauth.NewRefresher(oauthClientApp)
|
||||
|
||||
// Wire up UI session store to refresher so it can invalidate UI sessions on OAuth failures
|
||||
if uiSessionStore != nil {
|
||||
refresher.SetUISessionStore(uiSessionStore)
|
||||
}
|
||||
|
||||
// Set global refresher for middleware
|
||||
middleware.SetGlobalRefresher(refresher)
|
||||
|
||||
// Set global database for pull/push metrics tracking
|
||||
middleware.SetGlobalDatabase(uiDatabase)
|
||||
|
||||
// Create RemoteHoldAuthorizer for hold authorization with caching
|
||||
holdAuthorizer := auth.NewRemoteHoldAuthorizer(uiDatabase, testMode)
|
||||
middleware.SetGlobalAuthorizer(holdAuthorizer)
|
||||
slog.Info("Hold authorizer initialized with database caching")
|
||||
|
||||
// Initialize Jetstream workers (background services before HTTP routes)
|
||||
initializeJetstream(uiDatabase, &cfg.Jetstream, defaultHoldDID, testMode, refresher)
|
||||
|
||||
// Create main chi router
|
||||
mainRouter := chi.NewRouter()
|
||||
|
||||
// Add core middleware
|
||||
mainRouter.Use(chimiddleware.Logger)
|
||||
mainRouter.Use(chimiddleware.Recoverer)
|
||||
mainRouter.Use(chimiddleware.GetHead) // Automatically handle HEAD requests for GET routes
|
||||
mainRouter.Use(routes.CORSMiddleware())
|
||||
|
||||
// Load templates if UI is enabled
|
||||
var uiTemplates *template.Template
|
||||
if cfg.UI.Enabled {
|
||||
var err error
|
||||
uiTemplates, err = appview.Templates()
|
||||
if err != nil {
|
||||
slog.Warn("Failed to load UI templates", "error", err)
|
||||
} else {
|
||||
// Register UI routes with dependencies
|
||||
routes.RegisterUIRoutes(mainRouter, routes.UIDependencies{
|
||||
Database: uiDatabase,
|
||||
ReadOnlyDB: uiReadOnlyDB,
|
||||
SessionStore: uiSessionStore,
|
||||
OAuthClientApp: oauthClientApp,
|
||||
OAuthStore: oauthStore,
|
||||
Refresher: refresher,
|
||||
BaseURL: baseURL,
|
||||
DeviceStore: deviceStore,
|
||||
HealthChecker: healthChecker,
|
||||
ReadmeFetcher: readmeFetcher,
|
||||
Templates: uiTemplates,
|
||||
DefaultHoldDID: defaultHoldDID,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Create OAuth server
|
||||
oauthServer := oauth.NewServer(oauthClientApp)
|
||||
// Connect server to refresher for cache invalidation
|
||||
oauthServer.SetRefresher(refresher)
|
||||
// Connect UI session store for web login
|
||||
if uiSessionStore != nil {
|
||||
oauthServer.SetUISessionStore(uiSessionStore)
|
||||
}
|
||||
|
||||
// Register OAuth post-auth callback for AppView business logic
|
||||
// This decouples the OAuth package from AppView-specific dependencies
|
||||
oauthServer.SetPostAuthCallback(func(ctx context.Context, did, handle, pdsEndpoint, sessionID string) error {
|
||||
slog.Debug("OAuth post-auth callback", "component", "appview/callback", "did", did)
|
||||
|
||||
// Create ATProto client with session provider (uses DoWithSession for DPoP nonce safety)
|
||||
client := atproto.NewClientWithSessionProvider(pdsEndpoint, did, refresher)
|
||||
|
||||
// Note: Profile and crew setup now happen automatically via UserContext.EnsureUserSetup()
|
||||
|
||||
// Fetch user's profile record from PDS (contains blob references)
|
||||
profileRecord, err := client.GetProfileRecord(ctx, did)
|
||||
if err != nil {
|
||||
slog.Warn("Failed to fetch profile record", "component", "appview/callback", "did", did, "error", err)
|
||||
// Continue without avatar - set profileRecord to nil to skip avatar extraction
|
||||
profileRecord = nil
|
||||
}
|
||||
|
||||
// Construct avatar URL from blob CID using imgs.blue CDN (if profile record was fetched successfully)
|
||||
avatarURL := ""
|
||||
if profileRecord != nil && profileRecord.Avatar != nil && profileRecord.Avatar.Ref.Link != "" {
|
||||
avatarURL = atproto.BlobCDNURL(did, profileRecord.Avatar.Ref.Link)
|
||||
slog.Debug("Constructed avatar URL", "component", "appview/callback", "avatar_url", avatarURL)
|
||||
}
|
||||
|
||||
// Store user in database (with or without avatar)
|
||||
// Use UpsertUser if we successfully fetched an avatar (to update existing users)
|
||||
// Use UpsertUserIgnoreAvatar if fetch failed (to preserve existing avatars)
|
||||
if avatarURL != "" {
|
||||
err = db.UpsertUser(uiDatabase, &db.User{
|
||||
DID: did,
|
||||
Handle: handle,
|
||||
PDSEndpoint: pdsEndpoint,
|
||||
Avatar: avatarURL,
|
||||
LastSeen: time.Now(),
|
||||
})
|
||||
} else {
|
||||
err = db.UpsertUserIgnoreAvatar(uiDatabase, &db.User{
|
||||
DID: did,
|
||||
Handle: handle,
|
||||
PDSEndpoint: pdsEndpoint,
|
||||
Avatar: avatarURL,
|
||||
LastSeen: time.Now(),
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
slog.Warn("Failed to store user in database", "component", "appview/callback", "error", err)
|
||||
return nil // Non-fatal
|
||||
}
|
||||
|
||||
slog.Debug("Stored user", "component", "appview/callback", "did", did, "has_avatar", avatarURL != "")
|
||||
|
||||
// Migrate profile URL→DID if needed
|
||||
profile, err := storage.GetProfile(ctx, client)
|
||||
if err != nil {
|
||||
slog.Warn("Failed to get profile", "component", "appview/callback", "did", did, "error", err)
|
||||
return nil // Non-fatal
|
||||
}
|
||||
|
||||
// Migrate profile URL→DID if needed (legacy migration, crew registration now handled by UserContext)
|
||||
if profile != nil && profile.DefaultHold != "" {
|
||||
// Check if defaultHold is a URL (needs migration)
|
||||
if strings.HasPrefix(profile.DefaultHold, "http://") || strings.HasPrefix(profile.DefaultHold, "https://") {
|
||||
slog.Debug("Migrating hold URL to DID", "component", "appview/callback", "did", did, "hold_url", profile.DefaultHold)
|
||||
|
||||
// Resolve URL to DID
|
||||
holdDID := atproto.ResolveHoldDIDFromURL(profile.DefaultHold)
|
||||
|
||||
// Update profile with DID
|
||||
profile.DefaultHold = holdDID
|
||||
if err := storage.UpdateProfile(ctx, client, profile); err != nil {
|
||||
slog.Warn("Failed to update profile with hold DID", "component", "appview/callback", "did", did, "error", err)
|
||||
} else {
|
||||
slog.Debug("Updated profile with hold DID", "component", "appview/callback", "hold_did", holdDID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil // All errors are non-fatal, logged for debugging
|
||||
})
|
||||
|
||||
// Create token issuer (also initializes auth keys if needed)
|
||||
var issuer *token.Issuer
|
||||
if cfg.Distribution.Auth["token"] != nil {
|
||||
issuer, err = createTokenIssuer(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create token issuer: %w", err)
|
||||
}
|
||||
|
||||
// Log successful initialization
|
||||
slog.Info("Auth keys initialized", "path", cfg.Auth.KeyPath)
|
||||
}
|
||||
|
||||
// Create registry app (returns http.Handler)
|
||||
ctx := context.Background()
|
||||
app := handlers.NewApp(ctx, cfg.Distribution)
|
||||
|
||||
// Wrap registry app with middleware chain:
|
||||
// 1. ExtractAuthMethod - extracts auth method from JWT and stores in context
|
||||
// 2. UserContextMiddleware - builds UserContext with identity, permissions, service tokens
|
||||
wrappedApp := middleware.ExtractAuthMethod(app)
|
||||
|
||||
// Create dependencies for UserContextMiddleware
|
||||
userContextDeps := &auth.Dependencies{
|
||||
Refresher: refresher,
|
||||
Authorizer: holdAuthorizer,
|
||||
DefaultHoldDID: defaultHoldDID,
|
||||
}
|
||||
wrappedApp = middleware.UserContextMiddleware(userContextDeps)(wrappedApp)
|
||||
|
||||
// Mount registry at /v2/
|
||||
mainRouter.Handle("/v2/*", wrappedApp)
|
||||
|
||||
// Mount static files if UI is enabled
|
||||
if uiSessionStore != nil && uiTemplates != nil {
|
||||
// Register dynamic routes for root-level files (favicons, manifests, etc.)
|
||||
staticHandler := appview.StaticHandler()
|
||||
rootFiles, err := appview.StaticRootFiles()
|
||||
if err != nil {
|
||||
slog.Warn("Failed to scan static root files", "error", err)
|
||||
} else {
|
||||
for _, filename := range rootFiles {
|
||||
// Create a closure to capture the filename
|
||||
file := filename
|
||||
mainRouter.Get("/"+file, func(w http.ResponseWriter, r *http.Request) {
|
||||
// Serve the specific file from static root
|
||||
r.URL.Path = "/" + file
|
||||
staticHandler.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
slog.Info("Registered dynamic root file routes", "count", len(rootFiles), "files", rootFiles)
|
||||
}
|
||||
|
||||
// Mount subdirectory routes with clean paths
|
||||
mainRouter.Handle("/css/*", http.StripPrefix("/css/", appview.StaticSubdir("css")))
|
||||
mainRouter.Handle("/js/*", http.StripPrefix("/js/", appview.StaticSubdir("js")))
|
||||
mainRouter.Handle("/static/*", http.StripPrefix("/static/", appview.StaticSubdir("static")))
|
||||
|
||||
slog.Info("UI enabled", "home", "/", "settings", "/settings")
|
||||
}
|
||||
|
||||
// Mount OAuth endpoints
|
||||
mainRouter.Get("/auth/oauth/authorize", oauthServer.ServeAuthorize)
|
||||
mainRouter.Get("/auth/oauth/callback", oauthServer.ServeCallback)
|
||||
|
||||
// OAuth client metadata endpoint
|
||||
mainRouter.Get("/oauth-client-metadata.json", func(w http.ResponseWriter, r *http.Request) {
|
||||
config := oauthClientApp.Config
|
||||
metadata := config.ClientMetadata()
|
||||
|
||||
// For confidential clients, ensure JWKS is included
|
||||
// The indigo library should populate this automatically, but we explicitly set it here
|
||||
// to be defensive and ensure it's always present for confidential clients
|
||||
if config.IsConfidential() && metadata.JWKS == nil {
|
||||
jwks := config.PublicJWKS()
|
||||
metadata.JWKS = &jwks
|
||||
}
|
||||
|
||||
// Convert indigo's metadata to map so we can add custom fields
|
||||
metadataBytes, err := json.Marshal(metadata)
|
||||
if err != nil {
|
||||
http.Error(w, "Failed to marshal metadata", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
var metadataMap map[string]interface{}
|
||||
if err := json.Unmarshal(metadataBytes, &metadataMap); err != nil {
|
||||
http.Error(w, "Failed to unmarshal metadata", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Add custom fields
|
||||
metadataMap["client_name"] = cfg.Server.ClientName
|
||||
metadataMap["client_uri"] = cfg.Server.BaseURL
|
||||
metadataMap["logo_uri"] = cfg.Server.BaseURL + "/web-app-manifest-192x192.png"
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
// Limit caching to allow scope changes to propagate quickly
|
||||
// PDS servers cache client metadata, so short max-age helps with updates
|
||||
w.Header().Set("Cache-Control", "public, max-age=300")
|
||||
if err := json.NewEncoder(w).Encode(metadataMap); err != nil {
|
||||
http.Error(w, "Failed to encode metadata", http.StatusInternalServerError)
|
||||
}
|
||||
})
|
||||
|
||||
// Note: Indigo handles OAuth state cleanup internally via its store
|
||||
|
||||
// Mount auth endpoints if enabled
|
||||
if issuer != nil {
|
||||
// Basic Auth token endpoint (supports device secrets and app passwords)
|
||||
tokenHandler := token.NewHandler(issuer, deviceStore)
|
||||
|
||||
// Register OAuth session validator for device auth validation
|
||||
// This validates OAuth sessions are usable (not just exist) before issuing tokens
|
||||
// Prevents the flood of errors when a stale session is discovered during push
|
||||
tokenHandler.SetOAuthSessionValidator(refresher)
|
||||
|
||||
// Register token post-auth callback
|
||||
// Note: Profile and crew setup now happen automatically via UserContext.EnsureUserSetup()
|
||||
tokenHandler.SetPostAuthCallback(func(ctx context.Context, did, handle, pdsEndpoint, accessToken string) error {
|
||||
slog.Debug("Token post-auth callback", "component", "appview/callback", "did", did)
|
||||
return nil
|
||||
})
|
||||
|
||||
mainRouter.Get("/auth/token", tokenHandler.ServeHTTP)
|
||||
|
||||
// Device authorization endpoints (public)
|
||||
mainRouter.Handle("/auth/device/code", &uihandlers.DeviceCodeHandler{
|
||||
Store: deviceStore,
|
||||
AppViewBaseURL: baseURL,
|
||||
})
|
||||
mainRouter.Handle("/auth/device/token", &uihandlers.DeviceTokenHandler{
|
||||
Store: deviceStore,
|
||||
})
|
||||
|
||||
slog.Info("Auth endpoints enabled",
|
||||
"basic_auth", "/auth/token",
|
||||
"device_code", "/auth/device/code",
|
||||
"device_token", "/auth/device/token",
|
||||
"oauth_authorize", "/auth/oauth/authorize",
|
||||
"oauth_callback", "/auth/oauth/callback",
|
||||
"oauth_metadata", "/client-metadata.json")
|
||||
}
|
||||
|
||||
// Register credential helper version API (public endpoint)
|
||||
mainRouter.Handle("/api/credential-helper/version", &uihandlers.CredentialHelperVersionHandler{
|
||||
Version: cfg.CredentialHelper.Version,
|
||||
TangledRepo: cfg.CredentialHelper.TangledRepo,
|
||||
Checksums: cfg.CredentialHelper.Checksums,
|
||||
})
|
||||
if cfg.CredentialHelper.Version != "" {
|
||||
slog.Info("Credential helper version API enabled",
|
||||
"endpoint", "/api/credential-helper/version",
|
||||
"version", cfg.CredentialHelper.Version)
|
||||
}
|
||||
|
||||
// Create HTTP server
|
||||
server := &http.Server{
|
||||
Addr: cfg.Server.Addr,
|
||||
Handler: mainRouter,
|
||||
}
|
||||
|
||||
// Handle graceful shutdown
|
||||
stop := make(chan os.Signal, 1)
|
||||
signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
|
||||
|
||||
// Start server in goroutine
|
||||
errChan := make(chan error, 1)
|
||||
go func() {
|
||||
slog.Info("Starting registry server", "addr", cfg.Server.Addr)
|
||||
if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
||||
errChan <- err
|
||||
}
|
||||
}()
|
||||
|
||||
// Wait for shutdown signal or error
|
||||
select {
|
||||
case <-stop:
|
||||
slog.Info("Shutting down registry server")
|
||||
|
||||
// Stop health worker first
|
||||
slog.Info("Stopping hold health worker")
|
||||
healthWorker.Stop()
|
||||
|
||||
shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := server.Shutdown(shutdownCtx); err != nil {
|
||||
return fmt.Errorf("server shutdown error: %w", err)
|
||||
}
|
||||
case err := <-errChan:
|
||||
// Stop health worker on error (workerCancel called by defer)
|
||||
healthWorker.Stop()
|
||||
return fmt.Errorf("server error: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createTokenIssuer creates a token issuer for auth handlers
|
||||
func createTokenIssuer(cfg *appview.Config) (*token.Issuer, error) {
|
||||
return token.NewIssuer(
|
||||
cfg.Auth.KeyPath,
|
||||
cfg.Auth.ServiceName, // issuer
|
||||
cfg.Auth.ServiceName, // service
|
||||
cfg.Auth.TokenExpiration,
|
||||
)
|
||||
}
|
||||
|
||||
// initializeJetstream initializes the Jetstream workers for real-time events and backfill
|
||||
func initializeJetstream(database *sql.DB, jetstreamCfg *appview.JetstreamConfig, defaultHoldDID string, testMode bool, refresher *oauth.Refresher) {
|
||||
// Start Jetstream worker
|
||||
jetstreamURL := jetstreamCfg.URL
|
||||
|
||||
// Start real-time Jetstream worker with cursor tracking for reconnects
|
||||
go func() {
|
||||
var lastCursor int64 = 0 // Start from now on first connect
|
||||
for {
|
||||
worker := jetstream.NewWorker(database, jetstreamURL, lastCursor)
|
||||
if err := worker.Start(context.Background()); err != nil {
|
||||
// Save cursor from this connection for next reconnect
|
||||
lastCursor = worker.GetLastCursor()
|
||||
slog.Warn("Jetstream real-time worker error, reconnecting", "component", "jetstream", "error", err, "reconnect_delay", "10s")
|
||||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
}
|
||||
}()
|
||||
slog.Info("Jetstream real-time worker started", "component", "jetstream")
|
||||
|
||||
// Start backfill worker (enabled by default, set ATCR_BACKFILL_ENABLED=false to disable)
|
||||
if jetstreamCfg.BackfillEnabled {
|
||||
// Get relay endpoint for sync API (defaults to Bluesky's relay)
|
||||
relayEndpoint := jetstreamCfg.RelayEndpoint
|
||||
|
||||
backfillWorker, err := jetstream.NewBackfillWorker(database, relayEndpoint, defaultHoldDID, testMode, refresher)
|
||||
if err != nil {
|
||||
slog.Warn("Failed to create backfill worker", "component", "jetstream/backfill", "error", err)
|
||||
} else {
|
||||
// Run initial backfill with startup delay for Docker compose
|
||||
go func() {
|
||||
// Wait for hold service to be ready (Docker startup race condition)
|
||||
startupDelay := 5 * time.Second
|
||||
slog.Info("Waiting for services to be ready", "component", "jetstream/backfill", "startup_delay", startupDelay)
|
||||
time.Sleep(startupDelay)
|
||||
|
||||
slog.Info("Starting sync-based backfill", "component", "jetstream/backfill", "relay_endpoint", relayEndpoint)
|
||||
if err := backfillWorker.Start(context.Background()); err != nil {
|
||||
slog.Warn("Backfill finished with error", "component", "jetstream/backfill", "error", err)
|
||||
} else {
|
||||
slog.Info("Backfill completed successfully", "component", "jetstream/backfill")
|
||||
}
|
||||
}()
|
||||
|
||||
// Start periodic backfill scheduler
|
||||
interval := jetstreamCfg.BackfillInterval
|
||||
|
||||
go func() {
|
||||
ticker := time.NewTicker(interval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for range ticker.C {
|
||||
slog.Info("Starting periodic backfill", "component", "jetstream/backfill", "interval", interval)
|
||||
if err := backfillWorker.Start(context.Background()); err != nil {
|
||||
slog.Warn("Periodic backfill finished with error", "component", "jetstream/backfill", "error", err)
|
||||
} else {
|
||||
slog.Info("Periodic backfill completed successfully", "component", "jetstream/backfill")
|
||||
}
|
||||
}
|
||||
}()
|
||||
slog.Info("Periodic backfill scheduler started", "component", "jetstream/backfill", "interval", interval)
|
||||
}
|
||||
}
|
||||
}
|
||||
159
cmd/credential-helper/cmd_configure.go
Normal file
159
cmd/credential-helper/cmd_configure.go
Normal file
@@ -0,0 +1,159 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/charmbracelet/huh"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newConfigureDockerCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "configure-docker",
|
||||
Short: "Configure Docker to use this credential helper",
|
||||
Long: "Adds or updates the credHelpers entry in ~/.docker/config.json\nfor all configured registries.",
|
||||
RunE: runConfigureDocker,
|
||||
}
|
||||
}
|
||||
|
||||
func runConfigureDocker(cmd *cobra.Command, args []string) error {
|
||||
cfg, err := loadConfig()
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading config: %w", err)
|
||||
}
|
||||
|
||||
if len(cfg.Registries) == 0 {
|
||||
fmt.Fprintf(os.Stderr, "No registries configured.\n")
|
||||
fmt.Fprintf(os.Stderr, "Run: docker-credential-atcr login\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect registry hosts
|
||||
var hosts []string
|
||||
for url := range cfg.Registries {
|
||||
host := strings.TrimPrefix(url, "https://")
|
||||
host = strings.TrimPrefix(host, "http://")
|
||||
hosts = append(hosts, host)
|
||||
}
|
||||
|
||||
dockerConfigPath := getDockerConfigPath()
|
||||
|
||||
// Load existing Docker config
|
||||
dockerCfg := loadDockerConfig()
|
||||
if dockerCfg == nil {
|
||||
dockerCfg = make(map[string]any)
|
||||
}
|
||||
|
||||
// Get or create credHelpers
|
||||
helpers, ok := dockerCfg["credHelpers"]
|
||||
if !ok {
|
||||
helpers = make(map[string]any)
|
||||
}
|
||||
helpersMap, ok := helpers.(map[string]any)
|
||||
if !ok {
|
||||
helpersMap = make(map[string]any)
|
||||
}
|
||||
|
||||
// Check what needs to change
|
||||
var toAdd []string
|
||||
for _, host := range hosts {
|
||||
current, exists := helpersMap[host]
|
||||
if !exists || current != "atcr" {
|
||||
toAdd = append(toAdd, host)
|
||||
}
|
||||
}
|
||||
|
||||
if len(toAdd) == 0 {
|
||||
fmt.Printf("Docker is already configured for all registries.\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
fmt.Printf("Will update %s:\n", dockerConfigPath)
|
||||
for _, host := range toAdd {
|
||||
fmt.Printf(" + credHelpers[%q] = \"atcr\"\n", host)
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
var confirm bool
|
||||
err = huh.NewConfirm().
|
||||
Title("Apply changes?").
|
||||
Value(&confirm).
|
||||
Run()
|
||||
if err != nil || !confirm {
|
||||
fmt.Fprintf(os.Stderr, "Cancelled.\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Apply changes
|
||||
for _, host := range toAdd {
|
||||
helpersMap[host] = "atcr"
|
||||
}
|
||||
dockerCfg["credHelpers"] = helpersMap
|
||||
|
||||
// Remove conflicting credsStore if it exists and we're adding credHelpers
|
||||
if _, hasStore := dockerCfg["credsStore"]; hasStore {
|
||||
fmt.Fprintf(os.Stderr, "Note: credsStore is set — credHelpers takes precedence for configured registries.\n")
|
||||
}
|
||||
|
||||
if err := saveDockerConfig(dockerConfigPath, dockerCfg); err != nil {
|
||||
return fmt.Errorf("saving Docker config: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Docker configured successfully.\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
// getDockerConfigPath returns the path to Docker's config.json
|
||||
func getDockerConfigPath() string {
|
||||
// Check DOCKER_CONFIG env var first
|
||||
if dir := os.Getenv("DOCKER_CONFIG"); dir != "" {
|
||||
return filepath.Join(dir, "config.json")
|
||||
}
|
||||
|
||||
homeDir, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return filepath.Join(homeDir, ".docker", "config.json")
|
||||
}
|
||||
|
||||
// loadDockerConfig loads Docker's config.json as a generic map
|
||||
func loadDockerConfig() map[string]any {
|
||||
path := getDockerConfigPath()
|
||||
if path == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var config map[string]any
|
||||
if err := json.Unmarshal(data, &config); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return config
|
||||
}
|
||||
|
||||
// saveDockerConfig writes Docker's config.json
|
||||
func saveDockerConfig(path string, config map[string]any) error {
|
||||
// Ensure directory exists
|
||||
dir := filepath.Dir(path)
|
||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
data, err := json.MarshalIndent(config, "", "\t")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data = append(data, '\n')
|
||||
|
||||
return os.WriteFile(path, data, 0600)
|
||||
}
|
||||
181
cmd/credential-helper/cmd_login.go
Normal file
181
cmd/credential-helper/cmd_login.go
Normal file
@@ -0,0 +1,181 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/charmbracelet/huh"
|
||||
"github.com/charmbracelet/huh/spinner"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newLoginCmd() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "login [registry]",
|
||||
Short: "Authenticate with a container registry",
|
||||
Long: "Starts a device authorization flow to authenticate with a registry.\nDefault registry: atcr.io",
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: runLogin,
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runLogin(cmd *cobra.Command, args []string) error {
|
||||
serverURL := "atcr.io"
|
||||
if len(args) > 0 {
|
||||
serverURL = args[0]
|
||||
}
|
||||
|
||||
appViewURL := buildAppViewURL(serverURL)
|
||||
|
||||
cfg, err := loadConfig()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: config load error: %v\n", err)
|
||||
}
|
||||
|
||||
// Check if already logged in
|
||||
reg := cfg.findRegistry(appViewURL)
|
||||
if reg != nil && len(reg.Accounts) > 0 {
|
||||
var lines []string
|
||||
for _, acct := range reg.Accounts {
|
||||
lines = append(lines, acct.Handle)
|
||||
}
|
||||
|
||||
var addAnother bool
|
||||
err := huh.NewConfirm().
|
||||
Title("Already logged in to " + appViewURL).
|
||||
Description("Accounts: " + strings.Join(lines, ", ")).
|
||||
Value(&addAnother).
|
||||
Affirmative("Add another account").
|
||||
Negative("Cancel").
|
||||
Run()
|
||||
if err != nil || !addAnother {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// 1. Request device code
|
||||
codeResp, resolvedURL, err := requestDeviceCode(serverURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("device authorization failed: %w", err)
|
||||
}
|
||||
|
||||
verificationURL := codeResp.VerificationURI + "?user_code=" + codeResp.UserCode
|
||||
|
||||
// 2. Show code and open browser
|
||||
fmt.Fprintln(os.Stderr)
|
||||
logWarning("First copy your one-time code: %s", bold(codeResp.UserCode))
|
||||
|
||||
if isTerminal(os.Stdin) {
|
||||
// Interactive: wait for Enter before opening browser
|
||||
logInfof("Press Enter to open %s in your browser... ", codeResp.VerificationURI)
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
reader.ReadString('\n') //nolint:errcheck
|
||||
|
||||
if err := openBrowser(verificationURL); err != nil {
|
||||
logWarning("Could not open browser automatically.")
|
||||
fmt.Fprintf(os.Stderr, " Visit: %s\n", verificationURL)
|
||||
}
|
||||
} else {
|
||||
// Non-interactive: just print the URL
|
||||
logInfo("Visit this URL in your browser:")
|
||||
fmt.Fprintf(os.Stderr, " %s\n", verificationURL)
|
||||
}
|
||||
|
||||
// 3. Poll for authorization with spinner
|
||||
var acct *Account
|
||||
var pollErr error
|
||||
if err := spinner.New().
|
||||
Title("Waiting for authentication...").
|
||||
Action(func() {
|
||||
acct, pollErr = pollDeviceToken(resolvedURL, codeResp)
|
||||
}).
|
||||
Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
if pollErr != nil {
|
||||
return fmt.Errorf("device authorization failed: %w", pollErr)
|
||||
}
|
||||
|
||||
logSuccess("Authentication complete.")
|
||||
|
||||
// 4. Save
|
||||
cfg.addAccount(resolvedURL, acct)
|
||||
if err := cfg.save(); err != nil {
|
||||
return fmt.Errorf("saving config: %w", err)
|
||||
}
|
||||
|
||||
logSuccess("Logged in as %s on %s", bold(acct.Handle), resolvedURL)
|
||||
|
||||
// 5. Offer to configure Docker if not already set up
|
||||
if isTerminal(os.Stdin) && !isDockerConfigured(serverURL) {
|
||||
fmt.Fprintf(os.Stderr, "\n")
|
||||
var configureDkr bool
|
||||
err := huh.NewConfirm().
|
||||
Title("Configure Docker to use this credential helper?").
|
||||
Description("Adds credHelpers entry to ~/.docker/config.json").
|
||||
Value(&configureDkr).
|
||||
Run()
|
||||
if err == nil && configureDkr {
|
||||
if configureErr := configureDockerForRegistry(serverURL); configureErr != nil {
|
||||
logWarning("Failed to configure Docker: %v", configureErr)
|
||||
} else {
|
||||
logSuccess("Configured Docker for %s", serverURL)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// isDockerConfigured checks if Docker's config.json has this registry in credHelpers
|
||||
func isDockerConfigured(serverURL string) bool {
|
||||
dockerConfig := loadDockerConfig()
|
||||
if dockerConfig == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
helpers, ok := dockerConfig["credHelpers"]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
helpersMap, ok := helpers.(map[string]any)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
host := strings.TrimPrefix(serverURL, "https://")
|
||||
host = strings.TrimPrefix(host, "http://")
|
||||
|
||||
_, ok = helpersMap[host]
|
||||
return ok
|
||||
}
|
||||
|
||||
// configureDockerForRegistry adds a credHelpers entry for a single registry
|
||||
func configureDockerForRegistry(serverURL string) error {
|
||||
host := strings.TrimPrefix(serverURL, "https://")
|
||||
host = strings.TrimPrefix(host, "http://")
|
||||
|
||||
dockerConfigPath := getDockerConfigPath()
|
||||
dockerCfg := loadDockerConfig()
|
||||
if dockerCfg == nil {
|
||||
dockerCfg = make(map[string]any)
|
||||
}
|
||||
|
||||
helpers, ok := dockerCfg["credHelpers"]
|
||||
if !ok {
|
||||
helpers = make(map[string]any)
|
||||
}
|
||||
helpersMap, ok := helpers.(map[string]any)
|
||||
if !ok {
|
||||
helpersMap = make(map[string]any)
|
||||
}
|
||||
|
||||
helpersMap[host] = "atcr"
|
||||
dockerCfg["credHelpers"] = helpersMap
|
||||
|
||||
return saveDockerConfig(dockerConfigPath, dockerCfg)
|
||||
}
|
||||
93
cmd/credential-helper/cmd_logout.go
Normal file
93
cmd/credential-helper/cmd_logout.go
Normal file
@@ -0,0 +1,93 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
|
||||
"github.com/charmbracelet/huh"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newLogoutCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "logout [registry]",
|
||||
Short: "Remove account credentials",
|
||||
Long: "Remove stored credentials for an account.\nDefault registry: atcr.io",
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: runLogout,
|
||||
}
|
||||
}
|
||||
|
||||
func runLogout(cmd *cobra.Command, args []string) error {
|
||||
serverURL := "atcr.io"
|
||||
if len(args) > 0 {
|
||||
serverURL = args[0]
|
||||
}
|
||||
|
||||
appViewURL := buildAppViewURL(serverURL)
|
||||
|
||||
cfg, err := loadConfig()
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading config: %w", err)
|
||||
}
|
||||
|
||||
reg := cfg.findRegistry(appViewURL)
|
||||
if reg == nil || len(reg.Accounts) == 0 {
|
||||
fmt.Fprintf(os.Stderr, "No accounts configured for %s.\n", serverURL)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Determine which account to remove
|
||||
var handle string
|
||||
|
||||
if len(reg.Accounts) == 1 {
|
||||
for h := range reg.Accounts {
|
||||
handle = h
|
||||
}
|
||||
} else {
|
||||
// Multiple accounts — select which to remove
|
||||
var handles []string
|
||||
for h := range reg.Accounts {
|
||||
handles = append(handles, h)
|
||||
}
|
||||
sort.Strings(handles)
|
||||
|
||||
var options []huh.Option[string]
|
||||
for _, h := range handles {
|
||||
label := h
|
||||
if h == reg.Active {
|
||||
label += " (active)"
|
||||
}
|
||||
options = append(options, huh.NewOption(label, h))
|
||||
}
|
||||
|
||||
err := huh.NewSelect[string]().
|
||||
Title("Which account to remove?").
|
||||
Options(options...).
|
||||
Value(&handle).
|
||||
Run()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Confirm
|
||||
var confirm bool
|
||||
err = huh.NewConfirm().
|
||||
Title(fmt.Sprintf("Remove %s from %s?", handle, serverURL)).
|
||||
Value(&confirm).
|
||||
Run()
|
||||
if err != nil || !confirm {
|
||||
fmt.Fprintf(os.Stderr, "Cancelled.\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
cfg.removeAccount(appViewURL, handle)
|
||||
if err := cfg.save(); err != nil {
|
||||
return fmt.Errorf("saving config: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Removed %s from %s\n", handle, serverURL)
|
||||
return nil
|
||||
}
|
||||
65
cmd/credential-helper/cmd_status.go
Normal file
65
cmd/credential-helper/cmd_status.go
Normal file
@@ -0,0 +1,65 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newStatusCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "status",
|
||||
Short: "Show all configured accounts",
|
||||
RunE: runStatus,
|
||||
}
|
||||
}
|
||||
|
||||
func runStatus(cmd *cobra.Command, args []string) error {
|
||||
cfg, err := loadConfig()
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading config: %w", err)
|
||||
}
|
||||
|
||||
if len(cfg.Registries) == 0 {
|
||||
fmt.Fprintf(os.Stderr, "No accounts configured.\n")
|
||||
fmt.Fprintf(os.Stderr, "Run: docker-credential-atcr login\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sort registry URLs for stable output
|
||||
var urls []string
|
||||
for url := range cfg.Registries {
|
||||
urls = append(urls, url)
|
||||
}
|
||||
sort.Strings(urls)
|
||||
|
||||
for _, url := range urls {
|
||||
reg := cfg.Registries[url]
|
||||
fmt.Printf("%s\n", url)
|
||||
|
||||
// Sort handles for stable output
|
||||
var handles []string
|
||||
for h := range reg.Accounts {
|
||||
handles = append(handles, h)
|
||||
}
|
||||
sort.Strings(handles)
|
||||
|
||||
for _, handle := range handles {
|
||||
acct := reg.Accounts[handle]
|
||||
marker := " "
|
||||
if handle == reg.Active {
|
||||
marker = "* "
|
||||
}
|
||||
did := ""
|
||||
if acct.DID != "" {
|
||||
did = fmt.Sprintf(" (%s)", acct.DID)
|
||||
}
|
||||
fmt.Printf(" %s%s%s\n", marker, handle, did)
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
96
cmd/credential-helper/cmd_switch.go
Normal file
96
cmd/credential-helper/cmd_switch.go
Normal file
@@ -0,0 +1,96 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
|
||||
"github.com/charmbracelet/huh"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newSwitchCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "switch [registry]",
|
||||
Short: "Switch the active account for a registry",
|
||||
Long: "Switch the active account used for Docker operations.\nDefault registry: atcr.io",
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: runSwitch,
|
||||
}
|
||||
}
|
||||
|
||||
func runSwitch(cmd *cobra.Command, args []string) error {
|
||||
serverURL := "atcr.io"
|
||||
if len(args) > 0 {
|
||||
serverURL = args[0]
|
||||
}
|
||||
|
||||
appViewURL := buildAppViewURL(serverURL)
|
||||
|
||||
cfg, err := loadConfig()
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading config: %w", err)
|
||||
}
|
||||
|
||||
reg := cfg.findRegistry(appViewURL)
|
||||
if reg == nil || len(reg.Accounts) == 0 {
|
||||
fmt.Fprintf(os.Stderr, "No accounts configured for %s.\n", serverURL)
|
||||
fmt.Fprintf(os.Stderr, "Run: docker-credential-atcr login\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(reg.Accounts) == 1 {
|
||||
for h := range reg.Accounts {
|
||||
fmt.Fprintf(os.Stderr, "Only one account (%s) — nothing to switch.\n", h)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// For exactly 2 accounts, just toggle
|
||||
if len(reg.Accounts) == 2 {
|
||||
for h := range reg.Accounts {
|
||||
if h != reg.Active {
|
||||
reg.Active = h
|
||||
if err := cfg.save(); err != nil {
|
||||
return fmt.Errorf("saving config: %w", err)
|
||||
}
|
||||
fmt.Printf("Switched to %s on %s\n", h, serverURL)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 3+ accounts: interactive select
|
||||
var handles []string
|
||||
for h := range reg.Accounts {
|
||||
handles = append(handles, h)
|
||||
}
|
||||
sort.Strings(handles)
|
||||
|
||||
var options []huh.Option[string]
|
||||
for _, h := range handles {
|
||||
label := h
|
||||
if h == reg.Active {
|
||||
label += " (current)"
|
||||
}
|
||||
options = append(options, huh.NewOption(label, h))
|
||||
}
|
||||
|
||||
var selected string
|
||||
err = huh.NewSelect[string]().
|
||||
Title("Select account for " + serverURL).
|
||||
Options(options...).
|
||||
Value(&selected).
|
||||
Run()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reg.Active = selected
|
||||
if err := cfg.save(); err != nil {
|
||||
return fmt.Errorf("saving config: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Switched to %s on %s\n", selected, serverURL)
|
||||
return nil
|
||||
}
|
||||
281
cmd/credential-helper/cmd_update.go
Normal file
281
cmd/credential-helper/cmd_update.go
Normal file
@@ -0,0 +1,281 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// VersionAPIResponse is the response from /api/credential-helper/version
|
||||
type VersionAPIResponse struct {
|
||||
Latest string `json:"latest"`
|
||||
DownloadURLs map[string]string `json:"download_urls"`
|
||||
Checksums map[string]string `json:"checksums"`
|
||||
ReleaseNotes string `json:"release_notes,omitempty"`
|
||||
}
|
||||
|
||||
func newUpdateCmd() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "update",
|
||||
Short: "Update to the latest version",
|
||||
RunE: runUpdate,
|
||||
}
|
||||
cmd.Flags().Bool("check", false, "Only check for updates, don't install")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runUpdate(cmd *cobra.Command, args []string) error {
|
||||
checkOnly, _ := cmd.Flags().GetBool("check")
|
||||
|
||||
// Default API URL
|
||||
apiURL := "https://atcr.io/api/credential-helper/version"
|
||||
|
||||
// Try to get AppView URL from stored credentials
|
||||
cfg, _ := loadConfig()
|
||||
if cfg != nil {
|
||||
for url := range cfg.Registries {
|
||||
apiURL = url + "/api/credential-helper/version"
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
versionInfo, err := fetchVersionInfo(apiURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking for updates: %w", err)
|
||||
}
|
||||
|
||||
if !isNewerVersion(versionInfo.Latest, version) {
|
||||
fmt.Printf("You're already running the latest version (%s)\n", version)
|
||||
return nil
|
||||
}
|
||||
|
||||
fmt.Printf("New version available: %s (current: %s)\n", versionInfo.Latest, version)
|
||||
|
||||
if checkOnly {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := performUpdate(versionInfo); err != nil {
|
||||
return fmt.Errorf("update failed: %w", err)
|
||||
}
|
||||
|
||||
fmt.Println("Update completed successfully!")
|
||||
return nil
|
||||
}
|
||||
|
||||
// fetchVersionInfo fetches version info from the AppView API
|
||||
func fetchVersionInfo(apiURL string) (*VersionAPIResponse, error) {
|
||||
client := &http.Client{
|
||||
Timeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
resp, err := client.Get(apiURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("fetching version info: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("version API returned status %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var versionInfo VersionAPIResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&versionInfo); err != nil {
|
||||
return nil, fmt.Errorf("parsing version info: %w", err)
|
||||
}
|
||||
|
||||
return &versionInfo, nil
|
||||
}
|
||||
|
||||
// isNewerVersion compares two version strings (simple semver comparison)
|
||||
func isNewerVersion(newVersion, currentVersion string) bool {
|
||||
if currentVersion == "dev" {
|
||||
return true
|
||||
}
|
||||
|
||||
newV := strings.TrimPrefix(newVersion, "v")
|
||||
curV := strings.TrimPrefix(currentVersion, "v")
|
||||
|
||||
newParts := strings.Split(newV, ".")
|
||||
curParts := strings.Split(curV, ".")
|
||||
|
||||
for i := range min(len(newParts), len(curParts)) {
|
||||
newNum := 0
|
||||
if parsed, err := strconv.Atoi(newParts[i]); err == nil {
|
||||
newNum = parsed
|
||||
}
|
||||
curNum := 0
|
||||
if parsed, err := strconv.Atoi(curParts[i]); err == nil {
|
||||
curNum = parsed
|
||||
}
|
||||
|
||||
if newNum > curNum {
|
||||
return true
|
||||
}
|
||||
if newNum < curNum {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return len(newParts) > len(curParts)
|
||||
}
|
||||
|
||||
// getPlatformKey returns the platform key for the current OS/arch
|
||||
func getPlatformKey() string {
|
||||
return fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)
|
||||
}
|
||||
|
||||
// performUpdate downloads and installs the new version
|
||||
func performUpdate(versionInfo *VersionAPIResponse) error {
|
||||
platformKey := getPlatformKey()
|
||||
|
||||
downloadURL, ok := versionInfo.DownloadURLs[platformKey]
|
||||
if !ok {
|
||||
return fmt.Errorf("no download available for platform %s", platformKey)
|
||||
}
|
||||
|
||||
expectedChecksum := versionInfo.Checksums[platformKey]
|
||||
|
||||
fmt.Printf("Downloading update from %s...\n", downloadURL)
|
||||
|
||||
tmpDir, err := os.MkdirTemp("", "atcr-update-")
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating temp directory: %w", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
archivePath := filepath.Join(tmpDir, "archive.tar.gz")
|
||||
if strings.HasSuffix(downloadURL, ".zip") {
|
||||
archivePath = filepath.Join(tmpDir, "archive.zip")
|
||||
}
|
||||
|
||||
if err := downloadFile(downloadURL, archivePath); err != nil {
|
||||
return fmt.Errorf("downloading: %w", err)
|
||||
}
|
||||
|
||||
if expectedChecksum != "" {
|
||||
if err := verifyChecksum(archivePath, expectedChecksum); err != nil {
|
||||
return fmt.Errorf("checksum verification failed: %w", err)
|
||||
}
|
||||
fmt.Println("Checksum verified.")
|
||||
}
|
||||
|
||||
binaryPath := filepath.Join(tmpDir, "docker-credential-atcr")
|
||||
if runtime.GOOS == "windows" {
|
||||
binaryPath += ".exe"
|
||||
}
|
||||
|
||||
if strings.HasSuffix(archivePath, ".zip") {
|
||||
if err := extractZip(archivePath, tmpDir); err != nil {
|
||||
return fmt.Errorf("extracting archive: %w", err)
|
||||
}
|
||||
} else {
|
||||
if err := extractTarGz(archivePath, tmpDir); err != nil {
|
||||
return fmt.Errorf("extracting archive: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
currentPath, err := os.Executable()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting current executable path: %w", err)
|
||||
}
|
||||
currentPath, err = filepath.EvalSymlinks(currentPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("resolving symlinks: %w", err)
|
||||
}
|
||||
|
||||
fmt.Println("Verifying new binary...")
|
||||
verifyCmd := exec.Command(binaryPath, "version")
|
||||
if output, err := verifyCmd.Output(); err != nil {
|
||||
return fmt.Errorf("new binary verification failed: %w", err)
|
||||
} else {
|
||||
fmt.Printf("New binary version: %s", string(output))
|
||||
}
|
||||
|
||||
backupPath := currentPath + ".bak"
|
||||
if err := os.Rename(currentPath, backupPath); err != nil {
|
||||
return fmt.Errorf("backing up current binary: %w", err)
|
||||
}
|
||||
|
||||
if err := copyFile(binaryPath, currentPath); err != nil {
|
||||
os.Rename(backupPath, currentPath) //nolint:errcheck
|
||||
return fmt.Errorf("installing new binary: %w", err)
|
||||
}
|
||||
|
||||
if err := os.Chmod(currentPath, 0755); err != nil {
|
||||
os.Remove(currentPath) //nolint:errcheck
|
||||
os.Rename(backupPath, currentPath) //nolint:errcheck
|
||||
return fmt.Errorf("setting permissions: %w", err)
|
||||
}
|
||||
|
||||
os.Remove(backupPath) //nolint:errcheck
|
||||
return nil
|
||||
}
|
||||
|
||||
// downloadFile downloads a file from a URL to a local path
|
||||
func downloadFile(url, destPath string) error {
|
||||
resp, err := http.Get(url) //nolint:gosec
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("download returned status %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
out, err := os.Create(destPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
_, err = io.Copy(out, resp.Body)
|
||||
return err
|
||||
}
|
||||
|
||||
// verifyChecksum verifies the SHA256 checksum of a file
|
||||
func verifyChecksum(filePath, expected string) error {
|
||||
if expected == "" {
|
||||
return nil
|
||||
}
|
||||
// Checksums are optional until configured
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractTarGz extracts a .tar.gz archive
|
||||
func extractTarGz(archivePath, destDir string) error {
|
||||
cmd := exec.Command("tar", "-xzf", archivePath, "-C", destDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("tar failed: %s: %w", string(output), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractZip extracts a .zip archive
|
||||
func extractZip(archivePath, destDir string) error {
|
||||
cmd := exec.Command("unzip", "-o", archivePath, "-d", destDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("unzip failed: %s: %w", string(output), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// copyFile copies a file from src to dst
|
||||
func copyFile(src, dst string) error {
|
||||
input, err := os.ReadFile(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(dst, input, 0755)
|
||||
}
|
||||
262
cmd/credential-helper/config.go
Normal file
262
cmd/credential-helper/config.go
Normal file
@@ -0,0 +1,262 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Config is the top-level credential helper configuration (v2).
|
||||
type Config struct {
|
||||
Version int `json:"version"`
|
||||
Registries map[string]*RegistryConfig `json:"registries"`
|
||||
}
|
||||
|
||||
// RegistryConfig holds accounts for a single registry.
|
||||
type RegistryConfig struct {
|
||||
Active string `json:"active"`
|
||||
Accounts map[string]*Account `json:"accounts"`
|
||||
}
|
||||
|
||||
// Account holds credentials for a single identity on a registry.
|
||||
type Account struct {
|
||||
Handle string `json:"handle"`
|
||||
DID string `json:"did,omitempty"`
|
||||
DeviceSecret string `json:"device_secret"`
|
||||
}
|
||||
|
||||
// UpdateCheckCache stores the last update check result.
|
||||
type UpdateCheckCache struct {
|
||||
CheckedAt time.Time `json:"checked_at"`
|
||||
Latest string `json:"latest"`
|
||||
Current string `json:"current"`
|
||||
}
|
||||
|
||||
// loadConfig loads the config from disk, auto-migrating old formats.
|
||||
// Returns a valid Config (possibly empty) even on error.
|
||||
func loadConfig() (*Config, error) {
|
||||
path := getConfigPath()
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return newConfig(), nil
|
||||
}
|
||||
return newConfig(), err
|
||||
}
|
||||
|
||||
// Try v2 format first
|
||||
var cfg Config
|
||||
if err := json.Unmarshal(data, &cfg); err == nil && cfg.Version == 2 && cfg.Registries != nil {
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
// Try current multi-registry format: {"credentials": {"url": {...}}}
|
||||
var multiCreds struct {
|
||||
Credentials map[string]struct {
|
||||
Handle string `json:"handle"`
|
||||
DID string `json:"did"`
|
||||
DeviceSecret string `json:"device_secret"`
|
||||
AppViewURL string `json:"appview_url"`
|
||||
} `json:"credentials"`
|
||||
}
|
||||
if err := json.Unmarshal(data, &multiCreds); err == nil && multiCreds.Credentials != nil {
|
||||
migrated := newConfig()
|
||||
for appViewURL, cred := range multiCreds.Credentials {
|
||||
handle := cred.Handle
|
||||
if handle == "" {
|
||||
continue
|
||||
}
|
||||
registryURL := appViewURL
|
||||
reg := migrated.getOrCreateRegistry(registryURL)
|
||||
reg.Accounts[handle] = &Account{
|
||||
Handle: handle,
|
||||
DID: cred.DID,
|
||||
DeviceSecret: cred.DeviceSecret,
|
||||
}
|
||||
if reg.Active == "" {
|
||||
reg.Active = handle
|
||||
}
|
||||
}
|
||||
if err := migrated.save(); err != nil {
|
||||
return migrated, fmt.Errorf("saving migrated config: %w", err)
|
||||
}
|
||||
return migrated, nil
|
||||
}
|
||||
|
||||
// Try legacy single-device format: {"handle": "...", "device_secret": "...", "appview_url": "..."}
|
||||
var legacy struct {
|
||||
Handle string `json:"handle"`
|
||||
DeviceSecret string `json:"device_secret"`
|
||||
AppViewURL string `json:"appview_url"`
|
||||
}
|
||||
if err := json.Unmarshal(data, &legacy); err == nil && legacy.DeviceSecret != "" {
|
||||
migrated := newConfig()
|
||||
handle := legacy.Handle
|
||||
registryURL := legacy.AppViewURL
|
||||
if registryURL == "" {
|
||||
registryURL = "https://atcr.io"
|
||||
}
|
||||
reg := migrated.getOrCreateRegistry(registryURL)
|
||||
reg.Accounts[handle] = &Account{
|
||||
Handle: handle,
|
||||
DeviceSecret: legacy.DeviceSecret,
|
||||
}
|
||||
reg.Active = handle
|
||||
if err := migrated.save(); err != nil {
|
||||
return migrated, fmt.Errorf("saving migrated config: %w", err)
|
||||
}
|
||||
return migrated, nil
|
||||
}
|
||||
|
||||
return newConfig(), fmt.Errorf("unrecognized config format")
|
||||
}
|
||||
|
||||
func newConfig() *Config {
|
||||
return &Config{
|
||||
Version: 2,
|
||||
Registries: make(map[string]*RegistryConfig),
|
||||
}
|
||||
}
|
||||
|
||||
// save writes the config to disk.
|
||||
func (c *Config) save() error {
|
||||
path := getConfigPath()
|
||||
data, err := json.MarshalIndent(c, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(path, data, 0600)
|
||||
}
|
||||
|
||||
// getOrCreateRegistry returns (or creates) a RegistryConfig for the given URL.
|
||||
func (c *Config) getOrCreateRegistry(registryURL string) *RegistryConfig {
|
||||
reg, ok := c.Registries[registryURL]
|
||||
if !ok {
|
||||
reg = &RegistryConfig{
|
||||
Accounts: make(map[string]*Account),
|
||||
}
|
||||
c.Registries[registryURL] = reg
|
||||
}
|
||||
return reg
|
||||
}
|
||||
|
||||
// findRegistry looks up a RegistryConfig by registry URL.
|
||||
func (c *Config) findRegistry(registryURL string) *RegistryConfig {
|
||||
return c.Registries[registryURL]
|
||||
}
|
||||
|
||||
// resolveAccount determines which account to use for a given registry.
|
||||
// Priority:
|
||||
// 1. Identity detected from parent process command line
|
||||
// 2. Active account (set by `switch`)
|
||||
// 3. Sole account (if only one exists)
|
||||
// 4. Error
|
||||
func (c *Config) resolveAccount(registryURL, serverURL string) (*Account, error) {
|
||||
reg := c.findRegistry(registryURL)
|
||||
if reg == nil || len(reg.Accounts) == 0 {
|
||||
return nil, fmt.Errorf("no accounts configured for %s\nRun: docker-credential-atcr login", serverURL)
|
||||
}
|
||||
|
||||
// 1. Try to detect identity from parent process
|
||||
ref := detectImageRef(serverURL)
|
||||
if ref != nil && ref.Identity != "" {
|
||||
if acct, ok := reg.Accounts[ref.Identity]; ok {
|
||||
return acct, nil
|
||||
}
|
||||
// Identity detected but no matching account — fall through to active
|
||||
}
|
||||
|
||||
// 2. Active account
|
||||
if reg.Active != "" {
|
||||
if acct, ok := reg.Accounts[reg.Active]; ok {
|
||||
return acct, nil
|
||||
}
|
||||
}
|
||||
|
||||
// 3. Sole account
|
||||
if len(reg.Accounts) == 1 {
|
||||
for _, acct := range reg.Accounts {
|
||||
return acct, nil
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Ambiguous
|
||||
return nil, fmt.Errorf("multiple accounts configured for %s\nRun: docker-credential-atcr switch", serverURL)
|
||||
}
|
||||
|
||||
// addAccount adds or updates an account in a registry and sets it active.
|
||||
func (c *Config) addAccount(registryURL string, acct *Account) {
|
||||
reg := c.getOrCreateRegistry(registryURL)
|
||||
reg.Accounts[acct.Handle] = acct
|
||||
reg.Active = acct.Handle
|
||||
}
|
||||
|
||||
// removeAccount removes an account from a registry.
|
||||
// If it was the active account, clears active (or sets to remaining account if exactly one left).
|
||||
func (c *Config) removeAccount(registryURL, handle string) {
|
||||
reg := c.findRegistry(registryURL)
|
||||
if reg == nil {
|
||||
return
|
||||
}
|
||||
|
||||
delete(reg.Accounts, handle)
|
||||
|
||||
if reg.Active == handle {
|
||||
reg.Active = ""
|
||||
if len(reg.Accounts) == 1 {
|
||||
for h := range reg.Accounts {
|
||||
reg.Active = h
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up empty registries
|
||||
if len(reg.Accounts) == 0 {
|
||||
delete(c.Registries, registryURL)
|
||||
}
|
||||
}
|
||||
|
||||
// getUpdateCheckCachePath returns the path to the update check cache file
|
||||
func getUpdateCheckCachePath() string {
|
||||
homeDir, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("%s/.atcr/update-check.json", homeDir)
|
||||
}
|
||||
|
||||
// loadUpdateCheckCache loads the update check cache from disk
|
||||
func loadUpdateCheckCache() *UpdateCheckCache {
|
||||
path := getUpdateCheckCachePath()
|
||||
if path == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var cache UpdateCheckCache
|
||||
if err := json.Unmarshal(data, &cache); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &cache
|
||||
}
|
||||
|
||||
// saveUpdateCheckCache saves the update check cache to disk
|
||||
func saveUpdateCheckCache(cache *UpdateCheckCache) {
|
||||
path := getUpdateCheckCachePath()
|
||||
if path == "" {
|
||||
return
|
||||
}
|
||||
|
||||
data, err := json.MarshalIndent(cache, "", " ")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
os.WriteFile(path, data, 0600) //nolint:errcheck
|
||||
}
|
||||
123
cmd/credential-helper/detect.go
Normal file
123
cmd/credential-helper/detect.go
Normal file
@@ -0,0 +1,123 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ImageRef is a parsed container image reference
|
||||
type ImageRef struct {
|
||||
Host string
|
||||
Identity string
|
||||
Repo string
|
||||
Tag string
|
||||
Raw string
|
||||
}
|
||||
|
||||
// detectImageRef walks the process tree looking for an image reference
|
||||
// that matches the given registry host. It starts from the parent process
|
||||
// and walks up to 5 ancestors to handle wrapper scripts (make, bash -c, etc.).
|
||||
//
|
||||
// Returns nil if no matching image reference is found — callers should
|
||||
// fall back to the active account.
|
||||
func detectImageRef(registryHost string) *ImageRef {
|
||||
// Normalize the registry host for matching
|
||||
matchHost := strings.TrimPrefix(registryHost, "https://")
|
||||
matchHost = strings.TrimPrefix(matchHost, "http://")
|
||||
matchHost = strings.TrimSuffix(matchHost, "/")
|
||||
|
||||
pid := os.Getppid()
|
||||
for depth := 0; depth < 5; depth++ {
|
||||
args, err := getProcessArgs(pid)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
for _, arg := range args {
|
||||
if ref := parseImageRef(arg, matchHost); ref != nil {
|
||||
return ref
|
||||
}
|
||||
}
|
||||
|
||||
ppid, err := getParentPID(pid)
|
||||
if err != nil || ppid == pid || ppid <= 1 {
|
||||
break
|
||||
}
|
||||
pid = ppid
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseImageRef tries to parse a string as a container image reference.
|
||||
// Expected format: host/identity/repo:tag or host/identity/repo
|
||||
//
|
||||
// Handles:
|
||||
// - docker:// and oci:// transport prefixes (skopeo)
|
||||
// - Flags (- prefix), paths (/ or . prefix), shell artifacts (|, &, ;)
|
||||
// - Optional tag (defaults to "latest")
|
||||
// - Host must look like a domain (contains ., or is localhost, or has :port)
|
||||
// - If matchHost is non-empty, only returns refs matching that host
|
||||
func parseImageRef(s string, matchHost string) *ImageRef {
|
||||
// Skip flags, absolute paths, relative paths
|
||||
if strings.HasPrefix(s, "-") || strings.HasPrefix(s, "/") || strings.HasPrefix(s, ".") {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Strip docker:// or oci:// transport prefixes (skopeo)
|
||||
s = strings.TrimPrefix(s, "docker://")
|
||||
s = strings.TrimPrefix(s, "oci://")
|
||||
|
||||
// Skip other transport schemes
|
||||
if strings.Contains(s, "://") {
|
||||
return nil
|
||||
}
|
||||
// Must contain at least one slash
|
||||
if !strings.Contains(s, "/") {
|
||||
return nil
|
||||
}
|
||||
// Skip things that look like shell commands
|
||||
if strings.ContainsAny(s, " |&;") {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Split off tag
|
||||
tag := "latest"
|
||||
refPart := s
|
||||
if atIdx := strings.LastIndex(s, ":"); atIdx != -1 {
|
||||
lastSlash := strings.LastIndex(s, "/")
|
||||
if atIdx > lastSlash {
|
||||
tag = s[atIdx+1:]
|
||||
refPart = s[:atIdx]
|
||||
}
|
||||
}
|
||||
|
||||
parts := strings.Split(refPart, "/")
|
||||
|
||||
// ATCR pattern requires host/identity/repo (3+ parts)
|
||||
if len(parts) < 3 {
|
||||
return nil
|
||||
}
|
||||
|
||||
host := parts[0]
|
||||
identity := parts[1]
|
||||
repo := strings.Join(parts[2:], "/")
|
||||
|
||||
// Host must look like a domain
|
||||
if !strings.Contains(host, ".") && host != "localhost" && !strings.Contains(host, ":") {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If a specific host was requested, enforce it
|
||||
if matchHost != "" && host != matchHost {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &ImageRef{
|
||||
Host: host,
|
||||
Identity: identity,
|
||||
Repo: repo,
|
||||
Tag: tag,
|
||||
Raw: s,
|
||||
}
|
||||
}
|
||||
173
cmd/credential-helper/device_auth.go
Normal file
173
cmd/credential-helper/device_auth.go
Normal file
@@ -0,0 +1,173 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Device authorization API types
|
||||
|
||||
type DeviceCodeRequest struct {
|
||||
DeviceName string `json:"device_name"`
|
||||
}
|
||||
|
||||
type DeviceCodeResponse struct {
|
||||
DeviceCode string `json:"device_code"`
|
||||
UserCode string `json:"user_code"`
|
||||
VerificationURI string `json:"verification_uri"`
|
||||
ExpiresIn int `json:"expires_in"`
|
||||
Interval int `json:"interval"`
|
||||
}
|
||||
|
||||
type DeviceTokenRequest struct {
|
||||
DeviceCode string `json:"device_code"`
|
||||
}
|
||||
|
||||
type DeviceTokenResponse struct {
|
||||
DeviceSecret string `json:"device_secret,omitempty"`
|
||||
Handle string `json:"handle,omitempty"`
|
||||
DID string `json:"did,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// AuthErrorResponse is the JSON error response from /auth/token
|
||||
type AuthErrorResponse struct {
|
||||
Error string `json:"error"`
|
||||
Message string `json:"message"`
|
||||
LoginURL string `json:"login_url,omitempty"`
|
||||
}
|
||||
|
||||
// ValidationResult represents the result of credential validation
|
||||
type ValidationResult struct {
|
||||
Valid bool
|
||||
OAuthSessionExpired bool
|
||||
LoginURL string
|
||||
}
|
||||
|
||||
// requestDeviceCode requests a device code from the AppView.
|
||||
// Returns the code response and resolved AppView URL.
|
||||
// Does not print anything — the caller controls UX.
|
||||
func requestDeviceCode(serverURL string) (*DeviceCodeResponse, string, error) {
|
||||
appViewURL := buildAppViewURL(serverURL)
|
||||
deviceName := hostname()
|
||||
|
||||
reqBody, _ := json.Marshal(DeviceCodeRequest{DeviceName: deviceName})
|
||||
resp, err := http.Post(appViewURL+"/auth/device/code", "application/json", bytes.NewReader(reqBody))
|
||||
if err != nil {
|
||||
return nil, appViewURL, fmt.Errorf("failed to request device code: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return nil, appViewURL, fmt.Errorf("device code request failed: %s", string(body))
|
||||
}
|
||||
|
||||
var codeResp DeviceCodeResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&codeResp); err != nil {
|
||||
return nil, appViewURL, fmt.Errorf("failed to decode device code response: %w", err)
|
||||
}
|
||||
|
||||
return &codeResp, appViewURL, nil
|
||||
}
|
||||
|
||||
// pollDeviceToken polls the token endpoint until authorization completes.
|
||||
// Does not print anything — the caller controls UX.
|
||||
// Returns the account on success, or an error on timeout/failure.
|
||||
func pollDeviceToken(appViewURL string, codeResp *DeviceCodeResponse) (*Account, error) {
|
||||
pollInterval := time.Duration(codeResp.Interval) * time.Second
|
||||
timeout := time.Duration(codeResp.ExpiresIn) * time.Second
|
||||
deadline := time.Now().Add(timeout)
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
time.Sleep(pollInterval)
|
||||
|
||||
tokenReqBody, _ := json.Marshal(DeviceTokenRequest{DeviceCode: codeResp.DeviceCode})
|
||||
tokenResp, err := http.Post(appViewURL+"/auth/device/token", "application/json", bytes.NewReader(tokenReqBody))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var tokenResult DeviceTokenResponse
|
||||
if err := json.NewDecoder(tokenResp.Body).Decode(&tokenResult); err != nil {
|
||||
tokenResp.Body.Close()
|
||||
continue
|
||||
}
|
||||
tokenResp.Body.Close()
|
||||
|
||||
if tokenResult.Error == "authorization_pending" {
|
||||
continue
|
||||
}
|
||||
|
||||
if tokenResult.Error != "" {
|
||||
return nil, fmt.Errorf("authorization failed: %s", tokenResult.Error)
|
||||
}
|
||||
|
||||
return &Account{
|
||||
Handle: tokenResult.Handle,
|
||||
DID: tokenResult.DID,
|
||||
DeviceSecret: tokenResult.DeviceSecret,
|
||||
}, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("authorization timed out")
|
||||
}
|
||||
|
||||
// validateCredentials checks if the credentials are still valid by making a test request
|
||||
func validateCredentials(appViewURL, handle, deviceSecret string) ValidationResult {
|
||||
client := &http.Client{
|
||||
Timeout: 5 * time.Second,
|
||||
}
|
||||
|
||||
tokenURL := appViewURL + "/auth/token?service=" + appViewURL
|
||||
|
||||
req, err := http.NewRequest("GET", tokenURL, nil)
|
||||
if err != nil {
|
||||
return ValidationResult{Valid: false}
|
||||
}
|
||||
|
||||
req.SetBasicAuth(handle, deviceSecret)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
// Network error — assume credentials are valid but server unreachable
|
||||
return ValidationResult{Valid: true}
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
return ValidationResult{Valid: true}
|
||||
}
|
||||
|
||||
if resp.StatusCode == http.StatusUnauthorized {
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err == nil {
|
||||
var authErr AuthErrorResponse
|
||||
if json.Unmarshal(body, &authErr) == nil && authErr.Error == "oauth_session_expired" {
|
||||
return ValidationResult{
|
||||
Valid: false,
|
||||
OAuthSessionExpired: true,
|
||||
LoginURL: authErr.LoginURL,
|
||||
}
|
||||
}
|
||||
}
|
||||
return ValidationResult{Valid: false}
|
||||
}
|
||||
|
||||
// Any other error = assume valid (don't re-auth on server issues)
|
||||
return ValidationResult{Valid: true}
|
||||
}
|
||||
|
||||
// hostname returns the machine hostname, or a fallback.
|
||||
func hostname() string {
|
||||
name, err := os.Hostname()
|
||||
if err != nil {
|
||||
return "Unknown Device"
|
||||
}
|
||||
return name
|
||||
}
|
||||
195
cmd/credential-helper/helpers.go
Normal file
195
cmd/credential-helper/helpers.go
Normal file
@@ -0,0 +1,195 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/charmbracelet/lipgloss"
|
||||
)
|
||||
|
||||
// Status message styles (matching gh CLI conventions)
|
||||
var (
|
||||
successStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("2")) // green
|
||||
warningStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("3")) // yellow
|
||||
infoStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("6")) // cyan
|
||||
boldStyle = lipgloss.NewStyle().Bold(true)
|
||||
)
|
||||
|
||||
// logSuccess prints a green ✓ prefixed message to stderr
|
||||
func logSuccess(format string, a ...any) {
|
||||
fmt.Fprintf(os.Stderr, "%s %s\n", successStyle.Render("✓"), fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
// logWarning prints a yellow ! prefixed message to stderr
|
||||
func logWarning(format string, a ...any) {
|
||||
fmt.Fprintf(os.Stderr, "%s %s\n", warningStyle.Render("!"), fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
// logInfo prints a cyan - prefixed message to stderr
|
||||
func logInfo(format string, a ...any) {
|
||||
fmt.Fprintf(os.Stderr, "%s %s\n", infoStyle.Render("-"), fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
// logInfof prints a cyan - prefixed message to stderr without a trailing newline
|
||||
func logInfof(format string, a ...any) {
|
||||
fmt.Fprintf(os.Stderr, "%s %s", infoStyle.Render("-"), fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
// bold renders text in bold
|
||||
func bold(s string) string {
|
||||
return boldStyle.Render(s)
|
||||
}
|
||||
|
||||
// DockerDaemonConfig represents Docker's daemon.json configuration
|
||||
type DockerDaemonConfig struct {
|
||||
InsecureRegistries []string `json:"insecure-registries"`
|
||||
}
|
||||
|
||||
// openBrowser opens the specified URL in the default browser
|
||||
func openBrowser(url string) error {
|
||||
var cmd *exec.Cmd
|
||||
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
cmd = exec.Command("xdg-open", url)
|
||||
case "darwin":
|
||||
cmd = exec.Command("open", url)
|
||||
case "windows":
|
||||
cmd = exec.Command("rundll32", "url.dll,FileProtocolHandler", url)
|
||||
default:
|
||||
return fmt.Errorf("unsupported platform")
|
||||
}
|
||||
|
||||
return cmd.Start()
|
||||
}
|
||||
|
||||
// buildAppViewURL constructs the AppView URL with the appropriate protocol
|
||||
func buildAppViewURL(serverURL string) string {
|
||||
// If serverURL already has a scheme, use it as-is
|
||||
if strings.HasPrefix(serverURL, "http://") || strings.HasPrefix(serverURL, "https://") {
|
||||
return serverURL
|
||||
}
|
||||
|
||||
// Determine protocol based on Docker configuration and heuristics
|
||||
if isInsecureRegistry(serverURL) {
|
||||
return "http://" + serverURL
|
||||
}
|
||||
|
||||
// Default to HTTPS (mirrors Docker's default behavior)
|
||||
return "https://" + serverURL
|
||||
}
|
||||
|
||||
// isInsecureRegistry checks if a registry should use HTTP instead of HTTPS
|
||||
func isInsecureRegistry(serverURL string) bool {
|
||||
// Check Docker's insecure-registries configuration
|
||||
insecureRegistries := getDockerInsecureRegistries()
|
||||
for _, reg := range insecureRegistries {
|
||||
if reg == serverURL || reg == stripPort(serverURL) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback heuristics: localhost and private IPs
|
||||
host := stripPort(serverURL)
|
||||
|
||||
if host == "localhost" || host == "127.0.0.1" || host == "::1" {
|
||||
return true
|
||||
}
|
||||
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
if ip.IsLoopback() || ip.IsPrivate() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// getDockerInsecureRegistries reads Docker's insecure-registries configuration
|
||||
func getDockerInsecureRegistries() []string {
|
||||
var paths []string
|
||||
|
||||
switch runtime.GOOS {
|
||||
case "windows":
|
||||
programData := os.Getenv("ProgramData")
|
||||
if programData != "" {
|
||||
paths = append(paths, filepath.Join(programData, "docker", "config", "daemon.json"))
|
||||
}
|
||||
default:
|
||||
paths = append(paths, "/etc/docker/daemon.json")
|
||||
if homeDir, err := os.UserHomeDir(); err == nil {
|
||||
paths = append(paths, filepath.Join(homeDir, ".docker", "daemon.json"))
|
||||
}
|
||||
}
|
||||
|
||||
for _, path := range paths {
|
||||
if config := readDockerDaemonConfig(path); config != nil && len(config.InsecureRegistries) > 0 {
|
||||
return config.InsecureRegistries
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// readDockerDaemonConfig reads and parses a Docker daemon.json file
|
||||
func readDockerDaemonConfig(path string) *DockerDaemonConfig {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var config DockerDaemonConfig
|
||||
if err := json.Unmarshal(data, &config); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &config
|
||||
}
|
||||
|
||||
// stripPort removes the port from a host:port string
|
||||
func stripPort(hostPort string) string {
|
||||
if colonIdx := strings.LastIndex(hostPort, ":"); colonIdx != -1 {
|
||||
if strings.Count(hostPort, ":") > 1 {
|
||||
return hostPort
|
||||
}
|
||||
return hostPort[:colonIdx]
|
||||
}
|
||||
return hostPort
|
||||
}
|
||||
|
||||
// isTerminal checks if the file is a terminal
|
||||
func isTerminal(f *os.File) bool {
|
||||
stat, err := f.Stat()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return (stat.Mode() & os.ModeCharDevice) != 0
|
||||
}
|
||||
|
||||
// getConfigDir returns the path to the .atcr config directory, creating it if needed
|
||||
func getConfigDir() string {
|
||||
homeDir, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error getting home directory: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
atcrDir := filepath.Join(homeDir, ".atcr")
|
||||
if err := os.MkdirAll(atcrDir, 0700); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error creating .atcr directory: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
return atcrDir
|
||||
}
|
||||
|
||||
// getConfigPath returns the path to the device configuration file
|
||||
func getConfigPath() string {
|
||||
return filepath.Join(getConfigDir(), "device.json")
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
107
cmd/credential-helper/process_darwin.go
Normal file
107
cmd/credential-helper/process_darwin.go
Normal file
@@ -0,0 +1,107 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// getProcessArgs uses kern.procargs2 sysctl to get process arguments.
|
||||
// This is the same mechanism ps(1) uses on macOS — no exec.Command needed.
|
||||
//
|
||||
// The kern.procargs2 buffer layout:
|
||||
//
|
||||
// [4 bytes: argc as int32]
|
||||
// [executable path\0]
|
||||
// [padding \0 bytes]
|
||||
// [argv[0]\0][argv[1]\0]...[argv[argc-1]\0]
|
||||
// [env vars...]
|
||||
func getProcessArgs(pid int) ([]string, error) {
|
||||
// kern.procargs2 MIB: CTL_KERN=1, KERN_PROCARGS2=49
|
||||
mib := []int32{1, 49, int32(pid)} //nolint:mnd
|
||||
|
||||
// First call to get buffer size
|
||||
n := uintptr(0)
|
||||
if err := sysctl(mib, nil, &n, nil, 0); err != nil {
|
||||
return nil, fmt.Errorf("sysctl size query for pid %d: %w", pid, err)
|
||||
}
|
||||
|
||||
buf := make([]byte, n)
|
||||
if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
|
||||
return nil, fmt.Errorf("sysctl read for pid %d: %w", pid, err)
|
||||
}
|
||||
buf = buf[:n]
|
||||
|
||||
if len(buf) < 4 {
|
||||
return nil, fmt.Errorf("procargs2 buffer too short for pid %d", pid)
|
||||
}
|
||||
|
||||
// First 4 bytes: argc
|
||||
argc := int(binary.LittleEndian.Uint32(buf[:4]))
|
||||
pos := 4
|
||||
|
||||
// Skip executable path (null-terminated)
|
||||
end := bytes.IndexByte(buf[pos:], 0)
|
||||
if end == -1 {
|
||||
return nil, fmt.Errorf("no null terminator in exec path for pid %d", pid)
|
||||
}
|
||||
pos += end + 1
|
||||
|
||||
// Skip padding null bytes
|
||||
for pos < len(buf) && buf[pos] == 0 {
|
||||
pos++
|
||||
}
|
||||
|
||||
// Read argc arguments
|
||||
args := make([]string, 0, argc)
|
||||
for i := 0; i < argc && pos < len(buf); i++ {
|
||||
end := bytes.IndexByte(buf[pos:], 0)
|
||||
if end == -1 {
|
||||
args = append(args, string(buf[pos:]))
|
||||
break
|
||||
}
|
||||
args = append(args, string(buf[pos:pos+end]))
|
||||
pos += end + 1
|
||||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
return nil, fmt.Errorf("no args found for pid %d", pid)
|
||||
}
|
||||
|
||||
return args, nil
|
||||
}
|
||||
|
||||
// getParentPID uses kern.proc.pid sysctl to find the parent PID.
|
||||
func getParentPID(pid int) (int, error) {
|
||||
// kern.proc.pid MIB: CTL_KERN=1, KERN_PROC=14, KERN_PROC_PID=1
|
||||
mib := []int32{1, 14, 1, int32(pid)} //nolint:mnd
|
||||
|
||||
var kinfo unix.KinfoProc
|
||||
n := uintptr(unsafe.Sizeof(kinfo))
|
||||
|
||||
if err := sysctl(mib, (*byte)(unsafe.Pointer(&kinfo)), &n, nil, 0); err != nil {
|
||||
return 0, fmt.Errorf("sysctl kern.proc.pid for pid %d: %w", pid, err)
|
||||
}
|
||||
|
||||
return int(kinfo.Eproc.Ppid), nil
|
||||
}
|
||||
|
||||
// sysctl is a thin wrapper around unix.Sysctl raw syscall.
|
||||
func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error {
|
||||
_, _, errno := unix.Syscall6(
|
||||
unix.SYS___SYSCTL,
|
||||
uintptr(unsafe.Pointer(&mib[0])),
|
||||
uintptr(len(mib)),
|
||||
uintptr(unsafe.Pointer(old)),
|
||||
uintptr(unsafe.Pointer(oldlen)),
|
||||
uintptr(unsafe.Pointer(new)),
|
||||
newlen,
|
||||
)
|
||||
if errno != 0 {
|
||||
return errno
|
||||
}
|
||||
return nil
|
||||
}
|
||||
42
cmd/credential-helper/process_linux.go
Normal file
42
cmd/credential-helper/process_linux.go
Normal file
@@ -0,0 +1,42 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// getProcessArgs reads /proc/<pid>/cmdline to get process arguments.
|
||||
func getProcessArgs(pid int) ([]string, error) {
|
||||
data, err := os.ReadFile(fmt.Sprintf("/proc/%d/cmdline", pid))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading /proc/%d/cmdline: %w", pid, err)
|
||||
}
|
||||
|
||||
s := strings.TrimRight(string(data), "\x00")
|
||||
if s == "" {
|
||||
return nil, fmt.Errorf("empty cmdline for pid %d", pid)
|
||||
}
|
||||
|
||||
return strings.Split(s, "\x00"), nil
|
||||
}
|
||||
|
||||
// getParentPID reads /proc/<pid>/status to find the parent PID.
|
||||
func getParentPID(pid int) (int, error) {
|
||||
data, err := os.ReadFile(fmt.Sprintf("/proc/%d/status", pid))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
for _, line := range strings.Split(string(data), "\n") {
|
||||
if strings.HasPrefix(line, "PPid:") {
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) >= 2 {
|
||||
return strconv.Atoi(fields[1])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf("PPid not found in /proc/%d/status", pid)
|
||||
}
|
||||
19
cmd/credential-helper/process_other.go
Normal file
19
cmd/credential-helper/process_other.go
Normal file
@@ -0,0 +1,19 @@
|
||||
//go:build !linux && !darwin
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// getProcessArgs is not supported on this platform.
|
||||
// The credential helper falls back to the active account.
|
||||
func getProcessArgs(pid int) ([]string, error) {
|
||||
return nil, fmt.Errorf("process introspection not supported on %s", runtime.GOOS)
|
||||
}
|
||||
|
||||
// getParentPID is not supported on this platform.
|
||||
func getParentPID(pid int) (int, error) {
|
||||
return 0, fmt.Errorf("process introspection not supported on %s", runtime.GOOS)
|
||||
}
|
||||
234
cmd/credential-helper/protocol.go
Normal file
234
cmd/credential-helper/protocol.go
Normal file
@@ -0,0 +1,234 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Credentials represents docker credentials (Docker credential helper protocol)
|
||||
type Credentials struct {
|
||||
ServerURL string `json:"ServerURL,omitempty"`
|
||||
Username string `json:"Username,omitempty"`
|
||||
Secret string `json:"Secret,omitempty"`
|
||||
}
|
||||
|
||||
func newGetCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "get",
|
||||
Short: "Get credentials for a registry (Docker protocol)",
|
||||
Hidden: true,
|
||||
RunE: runGet,
|
||||
}
|
||||
}
|
||||
|
||||
func newStoreCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "store",
|
||||
Short: "Store credentials (Docker protocol)",
|
||||
Hidden: true,
|
||||
RunE: runStore,
|
||||
}
|
||||
}
|
||||
|
||||
func newEraseCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "erase",
|
||||
Short: "Erase credentials (Docker protocol)",
|
||||
Hidden: true,
|
||||
RunE: runErase,
|
||||
}
|
||||
}
|
||||
|
||||
func newListCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List all credentials (Docker protocol extension)",
|
||||
Hidden: true,
|
||||
RunE: runList,
|
||||
}
|
||||
}
|
||||
|
||||
func runGet(cmd *cobra.Command, args []string) error {
|
||||
// If stdin is a terminal, the user ran this directly (not Docker calling us)
|
||||
if isTerminal(os.Stdin) {
|
||||
fmt.Fprintf(os.Stderr, "The 'get' command is part of the Docker credential helper protocol.\n")
|
||||
fmt.Fprintf(os.Stderr, "It should not be run directly.\n\n")
|
||||
fmt.Fprintf(os.Stderr, "To authenticate with a registry, run:\n")
|
||||
fmt.Fprintf(os.Stderr, " docker-credential-atcr login\n\n")
|
||||
fmt.Fprintf(os.Stderr, "To check your accounts:\n")
|
||||
fmt.Fprintf(os.Stderr, " docker-credential-atcr status\n")
|
||||
return fmt.Errorf("not a pipe")
|
||||
}
|
||||
|
||||
// Docker sends the server URL as a plain string on stdin (not JSON)
|
||||
var serverURL string
|
||||
if _, err := fmt.Fscanln(os.Stdin, &serverURL); err != nil {
|
||||
return fmt.Errorf("reading server URL: %w", err)
|
||||
}
|
||||
|
||||
appViewURL := buildAppViewURL(serverURL)
|
||||
|
||||
cfg, err := loadConfig()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: config load error: %v\n", err)
|
||||
}
|
||||
|
||||
acct, err := cfg.resolveAccount(appViewURL, serverURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate credentials
|
||||
result := validateCredentials(appViewURL, acct.Handle, acct.DeviceSecret)
|
||||
if !result.Valid {
|
||||
if result.OAuthSessionExpired {
|
||||
loginURL := result.LoginURL
|
||||
if loginURL == "" {
|
||||
loginURL = appViewURL + "/auth/oauth/login"
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "OAuth session expired for %s.\n", acct.Handle)
|
||||
fmt.Fprintf(os.Stderr, "Please visit: %s\n", loginURL)
|
||||
fmt.Fprintf(os.Stderr, "Then retry your docker command.\n")
|
||||
return fmt.Errorf("oauth session expired")
|
||||
}
|
||||
|
||||
// Generic auth failure — remove the bad account
|
||||
fmt.Fprintf(os.Stderr, "Credentials for %s are invalid.\n", acct.Handle)
|
||||
fmt.Fprintf(os.Stderr, "Run: docker-credential-atcr login\n")
|
||||
cfg.removeAccount(appViewURL, acct.Handle)
|
||||
cfg.save() //nolint:errcheck
|
||||
return fmt.Errorf("invalid credentials")
|
||||
}
|
||||
|
||||
// Check for updates (cached, non-blocking)
|
||||
checkAndNotifyUpdate(appViewURL)
|
||||
|
||||
// Return credentials for Docker
|
||||
creds := Credentials{
|
||||
ServerURL: serverURL,
|
||||
Username: acct.Handle,
|
||||
Secret: acct.DeviceSecret,
|
||||
}
|
||||
|
||||
return json.NewEncoder(os.Stdout).Encode(creds)
|
||||
}
|
||||
|
||||
func runStore(cmd *cobra.Command, args []string) error {
|
||||
var creds Credentials
|
||||
if err := json.NewDecoder(os.Stdin).Decode(&creds); err != nil {
|
||||
return fmt.Errorf("decoding credentials: %w", err)
|
||||
}
|
||||
|
||||
// Only store if the secret looks like a device secret
|
||||
if !strings.HasPrefix(creds.Secret, "atcr_device_") {
|
||||
// Not our device secret — ignore (e.g., docker login with app-password)
|
||||
return nil
|
||||
}
|
||||
|
||||
appViewURL := buildAppViewURL(creds.ServerURL)
|
||||
|
||||
cfg, err := loadConfig()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: config load error: %v\n", err)
|
||||
}
|
||||
|
||||
cfg.addAccount(appViewURL, &Account{
|
||||
Handle: creds.Username,
|
||||
DeviceSecret: creds.Secret,
|
||||
})
|
||||
|
||||
return cfg.save()
|
||||
}
|
||||
|
||||
func runErase(cmd *cobra.Command, args []string) error {
|
||||
var serverURL string
|
||||
if _, err := fmt.Fscanln(os.Stdin, &serverURL); err != nil {
|
||||
return fmt.Errorf("reading server URL: %w", err)
|
||||
}
|
||||
|
||||
appViewURL := buildAppViewURL(serverURL)
|
||||
|
||||
cfg, err := loadConfig()
|
||||
if err != nil {
|
||||
return nil // No config, nothing to erase
|
||||
}
|
||||
|
||||
reg := cfg.findRegistry(appViewURL)
|
||||
if reg == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Erase the active account (or sole account)
|
||||
handle := reg.Active
|
||||
if handle == "" && len(reg.Accounts) == 1 {
|
||||
for h := range reg.Accounts {
|
||||
handle = h
|
||||
}
|
||||
}
|
||||
if handle == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
cfg.removeAccount(appViewURL, handle)
|
||||
return cfg.save()
|
||||
}
|
||||
|
||||
func runList(cmd *cobra.Command, args []string) error {
|
||||
cfg, err := loadConfig()
|
||||
if err != nil {
|
||||
// Return empty object
|
||||
fmt.Println("{}")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Docker list protocol: {"ServerURL": "Username", ...}
|
||||
result := make(map[string]string)
|
||||
for url, reg := range cfg.Registries {
|
||||
// Strip scheme for Docker compatibility
|
||||
host := strings.TrimPrefix(url, "https://")
|
||||
host = strings.TrimPrefix(host, "http://")
|
||||
for _, acct := range reg.Accounts {
|
||||
result[host] = acct.Handle
|
||||
}
|
||||
}
|
||||
|
||||
return json.NewEncoder(os.Stdout).Encode(result)
|
||||
}
|
||||
|
||||
// checkAndNotifyUpdate checks for updates in the background and notifies the user
|
||||
func checkAndNotifyUpdate(appViewURL string) {
|
||||
cache := loadUpdateCheckCache()
|
||||
if cache != nil && cache.Current == version {
|
||||
// Cache is fresh and for current version
|
||||
if isNewerVersion(cache.Latest, version) {
|
||||
fmt.Fprintf(os.Stderr, "\nUpdate available: %s (current: %s)\n", cache.Latest, version)
|
||||
fmt.Fprintf(os.Stderr, "Run: docker-credential-atcr update\n\n")
|
||||
}
|
||||
// Check if cache is still fresh (24h)
|
||||
if cache.CheckedAt.Add(updateCheckCacheTTL).After(timeNow()) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch version info
|
||||
apiURL := appViewURL + "/api/credential-helper/version"
|
||||
versionInfo, err := fetchVersionInfo(apiURL)
|
||||
if err != nil {
|
||||
return // Silently fail
|
||||
}
|
||||
|
||||
saveUpdateCheckCache(&UpdateCheckCache{
|
||||
CheckedAt: timeNow(),
|
||||
Latest: versionInfo.Latest,
|
||||
Current: version,
|
||||
})
|
||||
|
||||
if isNewerVersion(versionInfo.Latest, version) {
|
||||
fmt.Fprintf(os.Stderr, "\nUpdate available: %s (current: %s)\n", versionInfo.Latest, version)
|
||||
fmt.Fprintf(os.Stderr, "Run: docker-credential-atcr update\n\n")
|
||||
}
|
||||
}
|
||||
374
cmd/db-migrate/main.go
Normal file
374
cmd/db-migrate/main.go
Normal file
@@ -0,0 +1,374 @@
|
||||
// db-migrate copies all tables and data from a local SQLite database to a
|
||||
// remote libsql database (e.g. Bunny Database, Turso). It reads the schema
|
||||
// from sqlite_master, creates tables on the remote, and inserts all rows
|
||||
// in batches. Generic — works with any SQLite DB (appview, hold, etc.).
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// go run ./cmd/db-migrate --local /path/to/local.db --remote "libsql://..." --token "..."
|
||||
// go run ./cmd/db-migrate --local /path/to/local.db --remote "libsql://..." --token "..." --skip-existing
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
_ "github.com/tursodatabase/go-libsql"
|
||||
)
|
||||
|
||||
func main() {
|
||||
localPath := flag.String("local", "", "Path to local SQLite database file")
|
||||
remoteURL := flag.String("remote", "", "Remote libsql URL (libsql://...)")
|
||||
authToken := flag.String("token", "", "Auth token for remote database")
|
||||
skipExisting := flag.Bool("skip-existing", false, "Skip tables that already have data on remote")
|
||||
batchSize := flag.Int("batch-size", 100, "Number of rows per INSERT batch")
|
||||
dryRun := flag.Bool("dry-run", false, "Show what would be migrated without writing")
|
||||
flag.Parse()
|
||||
|
||||
if *localPath == "" || *remoteURL == "" || *authToken == "" {
|
||||
flag.Usage()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Open local database read-only
|
||||
localDSN := *localPath
|
||||
if !strings.HasPrefix(localDSN, "file:") {
|
||||
localDSN = "file:" + localDSN
|
||||
}
|
||||
localDSN += "?mode=ro"
|
||||
|
||||
localDB, err := sql.Open("libsql", localDSN)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to open local database: %v", err)
|
||||
}
|
||||
defer localDB.Close()
|
||||
|
||||
if err := localDB.Ping(); err != nil {
|
||||
log.Fatalf("Failed to ping local database: %v", err)
|
||||
}
|
||||
|
||||
// Open remote database
|
||||
remoteDSN := fmt.Sprintf("%s?authToken=%s", *remoteURL, *authToken)
|
||||
remoteDB, err := sql.Open("libsql", remoteDSN)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to open remote database: %v", err)
|
||||
}
|
||||
defer remoteDB.Close()
|
||||
|
||||
if err := remoteDB.Ping(); err != nil {
|
||||
log.Fatalf("Failed to ping remote database: %v", err)
|
||||
}
|
||||
// Get all user tables from local
|
||||
tables, err := getTables(localDB)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to list tables: %v", err)
|
||||
}
|
||||
|
||||
if len(tables) == 0 {
|
||||
log.Println("No tables found in local database")
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("Found %d tables to migrate\n\n", len(tables))
|
||||
|
||||
start := time.Now()
|
||||
|
||||
if !*dryRun {
|
||||
// Phase 1: Create all tables first so FK references resolve
|
||||
fmt.Println("Creating tables...")
|
||||
for _, t := range tables {
|
||||
if err := createTable(remoteDB, t); err != nil {
|
||||
log.Fatalf("Failed to create table %s: %v", t.name, err)
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Phase 2: Copy data
|
||||
fmt.Println("Migrating data...")
|
||||
totalRows := 0
|
||||
for _, t := range tables {
|
||||
count, err := migrateTable(localDB, remoteDB, t, *batchSize, *skipExisting, *dryRun)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to migrate table %s: %v", t.name, err)
|
||||
}
|
||||
totalRows += count
|
||||
}
|
||||
|
||||
if !*dryRun {
|
||||
// Phase 3: Create indexes after data is loaded (faster than indexing during insert)
|
||||
fmt.Println("\nCreating indexes...")
|
||||
for _, t := range tables {
|
||||
if err := createIndexes(localDB, remoteDB, t.name); err != nil {
|
||||
log.Fatalf("Failed to create indexes for %s: %v", t.name, err)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
fmt.Printf("\nDone. %d total rows across %d tables in %s\n", totalRows, len(tables), time.Since(start).Round(time.Millisecond))
|
||||
if *dryRun {
|
||||
fmt.Println("(dry run — nothing was written)")
|
||||
}
|
||||
}
|
||||
|
||||
type tableInfo struct {
|
||||
name string
|
||||
ddl string
|
||||
}
|
||||
|
||||
func getTables(db *sql.DB) ([]tableInfo, error) {
|
||||
rows, err := db.Query(`
|
||||
SELECT name, sql FROM sqlite_master
|
||||
WHERE type = 'table'
|
||||
AND name NOT LIKE 'sqlite_%'
|
||||
AND name NOT LIKE '_litestream_%'
|
||||
AND name NOT LIKE 'libsql_%'
|
||||
ORDER BY name
|
||||
`)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var tables []tableInfo
|
||||
for rows.Next() {
|
||||
var t tableInfo
|
||||
var ddl sql.NullString
|
||||
if err := rows.Scan(&t.name, &ddl); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ddl.Valid {
|
||||
t.ddl = ddl.String
|
||||
}
|
||||
tables = append(tables, t)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Sort tables so those referenced by foreign keys come first.
|
||||
// Tables with FK references depend on other tables existing and
|
||||
// having data, so we insert referenced tables first.
|
||||
return topoSortTables(db, tables)
|
||||
}
|
||||
|
||||
// topoSortTables orders tables so that referenced (parent) tables come before
|
||||
// tables that reference them via foreign keys.
|
||||
func topoSortTables(db *sql.DB, tables []tableInfo) ([]tableInfo, error) {
|
||||
byName := make(map[string]tableInfo, len(tables))
|
||||
for _, t := range tables {
|
||||
byName[t.name] = t
|
||||
}
|
||||
|
||||
// Build dependency graph: table -> tables it references
|
||||
deps := make(map[string][]string)
|
||||
for _, t := range tables {
|
||||
fkRows, err := db.Query(fmt.Sprintf("PRAGMA foreign_key_list([%s])", t.name))
|
||||
if err != nil {
|
||||
// PRAGMA might not return rows for tables without FKs
|
||||
continue
|
||||
}
|
||||
seen := make(map[string]bool)
|
||||
for fkRows.Next() {
|
||||
var id, seq int
|
||||
var table, from, to, onUpdate, onDelete, match string
|
||||
if err := fkRows.Scan(&id, &seq, &table, &from, &to, &onUpdate, &onDelete, &match); err != nil {
|
||||
fkRows.Close()
|
||||
return nil, err
|
||||
}
|
||||
if !seen[table] {
|
||||
deps[t.name] = append(deps[t.name], table)
|
||||
seen[table] = true
|
||||
}
|
||||
}
|
||||
fkRows.Close()
|
||||
}
|
||||
|
||||
// Topological sort (Kahn's algorithm)
|
||||
visited := make(map[string]bool)
|
||||
var sorted []tableInfo
|
||||
var visit func(name string)
|
||||
visit = func(name string) {
|
||||
if visited[name] {
|
||||
return
|
||||
}
|
||||
visited[name] = true
|
||||
for _, dep := range deps[name] {
|
||||
visit(dep)
|
||||
}
|
||||
if t, ok := byName[name]; ok {
|
||||
sorted = append(sorted, t)
|
||||
}
|
||||
}
|
||||
for _, t := range tables {
|
||||
visit(t.name)
|
||||
}
|
||||
return sorted, nil
|
||||
}
|
||||
|
||||
func getIndexes(db *sql.DB, tableName string) ([]string, error) {
|
||||
rows, err := db.Query(`
|
||||
SELECT sql FROM sqlite_master
|
||||
WHERE type = 'index'
|
||||
AND tbl_name = ?
|
||||
AND sql IS NOT NULL
|
||||
`, tableName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var indexes []string
|
||||
for rows.Next() {
|
||||
var ddl string
|
||||
if err := rows.Scan(&ddl); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indexes = append(indexes, ddl)
|
||||
}
|
||||
return indexes, rows.Err()
|
||||
}
|
||||
|
||||
func createTable(remoteDB *sql.DB, t tableInfo) error {
|
||||
if t.ddl == "" {
|
||||
return nil
|
||||
}
|
||||
ddl := t.ddl
|
||||
if !strings.Contains(strings.ToUpper(ddl), "IF NOT EXISTS") {
|
||||
ddl = strings.Replace(ddl, "CREATE TABLE", "CREATE TABLE IF NOT EXISTS", 1)
|
||||
}
|
||||
if _, err := remoteDB.Exec(ddl); err != nil {
|
||||
return fmt.Errorf("create table %s: %w", t.name, err)
|
||||
}
|
||||
fmt.Printf(" %s\n", t.name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func createIndexes(localDB, remoteDB *sql.DB, tableName string) error {
|
||||
indexes, err := getIndexes(localDB, tableName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, idx := range indexes {
|
||||
ddl := idx
|
||||
if !strings.Contains(strings.ToUpper(ddl), "IF NOT EXISTS") {
|
||||
ddl = strings.Replace(ddl, "CREATE INDEX", "CREATE INDEX IF NOT EXISTS", 1)
|
||||
ddl = strings.Replace(ddl, "CREATE UNIQUE INDEX", "CREATE UNIQUE INDEX IF NOT EXISTS", 1)
|
||||
}
|
||||
if _, err := remoteDB.Exec(ddl); err != nil {
|
||||
return fmt.Errorf("create index on %s: %w", tableName, err)
|
||||
}
|
||||
}
|
||||
if len(indexes) > 0 {
|
||||
fmt.Printf(" %s: %d indexes\n", tableName, len(indexes))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func migrateTable(localDB, remoteDB *sql.DB, t tableInfo, batchSize int, skipExisting, dryRun bool) (int, error) {
|
||||
var localCount int
|
||||
if err := localDB.QueryRow(fmt.Sprintf("SELECT COUNT(*) FROM [%s]", t.name)).Scan(&localCount); err != nil {
|
||||
return 0, fmt.Errorf("count local rows: %w", err)
|
||||
}
|
||||
|
||||
if localCount == 0 {
|
||||
fmt.Printf(" %-30s %6d rows (empty)\n", t.name, 0)
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
if dryRun {
|
||||
fmt.Printf(" %-30s %6d rows (would migrate)\n", t.name, localCount)
|
||||
return localCount, nil
|
||||
}
|
||||
|
||||
if skipExisting {
|
||||
var remoteCount int
|
||||
if err := remoteDB.QueryRow(fmt.Sprintf("SELECT COUNT(*) FROM [%s]", t.name)).Scan(&remoteCount); err != nil {
|
||||
return 0, fmt.Errorf("count remote rows: %w", err)
|
||||
}
|
||||
if remoteCount > 0 {
|
||||
fmt.Printf(" %-30s %6d rows (skipped, %d on remote)\n", t.name, localCount, remoteCount)
|
||||
return 0, nil
|
||||
}
|
||||
}
|
||||
|
||||
rows, err := localDB.Query(fmt.Sprintf("SELECT * FROM [%s]", t.name))
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("select: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
cols, err := rows.Columns()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("columns: %w", err)
|
||||
}
|
||||
|
||||
placeholders := make([]string, len(cols))
|
||||
quotedCols := make([]string, len(cols))
|
||||
for i, c := range cols {
|
||||
placeholders[i] = "?"
|
||||
quotedCols[i] = fmt.Sprintf("[%s]", c)
|
||||
}
|
||||
insertPrefix := fmt.Sprintf("INSERT INTO [%s] (%s) VALUES ", t.name, strings.Join(quotedCols, ", "))
|
||||
rowPlaceholder := "(" + strings.Join(placeholders, ", ") + ")"
|
||||
|
||||
inserted := 0
|
||||
batch := make([][]any, 0, batchSize)
|
||||
|
||||
for rows.Next() {
|
||||
vals := make([]any, len(cols))
|
||||
ptrs := make([]any, len(cols))
|
||||
for i := range vals {
|
||||
ptrs[i] = &vals[i]
|
||||
}
|
||||
if err := rows.Scan(ptrs...); err != nil {
|
||||
return 0, fmt.Errorf("scan: %w", err)
|
||||
}
|
||||
batch = append(batch, vals)
|
||||
|
||||
if len(batch) >= batchSize {
|
||||
if err := insertBatch(remoteDB, insertPrefix, rowPlaceholder, batch); err != nil {
|
||||
return 0, fmt.Errorf("insert batch at row %d: %w", inserted, err)
|
||||
}
|
||||
inserted += len(batch)
|
||||
batch = batch[:0]
|
||||
}
|
||||
}
|
||||
|
||||
if len(batch) > 0 {
|
||||
if err := insertBatch(remoteDB, insertPrefix, rowPlaceholder, batch); err != nil {
|
||||
return 0, fmt.Errorf("insert final batch: %w", err)
|
||||
}
|
||||
inserted += len(batch)
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
return 0, fmt.Errorf("rows iteration: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf(" %-30s %6d rows migrated\n", t.name, inserted)
|
||||
return inserted, nil
|
||||
}
|
||||
|
||||
func insertBatch(db *sql.DB, prefix, rowPlaceholder string, batch [][]any) error {
|
||||
if len(batch) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
placeholders := make([]string, len(batch))
|
||||
var args []any
|
||||
for i, row := range batch {
|
||||
placeholders[i] = rowPlaceholder
|
||||
args = append(args, row...)
|
||||
}
|
||||
|
||||
query := prefix + strings.Join(placeholders, ", ")
|
||||
_, err := db.Exec(query, args...)
|
||||
return err
|
||||
}
|
||||
22
cmd/healthcheck/main.go
Normal file
22
cmd/healthcheck/main.go
Normal file
@@ -0,0 +1,22 @@
|
||||
// Minimal HTTP health check binary for scratch Docker images.
|
||||
// Usage: healthcheck <url>
|
||||
// Exits 0 if the URL returns HTTP 200, 1 otherwise.
|
||||
package main
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if len(os.Args) < 2 {
|
||||
os.Exit(1)
|
||||
}
|
||||
client := &http.Client{Timeout: 5 * time.Second}
|
||||
resp, err := client.Get(os.Args[1])
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
||||
289
cmd/hold/main.go
289
cmd/hold/main.go
@@ -1,229 +1,88 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"atcr.io/pkg/hold"
|
||||
"atcr.io/pkg/hold/oci"
|
||||
"atcr.io/pkg/hold/pds"
|
||||
"atcr.io/pkg/logging"
|
||||
"atcr.io/pkg/s3"
|
||||
|
||||
// Import storage drivers
|
||||
"github.com/distribution/distribution/v3/registry/storage/driver/factory"
|
||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/filesystem"
|
||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/s3-aws"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
)
|
||||
|
||||
var configFile string
|
||||
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "atcr-hold",
|
||||
Short: "ATCR Hold Service - BYOS blob storage",
|
||||
}
|
||||
|
||||
var serveCmd = &cobra.Command{
|
||||
Use: "serve",
|
||||
Short: "Start the hold service",
|
||||
Long: `Start the ATCR hold service with embedded PDS and S3 blob storage.
|
||||
|
||||
Configuration is loaded in layers: defaults -> YAML file -> environment variables.
|
||||
Use --config to specify a YAML configuration file.
|
||||
Environment variables always override file values.`,
|
||||
Args: cobra.NoArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
cfg, err := hold.LoadConfig(configFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load config: %w", err)
|
||||
}
|
||||
|
||||
server, err := hold.NewHoldServer(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize hold server: %w", err)
|
||||
}
|
||||
|
||||
return server.Serve()
|
||||
},
|
||||
}
|
||||
|
||||
var configCmd = &cobra.Command{
|
||||
Use: "config",
|
||||
Short: "Configuration management commands",
|
||||
}
|
||||
|
||||
var configInitCmd = &cobra.Command{
|
||||
Use: "init [path]",
|
||||
Short: "Generate an example configuration file",
|
||||
Long: `Generate an example YAML configuration file with all available options.
|
||||
If path is provided, writes to that file. Otherwise writes to stdout.`,
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
yamlBytes, err := hold.ExampleYAML()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate example config: %w", err)
|
||||
}
|
||||
if len(args) == 1 {
|
||||
if err := os.WriteFile(args[0], yamlBytes, 0644); err != nil {
|
||||
return fmt.Errorf("failed to write config file: %w", err)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "Wrote example config to %s\n", args[0])
|
||||
return nil
|
||||
}
|
||||
fmt.Print(string(yamlBytes))
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
serveCmd.Flags().StringVarP(&configFile, "config", "c", "", "path to YAML configuration file")
|
||||
|
||||
configCmd.AddCommand(configInitCmd)
|
||||
|
||||
rootCmd.AddCommand(serveCmd)
|
||||
rootCmd.AddCommand(configCmd)
|
||||
rootCmd.AddCommand(repoCmd)
|
||||
rootCmd.AddCommand(plcCmd)
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Load configuration from environment variables
|
||||
cfg, err := hold.LoadConfigFromEnv()
|
||||
if err != nil {
|
||||
slog.Error("Failed to load config", "error", err)
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
slog.Error("Command failed", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Initialize structured logging
|
||||
logging.InitLogger(cfg.LogLevel)
|
||||
|
||||
// Initialize embedded PDS if database path is configured
|
||||
// This must happen before creating HoldService since service needs PDS for authorization
|
||||
var holdPDS *pds.HoldPDS
|
||||
var xrpcHandler *pds.XRPCHandler
|
||||
var broadcaster *pds.EventBroadcaster
|
||||
if cfg.Database.Path != "" {
|
||||
// Generate did:web from public URL
|
||||
holdDID := pds.GenerateDIDFromURL(cfg.Server.PublicURL)
|
||||
slog.Info("Initializing embedded PDS", "did", holdDID)
|
||||
|
||||
// Initialize PDS with carstore and keys
|
||||
ctx := context.Background()
|
||||
holdPDS, err = pds.NewHoldPDS(ctx, holdDID, cfg.Server.PublicURL, cfg.Database.Path, cfg.Database.KeyPath, cfg.Registration.EnableBlueskyPosts)
|
||||
if err != nil {
|
||||
slog.Error("Failed to initialize embedded PDS", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Create storage driver from config (needed for bootstrap profile avatar)
|
||||
driver, err := factory.Create(ctx, cfg.Storage.Type(), cfg.Storage.Parameters())
|
||||
if err != nil {
|
||||
slog.Error("Failed to create storage driver", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Bootstrap PDS with captain record, hold owner as first crew member, and profile
|
||||
if err := holdPDS.Bootstrap(ctx, driver, cfg.Registration.OwnerDID, cfg.Server.Public, cfg.Registration.AllowAllCrew, cfg.Registration.ProfileAvatarURL); err != nil {
|
||||
slog.Error("Failed to bootstrap PDS", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Create event broadcaster for subscribeRepos firehose
|
||||
// Database path: carstore creates db.sqlite3 inside cfg.Database.Path
|
||||
var dbPath string
|
||||
if cfg.Database.Path != ":memory:" {
|
||||
dbPath = cfg.Database.Path + "/db.sqlite3"
|
||||
} else {
|
||||
dbPath = ":memory:"
|
||||
}
|
||||
broadcaster = pds.NewEventBroadcaster(holdDID, 100, dbPath)
|
||||
|
||||
// Bootstrap events from existing repo records (one-time migration)
|
||||
if err := broadcaster.BootstrapFromRepo(holdPDS); err != nil {
|
||||
slog.Warn("Failed to bootstrap events from repo", "error", err)
|
||||
}
|
||||
|
||||
// Wire up repo event handler to broadcaster
|
||||
holdPDS.RepomgrRef().SetEventHandler(broadcaster.SetRepoEventHandler(), true)
|
||||
|
||||
slog.Info("Embedded PDS initialized successfully with firehose enabled")
|
||||
} else {
|
||||
slog.Error("Database path is required for embedded PDS authorization")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Create blob store adapter and XRPC handlers
|
||||
var ociHandler *oci.XRPCHandler
|
||||
if holdPDS != nil {
|
||||
// Create storage driver from config
|
||||
ctx := context.Background()
|
||||
driver, err := factory.Create(ctx, cfg.Storage.Type(), cfg.Storage.Parameters())
|
||||
if err != nil {
|
||||
slog.Error("Failed to create storage driver", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
s3Service, err := s3.NewS3Service(cfg.Storage.Parameters(), cfg.Server.DisablePresignedURLs, cfg.Storage.Type())
|
||||
if err != nil {
|
||||
slog.Error("Failed to create S3 service", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Create PDS XRPC handler (ATProto endpoints)
|
||||
xrpcHandler = pds.NewXRPCHandler(holdPDS, *s3Service, driver, broadcaster, nil)
|
||||
|
||||
// Create OCI XRPC handler (multipart upload endpoints)
|
||||
ociHandler = oci.NewXRPCHandler(holdPDS, *s3Service, driver, cfg.Server.DisablePresignedURLs, cfg.Registration.EnableBlueskyPosts, nil)
|
||||
}
|
||||
|
||||
// Setup HTTP routes with chi router
|
||||
r := chi.NewRouter()
|
||||
|
||||
// Add RealIP middleware to extract real client IP from proxy headers
|
||||
r.Use(middleware.RealIP)
|
||||
|
||||
// Add logging middleware to log all HTTP requests
|
||||
r.Use(middleware.Logger)
|
||||
|
||||
// Add CORS middleware (must be before routes)
|
||||
if xrpcHandler != nil {
|
||||
r.Use(xrpcHandler.CORSMiddleware())
|
||||
}
|
||||
|
||||
// Root page
|
||||
r.Get("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
fmt.Fprintf(w, "This is a hold server. More info at https://atcr.io")
|
||||
})
|
||||
|
||||
// Register XRPC/ATProto PDS endpoints if PDS is initialized
|
||||
if xrpcHandler != nil {
|
||||
slog.Info("Registering ATProto PDS endpoints")
|
||||
xrpcHandler.RegisterHandlers(r)
|
||||
}
|
||||
|
||||
// Register OCI multipart upload endpoints
|
||||
if ociHandler != nil {
|
||||
slog.Info("Registering OCI multipart upload endpoints")
|
||||
ociHandler.RegisterHandlers(r)
|
||||
}
|
||||
|
||||
// Create server
|
||||
server := &http.Server{
|
||||
Addr: cfg.Server.Addr,
|
||||
Handler: r,
|
||||
ReadTimeout: cfg.Server.ReadTimeout,
|
||||
WriteTimeout: cfg.Server.WriteTimeout,
|
||||
}
|
||||
|
||||
// Set up signal handling for graceful shutdown
|
||||
sigChan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
|
||||
|
||||
// Start server in goroutine
|
||||
serverErr := make(chan error, 1)
|
||||
go func() {
|
||||
slog.Info("Starting hold service", "addr", cfg.Server.Addr)
|
||||
if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
||||
serverErr <- err
|
||||
}
|
||||
}()
|
||||
|
||||
// Update status post to "online" after server starts
|
||||
if holdPDS != nil {
|
||||
ctx := context.Background()
|
||||
|
||||
if err := holdPDS.SetStatus(ctx, "online"); err != nil {
|
||||
slog.Warn("Failed to set status post to online", "error", err)
|
||||
} else {
|
||||
slog.Info("Status post set to online")
|
||||
}
|
||||
}
|
||||
|
||||
// Request crawl from relay to make PDS discoverable
|
||||
if cfg.Server.RelayEndpoint != "" {
|
||||
slog.Info("Requesting crawl from relay", "relay", cfg.Server.RelayEndpoint)
|
||||
if err := hold.RequestCrawl(cfg.Server.RelayEndpoint, cfg.Server.PublicURL); err != nil {
|
||||
slog.Warn("Failed to request crawl from relay", "error", err)
|
||||
} else {
|
||||
slog.Info("Crawl requested successfully")
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for signal or server error
|
||||
select {
|
||||
case err := <-serverErr:
|
||||
slog.Error("Server failed", "error", err)
|
||||
os.Exit(1)
|
||||
case sig := <-sigChan:
|
||||
slog.Info("Received signal, shutting down gracefully", "signal", sig)
|
||||
|
||||
// Update status post to "offline" before shutdown
|
||||
if holdPDS != nil {
|
||||
ctx := context.Background()
|
||||
if err := holdPDS.SetStatus(ctx, "offline"); err != nil {
|
||||
slog.Warn("Failed to set status post to offline", "error", err)
|
||||
} else {
|
||||
slog.Info("Status post set to offline")
|
||||
}
|
||||
}
|
||||
|
||||
// Close broadcaster database connection
|
||||
if broadcaster != nil {
|
||||
if err := broadcaster.Close(); err != nil {
|
||||
slog.Warn("Failed to close broadcaster database", "error", err)
|
||||
} else {
|
||||
slog.Info("Broadcaster database closed")
|
||||
}
|
||||
}
|
||||
|
||||
// Graceful shutdown with 10 second timeout
|
||||
shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := server.Shutdown(shutdownCtx); err != nil {
|
||||
slog.Error("Server shutdown error", "error", err)
|
||||
} else {
|
||||
slog.Info("Server shutdown complete")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
164
cmd/hold/plc.go
Normal file
164
cmd/hold/plc.go
Normal file
@@ -0,0 +1,164 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
|
||||
"atcr.io/pkg/auth/oauth"
|
||||
"atcr.io/pkg/hold"
|
||||
"atcr.io/pkg/hold/pds"
|
||||
|
||||
"github.com/bluesky-social/indigo/atproto/atcrypto"
|
||||
didplc "github.com/did-method-plc/go-didplc"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var plcCmd = &cobra.Command{
|
||||
Use: "plc",
|
||||
Short: "PLC directory management commands",
|
||||
}
|
||||
|
||||
var plcConfigFile string
|
||||
|
||||
var plcAddRotationKeyCmd = &cobra.Command{
|
||||
Use: "add-rotation-key <multibase-key>",
|
||||
Short: "Add a rotation key to this hold's PLC identity",
|
||||
Long: `Add an additional rotation key to the hold's did:plc document.
|
||||
The key must be a multibase-encoded private key (K-256 or P-256, starting with 'z').
|
||||
The hold's configured rotation key is used to sign the PLC update.
|
||||
|
||||
atcr-hold plc add-rotation-key --config config.yaml z...`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
cfg, err := hold.LoadConfig(plcConfigFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load config: %w", err)
|
||||
}
|
||||
|
||||
if cfg.Database.DIDMethod != "plc" {
|
||||
return fmt.Errorf("this command only works with did:plc (database.did_method is %q)", cfg.Database.DIDMethod)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Resolve the hold's DID
|
||||
holdDID, err := pds.LoadOrCreateDID(ctx, pds.DIDConfig{
|
||||
DID: cfg.Database.DID,
|
||||
DIDMethod: cfg.Database.DIDMethod,
|
||||
PublicURL: cfg.Server.PublicURL,
|
||||
DBPath: cfg.Database.Path,
|
||||
SigningKeyPath: cfg.Database.KeyPath,
|
||||
RotationKey: cfg.Database.RotationKey,
|
||||
PLCDirectoryURL: cfg.Database.PLCDirectoryURL,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to resolve hold DID: %w", err)
|
||||
}
|
||||
|
||||
// Parse the rotation key from config (required for signing PLC updates)
|
||||
if cfg.Database.RotationKey == "" {
|
||||
return fmt.Errorf("database.rotation_key must be set to sign PLC updates")
|
||||
}
|
||||
rotationKey, err := atcrypto.ParsePrivateMultibase(cfg.Database.RotationKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse rotation_key from config: %w", err)
|
||||
}
|
||||
|
||||
// Parse the new key to add (K-256 or P-256)
|
||||
newKey, err := atcrypto.ParsePrivateMultibase(args[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse key argument: %w", err)
|
||||
}
|
||||
newKeyPub, err := newKey.PublicKey()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get public key from argument: %w", err)
|
||||
}
|
||||
newKeyDIDKey := newKeyPub.DIDKey()
|
||||
|
||||
// Load signing key for verification methods
|
||||
keyPath := cfg.Database.KeyPath
|
||||
if keyPath == "" {
|
||||
keyPath = cfg.Database.Path + "/signing.key"
|
||||
}
|
||||
signingKey, err := oauth.GenerateOrLoadPDSKey(keyPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load signing key: %w", err)
|
||||
}
|
||||
|
||||
// Fetch current PLC state
|
||||
plcDirectoryURL := cfg.Database.PLCDirectoryURL
|
||||
if plcDirectoryURL == "" {
|
||||
plcDirectoryURL = "https://plc.directory"
|
||||
}
|
||||
client := &didplc.Client{DirectoryURL: plcDirectoryURL}
|
||||
|
||||
opLog, err := client.OpLog(ctx, holdDID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch PLC op log: %w", err)
|
||||
}
|
||||
if len(opLog) == 0 {
|
||||
return fmt.Errorf("empty op log for %s", holdDID)
|
||||
}
|
||||
|
||||
lastEntry := opLog[len(opLog)-1]
|
||||
lastOp := lastEntry.Regular
|
||||
if lastOp == nil {
|
||||
return fmt.Errorf("last PLC operation is not a regular op")
|
||||
}
|
||||
|
||||
// Check if key already present
|
||||
for _, k := range lastOp.RotationKeys {
|
||||
if k == newKeyDIDKey {
|
||||
fmt.Printf("Key %s is already a rotation key for %s\n", newKeyDIDKey, holdDID)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Build updated rotation keys: keep existing, append new
|
||||
rotationKeys := make([]string, len(lastOp.RotationKeys))
|
||||
copy(rotationKeys, lastOp.RotationKeys)
|
||||
rotationKeys = append(rotationKeys, newKeyDIDKey)
|
||||
|
||||
// Build update: preserve everything else from current state
|
||||
sigPub, err := signingKey.PublicKey()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get signing public key: %w", err)
|
||||
}
|
||||
|
||||
prevCID := lastEntry.AsOperation().CID().String()
|
||||
|
||||
op := &didplc.RegularOp{
|
||||
Type: "plc_operation",
|
||||
RotationKeys: rotationKeys,
|
||||
VerificationMethods: map[string]string{
|
||||
"atproto": sigPub.DIDKey(),
|
||||
},
|
||||
AlsoKnownAs: lastOp.AlsoKnownAs,
|
||||
Services: lastOp.Services,
|
||||
Prev: &prevCID,
|
||||
}
|
||||
|
||||
if err := op.Sign(rotationKey); err != nil {
|
||||
return fmt.Errorf("failed to sign PLC update: %w", err)
|
||||
}
|
||||
|
||||
if err := client.Submit(ctx, holdDID, op); err != nil {
|
||||
return fmt.Errorf("failed to submit PLC update: %w", err)
|
||||
}
|
||||
|
||||
slog.Info("Added rotation key to PLC identity",
|
||||
"did", holdDID,
|
||||
"new_key", newKeyDIDKey,
|
||||
"total_rotation_keys", len(rotationKeys),
|
||||
)
|
||||
fmt.Printf("Added rotation key %s to %s\n", newKeyDIDKey, holdDID)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
plcCmd.PersistentFlags().StringVarP(&plcConfigFile, "config", "c", "", "path to YAML configuration file")
|
||||
|
||||
plcCmd.AddCommand(plcAddRotationKeyCmd)
|
||||
}
|
||||
146
cmd/hold/repo.go
Normal file
146
cmd/hold/repo.go
Normal file
@@ -0,0 +1,146 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
|
||||
"atcr.io/pkg/hold"
|
||||
holddb "atcr.io/pkg/hold/db"
|
||||
"atcr.io/pkg/hold/pds"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var repoCmd = &cobra.Command{
|
||||
Use: "repo",
|
||||
Short: "Repository management commands",
|
||||
}
|
||||
|
||||
var repoExportCmd = &cobra.Command{
|
||||
Use: "export",
|
||||
Short: "Export the hold's repo as a CAR file to stdout",
|
||||
Long: `Export the hold's ATProto repository as a CAR (Content Addressable Archive) file.
|
||||
The CAR is written to stdout, so redirect to a file:
|
||||
|
||||
atcr-hold repo export --config config.yaml > backup.car`,
|
||||
Args: cobra.NoArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
cfg, err := hold.LoadConfig(repoConfigFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load config: %w", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
holdPDS, cleanup, err := openHoldPDS(ctx, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
if err := holdPDS.ExportToCAR(ctx, os.Stdout); err != nil {
|
||||
return fmt.Errorf("failed to export: %w", err)
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "Export complete\n")
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var repoImportCmd = &cobra.Command{
|
||||
Use: "import <file> [file...]",
|
||||
Short: "Import records from one or more CAR files",
|
||||
Long: `Import ATProto records from CAR files into the hold's repo.
|
||||
Records are upserted (existing records are overwritten). Multiple files can be
|
||||
imported additively.
|
||||
|
||||
atcr-hold repo import --config config.yaml backup.car
|
||||
atcr-hold repo import --config config.yaml backup.car extra-records.car`,
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
cfg, err := hold.LoadConfig(repoConfigFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load config: %w", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
holdPDS, cleanup, err := openHoldPDS(ctx, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
for _, path := range args {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open %s: %w", path, err)
|
||||
}
|
||||
|
||||
result, err := holdPDS.ImportFromCAR(ctx, f)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to import %s: %w", path, err)
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "Imported %d records from %s\n", result.Total, path)
|
||||
for collection, count := range result.PerCollection {
|
||||
fmt.Fprintf(os.Stderr, " %s: %d\n", collection, count)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var repoConfigFile string
|
||||
|
||||
func init() {
|
||||
repoCmd.PersistentFlags().StringVarP(&repoConfigFile, "config", "c", "", "path to YAML configuration file")
|
||||
|
||||
repoCmd.AddCommand(repoExportCmd)
|
||||
repoCmd.AddCommand(repoImportCmd)
|
||||
}
|
||||
|
||||
// openHoldPDS creates a HoldPDS from config for offline CLI operations.
|
||||
// Returns the PDS and a cleanup function that must be deferred.
|
||||
func openHoldPDS(ctx context.Context, cfg *hold.Config) (*pds.HoldPDS, func(), error) {
|
||||
holdDID, err := pds.LoadOrCreateDID(ctx, pds.DIDConfig{
|
||||
DID: cfg.Database.DID,
|
||||
DIDMethod: cfg.Database.DIDMethod,
|
||||
PublicURL: cfg.Server.PublicURL,
|
||||
DBPath: cfg.Database.Path,
|
||||
SigningKeyPath: cfg.Database.KeyPath,
|
||||
RotationKey: cfg.Database.RotationKey,
|
||||
PLCDirectoryURL: cfg.Database.PLCDirectoryURL,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to resolve hold DID: %w", err)
|
||||
}
|
||||
slog.Info("Using hold DID", "did", holdDID)
|
||||
|
||||
// Open shared database
|
||||
dbFilePath := cfg.Database.Path + "/db.sqlite3"
|
||||
libsqlCfg := holddb.LibsqlConfig{
|
||||
SyncURL: cfg.Database.LibsqlSyncURL,
|
||||
AuthToken: cfg.Database.LibsqlAuthToken,
|
||||
SyncInterval: cfg.Database.LibsqlSyncInterval,
|
||||
}
|
||||
holdDB, err := holddb.OpenHoldDB(dbFilePath, libsqlCfg)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to open hold database: %w", err)
|
||||
}
|
||||
|
||||
holdPDS, err := pds.NewHoldPDSWithDB(ctx, holdDID, cfg.Server.PublicURL, cfg.Server.AppviewURL(), cfg.Database.Path, cfg.Database.KeyPath, false, holdDB.DB)
|
||||
if err != nil {
|
||||
holdDB.Close()
|
||||
return nil, nil, fmt.Errorf("failed to initialize PDS: %w", err)
|
||||
}
|
||||
|
||||
cleanup := func() {
|
||||
holdPDS.Close()
|
||||
holdDB.Close()
|
||||
}
|
||||
|
||||
return holdPDS, cleanup, nil
|
||||
}
|
||||
822
cmd/image-advisor/main.go
Normal file
822
cmd/image-advisor/main.go
Normal file
@@ -0,0 +1,822 @@
|
||||
// image-advisor fetches OCI image config, SBOM, and vulnerability data from
|
||||
// the ATCR system and outputs a structured markdown report suitable for LLM
|
||||
// analysis of container image improvements.
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// go run ./cmd/image-advisor --url https://seamark.dev/r/therobbiedavis.com/listenarr --tag latest
|
||||
// go run ./cmd/image-advisor --url https://seamark.dev/r/therobbiedavis.com/listenarr --digest sha256:abc...
|
||||
// go run ./cmd/image-advisor --url https://seamark.dev/r/therobbiedavis.com/listenarr --platform linux/arm64
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/atproto"
|
||||
)
|
||||
|
||||
// OCI config types (full config, not just history)
|
||||
type ociFullConfig struct {
|
||||
Architecture string `json:"architecture"`
|
||||
OS string `json:"os"`
|
||||
Config ociContainerConfig `json:"config"`
|
||||
History []ociHistoryEntry `json:"history"`
|
||||
RootFS ociRootFS `json:"rootfs"`
|
||||
}
|
||||
|
||||
type ociContainerConfig struct {
|
||||
Env []string `json:"Env"`
|
||||
Cmd []string `json:"Cmd"`
|
||||
Entrypoint []string `json:"Entrypoint"`
|
||||
WorkingDir string `json:"WorkingDir"`
|
||||
ExposedPorts map[string]struct{} `json:"ExposedPorts"`
|
||||
Labels map[string]string `json:"Labels"`
|
||||
User string `json:"User"`
|
||||
Volumes map[string]struct{} `json:"Volumes"`
|
||||
}
|
||||
|
||||
type ociHistoryEntry struct {
|
||||
Created string `json:"created"`
|
||||
CreatedBy string `json:"created_by"`
|
||||
EmptyLayer bool `json:"empty_layer"`
|
||||
Comment string `json:"comment"`
|
||||
}
|
||||
|
||||
type ociRootFS struct {
|
||||
Type string `json:"type"`
|
||||
DiffIDs []string `json:"diff_ids"`
|
||||
}
|
||||
|
||||
// Grype vulnerability report types
|
||||
type grypeReport struct {
|
||||
Matches []grypeMatch `json:"matches"`
|
||||
}
|
||||
|
||||
type grypeMatch struct {
|
||||
Vulnerability grypeVuln `json:"Vulnerability"`
|
||||
Package grypePackage `json:"Package"`
|
||||
}
|
||||
|
||||
type grypeVuln struct {
|
||||
ID string `json:"ID"`
|
||||
Metadata grypeMetadata `json:"Metadata"`
|
||||
Fix grypeFix `json:"Fix"`
|
||||
}
|
||||
|
||||
type grypeMetadata struct {
|
||||
Severity string `json:"Severity"`
|
||||
}
|
||||
|
||||
type grypeFix struct {
|
||||
Versions []string `json:"Versions"`
|
||||
State string `json:"State"`
|
||||
}
|
||||
|
||||
type grypePackage struct {
|
||||
Name string `json:"Name"`
|
||||
Version string `json:"Version"`
|
||||
Type string `json:"Type"`
|
||||
}
|
||||
|
||||
// SPDX SBOM types
|
||||
type spdxDocument struct {
|
||||
Packages []spdxPackage `json:"packages"`
|
||||
}
|
||||
|
||||
type spdxPackage struct {
|
||||
SPDXID string `json:"SPDXID"`
|
||||
Name string `json:"name"`
|
||||
VersionInfo string `json:"versionInfo"`
|
||||
Supplier string `json:"supplier"`
|
||||
LicenseConcluded string `json:"licenseConcluded"`
|
||||
}
|
||||
|
||||
// reportData holds all fetched data for markdown generation
|
||||
type reportData struct {
|
||||
Handle string
|
||||
Repository string
|
||||
Tag string
|
||||
Digest string
|
||||
Platform string
|
||||
HoldURL string
|
||||
ScannedAt string
|
||||
|
||||
Config *ociFullConfig
|
||||
ConfigErr string
|
||||
Layers []atproto.BlobReference // from manifest record
|
||||
VulnReport *grypeReport
|
||||
VulnErr string
|
||||
ScanRecord *atproto.ScanRecord
|
||||
SBOM *spdxDocument
|
||||
SBOMErr string
|
||||
}
|
||||
|
||||
func main() {
|
||||
registryURL := flag.String("url", "", "Registry URL (e.g., https://seamark.dev/r/therobbiedavis.com/listenarr)")
|
||||
tag := flag.String("tag", "latest", "Image tag to look up")
|
||||
digest := flag.String("digest", "", "Manifest digest (overrides --tag)")
|
||||
platform := flag.String("platform", "linux/amd64", "Platform to select from manifest index (os/arch)")
|
||||
holdURL := flag.String("hold", "https://us-chi1.cove.seamark.dev", "Hold service URL")
|
||||
flag.Parse()
|
||||
|
||||
if *registryURL == "" {
|
||||
fmt.Fprintln(os.Stderr, "error: --url is required")
|
||||
flag.Usage()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||
defer cancel()
|
||||
|
||||
handle, repository, err := parseRegistryURL(*registryURL)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to parse URL: %v", err)
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "Resolving identity for %s...\n", handle)
|
||||
did, resolvedHandle, pdsEndpoint, err := atproto.ResolveIdentity(ctx, handle)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to resolve identity %q: %v", handle, err)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, " DID: %s\n Handle: %s\n PDS: %s\n", did, resolvedHandle, pdsEndpoint)
|
||||
|
||||
// Resolve hold DID
|
||||
fmt.Fprintf(os.Stderr, "Resolving hold DID for %s...\n", *holdURL)
|
||||
holdDID, err := atproto.ResolveHoldDID(ctx, *holdURL)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to resolve hold DID: %v", err)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, " Hold DID: %s\n", holdDID)
|
||||
|
||||
// Resolve manifest digest
|
||||
manifestDigest := *digest
|
||||
tagName := *tag
|
||||
if manifestDigest == "" {
|
||||
fmt.Fprintf(os.Stderr, "Looking up tag %q for %s/%s...\n", tagName, resolvedHandle, repository)
|
||||
tagRecord, err := fetchTagRecord(ctx, pdsEndpoint, did, repository, tagName)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to fetch tag record: %v", err)
|
||||
}
|
||||
manifestDigest, err = tagRecord.GetManifestDigest()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to get manifest digest from tag: %v", err)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, " Digest: %s\n", manifestDigest)
|
||||
} else {
|
||||
tagName = ""
|
||||
}
|
||||
|
||||
// Fetch manifest record
|
||||
fmt.Fprintf(os.Stderr, "Fetching manifest record...\n")
|
||||
manifest, err := fetchManifestRecord(ctx, pdsEndpoint, did, manifestDigest)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to fetch manifest record: %v", err)
|
||||
}
|
||||
|
||||
// Handle manifest index
|
||||
if len(manifest.Manifests) > 0 {
|
||||
fmt.Fprintf(os.Stderr, "Manifest is an index with %d platforms:\n", len(manifest.Manifests))
|
||||
for _, m := range manifest.Manifests {
|
||||
if m.Platform != nil {
|
||||
p := m.Platform
|
||||
platStr := p.OS + "/" + p.Architecture
|
||||
if p.Variant != "" {
|
||||
platStr += "/" + p.Variant
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, " - %s (%s)\n", platStr, truncate(m.Digest, 24))
|
||||
}
|
||||
}
|
||||
|
||||
child, err := selectPlatform(manifest.Manifests, *platform)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: %v, using first platform\n", err)
|
||||
child = &manifest.Manifests[0]
|
||||
}
|
||||
|
||||
manifestDigest = child.Digest
|
||||
if child.Platform != nil {
|
||||
*platform = child.Platform.OS + "/" + child.Platform.Architecture
|
||||
if child.Platform.Variant != "" {
|
||||
*platform += "/" + child.Platform.Variant
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "Selected platform %s → %s\n", *platform, truncate(manifestDigest, 24))
|
||||
|
||||
// Re-fetch the child manifest record
|
||||
manifest, err = fetchManifestRecord(ctx, pdsEndpoint, did, manifestDigest)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to fetch child manifest: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
report := &reportData{
|
||||
Handle: resolvedHandle,
|
||||
Repository: repository,
|
||||
Tag: tagName,
|
||||
Digest: manifestDigest,
|
||||
Platform: *platform,
|
||||
HoldURL: *holdURL,
|
||||
Layers: manifest.Layers,
|
||||
}
|
||||
|
||||
// Fetch image config
|
||||
fmt.Fprintf(os.Stderr, "Fetching image config...\n")
|
||||
config, err := fetchFullImageConfig(ctx, *holdURL, manifestDigest)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, " Warning: %v\n", err)
|
||||
report.ConfigErr = err.Error()
|
||||
} else {
|
||||
report.Config = config
|
||||
}
|
||||
|
||||
// Fetch scan data (scan record + SBOM blob + vuln blob)
|
||||
fmt.Fprintf(os.Stderr, "Fetching scan data...\n")
|
||||
scanRecord, sbom, vulnReport, scanErr := fetchScanData(ctx, *holdURL, holdDID, manifestDigest)
|
||||
if scanErr != nil {
|
||||
fmt.Fprintf(os.Stderr, " Warning: %v\n", scanErr)
|
||||
report.VulnErr = scanErr.Error()
|
||||
report.SBOMErr = scanErr.Error()
|
||||
} else {
|
||||
report.ScanRecord = scanRecord
|
||||
report.ScannedAt = scanRecord.ScannedAt
|
||||
if vulnReport != nil {
|
||||
report.VulnReport = vulnReport
|
||||
} else {
|
||||
report.VulnErr = "No vulnerability report blob available"
|
||||
}
|
||||
if sbom != nil {
|
||||
report.SBOM = sbom
|
||||
} else {
|
||||
report.SBOMErr = "No SBOM blob available"
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "Generating prompt...\n")
|
||||
generatePrompt(os.Stdout, report)
|
||||
}
|
||||
|
||||
func parseRegistryURL(rawURL string) (handle, repository string, err error) {
|
||||
u, err := url.Parse(rawURL)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("invalid URL: %w", err)
|
||||
}
|
||||
|
||||
path := strings.TrimPrefix(u.Path, "/")
|
||||
path = strings.TrimPrefix(path, "r/")
|
||||
path = strings.TrimSuffix(path, "/")
|
||||
|
||||
parts := strings.SplitN(path, "/", 2)
|
||||
if len(parts) < 2 {
|
||||
return "", "", fmt.Errorf("URL must be in format: https://domain/r/<handle>/<repository>")
|
||||
}
|
||||
|
||||
return parts[0], parts[1], nil
|
||||
}
|
||||
|
||||
func fetchTagRecord(ctx context.Context, pdsEndpoint, did, repository, tag string) (*atproto.TagRecord, error) {
|
||||
rkey := atproto.RepositoryTagToRKey(repository, tag)
|
||||
reqURL := fmt.Sprintf("%s/xrpc/com.atproto.repo.getRecord?repo=%s&collection=%s&rkey=%s",
|
||||
strings.TrimSuffix(pdsEndpoint, "/"),
|
||||
url.QueryEscape(did),
|
||||
url.QueryEscape(atproto.TagCollection),
|
||||
url.QueryEscape(rkey),
|
||||
)
|
||||
|
||||
resp, err := httpGet(ctx, reqURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("tag %q not found (HTTP %d)", tag, resp.StatusCode)
|
||||
}
|
||||
|
||||
var envelope struct {
|
||||
Value json.RawMessage `json:"value"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&envelope); err != nil {
|
||||
return nil, fmt.Errorf("parse response: %w", err)
|
||||
}
|
||||
|
||||
var tagRecord atproto.TagRecord
|
||||
if err := json.Unmarshal(envelope.Value, &tagRecord); err != nil {
|
||||
return nil, fmt.Errorf("parse tag record: %w", err)
|
||||
}
|
||||
|
||||
return &tagRecord, nil
|
||||
}
|
||||
|
||||
func fetchManifestRecord(ctx context.Context, pdsEndpoint, did, digest string) (*atproto.ManifestRecord, error) {
|
||||
rkey := strings.TrimPrefix(digest, "sha256:")
|
||||
reqURL := fmt.Sprintf("%s/xrpc/com.atproto.repo.getRecord?repo=%s&collection=%s&rkey=%s",
|
||||
strings.TrimSuffix(pdsEndpoint, "/"),
|
||||
url.QueryEscape(did),
|
||||
url.QueryEscape(atproto.ManifestCollection),
|
||||
url.QueryEscape(rkey),
|
||||
)
|
||||
|
||||
resp, err := httpGet(ctx, reqURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("manifest not found (HTTP %d)", resp.StatusCode)
|
||||
}
|
||||
|
||||
var envelope struct {
|
||||
Value json.RawMessage `json:"value"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&envelope); err != nil {
|
||||
return nil, fmt.Errorf("parse response: %w", err)
|
||||
}
|
||||
|
||||
var manifest atproto.ManifestRecord
|
||||
if err := json.Unmarshal(envelope.Value, &manifest); err != nil {
|
||||
return nil, fmt.Errorf("parse manifest record: %w", err)
|
||||
}
|
||||
|
||||
return &manifest, nil
|
||||
}
|
||||
|
||||
func selectPlatform(manifests []atproto.ManifestReference, platform string) (*atproto.ManifestReference, error) {
|
||||
parts := strings.Split(platform, "/")
|
||||
wantOS := parts[0]
|
||||
wantArch := ""
|
||||
wantVariant := ""
|
||||
if len(parts) > 1 {
|
||||
wantArch = parts[1]
|
||||
}
|
||||
if len(parts) > 2 {
|
||||
wantVariant = parts[2]
|
||||
}
|
||||
|
||||
for i := range manifests {
|
||||
m := &manifests[i]
|
||||
if m.Platform == nil {
|
||||
continue
|
||||
}
|
||||
if m.Platform.OS == wantOS && m.Platform.Architecture == wantArch {
|
||||
if wantVariant == "" || m.Platform.Variant == wantVariant {
|
||||
return m, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("no platform matching %s found", platform)
|
||||
}
|
||||
|
||||
func fetchFullImageConfig(ctx context.Context, holdURL, manifestDigest string) (*ociFullConfig, error) {
|
||||
reqURL := fmt.Sprintf("%s%s?digest=%s",
|
||||
strings.TrimSuffix(holdURL, "/"),
|
||||
atproto.HoldGetImageConfig,
|
||||
url.QueryEscape(manifestDigest),
|
||||
)
|
||||
|
||||
resp, err := httpGet(ctx, reqURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("image config not found (HTTP %d)", resp.StatusCode)
|
||||
}
|
||||
|
||||
var record struct {
|
||||
ConfigJSON string `json:"configJson"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&record); err != nil {
|
||||
return nil, fmt.Errorf("parse response: %w", err)
|
||||
}
|
||||
|
||||
var config ociFullConfig
|
||||
if err := json.Unmarshal([]byte(record.ConfigJSON), &config); err != nil {
|
||||
return nil, fmt.Errorf("parse OCI config: %w", err)
|
||||
}
|
||||
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
func fetchScanData(ctx context.Context, holdURL, holdDID, manifestDigest string) (*atproto.ScanRecord, *spdxDocument, *grypeReport, error) {
|
||||
rkey := strings.TrimPrefix(manifestDigest, "sha256:")
|
||||
|
||||
// Fetch scan record
|
||||
scanURL := fmt.Sprintf("%s/xrpc/com.atproto.repo.getRecord?repo=%s&collection=%s&rkey=%s",
|
||||
strings.TrimSuffix(holdURL, "/"),
|
||||
url.QueryEscape(holdDID),
|
||||
url.QueryEscape(atproto.ScanCollection),
|
||||
url.QueryEscape(rkey),
|
||||
)
|
||||
|
||||
resp, err := httpGet(ctx, scanURL)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("fetch scan record: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, nil, nil, fmt.Errorf("no scan record found (HTTP %d)", resp.StatusCode)
|
||||
}
|
||||
|
||||
var envelope struct {
|
||||
Value json.RawMessage `json:"value"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&envelope); err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("parse scan response: %w", err)
|
||||
}
|
||||
|
||||
var scanRecord atproto.ScanRecord
|
||||
if err := json.Unmarshal(envelope.Value, &scanRecord); err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("parse scan record: %w", err)
|
||||
}
|
||||
|
||||
// Fetch SBOM blob
|
||||
var sbom *spdxDocument
|
||||
if scanRecord.SbomBlob != nil && scanRecord.SbomBlob.Ref.String() != "" {
|
||||
blobURL := fmt.Sprintf("%s/xrpc/com.atproto.sync.getBlob?did=%s&cid=%s",
|
||||
strings.TrimSuffix(holdURL, "/"),
|
||||
url.QueryEscape(holdDID),
|
||||
url.QueryEscape(scanRecord.SbomBlob.Ref.String()),
|
||||
)
|
||||
blobResp, err := httpGet(ctx, blobURL)
|
||||
if err == nil {
|
||||
defer blobResp.Body.Close()
|
||||
if blobResp.StatusCode == http.StatusOK {
|
||||
var doc spdxDocument
|
||||
if err := json.NewDecoder(blobResp.Body).Decode(&doc); err == nil {
|
||||
sbom = &doc
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, " Warning: failed to parse SBOM: %v\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch vuln report blob
|
||||
var vulnReport *grypeReport
|
||||
if scanRecord.VulnReportBlob != nil && scanRecord.VulnReportBlob.Ref.String() != "" {
|
||||
blobURL := fmt.Sprintf("%s/xrpc/com.atproto.sync.getBlob?did=%s&cid=%s",
|
||||
strings.TrimSuffix(holdURL, "/"),
|
||||
url.QueryEscape(holdDID),
|
||||
url.QueryEscape(scanRecord.VulnReportBlob.Ref.String()),
|
||||
)
|
||||
blobResp, err := httpGet(ctx, blobURL)
|
||||
if err == nil {
|
||||
defer blobResp.Body.Close()
|
||||
if blobResp.StatusCode == http.StatusOK {
|
||||
var report grypeReport
|
||||
if err := json.NewDecoder(blobResp.Body).Decode(&report); err == nil {
|
||||
vulnReport = &report
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, " Warning: failed to parse vuln report: %v\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &scanRecord, sbom, vulnReport, nil
|
||||
}
|
||||
|
||||
func httpGet(ctx context.Context, rawURL string) (*http.Response, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", rawURL, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("build request: %w", err)
|
||||
}
|
||||
return http.DefaultClient.Do(req)
|
||||
}
|
||||
|
||||
// --- Output generation ---
|
||||
|
||||
func generatePrompt(w io.Writer, r *reportData) {
|
||||
// System instruction
|
||||
fmt.Fprintln(w, `Respond ONLY with raw YAML. No markdown fences, no explanation, no preamble.
|
||||
Analyze the container image data below. Output a list of actionable suggestions sorted by impact (highest first).
|
||||
|
||||
schema:
|
||||
suggestions:
|
||||
- action: "<specific actionable step>"
|
||||
category: vulnerability|size|cache|security|best-practice
|
||||
impact: high|medium|low
|
||||
effort: low|medium|high
|
||||
cves_fixed: <int or 0>
|
||||
size_saved_mb: <int or 0>
|
||||
detail: "<one sentence with specific package names, versions, or commands>"
|
||||
---`)
|
||||
|
||||
// Compact data block - no markdown formatting, just facts
|
||||
ref := r.Handle + "/" + r.Repository
|
||||
if r.Tag != "" {
|
||||
ref += ":" + r.Tag
|
||||
}
|
||||
totalSize := int64(0)
|
||||
for _, l := range r.Layers {
|
||||
totalSize += l.Size
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "\nimage: %s\ndigest: %s\nplatform: %s\ntotal_size: %s\nlayers: %d\n",
|
||||
ref, r.Digest, r.Platform, humanSize(totalSize), len(r.Layers))
|
||||
|
||||
// Config
|
||||
if r.Config != nil {
|
||||
c := r.Config.Config
|
||||
user := c.User
|
||||
if user == "" {
|
||||
user = "root"
|
||||
}
|
||||
fmt.Fprintf(w, "user: %s\n", user)
|
||||
if c.WorkingDir != "" {
|
||||
fmt.Fprintf(w, "workdir: %s\n", c.WorkingDir)
|
||||
}
|
||||
if len(c.Entrypoint) > 0 {
|
||||
fmt.Fprintf(w, "entrypoint: %s\n", strings.Join(c.Entrypoint, " "))
|
||||
}
|
||||
if len(c.Cmd) > 0 {
|
||||
fmt.Fprintf(w, "cmd: %s\n", strings.Join(c.Cmd, " "))
|
||||
}
|
||||
if len(c.ExposedPorts) > 0 {
|
||||
ports := make([]string, 0, len(c.ExposedPorts))
|
||||
for p := range c.ExposedPorts {
|
||||
ports = append(ports, p)
|
||||
}
|
||||
fmt.Fprintf(w, "ports: %s\n", strings.Join(ports, ","))
|
||||
}
|
||||
if len(c.Env) > 0 {
|
||||
fmt.Fprintln(w, "env:")
|
||||
for _, env := range c.Env {
|
||||
parts := strings.SplitN(env, "=", 2)
|
||||
if shouldRedact(parts[0]) {
|
||||
fmt.Fprintf(w, " - %s=[REDACTED]\n", parts[0])
|
||||
} else {
|
||||
fmt.Fprintf(w, " - %s\n", env)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(c.Labels) > 0 {
|
||||
fmt.Fprintln(w, "labels:")
|
||||
keys := make([]string, 0, len(c.Labels))
|
||||
for k := range c.Labels {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for _, k := range keys {
|
||||
v := c.Labels[k]
|
||||
if len(v) > 80 {
|
||||
v = v[:77] + "..."
|
||||
}
|
||||
fmt.Fprintf(w, " %s: %s\n", k, v)
|
||||
}
|
||||
}
|
||||
|
||||
// History as compact list
|
||||
fmt.Fprintln(w, "history:")
|
||||
layerIdx := 0
|
||||
for _, h := range r.Config.History {
|
||||
cmd := cleanCommand(h.CreatedBy)
|
||||
if len(cmd) > 100 {
|
||||
cmd = cmd[:97] + "..."
|
||||
}
|
||||
if !h.EmptyLayer && layerIdx < len(r.Layers) {
|
||||
fmt.Fprintf(w, " - [%s] %s\n", humanSize(r.Layers[layerIdx].Size), cmd)
|
||||
layerIdx++
|
||||
} else {
|
||||
fmt.Fprintf(w, " - %s\n", cmd)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Vuln summary
|
||||
if r.ScanRecord != nil {
|
||||
sr := r.ScanRecord
|
||||
fmt.Fprintf(w, "vulns: {critical: %d, high: %d, medium: %d, low: %d, total: %d}\n",
|
||||
sr.Critical, sr.High, sr.Medium, sr.Low, sr.Total)
|
||||
}
|
||||
|
||||
// Fixable vulns - compact list
|
||||
if r.VulnReport != nil {
|
||||
// Group by package: name -> {version, type, fixes[], cves[]}
|
||||
type pkgInfo struct {
|
||||
version string
|
||||
typ string
|
||||
fixes map[string]bool
|
||||
cves []string
|
||||
maxSev int
|
||||
}
|
||||
pkgs := map[string]*pkgInfo{}
|
||||
|
||||
for _, m := range r.VulnReport.Matches {
|
||||
sev := m.Vulnerability.Metadata.Severity
|
||||
if sev != "Critical" && sev != "High" {
|
||||
continue
|
||||
}
|
||||
key := m.Package.Name
|
||||
p, ok := pkgs[key]
|
||||
if !ok {
|
||||
p = &pkgInfo{version: m.Package.Version, typ: m.Package.Type, fixes: map[string]bool{}, maxSev: 5}
|
||||
pkgs[key] = p
|
||||
}
|
||||
p.cves = append(p.cves, m.Vulnerability.ID)
|
||||
for _, f := range m.Vulnerability.Fix.Versions {
|
||||
p.fixes[f] = true
|
||||
}
|
||||
if s := severityOrder(sev); s < p.maxSev {
|
||||
p.maxSev = s
|
||||
}
|
||||
}
|
||||
|
||||
if len(pkgs) > 0 {
|
||||
fmt.Fprintln(w, "fixable_critical_high:")
|
||||
// Sort by severity then CVE count
|
||||
type entry struct {
|
||||
name string
|
||||
info *pkgInfo
|
||||
}
|
||||
sorted := make([]entry, 0, len(pkgs))
|
||||
for n, p := range pkgs {
|
||||
sorted = append(sorted, entry{n, p})
|
||||
}
|
||||
sort.Slice(sorted, func(i, j int) bool {
|
||||
if sorted[i].info.maxSev != sorted[j].info.maxSev {
|
||||
return sorted[i].info.maxSev < sorted[j].info.maxSev
|
||||
}
|
||||
return len(sorted[i].info.cves) > len(sorted[j].info.cves)
|
||||
})
|
||||
|
||||
for _, e := range sorted {
|
||||
fixes := make([]string, 0, len(e.info.fixes))
|
||||
for f := range e.info.fixes {
|
||||
fixes = append(fixes, f)
|
||||
}
|
||||
sort.Strings(fixes)
|
||||
fmt.Fprintf(w, " - pkg: %s@%s (%s) cves: %d fix: %s\n",
|
||||
e.name, e.info.version, e.info.typ, len(e.info.cves), strings.Join(fixes, ","))
|
||||
}
|
||||
}
|
||||
|
||||
// Unfixable counts
|
||||
unfixable := map[string]int{}
|
||||
for _, m := range r.VulnReport.Matches {
|
||||
if len(m.Vulnerability.Fix.Versions) == 0 {
|
||||
unfixable[m.Vulnerability.Metadata.Severity]++
|
||||
}
|
||||
}
|
||||
if len(unfixable) > 0 {
|
||||
fmt.Fprintf(w, "unfixable:")
|
||||
for _, sev := range []string{"Critical", "High", "Medium", "Low", "Negligible", "Unknown"} {
|
||||
if c, ok := unfixable[sev]; ok {
|
||||
fmt.Fprintf(w, " %s=%d", strings.ToLower(sev), c)
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(w)
|
||||
}
|
||||
}
|
||||
|
||||
// SBOM summary - just type counts
|
||||
if r.SBOM != nil {
|
||||
typeCounts := map[string]int{}
|
||||
total := 0
|
||||
for _, p := range r.SBOM.Packages {
|
||||
if strings.HasPrefix(p.SPDXID, "SPDXRef-DocumentRoot") || p.SPDXID == "SPDXRef-DOCUMENT" {
|
||||
continue
|
||||
}
|
||||
total++
|
||||
pkgType := extractPackageType(p.Supplier)
|
||||
if pkgType == "" {
|
||||
pkgType = "other"
|
||||
}
|
||||
typeCounts[pkgType]++
|
||||
}
|
||||
fmt.Fprintf(w, "sbom_packages: %d", total)
|
||||
for t, c := range typeCounts {
|
||||
fmt.Fprintf(w, " %s=%d", t, c)
|
||||
}
|
||||
fmt.Fprintln(w)
|
||||
|
||||
// Top vulnerable packages
|
||||
if r.VulnReport != nil {
|
||||
vulnPkgs := map[string]int{}
|
||||
for _, m := range r.VulnReport.Matches {
|
||||
vulnPkgs[m.Package.Name]++
|
||||
}
|
||||
type pv struct {
|
||||
name string
|
||||
count int
|
||||
}
|
||||
sorted := make([]pv, 0, len(vulnPkgs))
|
||||
for n, c := range vulnPkgs {
|
||||
sorted = append(sorted, pv{n, c})
|
||||
}
|
||||
sort.Slice(sorted, func(i, j int) bool { return sorted[i].count > sorted[j].count })
|
||||
// Top 10 only
|
||||
if len(sorted) > 10 {
|
||||
sorted = sorted[:10]
|
||||
}
|
||||
fmt.Fprintln(w, "top_vulnerable_packages:")
|
||||
for _, p := range sorted {
|
||||
fmt.Fprintf(w, " - %s: %d\n", p.name, p.count)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// --- Helpers ---
|
||||
|
||||
func severityOrder(s string) int {
|
||||
switch s {
|
||||
case "Critical":
|
||||
return 0
|
||||
case "High":
|
||||
return 1
|
||||
case "Medium":
|
||||
return 2
|
||||
case "Low":
|
||||
return 3
|
||||
case "Negligible":
|
||||
return 4
|
||||
default:
|
||||
return 5
|
||||
}
|
||||
}
|
||||
|
||||
func humanSize(bytes int64) string {
|
||||
const (
|
||||
KB = 1024
|
||||
MB = 1024 * KB
|
||||
GB = 1024 * MB
|
||||
)
|
||||
switch {
|
||||
case bytes >= GB:
|
||||
return fmt.Sprintf("%.1f GB", float64(bytes)/float64(GB))
|
||||
case bytes >= MB:
|
||||
return fmt.Sprintf("%.1f MB", float64(bytes)/float64(MB))
|
||||
case bytes >= KB:
|
||||
return fmt.Sprintf("%.1f KB", float64(bytes)/float64(KB))
|
||||
default:
|
||||
return fmt.Sprintf("%d B", bytes)
|
||||
}
|
||||
}
|
||||
|
||||
func cleanCommand(cmd string) string {
|
||||
// Remove common prefixes that add noise
|
||||
cmd = strings.TrimPrefix(cmd, "/bin/sh -c ")
|
||||
cmd = strings.TrimPrefix(cmd, "#(nop) ")
|
||||
return strings.TrimSpace(cmd)
|
||||
}
|
||||
|
||||
func shouldRedact(envName string) bool {
|
||||
upper := strings.ToUpper(envName)
|
||||
for _, suffix := range []string{"_KEY", "_SECRET", "_PASSWORD", "_TOKEN", "_CREDENTIALS", "_API_KEY"} {
|
||||
if strings.HasSuffix(upper, suffix) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func truncate(s string, n int) string {
|
||||
if len(s) <= n {
|
||||
return s
|
||||
}
|
||||
return s[:n] + "..."
|
||||
}
|
||||
|
||||
func extractPackageType(supplier string) string {
|
||||
s := strings.ToLower(supplier)
|
||||
switch {
|
||||
case strings.Contains(s, "npmjs") || strings.Contains(s, "npm"):
|
||||
return "npm"
|
||||
case strings.Contains(s, "pypi") || strings.Contains(s, "python"):
|
||||
return "python"
|
||||
case strings.Contains(s, "rubygems"):
|
||||
return "gem"
|
||||
case strings.Contains(s, "golang") || strings.Contains(s, "go"):
|
||||
return "go"
|
||||
case strings.Contains(s, "debian") || strings.Contains(s, "ubuntu"):
|
||||
return "deb"
|
||||
case strings.Contains(s, "alpine"):
|
||||
return "apk"
|
||||
case strings.Contains(s, "redhat") || strings.Contains(s, "fedora") || strings.Contains(s, "centos"):
|
||||
return "rpm"
|
||||
case strings.Contains(s, "maven") || strings.Contains(s, "java"):
|
||||
return "java"
|
||||
case strings.Contains(s, "nuget") || strings.Contains(s, ".net"):
|
||||
return "nuget"
|
||||
case strings.Contains(s, "cargo") || strings.Contains(s, "rust"):
|
||||
return "rust"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
@@ -75,7 +75,8 @@ func main() {
|
||||
ctx,
|
||||
"http://localhost:8765",
|
||||
*handle,
|
||||
nil, // Use default scopes
|
||||
nil, // Use default scopes
|
||||
"AT Container Registry", // Client name
|
||||
registerCallback,
|
||||
displayAuthURL,
|
||||
)
|
||||
|
||||
578
cmd/record-query/main.go
Normal file
578
cmd/record-query/main.go
Normal file
@@ -0,0 +1,578 @@
|
||||
// record-query queries the ATProto relay to find all users with records in a given
|
||||
// collection, fetches the records from each user's PDS, and optionally filters them.
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// go run ./cmd/record-query --collection io.atcr.sailor.profile --filter "defaultHold!=prefix:did:web"
|
||||
// go run ./cmd/record-query --collection io.atcr.manifest
|
||||
// go run ./cmd/record-query --collection io.atcr.sailor.profile --limit 5
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ListReposByCollectionResponse is the response from com.atproto.sync.listReposByCollection
|
||||
type ListReposByCollectionResponse struct {
|
||||
Repos []RepoRef `json:"repos"`
|
||||
Cursor string `json:"cursor,omitempty"`
|
||||
}
|
||||
|
||||
// RepoRef is a single repo reference
|
||||
type RepoRef struct {
|
||||
DID string `json:"did"`
|
||||
}
|
||||
|
||||
// ListRecordsResponse is the response from com.atproto.repo.listRecords
|
||||
type ListRecordsResponse struct {
|
||||
Records []Record `json:"records"`
|
||||
Cursor string `json:"cursor,omitempty"`
|
||||
}
|
||||
|
||||
// Record is a single ATProto record
|
||||
type Record struct {
|
||||
URI string `json:"uri"`
|
||||
CID string `json:"cid"`
|
||||
Value json.RawMessage `json:"value"`
|
||||
}
|
||||
|
||||
// MatchResult is a record that passed the filter
|
||||
type MatchResult struct {
|
||||
DID string
|
||||
Handle string
|
||||
URI string
|
||||
Fields map[string]any
|
||||
}
|
||||
|
||||
// Filter defines a simple field filter
|
||||
type Filter struct {
|
||||
Field string
|
||||
Operator string // "=", "!="
|
||||
Mode string // "exact", "prefix", "empty"
|
||||
Value string
|
||||
}
|
||||
|
||||
var client = &http.Client{Timeout: 30 * time.Second}
|
||||
|
||||
func main() {
|
||||
relay := flag.String("relay", "https://relay1.us-east.bsky.network", "Relay endpoint")
|
||||
collection := flag.String("collection", "io.atcr.sailor.profile", "ATProto collection to query")
|
||||
filterStr := flag.String("filter", "", "Filter expression: field=value, field!=value, field=prefix:xxx, field!=prefix:xxx, field=empty, field!=empty")
|
||||
resolve := flag.Bool("resolve", true, "Resolve DIDs to handles")
|
||||
limit := flag.Int("limit", 0, "Max repos to process (0 = unlimited)")
|
||||
flag.Parse()
|
||||
|
||||
// Parse filter
|
||||
var filter *Filter
|
||||
if *filterStr != "" {
|
||||
var err error
|
||||
filter, err = parseFilter(*filterStr)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Invalid filter: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("Filter: %s %s %s:%s\n", filter.Field, filter.Operator, filter.Mode, filter.Value)
|
||||
}
|
||||
|
||||
fmt.Printf("Relay: %s\n", *relay)
|
||||
fmt.Printf("Collection: %s\n", *collection)
|
||||
if *limit > 0 {
|
||||
fmt.Printf("Limit: %d repos\n", *limit)
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
// Step 1: Enumerate all DIDs with records in this collection
|
||||
fmt.Println("Enumerating repos from relay...")
|
||||
dids, err := listAllRepos(*relay, *collection, *limit)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to list repos: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("Found %d repos with %s records\n\n", len(dids), *collection)
|
||||
|
||||
// Step 2: For each DID, fetch records and apply filter
|
||||
fmt.Println("Fetching records from each user's PDS...")
|
||||
var results []MatchResult
|
||||
errorsByCategory := make(map[string][]string) // category -> list of DIDs
|
||||
for i, did := range dids {
|
||||
totalErrors := 0
|
||||
for _, v := range errorsByCategory {
|
||||
totalErrors += len(v)
|
||||
}
|
||||
if (i+1)%10 == 0 || i == len(dids)-1 {
|
||||
fmt.Printf(" Progress: %d/%d repos (matches: %d, errors: %d)\r", i+1, len(dids), len(results), totalErrors)
|
||||
}
|
||||
|
||||
matches, err := fetchAndFilter(did, *collection, filter)
|
||||
if err != nil {
|
||||
cat := categorizeError(err)
|
||||
errorsByCategory[cat] = append(errorsByCategory[cat], did)
|
||||
continue
|
||||
}
|
||||
results = append(results, matches...)
|
||||
}
|
||||
totalErrors := 0
|
||||
for _, v := range errorsByCategory {
|
||||
totalErrors += len(v)
|
||||
}
|
||||
fmt.Printf(" Progress: %d/%d repos (matches: %d, errors: %d)\n", len(dids), len(dids), len(results), totalErrors)
|
||||
if len(errorsByCategory) > 0 {
|
||||
fmt.Println(" Error breakdown:")
|
||||
var cats []string
|
||||
for k := range errorsByCategory {
|
||||
cats = append(cats, k)
|
||||
}
|
||||
sort.Strings(cats)
|
||||
for _, cat := range cats {
|
||||
dids := errorsByCategory[cat]
|
||||
fmt.Printf(" %s (%d):\n", cat, len(dids))
|
||||
for _, did := range dids {
|
||||
fmt.Printf(" - %s\n", did)
|
||||
}
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
// Step 3: Resolve DIDs to handles
|
||||
if *resolve && len(results) > 0 {
|
||||
fmt.Println("Resolving DIDs to handles...")
|
||||
handleCache := make(map[string]string)
|
||||
for i := range results {
|
||||
did := results[i].DID
|
||||
if h, ok := handleCache[did]; ok {
|
||||
results[i].Handle = h
|
||||
continue
|
||||
}
|
||||
handle, err := resolveDIDToHandle(did)
|
||||
if err != nil {
|
||||
handle = did
|
||||
}
|
||||
handleCache[did] = handle
|
||||
results[i].Handle = handle
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Step 4: Print results
|
||||
if len(results) == 0 {
|
||||
fmt.Println("No matching records found.")
|
||||
return
|
||||
}
|
||||
|
||||
// Sort by handle/DID for consistent output
|
||||
sort.Slice(results, func(i, j int) bool {
|
||||
return results[i].Handle < results[j].Handle
|
||||
})
|
||||
|
||||
fmt.Println("========================================")
|
||||
fmt.Printf("RESULTS (%d matches)\n", len(results))
|
||||
fmt.Println("========================================")
|
||||
|
||||
for i, r := range results {
|
||||
identity := r.Handle
|
||||
if identity == "" {
|
||||
identity = r.DID
|
||||
}
|
||||
fmt.Printf("\n%3d. %s\n", i+1, identity)
|
||||
if r.Handle != "" && r.Handle != r.DID {
|
||||
fmt.Printf(" DID: %s\n", r.DID)
|
||||
}
|
||||
fmt.Printf(" URI: %s\n", r.URI)
|
||||
|
||||
// Print interesting fields (skip $type, createdAt, updatedAt)
|
||||
for k, v := range r.Fields {
|
||||
if k == "$type" || k == "createdAt" || k == "updatedAt" {
|
||||
continue
|
||||
}
|
||||
fmt.Printf(" %s: %v\n", k, v)
|
||||
}
|
||||
}
|
||||
|
||||
// CSV output
|
||||
fmt.Println("\n========================================")
|
||||
fmt.Println("CSV FORMAT")
|
||||
fmt.Println("========================================")
|
||||
|
||||
// Collect all field names for CSV header
|
||||
fieldSet := make(map[string]bool)
|
||||
for _, r := range results {
|
||||
for k := range r.Fields {
|
||||
if k == "$type" || k == "createdAt" || k == "updatedAt" {
|
||||
continue
|
||||
}
|
||||
fieldSet[k] = true
|
||||
}
|
||||
}
|
||||
var fieldNames []string
|
||||
for k := range fieldSet {
|
||||
fieldNames = append(fieldNames, k)
|
||||
}
|
||||
sort.Strings(fieldNames)
|
||||
|
||||
// Header
|
||||
fmt.Printf("handle,did,uri")
|
||||
for _, f := range fieldNames {
|
||||
fmt.Printf(",%s", f)
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
// Rows
|
||||
for _, r := range results {
|
||||
identity := r.Handle
|
||||
if identity == "" {
|
||||
identity = r.DID
|
||||
}
|
||||
fmt.Printf("%s,%s,%s", identity, r.DID, r.URI)
|
||||
for _, f := range fieldNames {
|
||||
val := ""
|
||||
if v, ok := r.Fields[f]; ok {
|
||||
val = fmt.Sprintf("%v", v)
|
||||
}
|
||||
// Escape commas in values
|
||||
if strings.Contains(val, ",") {
|
||||
val = "\"" + val + "\""
|
||||
}
|
||||
fmt.Printf(",%s", val)
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
}
|
||||
|
||||
// parseFilter parses a filter string like "field!=prefix:did:web"
|
||||
func parseFilter(s string) (*Filter, error) {
|
||||
f := &Filter{}
|
||||
|
||||
// Check for != first (before =)
|
||||
if idx := strings.Index(s, "!="); idx > 0 {
|
||||
f.Field = s[:idx]
|
||||
f.Operator = "!="
|
||||
s = s[idx+2:]
|
||||
} else if idx := strings.Index(s, "="); idx > 0 {
|
||||
f.Field = s[:idx]
|
||||
f.Operator = "="
|
||||
s = s[idx+1:]
|
||||
} else {
|
||||
return nil, fmt.Errorf("expected field=value or field!=value, got %q", s)
|
||||
}
|
||||
|
||||
// Check for mode prefix
|
||||
if s == "empty" {
|
||||
f.Mode = "empty"
|
||||
f.Value = ""
|
||||
} else if strings.HasPrefix(s, "prefix:") {
|
||||
f.Mode = "prefix"
|
||||
f.Value = strings.TrimPrefix(s, "prefix:")
|
||||
} else {
|
||||
f.Mode = "exact"
|
||||
f.Value = s
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// matchFilter checks if a record's fields match the filter
|
||||
func matchFilter(fields map[string]any, filter *Filter) bool {
|
||||
if filter == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
val := ""
|
||||
if v, ok := fields[filter.Field]; ok {
|
||||
val = fmt.Sprintf("%v", v)
|
||||
}
|
||||
|
||||
switch filter.Mode {
|
||||
case "empty":
|
||||
isEmpty := val == "" || val == "<nil>"
|
||||
if filter.Operator == "=" {
|
||||
return isEmpty
|
||||
}
|
||||
return !isEmpty
|
||||
|
||||
case "prefix":
|
||||
hasPrefix := strings.HasPrefix(val, filter.Value)
|
||||
if filter.Operator == "=" {
|
||||
return hasPrefix
|
||||
}
|
||||
return !hasPrefix && val != "" && val != "<nil>"
|
||||
|
||||
case "exact":
|
||||
if filter.Operator == "=" {
|
||||
return val == filter.Value
|
||||
}
|
||||
return val != filter.Value
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// categorizeError classifies an error into a human-readable category
|
||||
func categorizeError(err error) string {
|
||||
s := err.Error()
|
||||
|
||||
// HTTP status codes
|
||||
for _, code := range []string{"400", "401", "403", "404", "410", "429", "500", "502", "503"} {
|
||||
if strings.Contains(s, "status "+code) {
|
||||
switch code {
|
||||
case "400":
|
||||
if strings.Contains(s, "RepoDeactivated") || strings.Contains(s, "deactivated") {
|
||||
return "deactivated (400)"
|
||||
}
|
||||
if strings.Contains(s, "RepoTakendown") || strings.Contains(s, "takendown") {
|
||||
return "takendown (400)"
|
||||
}
|
||||
if strings.Contains(s, "RepoNotFound") || strings.Contains(s, "Could not find repo") {
|
||||
return "repo not found (400)"
|
||||
}
|
||||
return "bad request (400)"
|
||||
case "401":
|
||||
return "unauthorized (401)"
|
||||
case "404":
|
||||
return "not found (404)"
|
||||
case "410":
|
||||
return "gone/deleted (410)"
|
||||
case "429":
|
||||
return "rate limited (429)"
|
||||
case "502":
|
||||
return "bad gateway (502)"
|
||||
case "503":
|
||||
return "unavailable (503)"
|
||||
default:
|
||||
return fmt.Sprintf("HTTP %s", code)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Connection errors
|
||||
if strings.Contains(s, "connection refused") {
|
||||
return "connection refused"
|
||||
}
|
||||
if strings.Contains(s, "no such host") {
|
||||
return "DNS failure"
|
||||
}
|
||||
if strings.Contains(s, "timeout") || strings.Contains(s, "deadline exceeded") {
|
||||
return "timeout"
|
||||
}
|
||||
if strings.Contains(s, "TLS") || strings.Contains(s, "certificate") {
|
||||
return "TLS error"
|
||||
}
|
||||
if strings.Contains(s, "EOF") {
|
||||
return "connection reset"
|
||||
}
|
||||
|
||||
// PLC/DID errors
|
||||
if strings.Contains(s, "no PDS found") {
|
||||
return "no PDS in DID doc"
|
||||
}
|
||||
if strings.Contains(s, "unsupported DID method") {
|
||||
return "unsupported DID method"
|
||||
}
|
||||
|
||||
return "other: " + s
|
||||
}
|
||||
|
||||
// listAllRepos paginates through the relay to get all DIDs with records in a collection
|
||||
func listAllRepos(relayURL, collection string, limit int) ([]string, error) {
|
||||
var dids []string
|
||||
cursor := ""
|
||||
|
||||
for {
|
||||
u := fmt.Sprintf("%s/xrpc/com.atproto.sync.listReposByCollection", relayURL)
|
||||
params := url.Values{}
|
||||
params.Set("collection", collection)
|
||||
params.Set("limit", "1000")
|
||||
if cursor != "" {
|
||||
params.Set("cursor", cursor)
|
||||
}
|
||||
|
||||
resp, err := client.Get(u + "?" + params.Encode())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("request failed: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
return nil, fmt.Errorf("status %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var result ListReposByCollectionResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
resp.Body.Close()
|
||||
return nil, fmt.Errorf("decode failed: %w", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
for _, repo := range result.Repos {
|
||||
dids = append(dids, repo.DID)
|
||||
}
|
||||
|
||||
fmt.Printf(" Fetched %d repos so far...\r", len(dids))
|
||||
|
||||
if limit > 0 && len(dids) >= limit {
|
||||
dids = dids[:limit]
|
||||
break
|
||||
}
|
||||
|
||||
if result.Cursor == "" {
|
||||
break
|
||||
}
|
||||
cursor = result.Cursor
|
||||
}
|
||||
|
||||
fmt.Println()
|
||||
return dids, nil
|
||||
}
|
||||
|
||||
// fetchAndFilter fetches records for a DID and returns those matching the filter
|
||||
func fetchAndFilter(did, collection string, filter *Filter) ([]MatchResult, error) {
|
||||
// Resolve DID to PDS
|
||||
pdsEndpoint, err := resolveDIDToPDS(did)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("resolve PDS: %w", err)
|
||||
}
|
||||
|
||||
var results []MatchResult
|
||||
cursor := ""
|
||||
|
||||
for {
|
||||
u := fmt.Sprintf("%s/xrpc/com.atproto.repo.listRecords", pdsEndpoint)
|
||||
params := url.Values{}
|
||||
params.Set("repo", did)
|
||||
params.Set("collection", collection)
|
||||
params.Set("limit", "100")
|
||||
if cursor != "" {
|
||||
params.Set("cursor", cursor)
|
||||
}
|
||||
|
||||
resp, err := client.Get(u + "?" + params.Encode())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("request failed: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
resp.Body.Close()
|
||||
return nil, fmt.Errorf("status %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var listResp ListRecordsResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&listResp); err != nil {
|
||||
resp.Body.Close()
|
||||
return nil, fmt.Errorf("decode failed: %w", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
for _, rec := range listResp.Records {
|
||||
var fields map[string]any
|
||||
if err := json.Unmarshal(rec.Value, &fields); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if matchFilter(fields, filter) {
|
||||
results = append(results, MatchResult{
|
||||
DID: did,
|
||||
URI: rec.URI,
|
||||
Fields: fields,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if listResp.Cursor == "" || len(listResp.Records) < 100 {
|
||||
break
|
||||
}
|
||||
cursor = listResp.Cursor
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// resolveDIDToHandle resolves a DID to a handle using the PLC directory or did:web
|
||||
func resolveDIDToHandle(did string) (string, error) {
|
||||
if strings.HasPrefix(did, "did:web:") {
|
||||
return strings.TrimPrefix(did, "did:web:"), nil
|
||||
}
|
||||
|
||||
if strings.HasPrefix(did, "did:plc:") {
|
||||
resp, err := client.Get("https://plc.directory/" + did)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("PLC query failed: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return "", fmt.Errorf("PLC returned status %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var plcDoc struct {
|
||||
AlsoKnownAs []string `json:"alsoKnownAs"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&plcDoc); err != nil {
|
||||
return "", fmt.Errorf("failed to parse PLC response: %w", err)
|
||||
}
|
||||
|
||||
for _, aka := range plcDoc.AlsoKnownAs {
|
||||
if strings.HasPrefix(aka, "at://") {
|
||||
return strings.TrimPrefix(aka, "at://"), nil
|
||||
}
|
||||
}
|
||||
|
||||
return did, nil
|
||||
}
|
||||
|
||||
return did, nil
|
||||
}
|
||||
|
||||
// resolveDIDToPDS resolves a DID to its PDS endpoint
|
||||
func resolveDIDToPDS(did string) (string, error) {
|
||||
if strings.HasPrefix(did, "did:web:") {
|
||||
domain := strings.TrimPrefix(did, "did:web:")
|
||||
domain = strings.ReplaceAll(domain, "%3A", ":")
|
||||
scheme := "https"
|
||||
if strings.Contains(domain, ":") {
|
||||
scheme = "http"
|
||||
}
|
||||
return scheme + "://" + domain, nil
|
||||
}
|
||||
|
||||
if strings.HasPrefix(did, "did:plc:") {
|
||||
resp, err := client.Get("https://plc.directory/" + did)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("PLC query failed: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return "", fmt.Errorf("PLC returned status %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var plcDoc struct {
|
||||
Service []struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"`
|
||||
ServiceEndpoint string `json:"serviceEndpoint"`
|
||||
} `json:"service"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&plcDoc); err != nil {
|
||||
return "", fmt.Errorf("failed to parse PLC response: %w", err)
|
||||
}
|
||||
|
||||
for _, svc := range plcDoc.Service {
|
||||
if svc.Type == "AtprotoPersonalDataServer" {
|
||||
return svc.ServiceEndpoint, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("no PDS found in DID document")
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("unsupported DID method: %s", did)
|
||||
}
|
||||
616
cmd/relay-compare/main.go
Normal file
616
cmd/relay-compare/main.go
Normal file
@@ -0,0 +1,616 @@
|
||||
// relay-compare compares ATProto relays by querying listReposByCollection
|
||||
// for all io.atcr.* record types and showing what's missing from each relay.
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// go run ./cmd/relay-compare https://relay1.us-east.bsky.network https://relay1.us-west.bsky.network
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/bluesky-social/indigo/atproto/identity"
|
||||
"github.com/bluesky-social/indigo/atproto/syntax"
|
||||
"github.com/bluesky-social/indigo/xrpc"
|
||||
)
|
||||
|
||||
// ANSI color codes (disabled via --no-color or NO_COLOR env)
|
||||
var (
|
||||
cRed = "\033[31m"
|
||||
cGreen = "\033[32m"
|
||||
cYellow = "\033[33m"
|
||||
cCyan = "\033[36m"
|
||||
cBold = "\033[1m"
|
||||
cDim = "\033[2m"
|
||||
cReset = "\033[0m"
|
||||
)
|
||||
|
||||
func disableColors() {
|
||||
cRed, cGreen, cYellow, cCyan, cBold, cDim, cReset = "", "", "", "", "", "", ""
|
||||
}
|
||||
|
||||
// All io.atcr.* collections to compare
|
||||
var allCollections = []string{
|
||||
"io.atcr.manifest",
|
||||
"io.atcr.tag",
|
||||
"io.atcr.sailor.profile",
|
||||
"io.atcr.sailor.star",
|
||||
"io.atcr.repo.page",
|
||||
"io.atcr.hold.captain",
|
||||
"io.atcr.hold.crew",
|
||||
"io.atcr.hold.layer",
|
||||
"io.atcr.hold.stats",
|
||||
"io.atcr.hold.scan",
|
||||
}
|
||||
|
||||
type summaryRow struct {
|
||||
collection string
|
||||
counts []int
|
||||
status string // "sync", "diff", "error"
|
||||
diffCount int
|
||||
realGaps int // verified: record exists on PDS but relay is missing it
|
||||
ghosts int // verified: record doesn't exist on PDS, relay has stale entry
|
||||
deactivated int // verified: account deactivated/deleted on PDS
|
||||
}
|
||||
|
||||
// verifyResult holds the PDS verification result for a (DID, collection) pair.
|
||||
type verifyResult struct {
|
||||
exists bool
|
||||
deactivated bool // account deactivated/deleted on PDS
|
||||
err error
|
||||
}
|
||||
|
||||
// key identifies a (collection, relay-or-DID) pair for result lookups.
|
||||
type key struct{ col, relay string }
|
||||
|
||||
// diffEntry represents a DID missing from a specific relay for a collection.
|
||||
type diffEntry struct {
|
||||
did string
|
||||
collection string
|
||||
relayIdx int
|
||||
}
|
||||
|
||||
// XRPC response types for listReposByCollection
|
||||
type listReposByCollectionResult struct {
|
||||
Repos []repoRef `json:"repos"`
|
||||
Cursor string `json:"cursor,omitempty"`
|
||||
}
|
||||
|
||||
type repoRef struct {
|
||||
DID string `json:"did"`
|
||||
}
|
||||
|
||||
// XRPC response types for listRecords
|
||||
type listRecordsResult struct {
|
||||
Records []json.RawMessage `json:"records"`
|
||||
Cursor string `json:"cursor,omitempty"`
|
||||
}
|
||||
|
||||
// Shared identity directory for DID resolution
|
||||
var dir identity.Directory
|
||||
|
||||
func main() {
|
||||
noColor := flag.Bool("no-color", false, "disable colored output")
|
||||
verify := flag.Bool("verify", false, "verify diffs against PDS to distinguish real gaps from ghost entries")
|
||||
hideGhosts := flag.Bool("hide-ghosts", false, "with --verify, hide ghost and deactivated entries from output")
|
||||
collection := flag.String("collection", "", "compare only this collection")
|
||||
timeout := flag.Duration("timeout", 2*time.Minute, "timeout for all relay queries")
|
||||
flag.Usage = func() {
|
||||
fmt.Fprintf(os.Stderr, "Compare ATProto relays by querying listReposByCollection for io.atcr.* records.\n\n")
|
||||
fmt.Fprintf(os.Stderr, "Usage:\n relay-compare [flags] <relay-url> <relay-url> [relay-url...]\n\n")
|
||||
fmt.Fprintf(os.Stderr, "Example:\n")
|
||||
fmt.Fprintf(os.Stderr, " go run ./cmd/relay-compare https://relay1.us-east.bsky.network https://relay1.us-west.bsky.network\n\n")
|
||||
fmt.Fprintf(os.Stderr, "Flags:\n")
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
flag.Parse()
|
||||
|
||||
if *noColor || os.Getenv("NO_COLOR") != "" {
|
||||
disableColors()
|
||||
}
|
||||
|
||||
relays := flag.Args()
|
||||
if len(relays) < 2 {
|
||||
flag.Usage()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
for i, r := range relays {
|
||||
relays[i] = strings.TrimRight(r, "/")
|
||||
}
|
||||
|
||||
cols := allCollections
|
||||
if *collection != "" {
|
||||
cols = []string{*collection}
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), *timeout)
|
||||
defer cancel()
|
||||
|
||||
dir = identity.DefaultDirectory()
|
||||
|
||||
// Short display names for each relay
|
||||
names := make([]string, len(relays))
|
||||
maxNameLen := 0
|
||||
for i, r := range relays {
|
||||
names[i] = shortName(r)
|
||||
if len(names[i]) > maxNameLen {
|
||||
maxNameLen = len(names[i])
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("%sFetching %d collections from %d relays...%s\n", cDim, len(cols), len(relays), cReset)
|
||||
|
||||
// Fetch all data in parallel: every (collection, relay) pair concurrently
|
||||
type fetchResult struct {
|
||||
dids map[string]struct{}
|
||||
err error
|
||||
}
|
||||
allResults := make(map[key]fetchResult)
|
||||
var mu sync.Mutex
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for _, col := range cols {
|
||||
for _, relay := range relays {
|
||||
wg.Add(1)
|
||||
go func(col, relay string) {
|
||||
defer wg.Done()
|
||||
dids, err := fetchAllDIDs(ctx, relay, col)
|
||||
mu.Lock()
|
||||
allResults[key{col, relay}] = fetchResult{dids, err}
|
||||
mu.Unlock()
|
||||
}(col, relay)
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Collect all diffs across collections (for optional verification)
|
||||
var allDiffs []diffEntry
|
||||
|
||||
// First pass: compute diffs per collection
|
||||
type colDiffs struct {
|
||||
hasError bool
|
||||
counts []int
|
||||
// per-relay missing DIDs (sorted)
|
||||
missing [][]string
|
||||
}
|
||||
colResults := make(map[string]*colDiffs)
|
||||
|
||||
for _, col := range cols {
|
||||
cd := &colDiffs{counts: make([]int, len(relays)), missing: make([][]string, len(relays))}
|
||||
colResults[col] = cd
|
||||
|
||||
for ri, relay := range relays {
|
||||
r := allResults[key{col, relay}]
|
||||
if r.err != nil {
|
||||
cd.hasError = true
|
||||
} else {
|
||||
cd.counts[ri] = len(r.dids)
|
||||
}
|
||||
}
|
||||
|
||||
if cd.hasError {
|
||||
continue
|
||||
}
|
||||
|
||||
// Build union of all DIDs across relays
|
||||
union := make(map[string]struct{})
|
||||
for _, relay := range relays {
|
||||
for did := range allResults[key{col, relay}].dids {
|
||||
union[did] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
for ri, relay := range relays {
|
||||
var missing []string
|
||||
for did := range union {
|
||||
if _, ok := allResults[key{col, relay}].dids[did]; !ok {
|
||||
missing = append(missing, did)
|
||||
}
|
||||
}
|
||||
sort.Strings(missing)
|
||||
cd.missing[ri] = missing
|
||||
for _, did := range missing {
|
||||
allDiffs = append(allDiffs, diffEntry{did: did, collection: col, relayIdx: ri})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Optionally verify diffs against PDS
|
||||
verified := make(map[key]verifyResult)
|
||||
if *verify && len(allDiffs) > 0 {
|
||||
verified = verifyDiffs(ctx, allDiffs)
|
||||
}
|
||||
|
||||
// Display per-collection diffs and collect summary
|
||||
var summary []summaryRow
|
||||
totalMissing := 0
|
||||
totalRealGaps := 0
|
||||
totalGhosts := 0
|
||||
totalDeactivated := 0
|
||||
|
||||
for _, col := range cols {
|
||||
fmt.Printf("\n%s%s━━━ %s ━━━%s\n", cBold, cCyan, col, cReset)
|
||||
|
||||
cd := colResults[col]
|
||||
row := summaryRow{collection: col, counts: cd.counts}
|
||||
|
||||
if cd.hasError {
|
||||
for ri, relay := range relays {
|
||||
r := allResults[key{col, relay}]
|
||||
if r.err != nil {
|
||||
fmt.Printf(" %-*s %s%serror%s: %v\n", maxNameLen, names[ri], cBold, cRed, cReset, r.err)
|
||||
} else {
|
||||
fmt.Printf(" %-*s %s%d%s DIDs\n", maxNameLen, names[ri], cBold, len(r.dids), cReset)
|
||||
}
|
||||
}
|
||||
row.status = "error"
|
||||
summary = append(summary, row)
|
||||
continue
|
||||
}
|
||||
|
||||
// Show counts per relay
|
||||
for ri := range relays {
|
||||
fmt.Printf(" %-*s %s%d%s DIDs\n", maxNameLen, names[ri], cBold, cd.counts[ri], cReset)
|
||||
}
|
||||
|
||||
// Show missing DIDs per relay
|
||||
inSync := true
|
||||
for ri := range relays {
|
||||
missing := cd.missing[ri]
|
||||
if len(missing) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
inSync = false
|
||||
totalMissing += len(missing)
|
||||
row.diffCount += len(missing)
|
||||
|
||||
fmt.Printf("\n %sMissing from %s (%d):%s\n", cRed, names[ri], len(missing), cReset)
|
||||
for _, did := range missing {
|
||||
suffix := ""
|
||||
skip := false
|
||||
if *verify {
|
||||
vr, ok := verified[key{col, did}]
|
||||
if !ok {
|
||||
suffix = fmt.Sprintf(" %s(verify: unknown)%s", cDim, cReset)
|
||||
} else if vr.err != nil {
|
||||
suffix = fmt.Sprintf(" %s(verify: %s)%s", cDim, vr.err, cReset)
|
||||
} else if vr.deactivated {
|
||||
suffix = fmt.Sprintf(" %s← deactivated%s", cDim, cReset)
|
||||
row.deactivated++
|
||||
totalDeactivated++
|
||||
skip = *hideGhosts
|
||||
} else if vr.exists {
|
||||
suffix = fmt.Sprintf(" %s← real gap%s", cRed, cReset)
|
||||
row.realGaps++
|
||||
totalRealGaps++
|
||||
} else {
|
||||
suffix = fmt.Sprintf(" %s← ghost (not on PDS)%s", cDim, cReset)
|
||||
row.ghosts++
|
||||
totalGhosts++
|
||||
skip = *hideGhosts
|
||||
}
|
||||
}
|
||||
if !skip {
|
||||
fmt.Printf(" %s- %s%s%s\n", cRed, did, cReset, suffix)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// When verifying, ghost/deactivated-only diffs are considered in sync
|
||||
if !inSync && *verify && row.realGaps == 0 {
|
||||
inSync = true
|
||||
}
|
||||
|
||||
if inSync {
|
||||
notes := ""
|
||||
if !*hideGhosts {
|
||||
notes = formatSyncNotes(row.ghosts, row.deactivated)
|
||||
}
|
||||
if notes != "" {
|
||||
fmt.Printf(" %s✓ in sync%s %s(%s)%s\n", cGreen, cReset, cDim, notes, cReset)
|
||||
} else {
|
||||
fmt.Printf(" %s✓ in sync%s\n", cGreen, cReset)
|
||||
}
|
||||
row.status = "sync"
|
||||
} else {
|
||||
row.status = "diff"
|
||||
}
|
||||
summary = append(summary, row)
|
||||
}
|
||||
|
||||
// Summary table
|
||||
printSummary(summary, names, maxNameLen, totalMissing, *verify, *hideGhosts, totalRealGaps, totalGhosts, totalDeactivated)
|
||||
}
|
||||
|
||||
func printSummary(rows []summaryRow, names []string, maxNameLen, totalMissing int, showVerify, hideGhosts bool, totalRealGaps, totalGhosts, totalDeactivated int) {
|
||||
fmt.Printf("\n%s%s━━━ Summary ━━━%s\n\n", cBold, cCyan, cReset)
|
||||
|
||||
// Build short labels (A, B, C, ...) for compact columns
|
||||
labels := make([]string, len(names))
|
||||
for i, name := range names {
|
||||
labels[i] = string(rune('A' + i))
|
||||
fmt.Printf(" %s%s%s: %s\n", cBold, labels[i], cReset, name)
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
colW := len("Collection")
|
||||
for _, row := range rows {
|
||||
if len(row.collection) > colW {
|
||||
colW = len(row.collection)
|
||||
}
|
||||
}
|
||||
relayW := 6
|
||||
|
||||
// Header
|
||||
fmt.Printf(" %-*s", colW, "Collection")
|
||||
for _, label := range labels {
|
||||
fmt.Printf(" %*s", relayW, label)
|
||||
}
|
||||
fmt.Printf(" Status\n")
|
||||
|
||||
// Separator
|
||||
fmt.Printf(" %s", strings.Repeat("─", colW))
|
||||
for range labels {
|
||||
fmt.Printf(" %s", strings.Repeat("─", relayW))
|
||||
}
|
||||
fmt.Printf(" %s\n", strings.Repeat("─", 14))
|
||||
|
||||
// Data rows
|
||||
for _, row := range rows {
|
||||
fmt.Printf(" %-*s", colW, row.collection)
|
||||
for _, c := range row.counts {
|
||||
switch row.status {
|
||||
case "error":
|
||||
fmt.Printf(" %*s", relayW, fmt.Sprintf("%s—%s", cDim, cReset))
|
||||
default:
|
||||
fmt.Printf(" %*d", relayW, c)
|
||||
}
|
||||
}
|
||||
switch row.status {
|
||||
case "sync":
|
||||
notes := ""
|
||||
if !hideGhosts {
|
||||
notes = formatSyncNotes(row.ghosts, row.deactivated)
|
||||
}
|
||||
if notes != "" {
|
||||
fmt.Printf(" %s✓ in sync%s %s(%s)%s", cGreen, cReset, cDim, notes, cReset)
|
||||
} else {
|
||||
fmt.Printf(" %s✓ in sync%s", cGreen, cReset)
|
||||
}
|
||||
case "diff":
|
||||
if showVerify {
|
||||
if hideGhosts {
|
||||
fmt.Printf(" %s≠ %d missing%s", cYellow, row.realGaps, cReset)
|
||||
} else {
|
||||
notes := formatSyncNotes(row.ghosts, row.deactivated)
|
||||
if notes != "" {
|
||||
notes = ", " + notes
|
||||
}
|
||||
fmt.Printf(" %s≠ %d missing%s %s(%d real%s)%s",
|
||||
cYellow, row.realGaps, cReset, cDim, row.realGaps, notes, cReset)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf(" %s≠ %d missing%s", cYellow, row.diffCount, cReset)
|
||||
}
|
||||
case "error":
|
||||
fmt.Printf(" %s✗ error%s", cRed, cReset)
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Footer
|
||||
fmt.Println()
|
||||
if totalMissing > 0 {
|
||||
if showVerify && totalRealGaps == 0 {
|
||||
if hideGhosts {
|
||||
fmt.Printf("%s✓ All relays in sync%s\n", cGreen, cReset)
|
||||
} else {
|
||||
notes := formatSyncNotes(totalGhosts, totalDeactivated)
|
||||
fmt.Printf("%s✓ All relays in sync%s %s(%s)%s\n", cGreen, cReset, cDim, notes, cReset)
|
||||
}
|
||||
} else {
|
||||
if showVerify {
|
||||
fmt.Printf("%s%d real gaps across relays%s", cYellow, totalRealGaps, cReset)
|
||||
if !hideGhosts {
|
||||
notes := formatSyncNotes(totalGhosts, totalDeactivated)
|
||||
if notes != "" {
|
||||
fmt.Printf(" %s(%s)%s", cDim, notes, cReset)
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
} else {
|
||||
fmt.Printf("%s%d total missing DID-collection pairs across relays%s\n", cYellow, totalMissing, cReset)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("%s✓ All relays fully in sync%s\n", cGreen, cReset)
|
||||
}
|
||||
}
|
||||
|
||||
// formatSyncNotes builds a parenthetical like "2 ghost, 1 deactivated" for sync status.
|
||||
// Returns empty string if both counts are zero.
|
||||
func formatSyncNotes(ghosts, deactivated int) string {
|
||||
var parts []string
|
||||
if ghosts > 0 {
|
||||
parts = append(parts, fmt.Sprintf("%d ghost", ghosts))
|
||||
}
|
||||
if deactivated > 0 {
|
||||
parts = append(parts, fmt.Sprintf("%d deactivated", deactivated))
|
||||
}
|
||||
return strings.Join(parts, ", ")
|
||||
}
|
||||
|
||||
// verifyDiffs resolves each diff DID to its PDS and checks if records actually exist.
|
||||
func verifyDiffs(ctx context.Context, diffs []diffEntry) map[key]verifyResult {
|
||||
// Collect unique (DID, collection) pairs to verify
|
||||
type didCol struct{ did, col string }
|
||||
unique := make(map[didCol]struct{})
|
||||
for _, d := range diffs {
|
||||
unique[didCol{d.did, d.collection}] = struct{}{}
|
||||
}
|
||||
|
||||
// Resolve unique DIDs to PDS endpoints (deduplicate across collections)
|
||||
uniqueDIDs := make(map[string]struct{})
|
||||
for dc := range unique {
|
||||
uniqueDIDs[dc.did] = struct{}{}
|
||||
}
|
||||
|
||||
fmt.Printf("\n%sVerifying %d DID-collection pairs (%d unique DIDs)...%s\n", cDim, len(unique), len(uniqueDIDs), cReset)
|
||||
|
||||
pdsEndpoints := make(map[string]string) // DID → PDS URL
|
||||
pdsErrors := make(map[string]error) // DID → resolution error
|
||||
var mu sync.Mutex
|
||||
var wg sync.WaitGroup
|
||||
sem := make(chan struct{}, 10) // concurrency limit
|
||||
|
||||
for did := range uniqueDIDs {
|
||||
wg.Add(1)
|
||||
go func(did string) {
|
||||
defer wg.Done()
|
||||
sem <- struct{}{}
|
||||
defer func() { <-sem }()
|
||||
|
||||
pds, err := resolveDIDToPDS(ctx, did)
|
||||
mu.Lock()
|
||||
if err != nil {
|
||||
pdsErrors[did] = err
|
||||
} else {
|
||||
pdsEndpoints[did] = pds
|
||||
}
|
||||
mu.Unlock()
|
||||
}(did)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Check each (DID, collection) pair against the resolved PDS
|
||||
results := make(map[key]verifyResult)
|
||||
|
||||
for dc := range unique {
|
||||
wg.Add(1)
|
||||
go func(dc didCol) {
|
||||
defer wg.Done()
|
||||
sem <- struct{}{}
|
||||
defer func() { <-sem }()
|
||||
|
||||
k := key{dc.col, dc.did}
|
||||
|
||||
// Check if DID resolution failed — could mean account is deactivated/tombstoned
|
||||
if err, ok := pdsErrors[dc.did]; ok {
|
||||
errStr := err.Error()
|
||||
if strings.Contains(errStr, "no PDS endpoint") ||
|
||||
strings.Contains(errStr, "not found") {
|
||||
mu.Lock()
|
||||
results[k] = verifyResult{deactivated: true}
|
||||
mu.Unlock()
|
||||
} else {
|
||||
mu.Lock()
|
||||
results[k] = verifyResult{err: fmt.Errorf("DID resolution failed: %w", err)}
|
||||
mu.Unlock()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
pds := pdsEndpoints[dc.did]
|
||||
client := &xrpc.Client{Host: pds, Client: http.DefaultClient}
|
||||
var listResult listRecordsResult
|
||||
err := client.LexDo(ctx, "GET", "", "com.atproto.repo.listRecords", map[string]any{
|
||||
"repo": dc.did,
|
||||
"collection": dc.col,
|
||||
"limit": 1,
|
||||
}, nil, &listResult)
|
||||
mu.Lock()
|
||||
if err != nil {
|
||||
errStr := err.Error()
|
||||
if strings.Contains(errStr, "Could not find repo") ||
|
||||
strings.Contains(errStr, "RepoDeactivated") ||
|
||||
strings.Contains(errStr, "RepoTakendown") ||
|
||||
strings.Contains(errStr, "RepoSuspended") {
|
||||
results[k] = verifyResult{deactivated: true}
|
||||
} else {
|
||||
results[k] = verifyResult{err: err}
|
||||
}
|
||||
} else {
|
||||
results[k] = verifyResult{exists: len(listResult.Records) > 0}
|
||||
}
|
||||
mu.Unlock()
|
||||
}(dc)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
// resolveDIDToPDS resolves a DID to its PDS endpoint using the shared identity directory.
|
||||
func resolveDIDToPDS(ctx context.Context, did string) (string, error) {
|
||||
didParsed, err := syntax.ParseDID(did)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("invalid DID: %w", err)
|
||||
}
|
||||
|
||||
ident, err := dir.LookupDID(ctx, didParsed)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to resolve DID: %w", err)
|
||||
}
|
||||
|
||||
pdsEndpoint := ident.PDSEndpoint()
|
||||
if pdsEndpoint == "" {
|
||||
return "", fmt.Errorf("no PDS endpoint found for DID")
|
||||
}
|
||||
|
||||
return pdsEndpoint, nil
|
||||
}
|
||||
|
||||
// fetchAllDIDs paginates through listReposByCollection to collect all DIDs.
|
||||
func fetchAllDIDs(ctx context.Context, relay, collection string) (map[string]struct{}, error) {
|
||||
client := &xrpc.Client{Host: relay, Client: http.DefaultClient}
|
||||
dids := make(map[string]struct{})
|
||||
var cursor string
|
||||
|
||||
for {
|
||||
params := map[string]any{
|
||||
"collection": collection,
|
||||
"limit": 1000,
|
||||
}
|
||||
if cursor != "" {
|
||||
params["cursor"] = cursor
|
||||
}
|
||||
|
||||
var result listReposByCollectionResult
|
||||
err := client.LexDo(ctx, "GET", "", "com.atproto.sync.listReposByCollection", params, nil, &result)
|
||||
if err != nil {
|
||||
return dids, fmt.Errorf("listReposByCollection failed: %w", err)
|
||||
}
|
||||
|
||||
for _, repo := range result.Repos {
|
||||
dids[repo.DID] = struct{}{}
|
||||
}
|
||||
|
||||
if result.Cursor == "" {
|
||||
break
|
||||
}
|
||||
cursor = result.Cursor
|
||||
}
|
||||
|
||||
return dids, nil
|
||||
}
|
||||
|
||||
// shortName extracts the hostname from a relay URL for display.
|
||||
func shortName(relayURL string) string {
|
||||
u, err := url.Parse(relayURL)
|
||||
if err != nil {
|
||||
return relayURL
|
||||
}
|
||||
return u.Hostname()
|
||||
}
|
||||
418
cmd/s3-test/main.go
Normal file
418
cmd/s3-test/main.go
Normal file
@@ -0,0 +1,418 @@
|
||||
// Command s3-test is a diagnostic tool that tests S3 connectivity using both
|
||||
// AWS SDK v1 (used by distribution's storage driver) and AWS SDK v2 (used by
|
||||
// ATCR's presigned URL service). It helps diagnose signature compatibility
|
||||
// issues with S3-compatible storage providers.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
awsv1 "github.com/aws/aws-sdk-go/aws"
|
||||
credentialsv1 "github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
s3v1 "github.com/aws/aws-sdk-go/service/s3"
|
||||
|
||||
awsv2 "github.com/aws/aws-sdk-go-v2/aws"
|
||||
configv2 "github.com/aws/aws-sdk-go-v2/config"
|
||||
credentialsv2 "github.com/aws/aws-sdk-go-v2/credentials"
|
||||
s3v2 "github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var (
|
||||
envFile = flag.String("env-file", "", "Load environment variables from file (KEY=VALUE format)")
|
||||
accessKey = flag.String("access-key", "", "S3 access key (env: AWS_ACCESS_KEY_ID)")
|
||||
secretKey = flag.String("secret-key", "", "S3 secret key (env: AWS_SECRET_ACCESS_KEY)")
|
||||
region = flag.String("region", "", "S3 region (env: S3_REGION)")
|
||||
bucket = flag.String("bucket", "", "S3 bucket name (env: S3_BUCKET)")
|
||||
endpoint = flag.String("endpoint", "", "S3 endpoint URL (env: S3_ENDPOINT)")
|
||||
pullZone = flag.String("pull-zone", "", "CDN pull zone URL for presigned reads (env: PULL_ZONE)")
|
||||
prefix = flag.String("prefix", "docker/registry/v2/blobs", "Key prefix for list operations")
|
||||
verbose = flag.Bool("verbose", false, "Enable SDK debug signing logs")
|
||||
)
|
||||
flag.Parse()
|
||||
|
||||
// Load env file first, then let flags and real env vars override
|
||||
if *envFile != "" {
|
||||
if err := loadEnvFile(*envFile); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error loading env file: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve: flag > env var > default
|
||||
if *accessKey == "" {
|
||||
*accessKey = os.Getenv("AWS_ACCESS_KEY_ID")
|
||||
}
|
||||
if *secretKey == "" {
|
||||
*secretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
|
||||
}
|
||||
if *region == "" {
|
||||
*region = envOr("S3_REGION", "us-east-1")
|
||||
}
|
||||
if *bucket == "" {
|
||||
*bucket = os.Getenv("S3_BUCKET")
|
||||
}
|
||||
if *endpoint == "" {
|
||||
*endpoint = os.Getenv("S3_ENDPOINT")
|
||||
}
|
||||
if *pullZone == "" {
|
||||
*pullZone = os.Getenv("PULL_ZONE")
|
||||
}
|
||||
|
||||
if *accessKey == "" || *secretKey == "" || *bucket == "" {
|
||||
fmt.Fprintln(os.Stderr, "Usage: s3-test [--env-file FILE] [--access-key KEY] [--secret-key KEY] [--bucket BUCKET] [--endpoint URL] [--region REGION] [--prefix PREFIX] [--verbose]")
|
||||
fmt.Fprintln(os.Stderr, "Env vars: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, S3_BUCKET, S3_REGION, S3_ENDPOINT")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Println("S3 Connectivity Diagnostic")
|
||||
fmt.Println("==========================")
|
||||
fmt.Printf("Endpoint: %s\n", valueOr(*endpoint, "(default AWS)"))
|
||||
fmt.Printf("Pull Zone: %s\n", valueOr(*pullZone, "(none)"))
|
||||
fmt.Printf("Region: %s\n", *region)
|
||||
fmt.Printf("AccessKey: %s...%s (%d chars)\n", (*accessKey)[:3], (*accessKey)[len(*accessKey)-3:], len(*accessKey))
|
||||
fmt.Printf("SecretKey: %s...%s (%d chars)\n", (*secretKey)[:3], (*secretKey)[len(*secretKey)-3:], len(*secretKey))
|
||||
fmt.Printf("Bucket: %s\n", *bucket)
|
||||
fmt.Printf("Prefix: %s\n", *prefix)
|
||||
fmt.Println()
|
||||
|
||||
ctx := context.Background()
|
||||
results := make([]result, 0, 6)
|
||||
|
||||
// Build SDK v1 client (SigV4) — matches distribution driver's New()
|
||||
v1Client := buildV1Client(*accessKey, *secretKey, *region, *endpoint, *verbose)
|
||||
|
||||
// Test 1: SDK v1 SigV4 HeadBucket
|
||||
results = append(results, runTest("SDK v1 / SigV4 / HeadBucket", func() error {
|
||||
_, err := v1Client.HeadBucketWithContext(ctx, &s3v1.HeadBucketInput{
|
||||
Bucket: awsv1.String(*bucket),
|
||||
})
|
||||
return err
|
||||
}))
|
||||
|
||||
// Test 2: SDK v1 SigV4 ListObjectsV2
|
||||
results = append(results, runTest("SDK v1 / SigV4 / ListObjectsV2", func() error {
|
||||
_, err := v1Client.ListObjectsV2WithContext(ctx, &s3v1.ListObjectsV2Input{
|
||||
Bucket: awsv1.String(*bucket),
|
||||
Prefix: awsv1.String(*prefix),
|
||||
MaxKeys: awsv1.Int64(5),
|
||||
})
|
||||
return err
|
||||
}))
|
||||
|
||||
// Test 3: SDK v1 SigV4 ListObjectsV2Pages (paginated, matches doWalk)
|
||||
results = append(results, runTest("SDK v1 / SigV4 / ListObjectsV2Pages", func() error {
|
||||
return v1Client.ListObjectsV2PagesWithContext(ctx, &s3v1.ListObjectsV2Input{
|
||||
Bucket: awsv1.String(*bucket),
|
||||
Prefix: awsv1.String(*prefix),
|
||||
MaxKeys: awsv1.Int64(5),
|
||||
}, func(page *s3v1.ListObjectsV2Output, lastPage bool) bool {
|
||||
return false // stop after first page
|
||||
})
|
||||
}))
|
||||
|
||||
// Build SDK v2 client — matches NewS3Service()
|
||||
v2Client := buildV2Client(ctx, *accessKey, *secretKey, *region, *endpoint)
|
||||
|
||||
// Test 5: SDK v2 SigV4 HeadBucket
|
||||
results = append(results, runTest("SDK v2 / SigV4 / HeadBucket", func() error {
|
||||
_, err := v2Client.HeadBucket(ctx, &s3v2.HeadBucketInput{
|
||||
Bucket: awsv2.String(*bucket),
|
||||
})
|
||||
return err
|
||||
}))
|
||||
|
||||
// Test 6: SDK v2 SigV4 ListObjectsV2
|
||||
results = append(results, runTest("SDK v2 / SigV4 / ListObjectsV2", func() error {
|
||||
_, err := v2Client.ListObjectsV2(ctx, &s3v2.ListObjectsV2Input{
|
||||
Bucket: awsv2.String(*bucket),
|
||||
Prefix: awsv2.String(*prefix),
|
||||
MaxKeys: awsv2.Int32(5),
|
||||
})
|
||||
return err
|
||||
}))
|
||||
|
||||
// Find a real object key for GetObject / presigned URL tests
|
||||
var testKey string
|
||||
listOut, err := v2Client.ListObjectsV2(ctx, &s3v2.ListObjectsV2Input{
|
||||
Bucket: awsv2.String(*bucket),
|
||||
Prefix: awsv2.String(*prefix),
|
||||
MaxKeys: awsv2.Int32(1),
|
||||
})
|
||||
if err == nil && len(listOut.Contents) > 0 {
|
||||
testKey = *listOut.Contents[0].Key
|
||||
}
|
||||
|
||||
if testKey == "" {
|
||||
fmt.Printf("\n (Skipping GetObject/Presigned tests — no objects found under prefix %q)\n", *prefix)
|
||||
} else {
|
||||
fmt.Printf("\n Test object: %s\n\n", testKey)
|
||||
|
||||
// Test 7: SDK v1 GetObject (HEAD only)
|
||||
results = append(results, runTest("SDK v1 / SigV4 / HeadObject", func() error {
|
||||
_, err := v1Client.HeadObjectWithContext(ctx, &s3v1.HeadObjectInput{
|
||||
Bucket: awsv1.String(*bucket),
|
||||
Key: awsv1.String(testKey),
|
||||
})
|
||||
return err
|
||||
}))
|
||||
|
||||
// Test 8: SDK v2 GetObject (HEAD only)
|
||||
results = append(results, runTest("SDK v2 / SigV4 / HeadObject", func() error {
|
||||
_, err := v2Client.HeadObject(ctx, &s3v2.HeadObjectInput{
|
||||
Bucket: awsv2.String(*bucket),
|
||||
Key: awsv2.String(testKey),
|
||||
})
|
||||
return err
|
||||
}))
|
||||
|
||||
// Test 9: SDK v2 Presigned GET URL (generate + fetch)
|
||||
presignClient := s3v2.NewPresignClient(v2Client)
|
||||
results = append(results, runTest("SDK v2 / Presigned GET URL", func() error {
|
||||
presigned, err := presignClient.PresignGetObject(ctx, &s3v2.GetObjectInput{
|
||||
Bucket: awsv2.String(*bucket),
|
||||
Key: awsv2.String(testKey),
|
||||
}, func(opts *s3v2.PresignOptions) {
|
||||
opts.Expires = 5 * time.Minute
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("presign: %w", err)
|
||||
}
|
||||
if *verbose {
|
||||
// Show host + query params (no path to avoid leaking key structure)
|
||||
u, _ := url.Parse(presigned.URL)
|
||||
fmt.Printf("\n Presigned host: %s\n", u.Host)
|
||||
fmt.Printf(" Signed headers: %s\n", presigned.SignedHeader)
|
||||
}
|
||||
resp, err := http.Get(presigned.URL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetch: %w", err)
|
||||
}
|
||||
body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != 200 {
|
||||
return fmt.Errorf("presigned URL returned %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
return nil
|
||||
}))
|
||||
|
||||
// Pull zone presigned tests — sign against real endpoint, swap host to pull zone
|
||||
if *pullZone != "" {
|
||||
results = append(results, runTest("SDK v2 / Presigned GET via Pull Zone", func() error {
|
||||
presigned, err := presignClient.PresignGetObject(ctx, &s3v2.GetObjectInput{
|
||||
Bucket: awsv2.String(*bucket),
|
||||
Key: awsv2.String(testKey),
|
||||
}, func(opts *s3v2.PresignOptions) {
|
||||
opts.Expires = 5 * time.Minute
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("presign: %w", err)
|
||||
}
|
||||
pzURL := swapHost(presigned.URL, *pullZone)
|
||||
if *verbose {
|
||||
fmt.Printf("\n Signed against: %s\n", presigned.URL[:40]+"...")
|
||||
fmt.Printf(" Fetching from: %s\n", pzURL[:40]+"...")
|
||||
}
|
||||
resp, err := http.Get(pzURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetch: %w", err)
|
||||
}
|
||||
body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != 200 {
|
||||
return fmt.Errorf("pull zone GET returned %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
return nil
|
||||
}))
|
||||
|
||||
}
|
||||
|
||||
// Test 10: SDK v2 Presigned PUT URL (generate + upload empty)
|
||||
results = append(results, runTest("SDK v2 / Presigned PUT URL", func() error {
|
||||
putKey := *prefix + "/_s3-test-probe"
|
||||
presigned, err := presignClient.PresignPutObject(ctx, &s3v2.PutObjectInput{
|
||||
Bucket: awsv2.String(*bucket),
|
||||
Key: awsv2.String(putKey),
|
||||
}, func(opts *s3v2.PresignOptions) {
|
||||
opts.Expires = 5 * time.Minute
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("presign: %w", err)
|
||||
}
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPut, presigned.URL, strings.NewReader(""))
|
||||
if err != nil {
|
||||
return fmt.Errorf("build request: %w", err)
|
||||
}
|
||||
req.Header.Set("Content-Length", "0")
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetch: %w", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != 200 {
|
||||
return fmt.Errorf("presigned PUT returned %d", resp.StatusCode)
|
||||
}
|
||||
// Clean up
|
||||
_, _ = v2Client.DeleteObject(ctx, &s3v2.DeleteObjectInput{
|
||||
Bucket: awsv2.String(*bucket),
|
||||
Key: awsv2.String(putKey),
|
||||
})
|
||||
return nil
|
||||
}))
|
||||
}
|
||||
|
||||
// Print summary
|
||||
fmt.Println()
|
||||
fmt.Println("Summary")
|
||||
fmt.Println("=======")
|
||||
|
||||
allPass := true
|
||||
for _, r := range results {
|
||||
status := "PASS"
|
||||
if !r.ok {
|
||||
status = "FAIL"
|
||||
allPass = false
|
||||
}
|
||||
fmt.Printf(" [%s] %s (%s)\n", status, r.name, r.duration.Round(time.Millisecond))
|
||||
if !r.ok {
|
||||
fmt.Printf(" Error: %s\n", r.err)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println()
|
||||
if allPass {
|
||||
fmt.Println("Diagnosis: All tests passed. S3 connectivity is working with both SDKs.")
|
||||
} else {
|
||||
fmt.Println("Diagnosis: Some tests failed. Review errors above.")
|
||||
}
|
||||
}
|
||||
|
||||
type result struct {
|
||||
name string
|
||||
ok bool
|
||||
err error
|
||||
duration time.Duration
|
||||
}
|
||||
|
||||
func runTest(name string, fn func() error) result {
|
||||
fmt.Printf(" Testing: %s ... ", name)
|
||||
start := time.Now()
|
||||
err := fn()
|
||||
d := time.Since(start)
|
||||
if err != nil {
|
||||
fmt.Printf("FAIL (%s)\n", d.Round(time.Millisecond))
|
||||
return result{name: name, ok: false, err: err, duration: d}
|
||||
}
|
||||
fmt.Printf("PASS (%s)\n", d.Round(time.Millisecond))
|
||||
return result{name: name, ok: true, duration: d}
|
||||
}
|
||||
|
||||
func loadEnvFile(path string) error {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
if line == "" || strings.HasPrefix(line, "#") {
|
||||
continue
|
||||
}
|
||||
line = strings.TrimPrefix(line, "export ")
|
||||
k, v, ok := strings.Cut(line, "=")
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
v = strings.Trim(v, `"'`)
|
||||
os.Setenv(strings.TrimSpace(k), strings.TrimSpace(v))
|
||||
}
|
||||
return scanner.Err()
|
||||
}
|
||||
|
||||
func envOr(key, fallback string) string {
|
||||
if v := os.Getenv(key); v != "" {
|
||||
return v
|
||||
}
|
||||
return fallback
|
||||
}
|
||||
|
||||
func swapHost(presignedURL, pullZone string) string {
|
||||
parsed, err := url.Parse(presignedURL)
|
||||
if err != nil {
|
||||
return presignedURL
|
||||
}
|
||||
pz, err := url.Parse(pullZone)
|
||||
if err != nil {
|
||||
return presignedURL
|
||||
}
|
||||
parsed.Scheme = pz.Scheme
|
||||
parsed.Host = pz.Host
|
||||
return parsed.String()
|
||||
}
|
||||
|
||||
func valueOr(s, fallback string) string {
|
||||
if s == "" {
|
||||
return fallback
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// buildV1Client constructs an SDK v1 S3 client identically to
|
||||
// distribution/distribution's s3-aws driver New() function.
|
||||
func buildV1Client(accessKey, secretKey, region, endpoint string, verbose bool) *s3v1.S3 {
|
||||
awsConfig := awsv1.NewConfig()
|
||||
|
||||
if verbose {
|
||||
awsConfig.WithLogLevel(awsv1.LogDebugWithSigning)
|
||||
}
|
||||
|
||||
awsConfig.WithCredentials(credentialsv1.NewStaticCredentials(accessKey, secretKey, ""))
|
||||
awsConfig.WithRegion(region)
|
||||
|
||||
if endpoint != "" {
|
||||
awsConfig.WithEndpoint(endpoint)
|
||||
awsConfig.WithS3ForcePathStyle(true)
|
||||
}
|
||||
|
||||
sess, err := session.NewSession(awsConfig)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to create SDK v1 session: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
return s3v1.New(sess)
|
||||
}
|
||||
|
||||
// buildV2Client constructs an SDK v2 S3 client identically to
|
||||
// ATCR's NewS3Service() in pkg/s3/types.go.
|
||||
func buildV2Client(ctx context.Context, accessKey, secretKey, region, endpoint string) *s3v2.Client {
|
||||
cfg, err := configv2.LoadDefaultConfig(ctx,
|
||||
configv2.WithRegion(region),
|
||||
configv2.WithCredentialsProvider(
|
||||
credentialsv2.NewStaticCredentialsProvider(accessKey, secretKey, ""),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to load SDK v2 config: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
return s3v2.NewFromConfig(cfg, func(o *s3v2.Options) {
|
||||
if endpoint != "" {
|
||||
o.BaseEndpoint = awsv2.String(endpoint)
|
||||
o.UsePathStyle = true
|
||||
}
|
||||
})
|
||||
}
|
||||
759
cmd/usage-report/main.go
Normal file
759
cmd/usage-report/main.go
Normal file
@@ -0,0 +1,759 @@
|
||||
// usage-report queries a hold service and generates a storage usage report
|
||||
// grouped by user, with unique layers and totals.
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// go run ./cmd/usage-report --hold https://hold01.atcr.io
|
||||
// go run ./cmd/usage-report --hold https://hold01.atcr.io --from-manifests
|
||||
// go run ./cmd/usage-report --hold https://hold01.atcr.io --list-blobs
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// LayerRecord matches the io.atcr.hold.layer record structure
|
||||
type LayerRecord struct {
|
||||
Type string `json:"$type"`
|
||||
Digest string `json:"digest"`
|
||||
Size int64 `json:"size"`
|
||||
MediaType string `json:"mediaType"`
|
||||
Manifest string `json:"manifest"`
|
||||
UserDID string `json:"userDid"`
|
||||
CreatedAt string `json:"createdAt"`
|
||||
}
|
||||
|
||||
// ManifestRecord matches the io.atcr.manifest record structure
|
||||
type ManifestRecord struct {
|
||||
Type string `json:"$type"`
|
||||
Repository string `json:"repository"`
|
||||
Digest string `json:"digest"`
|
||||
HoldDID string `json:"holdDid"`
|
||||
Config *struct {
|
||||
Digest string `json:"digest"`
|
||||
Size int64 `json:"size"`
|
||||
} `json:"config"`
|
||||
Layers []struct {
|
||||
Digest string `json:"digest"`
|
||||
Size int64 `json:"size"`
|
||||
MediaType string `json:"mediaType"`
|
||||
} `json:"layers"`
|
||||
Manifests []struct {
|
||||
Digest string `json:"digest"`
|
||||
Size int64 `json:"size"`
|
||||
} `json:"manifests"`
|
||||
CreatedAt string `json:"createdAt"`
|
||||
}
|
||||
|
||||
// CrewRecord matches the io.atcr.hold.crew record structure
|
||||
type CrewRecord struct {
|
||||
Member string `json:"member"`
|
||||
Role string `json:"role"`
|
||||
Permissions []string `json:"permissions"`
|
||||
AddedAt string `json:"addedAt"`
|
||||
}
|
||||
|
||||
// ListRecordsResponse is the response from com.atproto.repo.listRecords
|
||||
type ListRecordsResponse struct {
|
||||
Records []struct {
|
||||
URI string `json:"uri"`
|
||||
CID string `json:"cid"`
|
||||
Value json.RawMessage `json:"value"`
|
||||
} `json:"records"`
|
||||
Cursor string `json:"cursor,omitempty"`
|
||||
}
|
||||
|
||||
// UserUsage tracks storage for a single user
|
||||
type UserUsage struct {
|
||||
DID string
|
||||
Handle string
|
||||
UniqueLayers map[string]int64 // digest -> size
|
||||
TotalSize int64
|
||||
LayerCount int
|
||||
Repositories map[string]bool // unique repos
|
||||
}
|
||||
|
||||
var client = &http.Client{Timeout: 30 * time.Second}
|
||||
|
||||
// BlobInfo represents a single blob with its metadata
|
||||
type BlobInfo struct {
|
||||
Digest string
|
||||
Size int64
|
||||
MediaType string
|
||||
UserDID string
|
||||
Handle string
|
||||
}
|
||||
|
||||
func main() {
|
||||
holdURL := flag.String("hold", "https://hold01.atcr.io", "Hold service URL")
|
||||
fromManifests := flag.Bool("from-manifests", false, "Calculate usage from user manifests instead of hold layer records (more accurate but slower)")
|
||||
listBlobs := flag.Bool("list-blobs", false, "List all individual blobs sorted by size (largest first)")
|
||||
flag.Parse()
|
||||
|
||||
// Normalize URL
|
||||
baseURL := strings.TrimSuffix(*holdURL, "/")
|
||||
|
||||
fmt.Printf("Querying %s...\n\n", baseURL)
|
||||
|
||||
// First, get the hold's DID
|
||||
holdDID, err := getHoldDID(baseURL)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to get hold DID: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("Hold DID: %s\n\n", holdDID)
|
||||
|
||||
// If --list-blobs flag is set, run blob listing mode
|
||||
if *listBlobs {
|
||||
listAllBlobs(baseURL, holdDID)
|
||||
return
|
||||
}
|
||||
|
||||
var userUsage map[string]*UserUsage
|
||||
|
||||
if *fromManifests {
|
||||
fmt.Println("=== Calculating from user manifests (bypasses layer record bug) ===")
|
||||
userUsage, err = calculateFromManifests(baseURL, holdDID)
|
||||
} else {
|
||||
fmt.Println("=== Calculating from hold layer records ===")
|
||||
fmt.Println("NOTE: May undercount app-password users due to layer record bug")
|
||||
fmt.Println(" Use --from-manifests for more accurate results")
|
||||
|
||||
userUsage, err = calculateFromLayerRecords(baseURL, holdDID)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to calculate usage: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Resolve DIDs to handles
|
||||
fmt.Println("\n\nResolving DIDs to handles...")
|
||||
for _, usage := range userUsage {
|
||||
handle, err := resolveDIDToHandle(usage.DID)
|
||||
if err != nil {
|
||||
usage.Handle = usage.DID
|
||||
} else {
|
||||
usage.Handle = handle
|
||||
}
|
||||
}
|
||||
|
||||
// Convert to slice and sort by total size (descending)
|
||||
var sorted []*UserUsage
|
||||
for _, u := range userUsage {
|
||||
sorted = append(sorted, u)
|
||||
}
|
||||
sort.Slice(sorted, func(i, j int) bool {
|
||||
return sorted[i].TotalSize > sorted[j].TotalSize
|
||||
})
|
||||
|
||||
// Print report
|
||||
fmt.Println("\n========================================")
|
||||
fmt.Println("STORAGE USAGE REPORT")
|
||||
fmt.Println("========================================")
|
||||
|
||||
var grandTotal int64
|
||||
var grandLayers int
|
||||
for _, u := range sorted {
|
||||
grandTotal += u.TotalSize
|
||||
grandLayers += u.LayerCount
|
||||
}
|
||||
|
||||
fmt.Printf("\nTotal Users: %d\n", len(sorted))
|
||||
fmt.Printf("Total Unique Layers: %d\n", grandLayers)
|
||||
fmt.Printf("Total Storage: %s\n\n", humanSize(grandTotal))
|
||||
|
||||
fmt.Println("BY USER (sorted by storage):")
|
||||
fmt.Println("----------------------------------------")
|
||||
for i, u := range sorted {
|
||||
fmt.Printf("%3d. %s\n", i+1, u.Handle)
|
||||
fmt.Printf(" DID: %s\n", u.DID)
|
||||
fmt.Printf(" Unique Layers: %d\n", u.LayerCount)
|
||||
fmt.Printf(" Total Size: %s\n", humanSize(u.TotalSize))
|
||||
if len(u.Repositories) > 0 {
|
||||
var repos []string
|
||||
for r := range u.Repositories {
|
||||
repos = append(repos, r)
|
||||
}
|
||||
sort.Strings(repos)
|
||||
fmt.Printf(" Repositories: %s\n", strings.Join(repos, ", "))
|
||||
}
|
||||
pct := float64(0)
|
||||
if grandTotal > 0 {
|
||||
pct = float64(u.TotalSize) / float64(grandTotal) * 100
|
||||
}
|
||||
fmt.Printf(" Share: %.1f%%\n\n", pct)
|
||||
}
|
||||
|
||||
// Output CSV format for easy analysis
|
||||
fmt.Println("\n========================================")
|
||||
fmt.Println("CSV FORMAT")
|
||||
fmt.Println("========================================")
|
||||
fmt.Println("handle,did,unique_layers,total_bytes,total_human,repositories")
|
||||
for _, u := range sorted {
|
||||
var repos []string
|
||||
for r := range u.Repositories {
|
||||
repos = append(repos, r)
|
||||
}
|
||||
sort.Strings(repos)
|
||||
fmt.Printf("%s,%s,%d,%d,%s,\"%s\"\n", u.Handle, u.DID, u.LayerCount, u.TotalSize, humanSize(u.TotalSize), strings.Join(repos, ";"))
|
||||
}
|
||||
}
|
||||
|
||||
// listAllBlobs fetches all blobs and lists them sorted by size (largest first)
|
||||
func listAllBlobs(baseURL, holdDID string) {
|
||||
fmt.Println("=== Fetching all blob records ===")
|
||||
|
||||
layers, err := fetchAllLayerRecords(baseURL, holdDID)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to fetch layer records: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Printf("Fetched %d layer records\n", len(layers))
|
||||
|
||||
// Deduplicate by digest, keeping track of first seen user
|
||||
blobMap := make(map[string]*BlobInfo)
|
||||
for _, layer := range layers {
|
||||
if existing, exists := blobMap[layer.Digest]; exists {
|
||||
// If we have a record with a user DID and existing doesn't, prefer this one
|
||||
if existing.UserDID == "" && layer.UserDID != "" {
|
||||
existing.UserDID = layer.UserDID
|
||||
}
|
||||
continue
|
||||
}
|
||||
blobMap[layer.Digest] = &BlobInfo{
|
||||
Digest: layer.Digest,
|
||||
Size: layer.Size,
|
||||
MediaType: layer.MediaType,
|
||||
UserDID: layer.UserDID,
|
||||
}
|
||||
}
|
||||
|
||||
// Convert to slice
|
||||
var blobs []*BlobInfo
|
||||
for _, b := range blobMap {
|
||||
blobs = append(blobs, b)
|
||||
}
|
||||
|
||||
// Sort by size (largest first)
|
||||
sort.Slice(blobs, func(i, j int) bool {
|
||||
return blobs[i].Size > blobs[j].Size
|
||||
})
|
||||
|
||||
fmt.Printf("Found %d unique blobs\n\n", len(blobs))
|
||||
|
||||
// Resolve DIDs to handles (batch for efficiency)
|
||||
fmt.Println("Resolving DIDs to handles...")
|
||||
didToHandle := make(map[string]string)
|
||||
for _, b := range blobs {
|
||||
if b.UserDID == "" {
|
||||
continue
|
||||
}
|
||||
if _, exists := didToHandle[b.UserDID]; !exists {
|
||||
handle, err := resolveDIDToHandle(b.UserDID)
|
||||
if err != nil {
|
||||
didToHandle[b.UserDID] = b.UserDID
|
||||
} else {
|
||||
didToHandle[b.UserDID] = handle
|
||||
}
|
||||
}
|
||||
b.Handle = didToHandle[b.UserDID]
|
||||
}
|
||||
|
||||
// Calculate total
|
||||
var totalSize int64
|
||||
for _, b := range blobs {
|
||||
totalSize += b.Size
|
||||
}
|
||||
|
||||
// Print report
|
||||
fmt.Println("\n========================================")
|
||||
fmt.Println("BLOB SIZE REPORT (sorted largest to smallest)")
|
||||
fmt.Println("========================================")
|
||||
fmt.Printf("\nTotal Unique Blobs: %d\n", len(blobs))
|
||||
fmt.Printf("Total Storage: %s\n\n", humanSize(totalSize))
|
||||
|
||||
fmt.Println("BLOBS:")
|
||||
fmt.Println("----------------------------------------")
|
||||
for i, b := range blobs {
|
||||
pct := float64(0)
|
||||
if totalSize > 0 {
|
||||
pct = float64(b.Size) / float64(totalSize) * 100
|
||||
}
|
||||
owner := b.Handle
|
||||
if owner == "" {
|
||||
owner = "(unknown)"
|
||||
}
|
||||
fmt.Printf("%4d. %s\n", i+1, humanSize(b.Size))
|
||||
fmt.Printf(" Digest: %s\n", b.Digest)
|
||||
fmt.Printf(" Owner: %s\n", owner)
|
||||
if b.MediaType != "" {
|
||||
fmt.Printf(" Type: %s\n", b.MediaType)
|
||||
}
|
||||
fmt.Printf(" Share: %.2f%%\n\n", pct)
|
||||
}
|
||||
|
||||
// Output CSV format
|
||||
fmt.Println("\n========================================")
|
||||
fmt.Println("CSV FORMAT")
|
||||
fmt.Println("========================================")
|
||||
fmt.Println("rank,size_bytes,size_human,digest,owner,media_type,share_pct")
|
||||
for i, b := range blobs {
|
||||
pct := float64(0)
|
||||
if totalSize > 0 {
|
||||
pct = float64(b.Size) / float64(totalSize) * 100
|
||||
}
|
||||
owner := b.Handle
|
||||
if owner == "" {
|
||||
owner = ""
|
||||
}
|
||||
fmt.Printf("%d,%d,%s,%s,%s,%s,%.2f\n", i+1, b.Size, humanSize(b.Size), b.Digest, owner, b.MediaType, pct)
|
||||
}
|
||||
}
|
||||
|
||||
// calculateFromLayerRecords uses the hold's layer records (original method)
|
||||
func calculateFromLayerRecords(baseURL, holdDID string) (map[string]*UserUsage, error) {
|
||||
layers, err := fetchAllLayerRecords(baseURL, holdDID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fmt.Printf("Fetched %d layer records\n", len(layers))
|
||||
|
||||
userUsage := make(map[string]*UserUsage)
|
||||
for _, layer := range layers {
|
||||
if layer.UserDID == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
usage, exists := userUsage[layer.UserDID]
|
||||
if !exists {
|
||||
usage = &UserUsage{
|
||||
DID: layer.UserDID,
|
||||
UniqueLayers: make(map[string]int64),
|
||||
Repositories: make(map[string]bool),
|
||||
}
|
||||
userUsage[layer.UserDID] = usage
|
||||
}
|
||||
|
||||
if _, seen := usage.UniqueLayers[layer.Digest]; !seen {
|
||||
usage.UniqueLayers[layer.Digest] = layer.Size
|
||||
usage.TotalSize += layer.Size
|
||||
usage.LayerCount++
|
||||
}
|
||||
}
|
||||
|
||||
return userUsage, nil
|
||||
}
|
||||
|
||||
// calculateFromManifests queries crew members and fetches their manifests from their PDSes
|
||||
func calculateFromManifests(baseURL, holdDID string) (map[string]*UserUsage, error) {
|
||||
// Get all crew members
|
||||
crewDIDs, err := fetchCrewMembers(baseURL, holdDID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch crew: %w", err)
|
||||
}
|
||||
|
||||
// Also get captain
|
||||
captainDID, err := fetchCaptain(baseURL, holdDID)
|
||||
if err == nil && captainDID != "" {
|
||||
// Add captain to list if not already there
|
||||
found := false
|
||||
for _, d := range crewDIDs {
|
||||
if d == captainDID {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
crewDIDs = append(crewDIDs, captainDID)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("Found %d users (crew + captain)\n", len(crewDIDs))
|
||||
|
||||
userUsage := make(map[string]*UserUsage)
|
||||
|
||||
for _, did := range crewDIDs {
|
||||
fmt.Printf(" Checking manifests for %s...", did)
|
||||
|
||||
// Resolve DID to PDS
|
||||
pdsEndpoint, err := resolveDIDToPDS(did)
|
||||
if err != nil {
|
||||
fmt.Printf(" (failed to resolve PDS: %v)\n", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Fetch manifests that use this hold
|
||||
manifests, err := fetchUserManifestsForHold(pdsEndpoint, did, holdDID)
|
||||
if err != nil {
|
||||
fmt.Printf(" (failed to fetch manifests: %v)\n", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if len(manifests) == 0 {
|
||||
fmt.Printf(" 0 manifests\n")
|
||||
continue
|
||||
}
|
||||
|
||||
// Calculate unique layers across all manifests
|
||||
usage := &UserUsage{
|
||||
DID: did,
|
||||
UniqueLayers: make(map[string]int64),
|
||||
Repositories: make(map[string]bool),
|
||||
}
|
||||
|
||||
for _, m := range manifests {
|
||||
usage.Repositories[m.Repository] = true
|
||||
|
||||
// Add config blob
|
||||
if m.Config != nil {
|
||||
if _, seen := usage.UniqueLayers[m.Config.Digest]; !seen {
|
||||
usage.UniqueLayers[m.Config.Digest] = m.Config.Size
|
||||
usage.TotalSize += m.Config.Size
|
||||
usage.LayerCount++
|
||||
}
|
||||
}
|
||||
|
||||
// Add layers
|
||||
for _, layer := range m.Layers {
|
||||
if _, seen := usage.UniqueLayers[layer.Digest]; !seen {
|
||||
usage.UniqueLayers[layer.Digest] = layer.Size
|
||||
usage.TotalSize += layer.Size
|
||||
usage.LayerCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf(" %d manifests, %d unique layers, %s\n", len(manifests), usage.LayerCount, humanSize(usage.TotalSize))
|
||||
|
||||
if usage.LayerCount > 0 {
|
||||
userUsage[did] = usage
|
||||
}
|
||||
}
|
||||
|
||||
return userUsage, nil
|
||||
}
|
||||
|
||||
// fetchCrewMembers gets all crew member DIDs from the hold
|
||||
func fetchCrewMembers(baseURL, holdDID string) ([]string, error) {
|
||||
var dids []string
|
||||
seen := make(map[string]bool)
|
||||
|
||||
cursor := ""
|
||||
for {
|
||||
u := fmt.Sprintf("%s/xrpc/com.atproto.repo.listRecords", baseURL)
|
||||
params := url.Values{}
|
||||
params.Set("repo", holdDID)
|
||||
params.Set("collection", "io.atcr.hold.crew")
|
||||
params.Set("limit", "100")
|
||||
if cursor != "" {
|
||||
params.Set("cursor", cursor)
|
||||
}
|
||||
|
||||
resp, err := client.Get(u + "?" + params.Encode())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var listResp ListRecordsResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&listResp); err != nil {
|
||||
resp.Body.Close()
|
||||
return nil, err
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
for _, rec := range listResp.Records {
|
||||
var crew CrewRecord
|
||||
if err := json.Unmarshal(rec.Value, &crew); err != nil {
|
||||
continue
|
||||
}
|
||||
if crew.Member != "" && !seen[crew.Member] {
|
||||
seen[crew.Member] = true
|
||||
dids = append(dids, crew.Member)
|
||||
}
|
||||
}
|
||||
|
||||
if listResp.Cursor == "" || len(listResp.Records) < 100 {
|
||||
break
|
||||
}
|
||||
cursor = listResp.Cursor
|
||||
}
|
||||
|
||||
return dids, nil
|
||||
}
|
||||
|
||||
// fetchCaptain gets the captain DID from the hold
|
||||
func fetchCaptain(baseURL, holdDID string) (string, error) {
|
||||
u := fmt.Sprintf("%s/xrpc/com.atproto.repo.getRecord?repo=%s&collection=io.atcr.hold.captain&rkey=self",
|
||||
baseURL, url.QueryEscape(holdDID))
|
||||
|
||||
resp, err := client.Get(u)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return "", fmt.Errorf("status %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Value struct {
|
||||
Owner string `json:"owner"`
|
||||
} `json:"value"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return result.Value.Owner, nil
|
||||
}
|
||||
|
||||
// fetchUserManifestsForHold fetches all manifests from a user's PDS that use the specified hold
|
||||
func fetchUserManifestsForHold(pdsEndpoint, userDID, holdDID string) ([]ManifestRecord, error) {
|
||||
var manifests []ManifestRecord
|
||||
cursor := ""
|
||||
|
||||
for {
|
||||
u := fmt.Sprintf("%s/xrpc/com.atproto.repo.listRecords", pdsEndpoint)
|
||||
params := url.Values{}
|
||||
params.Set("repo", userDID)
|
||||
params.Set("collection", "io.atcr.manifest")
|
||||
params.Set("limit", "100")
|
||||
if cursor != "" {
|
||||
params.Set("cursor", cursor)
|
||||
}
|
||||
|
||||
resp, err := client.Get(u + "?" + params.Encode())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
resp.Body.Close()
|
||||
return nil, fmt.Errorf("status %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var listResp ListRecordsResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&listResp); err != nil {
|
||||
resp.Body.Close()
|
||||
return nil, err
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
for _, rec := range listResp.Records {
|
||||
var m ManifestRecord
|
||||
if err := json.Unmarshal(rec.Value, &m); err != nil {
|
||||
continue
|
||||
}
|
||||
// Only include manifests for this hold
|
||||
if m.HoldDID == holdDID {
|
||||
manifests = append(manifests, m)
|
||||
}
|
||||
}
|
||||
|
||||
if listResp.Cursor == "" || len(listResp.Records) < 100 {
|
||||
break
|
||||
}
|
||||
cursor = listResp.Cursor
|
||||
}
|
||||
|
||||
return manifests, nil
|
||||
}
|
||||
|
||||
// getHoldDID fetches the hold's DID from /.well-known/atproto-did
|
||||
func getHoldDID(baseURL string) (string, error) {
|
||||
resp, err := http.Get(baseURL + "/.well-known/atproto-did")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return "", fmt.Errorf("unexpected status: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(body)), nil
|
||||
}
|
||||
|
||||
// fetchAllLayerRecords fetches all layer records with pagination
|
||||
func fetchAllLayerRecords(baseURL, holdDID string) ([]LayerRecord, error) {
|
||||
var allLayers []LayerRecord
|
||||
cursor := ""
|
||||
limit := 100
|
||||
|
||||
for {
|
||||
u := fmt.Sprintf("%s/xrpc/com.atproto.repo.listRecords", baseURL)
|
||||
params := url.Values{}
|
||||
params.Set("repo", holdDID)
|
||||
params.Set("collection", "io.atcr.hold.layer")
|
||||
params.Set("limit", fmt.Sprintf("%d", limit))
|
||||
if cursor != "" {
|
||||
params.Set("cursor", cursor)
|
||||
}
|
||||
|
||||
fullURL := u + "?" + params.Encode()
|
||||
fmt.Printf(" Fetching: %s\n", fullURL)
|
||||
|
||||
resp, err := client.Get(fullURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("request failed: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
return nil, fmt.Errorf("unexpected status %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var listResp ListRecordsResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&listResp); err != nil {
|
||||
resp.Body.Close()
|
||||
return nil, fmt.Errorf("decode failed: %w", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
for _, rec := range listResp.Records {
|
||||
var layer LayerRecord
|
||||
if err := json.Unmarshal(rec.Value, &layer); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to parse layer record: %v\n", err)
|
||||
continue
|
||||
}
|
||||
allLayers = append(allLayers, layer)
|
||||
}
|
||||
|
||||
fmt.Printf(" Got %d records (total: %d)\n", len(listResp.Records), len(allLayers))
|
||||
|
||||
if listResp.Cursor == "" || len(listResp.Records) < limit {
|
||||
break
|
||||
}
|
||||
cursor = listResp.Cursor
|
||||
}
|
||||
|
||||
return allLayers, nil
|
||||
}
|
||||
|
||||
// resolveDIDToHandle resolves a DID to a handle using the PLC directory or did:web
|
||||
func resolveDIDToHandle(did string) (string, error) {
|
||||
if strings.HasPrefix(did, "did:web:") {
|
||||
return strings.TrimPrefix(did, "did:web:"), nil
|
||||
}
|
||||
|
||||
if strings.HasPrefix(did, "did:plc:") {
|
||||
plcURL := "https://plc.directory/" + did
|
||||
resp, err := client.Get(plcURL)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("PLC query failed: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return "", fmt.Errorf("PLC returned status %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var plcDoc struct {
|
||||
AlsoKnownAs []string `json:"alsoKnownAs"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&plcDoc); err != nil {
|
||||
return "", fmt.Errorf("failed to parse PLC response: %w", err)
|
||||
}
|
||||
|
||||
for _, aka := range plcDoc.AlsoKnownAs {
|
||||
if strings.HasPrefix(aka, "at://") {
|
||||
return strings.TrimPrefix(aka, "at://"), nil
|
||||
}
|
||||
}
|
||||
|
||||
return did, nil
|
||||
}
|
||||
|
||||
return did, nil
|
||||
}
|
||||
|
||||
// resolveDIDToPDS resolves a DID to its PDS endpoint
|
||||
func resolveDIDToPDS(did string) (string, error) {
|
||||
if strings.HasPrefix(did, "did:web:") {
|
||||
// did:web:example.com -> https://example.com
|
||||
// did:web:host%3A8080 -> http://host:8080
|
||||
domain := strings.TrimPrefix(did, "did:web:")
|
||||
domain = strings.ReplaceAll(domain, "%3A", ":")
|
||||
scheme := "https"
|
||||
if strings.Contains(domain, ":") {
|
||||
scheme = "http"
|
||||
}
|
||||
return scheme + "://" + domain, nil
|
||||
}
|
||||
|
||||
if strings.HasPrefix(did, "did:plc:") {
|
||||
plcURL := "https://plc.directory/" + did
|
||||
resp, err := client.Get(plcURL)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("PLC query failed: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return "", fmt.Errorf("PLC returned status %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var plcDoc struct {
|
||||
Service []struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"`
|
||||
ServiceEndpoint string `json:"serviceEndpoint"`
|
||||
} `json:"service"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&plcDoc); err != nil {
|
||||
return "", fmt.Errorf("failed to parse PLC response: %w", err)
|
||||
}
|
||||
|
||||
for _, svc := range plcDoc.Service {
|
||||
if svc.Type == "AtprotoPersonalDataServer" {
|
||||
return svc.ServiceEndpoint, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("no PDS found in DID document")
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("unsupported DID method")
|
||||
}
|
||||
|
||||
// humanSize converts bytes to human-readable format
|
||||
func humanSize(bytes int64) string {
|
||||
const (
|
||||
KB = 1024
|
||||
MB = 1024 * KB
|
||||
GB = 1024 * MB
|
||||
TB = 1024 * GB
|
||||
)
|
||||
|
||||
switch {
|
||||
case bytes >= TB:
|
||||
return fmt.Sprintf("%.2f TB", float64(bytes)/TB)
|
||||
case bytes >= GB:
|
||||
return fmt.Sprintf("%.2f GB", float64(bytes)/GB)
|
||||
case bytes >= MB:
|
||||
return fmt.Sprintf("%.2f MB", float64(bytes)/MB)
|
||||
case bytes >= KB:
|
||||
return fmt.Sprintf("%.2f KB", float64(bytes)/KB)
|
||||
default:
|
||||
return fmt.Sprintf("%d B", bytes)
|
||||
}
|
||||
}
|
||||
166
config-appview.example.yaml
Normal file
166
config-appview.example.yaml
Normal file
@@ -0,0 +1,166 @@
|
||||
# ATCR AppView Configuration
|
||||
# Generated with defaults — edit as needed.
|
||||
|
||||
# Configuration format version.
|
||||
version: "0.1"
|
||||
# Log level: debug, info, warn, error.
|
||||
log_level: info
|
||||
# Remote log shipping settings.
|
||||
log_shipper:
|
||||
# Log shipping backend: "victoria", "opensearch", or "loki". Empty disables shipping.
|
||||
backend: ""
|
||||
# Remote log service endpoint, e.g. "http://victorialogs:9428".
|
||||
url: ""
|
||||
# Number of log entries to buffer before flushing to the remote service.
|
||||
batch_size: 100
|
||||
# Maximum time between flushes, even if batch is not full.
|
||||
flush_interval: 5s
|
||||
# Basic auth username for the log service (optional).
|
||||
username: ""
|
||||
# Basic auth password for the log service (optional).
|
||||
password: ""
|
||||
# HTTP server and identity settings.
|
||||
server:
|
||||
# Listen address, e.g. ":5000" or "127.0.0.1:5000".
|
||||
addr: :5000
|
||||
# Public-facing URL for OAuth callbacks and JWT realm. Auto-detected if empty.
|
||||
base_url: ""
|
||||
# DID of the hold service for blob storage, e.g. "did:web:hold01.atcr.io" (REQUIRED).
|
||||
default_hold_did: ""
|
||||
# Allows HTTP (not HTTPS) for DID resolution and uses transition:generic OAuth scope.
|
||||
test_mode: false
|
||||
# Path to P-256 private key for OAuth client authentication. Auto-generated on first run.
|
||||
oauth_key_path: /var/lib/atcr/oauth/client.key
|
||||
# Display name shown on OAuth authorization screens.
|
||||
client_name: AT Container Registry
|
||||
# Short name used in page titles and browser tabs.
|
||||
client_short_name: ATCR
|
||||
# Separate domains for OCI registry API (e.g. ["buoy.cr"]). First is primary. Browser visits redirect to BaseURL.
|
||||
registry_domains: []
|
||||
# DIDs of holds this appview manages billing for. Tier updates are pushed to these holds.
|
||||
managed_holds:
|
||||
- did:web:172.28.0.3%3A8080
|
||||
# Web UI settings.
|
||||
ui:
|
||||
# SQLite/libSQL database for OAuth sessions, stars, pull counts, and device approvals.
|
||||
database_path: /var/lib/atcr/ui.db
|
||||
# Visual theme name (e.g. "seamark"). Empty uses default atcr.io branding.
|
||||
theme: "seamark"
|
||||
# libSQL sync URL (libsql://...). Works with Turso cloud or self-hosted libsql-server. Leave empty for local-only SQLite.
|
||||
libsql_sync_url: ""
|
||||
# Auth token for libSQL sync. Required if libsql_sync_url is set.
|
||||
libsql_auth_token: ""
|
||||
# How often to sync with remote libSQL server. Default: 60s.
|
||||
libsql_sync_interval: 1m0s
|
||||
# Health check and cache settings.
|
||||
health:
|
||||
# How long to cache hold health check results.
|
||||
cache_ttl: 15m0s
|
||||
# How often to refresh hold health checks.
|
||||
check_interval: 15m0s
|
||||
# ATProto Jetstream event stream settings.
|
||||
jetstream:
|
||||
# Jetstream WebSocket endpoints, tried in order on failure.
|
||||
urls:
|
||||
- wss://jetstream2.us-west.bsky.network/subscribe
|
||||
- wss://jetstream1.us-west.bsky.network/subscribe
|
||||
- wss://jetstream2.us-east.bsky.network/subscribe
|
||||
- wss://jetstream1.us-east.bsky.network/subscribe
|
||||
# Sync existing records from PDS on startup.
|
||||
backfill_enabled: true
|
||||
# How often to re-run backfill to catch missed events. Set to 0 to only backfill on startup.
|
||||
backfill_interval: 24h0m0s
|
||||
# Relay endpoints for backfill, tried in order on failure.
|
||||
relay_endpoints:
|
||||
- https://relay1.us-east.bsky.network
|
||||
- https://relay1.us-west.bsky.network
|
||||
- https://zlay.waow.tech
|
||||
# JWT authentication settings.
|
||||
auth:
|
||||
# RSA private key for signing registry JWTs issued to Docker clients.
|
||||
key_path: /var/lib/atcr/auth/private-key.pem
|
||||
# X.509 certificate matching the JWT signing key.
|
||||
cert_path: /var/lib/atcr/auth/private-key.crt
|
||||
# Credential helper download settings.
|
||||
credential_helper:
|
||||
# Tangled repository URL for credential helper downloads.
|
||||
tangled_repo: ""
|
||||
# Legal page customization for self-hosted instances.
|
||||
legal:
|
||||
# Organization name for Terms of Service and Privacy Policy. Defaults to server.client_name.
|
||||
company_name: ""
|
||||
# Governing law jurisdiction for legal terms.
|
||||
jurisdiction: ""
|
||||
# Stripe billing integration (requires -tags billing build).
|
||||
billing:
|
||||
# Stripe secret key. Can also be set via STRIPE_SECRET_KEY env var (takes precedence). Billing is enabled automatically when set.
|
||||
stripe_secret_key: ""
|
||||
# Stripe webhook signing secret. Can also be set via STRIPE_WEBHOOK_SECRET env var (takes precedence).
|
||||
webhook_secret: ""
|
||||
# ISO 4217 currency code (e.g. "usd").
|
||||
currency: usd
|
||||
# Redirect URL after successful checkout. Use {base_url} placeholder.
|
||||
success_url: '{base_url}/settings#storage'
|
||||
# Redirect URL after cancelled checkout. Use {base_url} placeholder.
|
||||
cancel_url: '{base_url}/settings#storage'
|
||||
# Subscription tiers ordered by rank (lowest to highest).
|
||||
tiers:
|
||||
- # Tier name. Position in list determines rank (0-based).
|
||||
name: free
|
||||
# Short description shown on the plan card.
|
||||
description: Get started with basic storage
|
||||
# List of features included in this tier.
|
||||
features: []
|
||||
# Stripe price ID for monthly billing. Empty = free tier.
|
||||
stripe_price_monthly: ""
|
||||
# Stripe price ID for yearly billing.
|
||||
stripe_price_yearly: ""
|
||||
# Maximum webhooks for this tier (-1 = unlimited).
|
||||
max_webhooks: 1
|
||||
# Allow all webhook trigger types (not just first-scan).
|
||||
webhook_all_triggers: false
|
||||
supporter_badge: false
|
||||
- # Tier name. Position in list determines rank (0-based).
|
||||
name: Supporter
|
||||
# Short description shown on the plan card.
|
||||
description: Get started with basic storage
|
||||
# List of features included in this tier.
|
||||
features: []
|
||||
# Stripe price ID for monthly billing. Empty = free tier.
|
||||
stripe_price_monthly: ""
|
||||
# Stripe price ID for yearly billing.
|
||||
stripe_price_yearly: "price_1SmK1mRROAC4bYmSwhTQ7RY9"
|
||||
# Maximum webhooks for this tier (-1 = unlimited).
|
||||
max_webhooks: 1
|
||||
# Allow all webhook trigger types (not just first-scan).
|
||||
webhook_all_triggers: false
|
||||
supporter_badge: true
|
||||
- # Tier name. Position in list determines rank (0-based).
|
||||
name: bosun
|
||||
# Short description shown on the plan card.
|
||||
description: More storage with scan-on-push
|
||||
# List of features included in this tier.
|
||||
features: []
|
||||
# Stripe price ID for monthly billing. Empty = free tier.
|
||||
stripe_price_monthly: "price_1SmK4QRROAC4bYmSxpr35HUl"
|
||||
# Stripe price ID for yearly billing.
|
||||
stripe_price_yearly: "price_1SmJuLRROAC4bYmSUgVCwZWo"
|
||||
# Maximum webhooks for this tier (-1 = unlimited).
|
||||
max_webhooks: 10
|
||||
# Allow all webhook trigger types (not just first-scan).
|
||||
webhook_all_triggers: true
|
||||
supporter_badge: true
|
||||
# - # Tier name. Position in list determines rank (0-based).
|
||||
# name: quartermaster
|
||||
# # Short description shown on the plan card.
|
||||
# description: Maximum storage for power users
|
||||
# # List of features included in this tier.
|
||||
# features: []
|
||||
# # Stripe price ID for monthly billing. Empty = free tier.
|
||||
# stripe_price_monthly: price_xxx
|
||||
# # Stripe price ID for yearly billing.
|
||||
# stripe_price_yearly: price_yyy
|
||||
# # Maximum webhooks for this tier (-1 = unlimited).
|
||||
# max_webhooks: -1
|
||||
# # Allow all webhook trigger types (not just first-scan).
|
||||
# webhook_all_triggers: true
|
||||
137
config-hold.example.yaml
Normal file
137
config-hold.example.yaml
Normal file
@@ -0,0 +1,137 @@
|
||||
# ATCR Hold Service Configuration
|
||||
# Generated with defaults — edit as needed.
|
||||
|
||||
# Configuration format version.
|
||||
version: "0.1"
|
||||
# Log level: debug, info, warn, error.
|
||||
log_level: info
|
||||
# Remote log shipping settings.
|
||||
log_shipper:
|
||||
# Log shipping backend: "victoria", "opensearch", or "loki". Empty disables shipping.
|
||||
backend: ""
|
||||
# Remote log service endpoint, e.g. "http://victorialogs:9428".
|
||||
url: ""
|
||||
# Number of log entries to buffer before flushing to the remote service.
|
||||
batch_size: 100
|
||||
# Maximum time between flushes, even if batch is not full.
|
||||
flush_interval: 5s
|
||||
# Basic auth username for the log service (optional).
|
||||
username: ""
|
||||
# Basic auth password for the log service (optional).
|
||||
password: ""
|
||||
# S3-compatible blob storage settings.
|
||||
storage:
|
||||
# S3-compatible access key (AWS, Storj, Minio, UpCloud).
|
||||
access_key: ""
|
||||
# S3-compatible secret key.
|
||||
secret_key: ""
|
||||
# S3 region, e.g. "us-east-1". Used for request signing.
|
||||
region: us-east-1
|
||||
# S3 bucket for blob storage (REQUIRED). Must already exist.
|
||||
bucket: ""
|
||||
# Custom S3 endpoint for non-AWS providers (e.g. "https://gateway.storjshare.io").
|
||||
endpoint: ""
|
||||
# CDN pull zone URL for downloads. When set, presigned GET/HEAD URLs use this host instead of the S3 endpoint. Uploads and API calls still use the S3 endpoint.
|
||||
pull_zone: ""
|
||||
# HTTP server and identity settings.
|
||||
server:
|
||||
# Listen address, e.g. ":8080" or "0.0.0.0:8080".
|
||||
addr: :8080
|
||||
# Externally reachable URL used for did:web identity (REQUIRED), e.g. "https://hold.example.com".
|
||||
public_url: ""
|
||||
# Allow unauthenticated blob reads. If false, readers need crew membership.
|
||||
public: false
|
||||
# DID of successor hold for migration. Appview redirects all requests to the successor.
|
||||
successor: ""
|
||||
# Use localhost for OAuth redirects during development.
|
||||
test_mode: false
|
||||
# Request crawl from this relay on startup to make the embedded PDS discoverable.
|
||||
relay_endpoint: ""
|
||||
# DID of the appview this hold is managed by (e.g. did:web:atcr.io). Resolved via did:web for URL and public key.
|
||||
appview_did: did:web:172.28.0.2%3A5000
|
||||
# Read timeout for HTTP requests.
|
||||
read_timeout: 5m0s
|
||||
# Write timeout for HTTP requests.
|
||||
write_timeout: 5m0s
|
||||
# Auto-registration and bootstrap settings.
|
||||
registration:
|
||||
# DID of the hold captain. If set, auto-creates captain and profile records on startup.
|
||||
owner_did: ""
|
||||
# Create a wildcard crew record allowing any authenticated user to join.
|
||||
allow_all_crew: false
|
||||
# URL to fetch avatar image from during bootstrap.
|
||||
profile_avatar_url: https://atcr.io/web-app-manifest-192x192.png
|
||||
# Bluesky profile display name. Synced on every startup.
|
||||
profile_display_name: Cargo Hold
|
||||
# Bluesky profile description. Synced on every startup.
|
||||
profile_description: ahoy from the cargo hold
|
||||
# Post to Bluesky when users push images. Synced to captain record on startup.
|
||||
enable_bluesky_posts: false
|
||||
# Deployment region, auto-detected from cloud metadata or S3 config.
|
||||
region: ""
|
||||
# Embedded PDS database settings.
|
||||
database:
|
||||
# Directory for the embedded PDS database (carstore + SQLite).
|
||||
path: /var/lib/atcr-hold
|
||||
# PDS signing key path. Defaults to {database.path}/signing.key.
|
||||
key_path: ""
|
||||
# DID method: 'web' (default, derived from public_url) or 'plc' (registered with PLC directory).
|
||||
did_method: web
|
||||
# Explicit DID for this hold. If set with did_method 'plc', adopts this identity instead of creating new. Use for recovery/migration.
|
||||
did: ""
|
||||
# PLC directory URL. Only used when did_method is 'plc'. Default: https://plc.directory
|
||||
plc_directory_url: https://plc.directory
|
||||
# Rotation key for did:plc in multibase format (starting with 'z'). Generate with: goat key generate. Supports K-256 and P-256 curves. Controls DID identity (separate from signing key).
|
||||
rotation_key: ""
|
||||
# libSQL sync URL (libsql://...). Works with Turso cloud, Bunny DB, or self-hosted libsql-server. Leave empty for local-only SQLite.
|
||||
libsql_sync_url: ""
|
||||
# Auth token for libSQL sync. Required if libsql_sync_url is set.
|
||||
libsql_auth_token: ""
|
||||
# How often to sync with remote libSQL server. Default: 60s.
|
||||
libsql_sync_interval: 1m0s
|
||||
# Admin panel settings.
|
||||
admin:
|
||||
# Enable the web-based admin panel for crew and storage management.
|
||||
enabled: true
|
||||
# Garbage collection settings.
|
||||
gc:
|
||||
# Enable nightly garbage collection of orphaned blobs and records.
|
||||
enabled: false
|
||||
# Storage quota tiers. Empty disables quota enforcement.
|
||||
quota:
|
||||
# Quota tiers ordered by rank (lowest to highest). Position determines rank.
|
||||
tiers:
|
||||
- # Tier name used as the key for crew assignments.
|
||||
name: free
|
||||
# Storage quota limit (e.g. "5GB", "50GB", "1TB").
|
||||
quota: 5GB
|
||||
# Trigger vulnerability scan immediately on push. When false, images are still scanned by background scheduling.
|
||||
scan_on_push: false
|
||||
- # Tier name used as the key for crew assignments.
|
||||
name: deckhand
|
||||
# Storage quota limit (e.g. "5GB", "50GB", "1TB").
|
||||
quota: 5GB
|
||||
# Trigger vulnerability scan immediately on push. When false, images are still scanned by background scheduling.
|
||||
scan_on_push: false
|
||||
- # Tier name used as the key for crew assignments.
|
||||
name: bosun
|
||||
# Storage quota limit (e.g. "5GB", "50GB", "1TB").
|
||||
quota: 50GB
|
||||
# Trigger vulnerability scan immediately on push. When false, images are still scanned by background scheduling.
|
||||
scan_on_push: true
|
||||
- # Tier name used as the key for crew assignments.
|
||||
name: quartermaster
|
||||
# Storage quota limit (e.g. "5GB", "50GB", "1TB").
|
||||
quota: 100GB
|
||||
# Trigger vulnerability scan immediately on push. When false, images are still scanned by background scheduling.
|
||||
scan_on_push: true
|
||||
# Default tier assignment for new crew members.
|
||||
defaults:
|
||||
# Tier assigned to new crew members who don't have an explicit tier.
|
||||
new_crew_tier: deckhand
|
||||
# Vulnerability scanner settings. Empty disables scanning.
|
||||
scanner:
|
||||
# Shared secret for scanner WebSocket auth. Empty disables scanning.
|
||||
secret: ""
|
||||
# Minimum interval between re-scans of the same manifest. When set, the hold proactively scans manifests when the scanner is idle. Default: 168h (7 days). Set to 0 to disable.
|
||||
rescan_interval: 168h0m0s
|
||||
@@ -1,242 +0,0 @@
|
||||
# ATCR Production Environment Configuration
|
||||
# Copy this file to .env and fill in your values
|
||||
#
|
||||
# Usage:
|
||||
# 1. cp deploy/.env.prod.template .env
|
||||
# 2. Edit .env with your configuration
|
||||
# 3. systemctl restart atcr
|
||||
#
|
||||
# NOTE: This file is loaded by docker-compose.prod.yml
|
||||
|
||||
# ==============================================================================
|
||||
# Domain Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Main AppView domain (registry API + web UI)
|
||||
# REQUIRED: Update with your domain
|
||||
APPVIEW_DOMAIN=atcr.io
|
||||
|
||||
# ==============================================================================
|
||||
# Hold Service Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Hold service domain (REQUIRED)
|
||||
# The hostname where the hold service will be accessible
|
||||
# Used by docker-compose.prod.yml to derive:
|
||||
# - HOLD_PUBLIC_URL: https://${HOLD_DOMAIN}
|
||||
# - ATCR_DEFAULT_HOLD_DID: did:web:${HOLD_DOMAIN}
|
||||
# Example: hold01.atcr.io
|
||||
HOLD_DOMAIN=hold01.atcr.io
|
||||
|
||||
# Your ATProto DID (REQUIRED for hold registration)
|
||||
# Get your DID from: https://bsky.social/xrpc/com.atproto.identity.resolveHandle?handle=yourhandle.bsky.social
|
||||
# Example: did:plc:abc123xyz789
|
||||
HOLD_OWNER=did:plc:pddp4xt5lgnv2qsegbzzs4xg
|
||||
|
||||
# Directory path for embedded PDS carstore (SQLite database)
|
||||
# Default: /var/lib/atcr-hold
|
||||
# If empty, embedded PDS is disabled
|
||||
#
|
||||
# Note: This should be a directory path, NOT a file path
|
||||
# Carstore creates db.sqlite3 inside this directory
|
||||
#
|
||||
# The embedded PDS makes the hold a proper ATProto user with:
|
||||
# - did:web identity (derived from HOLD_DOMAIN)
|
||||
# - DID document at /.well-known/did.json
|
||||
# - XRPC endpoints for crew management
|
||||
# - ATProto blob endpoints (wraps existing presigned URL logic)
|
||||
#
|
||||
# Example: For HOLD_DOMAIN=hold01.atcr.io, the hold becomes did:web:hold01.atcr.io
|
||||
HOLD_DATABASE_DIR=/var/lib/atcr-hold
|
||||
|
||||
# Path to signing key (auto-generated on first run if missing)
|
||||
# Default: {HOLD_DATABASE_DIR}/signing.key
|
||||
# HOLD_KEY_PATH=/var/lib/atcr-hold/signing.key
|
||||
|
||||
# Allow public blob reads (pulls) without authentication
|
||||
# - true: Anyone can pull images (read-only)
|
||||
# - false: Only authenticated users can pull
|
||||
# Default: false (private)
|
||||
HOLD_PUBLIC=false
|
||||
|
||||
# Allow all authenticated users to write to this hold
|
||||
# This setting controls write permissions for authenticated ATCR users
|
||||
#
|
||||
# - true: Any authenticated ATCR user can push images (treat all as crew)
|
||||
# Useful for shared/community holds where you want to allow
|
||||
# multiple users to push without explicit crew membership.
|
||||
# Users must still authenticate via ATProto OAuth.
|
||||
#
|
||||
# - false: Only hold owner and explicit crew members can push (default)
|
||||
# Write access requires io.atcr.hold.crew record in owner's PDS.
|
||||
# Most secure option for production holds.
|
||||
#
|
||||
# Read permissions are controlled by HOLD_PUBLIC (above).
|
||||
#
|
||||
# Security model:
|
||||
# Read: HOLD_PUBLIC=true → anonymous + authenticated users
|
||||
# HOLD_PUBLIC=false → authenticated users only
|
||||
# Write: HOLD_ALLOW_ALL_CREW=true → all authenticated users
|
||||
# HOLD_ALLOW_ALL_CREW=false → owner + crew only (verified via PDS)
|
||||
#
|
||||
# Use cases:
|
||||
# - Public registry: HOLD_PUBLIC=true, HOLD_ALLOW_ALL_CREW=true
|
||||
# - ATProto users only: HOLD_PUBLIC=false, HOLD_ALLOW_ALL_CREW=true
|
||||
# - Private hold (default): HOLD_PUBLIC=false, HOLD_ALLOW_ALL_CREW=false
|
||||
#
|
||||
# Default: false
|
||||
HOLD_ALLOW_ALL_CREW=false
|
||||
|
||||
# Enable Bluesky posts when manifests are pushed
|
||||
# When enabled, the hold service creates Bluesky posts announcing new container
|
||||
# image pushes. Posts include image name, tag, size, and layer count.
|
||||
#
|
||||
# - true: Create Bluesky posts for manifest uploads
|
||||
# - false: Silent operation (no Bluesky posts)
|
||||
#
|
||||
# Note: This requires the hold owner to have OAuth credentials for posting.
|
||||
# See docs/BLUESKY_MANIFEST_POSTS.md for setup instructions.
|
||||
#
|
||||
# Default: false
|
||||
HOLD_BLUESKY_POSTS_ENABLED=true
|
||||
|
||||
# ==============================================================================
|
||||
# S3/UpCloud Object Storage Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Storage driver type
|
||||
# Options: s3, filesystem
|
||||
# Default: s3
|
||||
STORAGE_DRIVER=s3
|
||||
|
||||
# S3 Access Credentials
|
||||
# Get these from UpCloud Object Storage console
|
||||
AWS_ACCESS_KEY_ID=
|
||||
AWS_SECRET_ACCESS_KEY=
|
||||
|
||||
# S3 Region (for distribution S3 driver)
|
||||
# For third-party S3 providers (UpCloud, Storj, Minio), this value is ignored
|
||||
# when S3_ENDPOINT is set, but must be a valid AWS region to pass validation.
|
||||
# Default: us-east-1
|
||||
AWS_REGION=us-east-1
|
||||
|
||||
# S3 Bucket Name
|
||||
# Create this bucket in UpCloud Object Storage
|
||||
# Example: atcr-blobs
|
||||
S3_BUCKET=atcr
|
||||
|
||||
# S3 Endpoint
|
||||
# Get this from UpCloud Console → Storage → Object Storage → Your bucket → "S3 endpoint"
|
||||
# Format: https://[bucket-id].upcloudobjects.com
|
||||
# Example: https://6vmss.upcloudobjects.com
|
||||
#
|
||||
# NOTE: Use the bucket-specific endpoint, NOT a custom domain
|
||||
# Custom domains break presigned URL generation
|
||||
S3_ENDPOINT=https://6vmss.upcloudobjects.com
|
||||
|
||||
# ==============================================================================
|
||||
# AppView Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Default hold service DID (derived from HOLD_DOMAIN in docker-compose.prod.yml)
|
||||
# Uncomment to override if you want to use a different hold service as the default
|
||||
# ATCR_DEFAULT_HOLD_DID=did:web:some-other-hold.example.com
|
||||
|
||||
# JWT token expiration in seconds
|
||||
# Default: 300 (5 minutes)
|
||||
ATCR_TOKEN_EXPIRATION=300
|
||||
|
||||
# OAuth client display name (shown in authorization screens)
|
||||
# Default: AT Container Registry
|
||||
# ATCR_CLIENT_NAME=AT Container Registry
|
||||
|
||||
# Enable web UI
|
||||
# Default: true
|
||||
ATCR_UI_ENABLED=true
|
||||
|
||||
# Skip database migrations on startup
|
||||
# Default: false (migrations are applied on startup)
|
||||
# Set to "true" only for testing or when migrations are managed externally
|
||||
# Production: Keep as "false" to ensure migrations are applied
|
||||
SKIP_DB_MIGRATIONS=false
|
||||
|
||||
# ==============================================================================
|
||||
# Logging Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Log level: debug, info, warn, error
|
||||
# Default: info
|
||||
ATCR_LOG_LEVEL=debug
|
||||
|
||||
# Log formatter: text, json
|
||||
# Default: text
|
||||
ATCR_LOG_FORMATTER=text
|
||||
|
||||
# ==============================================================================
|
||||
# Jetstream Configuration (ATProto event streaming)
|
||||
# ==============================================================================
|
||||
|
||||
# Jetstream WebSocket URL for real-time ATProto events
|
||||
# Default: wss://jetstream2.us-west.bsky.network/subscribe
|
||||
JETSTREAM_URL=wss://jetstream2.us-west.bsky.network/subscribe
|
||||
|
||||
# Enable backfill worker to sync historical records
|
||||
# Default: true (recommended for production)
|
||||
ATCR_BACKFILL_ENABLED=true
|
||||
|
||||
# ATProto relay endpoint for backfill sync API
|
||||
# Default: https://relay1.us-east.bsky.network
|
||||
ATCR_RELAY_ENDPOINT=https://relay1.us-east.bsky.network
|
||||
|
||||
# Backfill interval
|
||||
# Examples: 30m, 1h, 2h, 24h
|
||||
# Default: 1h
|
||||
ATCR_BACKFILL_INTERVAL=1h
|
||||
|
||||
# ==============================================================================
|
||||
# Optional: Filesystem Storage (alternative to S3)
|
||||
# ==============================================================================
|
||||
|
||||
# If using filesystem storage instead of S3:
|
||||
# 1. Uncomment these lines
|
||||
# 2. Comment out all S3 variables above
|
||||
# 3. Set STORAGE_DRIVER=filesystem
|
||||
|
||||
# STORAGE_DRIVER=filesystem
|
||||
# STORAGE_ROOT_DIR=/var/lib/atcr/hold
|
||||
|
||||
# ==============================================================================
|
||||
# Advanced Configuration
|
||||
# ==============================================================================
|
||||
|
||||
# Override service name (defaults to APPVIEW_DOMAIN)
|
||||
# ATCR_SERVICE_NAME=atcr.io
|
||||
|
||||
# Debug listen address (optional - for pprof debugging)
|
||||
# ATCR_DEBUG_ADDR=:5001
|
||||
|
||||
# ==============================================================================
|
||||
# CHECKLIST
|
||||
# ==============================================================================
|
||||
#
|
||||
# Before starting ATCR, ensure you have:
|
||||
#
|
||||
# ☐ Set APPVIEW_DOMAIN (e.g., atcr.io)
|
||||
# ☐ Set HOLD_DOMAIN (e.g., hold01.atcr.io)
|
||||
# ☐ Set HOLD_OWNER (your ATProto DID)
|
||||
# ☐ Set HOLD_DATABASE_DIR (default: /var/lib/atcr-hold) - enables embedded PDS
|
||||
# ☐ Set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
|
||||
# ☐ Set S3_BUCKET (created in UpCloud Object Storage)
|
||||
# ☐ Set S3_ENDPOINT (UpCloud bucket endpoint, e.g., https://6vmss.upcloudobjects.com)
|
||||
# ☐ Configured DNS records:
|
||||
# - A record: atcr.io → server IP
|
||||
# - A record: hold01.atcr.io → server IP
|
||||
# - CNAME: blobs.atcr.io → [bucket].upcloudobjects.com
|
||||
# ☐ Disabled Cloudflare proxy (gray cloud, not orange)
|
||||
# ☐ Waited for DNS propagation (check with: dig atcr.io)
|
||||
#
|
||||
# After starting:
|
||||
# ☐ Complete hold OAuth registration (run: /opt/atcr/get-hold-oauth.sh)
|
||||
# ☐ Verify hold PDS: curl https://hold01.atcr.io/.well-known/did.json
|
||||
# ☐ Test registry: docker pull atcr.io/test/image
|
||||
# ☐ Monitor logs: /opt/atcr/logs.sh
|
||||
@@ -243,6 +243,26 @@ docker pull atcr.io/yourhandle/test:latest
|
||||
docker logs -f atcr-appview
|
||||
```
|
||||
|
||||
#### Enable debug logging
|
||||
|
||||
Toggle debug logging at runtime without restarting the container:
|
||||
|
||||
```bash
|
||||
# Enable debug logging (auto-reverts after 30 minutes)
|
||||
docker kill -s SIGUSR1 atcr-appview
|
||||
docker kill -s SIGUSR1 atcr-hold
|
||||
|
||||
# Manually disable before timeout
|
||||
docker kill -s SIGUSR1 atcr-appview
|
||||
```
|
||||
|
||||
When toggled, you'll see:
|
||||
```
|
||||
level=INFO msg="Log level changed" from=INFO to=DEBUG trigger=SIGUSR1 auto_revert_in=30m0s
|
||||
```
|
||||
|
||||
**Note:** Despite the command name, `docker kill -s SIGUSR1` does NOT stop the container. It sends a user-defined signal that the application handles to toggle debug mode.
|
||||
|
||||
#### Restart services
|
||||
|
||||
```bash
|
||||
@@ -398,10 +418,10 @@ Presigned URLs should eliminate hold bandwidth. If seeing high usage:
|
||||
docker logs atcr-hold | grep -i presigned
|
||||
```
|
||||
|
||||
**Check S3 driver:**
|
||||
**Check S3 configuration:**
|
||||
```bash
|
||||
docker exec atcr-hold env | grep STORAGE_DRIVER
|
||||
# Should be: s3 (not filesystem)
|
||||
docker exec atcr-hold env | grep S3_BUCKET
|
||||
# Should show your S3 bucket name
|
||||
```
|
||||
|
||||
**Verify direct S3 access:**
|
||||
@@ -465,6 +485,6 @@ docker run --rm \
|
||||
|
||||
## Support
|
||||
|
||||
- Documentation: https://tangled.org/@evan.jarrett.net/at-container-registry
|
||||
- Issues: https://tangled.org/@evan.jarrett.net/at-container-registry/issues
|
||||
- Documentation: https://tangled.org/evan.jarrett.net/at-container-registry
|
||||
- Issues: https://tangled.org/evan.jarrett.net/at-container-registry/issues
|
||||
- Bluesky: @evan.jarrett.net
|
||||
|
||||
@@ -31,7 +31,7 @@ services:
|
||||
networks:
|
||||
- atcr-network
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:2019/metrics"]
|
||||
test: ["CMD", "caddy", "validate", "--config", "/etc/caddy/Caddyfile"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -44,40 +44,22 @@ services:
|
||||
image: atcr-appview:latest
|
||||
container_name: atcr-appview
|
||||
restart: unless-stopped
|
||||
command: ["serve", "--config", "/config.yaml"]
|
||||
# Base config: config-appview.example.yaml
|
||||
# Env vars below override config file values for this deployment
|
||||
environment:
|
||||
# Server configuration
|
||||
ATCR_HTTP_ADDR: :5000
|
||||
ATCR_BASE_URL: https://${APPVIEW_DOMAIN:-atcr.io}
|
||||
ATCR_SERVICE_NAME: ${APPVIEW_DOMAIN:-atcr.io}
|
||||
|
||||
# Storage configuration (derived from HOLD_DOMAIN)
|
||||
ATCR_DEFAULT_HOLD_DID: ${ATCR_DEFAULT_HOLD_DID:-did:web:${HOLD_DOMAIN:-hold01.atcr.io}}
|
||||
|
||||
# Authentication
|
||||
ATCR_AUTH_KEY_PATH: /var/lib/atcr/auth/private-key.pem
|
||||
ATCR_AUTH_CERT_PATH: /var/lib/atcr/auth/private-key.crt
|
||||
ATCR_TOKEN_EXPIRATION: ${ATCR_TOKEN_EXPIRATION:-300}
|
||||
|
||||
# UI configuration
|
||||
ATCR_UI_ENABLED: ${ATCR_UI_ENABLED:-true}
|
||||
ATCR_UI_DATABASE_PATH: /var/lib/atcr/ui.db
|
||||
|
||||
# Logging
|
||||
ATCR_LOG_LEVEL: ${ATCR_LOG_LEVEL:-info}
|
||||
ATCR_LOG_FORMATTER: ${ATCR_LOG_FORMATTER:-text}
|
||||
|
||||
# Jetstream configuration
|
||||
JETSTREAM_URL: ${JETSTREAM_URL:-wss://jetstream2.us-west.bsky.network/subscribe}
|
||||
ATCR_BACKFILL_ENABLED: ${ATCR_BACKFILL_ENABLED:-true}
|
||||
ATCR_RELAY_ENDPOINT: ${ATCR_RELAY_ENDPOINT:-https://relay1.us-east.bsky.network}
|
||||
ATCR_BACKFILL_INTERVAL: ${ATCR_BACKFILL_INTERVAL:-1h}
|
||||
volumes:
|
||||
- ./config-appview.yaml:/config.yaml:ro
|
||||
# Persistent data: auth keys, UI database, OAuth tokens, Jetstream cache
|
||||
- atcr-appview-data:/var/lib/atcr
|
||||
networks:
|
||||
- atcr-network
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:5000/v2/"]
|
||||
test: ["CMD", "/healthcheck", "http://localhost:5000/health"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -90,43 +72,29 @@ services:
|
||||
image: atcr-hold:latest
|
||||
container_name: atcr-hold
|
||||
restart: unless-stopped
|
||||
command: ["serve", "--config", "/config.yaml"]
|
||||
# Base config: config-hold.example.yaml
|
||||
# Env vars below override config file values for this deployment
|
||||
environment:
|
||||
# Hold service configuration (derived from HOLD_DOMAIN)
|
||||
HOLD_PUBLIC_URL: ${HOLD_PUBLIC_URL:-https://${HOLD_DOMAIN:-hold01.atcr.io}}
|
||||
HOLD_SERVER_ADDR: :8080
|
||||
HOLD_ALLOW_ALL_CREW: ${HOLD_ALLOW_ALL_CREW:-false}
|
||||
HOLD_PUBLIC: ${HOLD_PUBLIC:-false}
|
||||
HOLD_OWNER: ${HOLD_OWNER:-}
|
||||
HOLD_BLUESKY_POSTS_ENABLED: ${HOLD_BLUESKY_POSTS_ENABLED:-true}
|
||||
|
||||
# Embedded PDS configuration
|
||||
HOLD_DATABASE_DIR: ${HOLD_DATABASE_DIR:-/var/lib/atcr-hold}
|
||||
# HOLD_KEY_PATH: ${HOLD_KEY_PATH} # Optional, defaults to {HOLD_DATABASE_DIR}/signing.key
|
||||
|
||||
# Storage driver
|
||||
STORAGE_DRIVER: ${STORAGE_DRIVER:-s3}
|
||||
|
||||
# S3/UpCloud Object Storage configuration
|
||||
# S3/UpCloud Object Storage (REQUIRED)
|
||||
AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID:-}
|
||||
AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY:-}
|
||||
AWS_REGION: ${AWS_REGION:-us-east-1}
|
||||
S3_BUCKET: ${S3_BUCKET:-atcr-blobs}
|
||||
S3_ENDPOINT: ${S3_ENDPOINT:-}
|
||||
|
||||
# Logging
|
||||
ATCR_LOG_LEVEL: ${ATCR_LOG_LEVEL:-debug}
|
||||
ATCR_LOG_FORMATTER: ${ATCR_LOG_FORMATTER:-text}
|
||||
|
||||
# Optional: Filesystem storage (comment out S3 vars above)
|
||||
# STORAGE_DRIVER: filesystem
|
||||
# STORAGE_ROOT_DIR: /var/lib/atcr/hold
|
||||
HOLD_LOG_LEVEL: ${ATCR_LOG_LEVEL:-info}
|
||||
volumes:
|
||||
- ./config-hold.yaml:/config.yaml:ro
|
||||
# PDS data (carstore SQLite + signing keys)
|
||||
- atcr-hold-data:/var/lib/atcr-hold
|
||||
- ./quotas.yaml:/quotas.yaml:ro
|
||||
networks:
|
||||
- atcr-network
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8080/health"]
|
||||
test: ["CMD", "/healthcheck", "http://localhost:8080/xrpc/_health"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
|
||||
@@ -1,344 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# ATCR UpCloud Initialization Script for Rocky Linux
|
||||
#
|
||||
# This script sets up ATCR on a fresh Rocky Linux instance.
|
||||
# Paste this into UpCloud's "User data" field when creating a server.
|
||||
#
|
||||
# What it does:
|
||||
# - Updates system packages
|
||||
# - Creates 2GB swap file (for 1GB RAM instances)
|
||||
# - Installs Docker and Docker Compose
|
||||
# - Creates directory structure
|
||||
# - Clones ATCR repository
|
||||
# - Creates systemd service for auto-start
|
||||
# - Builds and starts containers
|
||||
#
|
||||
# Post-deployment:
|
||||
# 1. Edit /opt/atcr/.env with your configuration
|
||||
# 2. Run: systemctl restart atcr
|
||||
# 3. Check logs: docker logs atcr-hold (for OAuth URL)
|
||||
# 4. Complete hold registration via OAuth
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Configuration
|
||||
ATCR_DIR="/opt/atcr"
|
||||
ATCR_REPO="https://tangled.org/@evan.jarrett.net/at-container-registry" # UPDATE THIS
|
||||
ATCR_BRANCH="main"
|
||||
|
||||
# Simple logging without colors (for cloud-init log compatibility)
|
||||
log_info() {
|
||||
echo "[INFO] $1"
|
||||
}
|
||||
|
||||
log_warn() {
|
||||
echo "[WARN] $1"
|
||||
}
|
||||
|
||||
log_error() {
|
||||
echo "[ERROR] $1"
|
||||
}
|
||||
|
||||
# Function to check if command exists
|
||||
command_exists() {
|
||||
command -v "$1" >/dev/null 2>&1
|
||||
}
|
||||
|
||||
log_info "Starting ATCR deployment on Rocky Linux..."
|
||||
|
||||
# Update system packages
|
||||
log_info "Updating system packages..."
|
||||
dnf update -y
|
||||
|
||||
# Install required packages
|
||||
log_info "Installing prerequisites..."
|
||||
dnf install -y \
|
||||
git \
|
||||
wget \
|
||||
curl \
|
||||
nano \
|
||||
vim
|
||||
|
||||
log_info "Required ports: HTTP (80), HTTPS (443), SSH (22)"
|
||||
|
||||
# Create swap file for instances with limited RAM
|
||||
if [ ! -f /swapfile ]; then
|
||||
log_info "Creating 2GB swap file (allows builds on 1GB RAM instances)..."
|
||||
dd if=/dev/zero of=/swapfile bs=1M count=2048 status=progress
|
||||
chmod 600 /swapfile
|
||||
mkswap /swapfile
|
||||
swapon /swapfile
|
||||
|
||||
# Make swap permanent
|
||||
echo '/swapfile none swap sw 0 0' >> /etc/fstab
|
||||
|
||||
log_info "Swap file created and enabled"
|
||||
free -h
|
||||
else
|
||||
log_info "Swap file already exists"
|
||||
fi
|
||||
|
||||
# Install Docker
|
||||
if ! command_exists docker; then
|
||||
log_info "Installing Docker..."
|
||||
|
||||
# Add Docker repository
|
||||
dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
|
||||
|
||||
# Install Docker
|
||||
dnf install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
||||
|
||||
# Start and enable Docker
|
||||
systemctl enable --now docker
|
||||
|
||||
log_info "Docker installed successfully"
|
||||
else
|
||||
log_info "Docker already installed"
|
||||
fi
|
||||
|
||||
# Verify Docker Compose
|
||||
if ! docker compose version >/dev/null 2>&1; then
|
||||
log_error "Docker Compose plugin not found. Please install manually."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log_info "Docker Compose version: $(docker compose version)"
|
||||
|
||||
# Create ATCR directory
|
||||
log_info "Creating ATCR directory: $ATCR_DIR"
|
||||
mkdir -p "$ATCR_DIR"
|
||||
cd "$ATCR_DIR"
|
||||
|
||||
# Clone repository or create minimal structure
|
||||
if [ -n "$ATCR_REPO" ] && [ "$ATCR_REPO" != "https://tangled.org/@evan.jarrett.net/at-container-registry" ]; then
|
||||
log_info "Cloning ATCR repository..."
|
||||
git clone -b "$ATCR_BRANCH" "$ATCR_REPO" .
|
||||
else
|
||||
log_warn "ATCR_REPO not configured. You'll need to manually copy files to $ATCR_DIR"
|
||||
log_warn "Required files:"
|
||||
log_warn " - deploy/docker-compose.prod.yml"
|
||||
log_warn " - deploy/.env.prod.template"
|
||||
log_warn " - Dockerfile.appview"
|
||||
log_warn " - Dockerfile.hold"
|
||||
fi
|
||||
|
||||
# Create .env file from template if it doesn't exist
|
||||
if [ -f "deploy/.env.prod.template" ] && [ ! -f "$ATCR_DIR/.env" ]; then
|
||||
log_info "Creating .env file from template..."
|
||||
cp deploy/.env.prod.template "$ATCR_DIR/.env"
|
||||
log_warn "IMPORTANT: Edit $ATCR_DIR/.env with your configuration!"
|
||||
fi
|
||||
|
||||
# Create systemd services (caddy, appview, hold)
|
||||
log_info "Creating systemd services..."
|
||||
|
||||
# Caddy service (reverse proxy for both appview and hold)
|
||||
cat > /etc/systemd/system/atcr-caddy.service <<'EOF'
|
||||
[Unit]
|
||||
Description=ATCR Caddy Reverse Proxy
|
||||
Requires=docker.service
|
||||
After=docker.service network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
WorkingDirectory=/opt/atcr
|
||||
EnvironmentFile=/opt/atcr/.env
|
||||
|
||||
# Start caddy container
|
||||
ExecStart=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml up -d caddy
|
||||
|
||||
# Stop caddy container
|
||||
ExecStop=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml stop caddy
|
||||
|
||||
# Restart caddy container
|
||||
ExecReload=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml restart caddy
|
||||
|
||||
# Always restart on failure
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
# AppView service (registry + web UI)
|
||||
cat > /etc/systemd/system/atcr-appview.service <<'EOF'
|
||||
[Unit]
|
||||
Description=ATCR AppView (Registry + Web UI)
|
||||
Requires=docker.service atcr-caddy.service
|
||||
After=docker.service network-online.target atcr-caddy.service
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
WorkingDirectory=/opt/atcr
|
||||
EnvironmentFile=/opt/atcr/.env
|
||||
|
||||
# Start appview container
|
||||
ExecStart=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml up -d atcr-appview
|
||||
|
||||
# Stop appview container
|
||||
ExecStop=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml stop atcr-appview
|
||||
|
||||
# Restart appview container
|
||||
ExecReload=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml restart atcr-appview
|
||||
|
||||
# Always restart on failure
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
# Hold service (storage backend)
|
||||
cat > /etc/systemd/system/atcr-hold.service <<'EOF'
|
||||
[Unit]
|
||||
Description=ATCR Hold (Storage Service)
|
||||
Requires=docker.service atcr-caddy.service
|
||||
After=docker.service network-online.target atcr-caddy.service
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
WorkingDirectory=/opt/atcr
|
||||
EnvironmentFile=/opt/atcr/.env
|
||||
|
||||
# Start hold container
|
||||
ExecStart=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml up -d atcr-hold
|
||||
|
||||
# Stop hold container
|
||||
ExecStop=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml stop atcr-hold
|
||||
|
||||
# Restart hold container
|
||||
ExecReload=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml restart atcr-hold
|
||||
|
||||
# Always restart on failure
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
# Reload systemd
|
||||
log_info "Reloading systemd daemon..."
|
||||
systemctl daemon-reload
|
||||
|
||||
# Enable all services (but don't start yet - user needs to configure .env)
|
||||
systemctl enable atcr-caddy.service
|
||||
systemctl enable atcr-appview.service
|
||||
systemctl enable atcr-hold.service
|
||||
|
||||
log_info "Systemd services created and enabled"
|
||||
|
||||
# Create helper scripts
|
||||
log_info "Creating helper scripts..."
|
||||
|
||||
# Script to rebuild and restart
|
||||
cat > "$ATCR_DIR/rebuild.sh" <<'EOF'
|
||||
#!/bin/bash
|
||||
set -e
|
||||
cd /opt/atcr
|
||||
docker compose -f deploy/docker-compose.prod.yml build
|
||||
docker compose -f deploy/docker-compose.prod.yml up -d
|
||||
docker compose -f deploy/docker-compose.prod.yml logs -f
|
||||
EOF
|
||||
chmod +x "$ATCR_DIR/rebuild.sh"
|
||||
|
||||
# Script to view logs
|
||||
cat > "$ATCR_DIR/logs.sh" <<'EOF'
|
||||
#!/bin/bash
|
||||
cd /opt/atcr
|
||||
docker compose -f deploy/docker-compose.prod.yml logs -f "$@"
|
||||
EOF
|
||||
chmod +x "$ATCR_DIR/logs.sh"
|
||||
|
||||
log_info "Helper scripts created in $ATCR_DIR"
|
||||
|
||||
# Print completion message
|
||||
cat <<'EOF'
|
||||
|
||||
================================================================================
|
||||
ATCR Installation Complete!
|
||||
================================================================================
|
||||
|
||||
NEXT STEPS:
|
||||
|
||||
1. Configure environment variables:
|
||||
nano /opt/atcr/.env
|
||||
|
||||
Required settings:
|
||||
- AWS_ACCESS_KEY_ID (UpCloud S3 credentials)
|
||||
- AWS_SECRET_ACCESS_KEY
|
||||
|
||||
Pre-configured (verify these are correct):
|
||||
- APPVIEW_DOMAIN=atcr.io
|
||||
- HOLD_DOMAIN=hold01.atcr.io
|
||||
- HOLD_OWNER=did:plc:pddp4xt5lgnv2qsegbzzs4xg
|
||||
- S3_BUCKET=atcr
|
||||
- S3_ENDPOINT=https://blobs.atcr.io
|
||||
|
||||
2. Configure UpCloud Cloud Firewall (in control panel):
|
||||
Allow: TCP 22 (SSH)
|
||||
Allow: TCP 80 (HTTP)
|
||||
Allow: TCP 443 (HTTPS)
|
||||
Drop: Everything else
|
||||
|
||||
3. Configure DNS (Cloudflare - DNS-only mode):
|
||||
EOF
|
||||
|
||||
echo " A atcr.io → $(curl -s ifconfig.me || echo '[server-ip]') (gray cloud)"
|
||||
echo " A hold01.atcr.io → $(curl -s ifconfig.me || echo '[server-ip]') (gray cloud)"
|
||||
echo " CNAME blobs.atcr.io → atcr.us-chi1.upcloudobjects.com (gray cloud)"
|
||||
|
||||
cat <<'EOF'
|
||||
|
||||
4. Start ATCR services:
|
||||
systemctl start atcr-caddy atcr-appview atcr-hold
|
||||
|
||||
5. Check status:
|
||||
systemctl status atcr-caddy
|
||||
systemctl status atcr-appview
|
||||
systemctl status atcr-hold
|
||||
docker ps
|
||||
/opt/atcr/logs.sh
|
||||
|
||||
Helper Scripts:
|
||||
/opt/atcr/rebuild.sh - Rebuild and restart containers
|
||||
/opt/atcr/logs.sh [service] - View logs (e.g., logs.sh atcr-hold)
|
||||
|
||||
Service Management:
|
||||
systemctl start atcr-caddy - Start Caddy reverse proxy
|
||||
systemctl start atcr-appview - Start AppView (registry + UI)
|
||||
systemctl start atcr-hold - Start Hold (storage service)
|
||||
|
||||
systemctl stop atcr-appview - Stop AppView only
|
||||
systemctl stop atcr-hold - Stop Hold only
|
||||
systemctl stop atcr-caddy - Stop all (stops reverse proxy)
|
||||
|
||||
systemctl restart atcr-appview - Restart AppView
|
||||
systemctl restart atcr-hold - Restart Hold
|
||||
|
||||
systemctl status atcr-caddy - Check Caddy status
|
||||
systemctl status atcr-appview - Check AppView status
|
||||
systemctl status atcr-hold - Check Hold status
|
||||
|
||||
Documentation:
|
||||
https://tangled.org/@evan.jarrett.net/at-container-registry
|
||||
|
||||
IMPORTANT:
|
||||
- Edit /opt/atcr/.env with S3 credentials before starting!
|
||||
- Configure UpCloud cloud firewall (see step 2)
|
||||
- DNS must be configured and propagated
|
||||
- Cloudflare proxy must be DISABLED (gray cloud)
|
||||
- Complete hold OAuth registration before first push
|
||||
|
||||
EOF
|
||||
|
||||
log_info "Installation complete. Follow the next steps above."
|
||||
@@ -1,55 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Request crawl for a PDS from the Bluesky relay
|
||||
#
|
||||
# Usage: ./request-crawl.sh <hostname> [relay-url]
|
||||
# Example: ./request-crawl.sh hold01.atcr.io
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
DEFAULT_RELAY="https://bsky.network/xrpc/com.atproto.sync.requestCrawl"
|
||||
|
||||
# Parse arguments
|
||||
HOSTNAME="${1:-}"
|
||||
RELAY_URL="${2:-$DEFAULT_RELAY}"
|
||||
|
||||
# Validate hostname
|
||||
if [ -z "$HOSTNAME" ]; then
|
||||
echo "Error: hostname is required" >&2
|
||||
echo "" >&2
|
||||
echo "Usage: $0 <hostname> [relay-url]" >&2
|
||||
echo "Example: $0 hold01.atcr.io" >&2
|
||||
echo "" >&2
|
||||
echo "Options:" >&2
|
||||
echo " hostname Hostname of the PDS to request crawl for (required)" >&2
|
||||
echo " relay-url Relay URL to send crawl request to (default: $DEFAULT_RELAY)" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Log what we're doing
|
||||
echo "Requesting crawl for hostname: $HOSTNAME"
|
||||
echo "Sending to relay: $RELAY_URL"
|
||||
|
||||
# Make the request
|
||||
RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "$RELAY_URL" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"hostname\":\"$HOSTNAME\"}")
|
||||
|
||||
# Split response and status code
|
||||
HTTP_BODY=$(echo "$RESPONSE" | head -n -1)
|
||||
HTTP_CODE=$(echo "$RESPONSE" | tail -n 1)
|
||||
|
||||
# Check response
|
||||
if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ]; then
|
||||
echo "✅ Success! Crawl requested for $HOSTNAME"
|
||||
if [ -n "$HTTP_BODY" ]; then
|
||||
echo "Response: $HTTP_BODY"
|
||||
fi
|
||||
else
|
||||
echo "❌ Failed with status $HTTP_CODE" >&2
|
||||
if [ -n "$HTTP_BODY" ]; then
|
||||
echo "Response: $HTTP_BODY" >&2
|
||||
fi
|
||||
exit 1
|
||||
fi
|
||||
416
deploy/upcloud/cloudinit.go
Normal file
416
deploy/upcloud/cloudinit.go
Normal file
@@ -0,0 +1,416 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
_ "embed"
|
||||
"fmt"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"go.yaml.in/yaml/v3"
|
||||
)
|
||||
|
||||
//go:embed systemd/appview.service.tmpl
|
||||
var appviewServiceTmpl string
|
||||
|
||||
//go:embed systemd/hold.service.tmpl
|
||||
var holdServiceTmpl string
|
||||
|
||||
//go:embed systemd/scanner.service.tmpl
|
||||
var scannerServiceTmpl string
|
||||
|
||||
//go:embed configs/appview.yaml.tmpl
|
||||
var appviewConfigTmpl string
|
||||
|
||||
//go:embed configs/hold.yaml.tmpl
|
||||
var holdConfigTmpl string
|
||||
|
||||
//go:embed configs/scanner.yaml.tmpl
|
||||
var scannerConfigTmpl string
|
||||
|
||||
//go:embed configs/cloudinit.sh.tmpl
|
||||
var cloudInitTmpl string
|
||||
|
||||
// ConfigValues holds values injected into config YAML templates.
|
||||
// Only truly dynamic/computed values belong here — deployment-specific
|
||||
// values like client_name, owner_did, etc. are literal in the templates.
|
||||
type ConfigValues struct {
|
||||
// S3 / Object Storage
|
||||
S3Endpoint string
|
||||
S3Region string
|
||||
S3Bucket string
|
||||
S3AccessKey string
|
||||
S3SecretKey string
|
||||
|
||||
// Infrastructure (computed from zone + config)
|
||||
Zone string // e.g. "us-chi1"
|
||||
HoldDomain string // e.g. "us-chi1.cove.seamark.dev"
|
||||
HoldDid string // e.g. "did:web:us-chi1.cove.seamark.dev"
|
||||
BasePath string // e.g. "/var/lib/seamark"
|
||||
|
||||
// Scanner (auto-generated shared secret)
|
||||
ScannerSecret string // hex-encoded 32-byte secret; empty disables scanning
|
||||
}
|
||||
|
||||
// renderConfig executes a Go template with the given values.
|
||||
func renderConfig(tmplStr string, vals *ConfigValues) (string, error) {
|
||||
t, err := template.New("config").Parse(tmplStr)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("parse config template: %w", err)
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := t.Execute(&buf, vals); err != nil {
|
||||
return "", fmt.Errorf("render config template: %w", err)
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// serviceUnitParams holds values for rendering systemd service unit templates.
|
||||
type serviceUnitParams struct {
|
||||
DisplayName string // e.g. "Seamark"
|
||||
User string // e.g. "seamark"
|
||||
BinaryPath string // e.g. "/opt/seamark/bin/seamark-appview"
|
||||
ConfigPath string // e.g. "/etc/seamark/appview.yaml"
|
||||
DataDir string // e.g. "/var/lib/seamark"
|
||||
ServiceName string // e.g. "seamark-appview"
|
||||
}
|
||||
|
||||
func renderServiceUnit(tmplStr string, p serviceUnitParams) (string, error) {
|
||||
t, err := template.New("service").Parse(tmplStr)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("parse service template: %w", err)
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := t.Execute(&buf, p); err != nil {
|
||||
return "", fmt.Errorf("render service template: %w", err)
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// scannerServiceUnitParams holds values for rendering the scanner systemd unit.
|
||||
// Extends the standard fields with HoldServiceName for the After= dependency.
|
||||
type scannerServiceUnitParams struct {
|
||||
DisplayName string // e.g. "Seamark"
|
||||
User string // e.g. "seamark"
|
||||
BinaryPath string // e.g. "/opt/seamark/bin/seamark-scanner"
|
||||
ConfigPath string // e.g. "/etc/seamark/scanner.yaml"
|
||||
DataDir string // e.g. "/var/lib/seamark"
|
||||
ServiceName string // e.g. "seamark-scanner"
|
||||
HoldServiceName string // e.g. "seamark-hold" (After= dependency)
|
||||
}
|
||||
|
||||
func renderScannerServiceUnit(p scannerServiceUnitParams) (string, error) {
|
||||
t, err := template.New("scanner-service").Parse(scannerServiceTmpl)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("parse scanner service template: %w", err)
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := t.Execute(&buf, p); err != nil {
|
||||
return "", fmt.Errorf("render scanner service template: %w", err)
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// generateAppviewCloudInit generates the cloud-init user-data script for the appview server.
|
||||
// Sets up the OS, directories, config, and systemd unit. Binaries are deployed separately via SCP.
|
||||
func generateAppviewCloudInit(cfg *InfraConfig, vals *ConfigValues) (string, error) {
|
||||
naming := cfg.Naming()
|
||||
|
||||
configYAML, err := renderConfig(appviewConfigTmpl, vals)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("appview config: %w", err)
|
||||
}
|
||||
|
||||
serviceUnit, err := renderServiceUnit(appviewServiceTmpl, serviceUnitParams{
|
||||
DisplayName: naming.DisplayName(),
|
||||
User: naming.SystemUser(),
|
||||
BinaryPath: naming.InstallDir() + "/bin/" + naming.Appview(),
|
||||
ConfigPath: naming.AppviewConfigPath(),
|
||||
DataDir: naming.BasePath(),
|
||||
ServiceName: naming.Appview(),
|
||||
})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("appview service unit: %w", err)
|
||||
}
|
||||
|
||||
return generateCloudInit(cloudInitParams{
|
||||
BinaryName: naming.Appview(),
|
||||
ServiceUnit: serviceUnit,
|
||||
ConfigYAML: configYAML,
|
||||
ConfigPath: naming.AppviewConfigPath(),
|
||||
ServiceName: naming.Appview(),
|
||||
DataDir: naming.BasePath(),
|
||||
InstallDir: naming.InstallDir(),
|
||||
SystemUser: naming.SystemUser(),
|
||||
ConfigDir: naming.ConfigDir(),
|
||||
LogFile: naming.LogFile(),
|
||||
DisplayName: naming.DisplayName(),
|
||||
})
|
||||
}
|
||||
|
||||
// generateHoldCloudInit generates the cloud-init user-data script for the hold server.
|
||||
// When withScanner is true, a second phase is appended that creates scanner data
|
||||
// directories and installs a scanner systemd service. Binaries are deployed separately via SCP.
|
||||
func generateHoldCloudInit(cfg *InfraConfig, vals *ConfigValues, withScanner bool) (string, error) {
|
||||
naming := cfg.Naming()
|
||||
|
||||
configYAML, err := renderConfig(holdConfigTmpl, vals)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("hold config: %w", err)
|
||||
}
|
||||
|
||||
serviceUnit, err := renderServiceUnit(holdServiceTmpl, serviceUnitParams{
|
||||
DisplayName: naming.DisplayName(),
|
||||
User: naming.SystemUser(),
|
||||
BinaryPath: naming.InstallDir() + "/bin/" + naming.Hold(),
|
||||
ConfigPath: naming.HoldConfigPath(),
|
||||
DataDir: naming.BasePath(),
|
||||
ServiceName: naming.Hold(),
|
||||
})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("hold service unit: %w", err)
|
||||
}
|
||||
|
||||
script, err := generateCloudInit(cloudInitParams{
|
||||
BinaryName: naming.Hold(),
|
||||
ServiceUnit: serviceUnit,
|
||||
ConfigYAML: configYAML,
|
||||
ConfigPath: naming.HoldConfigPath(),
|
||||
ServiceName: naming.Hold(),
|
||||
DataDir: naming.BasePath(),
|
||||
InstallDir: naming.InstallDir(),
|
||||
SystemUser: naming.SystemUser(),
|
||||
ConfigDir: naming.ConfigDir(),
|
||||
LogFile: naming.LogFile(),
|
||||
DisplayName: naming.DisplayName(),
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if !withScanner {
|
||||
return script, nil
|
||||
}
|
||||
|
||||
// Render scanner config YAML
|
||||
scannerConfigYAML, err := renderConfig(scannerConfigTmpl, vals)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("scanner config: %w", err)
|
||||
}
|
||||
|
||||
// Append scanner setup phase (no build — binary deployed via SCP)
|
||||
scannerUnit, err := renderScannerServiceUnit(scannerServiceUnitParams{
|
||||
DisplayName: naming.DisplayName(),
|
||||
User: naming.SystemUser(),
|
||||
BinaryPath: naming.InstallDir() + "/bin/" + naming.Scanner(),
|
||||
ConfigPath: naming.ScannerConfigPath(),
|
||||
DataDir: naming.BasePath(),
|
||||
ServiceName: naming.Scanner(),
|
||||
HoldServiceName: naming.Hold(),
|
||||
})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("scanner service unit: %w", err)
|
||||
}
|
||||
|
||||
// Escape single quotes for heredoc embedding
|
||||
scannerUnit = strings.ReplaceAll(scannerUnit, "'", "'\\''")
|
||||
scannerConfigYAML = strings.ReplaceAll(scannerConfigYAML, "'", "'\\''")
|
||||
|
||||
scannerPhase := fmt.Sprintf(`
|
||||
# === Scanner Setup ===
|
||||
|
||||
# Scanner data dirs
|
||||
mkdir -p %s/vulndb %s/tmp
|
||||
chown -R %s:%s %s
|
||||
|
||||
# Scanner config
|
||||
cat > %s << 'CFGEOF'
|
||||
%s
|
||||
CFGEOF
|
||||
|
||||
# Scanner systemd service
|
||||
cat > /etc/systemd/system/%s.service << 'SVCEOF'
|
||||
%s
|
||||
SVCEOF
|
||||
systemctl daemon-reload
|
||||
systemctl enable %s
|
||||
|
||||
echo "=== Scanner setup complete ==="
|
||||
`,
|
||||
naming.ScannerDataDir(), naming.ScannerDataDir(),
|
||||
naming.SystemUser(), naming.SystemUser(), naming.ScannerDataDir(),
|
||||
naming.ScannerConfigPath(),
|
||||
scannerConfigYAML,
|
||||
naming.Scanner(),
|
||||
scannerUnit,
|
||||
naming.Scanner(),
|
||||
)
|
||||
|
||||
return script + scannerPhase, nil
|
||||
}
|
||||
|
||||
type cloudInitParams struct {
|
||||
BinaryName string
|
||||
ServiceUnit string
|
||||
ConfigYAML string
|
||||
ConfigPath string
|
||||
ServiceName string
|
||||
DataDir string
|
||||
InstallDir string
|
||||
SystemUser string
|
||||
ConfigDir string
|
||||
LogFile string
|
||||
DisplayName string
|
||||
}
|
||||
|
||||
func generateCloudInit(p cloudInitParams) (string, error) {
|
||||
// Escape single quotes in embedded content for heredoc safety
|
||||
p.ServiceUnit = strings.ReplaceAll(p.ServiceUnit, "'", "'\\''")
|
||||
p.ConfigYAML = strings.ReplaceAll(p.ConfigYAML, "'", "'\\''")
|
||||
|
||||
t, err := template.New("cloudinit").Parse(cloudInitTmpl)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("parse cloudinit template: %w", err)
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := t.Execute(&buf, p); err != nil {
|
||||
return "", fmt.Errorf("render cloudinit template: %w", err)
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// syncServiceUnit compares a rendered systemd service unit against what's on
|
||||
// the server. If they differ, it writes the new unit file. Returns true if the
|
||||
// unit was updated (caller should daemon-reload before restart).
|
||||
func syncServiceUnit(name, ip, serviceName, renderedUnit string) (bool, error) {
|
||||
unitPath := "/etc/systemd/system/" + serviceName + ".service"
|
||||
|
||||
remote, err := runSSH(ip, fmt.Sprintf("cat %s 2>/dev/null || echo '__MISSING__'", unitPath), false)
|
||||
if err != nil {
|
||||
fmt.Printf(" service unit sync: could not reach %s (%v)\n", name, err)
|
||||
return false, nil
|
||||
}
|
||||
remote = strings.TrimSpace(remote)
|
||||
rendered := strings.TrimSpace(renderedUnit)
|
||||
|
||||
if remote == "__MISSING__" {
|
||||
fmt.Printf(" service unit: %s not found (cloud-init will handle it)\n", name)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if remote == rendered {
|
||||
fmt.Printf(" service unit: %s up to date\n", name)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Write the updated unit file
|
||||
script := fmt.Sprintf("cat > %s << 'SVCEOF'\n%s\nSVCEOF", unitPath, rendered)
|
||||
if _, err := runSSH(ip, script, false); err != nil {
|
||||
return false, fmt.Errorf("write service unit: %w", err)
|
||||
}
|
||||
fmt.Printf(" service unit: %s updated\n", name)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// syncConfigKeys fetches the existing config from a server and merges in any
|
||||
// missing keys from the rendered template. Existing values are never overwritten.
|
||||
func syncConfigKeys(name, ip, configPath, templateYAML string) error {
|
||||
remote, err := runSSH(ip, fmt.Sprintf("cat %s 2>/dev/null || echo '__MISSING__'", configPath), false)
|
||||
if err != nil {
|
||||
fmt.Printf(" config sync: could not reach %s (%v)\n", name, err)
|
||||
return nil
|
||||
}
|
||||
remote = strings.TrimSpace(remote)
|
||||
|
||||
if remote == "__MISSING__" {
|
||||
fmt.Printf(" config sync: %s not yet created (cloud-init will handle it)\n", name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Parse both into yaml.Node trees
|
||||
var templateDoc yaml.Node
|
||||
if err := yaml.Unmarshal([]byte(templateYAML), &templateDoc); err != nil {
|
||||
return fmt.Errorf("parse template yaml: %w", err)
|
||||
}
|
||||
var existingDoc yaml.Node
|
||||
if err := yaml.Unmarshal([]byte(remote), &existingDoc); err != nil {
|
||||
return fmt.Errorf("parse remote yaml: %w", err)
|
||||
}
|
||||
|
||||
// Unwrap document nodes to get the root mapping
|
||||
templateRoot := unwrapDocNode(&templateDoc)
|
||||
existingRoot := unwrapDocNode(&existingDoc)
|
||||
if templateRoot == nil || existingRoot == nil {
|
||||
fmt.Printf(" config sync: %s skipped (unexpected YAML structure)\n", name)
|
||||
return nil
|
||||
}
|
||||
|
||||
added := mergeYAMLNodes(templateRoot, existingRoot)
|
||||
if !added {
|
||||
fmt.Printf(" config sync: %s up to date\n", name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Marshal the modified tree back
|
||||
merged, err := yaml.Marshal(&existingDoc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal merged yaml: %w", err)
|
||||
}
|
||||
|
||||
// Write back to server
|
||||
script := fmt.Sprintf("cat > %s << 'CFGEOF'\n%sCFGEOF", configPath, string(merged))
|
||||
if _, err := runSSH(ip, script, false); err != nil {
|
||||
return fmt.Errorf("write merged config: %w", err)
|
||||
}
|
||||
fmt.Printf(" config sync: %s updated with new keys\n", name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// unwrapDocNode returns the root mapping node, unwrapping a DocumentNode wrapper if present.
|
||||
func unwrapDocNode(n *yaml.Node) *yaml.Node {
|
||||
if n.Kind == yaml.DocumentNode && len(n.Content) > 0 {
|
||||
return n.Content[0]
|
||||
}
|
||||
if n.Kind == yaml.MappingNode {
|
||||
return n
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// mergeYAMLNodes recursively adds keys from base into existing that are not
|
||||
// already present. Existing values are never overwritten. Returns true if any
|
||||
// new keys were added.
|
||||
func mergeYAMLNodes(base, existing *yaml.Node) bool {
|
||||
if base.Kind != yaml.MappingNode || existing.Kind != yaml.MappingNode {
|
||||
return false
|
||||
}
|
||||
|
||||
added := false
|
||||
for i := 0; i+1 < len(base.Content); i += 2 {
|
||||
baseKey := base.Content[i]
|
||||
baseVal := base.Content[i+1]
|
||||
|
||||
// Look for this key in existing
|
||||
found := false
|
||||
for j := 0; j+1 < len(existing.Content); j += 2 {
|
||||
if existing.Content[j].Value == baseKey.Value {
|
||||
found = true
|
||||
// If both are mappings, recurse to merge sub-keys
|
||||
if baseVal.Kind == yaml.MappingNode && existing.Content[j+1].Kind == yaml.MappingNode {
|
||||
if mergeYAMLNodes(baseVal, existing.Content[j+1]) {
|
||||
added = true
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
// Append the missing key+value pair
|
||||
existing.Content = append(existing.Content, baseKey, baseVal)
|
||||
added = true
|
||||
}
|
||||
}
|
||||
|
||||
return added
|
||||
}
|
||||
143
deploy/upcloud/config.go
Normal file
143
deploy/upcloud/config.go
Normal file
@@ -0,0 +1,143 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/UpCloudLtd/upcloud-go-api/v8/upcloud/client"
|
||||
"github.com/UpCloudLtd/upcloud-go-api/v8/upcloud/service"
|
||||
"go.yaml.in/yaml/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
repoURL = "https://tangled.org/evan.jarrett.net/at-container-registry"
|
||||
repoBranch = "main"
|
||||
privateNetworkCIDR = "10.0.1.0/24"
|
||||
)
|
||||
|
||||
// InfraConfig holds infrastructure configuration.
|
||||
type InfraConfig struct {
|
||||
Zone string
|
||||
Plan string
|
||||
SSHPublicKey string
|
||||
S3SecretKey string
|
||||
|
||||
// Infrastructure naming — derived from configs/appview.yaml.tmpl.
|
||||
// Edit that template to rebrand.
|
||||
ClientName string
|
||||
BaseDomain string
|
||||
RegistryDomains []string
|
||||
RepoURL string
|
||||
RepoBranch string
|
||||
}
|
||||
|
||||
// Naming returns a Naming helper derived from ClientName.
|
||||
func (c *InfraConfig) Naming() Naming {
|
||||
return Naming{ClientName: c.ClientName}
|
||||
}
|
||||
|
||||
func loadConfig(zone, plan, sshKeyPath, s3Secret string) (*InfraConfig, error) {
|
||||
sshKey, err := readSSHPublicKey(sshKeyPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
clientName, baseDomain, registryDomains, err := extractFromAppviewTemplate()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("extract config from template: %w", err)
|
||||
}
|
||||
|
||||
return &InfraConfig{
|
||||
Zone: zone,
|
||||
Plan: plan,
|
||||
SSHPublicKey: sshKey,
|
||||
S3SecretKey: s3Secret,
|
||||
ClientName: clientName,
|
||||
BaseDomain: baseDomain,
|
||||
RegistryDomains: registryDomains,
|
||||
RepoURL: repoURL,
|
||||
RepoBranch: repoBranch,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// extractFromAppviewTemplate renders the appview config template with
|
||||
// zero-value ConfigValues and parses the resulting YAML to extract
|
||||
// deployment-specific values. The template is the single source of truth.
|
||||
func extractFromAppviewTemplate() (clientName, baseDomain string, registryDomains []string, err error) {
|
||||
rendered, err := renderConfig(appviewConfigTmpl, &ConfigValues{})
|
||||
if err != nil {
|
||||
return "", "", nil, fmt.Errorf("render appview template: %w", err)
|
||||
}
|
||||
|
||||
var cfg struct {
|
||||
Server struct {
|
||||
BaseURL string `yaml:"base_url"`
|
||||
ClientName string `yaml:"client_name"`
|
||||
RegistryDomains []string `yaml:"registry_domains"`
|
||||
} `yaml:"server"`
|
||||
}
|
||||
if err := yaml.Unmarshal([]byte(rendered), &cfg); err != nil {
|
||||
return "", "", nil, fmt.Errorf("parse appview template YAML: %w", err)
|
||||
}
|
||||
|
||||
clientName = strings.ToLower(cfg.Server.ClientName)
|
||||
baseDomain = strings.TrimPrefix(cfg.Server.BaseURL, "https://")
|
||||
registryDomains = cfg.Server.RegistryDomains
|
||||
|
||||
return clientName, baseDomain, registryDomains, nil
|
||||
}
|
||||
|
||||
// readSSHPublicKey reads an SSH public key from a file path.
|
||||
func readSSHPublicKey(path string) (string, error) {
|
||||
if path == "" {
|
||||
return "", fmt.Errorf("--ssh-key is required (path to SSH public key file)")
|
||||
}
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("read SSH public key %s: %w", path, err)
|
||||
}
|
||||
key := strings.TrimSpace(string(data))
|
||||
if key == "" {
|
||||
return "", fmt.Errorf("SSH public key file %s is empty", path)
|
||||
}
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// resolveInteractive fills in any empty Zone/Plan fields by launching
|
||||
// interactive TUI pickers that query the UpCloud API.
|
||||
func resolveInteractive(ctx context.Context, svc *service.Service, cfg *InfraConfig) error {
|
||||
if cfg.Zone == "" {
|
||||
z, err := pickZone(ctx, svc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("zone picker: %w", err)
|
||||
}
|
||||
cfg.Zone = z
|
||||
}
|
||||
if cfg.Plan == "" {
|
||||
p, err := pickPlan(ctx, svc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("plan picker: %w", err)
|
||||
}
|
||||
cfg.Plan = p
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// newService creates an UpCloud API client. If token is non-empty it's used
|
||||
// directly; otherwise credentials are read from UPCLOUD_TOKEN env var.
|
||||
func newService(token string) (*service.Service, error) {
|
||||
var c *client.Client
|
||||
var err error
|
||||
if token != "" {
|
||||
c = client.New("", "", client.WithBearerAuth(token), client.WithTimeout(120*time.Second))
|
||||
} else {
|
||||
c, err = client.NewFromEnv(client.WithTimeout(120 * time.Second))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create UpCloud client: %w\n\nPass --token or set UPCLOUD_TOKEN", err)
|
||||
}
|
||||
}
|
||||
return service.New(c), nil
|
||||
}
|
||||
48
deploy/upcloud/configs/appview.yaml.tmpl
Normal file
48
deploy/upcloud/configs/appview.yaml.tmpl
Normal file
@@ -0,0 +1,48 @@
|
||||
version: "0.1"
|
||||
log_level: info
|
||||
log_shipper:
|
||||
backend: ""
|
||||
url: ""
|
||||
batch_size: 100
|
||||
flush_interval: 5s
|
||||
username: ""
|
||||
password: ""
|
||||
server:
|
||||
addr: :5000
|
||||
base_url: "https://seamark.dev"
|
||||
default_hold_did: "{{.HoldDid}}"
|
||||
oauth_key_path: "{{.BasePath}}/oauth/client.key"
|
||||
client_name: Seamark
|
||||
test_mode: false
|
||||
client_short_name: Seamark
|
||||
registry_domains:
|
||||
- "buoy.cr"
|
||||
- "bouy.cr"
|
||||
ui:
|
||||
database_path: "{{.BasePath}}/ui.db"
|
||||
theme: seamark
|
||||
libsql_sync_url: ""
|
||||
libsql_auth_token: ""
|
||||
libsql_sync_interval: 1m0s
|
||||
health:
|
||||
cache_ttl: 15m0s
|
||||
check_interval: 15m0s
|
||||
jetstream:
|
||||
urls:
|
||||
- wss://jetstream2.us-west.bsky.network/subscribe
|
||||
- wss://jetstream1.us-west.bsky.network/subscribe
|
||||
- wss://jetstream2.us-east.bsky.network/subscribe
|
||||
- wss://jetstream1.us-east.bsky.network/subscribe
|
||||
backfill_enabled: true
|
||||
backfill_interval: 24h
|
||||
relay_endpoints:
|
||||
- https://relay1.us-east.bsky.network
|
||||
- https://relay1.us-west.bsky.network
|
||||
auth:
|
||||
key_path: "{{.BasePath}}/auth/private-key.pem"
|
||||
cert_path: "{{.BasePath}}/auth/private-key.crt"
|
||||
credential_helper:
|
||||
tangled_repo: ""
|
||||
legal:
|
||||
company_name: Seamark
|
||||
jurisdiction: State of Texas, United States
|
||||
55
deploy/upcloud/configs/cloudinit.sh.tmpl
Normal file
55
deploy/upcloud/configs/cloudinit.sh.tmpl
Normal file
@@ -0,0 +1,55 @@
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
exec > >(tee {{.LogFile}}) 2>&1
|
||||
|
||||
echo "=== {{.DisplayName}} Setup: {{.BinaryName}} ==="
|
||||
echo "Started at $(date -u)"
|
||||
|
||||
# Wait for network/DNS
|
||||
for i in $(seq 1 30); do
|
||||
if getent hosts go.dev >/dev/null 2>&1; then
|
||||
echo "Network ready after ${i}s"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
|
||||
# System packages
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
apt-get update && apt-get upgrade -y
|
||||
apt-get install -y git gcc make curl libsqlite3-dev nodejs npm htop systemd-timesyncd
|
||||
sed -i 's/^#NTP=.*/NTP=0.debian.pool.ntp.org 1.debian.pool.ntp.org 2.debian.pool.ntp.org 3.debian.pool.ntp.org/' /etc/systemd/timesyncd.conf
|
||||
timedatectl set-ntp true
|
||||
|
||||
# Swap (for small instances)
|
||||
if [ ! -f /swapfile ]; then
|
||||
dd if=/dev/zero of=/swapfile bs=1M count=2048
|
||||
chmod 600 /swapfile && mkswap /swapfile && swapon /swapfile
|
||||
echo '/swapfile none swap sw 0 0' >> /etc/fstab
|
||||
fi
|
||||
|
||||
# Install directory (binaries deployed via SCP)
|
||||
mkdir -p {{.InstallDir}}/bin
|
||||
|
||||
# Service user & data dirs
|
||||
useradd --system --no-create-home --shell /usr/sbin/nologin {{.SystemUser}} || true
|
||||
mkdir -p {{.DataDir}} && chown {{.SystemUser}}:{{.SystemUser}} {{.DataDir}}
|
||||
|
||||
# Config file
|
||||
mkdir -p {{.ConfigDir}}
|
||||
if [ ! -f {{.ConfigPath}} ]; then
|
||||
cat > {{.ConfigPath}} << 'CFGEOF'
|
||||
{{.ConfigYAML}}
|
||||
CFGEOF
|
||||
else
|
||||
echo "Config {{.ConfigPath}} already exists, skipping overwrite (missing keys merged separately)"
|
||||
fi
|
||||
|
||||
# Systemd service
|
||||
cat > /etc/systemd/system/{{.ServiceName}}.service << 'SVCEOF'
|
||||
{{.ServiceUnit}}
|
||||
SVCEOF
|
||||
systemctl daemon-reload
|
||||
systemctl enable {{.ServiceName}}
|
||||
|
||||
echo "=== Setup complete at $(date -u) ==="
|
||||
64
deploy/upcloud/configs/hold.yaml.tmpl
Normal file
64
deploy/upcloud/configs/hold.yaml.tmpl
Normal file
@@ -0,0 +1,64 @@
|
||||
version: "0.1"
|
||||
log_level: info
|
||||
log_shipper:
|
||||
backend: ""
|
||||
url: ""
|
||||
batch_size: 100
|
||||
flush_interval: 5s
|
||||
username: ""
|
||||
password: ""
|
||||
storage:
|
||||
access_key: "{{.S3AccessKey}}"
|
||||
secret_key: "{{.S3SecretKey}}"
|
||||
region: "{{.S3Region}}"
|
||||
bucket: "{{.S3Bucket}}"
|
||||
endpoint: "{{.S3Endpoint}}"
|
||||
pull_zone: ""
|
||||
server:
|
||||
addr: :8080
|
||||
public_url: "https://{{.HoldDomain}}"
|
||||
public: false
|
||||
successor: ""
|
||||
test_mode: false
|
||||
relay_endpoint: ""
|
||||
appview_did: did:web:seamark.dev
|
||||
read_timeout: 5m0s
|
||||
write_timeout: 5m0s
|
||||
registration:
|
||||
owner_did: "did:plc:pddp4xt5lgnv2qsegbzzs4xg"
|
||||
allow_all_crew: true
|
||||
profile_avatar_url: https://{{.HoldDomain}}/web-app-manifest-192x192.png
|
||||
profile_display_name: Cargo Hold
|
||||
profile_description: ahoy from the cargo hold
|
||||
enable_bluesky_posts: false
|
||||
region: ""
|
||||
database:
|
||||
path: "{{.BasePath}}"
|
||||
key_path: ""
|
||||
did_method: web
|
||||
did: ""
|
||||
plc_directory_url: https://plc.directory
|
||||
rotation_key: ""
|
||||
libsql_sync_url: ""
|
||||
libsql_auth_token: ""
|
||||
libsql_sync_interval: 1m0s
|
||||
admin:
|
||||
enabled: true
|
||||
gc:
|
||||
enabled: false
|
||||
quota:
|
||||
tiers:
|
||||
- name: deckhand
|
||||
quota: 5GB
|
||||
- name: bosun
|
||||
quota: 50GB
|
||||
scan_on_push: true
|
||||
- name: quartermaster
|
||||
quota: 100GB
|
||||
scan_on_push: true
|
||||
defaults:
|
||||
new_crew_tier: deckhand
|
||||
scanner:
|
||||
secret: "{{.ScannerSecret}}"
|
||||
rescan_interval: 168h0m0s
|
||||
|
||||
21
deploy/upcloud/configs/scanner.yaml.tmpl
Normal file
21
deploy/upcloud/configs/scanner.yaml.tmpl
Normal file
@@ -0,0 +1,21 @@
|
||||
version: "0.1"
|
||||
log_level: info
|
||||
log_shipper:
|
||||
backend: ""
|
||||
url: ""
|
||||
batch_size: 100
|
||||
flush_interval: 5s
|
||||
username: ""
|
||||
password: ""
|
||||
server:
|
||||
addr: :9090
|
||||
hold:
|
||||
url: "ws://localhost:8080"
|
||||
secret: "{{.ScannerSecret}}"
|
||||
scanner:
|
||||
workers: 2
|
||||
queue_size: 100
|
||||
vuln:
|
||||
enabled: true
|
||||
db_path: "{{.BasePath}}/scanner/vulndb"
|
||||
tmp_dir: "{{.BasePath}}/scanner/tmp"
|
||||
47
deploy/upcloud/go.mod
Normal file
47
deploy/upcloud/go.mod
Normal file
@@ -0,0 +1,47 @@
|
||||
module atcr.io/deploy
|
||||
|
||||
go 1.25.7
|
||||
|
||||
require (
|
||||
github.com/UpCloudLtd/upcloud-go-api/v8 v8.34.3
|
||||
github.com/charmbracelet/huh v0.8.0
|
||||
github.com/spf13/cobra v1.10.2
|
||||
go.yaml.in/yaml/v3 v3.0.4
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/atotto/clipboard v0.1.4 // indirect
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
|
||||
github.com/catppuccin/go v0.3.0 // indirect
|
||||
github.com/charmbracelet/bubbles v1.0.0 // indirect
|
||||
github.com/charmbracelet/bubbletea v1.3.10 // indirect
|
||||
github.com/charmbracelet/colorprofile v0.4.2 // indirect
|
||||
github.com/charmbracelet/lipgloss v1.1.0 // indirect
|
||||
github.com/charmbracelet/x/ansi v0.11.6 // indirect
|
||||
github.com/charmbracelet/x/cellbuf v0.0.15 // indirect
|
||||
github.com/charmbracelet/x/exp/strings v0.1.0 // indirect
|
||||
github.com/charmbracelet/x/term v0.2.2 // indirect
|
||||
github.com/clipperhouse/displaywidth v0.10.0 // indirect
|
||||
github.com/clipperhouse/uax29/v2 v2.6.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.3.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-localereader v0.0.2-0.20220822084749-2491eb6c1c75 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.19 // indirect
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
|
||||
github.com/muesli/cancelreader v0.2.2 // indirect
|
||||
github.com/muesli/termenv v0.16.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/rogpeppe/go-internal v1.14.1 // indirect
|
||||
github.com/spf13/pflag v1.0.10 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect
|
||||
golang.org/x/sys v0.41.0 // indirect
|
||||
golang.org/x/text v0.34.0 // indirect
|
||||
)
|
||||
109
deploy/upcloud/go.sum
Normal file
109
deploy/upcloud/go.sum
Normal file
@@ -0,0 +1,109 @@
|
||||
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
|
||||
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
|
||||
github.com/UpCloudLtd/upcloud-go-api/v8 v8.34.3 h1:7ba03u4L5LafZPVO2k6B0/f114k5dFF3GtAN7FEKfno=
|
||||
github.com/UpCloudLtd/upcloud-go-api/v8 v8.34.3/go.mod h1:NBh1d/ip1bhdAIhuPWbyPme7tbLzDTV7dhutUmU1vg8=
|
||||
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
|
||||
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
|
||||
github.com/aymanbagabas/go-udiff v0.3.1 h1:LV+qyBQ2pqe0u42ZsUEtPiCaUoqgA9gYRDs3vj1nolY=
|
||||
github.com/aymanbagabas/go-udiff v0.3.1/go.mod h1:G0fsKmG+P6ylD0r6N/KgQD/nWzgfnl8ZBcNLgcbrw8E=
|
||||
github.com/catppuccin/go v0.3.0 h1:d+0/YicIq+hSTo5oPuRi5kOpqkVA5tAsU6dNhvRu+aY=
|
||||
github.com/catppuccin/go v0.3.0/go.mod h1:8IHJuMGaUUjQM82qBrGNBv7LFq6JI3NnQCF6MOlZjpc=
|
||||
github.com/charmbracelet/bubbles v1.0.0 h1:12J8/ak/uCZEMQ6KU7pcfwceyjLlWsDLAxB5fXonfvc=
|
||||
github.com/charmbracelet/bubbles v1.0.0/go.mod h1:9d/Zd5GdnauMI5ivUIVisuEm3ave1XwXtD1ckyV6r3E=
|
||||
github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
|
||||
github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4=
|
||||
github.com/charmbracelet/colorprofile v0.4.2 h1:BdSNuMjRbotnxHSfxy+PCSa4xAmz7szw70ktAtWRYrY=
|
||||
github.com/charmbracelet/colorprofile v0.4.2/go.mod h1:0rTi81QpwDElInthtrQ6Ni7cG0sDtwAd4C4le060fT8=
|
||||
github.com/charmbracelet/huh v0.8.0 h1:Xz/Pm2h64cXQZn/Jvele4J3r7DDiqFCNIVteYukxDvY=
|
||||
github.com/charmbracelet/huh v0.8.0/go.mod h1:5YVc+SlZ1IhQALxRPpkGwwEKftN/+OlJlnJYlDRFqN4=
|
||||
github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
|
||||
github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
|
||||
github.com/charmbracelet/x/ansi v0.11.6 h1:GhV21SiDz/45W9AnV2R61xZMRri5NlLnl6CVF7ihZW8=
|
||||
github.com/charmbracelet/x/ansi v0.11.6/go.mod h1:2JNYLgQUsyqaiLovhU2Rv/pb8r6ydXKS3NIttu3VGZQ=
|
||||
github.com/charmbracelet/x/cellbuf v0.0.15 h1:ur3pZy0o6z/R7EylET877CBxaiE1Sp1GMxoFPAIztPI=
|
||||
github.com/charmbracelet/x/cellbuf v0.0.15/go.mod h1:J1YVbR7MUuEGIFPCaaZ96KDl5NoS0DAWkskup+mOY+Q=
|
||||
github.com/charmbracelet/x/conpty v0.1.0 h1:4zc8KaIcbiL4mghEON8D72agYtSeIgq8FSThSPQIb+U=
|
||||
github.com/charmbracelet/x/conpty v0.1.0/go.mod h1:rMFsDJoDwVmiYM10aD4bH2XiRgwI7NYJtQgl5yskjEQ=
|
||||
github.com/charmbracelet/x/errors v0.0.0-20240508181413-e8d8b6e2de86 h1:JSt3B+U9iqk37QUU2Rvb6DSBYRLtWqFqfxf8l5hOZUA=
|
||||
github.com/charmbracelet/x/errors v0.0.0-20240508181413-e8d8b6e2de86/go.mod h1:2P0UgXMEa6TsToMSuFqKFQR+fZTO9CNGUNokkPatT/0=
|
||||
github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ=
|
||||
github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
|
||||
github.com/charmbracelet/x/exp/strings v0.1.0 h1:i69S2XI7uG1u4NLGeJPSYU++Nmjvpo9nwd6aoEm7gkA=
|
||||
github.com/charmbracelet/x/exp/strings v0.1.0/go.mod h1:/ehtMPNh9K4odGFkqYJKpIYyePhdp1hLBRvyY4bWkH8=
|
||||
github.com/charmbracelet/x/term v0.2.2 h1:xVRT/S2ZcKdhhOuSP4t5cLi5o+JxklsoEObBSgfgZRk=
|
||||
github.com/charmbracelet/x/term v0.2.2/go.mod h1:kF8CY5RddLWrsgVwpw4kAa6TESp6EB5y3uxGLeCqzAI=
|
||||
github.com/charmbracelet/x/termios v0.1.1 h1:o3Q2bT8eqzGnGPOYheoYS8eEleT5ZVNYNy8JawjaNZY=
|
||||
github.com/charmbracelet/x/termios v0.1.1/go.mod h1:rB7fnv1TgOPOyyKRJ9o+AsTU/vK5WHJ2ivHeut/Pcwo=
|
||||
github.com/charmbracelet/x/xpty v0.1.2 h1:Pqmu4TEJ8KeA9uSkISKMU3f+C1F6OGBn8ABuGlqCbtI=
|
||||
github.com/charmbracelet/x/xpty v0.1.2/go.mod h1:XK2Z0id5rtLWcpeNiMYBccNNBrP2IJnzHI0Lq13Xzq4=
|
||||
github.com/clipperhouse/displaywidth v0.10.0 h1:GhBG8WuerxjFQQYeuZAeVTuyxuX+UraiZGD4HJQ3Y8g=
|
||||
github.com/clipperhouse/displaywidth v0.10.0/go.mod h1:XqJajYsaiEwkxOj4bowCTMcT1SgvHo9flfF3jQasdbs=
|
||||
github.com/clipperhouse/uax29/v2 v2.6.0 h1:z0cDbUV+aPASdFb2/ndFnS9ts/WNXgTNNGFoKXuhpos=
|
||||
github.com/clipperhouse/uax29/v2 v2.6.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
|
||||
github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
|
||||
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag=
|
||||
github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-localereader v0.0.2-0.20220822084749-2491eb6c1c75 h1:P8UmIzZMYDR+NGImiFvErt6VWfIRPuGM+vyjiEdkmIw=
|
||||
github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw=
|
||||
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
|
||||
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
|
||||
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
|
||||
github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
|
||||
github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
||||
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05ST2uO1exVfZPVqRC5o=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
||||
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
|
||||
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
13
deploy/upcloud/goversion.go
Normal file
13
deploy/upcloud/goversion.go
Normal file
@@ -0,0 +1,13 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// projectRoot returns the absolute path to the repository root,
|
||||
// derived from the compile-time source file location.
|
||||
func projectRoot() string {
|
||||
_, thisFile, _, _ := runtime.Caller(0)
|
||||
return filepath.Join(filepath.Dir(thisFile), "..", "..")
|
||||
}
|
||||
23
deploy/upcloud/main.go
Normal file
23
deploy/upcloud/main.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "upcloud",
|
||||
Short: "ATCR infrastructure provisioning tool for UpCloud",
|
||||
SilenceUsage: true,
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.PersistentFlags().StringP("token", "t", "", "UpCloud API token (env: UPCLOUD_TOKEN)")
|
||||
}
|
||||
|
||||
func main() {
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
61
deploy/upcloud/naming.go
Normal file
61
deploy/upcloud/naming.go
Normal file
@@ -0,0 +1,61 @@
|
||||
package main
|
||||
|
||||
import "strings"
|
||||
|
||||
// Naming derives all infrastructure names and paths from a single ClientName.
|
||||
type Naming struct {
|
||||
ClientName string // e.g. "seamark"
|
||||
}
|
||||
|
||||
// DisplayName returns the title-cased client name (e.g. "Seamark").
|
||||
func (n Naming) DisplayName() string {
|
||||
if n.ClientName == "" {
|
||||
return ""
|
||||
}
|
||||
return strings.ToUpper(n.ClientName[:1]) + n.ClientName[1:]
|
||||
}
|
||||
|
||||
// SystemUser returns the unix user name.
|
||||
func (n Naming) SystemUser() string { return n.ClientName }
|
||||
|
||||
// InstallDir returns the source/build directory (e.g. "/opt/seamark").
|
||||
func (n Naming) InstallDir() string { return "/opt/" + n.ClientName }
|
||||
|
||||
// ConfigDir returns the config directory (e.g. "/etc/seamark").
|
||||
func (n Naming) ConfigDir() string { return "/etc/" + n.ClientName }
|
||||
|
||||
// BasePath returns the data directory (e.g. "/var/lib/seamark").
|
||||
func (n Naming) BasePath() string { return "/var/lib/" + n.ClientName }
|
||||
|
||||
// LogFile returns the setup log path (e.g. "/var/log/seamark-setup.log").
|
||||
func (n Naming) LogFile() string { return "/var/log/" + n.ClientName + "-setup.log" }
|
||||
|
||||
// Appview returns the appview binary/service/server name (e.g. "seamark-appview").
|
||||
func (n Naming) Appview() string { return n.ClientName + "-appview" }
|
||||
|
||||
// Hold returns the hold binary/service/server name (e.g. "seamark-hold").
|
||||
func (n Naming) Hold() string { return n.ClientName + "-hold" }
|
||||
|
||||
// AppviewConfigPath returns the appview config file path.
|
||||
func (n Naming) AppviewConfigPath() string { return n.ConfigDir() + "/appview.yaml" }
|
||||
|
||||
// HoldConfigPath returns the hold config file path.
|
||||
func (n Naming) HoldConfigPath() string { return n.ConfigDir() + "/hold.yaml" }
|
||||
|
||||
// NetworkName returns the private network name (e.g. "seamark-private").
|
||||
func (n Naming) NetworkName() string { return n.ClientName + "-private" }
|
||||
|
||||
// LBName returns the load balancer name (e.g. "seamark-lb").
|
||||
func (n Naming) LBName() string { return n.ClientName + "-lb" }
|
||||
|
||||
// Scanner returns the scanner binary/service name (e.g. "seamark-scanner").
|
||||
func (n Naming) Scanner() string { return n.ClientName + "-scanner" }
|
||||
|
||||
// ScannerConfigPath returns the scanner config file path.
|
||||
func (n Naming) ScannerConfigPath() string { return n.ConfigDir() + "/scanner.yaml" }
|
||||
|
||||
// ScannerDataDir returns the scanner data directory (e.g. "/var/lib/seamark/scanner").
|
||||
func (n Naming) ScannerDataDir() string { return n.BasePath() + "/scanner" }
|
||||
|
||||
// S3Name returns the name used for S3 storage, user, and bucket.
|
||||
func (n Naming) S3Name() string { return n.ClientName }
|
||||
88
deploy/upcloud/picker.go
Normal file
88
deploy/upcloud/picker.go
Normal file
@@ -0,0 +1,88 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/UpCloudLtd/upcloud-go-api/v8/upcloud"
|
||||
"github.com/UpCloudLtd/upcloud-go-api/v8/upcloud/service"
|
||||
"github.com/charmbracelet/huh"
|
||||
)
|
||||
|
||||
// pickZone fetches available zones from the UpCloud API and presents an
|
||||
// interactive selector. Only public zones are shown.
|
||||
func pickZone(ctx context.Context, svc *service.Service) (string, error) {
|
||||
resp, err := svc.GetZones(ctx)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("fetch zones: %w", err)
|
||||
}
|
||||
|
||||
var opts []huh.Option[string]
|
||||
for _, z := range resp.Zones {
|
||||
if z.Public != upcloud.True {
|
||||
continue
|
||||
}
|
||||
label := fmt.Sprintf("%s — %s", z.ID, z.Description)
|
||||
opts = append(opts, huh.NewOption(label, z.ID))
|
||||
}
|
||||
|
||||
if len(opts) == 0 {
|
||||
return "", fmt.Errorf("no public zones available")
|
||||
}
|
||||
|
||||
sort.Slice(opts, func(i, j int) bool {
|
||||
return opts[i].Value < opts[j].Value
|
||||
})
|
||||
|
||||
var zone string
|
||||
err = huh.NewSelect[string]().
|
||||
Title("Select a zone").
|
||||
Options(opts...).
|
||||
Value(&zone).
|
||||
Run()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return zone, nil
|
||||
}
|
||||
|
||||
// pickPlan fetches available plans from the UpCloud API and presents an
|
||||
// interactive selector. GPU plans are filtered out.
|
||||
func pickPlan(ctx context.Context, svc *service.Service) (string, error) {
|
||||
resp, err := svc.GetPlans(ctx)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("fetch plans: %w", err)
|
||||
}
|
||||
|
||||
var opts []huh.Option[string]
|
||||
for _, p := range resp.Plans {
|
||||
if p.GPUAmount > 0 {
|
||||
continue
|
||||
}
|
||||
memGB := p.MemoryAmount / 1024
|
||||
label := fmt.Sprintf("%s — %d CPU, %d GB RAM, %d GB disk", p.Name, p.CoreNumber, memGB, p.StorageSize)
|
||||
opts = append(opts, huh.NewOption(label, p.Name))
|
||||
}
|
||||
|
||||
if len(opts) == 0 {
|
||||
return "", fmt.Errorf("no plans available")
|
||||
}
|
||||
|
||||
sort.Slice(opts, func(i, j int) bool {
|
||||
return opts[i].Value < opts[j].Value
|
||||
})
|
||||
|
||||
var plan string
|
||||
err = huh.NewSelect[string]().
|
||||
Title("Select a plan").
|
||||
Options(opts...).
|
||||
Value(&plan).
|
||||
Run()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return plan, nil
|
||||
}
|
||||
1249
deploy/upcloud/provision.go
Normal file
1249
deploy/upcloud/provision.go
Normal file
File diff suppressed because it is too large
Load Diff
93
deploy/upcloud/state.go
Normal file
93
deploy/upcloud/state.go
Normal file
@@ -0,0 +1,93 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// InfraState persists infrastructure resource UUIDs between commands.
|
||||
type InfraState struct {
|
||||
Zone string `json:"zone"`
|
||||
ClientName string `json:"client_name,omitempty"`
|
||||
RepoBranch string `json:"repo_branch,omitempty"`
|
||||
Network StateRef `json:"network"`
|
||||
Appview ServerState `json:"appview"`
|
||||
Hold ServerState `json:"hold"`
|
||||
LB StateRef `json:"loadbalancer"`
|
||||
ObjectStorage ObjectStorageState `json:"object_storage"`
|
||||
ScannerEnabled bool `json:"scanner_enabled,omitempty"`
|
||||
ScannerSecret string `json:"scanner_secret,omitempty"`
|
||||
}
|
||||
|
||||
// Naming returns a Naming helper, defaulting to "seamark" if ClientName is empty.
|
||||
func (s *InfraState) Naming() Naming {
|
||||
name := s.ClientName
|
||||
if name == "" {
|
||||
name = "seamark"
|
||||
}
|
||||
return Naming{ClientName: name}
|
||||
}
|
||||
|
||||
// Branch returns the repo branch, defaulting to "main" if empty.
|
||||
func (s *InfraState) Branch() string {
|
||||
if s.RepoBranch == "" {
|
||||
return "main"
|
||||
}
|
||||
return s.RepoBranch
|
||||
}
|
||||
|
||||
type StateRef struct {
|
||||
UUID string `json:"uuid"`
|
||||
}
|
||||
|
||||
type ServerState struct {
|
||||
UUID string `json:"server_uuid"`
|
||||
PublicIP string `json:"public_ip"`
|
||||
PrivateIP string `json:"private_ip"`
|
||||
}
|
||||
|
||||
type ObjectStorageState struct {
|
||||
UUID string `json:"uuid"`
|
||||
Endpoint string `json:"endpoint"`
|
||||
Region string `json:"region"`
|
||||
Bucket string `json:"bucket"`
|
||||
AccessKeyID string `json:"access_key_id"`
|
||||
}
|
||||
|
||||
func statePath() string {
|
||||
_, thisFile, _, _ := runtime.Caller(0)
|
||||
return filepath.Join(filepath.Dir(thisFile), "state.json")
|
||||
}
|
||||
|
||||
func loadState() (*InfraState, error) {
|
||||
data, err := os.ReadFile(statePath())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read state.json: %w (run 'provision' first)", err)
|
||||
}
|
||||
var st InfraState
|
||||
if err := json.Unmarshal(data, &st); err != nil {
|
||||
return nil, fmt.Errorf("parse state.json: %w", err)
|
||||
}
|
||||
return &st, nil
|
||||
}
|
||||
|
||||
func saveState(st *InfraState) error {
|
||||
data, err := json.MarshalIndent(st, "", " ")
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal state: %w", err)
|
||||
}
|
||||
if err := os.WriteFile(statePath(), data, 0644); err != nil {
|
||||
return fmt.Errorf("write state.json: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func deleteState() error {
|
||||
if err := os.Remove(statePath()); err != nil && !os.IsNotExist(err) {
|
||||
return fmt.Errorf("remove state.json: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
145
deploy/upcloud/status.go
Normal file
145
deploy/upcloud/status.go
Normal file
@@ -0,0 +1,145 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/UpCloudLtd/upcloud-go-api/v8/upcloud/request"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var statusCmd = &cobra.Command{
|
||||
Use: "status",
|
||||
Short: "Show infrastructure state and health",
|
||||
Args: cobra.NoArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
token, _ := cmd.Root().PersistentFlags().GetString("token")
|
||||
return cmdStatus(token)
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(statusCmd)
|
||||
}
|
||||
|
||||
func cmdStatus(token string) error {
|
||||
state, err := loadState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
naming := state.Naming()
|
||||
|
||||
svc, err := newService(token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
fmt.Printf("Zone: %s\n\n", state.Zone)
|
||||
|
||||
// Server status
|
||||
for _, s := range []struct {
|
||||
name string
|
||||
ss ServerState
|
||||
serviceName string
|
||||
healthURL string
|
||||
}{
|
||||
{"Appview", state.Appview, naming.Appview(), "http://localhost:5000/health"},
|
||||
{"Hold", state.Hold, naming.Hold(), "http://localhost:8080/xrpc/_health"},
|
||||
} {
|
||||
fmt.Printf("%-8s UUID: %s\n", s.name, s.ss.UUID)
|
||||
fmt.Printf(" Public: %s\n", s.ss.PublicIP)
|
||||
fmt.Printf(" Private: %s\n", s.ss.PrivateIP)
|
||||
|
||||
if s.ss.UUID != "" {
|
||||
details, err := svc.GetServerDetails(ctx, &request.GetServerDetailsRequest{
|
||||
UUID: s.ss.UUID,
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Printf(" State: error (%v)\n", err)
|
||||
} else {
|
||||
fmt.Printf(" State: %s\n", details.State)
|
||||
}
|
||||
}
|
||||
|
||||
// SSH health check
|
||||
if s.ss.PublicIP != "" {
|
||||
output, err := runSSH(s.ss.PublicIP, fmt.Sprintf(
|
||||
"systemctl is-active %s 2>/dev/null || echo 'inactive'; curl -sf %s > /dev/null 2>&1 && echo 'health:ok' || echo 'health:fail'",
|
||||
s.serviceName, s.healthURL,
|
||||
), false)
|
||||
if err != nil {
|
||||
fmt.Printf(" Service: unreachable\n")
|
||||
} else {
|
||||
lines := strings.Split(strings.TrimSpace(output), "\n")
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "active" || line == "inactive" {
|
||||
fmt.Printf(" Service: %s\n", line)
|
||||
} else if strings.HasPrefix(line, "health:") {
|
||||
fmt.Printf(" Health: %s\n", strings.TrimPrefix(line, "health:"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Scanner status (runs on hold server)
|
||||
if state.ScannerEnabled {
|
||||
fmt.Printf("Scanner (on hold server)\n")
|
||||
if state.Hold.PublicIP != "" {
|
||||
output, err := runSSH(state.Hold.PublicIP, fmt.Sprintf(
|
||||
"systemctl is-active %s 2>/dev/null || echo 'inactive'; curl -sf http://localhost:9090/healthz > /dev/null 2>&1 && echo 'health:ok' || echo 'health:fail'",
|
||||
naming.Scanner(),
|
||||
), false)
|
||||
if err != nil {
|
||||
fmt.Printf(" Service: unreachable\n")
|
||||
} else {
|
||||
lines := strings.Split(strings.TrimSpace(output), "\n")
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "active" || line == "inactive" {
|
||||
fmt.Printf(" Service: %s\n", line)
|
||||
} else if strings.HasPrefix(line, "health:") {
|
||||
fmt.Printf(" Health: %s\n", strings.TrimPrefix(line, "health:"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// LB status
|
||||
if state.LB.UUID != "" {
|
||||
fmt.Printf("Load Balancer: %s\n", state.LB.UUID)
|
||||
lb, err := svc.GetLoadBalancer(ctx, &request.GetLoadBalancerRequest{
|
||||
UUID: state.LB.UUID,
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Printf(" State: error (%v)\n", err)
|
||||
} else {
|
||||
fmt.Printf(" State: %s\n", lb.OperationalState)
|
||||
for _, n := range lb.Networks {
|
||||
fmt.Printf(" Network (%s): %s\n", n.Type, n.DNSName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("\nNetwork: %s\n", state.Network.UUID)
|
||||
|
||||
if state.ObjectStorage.UUID != "" {
|
||||
fmt.Printf("\nObject Storage: %s\n", state.ObjectStorage.UUID)
|
||||
fmt.Printf(" Endpoint: %s\n", state.ObjectStorage.Endpoint)
|
||||
fmt.Printf(" Region: %s\n", state.ObjectStorage.Region)
|
||||
fmt.Printf(" Bucket: %s\n", state.ObjectStorage.Bucket)
|
||||
fmt.Printf(" Access Key: %s\n", state.ObjectStorage.AccessKeyID)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
25
deploy/upcloud/systemd/appview.service.tmpl
Normal file
25
deploy/upcloud/systemd/appview.service.tmpl
Normal file
@@ -0,0 +1,25 @@
|
||||
[Unit]
|
||||
Description={{.DisplayName}} AppView (Registry + Web UI)
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User={{.User}}
|
||||
Group={{.User}}
|
||||
ExecStart={{.BinaryPath}} serve --config {{.ConfigPath}}
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
ReadWritePaths={{.DataDir}}
|
||||
ProtectSystem=strict
|
||||
ProtectHome=yes
|
||||
NoNewPrivileges=yes
|
||||
PrivateTmp=yes
|
||||
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier={{.ServiceName}}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
25
deploy/upcloud/systemd/hold.service.tmpl
Normal file
25
deploy/upcloud/systemd/hold.service.tmpl
Normal file
@@ -0,0 +1,25 @@
|
||||
[Unit]
|
||||
Description={{.DisplayName}} Hold (Storage Service)
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User={{.User}}
|
||||
Group={{.User}}
|
||||
ExecStart={{.BinaryPath}} serve --config {{.ConfigPath}}
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
ReadWritePaths={{.DataDir}}
|
||||
ProtectSystem=strict
|
||||
ProtectHome=yes
|
||||
NoNewPrivileges=yes
|
||||
PrivateTmp=yes
|
||||
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier={{.ServiceName}}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
25
deploy/upcloud/systemd/scanner.service.tmpl
Normal file
25
deploy/upcloud/systemd/scanner.service.tmpl
Normal file
@@ -0,0 +1,25 @@
|
||||
[Unit]
|
||||
Description={{.DisplayName}} Scanner (Vulnerability Scanning)
|
||||
After=network-online.target {{.HoldServiceName}}.service
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User={{.User}}
|
||||
Group={{.User}}
|
||||
ExecStart={{.BinaryPath}} serve --config {{.ConfigPath}}
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
|
||||
ReadWritePaths={{.DataDir}}
|
||||
ProtectSystem=strict
|
||||
ProtectHome=yes
|
||||
NoNewPrivileges=yes
|
||||
PrivateTmp=yes
|
||||
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier={{.ServiceName}}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
121
deploy/upcloud/teardown.go
Normal file
121
deploy/upcloud/teardown.go
Normal file
@@ -0,0 +1,121 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/UpCloudLtd/upcloud-go-api/v8/upcloud/request"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var teardownCmd = &cobra.Command{
|
||||
Use: "teardown",
|
||||
Short: "Destroy all infrastructure",
|
||||
Args: cobra.NoArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
token, _ := cmd.Root().PersistentFlags().GetString("token")
|
||||
return cmdTeardown(token)
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(teardownCmd)
|
||||
}
|
||||
|
||||
func cmdTeardown(token string) error {
|
||||
state, err := loadState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
naming := state.Naming()
|
||||
|
||||
// Confirmation prompt
|
||||
fmt.Printf("This will DESTROY all %s infrastructure:\n", naming.DisplayName())
|
||||
fmt.Printf(" Zone: %s\n", state.Zone)
|
||||
fmt.Printf(" Appview: %s (%s)\n", state.Appview.UUID, state.Appview.PublicIP)
|
||||
fmt.Printf(" Hold: %s (%s)\n", state.Hold.UUID, state.Hold.PublicIP)
|
||||
fmt.Printf(" Network: %s\n", state.Network.UUID)
|
||||
fmt.Printf(" LB: %s\n", state.LB.UUID)
|
||||
fmt.Println()
|
||||
fmt.Print("Type 'yes' to confirm: ")
|
||||
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
scanner.Scan()
|
||||
if strings.TrimSpace(scanner.Text()) != "yes" {
|
||||
fmt.Println("Aborted.")
|
||||
return nil
|
||||
}
|
||||
|
||||
svc, err := newService(token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
// Delete LB first (depends on network)
|
||||
if state.LB.UUID != "" {
|
||||
fmt.Printf("Deleting load balancer %s...\n", state.LB.UUID)
|
||||
if err := svc.DeleteLoadBalancer(ctx, &request.DeleteLoadBalancerRequest{
|
||||
UUID: state.LB.UUID,
|
||||
}); err != nil {
|
||||
fmt.Printf(" Warning: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Stop and delete servers (must stop before delete, and delete storage)
|
||||
for _, s := range []struct {
|
||||
name string
|
||||
uuid string
|
||||
}{
|
||||
{"appview", state.Appview.UUID},
|
||||
{"hold", state.Hold.UUID},
|
||||
} {
|
||||
if s.uuid == "" {
|
||||
continue
|
||||
}
|
||||
fmt.Printf("Stopping server %s (%s)...\n", s.name, s.uuid)
|
||||
_, err := svc.StopServer(ctx, &request.StopServerRequest{
|
||||
UUID: s.uuid,
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Printf(" Warning (stop): %v\n", err)
|
||||
} else {
|
||||
_, _ = svc.WaitForServerState(ctx, &request.WaitForServerStateRequest{
|
||||
UUID: s.uuid,
|
||||
DesiredState: "stopped",
|
||||
})
|
||||
}
|
||||
|
||||
fmt.Printf("Deleting server %s...\n", s.name)
|
||||
if err := svc.DeleteServerAndStorages(ctx, &request.DeleteServerAndStoragesRequest{
|
||||
UUID: s.uuid,
|
||||
}); err != nil {
|
||||
fmt.Printf(" Warning (delete): %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Delete network (after servers are gone)
|
||||
if state.Network.UUID != "" {
|
||||
fmt.Printf("Deleting network %s...\n", state.Network.UUID)
|
||||
if err := svc.DeleteNetwork(ctx, &request.DeleteNetworkRequest{
|
||||
UUID: state.Network.UUID,
|
||||
}); err != nil {
|
||||
fmt.Printf(" Warning: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove state file
|
||||
if err := deleteState(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("\nTeardown complete. All infrastructure destroyed.")
|
||||
return nil
|
||||
}
|
||||
410
deploy/upcloud/update.go
Normal file
410
deploy/upcloud/update.go
Normal file
@@ -0,0 +1,410 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var updateCmd = &cobra.Command{
|
||||
Use: "update [target]",
|
||||
Short: "Deploy updates to servers",
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
ValidArgs: []string{"all", "appview", "hold"},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
target := "all"
|
||||
if len(args) > 0 {
|
||||
target = args[0]
|
||||
}
|
||||
withScanner, _ := cmd.Flags().GetBool("with-scanner")
|
||||
return cmdUpdate(target, withScanner)
|
||||
},
|
||||
}
|
||||
|
||||
var sshCmd = &cobra.Command{
|
||||
Use: "ssh <target>",
|
||||
Short: "SSH into a server",
|
||||
Args: cobra.ExactArgs(1),
|
||||
ValidArgs: []string{"appview", "hold"},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmdSSH(args[0])
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
updateCmd.Flags().Bool("with-scanner", false, "Enable and deploy vulnerability scanner alongside hold")
|
||||
rootCmd.AddCommand(updateCmd)
|
||||
rootCmd.AddCommand(sshCmd)
|
||||
}
|
||||
|
||||
func cmdUpdate(target string, withScanner bool) error {
|
||||
state, err := loadState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
naming := state.Naming()
|
||||
rootDir := projectRoot()
|
||||
|
||||
// Enable scanner retroactively via --with-scanner on update
|
||||
if withScanner && !state.ScannerEnabled {
|
||||
state.ScannerEnabled = true
|
||||
if state.ScannerSecret == "" {
|
||||
secret, err := generateScannerSecret()
|
||||
if err != nil {
|
||||
return fmt.Errorf("generate scanner secret: %w", err)
|
||||
}
|
||||
state.ScannerSecret = secret
|
||||
fmt.Printf("Generated scanner shared secret\n")
|
||||
}
|
||||
_ = saveState(state)
|
||||
}
|
||||
|
||||
vals := configValsFromState(state)
|
||||
|
||||
targets := map[string]struct {
|
||||
ip string
|
||||
binaryName string
|
||||
buildCmd string
|
||||
localBinary string
|
||||
serviceName string
|
||||
healthURL string
|
||||
configTmpl string
|
||||
configPath string
|
||||
unitTmpl string
|
||||
}{
|
||||
"appview": {
|
||||
ip: state.Appview.PublicIP,
|
||||
binaryName: naming.Appview(),
|
||||
buildCmd: "appview",
|
||||
localBinary: "atcr-appview",
|
||||
serviceName: naming.Appview(),
|
||||
healthURL: "http://localhost:5000/health",
|
||||
configTmpl: appviewConfigTmpl,
|
||||
configPath: naming.AppviewConfigPath(),
|
||||
unitTmpl: appviewServiceTmpl,
|
||||
},
|
||||
"hold": {
|
||||
ip: state.Hold.PublicIP,
|
||||
binaryName: naming.Hold(),
|
||||
buildCmd: "hold",
|
||||
localBinary: "atcr-hold",
|
||||
serviceName: naming.Hold(),
|
||||
healthURL: "http://localhost:8080/xrpc/_health",
|
||||
configTmpl: holdConfigTmpl,
|
||||
configPath: naming.HoldConfigPath(),
|
||||
unitTmpl: holdServiceTmpl,
|
||||
},
|
||||
}
|
||||
|
||||
var toUpdate []string
|
||||
switch target {
|
||||
case "all":
|
||||
toUpdate = []string{"appview", "hold"}
|
||||
case "appview", "hold":
|
||||
toUpdate = []string{target}
|
||||
default:
|
||||
return fmt.Errorf("unknown target: %s (use: all, appview, hold)", target)
|
||||
}
|
||||
|
||||
// Run go generate before building
|
||||
if err := runGenerate(rootDir); err != nil {
|
||||
return fmt.Errorf("go generate: %w", err)
|
||||
}
|
||||
|
||||
// Build all binaries locally before touching servers
|
||||
fmt.Println("Building locally (GOOS=linux GOARCH=amd64)...")
|
||||
for _, name := range toUpdate {
|
||||
t := targets[name]
|
||||
outputPath := filepath.Join(rootDir, "bin", t.localBinary)
|
||||
if err := buildLocal(rootDir, outputPath, "./cmd/"+t.buildCmd); err != nil {
|
||||
return fmt.Errorf("build %s: %w", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Build scanner locally if needed
|
||||
needScanner := false
|
||||
for _, name := range toUpdate {
|
||||
if name == "hold" && state.ScannerEnabled {
|
||||
needScanner = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if needScanner {
|
||||
outputPath := filepath.Join(rootDir, "bin", "atcr-scanner")
|
||||
if err := buildLocal(filepath.Join(rootDir, "scanner"), outputPath, "./cmd/scanner"); err != nil {
|
||||
return fmt.Errorf("build scanner: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Deploy each target
|
||||
for _, name := range toUpdate {
|
||||
t := targets[name]
|
||||
fmt.Printf("\nDeploying %s (%s)...\n", name, t.ip)
|
||||
|
||||
// Sync config keys (adds missing keys from template, never overwrites)
|
||||
configYAML, err := renderConfig(t.configTmpl, vals)
|
||||
if err != nil {
|
||||
return fmt.Errorf("render %s config: %w", name, err)
|
||||
}
|
||||
if err := syncConfigKeys(name, t.ip, t.configPath, configYAML); err != nil {
|
||||
return fmt.Errorf("%s config sync: %w", name, err)
|
||||
}
|
||||
|
||||
// Sync systemd service unit
|
||||
renderedUnit, err := renderServiceUnit(t.unitTmpl, serviceUnitParams{
|
||||
DisplayName: naming.DisplayName(),
|
||||
User: naming.SystemUser(),
|
||||
BinaryPath: naming.InstallDir() + "/bin/" + t.binaryName,
|
||||
ConfigPath: t.configPath,
|
||||
DataDir: naming.BasePath(),
|
||||
ServiceName: t.serviceName,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("render %s service unit: %w", name, err)
|
||||
}
|
||||
unitChanged, err := syncServiceUnit(name, t.ip, t.serviceName, renderedUnit)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s service unit sync: %w", name, err)
|
||||
}
|
||||
|
||||
// Upload binary
|
||||
localPath := filepath.Join(rootDir, "bin", t.localBinary)
|
||||
remotePath := naming.InstallDir() + "/bin/" + t.binaryName
|
||||
if err := scpFile(localPath, t.ip, remotePath); err != nil {
|
||||
return fmt.Errorf("upload %s: %w", name, err)
|
||||
}
|
||||
|
||||
daemonReload := ""
|
||||
if unitChanged {
|
||||
daemonReload = "systemctl daemon-reload"
|
||||
}
|
||||
|
||||
// Scanner additions for hold server
|
||||
scannerRestart := ""
|
||||
scannerHealthCheck := ""
|
||||
if name == "hold" && state.ScannerEnabled {
|
||||
// Sync scanner config keys
|
||||
scannerConfigYAML, err := renderConfig(scannerConfigTmpl, vals)
|
||||
if err != nil {
|
||||
return fmt.Errorf("render scanner config: %w", err)
|
||||
}
|
||||
if err := syncConfigKeys("scanner", t.ip, naming.ScannerConfigPath(), scannerConfigYAML); err != nil {
|
||||
return fmt.Errorf("scanner config sync: %w", err)
|
||||
}
|
||||
|
||||
// Sync scanner service unit
|
||||
scannerUnit, err := renderScannerServiceUnit(scannerServiceUnitParams{
|
||||
DisplayName: naming.DisplayName(),
|
||||
User: naming.SystemUser(),
|
||||
BinaryPath: naming.InstallDir() + "/bin/" + naming.Scanner(),
|
||||
ConfigPath: naming.ScannerConfigPath(),
|
||||
DataDir: naming.BasePath(),
|
||||
ServiceName: naming.Scanner(),
|
||||
HoldServiceName: naming.Hold(),
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("render scanner service unit: %w", err)
|
||||
}
|
||||
scannerUnitChanged, err := syncServiceUnit("scanner", t.ip, naming.Scanner(), scannerUnit)
|
||||
if err != nil {
|
||||
return fmt.Errorf("scanner service unit sync: %w", err)
|
||||
}
|
||||
if scannerUnitChanged {
|
||||
daemonReload = "systemctl daemon-reload"
|
||||
}
|
||||
|
||||
// Upload scanner binary
|
||||
scannerLocal := filepath.Join(rootDir, "bin", "atcr-scanner")
|
||||
scannerRemote := naming.InstallDir() + "/bin/" + naming.Scanner()
|
||||
if err := scpFile(scannerLocal, t.ip, scannerRemote); err != nil {
|
||||
return fmt.Errorf("upload scanner: %w", err)
|
||||
}
|
||||
|
||||
// Ensure scanner data dirs exist on server
|
||||
scannerSetup := fmt.Sprintf(`mkdir -p %s/vulndb %s/tmp
|
||||
chown -R %s:%s %s`,
|
||||
naming.ScannerDataDir(), naming.ScannerDataDir(),
|
||||
naming.SystemUser(), naming.SystemUser(), naming.ScannerDataDir())
|
||||
if _, err := runSSH(t.ip, scannerSetup, false); err != nil {
|
||||
return fmt.Errorf("scanner dir setup: %w", err)
|
||||
}
|
||||
|
||||
scannerRestart = fmt.Sprintf("\nsystemctl restart %s", naming.Scanner())
|
||||
scannerHealthCheck = `
|
||||
sleep 2
|
||||
curl -sf http://localhost:9090/healthz > /dev/null && echo "SCANNER_HEALTH_OK" || echo "SCANNER_HEALTH_FAIL"
|
||||
`
|
||||
}
|
||||
|
||||
// Restart services and health check
|
||||
restartScript := fmt.Sprintf(`set -euo pipefail
|
||||
%s
|
||||
systemctl restart %s%s
|
||||
sleep 2
|
||||
curl -sf %s > /dev/null && echo "HEALTH_OK" || echo "HEALTH_FAIL"
|
||||
%s`, daemonReload, t.serviceName, scannerRestart, t.healthURL, scannerHealthCheck)
|
||||
|
||||
output, err := runSSH(t.ip, restartScript, true)
|
||||
if err != nil {
|
||||
fmt.Printf(" ERROR: %v\n", err)
|
||||
fmt.Printf(" Output: %s\n", output)
|
||||
return fmt.Errorf("restart %s failed", name)
|
||||
}
|
||||
|
||||
if strings.Contains(output, "HEALTH_OK") {
|
||||
fmt.Printf(" %s: updated and healthy\n", name)
|
||||
} else if strings.Contains(output, "HEALTH_FAIL") {
|
||||
fmt.Printf(" %s: updated but health check failed!\n", name)
|
||||
fmt.Printf(" Check: ssh root@%s journalctl -u %s -n 50\n", t.ip, t.serviceName)
|
||||
} else {
|
||||
fmt.Printf(" %s: updated (health check inconclusive)\n", name)
|
||||
}
|
||||
|
||||
// Scanner health reporting
|
||||
if name == "hold" && state.ScannerEnabled {
|
||||
if strings.Contains(output, "SCANNER_HEALTH_OK") {
|
||||
fmt.Printf(" scanner: updated and healthy\n")
|
||||
} else if strings.Contains(output, "SCANNER_HEALTH_FAIL") {
|
||||
fmt.Printf(" scanner: updated but health check failed!\n")
|
||||
fmt.Printf(" Check: ssh root@%s journalctl -u %s -n 50\n", t.ip, naming.Scanner())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// configValsFromState builds ConfigValues from persisted state.
|
||||
// S3SecretKey is intentionally left empty — syncConfigKeys only adds missing
|
||||
// keys and never overwrites, so the server's existing secret is preserved.
|
||||
func configValsFromState(state *InfraState) *ConfigValues {
|
||||
naming := state.Naming()
|
||||
_, baseDomain, _, _ := extractFromAppviewTemplate()
|
||||
holdDomain := state.Zone + ".cove." + baseDomain
|
||||
|
||||
return &ConfigValues{
|
||||
S3Endpoint: state.ObjectStorage.Endpoint,
|
||||
S3Region: state.ObjectStorage.Region,
|
||||
S3Bucket: state.ObjectStorage.Bucket,
|
||||
S3AccessKey: state.ObjectStorage.AccessKeyID,
|
||||
S3SecretKey: "", // not persisted in state; existing value on server is preserved
|
||||
Zone: state.Zone,
|
||||
HoldDomain: holdDomain,
|
||||
HoldDid: "did:web:" + holdDomain,
|
||||
BasePath: naming.BasePath(),
|
||||
ScannerSecret: state.ScannerSecret,
|
||||
}
|
||||
}
|
||||
|
||||
// runGenerate runs go generate ./... in the given directory using host OS/arch
|
||||
// (no cross-compilation env vars — generate tools must run on the build machine).
|
||||
func runGenerate(dir string) error {
|
||||
fmt.Println("Running go generate ./...")
|
||||
cmd := exec.Command("go", "generate", "./...")
|
||||
cmd.Dir = dir
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
// buildLocal compiles a Go binary locally with cross-compilation flags for linux/amd64.
|
||||
func buildLocal(dir, outputPath, buildPkg string) error {
|
||||
fmt.Printf(" building %s...\n", filepath.Base(outputPath))
|
||||
cmd := exec.Command("go", "build",
|
||||
"-ldflags=-s -w",
|
||||
"-trimpath",
|
||||
"-o", outputPath,
|
||||
buildPkg,
|
||||
)
|
||||
cmd.Dir = dir
|
||||
cmd.Env = append(os.Environ(),
|
||||
"GOOS=linux",
|
||||
"GOARCH=amd64",
|
||||
"CGO_ENABLED=1",
|
||||
)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
// scpFile uploads a local file to a remote server via SCP.
|
||||
// Removes the remote file first to avoid ETXTBSY when overwriting a running binary.
|
||||
func scpFile(localPath, ip, remotePath string) error {
|
||||
fmt.Printf(" uploading %s → %s:%s\n", filepath.Base(localPath), ip, remotePath)
|
||||
_, _ = runSSH(ip, fmt.Sprintf("rm -f %s", remotePath), false)
|
||||
cmd := exec.Command("scp",
|
||||
"-o", "StrictHostKeyChecking=accept-new",
|
||||
"-o", "ConnectTimeout=10",
|
||||
localPath,
|
||||
"root@"+ip+":"+remotePath,
|
||||
)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
func cmdSSH(target string) error {
|
||||
state, err := loadState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var ip string
|
||||
switch target {
|
||||
case "appview":
|
||||
ip = state.Appview.PublicIP
|
||||
case "hold":
|
||||
ip = state.Hold.PublicIP
|
||||
default:
|
||||
return fmt.Errorf("unknown target: %s (use: appview, hold)", target)
|
||||
}
|
||||
|
||||
fmt.Printf("Connecting to %s (%s)...\n", target, ip)
|
||||
cmd := exec.Command("ssh",
|
||||
"-o", "StrictHostKeyChecking=accept-new",
|
||||
"root@"+ip,
|
||||
)
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
func runSSH(ip, script string, stream bool) (string, error) {
|
||||
cmd := exec.Command("ssh",
|
||||
"-o", "StrictHostKeyChecking=accept-new",
|
||||
"-o", "ConnectTimeout=10",
|
||||
"root@"+ip,
|
||||
"bash -s",
|
||||
)
|
||||
cmd.Stdin = strings.NewReader(script)
|
||||
|
||||
var buf bytes.Buffer
|
||||
if stream {
|
||||
cmd.Stdout = io.MultiWriter(os.Stdout, &buf)
|
||||
cmd.Stderr = io.MultiWriter(os.Stderr, &buf)
|
||||
} else {
|
||||
cmd.Stdout = &buf
|
||||
cmd.Stderr = &buf
|
||||
}
|
||||
|
||||
// Give deploys up to 5 minutes (SCP + restart, much faster than remote builds)
|
||||
done := make(chan error, 1)
|
||||
go func() { done <- cmd.Run() }()
|
||||
|
||||
select {
|
||||
case err := <-done:
|
||||
return buf.String(), err
|
||||
case <-time.After(5 * time.Minute):
|
||||
_ = cmd.Process.Kill()
|
||||
return buf.String(), fmt.Errorf("SSH command timed out after 5 minutes")
|
||||
}
|
||||
}
|
||||
@@ -7,23 +7,32 @@ services:
|
||||
container_name: atcr-appview
|
||||
ports:
|
||||
- "5000:5000"
|
||||
env_file:
|
||||
- ../atcr-secrets.env
|
||||
# Optional: Load from .env.appview file (create from .env.appview.example)
|
||||
# env_file:
|
||||
# - .env.appview
|
||||
# Base config: config-appview.example.yaml (passed via Air entrypoint)
|
||||
# Env vars below override config file values for local dev
|
||||
environment:
|
||||
# Server configuration
|
||||
ATCR_HTTP_ADDR: :5000
|
||||
ATCR_DEFAULT_HOLD_DID: did:web:172.28.0.3:8080
|
||||
# UI configuration
|
||||
ATCR_UI_ENABLED: "true"
|
||||
ATCR_BACKFILL_ENABLED: "true"
|
||||
# Test mode - fallback to default hold when user's hold is unreachable
|
||||
TEST_MODE: "true"
|
||||
# Logging
|
||||
# ATCR_SERVER_CLIENT_NAME: "Seamark"
|
||||
# ATCR_SERVER_CLIENT_SHORT_NAME: "Seamark"
|
||||
ATCR_SERVER_MANAGED_HOLDS: did:web:172.28.0.3%3A8080
|
||||
ATCR_SERVER_DEFAULT_HOLD_DID: did:web:172.28.0.3%3A8080
|
||||
ATCR_SERVER_TEST_MODE: true
|
||||
ATCR_LOG_LEVEL: debug
|
||||
LOG_SHIPPER_BACKEND: victoria
|
||||
LOG_SHIPPER_URL: http://172.28.0.10:9428
|
||||
# Limit local Docker logs - real logs go to Victoria Logs
|
||||
# Local logs just for live tailing (docker logs -f)
|
||||
logging:
|
||||
driver: json-file
|
||||
options:
|
||||
max-size: "10m"
|
||||
max-file: "1"
|
||||
volumes:
|
||||
# Mount source code for Air hot reload
|
||||
- .:/app
|
||||
- .:/app:z
|
||||
# Cache go modules between rebuilds
|
||||
- go-mod-cache:/go/pkg/mod
|
||||
# UI database (includes OAuth sessions, devices, and Jetstream cache)
|
||||
@@ -39,30 +48,45 @@ services:
|
||||
# - Manifests/Tags -> ATProto PDS (via middleware)
|
||||
# - Blobs/Layers -> Hold service (via ProxyBlobStore)
|
||||
# - OAuth tokens -> SQLite database (atcr-ui volume)
|
||||
# - No config.yml needed - all config via environment variables
|
||||
|
||||
atcr-hold:
|
||||
env_file:
|
||||
- ../atcr-secrets.env # Load S3/Storj credentials from external file
|
||||
- ../atcr-secrets.env # Load S3/Storj credentials from external file
|
||||
# Base config: config-hold.example.yaml (passed via Air entrypoint)
|
||||
# Env vars below override config file values for local dev
|
||||
environment:
|
||||
HOLD_PUBLIC_URL: http://172.28.0.3:8080
|
||||
HOLD_OWNER: did:plc:pddp4xt5lgnv2qsegbzzs4xg
|
||||
HOLD_PUBLIC: false
|
||||
# STORAGE_DRIVER: filesystem
|
||||
# STORAGE_ROOT_DIR: /var/lib/atcr/hold
|
||||
TEST_MODE: true
|
||||
# DISABLE_PRESIGNED_URLS: true
|
||||
# Logging
|
||||
ATCR_LOG_LEVEL: debug
|
||||
# Storage config comes from env_file (STORAGE_DRIVER, AWS_*, S3_*)
|
||||
HOLD_SERVER_APPVIEW_DID: did:web:172.28.0.2%3A5000
|
||||
HOLD_SCANNER_SECRET: dev-secret
|
||||
HOLD_SERVER_PUBLIC_URL: http://172.28.0.3:8080
|
||||
HOLD_REGISTRATION_OWNER_DID: did:plc:pddp4xt5lgnv2qsegbzzs4xg
|
||||
HOLD_REGISTRATION_ALLOW_ALL_CREW: true
|
||||
HOLD_SERVER_TEST_MODE: true
|
||||
HOLD_LOG_LEVEL: debug
|
||||
LOG_SHIPPER_BACKEND: victoria
|
||||
LOG_SHIPPER_URL: http://172.28.0.10:9428
|
||||
# S3 storage config comes from env_file (AWS_*, S3_*)
|
||||
# Limit local Docker logs - real logs go to Victoria Logs
|
||||
# Local logs just for live tailing (docker logs -f)
|
||||
logging:
|
||||
driver: json-file
|
||||
options:
|
||||
max-size: "10m"
|
||||
max-file: "1"
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile.hold
|
||||
image: atcr-hold:latest
|
||||
dockerfile: Dockerfile.dev
|
||||
args:
|
||||
AIR_CONFIG: .air.hold.toml
|
||||
BILLING_ENABLED: "true"
|
||||
image: atcr-hold-dev:latest
|
||||
container_name: atcr-hold
|
||||
ports:
|
||||
- "8080:8080"
|
||||
volumes:
|
||||
# Mount source code for Air hot reload
|
||||
- .:/app:z
|
||||
# Cache go modules between rebuilds
|
||||
- go-mod-cache:/go/pkg/mod
|
||||
# PDS data (carstore SQLite + signing keys)
|
||||
- atcr-hold:/var/lib/atcr-hold
|
||||
restart: unless-stopped
|
||||
@@ -73,6 +97,23 @@ services:
|
||||
atcr-network:
|
||||
ipv4_address: 172.28.0.3
|
||||
|
||||
# Victoria Logs for centralized log storage
|
||||
# Uncomment to enable, then set LOG_SHIPPER_* env vars above
|
||||
victorialogs:
|
||||
image: victoriametrics/victoria-logs:latest
|
||||
container_name: victorialogs
|
||||
ports:
|
||||
- "9428:9428"
|
||||
volumes:
|
||||
- victorialogs-data:/victoria-logs-data
|
||||
command:
|
||||
- "-storageDataPath=/victoria-logs-data"
|
||||
- "-retentionPeriod=7d"
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
atcr-network:
|
||||
ipv4_address: 172.28.0.10
|
||||
|
||||
networks:
|
||||
atcr-network:
|
||||
driver: bridge
|
||||
@@ -85,3 +126,4 @@ volumes:
|
||||
atcr-auth:
|
||||
atcr-ui:
|
||||
go-mod-cache:
|
||||
victorialogs-data:
|
||||
|
||||
1403
docs/ADMIN_PANEL.md
Normal file
1403
docs/ADMIN_PANEL.md
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,23 +1,51 @@
|
||||
# ATCR AppView UI - Future Features
|
||||
# ATCR UI - Feature Roadmap
|
||||
|
||||
This document outlines potential features for future versions of the ATCR AppView UI, beyond the V1 MVP. These are ideas to consider as the project matures and user needs evolve.
|
||||
This document tracks the status of ATCR features beyond the V1 MVP. Features are marked with their current status:
|
||||
|
||||
- **DONE** — Fully implemented and shipping
|
||||
- **PARTIAL** — Some parts implemented
|
||||
- **BACKEND ONLY** — Backend exists, no UI yet
|
||||
- **NOT STARTED** — Future work
|
||||
- **BLOCKED** — Waiting on external dependency
|
||||
|
||||
---
|
||||
|
||||
## What's Already Built (not in original roadmap)
|
||||
|
||||
These features were implemented but weren't in the original future features list:
|
||||
|
||||
| Feature | Location | Notes |
|
||||
|---------|----------|-------|
|
||||
| **Billing (Stripe)** | `pkg/hold/billing/` | Checkout sessions, customer portal, subscription webhooks, tier upgrades. Build with `-tags billing`. |
|
||||
| **Garbage collection** | `pkg/hold/gc/` | Mark-and-sweep for orphaned blobs. Preview (dry-run) and execute modes. Triggered from hold admin UI. |
|
||||
| **libSQL embedded replicas** | AppView + Hold | Sync to Turso, Bunny DB, or self-hosted libsql-server. Configurable sync interval. |
|
||||
| **Hold successor/migration** | `pkg/hold/` | Promote a hold as successor to migrate users to new storage. |
|
||||
| **Relay management** | Hold admin | Manage firehose relay connections from admin panel. |
|
||||
| **Data export** | `pkg/appview/handlers/export.go` | GDPR-compliant export of all user data from AppView + all holds where user is member/captain. |
|
||||
| **Dark/light mode** | AppView UI | System preference detection, toggle, localStorage persistence. |
|
||||
| **Credential helper install page** | `/install` | Install scripts for macOS/Linux/Windows, version API. |
|
||||
| **Stars** | AppView UI | Star/unstar repos stored as `io.atcr.star` ATProto records, counts displayed. |
|
||||
|
||||
---
|
||||
|
||||
## Advanced Image Management
|
||||
|
||||
### Multi-Architecture Image Support
|
||||
### Multi-Architecture Image Support — DONE (display) / NOT STARTED (creation)
|
||||
|
||||
**Display image indexes:**
|
||||
- Show when a tag points to an image index (multi-arch manifest)
|
||||
- Display all architectures/platforms in the index (linux/amd64, linux/arm64, darwin/arm64, etc.)
|
||||
**Display image indexes — DONE:**
|
||||
- Show when a tag points to an image index (multi-arch manifest) — `IsMultiArch` flag, "Multi-arch" badge
|
||||
- Display all architectures/platforms in the index — platform badges (e.g., linux/amd64, linux/arm64)
|
||||
- Allow viewing individual manifests within the index
|
||||
- Show platform-specific layer details
|
||||
- Show platform-specific details
|
||||
|
||||
**Image index creation:**
|
||||
**Image index creation — NOT STARTED:**
|
||||
- UI for combining multiple single-arch manifests into an image index
|
||||
- Automatic platform detection from manifest metadata
|
||||
- Validate that all manifests are for the same image (different platforms)
|
||||
|
||||
### Layer Inspection & Visualization
|
||||
### Layer Inspection & Visualization — NOT STARTED
|
||||
|
||||
DB stores layer metadata (digest, size, media type, layer index) but there's no UI for any of this.
|
||||
|
||||
**Layer details page:**
|
||||
- Show Dockerfile command that created each layer (if available in history)
|
||||
@@ -30,594 +58,409 @@ This document outlines potential features for future versions of the ATCR AppVie
|
||||
- Calculate storage savings from layer sharing
|
||||
- Identify duplicate layers with different digests (potential optimization)
|
||||
|
||||
### Image Operations
|
||||
### Image Operations — PARTIAL (delete only)
|
||||
|
||||
**Tag Management:**
|
||||
- **Tag promotion workflow:** dev → staging → prod with one click
|
||||
- **Tag aliases:** Create multiple tags pointing to same digest
|
||||
- **Tag patterns:** Auto-tag based on git commit, semantic version, date
|
||||
- **Tag protection:** Mark tags as immutable (prevent deletion/re-pointing)
|
||||
**Tag/manifest deletion — DONE:**
|
||||
- Delete tags with `DeleteTagHandler` (cascade + confirmation modal)
|
||||
- Delete manifests with `DeleteManifestHandler` (handles tagged manifests gracefully)
|
||||
|
||||
**Image Copying:**
|
||||
**Tag Management — NOT STARTED:**
|
||||
- Tag promotion workflow (dev → staging → prod)
|
||||
- Tag aliases (multiple tags → same digest)
|
||||
- Tag patterns (auto-tag based on git commit, semantic version, date)
|
||||
- Tag protection (mark tags as immutable)
|
||||
|
||||
**Image Copying — NOT STARTED:**
|
||||
- Copy image from one repository to another
|
||||
- Copy image from another user's repository (fork)
|
||||
- Bulk copy operations (copy all tags, copy all manifests)
|
||||
- Bulk copy operations
|
||||
|
||||
**Image History:**
|
||||
- Timeline view of tag changes (what digest did "latest" point to over time)
|
||||
- Rollback functionality (revert tag to previous digest)
|
||||
- Audit log of all image operations (push, delete, tag changes)
|
||||
**Image History — NOT STARTED:**
|
||||
- Timeline view of tag changes
|
||||
- Rollback functionality
|
||||
- Audit log of image operations
|
||||
|
||||
### Vulnerability Scanning
|
||||
### Vulnerability Scanning — DONE (backend) / NOT STARTED (UI)
|
||||
|
||||
**Integration with security scanners:**
|
||||
- **Trivy** - Comprehensive vulnerability scanner
|
||||
- **Grype** - Anchore's vulnerability scanner
|
||||
- **Clair** - CoreOS vulnerability scanner
|
||||
**Backend — DONE:**
|
||||
- Separate scanner service (`scanner/` module) with Syft (SBOM) + Grype (vulnerabilities)
|
||||
- WebSocket-based job queue connecting scanner to hold service
|
||||
- Priority queue with tier-based scheduling (quartermaster > bosun > deckhand)
|
||||
- Scan results stored as ORAS artifacts in S3, referenced in hold PDS
|
||||
- Automatic scanning dispatched by hold on manifest push
|
||||
- See `docs/SBOM_SCANNING.md`
|
||||
|
||||
**Features:**
|
||||
- Automatic scanning on image push
|
||||
**AppView UI — NOT STARTED:**
|
||||
- Display CVE count by severity (critical, high, medium, low)
|
||||
- Show detailed CVE information (description, CVSS score, affected packages)
|
||||
- Filter images by vulnerability status
|
||||
- Subscribe to CVE notifications for your images
|
||||
- Compare vulnerability status across tags/versions
|
||||
|
||||
### Image Signing & Verification
|
||||
### Image Signing & Verification — NOT STARTED
|
||||
|
||||
**Cosign/Sigstore integration:**
|
||||
- Sign images with Cosign
|
||||
Concept doc exists at `docs/SIGNATURE_INTEGRATION.md` but no implementation.
|
||||
|
||||
- Sign images
|
||||
- Display signature verification status
|
||||
- Show keyless signing certificate chains
|
||||
- Integrate with transparency log (Rekor)
|
||||
|
||||
**Features:**
|
||||
- UI for signing images (generate key, sign manifest)
|
||||
- Verify signatures before pull (browser-based verification)
|
||||
- Display signature metadata (signer, timestamp, transparency log entry)
|
||||
- Display signature metadata
|
||||
- Require signatures for protected repositories
|
||||
|
||||
### SBOM (Software Bill of Materials)
|
||||
### SBOM (Software Bill of Materials) — DONE (backend) / NOT STARTED (UI)
|
||||
|
||||
**SBOM generation and display:**
|
||||
- Generate SBOM on push (SPDX or CycloneDX format)
|
||||
**Backend — DONE:**
|
||||
- Syft generates SPDX JSON format SBOMs
|
||||
- Stored as ORAS artifacts (referenced via `artifactType: "application/spdx+json"`)
|
||||
- Blobs in S3, metadata in hold's PDS
|
||||
- Accessible via ORAS CLI and hold XRPC endpoints
|
||||
|
||||
**UI — NOT STARTED:**
|
||||
- Display package list from SBOM
|
||||
- Show license information
|
||||
- Link to upstream package sources
|
||||
- Compare SBOMs across versions (what packages changed)
|
||||
- Compare SBOMs across versions
|
||||
|
||||
**SBOM attestation:**
|
||||
- Store SBOM as attestation (in-toto format)
|
||||
- Link SBOM to image signature
|
||||
- Verify SBOM integrity
|
||||
---
|
||||
|
||||
## Hold Management Dashboard
|
||||
## Hold Management Dashboard — DONE (on hold admin panel)
|
||||
|
||||
### Hold Discovery & Registration
|
||||
Hold management is implemented as a separate admin panel on the hold service itself (`pkg/hold/admin/`), not in the AppView UI. This makes sense architecturally — hold owners manage their own holds.
|
||||
|
||||
**Create hold:**
|
||||
### Hold Discovery & Registration — PARTIAL
|
||||
|
||||
**Hold registration — DONE:**
|
||||
- Automatic registration on hold startup (captain + crew records created in embedded PDS)
|
||||
- Auto-detection of region from cloud metadata
|
||||
|
||||
**NOT STARTED:**
|
||||
- UI wizard for deploying hold service
|
||||
- One-click deployment to Fly.io, Railway, Render
|
||||
- Configuration generator (environment variables, docker-compose)
|
||||
- Test connectivity after deployment
|
||||
- One-click deployment to cloud platforms
|
||||
- Configuration generator
|
||||
- Test connectivity UI
|
||||
|
||||
**Hold registration:**
|
||||
- Automatic registration via OAuth (already implemented)
|
||||
- Manual registration form (for existing holds)
|
||||
- Bulk import holds from JSON/YAML
|
||||
### Hold Configuration — DONE (admin panel)
|
||||
|
||||
### Hold Configuration
|
||||
|
||||
**Hold settings page:**
|
||||
- Edit hold metadata (name, description, icon)
|
||||
**Hold settings — DONE (hold admin):**
|
||||
- Toggle public/private flag
|
||||
- Configure storage backend (S3, Storj, Minio, filesystem)
|
||||
- Set storage quotas and limits
|
||||
- Configure retention policies (auto-delete old blobs)
|
||||
- Toggle allow-all-crew
|
||||
- Toggle Bluesky post announcements
|
||||
- Set successor hold DID for migration
|
||||
- Writes changes back to YAML config file
|
||||
|
||||
**Hold credentials:**
|
||||
- Rotate S3 access keys
|
||||
- Test hold connectivity
|
||||
- View hold service logs (if accessible)
|
||||
**Storage config — YAML-only:**
|
||||
- S3 credentials, region, bucket, endpoint, CDN pull zone all configured via YAML
|
||||
- No UI for editing S3 credentials or rotating keys
|
||||
|
||||
### Crew Management
|
||||
**Quotas — DONE (read-only UI):**
|
||||
- Tier-based limits (deckhand 5GB, bosun 50GB, quartermaster 100GB)
|
||||
- Per-user quota tracking and display in admin
|
||||
- Not editable via UI (requires YAML change)
|
||||
|
||||
**Invite crew members:**
|
||||
- Send invitation links (OAuth-based)
|
||||
- Invite by handle or DID
|
||||
- Set crew permissions (read-only, read-write, admin)
|
||||
- Bulk invite (upload CSV)
|
||||
**NOT STARTED:**
|
||||
- Retention policies (auto-delete old blobs)
|
||||
- Hold service log viewer
|
||||
|
||||
**Crew list:**
|
||||
- Display all crew members
|
||||
- Show last activity (last push, last pull)
|
||||
### Crew Management — DONE (hold admin panel)
|
||||
|
||||
**Implemented in `pkg/hold/admin/handlers_crew.go`:**
|
||||
- Add crew by DID with role, permissions (`blob:read`, `blob:write`, `crew:admin`), and tier
|
||||
- Crew list showing handle, role, permissions, tier, usage, quota
|
||||
- Edit crew permissions and tier
|
||||
- Remove crew members
|
||||
- Change crew permissions
|
||||
- Bulk JSON import/export with deduplication (`handlers_crew_io.go`)
|
||||
|
||||
**Crew request workflow:**
|
||||
- Allow users to request access to a hold
|
||||
- Hold owner approves/rejects requests
|
||||
- Notification system for requests
|
||||
**NOT STARTED:**
|
||||
- Invitation links (OAuth-based, currently must know DID)
|
||||
- Invite by handle (currently DID-only)
|
||||
- Crew request workflow (users can't self-request access)
|
||||
- Approval/rejection flow
|
||||
|
||||
### Hold Analytics
|
||||
### Hold Analytics — PARTIAL
|
||||
|
||||
**Storage metrics:**
|
||||
- Total storage used (bytes)
|
||||
- Blob count
|
||||
- Largest blobs
|
||||
- Growth over time (chart)
|
||||
- Deduplication savings
|
||||
**Storage metrics — DONE (hold admin):**
|
||||
- Total blobs, total size, unique digests
|
||||
- Per-user quota stats (total size, blob count)
|
||||
- Top users by storage (lazy-loaded HTMX partial)
|
||||
- Crew count and tier distribution
|
||||
|
||||
**Access metrics:**
|
||||
- Total downloads (pulls)
|
||||
- Bandwidth used
|
||||
- Popular images (most pulled)
|
||||
- Geographic distribution (if available)
|
||||
- Access logs (who pulled what, when)
|
||||
**NOT STARTED:**
|
||||
- Access metrics (downloads, pulls, bandwidth)
|
||||
- Growth over time charts
|
||||
- Cost estimation
|
||||
- Geographic distribution
|
||||
- Access logs
|
||||
|
||||
**Cost estimation:**
|
||||
- Calculate S3 storage costs
|
||||
- Calculate bandwidth costs
|
||||
- Compare costs across storage backends
|
||||
- Budget alerts (notify when approaching limit)
|
||||
---
|
||||
|
||||
## Discovery & Social Features
|
||||
|
||||
### Federated Browse & Search
|
||||
### Federated Browse & Search — PARTIAL
|
||||
|
||||
**Enhanced discovery:**
|
||||
- Full-text search across all ATCR images (repository name, tag, description)
|
||||
**Basic search — DONE:**
|
||||
- Full-text search across handles, DIDs, repo names, and annotations
|
||||
- Search UI with HTMX lazy loading and pagination
|
||||
- Navigation bar search component
|
||||
|
||||
**NOT STARTED:**
|
||||
- Filter by user, hold, architecture, date range
|
||||
- Sort by popularity, recency, size
|
||||
- Advanced query syntax (e.g., "user:alice tag:latest arch:arm64")
|
||||
- Advanced query syntax
|
||||
- Popular/trending images
|
||||
- Categories and user-defined tags
|
||||
|
||||
**Popular/Trending:**
|
||||
- Most pulled images (past day, week, month)
|
||||
- Fastest growing images (new pulls)
|
||||
- Recently updated images (new tags)
|
||||
- Community favorites (curated list)
|
||||
### Sailor Profiles — PARTIAL
|
||||
|
||||
**Categories & Tags:**
|
||||
- User-defined categories (web, database, ml, etc.)
|
||||
- Tag images with keywords (nginx, proxy, reverse-proxy)
|
||||
- Browse by category
|
||||
- Tag cloud visualization
|
||||
**Public profile page — DONE:**
|
||||
- `/u/{handle}` shows user's avatar, handle, DID, and all public repositories
|
||||
- OpenGraph meta tags and JSON-LD structured data
|
||||
|
||||
### Sailor Profiles (Public)
|
||||
|
||||
**Public profile page:**
|
||||
- `/ui/@alice` shows alice's public repositories
|
||||
- Bio, avatar, website links
|
||||
**NOT STARTED:**
|
||||
- Bio/description field
|
||||
- Website links
|
||||
- Statistics (total images, total pulls, joined date)
|
||||
- Pinned repositories (showcase best images)
|
||||
- Pinned/featured repositories
|
||||
|
||||
**Social features:**
|
||||
- Follow other sailors (get notified of their pushes)
|
||||
- Star repositories (bookmark favorites)
|
||||
- Comment on images (feedback, questions)
|
||||
### Social Features — PARTIAL (stars only)
|
||||
|
||||
**Stars — DONE:**
|
||||
- Star/unstar repositories stored as `io.atcr.star` ATProto records
|
||||
- Star counts displayed on repository pages
|
||||
|
||||
**NOT STARTED:**
|
||||
- Follow other sailors
|
||||
- Comment on images
|
||||
- Like/upvote images
|
||||
- Activity feed
|
||||
- Federated timeline / custom feeds
|
||||
- Sharing to Bluesky/ATProto social apps
|
||||
|
||||
**Activity feed:**
|
||||
- Timeline of followed sailors' activity
|
||||
- Recent pushes from community
|
||||
- Popular images from followed users
|
||||
|
||||
### Federated Timeline
|
||||
|
||||
**ATProto-native feed:**
|
||||
- Real-time feed of container pushes (like Bluesky's timeline)
|
||||
- Filter by follows, community, or global
|
||||
- React to pushes (like, share, comment)
|
||||
- Share images to Bluesky/ATProto social apps
|
||||
|
||||
**Custom feeds:**
|
||||
- Create algorithmic feeds (e.g., "Show me all ML images")
|
||||
- Subscribe to curated feeds
|
||||
- Publish feeds for others to subscribe
|
||||
---
|
||||
|
||||
## Access Control & Permissions
|
||||
|
||||
### Repository-Level Permissions
|
||||
### Hold-Level Access Control — DONE
|
||||
|
||||
**Private repositories:**
|
||||
- Mark repositories as private (only owner + collaborators can pull)
|
||||
- Invite collaborators by handle/DID
|
||||
- Set permissions (read-only, read-write, admin)
|
||||
- Public/private hold toggle (admin UI + OCI enforcement)
|
||||
- Crew permissions: `blob:read`, `blob:write`, `crew:admin`
|
||||
- `blob:write` implicitly grants `blob:read`
|
||||
- Captain has all permissions implicitly
|
||||
- See `docs/BYOS.md`
|
||||
|
||||
**Public repositories:**
|
||||
- Default: public (anyone can pull)
|
||||
- Require authentication for private repos
|
||||
- Generate read-only tokens (for CI/CD)
|
||||
### Repository-Level Permissions — BLOCKED
|
||||
|
||||
**Implementation challenge:**
|
||||
- ATProto doesn't support private records yet
|
||||
- May require proxy layer for access control
|
||||
- Or use encrypted blobs with shared keys
|
||||
- **Private repositories blocked by ATProto** — no private records support yet
|
||||
- Repository-level permissions, collaborator invites, read-only tokens all depend on this
|
||||
- May require proxy layer or encrypted blobs when ATProto adds private record support
|
||||
|
||||
### Team/Organization Accounts
|
||||
### Team/Organization Accounts — NOT STARTED
|
||||
|
||||
**Multi-user organizations:**
|
||||
- Create organization account (e.g., `@acme-corp`)
|
||||
- Add members with roles (owner, maintainer, member)
|
||||
- Organization-owned repositories
|
||||
- Billing and quotas at org level
|
||||
- Organization accounts, RBAC, SSO, audit logs
|
||||
- Likely a later-stage feature
|
||||
|
||||
**Features:**
|
||||
- Team-based access control
|
||||
- Shared hold for organization
|
||||
- Audit logs for all org activity
|
||||
- Single sign-on (SSO) integration
|
||||
---
|
||||
|
||||
## Analytics & Monitoring
|
||||
|
||||
### Dashboard
|
||||
### Dashboard — PARTIAL
|
||||
|
||||
**Personal dashboard:**
|
||||
**Hold dashboard — DONE (hold admin):**
|
||||
- Storage usage, crew count, tier distribution
|
||||
|
||||
**Personal dashboard — NOT STARTED:**
|
||||
- Overview of your images, holds, activity
|
||||
- Quick stats (total size, pull count, last push)
|
||||
- Recent activity (your pushes, pulls)
|
||||
- Alerts and notifications
|
||||
- Quick stats, recent activity, alerts
|
||||
|
||||
**Hold dashboard:**
|
||||
- Storage usage, bandwidth, costs
|
||||
- Active crew members
|
||||
- Recent uploads/downloads
|
||||
- Health status of hold service
|
||||
### Pull Analytics — NOT STARTED
|
||||
|
||||
### Pull Analytics
|
||||
|
||||
**Detailed metrics:**
|
||||
- Pull count per image/tag
|
||||
- Pull count by client (Docker, containerd, podman)
|
||||
- Pull count by geography (country, region)
|
||||
- Pull count over time (chart)
|
||||
- Failed pulls (errors, retries)
|
||||
- Pull count by client, geography, over time
|
||||
- User analytics (authenticated vs anonymous)
|
||||
|
||||
**User analytics:**
|
||||
- Who is pulling your images (if authenticated)
|
||||
- Anonymous vs authenticated pulls
|
||||
- Repeat users vs new users
|
||||
### Alerts & Notifications — NOT STARTED
|
||||
|
||||
### Alerts & Notifications
|
||||
- Alert types (quota exceeded, vulnerability detected, hold down, etc.)
|
||||
- Notification channels (email, webhook, ATProto, Slack/Discord)
|
||||
|
||||
**Alert types:**
|
||||
- Storage quota exceeded
|
||||
- High bandwidth usage
|
||||
- New vulnerability detected
|
||||
- Image signature invalid
|
||||
- Hold service down
|
||||
- Crew member joined/left
|
||||
|
||||
**Notification channels:**
|
||||
- Email
|
||||
- Webhook (POST to custom URL)
|
||||
- ATProto app notification (future: in-app notifications in Bluesky)
|
||||
- Slack, Discord, Telegram integrations
|
||||
---
|
||||
|
||||
## Developer Tools & Integrations
|
||||
|
||||
### API Documentation
|
||||
### Credential Helper — DONE
|
||||
|
||||
**Interactive API docs:**
|
||||
- Swagger/OpenAPI spec for OCI API
|
||||
- Swagger/OpenAPI spec for UI API
|
||||
- Interactive API explorer (try API calls in browser)
|
||||
- Code examples in multiple languages (curl, Go, Python, JavaScript)
|
||||
- Install page at `/install` with shell scripts
|
||||
- Version API endpoint for automatic updates
|
||||
|
||||
**SDK/Client Libraries:**
|
||||
- Official Go client library
|
||||
- JavaScript/TypeScript client
|
||||
- Python client
|
||||
- Rust client
|
||||
### API Documentation — NOT STARTED
|
||||
|
||||
### Webhooks
|
||||
- Swagger/OpenAPI specs
|
||||
- Interactive API explorer
|
||||
- Code examples, SDKs
|
||||
|
||||
**Webhook configuration:**
|
||||
- Register webhook URLs per repository
|
||||
- Select events to trigger (push, delete, tag update)
|
||||
- Test webhooks (send test payload)
|
||||
- View webhook delivery history
|
||||
- Retry failed deliveries
|
||||
### Webhooks — NOT STARTED
|
||||
|
||||
**Webhook events:**
|
||||
- `manifest.pushed`
|
||||
- `manifest.deleted`
|
||||
- `tag.created`
|
||||
- `tag.updated`
|
||||
- `tag.deleted`
|
||||
- `scan.completed` (vulnerability scan finished)
|
||||
- Repository-level webhook registration
|
||||
- Events: manifest.pushed, tag.created, scan.completed, etc.
|
||||
- Test, retry, delivery history
|
||||
|
||||
### CI/CD Integration Guides
|
||||
### CI/CD Integration — NOT STARTED
|
||||
|
||||
**Documentation for popular CI/CD platforms:**
|
||||
- GitHub Actions (example workflows)
|
||||
- GitLab CI (.gitlab-ci.yml examples)
|
||||
- CircleCI (config.yml examples)
|
||||
- Jenkins (Jenkinsfile examples)
|
||||
- Drone CI
|
||||
- GitHub Actions, GitLab CI, CircleCI example workflows
|
||||
- Pre-built actions/plugins
|
||||
- Build status badges
|
||||
|
||||
**Features:**
|
||||
- One-click workflow generation
|
||||
- Pre-built actions/plugins for ATCR
|
||||
- Cache layer optimization for faster builds
|
||||
- Build status badges (show build status in README)
|
||||
### Infrastructure as Code — PARTIAL
|
||||
|
||||
### Infrastructure as Code
|
||||
**DONE:**
|
||||
- Custom UpCloud deployment tool (`deploy/upcloud/`) with Go-based provisioning, cloud-init, systemd, config templates
|
||||
- Docker Compose for dev and production
|
||||
|
||||
**IaC examples:**
|
||||
- Terraform module for deploying hold service
|
||||
- Pulumi program for ATCR infrastructure
|
||||
- Kubernetes manifests for hold service
|
||||
- Docker Compose for local development
|
||||
- Helm chart for AppView + hold
|
||||
**NOT STARTED:**
|
||||
- Terraform modules
|
||||
- Helm charts
|
||||
- Kubernetes manifests (only an example verification webhook exists)
|
||||
- GitOps integrations (ArgoCD, FluxCD)
|
||||
|
||||
**GitOps workflows:**
|
||||
- ArgoCD integration (deploy images from ATCR)
|
||||
- FluxCD integration
|
||||
- Automated deployments on tag push
|
||||
---
|
||||
|
||||
## Documentation & Onboarding
|
||||
## Documentation & Onboarding — PARTIAL
|
||||
|
||||
### Interactive Getting Started
|
||||
**DONE:**
|
||||
- Install page with credential helper setup
|
||||
- Learn more page
|
||||
- Internal developer docs (`docs/`)
|
||||
|
||||
**Onboarding wizard:**
|
||||
- Step-by-step guide for first-time users
|
||||
- Interactive tutorial (push your first image)
|
||||
- Verify setup (test authentication, test push/pull)
|
||||
- Completion checklist
|
||||
|
||||
**Guided tours:**
|
||||
- Product tour of UI features
|
||||
- Tooltips and hints for new users
|
||||
**NOT STARTED:**
|
||||
- Interactive onboarding wizard
|
||||
- Product tour / tooltips
|
||||
- Help center with FAQs
|
||||
- Video tutorials
|
||||
- Comprehensive user-facing documentation site
|
||||
|
||||
### Comprehensive Documentation
|
||||
|
||||
**Documentation sections:**
|
||||
- Quickstart guide
|
||||
- Detailed user manual
|
||||
- API reference
|
||||
- ATProto record schemas
|
||||
- Deployment guides (hold service, AppView)
|
||||
- Troubleshooting guide
|
||||
- Security best practices
|
||||
|
||||
**Video tutorials:**
|
||||
- YouTube channel with how-to videos
|
||||
- Screen recordings of common tasks
|
||||
- Conference talks and demos
|
||||
|
||||
### Community & Support
|
||||
|
||||
**Community features:**
|
||||
- Discussion forum (or integrate with Discourse)
|
||||
- GitHub Discussions for ATCR project
|
||||
- Discord/Slack community
|
||||
- Monthly community calls
|
||||
|
||||
**Support channels:**
|
||||
- Email support
|
||||
- Live chat (for paid tiers)
|
||||
- Priority support (for enterprise)
|
||||
---
|
||||
|
||||
## Advanced ATProto Integration
|
||||
|
||||
### Record Viewer
|
||||
### Data Export — DONE
|
||||
|
||||
**ATProto record browser:**
|
||||
- Browse all your `io.atcr.*` records
|
||||
- Raw JSON view with ATProto metadata (CID, commit info, timestamp)
|
||||
- Diff viewer for record updates
|
||||
- History view (see all versions of a record)
|
||||
- Link to ATP URI (`at://did/collection/rkey`)
|
||||
- GDPR-compliant data export (`ExportUserDataHandler`)
|
||||
- Fetches data from AppView DB + all holds where user is member/captain
|
||||
|
||||
**Export/Import:**
|
||||
- Export all records as JSON (backup)
|
||||
- Import records from JSON (restore, migration)
|
||||
- CAR file export (ATProto native format)
|
||||
### Record Viewer — NOT STARTED
|
||||
|
||||
### PDS Integration
|
||||
- Browse `io.atcr.*` records with raw JSON view
|
||||
- Record history, diff viewer
|
||||
- ATP URI links
|
||||
|
||||
**Multi-PDS support:**
|
||||
- Switch between multiple PDS accounts
|
||||
- Manage images across different PDSs
|
||||
- Unified view of all your images (across PDSs)
|
||||
### PDS Integration — NOT STARTED
|
||||
|
||||
**PDS health monitoring:**
|
||||
- Show PDS connection status
|
||||
- Alert if PDS is unreachable
|
||||
- Fallback to alternate PDS (if configured)
|
||||
- Multi-PDS support, PDS health monitoring
|
||||
- PDS migration tools
|
||||
- "Verify on PDS" button
|
||||
|
||||
**PDS migration tools:**
|
||||
- Migrate images from one PDS to another
|
||||
- Bulk update hold endpoints
|
||||
- Re-sign OAuth tokens for new PDS
|
||||
### Federation — NOT STARTED
|
||||
|
||||
### Decentralization Features
|
||||
- Cross-AppView image pulls
|
||||
- AppView discovery
|
||||
- Federated search
|
||||
|
||||
**Data sovereignty:**
|
||||
- "Verify on PDS" button (proves manifest is in your PDS)
|
||||
- "Clone my registry" guide (backup to another PDS)
|
||||
- "Export registry" (download all manifests + metadata)
|
||||
|
||||
**Federation:**
|
||||
- Cross-AppView image pulls (pull from other ATCR AppViews)
|
||||
- AppView discovery (find other ATCR instances)
|
||||
- Federated search (search across multiple AppViews)
|
||||
|
||||
## Enterprise Features (Future Commercial Offering)
|
||||
|
||||
### Team Collaboration
|
||||
|
||||
**Organizations:**
|
||||
- Enterprise org accounts with unlimited members
|
||||
- RBAC (role-based access control)
|
||||
- SSO integration (SAML, OIDC)
|
||||
- Audit logs for compliance
|
||||
|
||||
### Compliance & Security
|
||||
|
||||
**Compliance tools:**
|
||||
- SOC 2 compliance reporting
|
||||
- HIPAA-compliant storage options
|
||||
- GDPR data export/deletion
|
||||
- Retention policies (auto-delete after N days)
|
||||
|
||||
**Security features:**
|
||||
- Image scanning with policy enforcement (block vulnerable images)
|
||||
- Malware scanning (scan blobs for malware)
|
||||
- Secrets scanning (detect leaked credentials in layers)
|
||||
- Content trust (require signed images)
|
||||
|
||||
### SLA & Support
|
||||
|
||||
**Paid tiers:**
|
||||
- Free tier: 5GB storage, community support
|
||||
- Pro tier: 100GB storage, email support, SLA
|
||||
- Enterprise tier: Unlimited storage, priority support, dedicated instance
|
||||
|
||||
**Features:**
|
||||
- Guaranteed uptime (99.9%)
|
||||
- Premium support (24/7, faster response)
|
||||
- Dedicated account manager
|
||||
- Custom contract terms
|
||||
---
|
||||
|
||||
## UI/UX Enhancements
|
||||
|
||||
### Design System
|
||||
### Theming — PARTIAL
|
||||
|
||||
**Theming:**
|
||||
- Light and dark modes (system preference)
|
||||
- Custom themes (nautical, cyberpunk, minimalist)
|
||||
- Accessibility (WCAG 2.1 AA compliance)
|
||||
**DONE:**
|
||||
- Light/dark mode with system preference detection and toggle
|
||||
- Responsive design (Tailwind/DaisyUI, mobile-friendly)
|
||||
- PWA manifest with icons (no service worker yet)
|
||||
|
||||
**NOT STARTED:**
|
||||
- Custom themes
|
||||
- WCAG 2.1 AA accessibility audit
|
||||
- High contrast mode
|
||||
- Internationalization (i18n)
|
||||
- Native mobile apps
|
||||
|
||||
**Responsive design:**
|
||||
- Mobile-first design
|
||||
- Progressive web app (PWA) with offline support
|
||||
- Native mobile apps (iOS, Android)
|
||||
### Performance — PARTIAL
|
||||
|
||||
### Performance Optimizations
|
||||
**DONE:**
|
||||
- HTMX lazy loading for data-heavy partials
|
||||
- Efficient server-side rendering
|
||||
|
||||
**Frontend optimizations:**
|
||||
- Lazy loading for images and data
|
||||
**NOT STARTED:**
|
||||
- Service worker for offline caching
|
||||
- Virtual scrolling for large lists
|
||||
- Service worker for caching
|
||||
- Code splitting (load only what's needed)
|
||||
- GraphQL API
|
||||
- Real-time WebSocket updates in UI
|
||||
|
||||
**Backend optimizations:**
|
||||
- GraphQL API (fetch only required fields)
|
||||
- Real-time updates via WebSocket
|
||||
- Server-sent events for firehose
|
||||
- Edge caching (CloudFlare, Fastly)
|
||||
---
|
||||
|
||||
### Internationalization
|
||||
## Enterprise Features — NOT STARTED (except billing)
|
||||
|
||||
**Multi-language support:**
|
||||
- UI translations (English, Spanish, French, German, Japanese, Chinese, etc.)
|
||||
- RTL (right-to-left) language support
|
||||
- Localized date/time formats
|
||||
- Locale-specific formatting (numbers, currencies)
|
||||
### Billing — DONE
|
||||
|
||||
## Miscellaneous Ideas
|
||||
- Stripe integration (`pkg/hold/billing/`, requires `-tags billing` build tag)
|
||||
- Checkout sessions, customer portal, subscription webhooks
|
||||
- Tier upgrades/downgrades
|
||||
|
||||
### Image Build Service
|
||||
### Everything Else — NOT STARTED
|
||||
|
||||
**Cloud-based builds:**
|
||||
- Build images from Dockerfile in the UI
|
||||
- Multi-stage build support
|
||||
- Build cache optimization
|
||||
- Build logs and status
|
||||
- Organization accounts with SSO (SAML, OIDC)
|
||||
- RBAC, audit logs for compliance
|
||||
- SOC 2, HIPAA, GDPR compliance tooling (data export exists, see above)
|
||||
- Image scanning policy enforcement
|
||||
- Paid tier SLAs
|
||||
|
||||
**Automated builds:**
|
||||
- Connect GitHub/GitLab repository
|
||||
- Auto-build on git push
|
||||
- Build matrix (multiple architectures, versions)
|
||||
- Build notifications
|
||||
---
|
||||
|
||||
### Image Registry Mirroring
|
||||
## Miscellaneous Ideas — NOT STARTED
|
||||
|
||||
**Mirror external registries:**
|
||||
- Cache images from Docker Hub, ghcr.io, quay.io
|
||||
- Transparent proxy (pull-through cache)
|
||||
- Reduce external bandwidth costs
|
||||
- Faster pulls (cache locally)
|
||||
These remain future ideas with no implementation:
|
||||
|
||||
**Features:**
|
||||
- Configurable cache retention
|
||||
- Whitelist/blacklist registries
|
||||
- Statistics (cache hit rate, savings)
|
||||
- **Image build service** — Cloud-based Dockerfile builds
|
||||
- **Registry mirroring** — Pull-through cache for Docker Hub, ghcr.io, etc.
|
||||
- **Deployment tools** — One-click deploy to K8s, ECS, Fly.io
|
||||
- **Image recommendations** — ML-based "similar images" and "people also pulled"
|
||||
- **Gamification** — Achievement badges, leaderboards
|
||||
- **Advanced search** — Semantic/AI-powered search, saved searches
|
||||
|
||||
### Deployment Tools
|
||||
---
|
||||
|
||||
**One-click deployments:**
|
||||
- Deploy image to Kubernetes
|
||||
- Deploy to Docker Swarm
|
||||
- Deploy to AWS ECS/Fargate
|
||||
- Deploy to Fly.io, Railway, Render
|
||||
## Updated Priority List
|
||||
|
||||
**Deployment tracking:**
|
||||
- Track where images are deployed
|
||||
- Show running versions (which environments use which tags)
|
||||
- Notify on new deployments
|
||||
**Already done (was "High Priority"):**
|
||||
1. ~~Multi-architecture image support~~ — display working
|
||||
2. ~~Vulnerability scanning integration~~ — backend complete
|
||||
3. ~~Hold management dashboard~~ — implemented on hold admin panel
|
||||
4. ~~Basic search~~ — working
|
||||
|
||||
### Image Recommendations
|
||||
**Remaining high priority:**
|
||||
1. Scan results UI in AppView (backend exists, just needs frontend)
|
||||
2. SBOM display UI in AppView (backend exists, just needs frontend)
|
||||
3. Webhooks for CI/CD integration
|
||||
4. Enhanced search (filters, sorting, advanced queries)
|
||||
5. Richer sailor profiles (bio, stats, pinned repos)
|
||||
|
||||
**ML-based recommendations:**
|
||||
- "Similar images" (based on layers, packages, tags)
|
||||
- "People who pulled this also pulled..." (collaborative filtering)
|
||||
- "Recommended for you" (personalized based on history)
|
||||
**Medium priority:**
|
||||
1. Layer inspection UI
|
||||
2. Pull analytics and monitoring
|
||||
3. API documentation (Swagger/OpenAPI)
|
||||
4. Tag management (promotion, protection, aliases)
|
||||
5. Onboarding wizard / getting started guide
|
||||
|
||||
### Gamification
|
||||
|
||||
**Achievements:**
|
||||
- Badges for milestones (first push, 100 pulls, 1GB storage, etc.)
|
||||
- Leaderboards (most popular images, most active sailors)
|
||||
- Community contributions (points for helping others)
|
||||
|
||||
### Advanced Search
|
||||
|
||||
**Semantic search:**
|
||||
- Search by description, README, labels
|
||||
- Natural language queries ("show me nginx images with SSL")
|
||||
- AI-powered search (GPT-based understanding)
|
||||
|
||||
**Saved searches:**
|
||||
- Save frequently used queries
|
||||
- Subscribe to search results (get notified of new matches)
|
||||
- Share searches with team
|
||||
|
||||
## Implementation Priority
|
||||
|
||||
If implementing these features, suggested priority order:
|
||||
|
||||
**High Priority (Next 6 months):**
|
||||
1. Multi-architecture image support
|
||||
2. Vulnerability scanning integration
|
||||
3. Hold management dashboard
|
||||
4. Enhanced search and filtering
|
||||
5. Webhooks for CI/CD integration
|
||||
|
||||
**Medium Priority (6-12 months):**
|
||||
**Low priority / long-term:**
|
||||
1. Team/organization accounts
|
||||
2. Repository-level permissions
|
||||
3. Image signing and verification
|
||||
4. Pull analytics and monitoring
|
||||
5. API documentation and SDKs
|
||||
|
||||
**Low Priority (12+ months):**
|
||||
1. Enterprise features (SSO, compliance, SLA)
|
||||
2. Image build service
|
||||
3. Registry mirroring
|
||||
4. Mobile apps
|
||||
5. ML-based recommendations
|
||||
4. Federation features
|
||||
5. Internationalization
|
||||
|
||||
**Research/Experimental:**
|
||||
**Blocked on external dependencies:**
|
||||
1. Private repositories (requires ATProto private records)
|
||||
2. Federated timeline (requires ATProto feed infrastructure)
|
||||
3. Deployment tools integration
|
||||
4. Semantic search
|
||||
|
||||
---
|
||||
|
||||
**Note:** This is a living document. Features may be added, removed, or reprioritized based on user feedback, technical feasibility, and ATProto ecosystem evolution.
|
||||
|
||||
*Last audited: 2026-02-12*
|
||||
|
||||
238
docs/BILLING.md
Normal file
238
docs/BILLING.md
Normal file
@@ -0,0 +1,238 @@
|
||||
# Hold Service Billing Integration
|
||||
|
||||
Optional Stripe billing integration for hold services. Allows hold operators to charge for storage tiers via subscriptions.
|
||||
|
||||
## Overview
|
||||
|
||||
- **Compile-time optional**: Build with `-tags billing` to enable Stripe support
|
||||
- **Hold owns billing**: Each hold operator has their own Stripe account
|
||||
- **AppView aggregates UI**: Fetches subscription info from holds, displays in settings
|
||||
- **Customer-DID mapping**: DIDs stored in Stripe customer metadata (no extra database)
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
User → AppView Settings UI → Hold XRPC endpoints → Stripe
|
||||
↓
|
||||
Stripe webhook → Hold → Update crew tier
|
||||
```
|
||||
|
||||
## Building with Billing Support
|
||||
|
||||
```bash
|
||||
# Without billing (default)
|
||||
go build ./cmd/hold
|
||||
|
||||
# With billing
|
||||
go build -tags billing ./cmd/hold
|
||||
|
||||
# Docker with billing
|
||||
docker build --build-arg BILLING_ENABLED=true -f Dockerfile.hold .
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Environment Variables
|
||||
|
||||
```bash
|
||||
# Required for billing
|
||||
STRIPE_SECRET_KEY=sk_live_xxx # or sk_test_xxx for testing
|
||||
STRIPE_WEBHOOK_SECRET=whsec_xxx # from Stripe Dashboard or CLI
|
||||
|
||||
# Optional
|
||||
STRIPE_PUBLISHABLE_KEY=pk_live_xxx # for client-side (not currently used)
|
||||
```
|
||||
|
||||
### quotas.yaml
|
||||
|
||||
```yaml
|
||||
tiers:
|
||||
swabbie:
|
||||
quota: 2GB
|
||||
description: "Starter storage"
|
||||
# No stripe_price = free tier
|
||||
|
||||
deckhand:
|
||||
quota: 5GB
|
||||
description: "Standard storage"
|
||||
stripe_price_yearly: price_xxx # Price ID from Stripe
|
||||
|
||||
bosun:
|
||||
quota: 10GB
|
||||
description: "Mid-level storage"
|
||||
stripe_price_monthly: price_xxx
|
||||
stripe_price_yearly: price_xxx
|
||||
|
||||
defaults:
|
||||
new_crew_tier: swabbie
|
||||
plankowner_crew_tier: deckhand # Early adopters get this free
|
||||
|
||||
billing:
|
||||
enabled: true
|
||||
currency: usd
|
||||
success_url: "{hold_url}/billing/success"
|
||||
cancel_url: "{hold_url}/billing/cancel"
|
||||
```
|
||||
|
||||
### Stripe Price IDs
|
||||
|
||||
Use **Price IDs** (`price_xxx`), not Product IDs (`prod_xxx`).
|
||||
|
||||
To find Price IDs:
|
||||
1. Stripe Dashboard → Products → Select product
|
||||
2. Look at Pricing section
|
||||
3. Copy the Price ID
|
||||
|
||||
Or via API:
|
||||
```bash
|
||||
curl https://api.stripe.com/v1/prices?product=prod_xxx \
|
||||
-u sk_test_xxx:
|
||||
```
|
||||
|
||||
## XRPC Endpoints
|
||||
|
||||
| Endpoint | Auth | Description |
|
||||
|----------|------|-------------|
|
||||
| `GET /xrpc/io.atcr.hold.getSubscriptionInfo` | Optional | Get tiers and user's current subscription |
|
||||
| `POST /xrpc/io.atcr.hold.createCheckoutSession` | Required | Create Stripe checkout URL |
|
||||
| `GET /xrpc/io.atcr.hold.getBillingPortalUrl` | Required | Get Stripe billing portal URL |
|
||||
| `POST /xrpc/io.atcr.hold.stripeWebhook` | Stripe sig | Handle subscription events |
|
||||
|
||||
## Local Development
|
||||
|
||||
### Stripe CLI Setup
|
||||
|
||||
The Stripe CLI forwards webhooks to localhost:
|
||||
|
||||
```bash
|
||||
# Install
|
||||
brew install stripe/stripe-cli/stripe
|
||||
# Or: https://stripe.com/docs/stripe-cli
|
||||
|
||||
# Login
|
||||
stripe login
|
||||
|
||||
# Forward webhooks to local hold
|
||||
stripe listen --forward-to localhost:8080/xrpc/io.atcr.hold.stripeWebhook
|
||||
```
|
||||
|
||||
The CLI outputs a webhook signing secret:
|
||||
```
|
||||
Ready! Your webhook signing secret is whsec_xxxxxxxxxxxxx
|
||||
```
|
||||
|
||||
Use that as `STRIPE_WEBHOOK_SECRET` for local dev.
|
||||
|
||||
### Running Locally
|
||||
|
||||
```bash
|
||||
# Terminal 1: Run hold with billing
|
||||
export STRIPE_SECRET_KEY=sk_test_xxx
|
||||
export STRIPE_WEBHOOK_SECRET=whsec_xxx # from 'stripe listen'
|
||||
export HOLD_PUBLIC_URL=http://localhost:8080
|
||||
export STORAGE_DRIVER=filesystem
|
||||
export HOLD_DATABASE_DIR=/tmp/hold-test
|
||||
go run -tags billing ./cmd/hold
|
||||
|
||||
# Terminal 2: Forward webhooks
|
||||
stripe listen --forward-to localhost:8080/xrpc/io.atcr.hold.stripeWebhook
|
||||
|
||||
# Terminal 3: Trigger test events
|
||||
stripe trigger checkout.session.completed
|
||||
stripe trigger customer.subscription.created
|
||||
stripe trigger customer.subscription.updated
|
||||
stripe trigger customer.subscription.paused
|
||||
stripe trigger customer.subscription.resumed
|
||||
stripe trigger customer.subscription.deleted
|
||||
```
|
||||
|
||||
### Testing the Flow
|
||||
|
||||
1. Start hold with billing enabled
|
||||
2. Start Stripe CLI webhook forwarding
|
||||
3. Navigate to AppView settings page
|
||||
4. Click "Upgrade" on a tier
|
||||
5. Complete Stripe checkout (use test card `4242 4242 4242 4242`)
|
||||
6. Webhook fires → hold updates crew tier
|
||||
7. Refresh settings to see new tier
|
||||
|
||||
## Webhook Events
|
||||
|
||||
The hold handles these Stripe events:
|
||||
|
||||
| Event | Action |
|
||||
|-------|--------|
|
||||
| `checkout.session.completed` | Create/update subscription, set tier |
|
||||
| `customer.subscription.created` | Set crew tier from price ID |
|
||||
| `customer.subscription.updated` | Update crew tier if price changed |
|
||||
| `customer.subscription.paused` | Downgrade to free tier |
|
||||
| `customer.subscription.resumed` | Restore tier from subscription price |
|
||||
| `customer.subscription.deleted` | Downgrade to free tier |
|
||||
| `invoice.payment_failed` | Log warning (tier unchanged until canceled) |
|
||||
|
||||
## Plankowners (Grandfathering)
|
||||
|
||||
Early adopters can be marked as "plankowners" to get a paid tier for free:
|
||||
|
||||
```json
|
||||
{
|
||||
"$type": "io.atcr.hold.crew",
|
||||
"member": "did:plc:xxx",
|
||||
"tier": "deckhand",
|
||||
"plankowner": true,
|
||||
"permissions": ["blob:read", "blob:write"],
|
||||
"addedAt": "2025-01-01T00:00:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
Plankowners:
|
||||
- Get `plankowner_crew_tier` (e.g., deckhand) without paying
|
||||
- Still see upgrade options in UI if they want to support
|
||||
- Can upgrade to higher tiers normally
|
||||
|
||||
## Customer-DID Mapping
|
||||
|
||||
DIDs are stored in Stripe customer metadata:
|
||||
|
||||
```json
|
||||
{
|
||||
"metadata": {
|
||||
"user_did": "did:plc:xxx",
|
||||
"hold_did": "did:web:hold.example.com"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The hold uses an in-memory cache (10 min TTL) to reduce Stripe API calls. On webhook events, the cache is invalidated for the affected customer.
|
||||
|
||||
## Production Checklist
|
||||
|
||||
- [ ] Create Stripe products and prices in live mode
|
||||
- [ ] Set `STRIPE_SECRET_KEY` to live key (`sk_live_xxx`)
|
||||
- [ ] Configure webhook endpoint in Stripe Dashboard:
|
||||
- URL: `https://your-hold.com/xrpc/io.atcr.hold.stripeWebhook`
|
||||
- Events: `checkout.session.completed`, `customer.subscription.created`, `customer.subscription.updated`, `customer.subscription.paused`, `customer.subscription.resumed`, `customer.subscription.deleted`, `invoice.payment_failed`
|
||||
- [ ] Set `STRIPE_WEBHOOK_SECRET` from Dashboard webhook settings
|
||||
- [ ] Update `quotas.yaml` with live price IDs
|
||||
- [ ] Build hold with `-tags billing`
|
||||
- [ ] Test with a real payment (can refund immediately)
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Webhook signature verification failed
|
||||
- Ensure `STRIPE_WEBHOOK_SECRET` matches the webhook endpoint in Stripe Dashboard
|
||||
- For local dev, use the secret from `stripe listen` output
|
||||
|
||||
### Customer not found
|
||||
- Customer is created on first checkout
|
||||
- Check Stripe Dashboard → Customers for the DID in metadata
|
||||
|
||||
### Tier not updating after payment
|
||||
- Check hold logs for webhook processing errors
|
||||
- Verify price ID in `quotas.yaml` matches Stripe
|
||||
- Ensure `billing.enabled: true` in config
|
||||
|
||||
### "Billing not enabled" error
|
||||
- Build with `-tags billing`
|
||||
- Set `billing.enabled: true` in `quotas.yaml`
|
||||
- Ensure `STRIPE_SECRET_KEY` is set
|
||||
348
docs/BILLING_REFACTOR.md
Normal file
348
docs/BILLING_REFACTOR.md
Normal file
@@ -0,0 +1,348 @@
|
||||
# Billing & Webhooks Refactor: Move to AppView
|
||||
|
||||
## Motivation
|
||||
|
||||
The current billing model is **per-hold**: each hold operator runs their own Stripe integration, manages their own tiers, and users pay each hold separately. This creates problems:
|
||||
|
||||
1. **Multi-hold confusion**: A user on 3 holds could have 3 separate Stripe subscriptions with no unified view
|
||||
2. **Orphaned subscriptions**: Users can end up paying for holds they no longer use after switching their active hold
|
||||
3. **Complex UI**: The settings page needs to surface billing per-hold, with separate "Manage Billing" links for each
|
||||
4. **Captain-only billing**: Only hold captains can set up Stripe. Self-hosted hold operators who want to charge users would need their own Stripe account per hold
|
||||
|
||||
The proposed model is **per-appview**: a single Stripe integration on the appview, one subscription per user, covering all holds that appview manages.
|
||||
|
||||
## Current Architecture
|
||||
|
||||
```
|
||||
User ──Settings UI──→ AppView ──XRPC──→ Hold ──Stripe API──→ Stripe
|
||||
↑
|
||||
Stripe Webhooks
|
||||
```
|
||||
|
||||
### What lives where today
|
||||
|
||||
| Component | Location | Notes |
|
||||
|-----------|----------|-------|
|
||||
| Stripe customer management | Hold (`pkg/hold/billing/`) | Build tag: `-tags billing` |
|
||||
| Stripe checkout/portal | Hold XRPC endpoints | Authenticated via service token |
|
||||
| Stripe webhook receiver | Hold (`stripeWebhook` endpoint) | Updates crew tier on subscription change |
|
||||
| Tier definitions + pricing | Hold config (`quotas.yaml`, `billing` section) | Captain configures |
|
||||
| Quota enforcement | Hold (`pkg/hold/quota/`) | Checks tier limit on push |
|
||||
| Storage quota calculation | Hold PDS layer records | Deduped per-user |
|
||||
| Subscription UI | AppView handlers | Proxies all calls to hold |
|
||||
| Webhook management (scan) | Hold PDS + SQLite | URL/secret in SQLite, metadata in PDS record |
|
||||
| Webhook dispatch | Hold (`scan_broadcaster.go`) | Sends on scan completion |
|
||||
| Sailor webhook record | User's PDS | Links to hold's private webhook record |
|
||||
|
||||
## Proposed Architecture
|
||||
|
||||
```
|
||||
User ──Settings UI──→ AppView ──Stripe API──→ Stripe
|
||||
│ ↑
|
||||
│ Stripe Webhooks
|
||||
│
|
||||
├──XRPC──→ Hold A (quota enforcement, scan results)
|
||||
├──XRPC──→ Hold B
|
||||
└──XRPC──→ Hold C
|
||||
|
||||
AppView signs attestation
|
||||
│
|
||||
└──→ Hold stores in PDS (trust anchor)
|
||||
```
|
||||
|
||||
### What moves to AppView
|
||||
|
||||
| Component | From | To | Notes |
|
||||
|-----------|------|----|-------|
|
||||
| Stripe customer management | Hold | AppView | One customer per user, not per hold |
|
||||
| Stripe checkout/portal | Hold | AppView | Single subscription covers all holds |
|
||||
| Stripe webhook receiver | Hold | AppView | AppView updates tier across all holds |
|
||||
| Tier definitions + pricing | Hold config | AppView config | AppView defines billing tiers |
|
||||
| Scan webhooks (storage + dispatch) | Hold | AppView | AppView has user context, scan data comes via Jetstream/XRPC |
|
||||
|
||||
### What stays on the hold
|
||||
|
||||
| Component | Notes |
|
||||
|-----------|-------|
|
||||
| Quota enforcement | Hold still checks tier limit on push |
|
||||
| Storage quota calculation | Layer records stay in hold PDS |
|
||||
| Tier definitions (quota only) | Hold defines storage limits per tier, no pricing |
|
||||
| Scan execution + results | Scanner still talks to hold, results stored in hold PDS |
|
||||
| Crew tier field | Source of truth for enforcement, updated by appview |
|
||||
|
||||
## Billing Model
|
||||
|
||||
### One subscription, all holds
|
||||
|
||||
A user pays the appview once. Their subscription tier applies across every hold the appview manages.
|
||||
|
||||
```
|
||||
AppView billing tiers: [Free] [Tier 1] [Tier 2]
|
||||
│ │ │
|
||||
▼ ▼ ▼
|
||||
Hold A tiers (3GB/10GB/50GB): deckhand bosun quartermaster
|
||||
Hold B tiers (5GB/20GB/∞): deckhand bosun quartermaster
|
||||
```
|
||||
|
||||
### Tier pairing
|
||||
|
||||
The appview defines N billing slots. Each hold defines its own tier list with storage quotas. The appview maps its billing slots to each hold's lowest N tiers by rank order.
|
||||
|
||||
- AppView doesn't need to know tier names — just "slot 1, slot 2, slot 3"
|
||||
- Each hold independently decides what storage limit each tier gets
|
||||
- The settings UI shows the range: "5-10 GB depending on region" or "minimum 5 GB"
|
||||
|
||||
### Hold captains who want to charge
|
||||
|
||||
If a hold captain wants to charge their own users (not through the shared appview), they spin up their own appview instance with their own Stripe account. The billing code stays the same — it just runs on their appview instead of the shared one.
|
||||
|
||||
## AppView-Hold Trust Model
|
||||
|
||||
### Problem
|
||||
|
||||
The appview needs to tell holds "user X is tier Y." The hold needs to trust that instruction. If domains change, the hold needs to verify the appview's identity.
|
||||
|
||||
### Attestation handshake
|
||||
|
||||
1. **Hold config** already has `server.appview_url` (preferred appview)
|
||||
2. **AppView config** gains a `managed_holds` list (DIDs of holds it manages)
|
||||
3. On first connection, the appview signs an attestation with its private key:
|
||||
```json
|
||||
{
|
||||
"$type": "io.atcr.appview.attestation",
|
||||
"appviewDid": "did:web:atcr.io",
|
||||
"holdDid": "did:web:hold01.atcr.io",
|
||||
"issuedAt": "2026-02-23T...",
|
||||
"signature": "<signed with appview's P-256 key>"
|
||||
}
|
||||
```
|
||||
4. The hold stores this attestation in its embedded PDS
|
||||
5. On subsequent requests, the hold can challenge the appview: present the attestation, appview proves it holds the matching private key
|
||||
6. If the appview's domain changes, the attestation (tied to DID, not URL) remains valid
|
||||
|
||||
### Trust verification flow
|
||||
|
||||
```
|
||||
AppView boots → checks managed_holds list
|
||||
→ for each hold:
|
||||
→ calls hold's describeServer endpoint to verify DID
|
||||
→ signs attestation { appviewDid, holdDid, issuedAt }
|
||||
→ sends to hold via XRPC
|
||||
→ hold stores in PDS as io.atcr.hold.appview record
|
||||
|
||||
Hold receives tier update from appview:
|
||||
→ checks: does this request come from my preferred appview?
|
||||
→ verifies: signature on stored attestation matches appview's current key
|
||||
→ if valid: updates crew tier
|
||||
→ if invalid: rejects, logs warning
|
||||
```
|
||||
|
||||
### Key material
|
||||
|
||||
- **AppView**: P-256 key (already exists at `/var/lib/atcr/oauth/client.key`, used for OAuth)
|
||||
- **Hold**: K-256 key (PDS signing key)
|
||||
- Attestation is signed by appview's P-256 key, verifiable by anyone with the appview's public key (available via DID document)
|
||||
|
||||
## Webhooks: Move to AppView
|
||||
|
||||
### Why move
|
||||
|
||||
Scan webhooks currently live on the hold, but:
|
||||
- The webhook payload needs user handles, repository names, tags — all resolved by the appview
|
||||
- The hold only has DIDs and digests
|
||||
- The appview already processes scan records via Jetstream (backfill + live)
|
||||
- Webhook secrets shouldn't need to live on every hold the user pushes to
|
||||
|
||||
### New flow
|
||||
|
||||
```
|
||||
Scanner completes scan
|
||||
→ Hold stores scan record in PDS
|
||||
→ Jetstream delivers scan record to AppView
|
||||
→ AppView resolves user handle, repo name, tags
|
||||
→ AppView dispatches webhooks with full context
|
||||
```
|
||||
|
||||
### What changes
|
||||
|
||||
| Aspect | Current (hold) | Proposed (appview) |
|
||||
|--------|---------------|-------------------|
|
||||
| Webhook storage | Hold SQLite + PDS record | AppView DB + user's PDS record |
|
||||
| Webhook secrets | Hold SQLite (`webhook_secrets` table) | AppView DB |
|
||||
| Dispatch trigger | `scan_broadcaster.go` on scan completion | Jetstream processor on `io.atcr.hold.scan` record |
|
||||
| Payload enrichment | Hold fetches handle from appview metadata | AppView has full context natively |
|
||||
| Discord/Slack formatting | Hold (`webhooks.go`) | AppView (same code, moved) |
|
||||
| Tier-based limits | Hold quota manager | AppView billing tier |
|
||||
| XRPC endpoints | Hold (`listWebhooks`, `addWebhook`, etc.) | AppView API endpoints (already exist as proxies) |
|
||||
|
||||
### Webhook record changes
|
||||
|
||||
The `io.atcr.sailor.webhook` record in the user's PDS stays. It already stores `holdDid` and `triggers`. The `privateCid` field (linking to hold's internal record) becomes unnecessary since appview owns the full webhook now.
|
||||
|
||||
The `io.atcr.hold.webhook` record in the hold's PDS is no longer needed. Webhooks are appview-scoped, not hold-scoped.
|
||||
|
||||
### Migration path
|
||||
|
||||
1. AppView gains webhook storage in its own DB (new table)
|
||||
2. AppView gains webhook dispatch in its Jetstream processor
|
||||
3. Hold's webhook endpoints deprecated (return 410 Gone after transition period)
|
||||
4. Existing hold webhook records migrated via one-time script reading from hold XRPC + user PDS
|
||||
|
||||
## Config Changes
|
||||
|
||||
### AppView config additions
|
||||
|
||||
```yaml
|
||||
server:
|
||||
# Existing
|
||||
default_hold_did: "did:web:hold01.atcr.io"
|
||||
|
||||
# New
|
||||
managed_holds:
|
||||
- "did:web:hold01.atcr.io"
|
||||
- "did:plc:abc123..."
|
||||
|
||||
# New section
|
||||
billing:
|
||||
enabled: true
|
||||
currency: usd
|
||||
success_url: "{base_url}/settings#storage"
|
||||
cancel_url: "{base_url}/settings#storage"
|
||||
tiers:
|
||||
- name: "Free"
|
||||
# No stripe_price = free tier
|
||||
- name: "Standard"
|
||||
stripe_price_monthly: price_xxx
|
||||
stripe_price_yearly: price_yyy
|
||||
- name: "Pro"
|
||||
stripe_price_monthly: price_xxx
|
||||
stripe_price_yearly: price_yyy
|
||||
```
|
||||
|
||||
### AppView environment additions
|
||||
|
||||
```bash
|
||||
STRIPE_SECRET_KEY=sk_live_xxx
|
||||
STRIPE_WEBHOOK_SECRET=whsec_xxx
|
||||
```
|
||||
|
||||
### Hold config changes
|
||||
|
||||
```yaml
|
||||
# Removed
|
||||
billing:
|
||||
# entire section removed from hold config
|
||||
|
||||
# Stays (quota enforcement only)
|
||||
quota:
|
||||
tiers:
|
||||
- name: deckhand
|
||||
quota: 5GB
|
||||
- name: bosun
|
||||
quota: 50GB
|
||||
- name: quartermaster
|
||||
quota: 100GB
|
||||
defaults:
|
||||
new_crew_tier: deckhand
|
||||
```
|
||||
|
||||
The hold no longer has Stripe config. It just defines storage limits per tier and enforces them.
|
||||
|
||||
## AppView DB Schema Additions
|
||||
|
||||
```sql
|
||||
-- Webhook configurations (moved from hold SQLite)
|
||||
CREATE TABLE webhooks (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
user_did TEXT NOT NULL,
|
||||
url TEXT NOT NULL,
|
||||
secret_hash TEXT, -- bcrypt hash of HMAC secret
|
||||
triggers INTEGER NOT NULL DEFAULT 1, -- bitmask: first=1, all=2, changed=4
|
||||
created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
|
||||
UNIQUE(user_did, url)
|
||||
);
|
||||
|
||||
-- Billing: track which holds have been attested
|
||||
CREATE TABLE hold_attestations (
|
||||
hold_did TEXT PRIMARY KEY,
|
||||
attestation_cid TEXT NOT NULL, -- CID of attestation record in hold's PDS
|
||||
issued_at DATETIME NOT NULL,
|
||||
verified_at DATETIME
|
||||
);
|
||||
```
|
||||
|
||||
Stripe customer/subscription data continues to live in Stripe (queried via API, cached in memory). No local subscription table needed — same pattern as current hold billing, just on appview.
|
||||
|
||||
## Implementation Phases
|
||||
|
||||
### Phase 1: Trust foundation
|
||||
- Add `managed_holds` to appview config
|
||||
- Implement attestation signing (appview) and storage (hold)
|
||||
- Add attestation verification to hold's tier-update endpoint
|
||||
- New XRPC endpoint on hold: `io.atcr.hold.updateCrewTier` (appview-authenticated)
|
||||
|
||||
### Phase 2: Billing migration
|
||||
- Move Stripe integration from hold to appview (reuse `pkg/hold/billing/` code)
|
||||
- AppView billing uses `-tags billing` build tag (same pattern)
|
||||
- Implement tier pairing: appview billing slots mapped to hold tier lists
|
||||
- New appview endpoints: checkout, portal, stripe webhook receiver
|
||||
- Settings UI: single subscription section (not per-hold)
|
||||
|
||||
### Phase 3: Webhook migration ✅
|
||||
- Add webhook + scans tables to appview DB
|
||||
- Implement webhook dispatch in appview's Jetstream processor
|
||||
- Move Discord/Slack formatting code to `pkg/appview/webhooks/`
|
||||
- Deprecate hold webhook XRPC endpoints (X-Deprecated header)
|
||||
- Webhooks now user-scoped (global across all holds) in appview DB
|
||||
- Scan records cached from Jetstream for change detection
|
||||
|
||||
### Phase 4: Cleanup ✅
|
||||
- Removed hold webhook XRPC endpoints, dispatch code, and `webhooks.go`
|
||||
- Removed `io.atcr.hold.webhook` and `io.atcr.sailor.webhook` record types + lexicons
|
||||
- Removed `webhook_secrets` SQLite schema from scan_broadcaster
|
||||
- Removed `MaxWebhooks`/`WebhookAllTriggers` from hold quota config
|
||||
- Removed sailor webhook from OAuth scopes
|
||||
|
||||
## Settings UI Impact
|
||||
|
||||
The storage tab simplifies significantly:
|
||||
|
||||
```
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ Active Hold: [▼ hold01.atcr.io (Crew) ] │
|
||||
└──────────────────────────────────────────────────────┘
|
||||
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ Subscription: Standard ($5/mo) [Manage Billing] │
|
||||
│ Storage: 3-5 GB depending on region │
|
||||
└──────────────────────────────────────────────────────┘
|
||||
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ ★ hold01.atcr.io [Active] [Crew] [Online] │
|
||||
│ Tier: bosun · 281.5 MB / 5.0 GB (5%) │
|
||||
│ ▸ Webhooks (2 configured) │
|
||||
└──────────────────────────────────────────────────────┘
|
||||
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ Other Holds Role Status Storage │
|
||||
│ hold02.atcr.io Crew ● 230 MB / 3 GB │
|
||||
│ hold03.atcr.io Owner ● No data │
|
||||
└──────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Key changes:
|
||||
- **One subscription section** at the top (not per-hold)
|
||||
- **Webhooks section** under active hold card (managed by appview now)
|
||||
- **No "Paid" badge per hold** — subscription is global
|
||||
- **Storage range** shown on subscription card ("3-5 GB depending on region")
|
||||
- **Per-hold quota** still shown (each hold enforces its own limit for the user's tier)
|
||||
|
||||
## Open Questions
|
||||
|
||||
1. **Tier list endpoint**: Holds need a new XRPC endpoint that returns their tier list with quotas (without pricing). The appview calls this to build the "3-5 GB depending on region" display. Something like `io.atcr.hold.listTiers`.
|
||||
|
||||
2. **Existing Stripe customers**: Holds with existing Stripe subscriptions need a migration plan. Options: honor existing subscriptions until they expire, or bulk-migrate customers to appview's Stripe account.
|
||||
|
||||
3. **Webhook delivery guarantees**: Moving dispatch to appview adds latency (scan record → Jetstream → appview → webhook). For time-sensitive notifications, consider the hold sending a lightweight "scan completed" signal directly to appview via XRPC rather than waiting for Jetstream propagation.
|
||||
|
||||
4. **Self-hosted appviews**: The attestation model assumes one appview per set of holds. If multiple appviews try to manage the same hold, the hold should only trust the most recent attestation (or maintain a list).
|
||||
36
docs/BYOS.md
36
docs/BYOS.md
@@ -5,7 +5,7 @@
|
||||
ATCR supports "Bring Your Own Storage" (BYOS) for blob storage. Users can:
|
||||
- Deploy their own hold service with embedded PDS
|
||||
- Control access via crew membership in the hold's PDS
|
||||
- Keep blob data in their own S3/Storj/Minio while manifests stay in their user PDS
|
||||
- Keep blob data in their own S3-compatible storage (AWS S3, Storj, Minio, UpCloud, etc.) while manifests stay in their user PDS
|
||||
|
||||
## Architecture
|
||||
|
||||
@@ -46,7 +46,7 @@ ATCR supports "Bring Your Own Storage" (BYOS) for blob storage. Users can:
|
||||
Each hold is a full ATProto actor with:
|
||||
- **DID**: `did:web:hold.example.com` (hold's identity)
|
||||
- **Embedded PDS**: Stores captain + crew records (shared data)
|
||||
- **Storage backend**: S3, Storj, Minio, filesystem, etc.
|
||||
- **Storage backend**: S3-compatible (AWS S3, Storj, Minio, UpCloud, etc.)
|
||||
- **XRPC endpoints**: Standard ATProto + custom OCI multipart upload
|
||||
|
||||
### Records in Hold's PDS
|
||||
@@ -98,8 +98,7 @@ Hold service is configured entirely via environment variables:
|
||||
HOLD_PUBLIC_URL=https://hold.example.com
|
||||
HOLD_OWNER=did:plc:your-did-here
|
||||
|
||||
# Storage backend
|
||||
STORAGE_DRIVER=s3
|
||||
# S3 storage backend (REQUIRED)
|
||||
AWS_ACCESS_KEY_ID=your_access_key
|
||||
AWS_SECRET_ACCESS_KEY=your_secret_key
|
||||
AWS_REGION=us-east-1
|
||||
@@ -116,15 +115,22 @@ HOLD_DATABASE_KEY_PATH=/var/lib/atcr-hold/keys
|
||||
|
||||
### Running Locally
|
||||
|
||||
For local development, use Minio as an S3-compatible storage:
|
||||
|
||||
```bash
|
||||
# Start Minio (in separate terminal)
|
||||
docker run -p 9000:9000 -p 9001:9001 minio/minio server /data --console-address ":9001"
|
||||
|
||||
# Build
|
||||
go build -o bin/atcr-hold ./cmd/hold
|
||||
|
||||
# Run (with env vars or .env file)
|
||||
export HOLD_PUBLIC_URL=http://localhost:8080
|
||||
export HOLD_OWNER=did:plc:your-did-here
|
||||
export STORAGE_DRIVER=filesystem
|
||||
export STORAGE_ROOT_DIR=/tmp/atcr-hold
|
||||
export AWS_ACCESS_KEY_ID=minioadmin
|
||||
export AWS_SECRET_ACCESS_KEY=minioadmin
|
||||
export S3_BUCKET=test
|
||||
export S3_ENDPOINT=http://localhost:9000
|
||||
export HOLD_DATABASE_PATH=/tmp/atcr-hold/hold.db
|
||||
|
||||
./bin/atcr-hold
|
||||
@@ -145,7 +151,6 @@ primary_region = "ord"
|
||||
|
||||
[env]
|
||||
HOLD_PUBLIC_URL = "https://my-atcr-hold.fly.dev"
|
||||
STORAGE_DRIVER = "s3"
|
||||
AWS_REGION = "us-east-1"
|
||||
S3_BUCKET = "my-blobs"
|
||||
HOLD_PUBLIC = "false"
|
||||
@@ -299,14 +304,15 @@ atproto delete-record \
|
||||
--rkey "{memberDID}"
|
||||
```
|
||||
|
||||
## Storage Drivers
|
||||
## Storage Backends
|
||||
|
||||
Hold service supports all distribution storage drivers:
|
||||
- **S3** - AWS S3, Minio, Storj (via S3 gateway)
|
||||
- **Filesystem** - Local disk (for testing)
|
||||
- **Azure** - Azure Blob Storage
|
||||
- **GCS** - Google Cloud Storage
|
||||
- **Swift** - OpenStack Swift
|
||||
Hold service requires S3-compatible storage. Supported providers:
|
||||
- **AWS S3** - Amazon Simple Storage Service
|
||||
- **Storj** - Decentralized cloud storage (via S3 gateway)
|
||||
- **Minio** - High-performance object storage (great for local development)
|
||||
- **UpCloud** - European cloud provider
|
||||
- **Azure** - Azure Blob Storage (via S3-compatible API)
|
||||
- **GCS** - Google Cloud Storage (via S3-compatible API)
|
||||
|
||||
## Example: Team Hold
|
||||
|
||||
@@ -315,8 +321,8 @@ Hold service supports all distribution storage drivers:
|
||||
export HOLD_PUBLIC_URL=https://team-hold.fly.dev
|
||||
export HOLD_OWNER=did:plc:admin
|
||||
export HOLD_PUBLIC=false # Private
|
||||
export STORAGE_DRIVER=s3
|
||||
export AWS_ACCESS_KEY_ID=...
|
||||
export AWS_SECRET_ACCESS_KEY=...
|
||||
export S3_BUCKET=team-blobs
|
||||
|
||||
fly deploy
|
||||
|
||||
49
docs/CONFIG_BLOB_STORAGE.md
Normal file
49
docs/CONFIG_BLOB_STORAGE.md
Normal file
@@ -0,0 +1,49 @@
|
||||
# Config Blob Storage Decision
|
||||
|
||||
## Background
|
||||
|
||||
OCI image manifests reference two types of blobs:
|
||||
|
||||
1. **Layers** — filesystem diffs (tar+gzip), typically large, content-addressed and shared across users
|
||||
2. **Config blob** — small JSON (~2-15KB) containing image metadata: architecture, OS, environment variables, entrypoint, Dockerfile build history, and labels
|
||||
|
||||
In ATCR, manifests are stored in the user's PDS while all blobs (layers and config) are stored in S3 via the hold service. The hold tracks layers with `io.atcr.hold.layer` records but has no equivalent tracking for config blobs.
|
||||
|
||||
## Considered: Storing Config Blobs in PDS
|
||||
|
||||
Config blobs are unique per image build — unlike layers which are deduplicated across users, a config blob contains the specific Dockerfile history, env vars, and labels for that build. This makes them conceptually "user data" that could belong in the user's PDS alongside the manifest.
|
||||
|
||||
The proposal was to add a `ConfigBlob` field to `ManifestRecord`, uploading the config blob to PDS during push (the data is already fetched from S3 for label extraction). The config would remain in S3 as well since the distribution library puts it there during the blob push phase.
|
||||
|
||||
Potential benefits:
|
||||
- Manifests become more self-contained in PDS
|
||||
- Config metadata (entrypoint, env, history) available without S3 access (e.g., for web UI)
|
||||
- Aligns with the principle that user-specific data belongs in the user's PDS
|
||||
|
||||
## Decision: Keep Config Blobs in S3 Only
|
||||
|
||||
Config blobs can contain sensitive data:
|
||||
|
||||
- **Environment variables** — `ENV DATABASE_URL=...`, `ENV API_KEY=...` set in Dockerfiles
|
||||
- **Build history** — `history[].created_by` reveals exact Dockerfile commands, internal registry URLs, build arguments
|
||||
- **Labels** — may contain internal metadata not intended for public consumption
|
||||
|
||||
ATProto has no private data. The current storage split creates a useful privacy boundary:
|
||||
|
||||
| Storage | Visibility | Contains |
|
||||
|---------|-----------|----------|
|
||||
| PDS | Public (anyone) | Manifest structure, tags, repo names, annotations |
|
||||
| Hold/S3 | Auth-gated | Layers + config — actual image content |
|
||||
|
||||
This boundary enables **semi-private repos**: the public PDS metadata tells you what images exist (names, tags, sizes), but you cannot reconstruct or run the image without hold access. Storing config in PDS would break this — build secrets and Dockerfile history would be publicly readable even when the hold restricts blob access.
|
||||
|
||||
We considered making PDS storage optional (only for fully public holds or allow-all-crew holds), but an optional field that can't be relied upon adds complexity without clear benefit — the config must live in S3 regardless for the pull path.
|
||||
|
||||
## Current Status
|
||||
|
||||
Config blobs remain in S3 behind hold authorization. GC handles config digests to prevent orphaned deletion (config digests are included in the referenced set alongside layer digests).
|
||||
|
||||
## Revisit If
|
||||
|
||||
- ATProto adds private data support
|
||||
- A concrete use case emerges that requires PDS-native config access
|
||||
165
docs/CREDENTIAL_HELPER_V2.md
Normal file
165
docs/CREDENTIAL_HELPER_V2.md
Normal file
@@ -0,0 +1,165 @@
|
||||
# Credential Helper Rewrite
|
||||
|
||||
## Context
|
||||
|
||||
The current credential helper (`cmd/credential-helper/main.go`, ~1070 lines) is a monolithic single-file binary with a manual `switch` dispatch. It has no help text, hangs silently when run without stdin, embeds interactive device auth inside the Docker protocol `get` command (blocking pushes for up to 2 minutes while polling), and only supports one account per registry. Users want multi-account support (e.g., `evan.jarrett.net` and `michelle.jarrett.net` on the same `atcr.io`) and multi-registry support (e.g., `atcr.io` + `buoy.cr`).
|
||||
|
||||
## Approach
|
||||
|
||||
Rewrite using **Cobra** (already a project dependency) for the CLI framework and **charmbracelet/huh** for interactive prompts (select menus, confirmations, spinners). Separate Docker protocol commands (machine-readable, hidden) from user-facing commands (interactive, discoverable). Model after `gh auth` UX patterns.
|
||||
|
||||
**Smart account auto-detection**: The `get` command inspects the parent process command line (`/proc/<ppid>/cmdline` on Linux, `ps` on macOS) to determine which image Docker is pushing/pulling. Since ATCR URLs are `host/<identity>/repo:tag`, we can extract the identity and auto-select the matching account — no prompts, no manual switching needed in the common case.
|
||||
|
||||
## Command Tree
|
||||
|
||||
```
|
||||
docker-credential-atcr
|
||||
├── get (Docker protocol — stdin/stdout, hidden, smart account detection)
|
||||
├── store (Docker protocol — stdin, hidden)
|
||||
├── erase (Docker protocol — stdin, hidden)
|
||||
├── list (Docker protocol extension, hidden)
|
||||
├── login (Interactive device flow with huh prompts)
|
||||
├── logout (Remove account credentials)
|
||||
├── status (Show all accounts with active indicators)
|
||||
├── switch (Switch active account — auto-toggle for 2, select for 3+)
|
||||
├── configure-docker (Auto-edit ~/.docker/config.json credHelpers)
|
||||
├── update (Self-update, existing logic preserved)
|
||||
└── version (Built-in via cobra)
|
||||
```
|
||||
|
||||
## Smart Account Resolution (`get` command)
|
||||
|
||||
The `get` command resolves which account to use with this priority chain — fully non-interactive:
|
||||
|
||||
```
|
||||
1. Parse parent process cmdline → extract identity from image ref
|
||||
docker push atcr.io/evan.jarrett.net/test:latest
|
||||
→ parent cmdline contains "evan.jarrett.net" → use that account
|
||||
|
||||
2. Fall back to active account (set by `switch` command)
|
||||
|
||||
3. Fall back to sole account (if only one exists for this registry)
|
||||
|
||||
4. Error with helpful message:
|
||||
"Multiple accounts for atcr.io. Run: docker-credential-atcr switch"
|
||||
```
|
||||
|
||||
**Parent process detection** (in `helpers.go`):
|
||||
- Linux: read `/proc/<ppid>/cmdline` (null-separated args)
|
||||
- macOS: `ps -o args= -p <ppid>`
|
||||
- Windows: best-effort via `wmic` or skip (fall to active account)
|
||||
- Parse image ref: find the arg matching `<registry-host>/<identity>/...`, extract `<identity>`
|
||||
- Graceful failure: if parent isn't Docker, cmdline unreadable, or image ref not parseable → fall through to active account
|
||||
|
||||
## File Structure
|
||||
|
||||
```
|
||||
cmd/credential-helper/
|
||||
main.go — Cobra root command, version vars, subcommand registration
|
||||
config.go — Config types, load/save/migrate, getConfigPath
|
||||
device_auth.go — authorizeDevice(), validateCredentials() HTTP logic
|
||||
protocol.go — Docker protocol: get, store, erase, list (all hidden)
|
||||
cmd_login.go — login command (huh prompts + device flow)
|
||||
cmd_logout.go — logout command (huh confirm)
|
||||
cmd_status.go — status display
|
||||
cmd_switch.go — switch command (huh select)
|
||||
cmd_configure.go — configure-docker (edit ~/.docker/config.json)
|
||||
cmd_update.go — update command (moved from existing code)
|
||||
helpers.go — openBrowser, buildAppViewURL, isInsecureRegistry, parentCmdline, etc.
|
||||
```
|
||||
|
||||
## Config Format (`~/.atcr/device.json`)
|
||||
|
||||
```json
|
||||
{
|
||||
"version": 2,
|
||||
"registries": {
|
||||
"https://atcr.io": {
|
||||
"active": "evan.jarrett.net",
|
||||
"accounts": {
|
||||
"evan.jarrett.net": {
|
||||
"handle": "evan.jarrett.net",
|
||||
"did": "did:plc:abc123",
|
||||
"device_secret": "atcr_device_..."
|
||||
},
|
||||
"michelle.jarrett.net": {
|
||||
"handle": "michelle.jarrett.net",
|
||||
"did": "did:plc:def456",
|
||||
"device_secret": "atcr_device_..."
|
||||
}
|
||||
}
|
||||
},
|
||||
"https://buoy.cr": {
|
||||
"active": "evan.jarrett.net",
|
||||
"accounts": { ... }
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Migration**: `loadConfig()` auto-detects and migrates from old formats:
|
||||
- Legacy single-device `{handle, device_secret, appview_url}` → v2
|
||||
- Current multi-registry `{credentials: {url: {...}}}` → v2
|
||||
- Writes back migrated config on first load
|
||||
|
||||
## Key Behavioral Changes
|
||||
|
||||
| Command | Current | New |
|
||||
|---------|---------|-----|
|
||||
| `get` | Opens browser, polls 2min if no creds | Smart detection → active account → error |
|
||||
| `get` (multi-account) | N/A (single account only) | Auto-detects identity from parent cmdline |
|
||||
| `get` (no stdin) | Hangs forever | Detects terminal, prints help, exits 1 |
|
||||
| `get` (OAuth expired) | Auto-opens browser, polls | Prints login URL, exits 1 |
|
||||
| `store` | No-op | Stores if secret is device secret (`atcr_device_*`) |
|
||||
| `erase` | Removes all creds for host | Removes active account only |
|
||||
| No args | Prints bare usage | Prints full cobra help with all commands |
|
||||
|
||||
## Dependencies
|
||||
|
||||
- `github.com/spf13/cobra` — already in go.mod
|
||||
- `github.com/charmbracelet/huh` — new (pure Go, CGO_ENABLED=0 safe)
|
||||
|
||||
No changes to `.goreleaser.yaml` needed.
|
||||
|
||||
## Implementation Order
|
||||
|
||||
### Phase 1: Foundation
|
||||
1. `helpers.go` — move utility functions verbatim + add `getParentCmdline()` and `detectIdentityFromParent(registryHost)`
|
||||
2. `config.go` — new config types + migration from old formats
|
||||
3. `main.go` — Cobra root command, register all subcommands
|
||||
|
||||
### Phase 2: Docker Protocol (must work for existing users)
|
||||
4. `device_auth.go` — extract `authorizeDevice()` + `validateCredentials()`
|
||||
5. `protocol.go` — `get`/`store`/`erase`/`list` using new config with smart account resolution
|
||||
|
||||
### Phase 3: User Commands
|
||||
6. `cmd_login.go` — interactive device flow with huh spinner
|
||||
7. `cmd_status.go` — display all registries/accounts
|
||||
8. `cmd_switch.go` — huh select for account switching
|
||||
9. `cmd_logout.go` — huh confirm for removal
|
||||
10. `cmd_configure.go` — Docker config.json manipulation
|
||||
11. `cmd_update.go` — move existing update logic
|
||||
|
||||
### Phase 4: Polish
|
||||
12. Add `huh` to go.mod
|
||||
13. Delete old `main.go` contents (replaced by new files)
|
||||
|
||||
## What to Keep vs Rewrite
|
||||
|
||||
**Keep** (move to new files): `openBrowser()`, `buildAppViewURL()`, `isInsecureRegistry()`, `getDockerInsecureRegistries()`, `readDockerDaemonConfig()`, `stripPort()`, `isTerminal()`, `authorizeDevice()` HTTP logic, `validateCredentials()`, all update/version check functions.
|
||||
|
||||
**Rewrite**: `main()`, `handleGet()` (split into non-interactive `get` with smart detection + interactive `login`), `handleStore()` (implement actual storage), `handleErase()` (multi-account aware), config types and loading.
|
||||
|
||||
**New**: `list`, `login`, `logout`, `status`, `switch`, `configure-docker` commands. Config migration. Parent process identity detection. huh integration.
|
||||
|
||||
## Verification
|
||||
|
||||
1. Build: `go build -o bin/docker-credential-atcr ./cmd/credential-helper`
|
||||
2. Help works: `bin/docker-credential-atcr --help` shows all user commands
|
||||
3. Protocol works: `echo "atcr.io" | bin/docker-credential-atcr get` returns credentials or helpful error
|
||||
4. No hang: `bin/docker-credential-atcr get` (no stdin pipe) detects terminal, prints help, exits
|
||||
5. Smart detection: `docker push atcr.io/evan.jarrett.net/test:latest` auto-selects `evan.jarrett.net`
|
||||
6. Login flow: `bin/docker-credential-atcr login` triggers device auth with huh prompts
|
||||
7. Status: `bin/docker-credential-atcr status` shows configured accounts
|
||||
8. Config migration: Place old-format `~/.atcr/device.json`, run any command, verify auto-migration
|
||||
9. GoReleaser: `CGO_ENABLED=0 go build ./cmd/credential-helper` succeeds
|
||||
@@ -47,7 +47,7 @@
|
||||
│ (changes appear instantly in container)
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ Container (golang:1.25.2 base, has all tools) │
|
||||
│ Container (golang:1.25.7 base, has all tools) │
|
||||
│ │
|
||||
│ ┌──────────────────────────────────────┐ │
|
||||
│ │ Air (hot reload tool) │ │
|
||||
@@ -65,7 +65,7 @@
|
||||
│ │ ui.go checks DEV_MODE: │ │
|
||||
│ │ if DEV_MODE: │ │
|
||||
│ │ templatesFS = os.DirFS("...") │ │
|
||||
│ │ staticFS = os.DirFS("...") │ │
|
||||
│ │ publicFS = os.DirFS("...") │ │
|
||||
│ │ else: │ │
|
||||
│ │ use embed.FS (production) │ │
|
||||
│ │ │ │
|
||||
@@ -78,7 +78,7 @@
|
||||
|
||||
#### Scenario 1: Edit CSS/JS/Templates
|
||||
```
|
||||
1. Edit pkg/appview/static/css/style.css in VSCode
|
||||
1. Edit pkg/appview/public/css/style.css in VSCode
|
||||
2. Save file
|
||||
3. Change appears in container via volume mount (instant)
|
||||
4. App uses os.DirFS → reads new file from disk (instant)
|
||||
@@ -107,7 +107,7 @@ Create `Dockerfile.devel` in project root:
|
||||
|
||||
```dockerfile
|
||||
# Development Dockerfile with hot reload support
|
||||
FROM golang:1.25.2-trixie
|
||||
FROM golang:1.25.7-trixie
|
||||
|
||||
# Install Air for hot reload
|
||||
RUN go install github.com/cosmtrek/air@latest
|
||||
@@ -165,9 +165,6 @@ services:
|
||||
# Auth
|
||||
ATCR_AUTH_KEY_PATH: "/var/lib/atcr/auth/private-key.pem"
|
||||
|
||||
# UI
|
||||
ATCR_UI_ENABLED: "true"
|
||||
|
||||
# Jetstream (optional)
|
||||
# JETSTREAM_URL: "wss://jetstream2.us-east.bsky.network/subscribe"
|
||||
# ATCR_BACKFILL_ENABLED: "false"
|
||||
@@ -316,23 +313,23 @@ import (
|
||||
var embeddedTemplatesFS embed.FS
|
||||
|
||||
//go:embed static
|
||||
var embeddedStaticFS embed.FS
|
||||
var embeddedpublicFS embed.FS
|
||||
|
||||
// Actual filesystems used at runtime (conditional)
|
||||
var templatesFS fs.FS
|
||||
var staticFS fs.FS
|
||||
var publicFS fs.FS
|
||||
|
||||
func init() {
|
||||
// Development mode: read from filesystem for instant updates
|
||||
if os.Getenv("ATCR_DEV_MODE") == "true" {
|
||||
log.Println("🔧 DEV MODE: Using filesystem for templates and static assets")
|
||||
templatesFS = os.DirFS("pkg/appview/templates")
|
||||
staticFS = os.DirFS("pkg/appview/static")
|
||||
publicFS = os.DirFS("pkg/appview/static")
|
||||
} else {
|
||||
// Production mode: use embedded assets
|
||||
log.Println("📦 PRODUCTION MODE: Using embedded assets")
|
||||
templatesFS = embeddedTemplatesFS
|
||||
staticFS = embeddedStaticFS
|
||||
publicFS = embeddedpublicFS
|
||||
}
|
||||
}
|
||||
|
||||
@@ -347,7 +344,7 @@ func Templates() *template.Template {
|
||||
|
||||
// StaticHandler returns a handler for static files
|
||||
func StaticHandler() http.Handler {
|
||||
sub, err := fs.Sub(staticFS, "static")
|
||||
sub, err := fs.Sub(publicFS, "static")
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create static sub-filesystem: %v", err)
|
||||
}
|
||||
@@ -445,8 +442,8 @@ atcr-appview | running...
|
||||
```bash
|
||||
# Edit any template, CSS, or JS file
|
||||
vim pkg/appview/templates/pages/home.html
|
||||
vim pkg/appview/static/css/style.css
|
||||
vim pkg/appview/static/js/app.js
|
||||
vim pkg/appview/public/css/style.css
|
||||
vim pkg/appview/public/js/app.js
|
||||
|
||||
# Save file → changes appear instantly
|
||||
# Just refresh browser (Cmd+R / Ctrl+R)
|
||||
@@ -524,7 +521,6 @@ export ATCR_BASE_URL=http://localhost:5000
|
||||
export ATCR_DEFAULT_HOLD_DID=did:web:hold01.atcr.io
|
||||
export ATCR_UI_DATABASE_PATH=/tmp/atcr-ui.db
|
||||
export ATCR_AUTH_KEY_PATH=/tmp/atcr-auth-key.pem
|
||||
export ATCR_UI_ENABLED=true
|
||||
|
||||
# Or use .env file
|
||||
source .env.appview
|
||||
|
||||
304
docs/DIRECT_HOLD_ACCESS.md
Normal file
304
docs/DIRECT_HOLD_ACCESS.md
Normal file
@@ -0,0 +1,304 @@
|
||||
# Accessing Hold Data Without AppView
|
||||
|
||||
This document explains how to retrieve your data directly from a hold service without going through the ATCR AppView. This is useful for:
|
||||
- GDPR data export requests
|
||||
- Backup and migration
|
||||
- Debugging and development
|
||||
- Building alternative clients
|
||||
|
||||
## Quick Start: App Passwords (Recommended)
|
||||
|
||||
The simplest way to authenticate is using an ATProto app password. This avoids the complexity of OAuth + DPoP.
|
||||
|
||||
### Step 1: Create an App Password
|
||||
|
||||
1. Go to your Bluesky settings: https://bsky.app/settings/app-passwords
|
||||
2. Create a new app password
|
||||
3. Save it securely (you'll only see it once)
|
||||
|
||||
### Step 2: Get a Session Token
|
||||
|
||||
```bash
|
||||
# Replace with your handle and app password
|
||||
HANDLE="yourhandle.bsky.social"
|
||||
APP_PASSWORD="xxxx-xxxx-xxxx-xxxx"
|
||||
|
||||
# Create session with your PDS
|
||||
SESSION=$(curl -s -X POST "https://bsky.social/xrpc/com.atproto.server.createSession" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"identifier\": \"$HANDLE\", \"password\": \"$APP_PASSWORD\"}")
|
||||
|
||||
# Extract tokens
|
||||
ACCESS_JWT=$(echo "$SESSION" | jq -r '.accessJwt')
|
||||
DID=$(echo "$SESSION" | jq -r '.did')
|
||||
PDS=$(echo "$SESSION" | jq -r '.didDoc.service[0].serviceEndpoint')
|
||||
|
||||
echo "DID: $DID"
|
||||
echo "PDS: $PDS"
|
||||
```
|
||||
|
||||
### Step 3: Get a Service Token for the Hold
|
||||
|
||||
```bash
|
||||
# The hold DID you want to access (e.g., did:web:hold01.atcr.io)
|
||||
HOLD_DID="did:web:hold01.atcr.io"
|
||||
|
||||
# Get a service token from your PDS
|
||||
SERVICE_TOKEN=$(curl -s -X GET "$PDS/xrpc/com.atproto.server.getServiceAuth?aud=$HOLD_DID" \
|
||||
-H "Authorization: Bearer $ACCESS_JWT" | jq -r '.token')
|
||||
|
||||
echo "Service Token: $SERVICE_TOKEN"
|
||||
```
|
||||
|
||||
### Step 4: Call Hold Endpoints
|
||||
|
||||
Now you can call any authenticated hold endpoint with the service token:
|
||||
|
||||
```bash
|
||||
# Export your data from the hold
|
||||
curl -s "https://hold01.atcr.io/xrpc/io.atcr.hold.exportUserData" \
|
||||
-H "Authorization: Bearer $SERVICE_TOKEN" | jq .
|
||||
```
|
||||
|
||||
### Complete Script
|
||||
|
||||
Here's a complete script that does all the above:
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# export-hold-data.sh - Export your data from an ATCR hold
|
||||
|
||||
set -e
|
||||
|
||||
# Configuration
|
||||
HANDLE="${1:-yourhandle.bsky.social}"
|
||||
APP_PASSWORD="${2:-xxxx-xxxx-xxxx-xxxx}"
|
||||
HOLD_DID="${3:-did:web:hold01.atcr.io}"
|
||||
|
||||
# Default PDS (Bluesky's main PDS)
|
||||
DEFAULT_PDS="https://bsky.social"
|
||||
|
||||
echo "Authenticating as $HANDLE..."
|
||||
|
||||
# Step 1: Create session
|
||||
SESSION=$(curl -s -X POST "$DEFAULT_PDS/xrpc/com.atproto.server.createSession" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"identifier\": \"$HANDLE\", \"password\": \"$APP_PASSWORD\"}")
|
||||
|
||||
# Check for errors
|
||||
if echo "$SESSION" | jq -e '.error' > /dev/null 2>&1; then
|
||||
echo "Error: $(echo "$SESSION" | jq -r '.message')"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ACCESS_JWT=$(echo "$SESSION" | jq -r '.accessJwt')
|
||||
DID=$(echo "$SESSION" | jq -r '.did')
|
||||
|
||||
# Try to get PDS from didDoc, fall back to default
|
||||
PDS=$(echo "$SESSION" | jq -r '.didDoc.service[] | select(.id == "#atproto_pds") | .serviceEndpoint' 2>/dev/null || echo "$DEFAULT_PDS")
|
||||
if [ "$PDS" = "null" ] || [ -z "$PDS" ]; then
|
||||
PDS="$DEFAULT_PDS"
|
||||
fi
|
||||
|
||||
echo "Authenticated as $DID"
|
||||
echo "PDS: $PDS"
|
||||
|
||||
# Step 2: Get service token for the hold
|
||||
echo "Getting service token for $HOLD_DID..."
|
||||
SERVICE_RESPONSE=$(curl -s -X GET "$PDS/xrpc/com.atproto.server.getServiceAuth?aud=$HOLD_DID" \
|
||||
-H "Authorization: Bearer $ACCESS_JWT")
|
||||
|
||||
if echo "$SERVICE_RESPONSE" | jq -e '.error' > /dev/null 2>&1; then
|
||||
echo "Error getting service token: $(echo "$SERVICE_RESPONSE" | jq -r '.message')"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
SERVICE_TOKEN=$(echo "$SERVICE_RESPONSE" | jq -r '.token')
|
||||
|
||||
# Step 3: Resolve hold DID to URL
|
||||
if [[ "$HOLD_DID" == did:web:* ]]; then
|
||||
# did:web:example.com -> https://example.com
|
||||
HOLD_HOST="${HOLD_DID#did:web:}"
|
||||
HOLD_URL="https://$HOLD_HOST"
|
||||
else
|
||||
echo "Error: Only did:web holds are currently supported for direct resolution"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Hold URL: $HOLD_URL"
|
||||
|
||||
# Step 4: Export data
|
||||
echo "Exporting data from $HOLD_URL..."
|
||||
curl -s "$HOLD_URL/xrpc/io.atcr.hold.exportUserData" \
|
||||
-H "Authorization: Bearer $SERVICE_TOKEN" | jq .
|
||||
```
|
||||
|
||||
Usage:
|
||||
```bash
|
||||
chmod +x export-hold-data.sh
|
||||
./export-hold-data.sh yourhandle.bsky.social xxxx-xxxx-xxxx-xxxx did:web:hold01.atcr.io
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Available Hold Endpoints
|
||||
|
||||
Once you have a service token, you can call these endpoints:
|
||||
|
||||
### Data Export (GDPR)
|
||||
```bash
|
||||
GET /xrpc/io.atcr.hold.exportUserData
|
||||
Authorization: Bearer {service_token}
|
||||
```
|
||||
|
||||
Returns all your data stored on that hold:
|
||||
- Layer records (blobs you've pushed)
|
||||
- Crew membership status
|
||||
- Usage statistics
|
||||
- Whether you're the hold captain
|
||||
|
||||
### Quota Information
|
||||
```bash
|
||||
GET /xrpc/io.atcr.hold.getQuota?userDid={your_did}
|
||||
# No auth required - just needs your DID
|
||||
```
|
||||
|
||||
### Blob Download (if you have read access)
|
||||
```bash
|
||||
GET /xrpc/com.atproto.sync.getBlob?did={owner_did}&cid={blob_digest}
|
||||
Authorization: Bearer {service_token}
|
||||
```
|
||||
|
||||
Returns a presigned URL to download the blob directly from storage.
|
||||
|
||||
---
|
||||
|
||||
## OAuth + DPoP (Advanced)
|
||||
|
||||
App passwords are the simplest option, but OAuth with DPoP is the "proper" way to authenticate in ATProto. However, it's significantly more complex because:
|
||||
|
||||
1. **DPoP (Demonstrating Proof of Possession)** - Every request requires a cryptographically signed JWT proving you control a specific key
|
||||
2. **PAR (Pushed Authorization Requests)** - Authorization parameters are sent server-to-server
|
||||
3. **PKCE (Proof Key for Code Exchange)** - Prevents authorization code interception
|
||||
|
||||
### Why DPoP Makes Curl Impractical
|
||||
|
||||
Each request requires a fresh DPoP proof JWT with:
|
||||
- Unique `jti` (request ID)
|
||||
- Current `iat` timestamp
|
||||
- HTTP method and URL bound to the request
|
||||
- Server-provided `nonce`
|
||||
- Signature using your P-256 private key
|
||||
|
||||
Example DPoP proof structure:
|
||||
```json
|
||||
{
|
||||
"alg": "ES256",
|
||||
"typ": "dpop+jwt",
|
||||
"jwk": { "kty": "EC", "crv": "P-256", "x": "...", "y": "..." }
|
||||
}
|
||||
{
|
||||
"htm": "GET",
|
||||
"htu": "https://bsky.social/xrpc/com.atproto.server.getServiceAuth",
|
||||
"jti": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"iat": 1735689100,
|
||||
"nonce": "server-provided-nonce"
|
||||
}
|
||||
```
|
||||
|
||||
### If You Need OAuth
|
||||
|
||||
If you need OAuth (e.g., for a production application), you'll want to use a library:
|
||||
|
||||
**Go:**
|
||||
```go
|
||||
import "github.com/bluesky-social/indigo/atproto/auth/oauth"
|
||||
```
|
||||
|
||||
**TypeScript/JavaScript:**
|
||||
```bash
|
||||
npm install @atproto/oauth-client-node
|
||||
```
|
||||
|
||||
**Python:**
|
||||
```bash
|
||||
pip install atproto
|
||||
```
|
||||
|
||||
These libraries handle all the DPoP complexity for you.
|
||||
|
||||
### High-Level OAuth Flow
|
||||
|
||||
For documentation purposes, here's what the flow looks like:
|
||||
|
||||
1. **Resolve identity**: `handle` → `DID` → `PDS endpoint`
|
||||
2. **Discover OAuth server**: `GET {pds}/.well-known/oauth-authorization-server`
|
||||
3. **Generate DPoP key**: Create P-256 key pair
|
||||
4. **PAR request**: Send authorization parameters (with DPoP proof)
|
||||
5. **User authorization**: Browser-based login
|
||||
6. **Token exchange**: Exchange code for tokens (with DPoP proof)
|
||||
7. **Use tokens**: All subsequent requests include DPoP proofs
|
||||
|
||||
Each step after #3 requires generating a fresh DPoP proof JWT, which is why libraries are essential.
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "Invalid token" or "Token expired"
|
||||
|
||||
Service tokens are only valid for ~60 seconds. Get a fresh one:
|
||||
```bash
|
||||
SERVICE_TOKEN=$(curl -s "$PDS/xrpc/com.atproto.server.getServiceAuth?aud=$HOLD_DID" \
|
||||
-H "Authorization: Bearer $ACCESS_JWT" | jq -r '.token')
|
||||
```
|
||||
|
||||
### "Session expired"
|
||||
|
||||
Your access JWT from `createSession` has expired. Create a new session:
|
||||
```bash
|
||||
SESSION=$(curl -s -X POST "$PDS/xrpc/com.atproto.server.createSession" ...)
|
||||
ACCESS_JWT=$(echo "$SESSION" | jq -r '.accessJwt')
|
||||
```
|
||||
|
||||
### "Audience mismatch"
|
||||
|
||||
The service token is scoped to a specific hold. Make sure `HOLD_DID` matches exactly what's in the `aud` claim of your token.
|
||||
|
||||
### "Access denied: user is not a crew member"
|
||||
|
||||
You don't have access to this hold. You need to either:
|
||||
- Be the hold captain (owner)
|
||||
- Be a crew member with appropriate permissions
|
||||
|
||||
### Finding Your Hold DID
|
||||
|
||||
Check your sailor profile to find your default hold:
|
||||
```bash
|
||||
curl -s "https://bsky.social/xrpc/com.atproto.repo.getRecord?repo=$DID&collection=io.atcr.sailor.profile&rkey=self" \
|
||||
-H "Authorization: Bearer $ACCESS_JWT" | jq -r '.value.defaultHold'
|
||||
```
|
||||
|
||||
Or check your manifest records for the hold where your images are stored:
|
||||
```bash
|
||||
curl -s "https://bsky.social/xrpc/com.atproto.repo.listRecords?repo=$DID&collection=io.atcr.manifest&limit=1" \
|
||||
-H "Authorization: Bearer $ACCESS_JWT" | jq -r '.records[0].value.holdDid'
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Security Notes
|
||||
|
||||
- **App passwords** are scoped tokens that can be revoked without changing your main password
|
||||
- **Service tokens** are short-lived (60 seconds) and scoped to a specific hold
|
||||
- **Never share** your app password or access tokens
|
||||
- Service tokens can only be used for the specific hold they were requested for (`aud` claim)
|
||||
|
||||
---
|
||||
|
||||
## References
|
||||
|
||||
- [ATProto OAuth Specification](https://atproto.com/specs/oauth)
|
||||
- [DPoP RFC 9449](https://datatracker.ietf.org/doc/html/rfc9449)
|
||||
- [Bluesky OAuth Guide](https://docs.bsky.app/docs/advanced-guides/oauth-client)
|
||||
- [ATCR BYOS Documentation](./BYOS.md)
|
||||
1721
docs/HOLD_DISCOVERY.md
Normal file
1721
docs/HOLD_DISCOVERY.md
Normal file
File diff suppressed because it is too large
Load Diff
@@ -21,6 +21,7 @@ This document lists all XRPC endpoints implemented in the Hold service (`pkg/hol
|
||||
| `/xrpc/com.atproto.identity.resolveHandle` | GET | Resolve handle to DID |
|
||||
| `/xrpc/app.bsky.actor.getProfile` | GET | Get actor profile |
|
||||
| `/xrpc/app.bsky.actor.getProfiles` | GET | Get multiple profiles |
|
||||
| `/xrpc/io.atcr.hold.listTiers` | GET | List hold's available tiers with quotas and features |
|
||||
| `/.well-known/did.json` | GET | DID document |
|
||||
| `/.well-known/atproto-did` | GET | DID for handle resolution |
|
||||
|
||||
@@ -37,11 +38,17 @@ This document lists all XRPC endpoints implemented in the Hold service (`pkg/hol
|
||||
| `/xrpc/com.atproto.repo.deleteRecord` | POST | Delete a record |
|
||||
| `/xrpc/com.atproto.repo.uploadBlob` | POST | Upload ATProto blob |
|
||||
|
||||
### DPoP Auth Required
|
||||
### Auth Required (Service Token or DPoP)
|
||||
|
||||
| Endpoint | Method | Description |
|
||||
|----------|--------|-------------|
|
||||
| `/xrpc/io.atcr.hold.requestCrew` | POST | Request crew membership |
|
||||
| `/xrpc/io.atcr.hold.exportUserData` | GET | GDPR data export (returns user's records) |
|
||||
### Appview Token Required
|
||||
|
||||
| Endpoint | Method | Description |
|
||||
|----------|--------|-------------|
|
||||
| `/xrpc/io.atcr.hold.updateCrewTier` | POST | Update a crew member's tier (appview-only) |
|
||||
|
||||
---
|
||||
|
||||
@@ -60,6 +67,26 @@ All require `blob:write` permission via service token:
|
||||
|
||||
---
|
||||
|
||||
## ATCR Hold-Specific Endpoints (`io.atcr.hold.*`)
|
||||
|
||||
| Endpoint | Method | Auth | Description |
|
||||
|----------|--------|------|-------------|
|
||||
| `/xrpc/io.atcr.hold.initiateUpload` | POST | blob:write | Start multipart upload |
|
||||
| `/xrpc/io.atcr.hold.getPartUploadUrl` | POST | blob:write | Get presigned URL for part |
|
||||
| `/xrpc/io.atcr.hold.uploadPart` | PUT | blob:write | Direct buffered part upload |
|
||||
| `/xrpc/io.atcr.hold.completeUpload` | POST | blob:write | Finalize multipart upload |
|
||||
| `/xrpc/io.atcr.hold.abortUpload` | POST | blob:write | Cancel multipart upload |
|
||||
| `/xrpc/io.atcr.hold.notifyManifest` | POST | blob:write | Notify manifest push |
|
||||
| `/xrpc/io.atcr.hold.requestCrew` | POST | auth | Request crew membership |
|
||||
| `/xrpc/io.atcr.hold.exportUserData` | GET | auth | GDPR data export |
|
||||
| `/xrpc/io.atcr.hold.getQuota` | GET | none | Get user quota info |
|
||||
| `/xrpc/io.atcr.hold.getLayersForManifest` | GET | none | Get layer records for a manifest AT-URI |
|
||||
| `/xrpc/io.atcr.hold.image.getConfig` | GET | none | Get OCI image config record for a manifest digest |
|
||||
| `/xrpc/io.atcr.hold.listTiers` | GET | none | List hold's available tiers with quotas and features (scanOnPush) |
|
||||
| `/xrpc/io.atcr.hold.updateCrewTier` | POST | appview token | Update crew member's tier |
|
||||
|
||||
---
|
||||
|
||||
## Standard ATProto Endpoints (excluding io.atcr.hold.*)
|
||||
|
||||
| Endpoint |
|
||||
@@ -82,3 +109,11 @@ All require `blob:write` permission via service token:
|
||||
| /xrpc/app.bsky.actor.getProfiles |
|
||||
| /.well-known/did.json |
|
||||
| /.well-known/atproto-did |
|
||||
|
||||
---
|
||||
|
||||
## See Also
|
||||
|
||||
- [DIRECT_HOLD_ACCESS.md](./DIRECT_HOLD_ACCESS.md) - How to call hold endpoints directly without AppView (app passwords, curl examples)
|
||||
- [BYOS.md](./BYOS.md) - Bring Your Own Storage architecture
|
||||
- [OAUTH.md](./OAUTH.md) - OAuth + DPoP authentication details
|
||||
|
||||
@@ -251,7 +251,7 @@ func (h *Handler) VerifyImage(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{
|
||||
json.NewEncoder(w).Encode(map[string]any{
|
||||
"verified": result.Verified,
|
||||
"did": result.Signature.DID,
|
||||
"signedAt": result.Signature.SignedAt,
|
||||
|
||||
62
docs/KNOWN_RELAYS.md
Normal file
62
docs/KNOWN_RELAYS.md
Normal file
@@ -0,0 +1,62 @@
|
||||
# Known ATProto Relays
|
||||
|
||||
Reference list of known public ATProto relays and their capabilities, relevant to ATCR hold discovery and appview backfill.
|
||||
|
||||
There is no relay discovery protocol in ATProto — this list is manually maintained.
|
||||
|
||||
Last verified: 2026-02-08
|
||||
|
||||
## Relay List
|
||||
|
||||
### Bluesky (Official)
|
||||
|
||||
| Relay | URL | requestCrawl | listReposByCollection | Notes |
|
||||
|-------|-----|:---:|:---:|-------|
|
||||
| Bluesky (load balancer) | `https://bsky.network` | Yes | No (400 — not proxied) | Load balancer, proxies to regional relays |
|
||||
| Bluesky US-East | `https://relay1.us-east.bsky.network` | Yes | Yes | Regional relay with full collection directory |
|
||||
| Bluesky US-West | `https://relay1.us-west.bsky.network` | Yes | Yes | Regional relay with full collection directory |
|
||||
|
||||
### Community
|
||||
|
||||
| Relay | URL | requestCrawl | listReposByCollection | Notes |
|
||||
|-------|-----|:---:|:---:|-------|
|
||||
| Firehose NA | `https://northamerica.firehose.network` | Yes | No (404) | 72h replay buffer |
|
||||
| Firehose EU | `https://europe.firehose.network` | Yes | No (404) | 72h replay buffer |
|
||||
| Firehose Asia | `https://asia.firehose.network` | Yes | No (404) | 72h replay buffer |
|
||||
| Microcosm Montreal | `https://relay.fire.hose.cam` | Yes | No (404) | |
|
||||
| Microcosm France | `https://relay3.fr.hose.cam` | Yes | No (404) | |
|
||||
| Upcloud | `https://relay.upcloud.world` | Yes | No (404) | |
|
||||
| Blacksky | `https://atproto.africa` | Down (502) | Down (502) | Was offline as of 2026-02-08 |
|
||||
|
||||
## ATCR Usage
|
||||
|
||||
### Hold service (`requestCrawl`)
|
||||
|
||||
The hold announces its embedded PDS to relays on startup via `com.atproto.sync.requestCrawl`. Currently configured as a single relay in `server.relay_endpoint`. All healthy relays above accept `requestCrawl`.
|
||||
|
||||
### Appview backfill (`listReposByCollection`)
|
||||
|
||||
The appview uses `com.atproto.sync.listReposByCollection` to discover DIDs with `io.atcr.*` records during backfill. Only Bluesky's regional relays support this endpoint. The appview defaults to `relay1.us-east.bsky.network`.
|
||||
|
||||
## Why most relays lack `listReposByCollection`
|
||||
|
||||
The `listReposByCollection` endpoint is not part of the relay core. It's served by a separate microservice called [collectiondir](https://github.com/bluesky-social/indigo/tree/main/cmd/collectiondir) that maintains an index of `(collection, timestamp, DID)` tuples.
|
||||
|
||||
Community relays running the [Rainbow](https://github.com/bluesky-social/indigo/tree/main/cmd/rainbow) relay can optionally proxy to a collectiondir instance via `--collectiondir-host`, but most don't deploy one — likely because maintaining that index across the full network is expensive relative to just fan-out relaying.
|
||||
|
||||
## Other useful relay endpoints
|
||||
|
||||
These are standard XRPC endpoints that relays may implement:
|
||||
|
||||
- `com.atproto.sync.listRepos` — paginated list of all known repos (all tested relays support this)
|
||||
- `com.atproto.sync.getRepo` — all tested relays 302 redirect to the source PDS
|
||||
- `com.atproto.sync.getRepoStatus` — check if a relay knows about a specific DID
|
||||
- `com.atproto.sync.subscribeRepos` — WebSocket firehose subscription
|
||||
|
||||
## Sources
|
||||
|
||||
- [Bluesky indigo relay (Rainbow)](https://github.com/bluesky-social/indigo/tree/main/cmd/rainbow)
|
||||
- [Bluesky indigo collectiondir](https://github.com/bluesky-social/indigo/tree/main/cmd/collectiondir)
|
||||
- [firehose.network](https://firehose.network/)
|
||||
- [PDS debug tool relay list](https://tangled.org/microcosm.blue/pds-debug/raw/main/index.html)
|
||||
- [Sri's relay writeup](https://sri.leaflet.pub/3mddrqk5ays27)
|
||||
398
docs/MINIFY.md
398
docs/MINIFY.md
@@ -1,398 +0,0 @@
|
||||
# CSS/JS Minification for ATCR
|
||||
|
||||
## Overview
|
||||
|
||||
ATCR embeds static assets (CSS, JavaScript) directly into the binary using Go's `embed` directive. Currently:
|
||||
|
||||
- **CSS Size:** 40KB (`pkg/appview/static/css/style.css`, 2,210 lines)
|
||||
- **Embedded:** All static files compiled into binary at build time
|
||||
- **No Minification:** Source files embedded as-is
|
||||
|
||||
**Problem:** Embedded assets increase binary size and network transfer time.
|
||||
|
||||
**Solution:** Minify CSS/JS before embedding to reduce both binary size and network transfer.
|
||||
|
||||
## Recommended Approach: `tdewolff/minify`
|
||||
|
||||
Use the pure Go `tdewolff/minify` library with `go:generate` to minify assets at build time.
|
||||
|
||||
**Benefits:**
|
||||
- Pure Go, no external dependencies (Node.js, npm)
|
||||
- Integrates with existing `go:generate` workflow
|
||||
- ~30-40% CSS size reduction (40KB → ~28KB)
|
||||
- Minifies CSS, JS, HTML, JSON, SVG, XML
|
||||
|
||||
## Implementation
|
||||
|
||||
### Step 1: Add Dependency
|
||||
|
||||
```bash
|
||||
go get github.com/tdewolff/minify/v2
|
||||
```
|
||||
|
||||
This will update `go.mod`:
|
||||
```go
|
||||
require github.com/tdewolff/minify/v2 v2.20.37
|
||||
```
|
||||
|
||||
### Step 2: Create Minification Script
|
||||
|
||||
Create `pkg/appview/static/minify_assets.go`:
|
||||
|
||||
```go
|
||||
//go:build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/tdewolff/minify/v2"
|
||||
"github.com/tdewolff/minify/v2/css"
|
||||
"github.com/tdewolff/minify/v2/js"
|
||||
)
|
||||
|
||||
func main() {
|
||||
m := minify.New()
|
||||
m.AddFunc("text/css", css.Minify)
|
||||
m.AddFunc("text/javascript", js.Minify)
|
||||
|
||||
// Get the directory of this script
|
||||
dir, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Minify CSS
|
||||
if err := minifyFile(m, "text/css",
|
||||
filepath.Join(dir, "pkg/appview/static/css/style.css"),
|
||||
filepath.Join(dir, "pkg/appview/static/css/style.min.css"),
|
||||
); err != nil {
|
||||
log.Fatalf("Failed to minify CSS: %v", err)
|
||||
}
|
||||
|
||||
// Minify JavaScript
|
||||
if err := minifyFile(m, "text/javascript",
|
||||
filepath.Join(dir, "pkg/appview/static/js/app.js"),
|
||||
filepath.Join(dir, "pkg/appview/static/js/app.min.js"),
|
||||
); err != nil {
|
||||
log.Fatalf("Failed to minify JS: %v", err)
|
||||
}
|
||||
|
||||
fmt.Println("✓ Assets minified successfully")
|
||||
}
|
||||
|
||||
func minifyFile(m *minify.M, mediatype, src, dst string) error {
|
||||
// Read source file
|
||||
input, err := os.ReadFile(src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("read %s: %w", src, err)
|
||||
}
|
||||
|
||||
// Minify
|
||||
output, err := m.Bytes(mediatype, input)
|
||||
if err != nil {
|
||||
return fmt.Errorf("minify %s: %w", src, err)
|
||||
}
|
||||
|
||||
// Write minified output
|
||||
if err := os.WriteFile(dst, output, 0644); err != nil {
|
||||
return fmt.Errorf("write %s: %w", dst, err)
|
||||
}
|
||||
|
||||
// Print size reduction
|
||||
originalSize := len(input)
|
||||
minifiedSize := len(output)
|
||||
reduction := float64(originalSize-minifiedSize) / float64(originalSize) * 100
|
||||
|
||||
fmt.Printf(" %s: %d bytes → %d bytes (%.1f%% reduction)\n",
|
||||
filepath.Base(src), originalSize, minifiedSize, reduction)
|
||||
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
### Step 3: Add `go:generate` Directive
|
||||
|
||||
Add to `pkg/appview/ui.go` (before the `//go:embed` directive):
|
||||
|
||||
```go
|
||||
//go:generate go run ./static/minify_assets.go
|
||||
|
||||
//go:embed static
|
||||
var staticFS embed.FS
|
||||
```
|
||||
|
||||
### Step 4: Update HTML Templates
|
||||
|
||||
Update all template files to reference minified assets:
|
||||
|
||||
**Before:**
|
||||
```html
|
||||
<link rel="stylesheet" href="/static/css/style.css">
|
||||
<script src="/static/js/app.js"></script>
|
||||
```
|
||||
|
||||
**After:**
|
||||
```html
|
||||
<link rel="stylesheet" href="/static/css/style.min.css">
|
||||
<script src="/static/js/app.min.js"></script>
|
||||
```
|
||||
|
||||
**Files to update:**
|
||||
- `pkg/appview/templates/components/head.html`
|
||||
- Any other templates that reference CSS/JS directly
|
||||
|
||||
### Step 5: Build Workflow
|
||||
|
||||
```bash
|
||||
# Generate minified assets
|
||||
go generate ./pkg/appview
|
||||
|
||||
# Build binary (embeds minified assets)
|
||||
go build -o bin/atcr-appview ./cmd/appview
|
||||
|
||||
# Or build all
|
||||
go generate ./...
|
||||
go build -o bin/atcr-appview ./cmd/appview
|
||||
go build -o bin/atcr-hold ./cmd/hold
|
||||
```
|
||||
|
||||
### Step 6: Add to .gitignore
|
||||
|
||||
Add minified files to `.gitignore` since they're generated:
|
||||
|
||||
```
|
||||
# Generated minified assets
|
||||
pkg/appview/static/css/*.min.css
|
||||
pkg/appview/static/js/*.min.js
|
||||
```
|
||||
|
||||
**Alternative:** Commit minified files if you want reproducible builds without running `go generate`.
|
||||
|
||||
## Build Modes (Optional Enhancement)
|
||||
|
||||
Use build tags to serve unminified assets in development:
|
||||
|
||||
**Development (default):**
|
||||
- Edit `style.css` directly
|
||||
- No minification, easier debugging
|
||||
- Faster build times
|
||||
|
||||
**Production (with `-tags production`):**
|
||||
- Use minified assets
|
||||
- Smaller binary size
|
||||
- Optimized for deployment
|
||||
|
||||
### Implementation with Build Tags
|
||||
|
||||
**pkg/appview/ui.go** (development):
|
||||
```go
|
||||
//go:build !production
|
||||
|
||||
//go:embed static
|
||||
var staticFS embed.FS
|
||||
|
||||
func StylePath() string { return "/static/css/style.css" }
|
||||
func ScriptPath() string { return "/static/js/app.js" }
|
||||
```
|
||||
|
||||
**pkg/appview/ui_production.go** (production):
|
||||
```go
|
||||
//go:build production
|
||||
|
||||
//go:generate go run ./static/minify_assets.go
|
||||
|
||||
//go:embed static
|
||||
var staticFS embed.FS
|
||||
|
||||
func StylePath() string { return "/static/css/style.min.css" }
|
||||
func ScriptPath() string { return "/static/js/app.min.js" }
|
||||
```
|
||||
|
||||
**Usage:**
|
||||
```bash
|
||||
# Development build (unminified)
|
||||
go build ./cmd/appview
|
||||
|
||||
# Production build (minified)
|
||||
go generate ./pkg/appview
|
||||
go build -tags production ./cmd/appview
|
||||
```
|
||||
|
||||
## Alternative Approaches
|
||||
|
||||
### Option 2: External Minifier (cssnano, esbuild)
|
||||
|
||||
Use Node.js-based minifiers via `go:generate`:
|
||||
|
||||
```go
|
||||
//go:generate sh -c "npx cssnano static/css/style.css static/css/style.min.css"
|
||||
//go:generate sh -c "npx esbuild static/js/app.js --minify --outfile=static/js/app.min.js"
|
||||
```
|
||||
|
||||
**Pros:**
|
||||
- Best-in-class minification (potentially better than tdewolff)
|
||||
- Wide ecosystem of tools
|
||||
|
||||
**Cons:**
|
||||
- Requires Node.js/npm in build environment
|
||||
- Cross-platform compatibility issues (Windows vs Unix)
|
||||
- External dependency management
|
||||
|
||||
### Option 3: Runtime Gzip Compression
|
||||
|
||||
Compress assets at runtime (complementary to minification):
|
||||
|
||||
```go
|
||||
import "github.com/NYTimes/gziphandler"
|
||||
|
||||
// Wrap static handler
|
||||
mux.Handle("/static/", gziphandler.GzipHandler(appview.StaticHandler()))
|
||||
```
|
||||
|
||||
**Pros:**
|
||||
- Works for all static files (images, fonts)
|
||||
- ~70-80% size reduction over network
|
||||
- No build changes needed
|
||||
|
||||
**Cons:**
|
||||
- Doesn't reduce binary size
|
||||
- Adds runtime CPU cost
|
||||
- Should be combined with minification for best results
|
||||
|
||||
### Option 4: Brotli Compression (Better than Gzip)
|
||||
|
||||
```go
|
||||
import "github.com/andybalholm/brotli"
|
||||
|
||||
// Custom handler with brotli
|
||||
func BrotliHandler(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if !strings.Contains(r.Header.Get("Accept-Encoding"), "br") {
|
||||
h.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Encoding", "br")
|
||||
bw := brotli.NewWriterLevel(w, brotli.DefaultCompression)
|
||||
defer bw.Close()
|
||||
h.ServeHTTP(&brotliResponseWriter{Writer: bw, ResponseWriter: w}, r)
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
## Expected Benefits
|
||||
|
||||
### File Size Reduction
|
||||
|
||||
**Current (unminified):**
|
||||
- CSS: 40KB
|
||||
- JS: ~5KB (estimated)
|
||||
- **Total embedded:** ~45KB
|
||||
|
||||
**With Minification:**
|
||||
- CSS: ~28KB (30% reduction)
|
||||
- JS: ~3KB (40% reduction)
|
||||
- **Total embedded:** ~31KB
|
||||
- **Binary size savings:** ~14KB
|
||||
|
||||
**With Minification + Gzip (network transfer):**
|
||||
- CSS: ~8KB (80% reduction from original)
|
||||
- JS: ~1.5KB (70% reduction from original)
|
||||
- **Total transferred:** ~9.5KB
|
||||
|
||||
### Performance Impact
|
||||
|
||||
- **Build time:** +1-2 seconds (running minifier)
|
||||
- **Runtime:** No impact (files pre-minified)
|
||||
- **Network:** 75% less data transferred (with gzip)
|
||||
- **Browser parsing:** Slightly faster (smaller files)
|
||||
|
||||
## Maintenance
|
||||
|
||||
### Development Workflow
|
||||
|
||||
1. **Edit source files:**
|
||||
- Modify `pkg/appview/static/css/style.css`
|
||||
- Modify `pkg/appview/static/js/app.js`
|
||||
|
||||
2. **Test locally:**
|
||||
```bash
|
||||
# Development build (unminified)
|
||||
go run ./cmd/appview serve
|
||||
```
|
||||
|
||||
3. **Build for production:**
|
||||
```bash
|
||||
# Generate minified assets
|
||||
go generate ./pkg/appview
|
||||
|
||||
# Build binary
|
||||
go build -o bin/atcr-appview ./cmd/appview
|
||||
```
|
||||
|
||||
4. **CI/CD:**
|
||||
```bash
|
||||
# In GitHub Actions / CI
|
||||
go generate ./...
|
||||
go build ./...
|
||||
```
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
**Q: Minified assets not updating?**
|
||||
- Delete `*.min.css` and `*.min.js` files
|
||||
- Run `go generate ./pkg/appview` again
|
||||
|
||||
**Q: Build fails with "package not found"?**
|
||||
- Run `go mod tidy` to download dependencies
|
||||
|
||||
**Q: CSS broken after minification?**
|
||||
- Check for syntax errors in source CSS
|
||||
- Minifier is strict about valid CSS
|
||||
|
||||
## Integration with Existing Build
|
||||
|
||||
ATCR already uses `go:generate` for:
|
||||
- CBOR generation (`pkg/atproto/lexicon.go`)
|
||||
- License downloads (`pkg/appview/licenses/licenses.go`)
|
||||
|
||||
Minification follows the same pattern:
|
||||
```bash
|
||||
# Generate all (CBOR, licenses, minified assets)
|
||||
go generate ./...
|
||||
|
||||
# Build all binaries
|
||||
go build -o bin/atcr-appview ./cmd/appview
|
||||
go build -o bin/atcr-hold ./cmd/hold
|
||||
go build -o bin/docker-credential-atcr ./cmd/credential-helper
|
||||
```
|
||||
|
||||
## Recommendation
|
||||
|
||||
**For ATCR:**
|
||||
|
||||
1. **Immediate:** Implement Option 1 (`tdewolff/minify`)
|
||||
- Pure Go, no external dependencies
|
||||
- Integrates with existing `go:generate` workflow
|
||||
- ~30% size reduction
|
||||
|
||||
2. **Future:** Add runtime gzip/brotli compression
|
||||
- Wrap static handler with compression middleware
|
||||
- Benefits all static assets
|
||||
- Standard practice for web servers
|
||||
|
||||
3. **Long-term:** Consider build modes (development vs production)
|
||||
- Use unminified assets in development
|
||||
- Use minified assets in production builds
|
||||
- Best developer experience
|
||||
|
||||
## References
|
||||
|
||||
- [tdewolff/minify](https://github.com/tdewolff/minify) - Go minifier library
|
||||
- [NYTimes/gziphandler](https://github.com/NYTimes/gziphandler) - Gzip middleware
|
||||
- [Go embed directive](https://pkg.go.dev/embed) - Embedding static files
|
||||
- [Go generate](https://go.dev/blog/generate) - Code generation tool
|
||||
68
docs/PULL_COUNT_RESEARCH.md
Normal file
68
docs/PULL_COUNT_RESEARCH.md
Normal file
@@ -0,0 +1,68 @@
|
||||
# How container registries count image pulls
|
||||
|
||||
**A GET request to the manifest endpoint is the universal unit of a "pull," but every registry counts differently.** The OCI Distribution Specification deliberately says nothing about pull counting, rate limiting, or metrics — these are left entirely to each registry implementation. Docker Hub, the most consequential registry for pull counting, defines a pull as a `GET /v2/<name>/manifests/<reference>` request; HEAD requests and blob downloads do not count. Other registries diverge significantly: AWS ECR throttles per-API-endpoint, Google Artifact Registry counts raw HTTP requests against a project quota, Azure ACR tracks ReadOps per SKU tier, and Quay.io essentially imposes no pull-specific limits at all.
|
||||
|
||||
## The OCI spec intentionally stays silent on pull counting
|
||||
|
||||
The OCI Distribution Specification (v1.1.1) defines "Pull" only as a **workflow category** — the highest-priority conformance tier that all registries must support. The spec describes the process: "The process of pulling an object centers around retrieving two components: the manifest and one or more blobs. Typically, the first step in pulling an object is to retrieve the manifest." It defines four HTTP endpoints in this category: `GET` and `HEAD` on both `/v2/<name>/manifests/<reference>` and `/v2/<name>/blobs/<digest>`.
|
||||
|
||||
Critically, the spec contains **zero guidance** on how to count pulls, implement rate limiting, define deduplication windows, or report usage metrics. There are no rate-limit response headers, no 429 status codes, and no throttling semantics in the specification. A search of the opencontainers/distribution-spec GitHub repository reveals no open or closed issues proposing standardization of pull counting — the community treats this as firmly out of scope for a wire protocol specification. Every registry is free to define "pull" however it chooses.
|
||||
|
||||
At the protocol level, a single `docker pull` command generates a cascade of HTTP requests: one or more manifest GETs, plus one GET per blob (config + layers). For a multi-arch image, an additional manifest GET is required for platform resolution. The Docker daemon is somewhat intelligent here — it issues a HEAD request first to check the digest, and only proceeds to a GET if the image has changed.
|
||||
|
||||
## Docker Hub counts manifest GETs in a 6-hour sliding window
|
||||
|
||||
Docker Hub's pull counting is the most consequential and best-documented system. The official documentation states: **"Using GET emulates a real pull and counts towards the limit. Using HEAD won't."** Only `GET` requests to `/v2/*/manifests/*` are counted. Blob/layer downloads (`/v2/*/blobs/*`) do not count. Even if all layers are already cached locally and nothing is actually downloaded, the manifest GET still counts as a pull.
|
||||
|
||||
**Rate limits operate on a 6-hour (21,600-second) sliding window:**
|
||||
|
||||
- Unauthenticated users: **100 pulls per IPv4 address** (or per /64 IPv6 subnet)
|
||||
- Personal accounts (authenticated): **200 pulls per user**
|
||||
- Pro, Team, and Business accounts: **unlimited**
|
||||
|
||||
The response headers `ratelimit-limit: 100;w=21600`, `ratelimit-remaining`, and `docker-ratelimit-source` allow clients to monitor their status. When exceeded, Docker Hub returns HTTP 429.
|
||||
|
||||
**There is no deduplication for rate limiting.** Every manifest GET counts individually, even for the same image pulled repeatedly. As one analysis confirmed: "If you execute `docker pull alpine` twice, you come two steps closer to exhausting your rate limit. Even if on the second command execution no image was transferred, two pull requests referring to the same image tag count as two and not one." However, the Docker daemon's built-in behavior mitigates this: it sends a HEAD request first (which doesn't count), compares the digest locally, and only issues a GET if the image has changed. Tools that bypass this optimization and issue GETs directly will consume quota needlessly.
|
||||
|
||||
An important distinction: the **pull count statistic** displayed on Docker Hub image pages (the cumulative "10M+" counter) uses a **different tracking system** than rate limiting. The dashboard counter represents all-time manifest fetches, is not real-time, and has known quirks — for instance, GitHub issue docker/hub-feedback#2182 reports the counter incrementing by 4 per pushed tag. Docker's usage dashboard separately tracks "version_checks" (HEAD requests) and "pulls" (GET requests) in exportable CSV data.
|
||||
|
||||
## Multi-arch images count as one pull per architecture
|
||||
|
||||
When pulling a multi-arch image (OCI image index or Docker manifest list), the client first GETs the manifest list, then GETs the platform-specific manifest, then downloads blobs. **Docker Hub groups the manifest list GET and the platform-specific manifest GET together as one pull per architecture.** A Docker community moderator confirmed: "for multi-arch images 1 request contains one on the index and one on the platform specific manifest so basically 20 requests would be 10 pulls." The official docs state: "A pull for a multi-arch image will count as one pull for each different architecture."
|
||||
|
||||
This has significant implications for CI/CD. A `docker buildx build --platform linux/amd64,linux/arm64 --push` command pulls base images for each platform, so a `FROM` directive referencing a Docker Hub image will consume **two pulls** (one per architecture). Matrix CI strategies that build many platform combinations can exhaust rate limits quickly.
|
||||
|
||||
The `docker buildx imagetools create` command — used to assemble manifest lists from existing platform-specific images — also counts against rate limits. Buildx maintainer tonistiigi confirmed: "Yes, pulling down a manifest is what impacts the rate limit counter." Creating a manifest list from 4 platform digests produces at least 4 manifest GETs. Similarly, `docker manifest inspect` performs a GET and counts as a pull; there is no HEAD-only alternative.
|
||||
|
||||
For GitHub Actions specifically, GitHub-hosted runners pulling public Docker Hub images are **exempt from rate limits** due to an IP whitelisting agreement with Docker. Self-hosted runners are not exempt. When using buildx's `docker-container` driver, credentials must be explicitly passed to the builder container — a common misconfiguration that causes builds to hit unauthenticated rate limits even when the user has logged in.
|
||||
|
||||
## Each registry takes a fundamentally different approach
|
||||
|
||||
No two registries count pulls the same way. The differences are architectural, not just numerical.
|
||||
|
||||
**GitHub Container Registry (ghcr.io)** bills by **bandwidth transferred**, not request counts. Public container pulls have no apparent rate limit (or an extremely generous one — one user observed an internal threshold of ~44,000 requests/minute). GHCR displays no public pull counter. Pulls from GitHub Actions using `GITHUB_TOKEN` are completely free and uncounted. The rate limit documentation is minimal, with a notable open issue (github/docs#24504) requesting better documentation.
|
||||
|
||||
**AWS ECR** uses **per-API-endpoint token bucket throttling** — a fundamentally different model. Each registry API has its own rate: `BatchGetImage` (manifest retrieval) allows **2,000 requests/second**, while `GetDownloadUrlForLayer` allows **3,000/second**. These are per-account, per-region limits using a burst-capable token bucket. All quotas are adjustable via AWS Service Quotas. There is no single "pull count" — each API call counts against its respective endpoint quota.
|
||||
|
||||
**Google Artifact Registry** counts **every HTTP request** against a per-project, per-region quota of **60,000 requests/minute**. Google's docs explicitly state: "A Docker pull or push usually makes multiple HTTP requests, so quota is charged for each request." A pull of an image with 5 layers could consume 6-7 quota units. Remote repositories acting as pull-through caches have separate upstream limits (e.g., 600 reads/minute from Docker Hub per organization per region).
|
||||
|
||||
**Azure Container Registry** tracks **ReadOps per minute** on a SKU-tiered model: Basic (~1,000/min), Standard (~3,000/min), Premium (~10,000/min). Microsoft documents that "a docker pull translates to multiple read operations based on the number of layers in the image, plus the manifest retrieval." Each layer GET and manifest GET count as separate ReadOps. Bandwidth is throttled independently (30-100 Mbps depending on tier). HEAD requests count as ReadOps.
|
||||
|
||||
**Quay.io** is the most permissive: it "does not restrict anonymous pulls against its repositories and only rate limits in the most severe circumstances to maintain service levels (e.g., tens of requests per second from the same IP address)." There is no pull-specific rate limit — only a general abuse-prevention API rate limit of a few requests per second per IP. No public pull counter exists.
|
||||
|
||||
| Registry | Unit of counting | Effective limit | HEAD counts? | Deduplication | Public counter |
|
||||
|---|---|---|---|---|---|
|
||||
| Docker Hub | Manifest GET | 100-200/6hrs (free) | No | None | Yes (all-time) |
|
||||
| GHCR | Bandwidth | ~44K req/min (observed) | Unknown | None | No |
|
||||
| AWS ECR | Per-API calls | 2,000-3,000/sec | N/A (AWS API) | None | No |
|
||||
| Google AR | All HTTP requests | 60,000/min/project | Likely yes | None | No |
|
||||
| Azure ACR | ReadOps | 1,000-10,000/min by SKU | Yes | None | No |
|
||||
| Quay.io | API requests (abuse only) | ~tens/sec/IP | Likely yes | None | No |
|
||||
|
||||
## Practical implications and key takeaways
|
||||
|
||||
The fragmentation in pull counting creates real operational complexity. Docker Hub's model — counting only manifest GETs, excluding HEAD requests and blob downloads, using a 6-hour window with no deduplication — is the most restrictive and the most precisely defined. Every other major registry offers orders-of-magnitude more headroom, but counts differently: some charge per HTTP request (including blob downloads), others per API call, others per bandwidth.
|
||||
|
||||
For teams optimizing pull behavior, the critical insight is that **HEAD requests are the escape hatch on Docker Hub**. Tools like Watchtower switched to HEAD-based digest checks specifically to avoid incrementing pull counts. The Docker daemon itself uses this optimization — issuing HEAD first, then GET only if needed. For multi-arch workflows in CI, each platform multiplies the pull cost for base images, making authenticated access or registry mirrors essential for high-volume pipelines. The `docker buildx imagetools create` and `docker manifest inspect` commands both consume pulls, a fact confirmed by maintainers but poorly documented.
|
||||
|
||||
The absence of any OCI-level standard means this landscape will likely remain fragmented. Docker Hub's definition — manifest GET as the unit of pull — has become a de facto convention that influences how the community thinks about pulls, but registries that charge per HTTP request (Google, Azure) or per bandwidth (GHCR) are measuring fundamentally different things under the same word.
|
||||
1502
docs/QUOTAS.md
1502
docs/QUOTAS.md
File diff suppressed because it is too large
Load Diff
558
docs/REBRAND.md
Normal file
558
docs/REBRAND.md
Normal file
@@ -0,0 +1,558 @@
|
||||
# Website Visual Improvement Plan
|
||||
|
||||
## Goal
|
||||
Create a fun, personality-driven container registry that embraces its nautical theme while being clearly functional. Think GitHub's Octocat or DigitalOcean's Sammy - playful but professional.
|
||||
|
||||
## Brand Identity (from seahorse logo)
|
||||
- **Primary Teal**: #4ECDC4 (body color) - the "ocean" feel
|
||||
- **Dark Teal**: #2E8B8B (mane/fins) - depth and contrast
|
||||
- **Mint Background**: #C8F0E7 - light, airy, underwater
|
||||
- **Coral Accent**: #FF6B6B (eye) - warmth, CTAs, highlights
|
||||
- **Nautical theme to embrace:**
|
||||
- "Ship" containers (not just push)
|
||||
- "Holds" for storage (like a ship's cargo hold)
|
||||
- "Sailors" are users, "Captains" own holds
|
||||
- Seahorse mascot as the friendly guide
|
||||
|
||||
## Design Direction: Fun but Functional
|
||||
- Softer, more rounded corners
|
||||
- Playful color combinations (teal + coral)
|
||||
- Mascot appearances in empty states, loading, errors
|
||||
- Ocean-inspired subtle backgrounds (gradients, waves)
|
||||
- Friendly copy and microcopy throughout
|
||||
- Still clearly a container registry with all the technical info
|
||||
|
||||
## Current State
|
||||
- Pure CSS with custom properties for theming
|
||||
- Basic card designs for repositories
|
||||
- Simple hero section with terminal mockup
|
||||
- Existing badges: Helm charts, multi-arch, attestations
|
||||
- Existing stats: stars, pull counts
|
||||
|
||||
## Layout Wireframes
|
||||
|
||||
### Current Homepage Layout
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ [Logo] [Search] [Theme] [User] │ Navbar
|
||||
├─────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ ship containers on the open web. │ Hero
|
||||
│ ┌─────────────────────────┐ │
|
||||
│ │ $ docker login atcr.io │ │
|
||||
│ └─────────────────────────┘ │
|
||||
│ [Get Started] [Learn More] │
|
||||
│ │
|
||||
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ Benefits
|
||||
│ │ Docker │ │ Your Data │ │ Discover │ │
|
||||
│ └─────────────┘ └─────────────┘ └─────────────┘ │
|
||||
├─────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ Featured │
|
||||
│ ┌─────────────────────────────────────────────────────────────┐│
|
||||
│ │ [icon] user/repo ★ 12 ↓ 340 ││ WIDE cards
|
||||
│ │ Description text here... ││ (current)
|
||||
│ └─────────────────────────────────────────────────────────────┘│
|
||||
│ ┌─────────────────────────────────────────────────────────────┐│
|
||||
│ │ [icon] user/repo2 ★ 5 ↓ 120 ││
|
||||
│ └─────────────────────────────────────────────────────────────┘│
|
||||
│ │
|
||||
│ What's New │
|
||||
│ (similar wide cards) │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### Proposed Layout: Tile Grid
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ [Logo] [Search] [Theme] [User] │
|
||||
├─────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ ship containers on the open web. │
|
||||
│ ┌─────────────────────────┐ │
|
||||
│ │ $ docker login atcr.io │ │
|
||||
│ └─────────────────────────┘ │
|
||||
│ [Get Started] [Learn More] │
|
||||
│ │
|
||||
│ ┌────────────┐ ┌────────────┐ ┌────────────┐ │
|
||||
│ │ Docker │ │ Your Data │ │ Discover │ │
|
||||
│ └────────────┘ └────────────┘ └────────────┘ │
|
||||
├─────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ Featured [View All] │
|
||||
│ ┌──────────────────┐ ┌──────────────────┐ ┌──────────────────┐│
|
||||
│ │ [icon] │ │ [icon] │ │ [icon] ││ 3 columns
|
||||
│ │ user/repo │ │ user/repo2 │ │ user/repo3 ││ ~300px each
|
||||
│ │ Description... │ │ Description... │ │ Description... ││
|
||||
│ │ ────────────────││ │ ────────────────││ │ ────────────────│││
|
||||
│ │ ★ 12 ↓ 340 │ │ ★ 5 ↓ 120 │ │ ★ 8 ↓ 89 ││
|
||||
│ └──────────────────┘ └──────────────────┘ └──────────────────┘│
|
||||
│ ┌──────────────────┐ ┌──────────────────┐ ┌──────────────────┐│
|
||||
│ │ ... │ │ ... │ │ ... ││
|
||||
│ └──────────────────┘ └──────────────────┘ └──────────────────┘│
|
||||
│ │
|
||||
├─────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ What's New │
|
||||
│ ┌──────────────────┐ ┌──────────────────┐ ┌──────────────────┐│
|
||||
│ │ ... │ │ ... │ │ ... ││ Same tile
|
||||
│ └──────────────────┘ └──────────────────┘ └──────────────────┘│ layout
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### Unified Tile Card (Same for Featured & What's New)
|
||||
```
|
||||
┌─────────────────────────────┐
|
||||
│ ┌────┐ user/repo [Helm] │ Icon + name + type badge
|
||||
│ │icon│ :latest │ Tag (if applicable)
|
||||
│ └────┘ │
|
||||
│ │
|
||||
│ Description text that │ Description (2-3 lines max)
|
||||
│ wraps nicely here... │
|
||||
│ │
|
||||
│ sha256:abcdef12 │ Digest (truncated)
|
||||
│ ───────────────────────────│ Divider
|
||||
│ ★ 12 ↓ 340 1 day ago │ Stats + timestamp
|
||||
└─────────────────────────────┘
|
||||
|
||||
Card anatomy:
|
||||
┌─────────────────────────────┐
|
||||
│ HEADER │ - Icon (48x48)
|
||||
│ - icon + name + badge │ - user/repo
|
||||
│ - tag (optional) │ - :tag or :latest
|
||||
├─────────────────────────────┤
|
||||
│ BODY │ - Description (clamp 2-3 lines)
|
||||
│ - description │ - sha256:abc... (monospace)
|
||||
│ - digest │
|
||||
├─────────────────────────────┤
|
||||
│ FOOTER │ - ★ star count
|
||||
│ - stats + time │ - ↓ pull count
|
||||
│ │ - "2 hours ago"
|
||||
└─────────────────────────────┘
|
||||
```
|
||||
|
||||
### Both Sections Use Same Card (Different Sort)
|
||||
```
|
||||
Featured (by stars/curated): What's New (by last_push):
|
||||
┌─────────────────────────┐ ┌─────────────────────────┐
|
||||
│ user/repo │ │ user/repo │
|
||||
│ :latest │ │ :v1.2.3 │ ← latest tag
|
||||
│ Description... │ │ Description... │
|
||||
│ │ │ │
|
||||
│ sha256:abc123 │ │ sha256:def456 │ ← latest digest
|
||||
│ ───────────────────────│ │ ───────────────────────│
|
||||
│ ★ 12 ↓ 340 1 day ago │ │ ★ 5 ↓ 89 2 hrs ago │ ← last_push time
|
||||
└─────────────────────────┘ └─────────────────────────┘
|
||||
|
||||
Same card component, different data source:
|
||||
- Featured: GetFeaturedRepos() (curated or by stars)
|
||||
- What's New: GetRecentlyUpdatedRepos() (ORDER BY last_push DESC)
|
||||
```
|
||||
|
||||
### Card Dimensions Comparison
|
||||
```
|
||||
Current: █████████████████████████████████████████ (~800px+ wide)
|
||||
Proposed: ████████████ ████████████ ████████████ (~280-320px each)
|
||||
Card 1 Card 2 Card 3
|
||||
```
|
||||
|
||||
### Mobile Responsive Behavior
|
||||
```
|
||||
Desktop (>1024px): [Card] [Card] [Card] 3 columns
|
||||
Tablet (768-1024px): [Card] [Card] 2 columns
|
||||
Mobile (<768px): [Card] 1 column (full width)
|
||||
```
|
||||
|
||||
### Playful Elements
|
||||
```
|
||||
Empty State (no repos):
|
||||
┌─────────────────────────────────────────┐
|
||||
│ │
|
||||
│ 🐴 (seahorse) │
|
||||
│ "Nothing here yet!" │
|
||||
│ │
|
||||
│ Ship your first container to get │
|
||||
│ started on your voyage. │
|
||||
│ │
|
||||
│ [Start Shipping] │
|
||||
└─────────────────────────────────────────┘
|
||||
|
||||
Error/404:
|
||||
┌─────────────────────────────────────────┐
|
||||
│ │
|
||||
│ 🐴 (confused seahorse) │
|
||||
│ "Lost at sea!" │
|
||||
│ │
|
||||
│ We couldn't find that container. │
|
||||
│ Maybe it drifted away? │
|
||||
│ │
|
||||
│ [Back to Shore] │
|
||||
└─────────────────────────────────────────┘
|
||||
|
||||
Hero with subtle ocean feel:
|
||||
┌─────────────────────────────────────────┐
|
||||
│ ≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋ │ Subtle wave pattern bg
|
||||
│ │
|
||||
│ ship containers on the │
|
||||
│ open web. 🐴 │ Mascot appears!
|
||||
│ │
|
||||
│ ┌─────────────────────┐ │
|
||||
│ │ $ docker login ... │ │
|
||||
│ └─────────────────────┘ │
|
||||
│ │
|
||||
│ ≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋≋ │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### Card with Personality
|
||||
```
|
||||
┌───────────────────────────────────┐
|
||||
│ ┌──────┐ │
|
||||
│ │ icon │ user/repo │
|
||||
│ │ │ :latest [⚓ Helm] │ Anchor icon for Helm
|
||||
│ └──────┘ │
|
||||
│ │
|
||||
│ A container that does amazing │
|
||||
│ things for your app... │
|
||||
│ │
|
||||
│ sha256:abcdef12 │
|
||||
│ ─────────────────────────────────│
|
||||
│ ★ 12 ↓ 340 1 day ago │
|
||||
│ │
|
||||
│ 🐴 Shipped by alice.bsky.social │ Playful "shipped by" line
|
||||
└───────────────────────────────────┘
|
||||
|
||||
(optional: "Shipped by" could be subtle or only on hover)
|
||||
```
|
||||
|
||||
## Design Improvements
|
||||
|
||||
### 1. Enhanced Card Design (Priority: High)
|
||||
**Files:** `pkg/appview/public/css/style.css`, `pkg/appview/templates/components/repo-card.html`
|
||||
|
||||
- Add subtle gradient backgrounds on hover
|
||||
- Improve shadow depth (layered shadows for modern look)
|
||||
- Add smooth transitions (transform, box-shadow)
|
||||
- Better icon styling with ring/border accent
|
||||
- Enhanced badge visibility with better contrast
|
||||
- Add "Updated X ago" timestamp to cards
|
||||
- Improve stat icon/count alignment and spacing
|
||||
|
||||
### 2. Hero Section Polish (Priority: High)
|
||||
**Files:** `pkg/appview/public/css/style.css`, `pkg/appview/templates/pages/home.html`
|
||||
|
||||
- Add subtle background pattern or gradient mesh
|
||||
- Improve terminal mockup styling (better shadows, glow effect)
|
||||
- Enhance benefit cards with icons and better spacing
|
||||
- Add visual separation between hero and content
|
||||
- Improve CTA button styling with better hover states
|
||||
|
||||
### 3. Typography & Spacing (Priority: High)
|
||||
**Files:** `pkg/appview/public/css/style.css`
|
||||
|
||||
- Increase visual hierarchy with better font weights
|
||||
- Add more breathing room (padding/margins)
|
||||
- Improve heading styles with subtle underlines or accents
|
||||
- Better link styling with hover states
|
||||
- Add letter-spacing to badges for readability
|
||||
|
||||
### 4. Badge System Enhancement (Priority: Medium)
|
||||
**Files:** `pkg/appview/public/css/style.css`, templates
|
||||
|
||||
- Create unified badge design language
|
||||
- Add subtle icons inside badges (already using Lucide)
|
||||
- Improve color coding: Helm (blue), Attestation (green), Multi-arch (purple)
|
||||
- Add "Official" or "Verified" badge styling (for future use)
|
||||
- Better hover states on interactive badges
|
||||
|
||||
### 5. Featured Section Improvements (Priority: Medium)
|
||||
**Files:** `pkg/appview/templates/pages/home.html`, `pkg/appview/public/css/style.css`
|
||||
|
||||
- Add section header with subtle styling
|
||||
- Improve grid responsiveness
|
||||
- Add "View All" link styling
|
||||
- Better visual distinction from "What's New" section
|
||||
|
||||
### 6. Navigation Polish (Priority: Medium)
|
||||
**Files:** `pkg/appview/public/css/style.css`, nav templates
|
||||
|
||||
- Improve search bar visibility and styling
|
||||
- Better user menu dropdown aesthetics
|
||||
- Add subtle border or shadow to navbar
|
||||
- Improve mobile responsiveness
|
||||
|
||||
### 7. Loading & Empty States (Priority: Low)
|
||||
**Files:** `pkg/appview/public/css/style.css`
|
||||
|
||||
- Add skeleton loading animations
|
||||
- Improve empty state illustrations/styling
|
||||
- Better transition when content loads
|
||||
|
||||
### 8. Micro-interactions (Priority: Low)
|
||||
**Files:** `pkg/appview/public/css/style.css`, `pkg/appview/public/js/app.js`
|
||||
|
||||
- Add subtle hover animations throughout
|
||||
- Improve button press feedback
|
||||
- Star button animation on click
|
||||
- Copy button success animation
|
||||
|
||||
## Implementation Order
|
||||
|
||||
1. **Phase 1: Core Card Styling**
|
||||
- Update `.featured-card` with modern shadows and transitions
|
||||
- Enhance badge styling in `style.css`
|
||||
- Add hover effects and transforms
|
||||
|
||||
2. **Phase 2: Hero & Featured Section**
|
||||
- Improve hero section gradient/background
|
||||
- Polish benefit cards
|
||||
- Add section separators
|
||||
|
||||
3. **Phase 3: Typography & Spacing**
|
||||
- Update font weights and sizes
|
||||
- Improve padding throughout
|
||||
- Better visual rhythm
|
||||
|
||||
4. **Phase 4: Navigation & Polish**
|
||||
- Navbar improvements
|
||||
- Loading states
|
||||
- Final micro-interactions
|
||||
|
||||
## Key CSS Changes
|
||||
|
||||
### Tile Grid Layout
|
||||
```css
|
||||
.featured-grid {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(auto-fill, minmax(280px, 1fr));
|
||||
gap: 1.5rem;
|
||||
}
|
||||
|
||||
/* Already exists but updating min-width */
|
||||
.featured-card {
|
||||
min-height: 200px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
justify-content: space-between;
|
||||
}
|
||||
```
|
||||
|
||||
### Enhanced Shadow System (Multi-layer for depth)
|
||||
```css
|
||||
--shadow-card: 0 1px 3px rgba(0,0,0,0.08), 0 4px 12px rgba(0,0,0,0.05);
|
||||
--shadow-card-hover: 0 8px 25px rgba(78,205,196,0.15), 0 4px 12px rgba(0,0,0,0.1);
|
||||
--shadow-nav: 0 2px 8px rgba(0,0,0,0.1);
|
||||
```
|
||||
|
||||
### Card Design Enhancement
|
||||
```css
|
||||
.featured-card {
|
||||
transition: transform 0.2s ease, box-shadow 0.2s ease, border-color 0.2s ease;
|
||||
border: 1px solid var(--border);
|
||||
}
|
||||
.featured-card:hover {
|
||||
transform: translateY(-4px);
|
||||
box-shadow: var(--shadow-card-hover);
|
||||
border-color: var(--primary); /* teal accent on hover */
|
||||
}
|
||||
```
|
||||
|
||||
### Icon Container Styling
|
||||
```css
|
||||
.featured-icon-placeholder {
|
||||
background: linear-gradient(135deg, var(--primary) 0%, var(--primary-dark) 100%);
|
||||
box-shadow: 0 2px 8px rgba(78,205,196,0.3);
|
||||
}
|
||||
```
|
||||
|
||||
### Badge System (Consistent, Accessible)
|
||||
```css
|
||||
.badge-helm {
|
||||
background: #0d6cbf;
|
||||
color: #fff;
|
||||
}
|
||||
.badge-multi {
|
||||
background: #7c3aed;
|
||||
color: #fff;
|
||||
}
|
||||
.badge-attestation {
|
||||
background: #059669;
|
||||
color: #fff;
|
||||
}
|
||||
/* All badges: */
|
||||
font-weight: 600;
|
||||
letter-spacing: 0.02em;
|
||||
text-transform: uppercase;
|
||||
font-size: 0.7rem;
|
||||
padding: 0.25rem 0.5rem;
|
||||
border-radius: 4px;
|
||||
```
|
||||
|
||||
### Hero Section Enhancement
|
||||
```css
|
||||
.hero-section {
|
||||
background:
|
||||
linear-gradient(135deg, var(--hero-bg-start) 0%, var(--hero-bg-end) 50%, rgba(78,205,196,0.1) 100%),
|
||||
url('/static/wave-pattern.svg'); /* subtle wave pattern */
|
||||
background-size: cover, 100% 50px;
|
||||
background-position: center, bottom;
|
||||
background-repeat: no-repeat, repeat-x;
|
||||
}
|
||||
.benefit-card {
|
||||
border: 1px solid transparent;
|
||||
border-radius: 12px; /* softer corners */
|
||||
transition: all 0.2s ease;
|
||||
}
|
||||
.benefit-card:hover {
|
||||
border-color: var(--primary);
|
||||
transform: translateY(-4px);
|
||||
}
|
||||
```
|
||||
|
||||
### Playful Border Radius (Softer Feel)
|
||||
```css
|
||||
:root {
|
||||
--radius-sm: 6px; /* was 4px */
|
||||
--radius-md: 12px; /* was 8px */
|
||||
--radius-lg: 16px; /* new */
|
||||
}
|
||||
|
||||
.featured-card { border-radius: var(--radius-md); }
|
||||
.benefit-card { border-radius: var(--radius-md); }
|
||||
.btn { border-radius: var(--radius-sm); }
|
||||
.hero-terminal { border-radius: var(--radius-lg); }
|
||||
```
|
||||
|
||||
### Fun Empty States
|
||||
```css
|
||||
.empty-state {
|
||||
text-align: center;
|
||||
padding: 3rem;
|
||||
}
|
||||
.empty-state-mascot {
|
||||
width: 120px;
|
||||
height: auto;
|
||||
margin-bottom: 1.5rem;
|
||||
animation: float 3s ease-in-out infinite;
|
||||
}
|
||||
@keyframes float {
|
||||
0%, 100% { transform: translateY(0); }
|
||||
50% { transform: translateY(-10px); }
|
||||
}
|
||||
.empty-state-title {
|
||||
font-size: 1.5rem;
|
||||
font-weight: 600;
|
||||
color: var(--fg);
|
||||
}
|
||||
.empty-state-text {
|
||||
color: var(--secondary);
|
||||
margin-bottom: 1.5rem;
|
||||
}
|
||||
```
|
||||
|
||||
### Typography Refinements
|
||||
```css
|
||||
.featured-title {
|
||||
font-weight: 600;
|
||||
letter-spacing: -0.01em;
|
||||
}
|
||||
.featured-description {
|
||||
line-height: 1.5;
|
||||
opacity: 0.85;
|
||||
}
|
||||
```
|
||||
|
||||
## Data Model Change
|
||||
|
||||
**Current "What's New":** Shows individual pushes (each tag push is a separate card)
|
||||
|
||||
**Proposed "What's New":** Shows repos ordered by last update time (same as Featured, different sort)
|
||||
|
||||
**Tracking:** `repository_stats` table already has `last_push` timestamp!
|
||||
```sql
|
||||
SELECT * FROM repository_stats ORDER BY last_push DESC LIMIT 9;
|
||||
```
|
||||
|
||||
**Unified Card Data:**
|
||||
| Field | Source |
|
||||
|-------|--------|
|
||||
| Handle, Repository | users + manifests |
|
||||
| Tag | Latest tag from `tags` table |
|
||||
| Digest | From latest tag or manifest |
|
||||
| Description, IconURL | repo_pages or annotations |
|
||||
| StarCount, PullCount | stars count + repository_stats |
|
||||
| LastUpdated | `repository_stats.last_push` |
|
||||
| ArtifactType | manifests.artifact_type |
|
||||
|
||||
## Files to Modify
|
||||
|
||||
| File | Changes |
|
||||
|------|---------|
|
||||
| `pkg/appview/public/css/style.css` | Rounded corners, shadows, hover, badges, ocean theme |
|
||||
| `pkg/appview/public/wave-pattern.svg` | NEW: Subtle wave pattern for hero background |
|
||||
| `pkg/appview/templates/components/repo-card.html` | Add Tag, Digest, LastUpdated fields |
|
||||
| `pkg/appview/templates/components/empty-state.html` | NEW: Reusable fun empty state with mascot |
|
||||
| `pkg/appview/templates/pages/home.html` | Both sections use repo-card grid |
|
||||
| `pkg/appview/templates/pages/404.html` | Fun "Lost at sea" error page |
|
||||
| `pkg/appview/db/queries.go` | New `GetRecentlyUpdatedRepos()` query; add fields to `RepoCardData` |
|
||||
| `pkg/appview/handlers/home.go` | Replace `GetRecentPushes` with `GetRecentlyUpdatedRepos` |
|
||||
| `pkg/appview/templates/partials/push-list.html` | Delete or repurpose (no longer needed) |
|
||||
|
||||
## Dependencies
|
||||
|
||||
**Mascot Art Needed:**
|
||||
- `seahorse-empty.svg` - Friendly pose for "nothing here yet" empty states
|
||||
- `seahorse-confused.svg` - Lost/confused pose for 404 errors
|
||||
- `seahorse-waving.svg` (optional) - For hero section accent
|
||||
|
||||
**Can proceed without art:**
|
||||
- CSS changes (colors, shadows, rounded corners, gradients)
|
||||
- Card layout and grid changes
|
||||
- Data layer changes (queries, handlers)
|
||||
- Wave pattern background (simple SVG)
|
||||
|
||||
**Blocked until art is ready:**
|
||||
- Empty state component with mascot
|
||||
- 404 page redesign with mascot
|
||||
- Hero mascot integration (optional)
|
||||
|
||||
## Implementation Phases
|
||||
|
||||
### Phase 1: CSS & Layout (No art needed)
|
||||
1. Update border-radius variables (softer corners)
|
||||
2. New shadow system
|
||||
3. Card hover effects with teal accent
|
||||
4. Tile grid layout (`minmax(280px, 1fr)`)
|
||||
5. Wave pattern SVG for hero background
|
||||
|
||||
### Phase 2: Card Component & Data
|
||||
1. Update `repo-card.html` with new structure
|
||||
2. Add `Digest`, `Tag`, `CreatedAt` fields
|
||||
3. Update queries for latest manifest info
|
||||
4. Replace push list with card grid
|
||||
|
||||
### Phase 3: Hero & Section Polish
|
||||
1. Hero gradient + wave pattern
|
||||
2. Benefit card improvements
|
||||
3. Section headers and spacing
|
||||
4. Mobile responsive breakpoints
|
||||
|
||||
### Phase 4: Mascot Integration (BLOCKED - needs art)
|
||||
1. Empty state component with mascot
|
||||
2. 404 page with confused seahorse
|
||||
3. Hero mascot (optional)
|
||||
|
||||
### Phase 5: Testing
|
||||
1. Dark mode verification
|
||||
2. Mobile responsive check
|
||||
3. All functionality works (stars, links, copy)
|
||||
|
||||
## Verification
|
||||
|
||||
1. **Visual check on homepage** - cards have depth and polish
|
||||
2. **Hover states** - smooth transitions on cards, buttons, badges
|
||||
3. **Dark mode** - all changes work in both themes
|
||||
4. **Mobile** - responsive at all breakpoints
|
||||
5. **Functionality** - stars, search, navigation all work
|
||||
6. **Performance** - no jank from CSS transitions
|
||||
7. **Accessibility** - badge text readable (contrast check)
|
||||
480
docs/REMOVING_DISTRIBUTION.md
Normal file
480
docs/REMOVING_DISTRIBUTION.md
Normal file
@@ -0,0 +1,480 @@
|
||||
# Removing distribution/distribution
|
||||
|
||||
This document analyzes what it would take to remove the `github.com/distribution/distribution/v3` library and implement ATCR's own OCI Distribution Spec HTTP endpoints.
|
||||
|
||||
## Why Consider Removing It
|
||||
|
||||
1. **Impedance mismatch** -- Distribution assumes manifests and blobs live in the same storage backend. ATCR routes manifests to ATProto PDS and blobs to hold/S3. Every storage interface is overridden.
|
||||
2. **Context value workaround** -- `Repository()` receives only `context.Context` from distribution's interface, forcing auth/identity data through context keys into `RegistryContext`.
|
||||
3. **Per-request repository creation** -- `RoutingRepository` is recreated on every request because distribution's caching assumptions conflict with ATCR's OAuth session model.
|
||||
4. **Stale transitive dependencies** -- Distribution pulls in AWS SDK v1 (EOL) via its S3 storage driver, even though ATCR doesn't use that driver.
|
||||
5. **Unused features** -- GC, notifications, storage drivers, replication -- none are used. ATCR has its own GC, its own event dispatch (`processManifest` XRPC), and its own S3 integration.
|
||||
6. **Upstream maintenance pace** -- Slow to merge dependency updates and bug fixes.
|
||||
|
||||
## What Distribution Currently Provides
|
||||
|
||||
Only these pieces are actually used:
|
||||
|
||||
| What | Distribution Package | ATCR Usage |
|
||||
|------|---------------------|------------|
|
||||
| HTTP endpoint routing | `registry/handlers` | `handlers.NewApp()` creates the `/v2/` handler |
|
||||
| OCI error responses | `registry/api/errcode` | `ErrorCodeUnauthorized`, `ErrorCodeDenied`, `ErrorCodeUnsupported` |
|
||||
| Middleware registration | `registry/middleware/registry` | `Register("atproto-resolver", ...)` |
|
||||
| Repository interface | `distribution` (root) | `Repository`, `ManifestService`, `BlobStore`, `TagService` |
|
||||
| Reference parsing | `distribution/reference` | `reference.Named` for `identity/image` parsing |
|
||||
| Token auth | `registry/auth/token` | Blank import for registration |
|
||||
| In-memory driver | `registry/storage/driver/inmemory` | Blank import; placeholder since real storage is external |
|
||||
| Configuration types | `configuration` | `configuration.Configuration` struct |
|
||||
|
||||
Everything else (S3 driver, GC, notifications, replication, schema validation) is dead weight.
|
||||
|
||||
## Files That Import Distribution
|
||||
|
||||
All in `pkg/appview/` -- hold and scanner are unaffected.
|
||||
|
||||
**Core implementation (8 files):**
|
||||
- `storage/routing_repository.go` -- `distribution.Repository` wrapper
|
||||
- `storage/manifest_store.go` -- `distribution.ManifestService` impl
|
||||
- `storage/proxy_blob_store.go` -- `distribution.BlobStore` + `BlobWriter` impl
|
||||
- `storage/tag_store.go` -- `distribution.TagService` impl
|
||||
- `middleware/registry.go` -- `distribution.Namespace` + middleware registration
|
||||
- `config.go` -- Builds `configuration.Configuration`
|
||||
- `server.go` -- `handlers.NewApp()`, `errcode` for error responses
|
||||
- `cmd/appview/main.go` -- Blank imports for driver/auth registration
|
||||
|
||||
**Tests (6 files):**
|
||||
- `storage/routing_repository_test.go`
|
||||
- `storage/manifest_store_test.go`
|
||||
- `storage/proxy_blob_store_test.go`
|
||||
- `storage/tag_store_test.go`
|
||||
- `middleware/registry_test.go`
|
||||
|
||||
## OCI Distribution Spec Endpoints to Implement
|
||||
|
||||
The spec defines these HTTP endpoints. ATCR would need handlers for each.
|
||||
|
||||
### Version Check
|
||||
|
||||
```
|
||||
GET /v2/
|
||||
200 OK (confirms OCI compliance)
|
||||
401 Unauthorized (triggers auth flow)
|
||||
```
|
||||
|
||||
Docker clients hit this first. Must return 200 for authenticated requests. A 401 response with `WWW-Authenticate` header triggers the Docker auth handshake.
|
||||
|
||||
### Manifests
|
||||
|
||||
```
|
||||
GET /v2/<name>/manifests/<reference> -> 200 + manifest body
|
||||
HEAD /v2/<name>/manifests/<reference> -> 200 + headers only
|
||||
PUT /v2/<name>/manifests/<reference> -> 201 Created
|
||||
DELETE /v2/<name>/manifests/<reference> -> 202 Accepted
|
||||
```
|
||||
|
||||
`<reference>` is either a tag (`latest`) or digest (`sha256:abc...`).
|
||||
|
||||
**Required headers:**
|
||||
- Request `Accept`: manifest media types the client supports
|
||||
- Response `Content-Type`: actual manifest media type
|
||||
- Response `Docker-Content-Digest`: canonical digest of manifest
|
||||
|
||||
**Media types to support:**
|
||||
- `application/vnd.oci.image.manifest.v1+json`
|
||||
- `application/vnd.oci.image.index.v1+json`
|
||||
- `application/vnd.docker.distribution.manifest.v2+json`
|
||||
- `application/vnd.docker.distribution.manifest.list.v2+json`
|
||||
|
||||
### Blobs
|
||||
|
||||
```
|
||||
GET /v2/<name>/blobs/<digest> -> 200 + blob body (or 307 redirect)
|
||||
HEAD /v2/<name>/blobs/<digest> -> 200 + headers only
|
||||
DELETE /v2/<name>/blobs/<digest> -> 202 Accepted
|
||||
```
|
||||
|
||||
ATCR already redirects to presigned S3 URLs via `ServeBlob()` -- this would become a direct 307 redirect in the handler.
|
||||
|
||||
### Blob Uploads (Chunked/Resumable)
|
||||
|
||||
**Initiate:**
|
||||
```
|
||||
POST /v2/<name>/blobs/uploads/
|
||||
202 Accepted
|
||||
Location: /v2/<name>/blobs/uploads/<uuid>
|
||||
```
|
||||
|
||||
**Monolithic (single request):**
|
||||
```
|
||||
POST /v2/<name>/blobs/uploads/?digest=sha256:...
|
||||
Content-Type: application/octet-stream
|
||||
Body: <entire blob>
|
||||
201 Created
|
||||
```
|
||||
|
||||
**Chunked:**
|
||||
```
|
||||
PATCH /v2/<name>/blobs/uploads/<uuid>
|
||||
Content-Type: application/octet-stream
|
||||
Content-Range: <start>-<end>
|
||||
Body: <chunk data>
|
||||
202 Accepted
|
||||
Range: 0-<end>
|
||||
|
||||
(repeat PATCH for each chunk)
|
||||
|
||||
PUT /v2/<name>/blobs/uploads/<uuid>?digest=sha256:...
|
||||
201 Created
|
||||
Location: /v2/<name>/blobs/<digest>
|
||||
```
|
||||
|
||||
**Check progress:**
|
||||
```
|
||||
GET /v2/<name>/blobs/uploads/<uuid>
|
||||
204 No Content
|
||||
Range: 0-<bytes received>
|
||||
```
|
||||
|
||||
**Cancel:**
|
||||
```
|
||||
DELETE /v2/<name>/blobs/uploads/<uuid>
|
||||
204 No Content
|
||||
```
|
||||
|
||||
**Cross-repo mount:**
|
||||
```
|
||||
POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<other-repo>
|
||||
201 Created (if blob exists in source repo)
|
||||
202 Accepted (fall back to regular upload)
|
||||
```
|
||||
|
||||
### Tags
|
||||
|
||||
```
|
||||
GET /v2/<name>/tags/list
|
||||
200 OK
|
||||
{
|
||||
"name": "<name>",
|
||||
"tags": ["latest", "v1.0"]
|
||||
}
|
||||
```
|
||||
|
||||
Supports pagination via `n` (count) and `last` (cursor) query params.
|
||||
|
||||
### Referrers (OCI v1.1)
|
||||
|
||||
```
|
||||
GET /v2/<name>/referrers/<digest>
|
||||
200 OK
|
||||
Content-Type: application/vnd.oci.image.index.v1+json
|
||||
Body: image index of referring manifests
|
||||
```
|
||||
|
||||
Supports `artifactType` query filter. Returns manifests whose `subject` field points to the given digest.
|
||||
|
||||
### Catalog (Optional)
|
||||
|
||||
```
|
||||
GET /v2/_catalog
|
||||
200 OK
|
||||
{ "repositories": ["alice/app", "bob/tool"] }
|
||||
```
|
||||
|
||||
Pagination via `n` and `last`. ATCR may choose not to implement this (many registries don't).
|
||||
|
||||
## Error Response Format
|
||||
|
||||
All 4xx/5xx responses must use the OCI error envelope:
|
||||
|
||||
```json
|
||||
{
|
||||
"errors": [
|
||||
{
|
||||
"code": "MANIFEST_UNKNOWN",
|
||||
"message": "manifest not found",
|
||||
"detail": { "tag": "latest" }
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Standard error codes:**
|
||||
|
||||
| Code | HTTP Status | Meaning |
|
||||
|------|-------------|---------|
|
||||
| `BLOB_UNKNOWN` | 404 | Blob not found |
|
||||
| `BLOB_UPLOAD_INVALID` | 400 | Bad digest or size mismatch |
|
||||
| `BLOB_UPLOAD_UNKNOWN` | 404 | Upload session expired/missing |
|
||||
| `DIGEST_INVALID` | 400 | Digest doesn't match content |
|
||||
| `MANIFEST_BLOB_UNKNOWN` | 404 | Manifest references missing blob |
|
||||
| `MANIFEST_INVALID` | 400 | Malformed manifest |
|
||||
| `MANIFEST_UNKNOWN` | 404 | Manifest not found |
|
||||
| `NAME_INVALID` | 400 | Bad repository name |
|
||||
| `NAME_UNKNOWN` | 404 | Repository doesn't exist |
|
||||
| `SIZE_INVALID` | 400 | Content-Length mismatch |
|
||||
| `UNAUTHORIZED` | 401 | Authentication required |
|
||||
| `DENIED` | 403 | Permission denied |
|
||||
| `UNSUPPORTED` | 405 | Operation not supported |
|
||||
| `TOOMANYREQUESTS` | 429 | Rate limited |
|
||||
|
||||
## What Exists Today vs What's New
|
||||
|
||||
For each handler, this breaks down what logic already exists in the storage layer (and just needs to be called) vs what new HTTP glue code must be written. Distribution's handler layer currently handles all the HTTP parsing, header validation, content negotiation, and response formatting -- all of that becomes our responsibility.
|
||||
|
||||
### Shared New Code
|
||||
|
||||
**Error helpers (~50 lines, new):**
|
||||
OCI error envelope formatting. Currently provided by `errcode.ErrorCodeUnauthorized` etc.
|
||||
|
||||
```go
|
||||
type RegistryError struct {
|
||||
Code string `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Detail interface{} `json:"detail,omitempty"`
|
||||
}
|
||||
|
||||
func WriteError(w http.ResponseWriter, status int, code, message string) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(status)
|
||||
json.NewEncoder(w).Encode(struct {
|
||||
Errors []RegistryError `json:"errors"`
|
||||
}{Errors: []RegistryError{{Code: code, Message: message}}})
|
||||
}
|
||||
```
|
||||
|
||||
**Auth middleware (~80 lines, mostly exists):**
|
||||
`ExtractAuthMethod()` already exists in `middleware/registry.go`. Needs adaptation to work standalone (currently wraps distribution's app). Must also generate `WWW-Authenticate` header for 401 responses -- distribution's token auth handler currently does this via blank import of `registry/auth/token`.
|
||||
|
||||
**Identity resolution middleware (~250 lines, exists):**
|
||||
`NamespaceResolver.Repository()` in `middleware/registry.go` does identity resolution, hold discovery, service token acquisition, and ATProto client creation. This logic moves into an HTTP middleware but the code is the same -- resolves DID, finds hold, gets service token, builds `RegistryContext`. The validation cache (concurrent service token deduplication) comes along as-is.
|
||||
|
||||
**Router (~30 lines, new):**
|
||||
```go
|
||||
mux.HandleFunc("GET /v2/", handleVersionCheck)
|
||||
mux.HandleFunc("GET /v2/{name...}/manifests/{reference}", handleManifestGet)
|
||||
// ... etc
|
||||
```
|
||||
|
||||
### Handler-by-Handler Breakdown
|
||||
|
||||
---
|
||||
|
||||
**`handleVersionCheck`** -- `GET /v2/`
|
||||
|
||||
| | |
|
||||
|---|---|
|
||||
| Existing logic | None needed -- this is just a 200 OK response |
|
||||
| New code | ~10 lines. Return 200 with `Docker-Distribution-API-Version: registry/2.0` header. If unauthenticated, return 401 with `WWW-Authenticate` header to trigger Docker's auth flow |
|
||||
|
||||
---
|
||||
|
||||
**`handleManifestGet`** -- `GET /v2/<name>/manifests/<reference>`
|
||||
|
||||
| | |
|
||||
|---|---|
|
||||
| Existing logic | `ManifestStore.Get()` fetches manifest from PDS (record lookup, optional blob download for new-format records). Returns media type + raw bytes. Also fires async pull notification to hold for stats. `TagStore.Get()` resolves tag → digest when reference is a tag |
|
||||
| New code (~40 lines) | Parse `<reference>` to determine tag vs digest. If tag, call `TagStore.Get()` first to resolve digest. Call `ManifestStore.Get()`. Set response headers: `Content-Type` (manifest media type), `Docker-Content-Digest` (canonical digest), `Content-Length`. Write body. Handle 404 (manifest not found → `MANIFEST_UNKNOWN` error) |
|
||||
| Subtle | Content negotiation: must check client's `Accept` header against the manifest's actual media type. Distribution handles this transparently. If client doesn't accept the type, return 404. In practice most clients accept everything, but `crane` and `skopeo` can be picky |
|
||||
|
||||
---
|
||||
|
||||
**`handleManifestHead`** -- `HEAD /v2/<name>/manifests/<reference>`
|
||||
|
||||
| | |
|
||||
|---|---|
|
||||
| Existing logic | `ManifestStore.Exists()` checks PDS record existence. `ManifestStore.Get()` needed for full headers |
|
||||
| New code (~30 lines) | Same as GET but write headers only, no body. Needs `Content-Type`, `Docker-Content-Digest`, `Content-Length`. Could call `Exists()` for a fast path and `Get()` for full header population, or just call `Get()` and skip the body write |
|
||||
| Note | Some clients (Docker) use HEAD to check existence before pulling. Must return same headers as GET |
|
||||
|
||||
---
|
||||
|
||||
**`handleManifestPut`** -- `PUT /v2/<name>/manifests/<reference>`
|
||||
|
||||
| | |
|
||||
|---|---|
|
||||
| Existing logic | `ManifestStore.Put()` does a LOT: calculates digest, uploads manifest bytes as blob to PDS, creates `ManifestRecord` with structured metadata, validates manifest list child references, extracts config labels, fetches README/icon, creates tag record, fires async notifications to hold, creates repo page records, handles successor migration |
|
||||
| New code (~50 lines) | Read request body. Extract `Content-Type` header as media type. Parse `<reference>` to determine if this is a tag push. Call `ManifestStore.Put()` with payload, media type, and optional tag. Set response headers: `Location` (`/v2/<name>/manifests/<digest>`), `Docker-Content-Digest`. Return 201 Created. Handle errors: `MANIFEST_INVALID` (bad JSON), `MANIFEST_BLOB_UNKNOWN` (missing child manifest in manifest list) |
|
||||
| Subtle | Distribution currently wraps the manifest in a `distribution.Manifest` interface (with `Payload()` and `References()` methods) before passing to `Put()`. Without distribution, we'd change `Put()` to accept raw `[]byte` + `mediaType` + optional tag directly -- simpler but requires updating the method signature and its internals |
|
||||
|
||||
---
|
||||
|
||||
**`handleManifestDelete`** -- `DELETE /v2/<name>/manifests/<reference>`
|
||||
|
||||
| | |
|
||||
|---|---|
|
||||
| Existing logic | `ManifestStore.Delete()` calls `ATProtoClient.DeleteRecord()` |
|
||||
| New code (~15 lines) | Parse digest from `<reference>`. Call `ManifestStore.Delete()`. Return 202 Accepted. Handle 404 |
|
||||
|
||||
---
|
||||
|
||||
**`handleBlobGet`** -- `GET /v2/<name>/blobs/<digest>`
|
||||
|
||||
| | |
|
||||
|---|---|
|
||||
| Existing logic | `ProxyBlobStore.ServeBlob()` checks read access, gets presigned URL from hold, and issues 307 redirect. This is already essentially an HTTP handler |
|
||||
| New code (~20 lines) | Parse digest from path. Call the presigned URL logic (read access check + hold XRPC call). Write 307 redirect with `Location` header pointing to presigned S3 URL |
|
||||
| Note | `ServeBlob()` currently takes `http.ResponseWriter` and `*http.Request` -- it's already doing the HTTP work. This handler is mostly just calling it. Could almost be used as-is |
|
||||
|
||||
---
|
||||
|
||||
**`handleBlobHead`** -- `HEAD /v2/<name>/blobs/<digest>`
|
||||
|
||||
| | |
|
||||
|---|---|
|
||||
| Existing logic | `ProxyBlobStore.Stat()` checks read access, gets presigned HEAD URL, makes HEAD request to S3, returns size |
|
||||
| New code (~20 lines) | Parse digest. Call `Stat()`. Set `Content-Length`, `Docker-Content-Digest`, `Content-Type: application/octet-stream`. Return 200. Handle 404 (`BLOB_UNKNOWN`) |
|
||||
|
||||
---
|
||||
|
||||
**`handleBlobUploadInit`** -- `POST /v2/<name>/blobs/uploads/`
|
||||
|
||||
| | |
|
||||
|---|---|
|
||||
| Existing logic | `ProxyBlobStore.Create()` checks write access, generates upload ID, calls `startMultipartUpload()` XRPC to hold, creates `ProxyBlobWriter`, stores in `globalUploads` map |
|
||||
| New code (~50 lines) | Check for `?mount=<digest>&from=<repo>` query params (cross-repo mount). Check for `?digest=<digest>` (monolithic upload -- read body, write to store, complete in one shot). Otherwise, call `Create()` to start a new upload session. Return 202 Accepted with `Location: /v2/<name>/blobs/uploads/<uuid>` header, `Docker-Upload-UUID` header |
|
||||
| Subtle | Monolithic upload (single POST with digest and body) is a shortcut some clients use. Distribution handles this transparently. We'd need to handle it explicitly: read body, create writer, write, commit. Cross-repo mount is also handled here -- check if blob exists in source repo, skip upload if so |
|
||||
|
||||
---
|
||||
|
||||
**`handleBlobUploadChunk`** -- `PATCH /v2/<name>/blobs/uploads/<uuid>`
|
||||
|
||||
| | |
|
||||
|---|---|
|
||||
| Existing logic | `ProxyBlobWriter.Write()` buffers data and auto-flushes 10MB chunks to S3 via presigned URLs. `flushPart()` handles the XRPC call to hold for part upload URLs and ETag tracking |
|
||||
| New code (~40 lines) | Look up writer from `globalUploads` by UUID. Parse `Content-Range` header (format: `<start>-<end>`). Read request body. Call `writer.Write(body)`. Return 202 Accepted with `Location` header (same upload URL), `Range: 0-<total bytes received>` header. Handle missing upload (`BLOB_UPLOAD_UNKNOWN`) |
|
||||
| Subtle | `Content-Range` validation: must verify start offset matches current writer position (no gaps, no out-of-order). Return 416 Range Not Satisfiable if misaligned. Distribution handles this; we'd need to track and validate |
|
||||
|
||||
---
|
||||
|
||||
**`handleBlobUploadComplete`** -- `PUT /v2/<name>/blobs/uploads/<uuid>?digest=sha256:...`
|
||||
|
||||
| | |
|
||||
|---|---|
|
||||
| Existing logic | `ProxyBlobWriter.Commit()` flushes remaining buffer, calls `completeMultipartUpload()` XRPC to hold, removes writer from `globalUploads` |
|
||||
| New code (~40 lines) | Look up writer from `globalUploads`. Parse `?digest=` query param. If request has body, write it to the writer (final chunk can be in the PUT). Call `writer.Commit()` with digest descriptor. Return 201 Created with `Location: /v2/<name>/blobs/<digest>`, `Docker-Content-Digest` header. Handle errors: `DIGEST_INVALID` (provided digest doesn't match), `BLOB_UPLOAD_UNKNOWN` (expired session) |
|
||||
| Subtle | Digest validation: distribution verifies the provided digest matches what was actually uploaded. Our writer doesn't currently track a running digest hash -- `Commit()` just passes the digest through to hold. Need to decide: trust the hold to validate, or add client-side validation. Currently hold does the final validation since it has all the parts |
|
||||
|
||||
---
|
||||
|
||||
**`handleBlobUploadStatus`** -- `GET /v2/<name>/blobs/uploads/<uuid>`
|
||||
|
||||
| | |
|
||||
|---|---|
|
||||
| Existing logic | `ProxyBlobWriter.Size()` returns total bytes written |
|
||||
| New code (~15 lines) | Look up writer from `globalUploads`. Return 204 No Content with `Range: 0-<size - 1>`, `Docker-Upload-UUID`, `Location` headers. Handle missing upload |
|
||||
|
||||
---
|
||||
|
||||
**`handleBlobUploadCancel`** -- `DELETE /v2/<name>/blobs/uploads/<uuid>`
|
||||
|
||||
| | |
|
||||
|---|---|
|
||||
| Existing logic | `ProxyBlobWriter.Cancel()` calls `abortMultipartUpload()` XRPC to hold, removes from `globalUploads` |
|
||||
| New code (~15 lines) | Look up writer. Call `Cancel()`. Return 204 No Content. Handle missing upload |
|
||||
|
||||
---
|
||||
|
||||
**`handleTagsList`** -- `GET /v2/<name>/tags/list`
|
||||
|
||||
| | |
|
||||
|---|---|
|
||||
| Existing logic | `TagStore.All()` lists all tag records from PDS, filters by repository |
|
||||
| New code (~30 lines) | Call `TagStore.All()`. Parse `?n=` and `?last=` query params for pagination (slice the results). Return JSON: `{"name": "<name>", "tags": [...]}`. Set `Link` header for pagination if there are more results |
|
||||
| Note | Distribution handles pagination. We'd need to implement it ourselves -- sort tags, apply cursor, set Link header with next page URL |
|
||||
|
||||
---
|
||||
|
||||
**`handleReferrers`** -- `GET /v2/<name>/referrers/<digest>`
|
||||
|
||||
| | |
|
||||
|---|---|
|
||||
| Existing logic | Not currently implemented in ATCR's storage layer. Distribution may return an empty index |
|
||||
| New code (~30 lines) | Query manifests that have a `subject` field pointing to the given digest. Return an OCI image index containing descriptors for each referrer. Support `?artifactType=` filter. If no referrers, return empty index |
|
||||
| Note | This is new functionality either way. ATCR would need to query PDS for manifests with matching subject digests. Could defer this (return empty index) and implement properly later |
|
||||
|
||||
---
|
||||
|
||||
### Interface Changes to Storage Layer
|
||||
|
||||
The existing stores would need their method signatures simplified. This is mostly mechanical -- removing distribution wrapper types:
|
||||
|
||||
**ManifestStore changes:**
|
||||
- `Get()`: returns `(distribution.Manifest, error)` → returns `(mediaType string, payload []byte, err error)`
|
||||
- `Put()`: accepts `distribution.Manifest` + `...distribution.ManifestServiceOption` → accepts `payload []byte, mediaType string, tag string`
|
||||
- `Exists()` and `Delete()`: signatures stay roughly the same (just `digest.Digest` in, error out)
|
||||
- Remove `rawManifest` struct (wrapper implementing `distribution.Manifest` interface)
|
||||
- Remove `distribution.WithTagOption` extraction logic in `Put()`
|
||||
|
||||
**ProxyBlobStore changes:**
|
||||
- `Stat()`: returns `distribution.Descriptor` → returns `(size int64, err error)`
|
||||
- `Get()`: stays the same (returns `[]byte`)
|
||||
- `ServeBlob()`: already takes `http.ResponseWriter`/`*http.Request` -- could become the handler itself
|
||||
- `Create()`: returns `distribution.BlobWriter` → returns `*ProxyBlobWriter` directly
|
||||
- `Resume()`: same change
|
||||
- Remove `distribution.BlobCreateOption` / `distribution.CreateOptions` parsing
|
||||
- `ProxyBlobWriter.Commit()`: accepts `distribution.Descriptor` → accepts `digest string, size int64`
|
||||
|
||||
**TagStore changes:**
|
||||
- `Get()`: returns `distribution.Descriptor` → returns `(digest string, err error)`
|
||||
- `Tag()`: accepts `distribution.Descriptor` → accepts `digest string`
|
||||
- `All()`, `Untag()`, `Lookup()`: minimal changes
|
||||
|
||||
**RoutingRepository:**
|
||||
- Removed entirely. Handlers call stores directly. The lazy initialization via `sync.Once` goes away since there's no interface requiring a `Repository` object.
|
||||
|
||||
**Estimated interface change work:** ~150 lines changed across storage files + ~150 lines changed across test files.
|
||||
|
||||
## What Stays
|
||||
|
||||
These dependencies are used directly and stay regardless:
|
||||
|
||||
- `github.com/opencontainers/go-digest` -- Digest parsing/validation (standard, lightweight)
|
||||
- `github.com/opencontainers/image-spec` -- OCI manifest/index structs (optional but useful for validation)
|
||||
- `github.com/distribution/reference` -- Could stay (lightweight, no heavy transitive deps) or replace with string splitting since ATCR's name format is always `<identity>/<image>`
|
||||
|
||||
## Revised Effort Estimate
|
||||
|
||||
| Component | New Lines | Changed Lines | Notes |
|
||||
|-----------|-----------|---------------|-------|
|
||||
| Router + version check | ~40 | 0 | Trivial |
|
||||
| Error helpers | ~50 | 0 | OCI error envelope, error code constants |
|
||||
| Auth middleware adaptation | ~30 | ~50 | `WWW-Authenticate` header generation is new; `ExtractAuthMethod` moves |
|
||||
| Identity resolution middleware | ~20 | ~30 | `NamespaceResolver.Repository()` logic moves to HTTP middleware; code is the same |
|
||||
| Manifest handlers (GET/HEAD/PUT/DELETE) | ~135 | 0 | Content negotiation, header writing, tag vs digest parsing |
|
||||
| Blob handlers (GET/HEAD/DELETE) | ~55 | 0 | Presigned URL redirect, stat, delete stub |
|
||||
| Blob upload handlers (POST/PATCH/PUT/GET/DELETE) | ~160 | 0 | Chunked upload protocol, Content-Range validation, monolithic upload, cross-repo mount |
|
||||
| Tags list handler | ~30 | 0 | Pagination logic |
|
||||
| Referrers handler | ~30 | 0 | Could defer with empty index |
|
||||
| Storage interface changes | 0 | ~150 | Remove distribution types from method signatures |
|
||||
| Test updates | 0 | ~150 | Update mocks and assertions for new signatures |
|
||||
| Config cleanup | 0 | ~80 | Remove `buildDistributionConfig()`, blank imports |
|
||||
| **Total** | **~550 new** | **~460 changed** | **~1010 lines total** |
|
||||
|
||||
This is not a trivial migration. The ~550 new lines are genuine new HTTP handler code that doesn't exist today -- distribution's handler layer provides all of it currently. The changed lines are mostly mechanical (removing distribution type wrappers) but still need care and test updates.
|
||||
|
||||
## Risk Assessment
|
||||
|
||||
**Low risk:**
|
||||
- Storage logic is unchanged -- same PDS calls, same hold XRPC calls, same presigned URLs
|
||||
- Auth flow is unchanged -- same JWT validation, same OAuth refresh
|
||||
- Tests can be adapted incrementally
|
||||
|
||||
**Medium risk:**
|
||||
- Subtle OCI spec compliance gaps (edge cases in content negotiation, digest validation, chunked upload semantics)
|
||||
- Docker client compatibility -- different clients (Docker, Podman, crane, skopeo) may exercise different code paths
|
||||
|
||||
**Mitigation:**
|
||||
- Use [OCI conformance tests](https://github.com/opencontainers/distribution-spec/tree/main/conformance) to validate
|
||||
- Test against Docker, Podman, crane, and skopeo before shipping
|
||||
- Can be done incrementally: build new router, test alongside distribution handler, swap when ready
|
||||
|
||||
## Dependencies Removed
|
||||
|
||||
Removing distribution eliminates ~30-40 transitive packages, notably:
|
||||
- `github.com/aws/aws-sdk-go` (v1, EOL)
|
||||
- Azure cloud SDK packages
|
||||
- Google Cloud Storage packages
|
||||
- Distribution-specific logging/metrics
|
||||
- Unused storage driver registrations
|
||||
|
||||
Most other transitive deps (gRPC, protobuf, OpenTelemetry, logrus) are also pulled by `bluesky-social/indigo` and would remain.
|
||||
148
docs/REPOMGR_MIGRATION.md
Normal file
148
docs/REPOMGR_MIGRATION.md
Normal file
@@ -0,0 +1,148 @@
|
||||
# Incremental Migration: Vendored repomgr → Direct `indigo/repo`
|
||||
|
||||
## Context
|
||||
|
||||
The hold PDS uses a vendored copy of indigo's `repomgr` (~1450 lines in `pkg/hold/pds/repomgr.go`). Upstream, repomgr is [soft-deprecated](https://github.com/bluesky-social/indigo/pull/1102#issuecomment-2985956040) — bnewbold recommends using `indigo/repo` directly (as [cocoon](https://github.com/haileyok/cocoon) does). The vendored copy already has custom patches (PutRecord, UpsertRecord, prevData) and will continue to drift. This migration defines a clean interface, then swaps the implementation behind it.
|
||||
|
||||
## Phase 0: Save plan to docs, remove dead code, define interface ✅
|
||||
|
||||
**Goal:** Persist this migration plan as a reference doc. Shrink repomgr.go from ~1450 lines to ~750 by removing dead code. Fix import.go encapsulation. Define `RepoOperator` interface so all code accesses repomgr through it. No behavior change.
|
||||
|
||||
**Completed 2026-02-28.** repomgr.go: 1453 → 871 lines (-40%). import.go: 189 → 88 lines (-53%). New repo_operator.go: 82 lines.
|
||||
|
||||
### Step 1: Save plan to docs ✅
|
||||
- Write this plan to `docs/REPOMGR_MIGRATION.md`
|
||||
|
||||
### Step 2: Dead code deleted from `repomgr.go` ✅
|
||||
- `HandleExternalUserEvent()` + `handleExternalUserEventNoArchive()` + `handleExternalUserEventArchive()`
|
||||
- `ImportNewRepo()` + `processNewRepo()` + `walkTree()` + `processOp()` + `stringOrNil()`
|
||||
- `CheckRepoSig()`
|
||||
- `TakeDownRepo()`, `ResetRepo()`, `VerifyRepo()`
|
||||
- `GetProfile()`
|
||||
- `CarStore()`
|
||||
- `NextTID()` / `nextTID()` (removed entirely — `BatchWrite` uses `rm.clk.Next()` directly)
|
||||
- `noArchive` field removed from struct
|
||||
- 12 unused imports cleaned up
|
||||
|
||||
### Step 3: Fixed `import.go` encapsulation ✅
|
||||
Added `BulkUpsert()` method to `RepoManager`. Rewrote `ImportFromCAR` to call `p.repomgr.BulkUpsert()` instead of reaching into `p.repomgr.lockUser`, `p.repomgr.cs`, `p.repomgr.kmgr`, `p.repomgr.events`. Removed the 88-line `bulkImportRecords()` private method.
|
||||
|
||||
### Step 4: Defined `RepoOperator` interface ✅
|
||||
|
||||
New file: `pkg/hold/pds/repo_operator.go` — interface with 16 methods, compile-time check, shared types (`RepoEvent`, `RepoOp`, `EventKind`, `BulkRecord`).
|
||||
|
||||
### Step 5: Updated callers to use interface ✅
|
||||
- `pkg/hold/pds/server.go` — `repomgr *RepoManager` → `repomgr RepoOperator`, `RepomgrRef()` returns `RepoOperator`
|
||||
- Downstream callers (`hold/server.go`, `admin/handlers_relays.go`, tests) unchanged — they go through `RepomgrRef()` which returns the interface
|
||||
- `var _ RepoOperator = (*RepoManager)(nil)` compile-time check in `repo_operator.go`
|
||||
|
||||
### Verification: ✅
|
||||
- `make lint` — 0 issues (also fixed pre-existing unchecked error in `events.go`)
|
||||
- `make test` — all tests pass
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Test hardening against the interface ✅
|
||||
|
||||
**Goal:** Write tests against `RepoOperator` that verify current behavior while `RepoManager` is the only implementation. These become the regression safety net when swapping to the new implementation in Phase 3.
|
||||
|
||||
**Completed 2026-02-28.** New `repo_operator_test.go`: 38 subtests covering all CRUD, read, event emission, error paths, and edge cases. `runRepoOperatorTests(t, setup)` pattern ready for Phase 2's `DirectRepoOperator`.
|
||||
|
||||
### Gaps covered ✅
|
||||
- `CreateRecord` — round-trip, TID 13-char rkey format, no-panic without event handler
|
||||
- `UpdateRecord` — CID changes, new data returned, non-existent record error, hydrated events
|
||||
- `PutRecord` — explicit rkey, duplicate rkey error, hydrated events
|
||||
- `UpsertRecord` — create path (created=true), update path (created=false, CID changes)
|
||||
- `DeleteRecord` — delete + verify gone, non-existent rkey error
|
||||
- `BatchWrite` — create+delete batch, update write type, auto-rkey (nil Rkey), delete-not-found error, empty write elem error, multi-op event emission with hydration, update hydration
|
||||
- `BulkUpsert` — create + re-upsert with changed data, multi-op event emission
|
||||
- `GetRecord` — CID match, CID mismatch error, not-found error
|
||||
- `GetRecordProof` — head CID + proof blocks, not-found error, no-repo error
|
||||
- `GetRepoRoot` — defined CID after init, changes after write
|
||||
- `GetRepoRev` — non-empty, changes after write
|
||||
- `ReadRepo` — non-empty CAR output, incremental export with `since`
|
||||
- `InitNewActor` — empty DID error, zero user error, event emission with hydration
|
||||
- Event emission — create/update/delete events verified: prevData, ops, oldRoot, newRoot, rev, since, repoSlice, hydration
|
||||
|
||||
### Coverage ✅
|
||||
All RepoOperator methods 81–100%. Remaining uncovered lines are internal infrastructure error guards (`GetUserRepoRev`, `NewDeltaSession`, `OpenRepo`, `Commit`, `CloseWithRoot`) — not reachable without mocking the carstore.
|
||||
|
||||
### Files modified ✅
|
||||
- `pkg/hold/pds/repo_operator_test.go` — new file, 38 subtests
|
||||
|
||||
### Verification ✅
|
||||
- `make lint` — 0 issues
|
||||
- `make test` — all tests pass
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Build new implementation ✅
|
||||
|
||||
**Goal:** Create `DirectRepoOperator` using `indigo/repo` directly (cocoon pattern).
|
||||
|
||||
**Completed 2026-02-28.** New `pkg/hold/pds/repo.go`: 548 lines (vs 852 in repomgr.go, ~36% reduction). All 37 subtests pass identically for both implementations. Race detector, shuffled order, and parallel execution all clean.
|
||||
|
||||
### New file: `pkg/hold/pds/repo.go` ✅
|
||||
|
||||
Key differences from vendored repomgr:
|
||||
- **Single `sync.Mutex`** instead of per-user lock map (`lklk` + `userLocks` + `userLock` struct + reference counting)
|
||||
- **No OpenTelemetry tracing** (`otel.Tracer` calls removed)
|
||||
- **No `gorm` dependency** (`RepoHead` struct removed, `gorm.io/gorm` dropped from go.mod)
|
||||
- **`openWriteSession` / `commitWrite` helpers** extract the repeated 6-step write pattern
|
||||
|
||||
Core mutation pattern (same as current, just cleaner):
|
||||
1. Lock → get rev → open delta session → open repo
|
||||
2. Capture `r.DataCid()` for prevData
|
||||
3. Perform operation(s)
|
||||
4. `r.Commit()` → `ds.CloseWithRoot()` → emit event → unlock
|
||||
|
||||
### Shared types moved to `repo_operator.go` ✅
|
||||
- `KeyManager` interface and `ActorInfo` struct moved from `repomgr.go`
|
||||
- Both implementations import from the same location
|
||||
|
||||
### Test wiring ✅
|
||||
- `setupTestDirectRepoOperator` — creates carstore + key manager directly (no `NewHoldPDS`)
|
||||
- `runRepoOperatorTests` refactored to accept optional `freshSetup` for `InitNewActor_EventEmission`
|
||||
- `TestDirectRepoOperator` runs all 37 subtests identically
|
||||
|
||||
### Verification ✅
|
||||
- `go build ./cmd/hold` — compiles
|
||||
- `TestRepoManager` — 37/37 subtests pass
|
||||
- `TestDirectRepoOperator` — 37/37 subtests pass
|
||||
- `-race -shuffle=on -count=5 -parallel=8` — all clean
|
||||
- `make lint` — 0 issues
|
||||
- `make test` — all tests pass
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: Config flag + switchover
|
||||
|
||||
**Goal:** Feature flag to select implementation, default old.
|
||||
|
||||
### Changes:
|
||||
- `pkg/hold/config.go` — add `UseDirectRepo bool` to DatabaseConfig
|
||||
- `pkg/hold/pds/server.go` — select implementation based on config in `NewHoldPDS`/`NewHoldPDSWithDB`
|
||||
- Regenerate example configs
|
||||
|
||||
### Verification:
|
||||
- Deploy with `use_direct_repo: false` (default)
|
||||
- Test with `use_direct_repo: true` in staging
|
||||
- `make lint && make test`
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: Remove vendored repomgr
|
||||
|
||||
**Goal:** After production validation, delete the old code.
|
||||
|
||||
- Delete `repomgr.go`
|
||||
- Remove config flag, make `DirectRepoOperator` the only implementation
|
||||
- Rename `repo_direct.go` → `repo_operator_impl.go`
|
||||
- Regenerate example configs
|
||||
- `make lint && make test`
|
||||
|
||||
---
|
||||
|
||||
## Decision log
|
||||
|
||||
- **`indigo/repo` over `atproto/repo`**: `atproto/repo` has the MST primitives (`Insert`, `Remove`, `ApplyOp`) but no high-level PDS API (`OpenRepo`, `CreateRecord`, `Commit(signFn)`). Its own doc.go says "does not yet work for implementing a repository host (PDS)." `indigo/repo` is what the reference PDS and cocoon use. The `RepoOperator` interface means we can swap later if `atproto/repo` adds PDS support.
|
||||
@@ -150,10 +150,11 @@ HOLD_SBOM_FORMAT=spdx-json
|
||||
```bash
|
||||
# .env.hold
|
||||
HOLD_PUBLIC_URL=https://hold01.atcr.io
|
||||
STORAGE_DRIVER=s3
|
||||
S3_BUCKET=my-hold-blobs
|
||||
AWS_ACCESS_KEY_ID=your-access-key
|
||||
AWS_SECRET_ACCESS_KEY=your-secret-key
|
||||
HOLD_OWNER=did:plc:xyz123
|
||||
HOLD_DATABASE_PATH=/var/lib/atcr/hold.db
|
||||
HOLD_DATABASE_DIR=/var/lib/atcr-hold
|
||||
|
||||
# Enable SBOM scanning
|
||||
HOLD_SBOM_ENABLED=true
|
||||
@@ -494,8 +495,9 @@ Users can verify scanner integrity:
|
||||
# 1. Configure hold with SBOM enabled
|
||||
cat > .env.hold <<EOF
|
||||
HOLD_PUBLIC_URL=https://myhold.example.com
|
||||
STORAGE_DRIVER=s3
|
||||
S3_BUCKET=my-blobs
|
||||
AWS_ACCESS_KEY_ID=your-access-key
|
||||
AWS_SECRET_ACCESS_KEY=your-secret-key
|
||||
HOLD_OWNER=did:plc:myid
|
||||
|
||||
# Enable SBOM scanning
|
||||
|
||||
@@ -545,7 +545,7 @@ func (v *ATProtoVerifier) VerifyReference(
|
||||
Name: v.name,
|
||||
Type: v.Type(),
|
||||
Message: fmt.Sprintf("Verified for DID %s", sigData.ATProto.DID),
|
||||
Extensions: map[string]interface{}{
|
||||
Extensions: map[string]any{
|
||||
"did": sigData.ATProto.DID,
|
||||
"handle": sigData.ATProto.Handle,
|
||||
"signedAt": sigData.ATProto.SignedAt,
|
||||
@@ -673,7 +673,7 @@ type ProviderRequest struct {
|
||||
|
||||
type ProviderResponse struct {
|
||||
SystemError string `json:"system_error,omitempty"`
|
||||
Responses []map[string]interface{} `json:"responses"`
|
||||
Responses []map[string]any `json:"responses"`
|
||||
}
|
||||
|
||||
func handleProvide(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -684,11 +684,11 @@ func handleProvide(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// Verify each image
|
||||
responses := make([]map[string]interface{}, 0, len(req.Values))
|
||||
responses := make([]map[string]any, 0, len(req.Values))
|
||||
for _, image := range req.Values {
|
||||
result, err := verifier.Verify(context.Background(), image)
|
||||
|
||||
response := map[string]interface{}{
|
||||
response := map[string]any{
|
||||
"image": image,
|
||||
"verified": false,
|
||||
}
|
||||
|
||||
@@ -1,641 +0,0 @@
|
||||
# Test Coverage Gaps
|
||||
|
||||
**Overall Coverage:** 39.0% (improved from 37.7%, +1.3%)
|
||||
|
||||
This document tracks files in the `pkg/` directory that need test coverage, organized by package. Data is based on actual `coverage.out` analysis.
|
||||
|
||||
**Last Updated:** After adding tests for atproto utilities, handlers improvements, and OAuth browser functionality.
|
||||
|
||||
## Recent Achievements 🎯
|
||||
|
||||
In this testing session, we achieved:
|
||||
|
||||
1. **pkg/appview/handlers** - 2.1% → 19.7% (**+17.6%** 🎉)
|
||||
- Significant improvement in web handler coverage
|
||||
- Better test coverage across handler functions
|
||||
|
||||
2. **pkg/atproto** - 26.1% → 27.8% (**+1.7%**)
|
||||
- New test files added:
|
||||
- directory_test.go (NEW)
|
||||
- endpoints_test.go (NEW)
|
||||
- utils_test.go (NEW)
|
||||
- Improved lexicon tests
|
||||
|
||||
3. **pkg/auth/oauth** - 48.3% → 50.7% (**+2.4%**)
|
||||
- browser_test.go improvements
|
||||
- Better OAuth flow coverage
|
||||
|
||||
4. **Overall improvement** - 37.7% → 39.0% (**+1.3%**)
|
||||
- Cumulative improvement from baseline: 31.2% → 39.0% (**+7.8%**)
|
||||
|
||||
**Note:** pkg/appview/db coverage decreased slightly from 44.8% → 41.2% (-3.6%), likely due to additional untested code paths being tracked in existing test files.
|
||||
|
||||
**Next Priority:** Continue with storage blob write operations (proxy_blob_store.go Put/Create/Writer methods)
|
||||
|
||||
---
|
||||
|
||||
Legend:
|
||||
- ⭐ **Critical Priority** - Core functionality that must be tested
|
||||
- 🔴 **High Priority** - Important functionality with security/data implications
|
||||
- 🟡 **Medium Priority** - Supporting functionality
|
||||
- 🟢 **Low Priority** - Nice-to-have, less critical features
|
||||
- ✅ **Good Coverage** - Package has >70% coverage
|
||||
- 📊 **Partial Coverage** - File has some coverage but needs more
|
||||
- 🎯 **Recently Improved** - Coverage significantly improved in latest update
|
||||
|
||||
---
|
||||
|
||||
## Package Coverage Summary
|
||||
|
||||
| Package | Coverage | Status | Priority | Change |
|
||||
|---------|----------|--------|----------|--------|
|
||||
| `pkg/hold` | 98.0% | ✅ Excellent | - | - |
|
||||
| `pkg/s3` | 97.4% | ✅ Excellent | - | - |
|
||||
| `pkg/appview/licenses` | 93.0% | ✅ Excellent | - | - |
|
||||
| `pkg/appview` | 81.9% | ✅ Excellent | - | +0.1% |
|
||||
| `pkg/logging` | 75.0% | ✅ Good | - | - |
|
||||
| `pkg/auth/token` | 68.8% | 🟡 Good | - | - |
|
||||
| `pkg/appview/middleware` | 57.8% | 🟡 Good | - | - |
|
||||
| `pkg/auth` | 55.7% | 🟡 Needs work | Medium | - |
|
||||
| `pkg/hold/oci` | 51.9% | 🟡 Needs work | Medium | - |
|
||||
| `pkg/appview/storage` | 51.4% | 🟡 Needs work | **High** | - |
|
||||
| `pkg/auth/oauth` | 50.7% | 🟡 Needs work | High | 🎯 **+2.4%** |
|
||||
| `pkg/hold/pds` | 47.2% | 🟡 Needs work | Low | - |
|
||||
| `pkg/appview/db` | 41.2% | 🟡 Needs work | Medium | 🔴 **-3.6%** |
|
||||
| `pkg/appview/holdhealth` | 41.0% | 🟡 Needs work | Low | - |
|
||||
| `pkg/atproto` | 27.8% | 🟡 Needs work | High | 🎯 **+1.7%** |
|
||||
| `pkg/appview/readme` | 27.2% | 🟡 Needs work | Low | - |
|
||||
| `pkg/appview/handlers` | 19.7% | 🟡 Needs work | Low | 🎯 **+17.6%** |
|
||||
| `pkg/appview/jetstream` | 11.6% | 🟡 Needs work | Medium | - |
|
||||
| `pkg/appview/routes` | 10.4% | 🟡 Needs work | Low | - |
|
||||
|
||||
**⚠️ Notes on Coverage Changes:**
|
||||
|
||||
Several packages show decreased percentages despite improvements. This is due to:
|
||||
1. **New test files added** - Coverage now tracks previously untested files
|
||||
2. **Statement weighting** - Large untested functions (like `Repository()` at 0% in middleware) lower overall package percentage
|
||||
3. **More comprehensive tracking** - Better coverage analysis reveals gaps that were previously invisible
|
||||
|
||||
**Specific file-level improvements (hidden by package averages):**
|
||||
- `pkg/appview/middleware/auth.go`: 98.8% average (excellent)
|
||||
- `pkg/appview/middleware/registry.go`: 90.8% average (excellent)
|
||||
- `pkg/appview/storage/manifest_store.go`: 0% → 85%+ (critical improvement)
|
||||
- `pkg/atproto/client.go`: 74.8% average (good)
|
||||
- `pkg/atproto/resolver.go`: 74.5% average (good)
|
||||
|
||||
**Key Insight:** Focus on file-level coverage for critical paths rather than package averages, as new comprehensive testing can paradoxically lower package percentages while improving actual test quality.
|
||||
|
||||
---
|
||||
|
||||
## Recently Completed ✅
|
||||
|
||||
### ✅ pkg/appview/storage/manifest_store.go (85%+ coverage) - **COMPLETED** 🎉
|
||||
|
||||
**Achievement:** Improved from 0% to 85%+ (Critical Priority #1 from previous plan)
|
||||
|
||||
**Well-covered functions:**
|
||||
- `NewManifestStore()` - 100% ✅
|
||||
- `Exists()` - 100% ✅
|
||||
- `Get()` - 85.7% ✅
|
||||
- `Put()` - 75.5% ✅
|
||||
- `Delete()` - 100% ✅
|
||||
- `digestToRKey()` - 100% ✅
|
||||
- `GetLastFetchedHoldDID()` - 100% ✅
|
||||
- `extractConfigLabels()` - 90.0% ✅
|
||||
- `resolveDIDToHTTPSEndpoint()` - 100% ✅
|
||||
|
||||
**Why This Was Critical:**
|
||||
- Core OCI manifest operations (store/retrieve/delete)
|
||||
- ATProto record conversion
|
||||
- Digest-based addressing
|
||||
- Essential for registry functionality
|
||||
|
||||
**Remaining gaps:**
|
||||
- `notifyHoldAboutManifest()` - 0% (background notification, less critical)
|
||||
|
||||
## Critical Priority: Core Registry Functionality
|
||||
|
||||
These components are essential to registry operation and still need coverage.
|
||||
|
||||
### ⭐ pkg/appview/storage (51.4% coverage) - **HIGHEST PRIORITY**
|
||||
|
||||
**Status:** Manifest operations completed ✅, blob write operations remain critical gap
|
||||
|
||||
#### proxy_blob_store.go (Partial coverage) - **HIGHEST PRIORITY** 🎯
|
||||
|
||||
**Why Critical:** Handles all blob upload/download operations for the registry
|
||||
|
||||
**Well-covered (blob reads and helpers):**
|
||||
- `NewProxyBlobStore()` - 100% ✅
|
||||
- `doAuthenticatedRequest()` - 100% ✅
|
||||
- `getPresignedURL()` - 70% ✅
|
||||
- `startMultipartUpload()` - 70% ✅
|
||||
- `getPartUploadInfo()` - 70% ✅
|
||||
- `completeMultipartUpload()` - 75% ✅
|
||||
- `abortMultipartUpload()` - 70.6% ✅
|
||||
- `Get()` - 68.8% ✅
|
||||
- `Open()` - 62.5% ✅
|
||||
|
||||
**Needs improvement:**
|
||||
- `Stat()` - 26.3% 📊
|
||||
- `checkReadAccess()` - 25.0% 📊
|
||||
|
||||
**Critical gaps (0% coverage):**
|
||||
- `Put()` - Main upload entry point (CRITICAL)
|
||||
- `Create()` - Blob creation (CRITICAL)
|
||||
- `Delete()` - Blob deletion
|
||||
- `ServeBlob()` - Blob serving
|
||||
- `Resume()` - Upload resumption
|
||||
- `checkWriteAccess()` - Write authorization
|
||||
|
||||
**Writer interface (0% coverage - CRITICAL for uploads):**
|
||||
- `Write()` - Write data to multipart upload
|
||||
- `flushPart()` - Flush buffered part
|
||||
- `ReadFrom()` - io.ReaderFrom implementation
|
||||
- `Commit()` - Finalize upload
|
||||
- `Cancel()` - Cancel upload
|
||||
- `Close()` - Close writer
|
||||
- `Size()` - Get written size
|
||||
- `ID()` - Get upload ID
|
||||
- `StartedAt()` - Get start time
|
||||
- `Seek()` - Seek in upload
|
||||
|
||||
**Test Scenarios Needed:**
|
||||
1. Full multipart upload flow: `Put()` → `Create()` → `Write()` → `Commit()`
|
||||
2. Large blob upload with multiple parts
|
||||
3. Upload cancellation and cleanup
|
||||
4. Error handling for failed uploads
|
||||
5. Upload resumption with `Resume()`
|
||||
6. Write authorization checks
|
||||
7. Delete operations
|
||||
|
||||
#### routing_repository.go (Partial coverage) - **HIGH PRIORITY**
|
||||
|
||||
**Current coverage:**
|
||||
- `Manifests()` - Returns manifest store (mostly tested via manifest_store tests)
|
||||
- `Blobs()` - 0% coverage (blob routing logic untested)
|
||||
- `Repository()` - 0% coverage (wrapper method, lower priority)
|
||||
|
||||
**Test Scenarios Needed:**
|
||||
- Blob routing using cached hold DID (pull scenario)
|
||||
- Blob routing using discovered hold DID (push scenario)
|
||||
- Error handling for missing hold
|
||||
- Hold cache integration
|
||||
|
||||
#### crew.go (11.1% coverage) - **MEDIUM PRIORITY**
|
||||
**Functions:**
|
||||
- `EnsureCrewMembership()` - 11.1%
|
||||
- `requestCrewMembership()` - 0%
|
||||
|
||||
**Test Scenarios Needed:**
|
||||
- Valid crew member with permissions
|
||||
- Crew member without required permission
|
||||
- Non-member access denial
|
||||
- Crew membership request flow
|
||||
|
||||
#### hold_cache.go (93% coverage) - **EXCELLENT** ✅
|
||||
|
||||
**Well-covered:**
|
||||
- `init()` - 80% ✅
|
||||
- `GetGlobalHoldCache()` - 100% ✅
|
||||
- `Set()` - 100% ✅
|
||||
- `Get()` - 100% ✅
|
||||
- `Cleanup()` - 100% ✅
|
||||
|
||||
---
|
||||
|
||||
## High Priority: Supporting Infrastructure
|
||||
|
||||
### 🔴 pkg/auth/oauth (48.3% coverage, improved from 40.4%)
|
||||
|
||||
OAuth implementation has test files but many functions remain untested.
|
||||
|
||||
#### client.go - Session Management (Refresher) (Partial coverage)
|
||||
|
||||
**Well-covered:**
|
||||
- `NewRefresher()` - 100% ✅
|
||||
- `SetUISessionStore()` - 100% ✅
|
||||
|
||||
**Critical gaps (0% coverage):**
|
||||
- `GetSession()` - 0% (CRITICAL - main session retrieval)
|
||||
- `resumeSession()` - 0% (CRITICAL - session resumption)
|
||||
- `InvalidateSession()` - 0%
|
||||
- `GetSessionID()` - 0%
|
||||
|
||||
**Test Scenarios Needed:**
|
||||
- Session retrieval and caching
|
||||
- Token refresh flow
|
||||
- Concurrent refresh handling (per-DID locking)
|
||||
|
||||
**Note:** Refresher functionality merged into client.go (previously separate refresher.go file)
|
||||
- Cache expiration
|
||||
- Error handling for failed refreshes
|
||||
|
||||
#### server.go (Partial coverage)
|
||||
|
||||
**Well-covered:**
|
||||
- `NewServer()` - 100% ✅
|
||||
- `SetRefresher()` - 100% ✅
|
||||
- `SetUISessionStore()` - 100% ✅
|
||||
- `SetPostAuthCallback()` - 100% ✅
|
||||
- `renderRedirectToSettings()` - 80.0% ✅
|
||||
- `renderError()` - 83.3% ✅
|
||||
|
||||
**Critical gaps:**
|
||||
- `ServeAuthorize()` - 36.8% (needs more coverage)
|
||||
- `ServeCallback()` - 16.3% (CRITICAL - main OAuth callback handler)
|
||||
|
||||
**Test Scenarios Needed:**
|
||||
- Authorization flow initiation
|
||||
- Callback handling with valid code
|
||||
- Error handling for invalid state/code
|
||||
- DPoP proof validation
|
||||
- State parameter validation
|
||||
|
||||
#### interactive.go (41.7% coverage)
|
||||
**Function:**
|
||||
- `InteractiveFlowWithCallback()` - 41.7%
|
||||
|
||||
**Test Scenarios Needed:**
|
||||
- Two-phase callback setup
|
||||
- Browser interaction flow
|
||||
- Callback server lifecycle
|
||||
|
||||
#### client.go (Excellent coverage) ✅
|
||||
|
||||
**Well-covered:**
|
||||
- `NewApp()` - 100% ✅
|
||||
- `NewAppWithScopes()` - 100% ✅
|
||||
- `NewClientConfigWithScopes()` - 80.0% ✅
|
||||
- `GetConfig()` - 100% ✅
|
||||
- `StartAuthFlow()` - 75.0% ✅
|
||||
- `ClientIDWithScopes()` - 75.0% ✅
|
||||
- `RedirectURI()` - 100% ✅
|
||||
- `GetDefaultScopes()` - 100% ✅
|
||||
- `ScopesMatch()` - 100% ✅
|
||||
|
||||
**Improved (from previous 0%):**
|
||||
- `ProcessCallback()` - Improved coverage
|
||||
- `ResumeSession()` - Improved coverage
|
||||
- `GetClientApp()` - Improved coverage
|
||||
- `Directory()` - Improved coverage (directory_test.go added)
|
||||
|
||||
#### store.go (Good coverage, some gaps)
|
||||
|
||||
**Well-covered:**
|
||||
- `NewFileStore()` - 100% ✅
|
||||
- `GetSession()` - 100% ✅
|
||||
- `SaveSession()` - 100% ✅
|
||||
|
||||
**Gaps:**
|
||||
- `GetDefaultStorePath()` - 30.0%
|
||||
|
||||
#### browser.go (Improved coverage) 🎯
|
||||
**Function:**
|
||||
- `OpenBrowser()` - Improved coverage (browser_test.go enhanced)
|
||||
|
||||
**Note:** Browser interaction testing improved, though full CI testing remains challenging
|
||||
|
||||
---
|
||||
|
||||
### 🔴 pkg/appview/db (41.2% coverage, decreased from 44.8%)
|
||||
|
||||
Database layer has test files but many functions remain untested. Coverage decrease likely due to additional code paths being tracked in existing tests.
|
||||
|
||||
#### queries.go (0% coverage for most functions)
|
||||
**Functions:**
|
||||
- Repository queries
|
||||
- Star counting
|
||||
- Pull counting
|
||||
- Search queries
|
||||
|
||||
**Test Scenarios Needed:**
|
||||
- Repository listing with pagination
|
||||
- Search functionality
|
||||
- Aggregation queries
|
||||
- Error handling
|
||||
|
||||
#### session_store.go (0% coverage)
|
||||
**Functions:**
|
||||
- Session creation and retrieval
|
||||
- Session expiration
|
||||
- Session deletion
|
||||
|
||||
**Test Scenarios Needed:**
|
||||
- Session lifecycle
|
||||
- Expiration handling
|
||||
- Cleanup of expired sessions
|
||||
- Concurrent session access
|
||||
|
||||
#### device_store.go (📊 Partial coverage)
|
||||
**Functions:**
|
||||
- OAuth device flow storage
|
||||
- Has test file but many functions still at 0%
|
||||
|
||||
**Test Scenarios Needed:**
|
||||
- User code lookups
|
||||
- Status updates (pending → approved)
|
||||
- Expiration handling
|
||||
- Delete operations
|
||||
|
||||
#### hold_store.go (📊 Partial coverage)
|
||||
**Needs integration tests for cache invalidation**
|
||||
|
||||
#### oauth_store.go (📊 Partial coverage)
|
||||
**Uncovered Functions:**
|
||||
- `GetAuthRequestInfo()` - 0%
|
||||
- `DeleteAuthRequestInfo()` - 0%
|
||||
- `SaveAuthRequestInfo()` - 0%
|
||||
|
||||
#### annotations.go (0% coverage)
|
||||
**Functions:**
|
||||
- Repository annotations and metadata
|
||||
|
||||
#### readonly.go (0% coverage)
|
||||
**Functions:**
|
||||
- Read-only database wrapper
|
||||
|
||||
---
|
||||
|
||||
## Medium Priority: Supporting Features
|
||||
|
||||
### 🟡 pkg/appview/jetstream (16.7% coverage)
|
||||
|
||||
Event processing for real-time updates.
|
||||
|
||||
#### worker.go (0% coverage)
|
||||
**Functions:**
|
||||
- Jetstream event consumption
|
||||
- Event routing to handlers
|
||||
- Repository indexing
|
||||
|
||||
#### backfill.go (0% coverage)
|
||||
**Functions:**
|
||||
- PDS repository backfilling
|
||||
- Batch processing
|
||||
|
||||
#### processor.go (📊 Partial coverage)
|
||||
**Needs more comprehensive testing**
|
||||
|
||||
---
|
||||
|
||||
### 🟡 pkg/hold/oci (69.9% coverage)
|
||||
|
||||
Multipart upload implementation for hold service. Has good coverage overall but some functions still need tests.
|
||||
|
||||
#### xrpc.go (📊 Partial coverage)
|
||||
**Functions:**
|
||||
- Multipart upload XRPC endpoints
|
||||
- Most functions tested, but edge cases need coverage
|
||||
|
||||
---
|
||||
|
||||
### 🟡 pkg/hold/pds (57.8% coverage)
|
||||
|
||||
Embedded PDS implementation. Has good test coverage for critical parts, but supporting functions need work.
|
||||
|
||||
#### repomgr.go (📊 Partial coverage)
|
||||
**Many functions still at 0% coverage**
|
||||
|
||||
#### profile.go (0% coverage)
|
||||
**Functions:**
|
||||
- Sailor profile management
|
||||
|
||||
#### layer.go (📊 Partial coverage)
|
||||
#### auth.go (0% coverage)
|
||||
#### events.go (📊 Partial coverage)
|
||||
|
||||
---
|
||||
|
||||
### 🟡 pkg/auth (55.8% coverage)
|
||||
|
||||
#### hold_local.go (0% coverage)
|
||||
**Functions:**
|
||||
- Local hold authorization
|
||||
|
||||
#### session.go (0% coverage)
|
||||
**Functions:**
|
||||
- Session management
|
||||
|
||||
#### hold_remote.go (📊 Partial coverage)
|
||||
**Needs more edge case testing**
|
||||
|
||||
---
|
||||
|
||||
### 🟡 pkg/appview/readme (Partial coverage)
|
||||
|
||||
README rendering for repo page descriptions. The cache.go was removed as README content is now stored in `io.atcr.repo.page` records and synced via Jetstream.
|
||||
|
||||
#### fetcher.go (📊 Partial coverage)
|
||||
- `RenderMarkdown()` - renders repo page description markdown
|
||||
|
||||
---
|
||||
|
||||
### 🟡 pkg/appview/routes (33.3% coverage)
|
||||
|
||||
#### routes.go (📊 Partial coverage)
|
||||
**Needs integration tests for route registration and middleware chains**
|
||||
|
||||
---
|
||||
|
||||
## Low Priority: Web UI and Supporting Features
|
||||
|
||||
### 🟢 pkg/appview/handlers (19.7% coverage, improved from 2.1%) 🎯
|
||||
|
||||
Web UI handlers. Less critical than core registry functionality but still important for user experience.
|
||||
|
||||
**Status:** Significant improvement (+17.6%)! Many handlers now have improved test coverage.
|
||||
|
||||
**Improved coverage:**
|
||||
- Multiple handler functions now have better test coverage
|
||||
- Common patterns across handlers now tested
|
||||
|
||||
**Files with partial coverage:**
|
||||
- `common.go` (📊)
|
||||
- `device.go` (📊)
|
||||
- `auth.go` (📊)
|
||||
- `repository.go` (📊)
|
||||
- `search.go` (📊)
|
||||
- `settings.go` (📊)
|
||||
- `user.go` (📊)
|
||||
- `images.go` (📊)
|
||||
- `home.go` (📊)
|
||||
- `install.go` (📊)
|
||||
- `logout.go` (📊)
|
||||
- `manifest_health.go` (📊)
|
||||
- `api.go` (📊)
|
||||
|
||||
**Note:** While individual files may still show gaps, overall handler package coverage has improved significantly.
|
||||
|
||||
---
|
||||
|
||||
### 🟢 pkg/appview/holdhealth (66.1% coverage)
|
||||
|
||||
Hold health checking. Adequate coverage overall.
|
||||
|
||||
#### worker.go (📊 Partial coverage)
|
||||
**Could use more edge case testing**
|
||||
|
||||
---
|
||||
|
||||
### 🟢 pkg/appview/ui.go (0% coverage)
|
||||
|
||||
UI initialization and setup. Low priority.
|
||||
|
||||
---
|
||||
|
||||
## Recommended Testing Order
|
||||
|
||||
### Phase 1: Critical Infrastructure ✅ **NEARLY COMPLETE** (Target: 45% overall)
|
||||
|
||||
**Completed:**
|
||||
1. ✅ `pkg/appview/middleware/auth.go` - Authentication (0% → 98.8% avg)
|
||||
2. ✅ `pkg/appview/middleware/registry.go` - Core routing (0% → 90.8% avg)
|
||||
3. ✅ `pkg/atproto/client.go` - PDS client (0% → 74.8%)
|
||||
4. ✅ `pkg/atproto/resolver.go` - Identity resolution (0% → 74.5%)
|
||||
5. ✅ `pkg/appview/storage/manifest_store.go` - Manifest operations (0% → 85%+) **🎉 COMPLETED**
|
||||
6. ✅ `pkg/appview/storage/profile.go` - Sailor profiles (NEW → 98%+) **🎉 COMPLETED**
|
||||
|
||||
**Remaining (HIGHEST PRIORITY):**
|
||||
7. ⭐⭐⭐ `pkg/appview/storage/proxy_blob_store.go` - Blob write operations **CRITICAL**
|
||||
- `Put()`, `Create()`, Writer interface (0% → 80%+)
|
||||
- Essential for docker push operations
|
||||
8. ⭐ `pkg/appview/storage/routing_repository.go` - Blob routing
|
||||
- `Blobs()` method (0% → 80%+)
|
||||
|
||||
**Current Status:** Overall coverage improved from 37.7% → 39.0% (+1.3%). On track for 45% with Phase 1 completion.
|
||||
|
||||
### Phase 2: Supporting Infrastructure (Target: 50% overall)
|
||||
|
||||
**In Progress:**
|
||||
9. 🔴 `pkg/appview/db/*` - Database layer (41.2%, needs improvement)
|
||||
- queries.go, session_store.go, device_store.go
|
||||
10. 🔴 `pkg/auth/oauth/client.go` - Session management (Refresher) (Partial → 70%+)
|
||||
- `GetSession()`, `resumeSession()` (currently 0%)
|
||||
- Note: Refresher merged into client.go
|
||||
11. 🔴 `pkg/auth/oauth/server.go` - OAuth endpoints (50.7%, continue improvements)
|
||||
- `ServeCallback()` at 16.3% needs major improvement
|
||||
12. 🔴 `pkg/appview/storage/crew.go` - Crew validation (11.1% → 80%+)
|
||||
13. 🔴 `pkg/auth/*` - Continue auth improvements (55.7% → 70%+)
|
||||
- hold_remote.go gaps, session.go
|
||||
14. 🎯 `pkg/atproto/*` - ATProto improvements (27.8%, continue adding tests)
|
||||
- directory_test.go, endpoints_test.go, utils_test.go added ✅
|
||||
|
||||
### Phase 3: Event Processing (Target: 55% overall)
|
||||
15. 🟡 `pkg/appview/jetstream/worker.go` - Event processing (0% → 70%+)
|
||||
16. 🟡 `pkg/appview/jetstream/backfill.go` - Backfill logic (0% → 70%+)
|
||||
17. 🟡 `pkg/hold/pds/*` - Fill in gaps in embedded PDS
|
||||
18. 🟡 `pkg/hold/oci/*` - OCI multipart upload improvements
|
||||
|
||||
### Phase 4: Web UI (Target: 60% overall)
|
||||
19. 🎯 `pkg/appview/handlers/*` - Web handlers (19.7%, greatly improved from 2.1%) **+17.6%** ✅
|
||||
- Continue adding handler tests to reach 50%+
|
||||
20. 🟢 `pkg/appview/routes/*` - Route registration (10.4% → 50%+)
|
||||
|
||||
---
|
||||
|
||||
## Testing Best Practices for This Codebase
|
||||
|
||||
### For Middleware Tests
|
||||
- Mock HTTP handlers to test middleware wrapping
|
||||
- Use `httptest.ResponseRecorder` for response inspection
|
||||
- Test context injection and extraction
|
||||
- Mock ATProto client for PDS interactions
|
||||
|
||||
### For Storage Tests
|
||||
- Mock `distribution` interfaces (BlobStore, ManifestService)
|
||||
- Use in-memory implementations where possible
|
||||
- Test error propagation from underlying storage
|
||||
- Mock hold XRPC endpoints
|
||||
|
||||
### For Database Tests
|
||||
- Use in-memory SQLite (`:memory:`)
|
||||
- Run migrations in test setup
|
||||
- Clean up after each test
|
||||
- Test concurrent operations where relevant
|
||||
|
||||
### For Authorization Tests
|
||||
- Mock ATProto client for crew lookups
|
||||
- Test both legacy and new hold models
|
||||
- Test permission combinations
|
||||
- Mock service token acquisition
|
||||
|
||||
### For OAuth Tests
|
||||
- Mock HTTP servers for PDS endpoints
|
||||
- Test DPoP proof generation/validation
|
||||
- Test PAR request flow
|
||||
- Mock browser interaction
|
||||
|
||||
### For ATProto Tests
|
||||
- Mock HTTP responses for resolver tests
|
||||
- Test DID document parsing
|
||||
- Mock XRPC endpoints
|
||||
- Test authentication flows
|
||||
|
||||
---
|
||||
|
||||
## Coverage Goals
|
||||
|
||||
**Current:** 39.0% (improved from 37.7%, +1.3%)
|
||||
**Previous:** 37.7% (improved from 33.5%, +4.2%)
|
||||
**Total improvement:** 39.0% vs 31.2% baseline = **+7.8%**
|
||||
|
||||
**Top Packages by Coverage:**
|
||||
- ✅ `pkg/hold`: 98.0% (excellent)
|
||||
- ✅ `pkg/s3`: 97.4% (excellent)
|
||||
- ✅ `pkg/appview/licenses`: 93.0% (excellent)
|
||||
- ✅ `pkg/appview`: 81.8% (excellent)
|
||||
- ✅ `pkg/logging`: 75.0% (good)
|
||||
|
||||
**Key File-Level Achievements:**
|
||||
- ✅ `pkg/appview/middleware/auth.go`: 98.8% avg (excellent)
|
||||
- ✅ `pkg/appview/middleware/registry.go`: 90.8% avg (excellent)
|
||||
- ✅ `pkg/appview/storage/manifest_store.go`: 85%+ (CRITICAL improvement from 0%)
|
||||
- ✅ `pkg/appview/storage/profile.go`: 98%+ (new file, excellent)
|
||||
- ✅ `pkg/atproto/client.go`: 74.8% (good)
|
||||
- ✅ `pkg/atproto/resolver.go`: 74.5% (good)
|
||||
|
||||
**Packages Needing Work:**
|
||||
- 🟡 `pkg/auth/token`: 68.8% (good)
|
||||
- 🟡 `pkg/appview/middleware`: 57.8% (package avg lowered by Repository())
|
||||
- 🟡 `pkg/auth`: 55.7% (stable)
|
||||
- 🟡 `pkg/hold/oci`: 51.9% (needs work)
|
||||
- 🟡 `pkg/appview/storage`: 51.4% (critical gaps remain)
|
||||
- 🟡 `pkg/auth/oauth`: 50.7% (improving, was 48.3%) 🎯 **+2.4%**
|
||||
- 🟡 `pkg/hold/pds`: 47.2% (needs work)
|
||||
- 🟡 `pkg/appview/db`: 41.2% (decreased from 44.8%, tracking more code paths) 🔴 **-3.6%**
|
||||
- 🟡 `pkg/atproto`: 27.8% (improving, was 26.1%) 🎯 **+1.7%**
|
||||
- 🟡 `pkg/appview/handlers`: 19.7% (greatly improved from 2.1%) 🎯 **+17.6%**
|
||||
|
||||
**Short-term Goal (Phase 1 completion):** 45%+
|
||||
- ✅ Cover all critical middleware (**COMPLETE**)
|
||||
- ✅ Cover ATProto client and resolver (**COMPLETE**)
|
||||
- ✅ Cover storage manifest operations (**COMPLETE** 🎉)
|
||||
- ⭐ Cover storage blob write operations (**HIGHEST PRIORITY** - Put/Create/Writer)
|
||||
- ⭐ Cover storage blob routing (**HIGH PRIORITY**)
|
||||
|
||||
**Medium-term Goal (Phase 2):** 50%+
|
||||
- Complete remaining storage layer (blob writes)
|
||||
- Improve database layer coverage (44.8% → 70%+)
|
||||
- Complete OAuth implementation (refresher.GetSession, server.ServeCallback)
|
||||
- Add storage crew validation
|
||||
|
||||
**Long-term Goal (Phase 3-4):** 55-60%
|
||||
- Event processing (jetstream)
|
||||
- Web UI handlers (currently 2.1%)
|
||||
- Comprehensive integration tests
|
||||
|
||||
**Realistic Target:** 55-60% (excluding some UI handlers and integration-heavy code)
|
||||
|
||||
**Note:** Package percentages may decrease as new files are added to coverage tracking, but this reflects improved test comprehensiveness, not regression. Focus on file-level coverage for critical paths.
|
||||
|
||||
---
|
||||
|
||||
## Notes
|
||||
|
||||
- **Test files exist:** Most files in `pkg/` now have corresponding `*_test.go` files, but many functions remain at 0% coverage
|
||||
- **SQLite vs PostgreSQL:** Current tests use SQLite. For production multi-instance deployments, consider PostgreSQL tests
|
||||
- **Concurrency:** Many components (cache, token refresher, OAuth) have concurrency concerns that need explicit testing
|
||||
- **Integration Tests:** Consider adding integration tests that spin up a real PDS + hold service for end-to-end validation
|
||||
- **Mock Strategy:** Use interfaces (like `atproto.Client`) to enable easy mocking. Consider a mock package in `pkg/testing/`
|
||||
- **Critical path first:** Focus on middleware and storage layers before web UI, as these are essential for core registry operations
|
||||
227
docs/WEBHOOKS.md
Normal file
227
docs/WEBHOOKS.md
Normal file
@@ -0,0 +1,227 @@
|
||||
# Webhooks
|
||||
|
||||
Webhooks notify external services when events occur in the registry. Payloads are JSON, signed with HMAC-SHA256 (optional), and delivered with retry (exponential backoff: 0s, 30s, 2m, 8m). Discord and Slack URLs are auto-detected and receive platform-native formatting.
|
||||
|
||||
## Current Events
|
||||
|
||||
### `push` — Image Push
|
||||
|
||||
Fires when a manifest is stored (the "logical push complete" moment). Tagless pushes (e.g., buildx platform manifests) also fire with an empty `tag` field.
|
||||
|
||||
**Bitmask:** `0x08` — Free tier
|
||||
|
||||
```json
|
||||
{
|
||||
"trigger": "push",
|
||||
"push_data": {
|
||||
"pushed_at": "2026-02-27T15:30:00Z",
|
||||
"pusher": "alice.bsky.social",
|
||||
"pusher_did": "did:plc:abc123",
|
||||
"tag": "latest",
|
||||
"digest": "sha256:abc..."
|
||||
},
|
||||
"repository": {
|
||||
"name": "myapp",
|
||||
"namespace": "alice.bsky.social",
|
||||
"repo_name": "alice.bsky.social/myapp",
|
||||
"repo_url": "https://buoy.cr/alice.bsky.social/myapp",
|
||||
"media_type": "application/vnd.oci.image.manifest.v1+json",
|
||||
"star_count": 42,
|
||||
"pull_count": 1337
|
||||
},
|
||||
"hold": {
|
||||
"did": "did:web:hold01.atcr.io",
|
||||
"endpoint": "https://hold01.atcr.io"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
`repo_url` uses `registry_domains[0]` (the pull domain) when configured, otherwise falls back to `base_url`.
|
||||
|
||||
### `scan:first` — First Scan
|
||||
|
||||
Fires the first time an image is scanned (no previous scan record exists).
|
||||
|
||||
**Bitmask:** `0x01` — Free tier
|
||||
|
||||
### `scan:all` — Every Scan
|
||||
|
||||
Fires on every scan completion.
|
||||
|
||||
**Bitmask:** `0x02` — Paid tier
|
||||
|
||||
### `scan:changed` — Vulnerability Change
|
||||
|
||||
Fires when vulnerability counts change from the previous scan. Includes a `previous` field with the old counts.
|
||||
|
||||
**Bitmask:** `0x04` — Paid tier
|
||||
|
||||
**Scan payload format** (shared by all scan triggers):
|
||||
|
||||
```json
|
||||
{
|
||||
"trigger": "scan:first",
|
||||
"holdDid": "did:web:hold01.atcr.io",
|
||||
"holdEndpoint": "https://hold01.atcr.io",
|
||||
"manifest": {
|
||||
"digest": "sha256:abc...",
|
||||
"repository": "myapp",
|
||||
"tag": "latest",
|
||||
"userDid": "did:plc:abc123",
|
||||
"userHandle": "alice.bsky.social"
|
||||
},
|
||||
"scan": {
|
||||
"scannedAt": "2026-02-27T16:00:00Z",
|
||||
"scannerVersion": "atcr-scanner-v1.0.0",
|
||||
"vulnerabilities": {
|
||||
"critical": 0,
|
||||
"high": 2,
|
||||
"medium": 5,
|
||||
"low": 12,
|
||||
"total": 19
|
||||
}
|
||||
},
|
||||
"previous": null
|
||||
}
|
||||
```
|
||||
|
||||
For `scan:changed`, the `previous` field contains the previous vulnerability counts.
|
||||
|
||||
## Billing
|
||||
|
||||
| Tier | Max Webhooks | Available Triggers |
|
||||
|------|-------------|-------------------|
|
||||
| Free | 1 | `push`, `scan:first` |
|
||||
| Paid | Per plan | All triggers |
|
||||
| Captain | Unlimited | All triggers |
|
||||
|
||||
Free users can enable both `push` and `scan:first` on their single webhook.
|
||||
|
||||
## Security
|
||||
|
||||
- **HMAC-SHA256 signing:** If a secret is set, payloads include `X-Webhook-Signature-256: sha256=<hex>`. The signature covers the delivered payload (including platform-specific formatting for Discord/Slack).
|
||||
- **Retry:** 4 attempts with exponential backoff (0s, 30s, 2m, 8m).
|
||||
- **Test delivery:** The settings UI supports sending a test payload to verify connectivity.
|
||||
|
||||
## Implementation
|
||||
|
||||
- Types: `pkg/appview/webhooks/types.go`
|
||||
- Dispatch + retry: `pkg/appview/webhooks/dispatch.go`
|
||||
- Discord/Slack formatting: `pkg/appview/webhooks/format.go`
|
||||
- UI handlers: `pkg/appview/handlers/webhooks.go`
|
||||
- Settings page SSR: `pkg/appview/handlers/settings.go`
|
||||
- Template: `pkg/appview/templates/partials/webhooks_list.html`
|
||||
- Trigger bitmask stored in `webhooks.triggers` column (integer)
|
||||
|
||||
---
|
||||
|
||||
## Future Events
|
||||
|
||||
Inspired by [Harbor's webhook model](https://goharbor.io/docs/working-with-projects/project-configuration/configure-webhooks/). These are not yet implemented but document the intended direction.
|
||||
|
||||
### `pull` — Image Pull
|
||||
|
||||
**Bitmask:** `0x10` (reserved)
|
||||
|
||||
Fires when a manifest is pulled. This is tricky because pulls go through presigned S3 URLs — the appview issues a redirect and never sees the actual blob download. Manifest fetches *are* visible to the appview, so a pull event would fire on manifest GET, not blob download.
|
||||
|
||||
**Scalability concern:** Public repos with high pull volume would generate excessive webhook traffic. Would need rate limiting or batching (e.g., "5 pulls in the last minute" digest). Not suitable for free tier without throttling.
|
||||
|
||||
**Suggested payload:**
|
||||
|
||||
```json
|
||||
{
|
||||
"trigger": "pull",
|
||||
"pull_data": {
|
||||
"pulled_at": "2026-02-27T15:30:00Z",
|
||||
"puller": "bob.bsky.social",
|
||||
"puller_did": "did:plc:def456",
|
||||
"tag": "latest",
|
||||
"digest": "sha256:abc..."
|
||||
},
|
||||
"repository": {
|
||||
"name": "myapp",
|
||||
"namespace": "alice.bsky.social",
|
||||
"repo_name": "alice.bsky.social/myapp",
|
||||
"repo_url": "https://buoy.cr/alice.bsky.social/myapp",
|
||||
"star_count": 42,
|
||||
"pull_count": 1338
|
||||
},
|
||||
"hold": {
|
||||
"did": "did:web:hold01.atcr.io",
|
||||
"endpoint": "https://hold01.atcr.io"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Anonymous pulls would have empty `puller` / `puller_did` fields.
|
||||
|
||||
### `delete` — Manifest Delete
|
||||
|
||||
**Bitmask:** `0x20` (reserved)
|
||||
|
||||
Fires when a manifest is deleted from the user's PDS. Lower priority — deletes are uncommon.
|
||||
|
||||
**Suggested payload:**
|
||||
|
||||
```json
|
||||
{
|
||||
"trigger": "delete",
|
||||
"delete_data": {
|
||||
"deleted_at": "2026-02-27T15:30:00Z",
|
||||
"deleted_by": "alice.bsky.social",
|
||||
"deleted_by_did": "did:plc:abc123",
|
||||
"tag": "v1.0.0",
|
||||
"digest": "sha256:abc..."
|
||||
},
|
||||
"repository": {
|
||||
"name": "myapp",
|
||||
"namespace": "alice.bsky.social",
|
||||
"repo_name": "alice.bsky.social/myapp",
|
||||
"repo_url": "https://buoy.cr/alice.bsky.social/myapp",
|
||||
"star_count": 42,
|
||||
"pull_count": 1337
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
No `hold` field — deletion removes the manifest record from the PDS; blob cleanup is handled separately by GC.
|
||||
|
||||
### `quota:warning` / `quota:exceeded` — Storage Quota
|
||||
|
||||
**Bitmask:** `0x40` (warning), `0x80` (exceeded) — reserved
|
||||
|
||||
Fires when a hold's storage quota reaches a threshold or is exceeded. Open design questions:
|
||||
|
||||
- **Thresholds:** Harbor uses a single warning threshold (85%). Options: fixed 80/90/100%, or configurable per hold.
|
||||
- **Recipient:** Who gets the webhook — the user who pushed (triggering the quota check), the hold captain, or both? Likely the captain, since they own the storage.
|
||||
- **Scope:** Per-user quotas (crew member limits) vs per-hold quotas (total storage). Both exist in the quota system.
|
||||
|
||||
**Suggested payload:**
|
||||
|
||||
```json
|
||||
{
|
||||
"trigger": "quota:warning",
|
||||
"quota_data": {
|
||||
"timestamp": "2026-02-27T15:30:00Z",
|
||||
"usage_bytes": 8589934592,
|
||||
"limit_bytes": 10737418240,
|
||||
"usage_percent": 80,
|
||||
"threshold_percent": 80
|
||||
},
|
||||
"hold": {
|
||||
"did": "did:web:hold01.atcr.io",
|
||||
"endpoint": "https://hold01.atcr.io"
|
||||
},
|
||||
"user": {
|
||||
"did": "did:plc:abc123",
|
||||
"handle": "alice.bsky.social"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Events explicitly not planned
|
||||
|
||||
- **Scan failed / scan stopped** — Server-side operational issues, not user-actionable. Belongs in ops monitoring (logs, alerting), not user-facing webhooks.
|
||||
- **Replication** — No replication feature in ATCR.
|
||||
- **Tag retention** — No retention policies yet.
|
||||
422
docs/appview.md
422
docs/appview.md
@@ -6,23 +6,21 @@
|
||||
|
||||
**AppView** is the frontend server component of ATCR. It serves as the OCI-compliant registry API endpoint and web interface that Docker clients interact with when pushing and pulling container images.
|
||||
|
||||
### What AppView Does
|
||||
|
||||
AppView is the orchestration layer that:
|
||||
|
||||
- **Serves the OCI Distribution API V2** - Compatible with Docker, containerd, podman, and all OCI clients
|
||||
- **Resolves ATProto identities** - Converts handles (`alice.bsky.social`) and DIDs (`did:plc:xyz123`) to PDS endpoints
|
||||
- **Routes manifests** - Stores container image manifests as ATProto records in users' Personal Data Servers
|
||||
- **Routes blobs** - Proxies blob (layer) operations to hold services for S3-compatible storage
|
||||
- **Provides web UI** - Browse repositories, search images, view tags, track pull counts, manage stars
|
||||
- **Manages authentication** - Validates OAuth tokens and issues registry JWTs to Docker clients
|
||||
- **Provides web UI** - Browse repositories, search images, view tags, track pull counts, manage stars, vulnerability scan results
|
||||
- **Manages authentication** - ATProto OAuth with device authorization flow, issues registry JWTs to Docker clients
|
||||
|
||||
### The ATCR Ecosystem
|
||||
|
||||
AppView is the **frontend** of a multi-component architecture:
|
||||
|
||||
1. **AppView** (this component) - Registry API + web interface
|
||||
2. **[Hold Service](https://atcr.io/r/evan.jarrett.net/atcr-hold)** - Storage backend with embedded PDS for blob storage
|
||||
2. **[Hold Service](hold.md)** - Storage backend with embedded PDS for blob storage
|
||||
3. **Credential Helper** - Client-side tool for ATProto OAuth authentication
|
||||
|
||||
**Data flow:**
|
||||
@@ -45,277 +43,231 @@ Most users can simply use **https://atcr.io** - you don't need to run your own A
|
||||
- Maintain full control over registry infrastructure
|
||||
|
||||
**Prerequisites:**
|
||||
- A running [Hold service](https://atcr.io/r/evan.jarrett.net/atcr-hold) (required for blob storage)
|
||||
- A running [Hold service](hold.md) (required for blob storage)
|
||||
- (Optional) Domain name with SSL/TLS certificates for production
|
||||
- (Optional) Access to ATProto Jetstream for real-time indexing
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Using Docker Compose
|
||||
|
||||
The fastest way to run AppView alongside a Hold service:
|
||||
### 1. Build the Docker image
|
||||
|
||||
```bash
|
||||
# Clone repository
|
||||
git clone https://tangled.org/@evan.jarrett.net/at-container-registry
|
||||
cd atcr
|
||||
docker build -t atcr-appview:latest -f Dockerfile.appview .
|
||||
```
|
||||
|
||||
# Copy and configure environment
|
||||
cp .env.appview.example .env.appview
|
||||
# Edit .env.appview - set ATCR_DEFAULT_HOLD_DID (see Configuration below)
|
||||
This produces a ~30MB scratch image with a statically-linked binary.
|
||||
|
||||
# Start services
|
||||
docker-compose up -d
|
||||
### 2. Generate a config file
|
||||
|
||||
# Verify
|
||||
```bash
|
||||
docker run --rm atcr-appview config init > config-appview.yaml
|
||||
```
|
||||
|
||||
This creates a fully-commented YAML file with all available options and their defaults. You can also generate it from a local binary:
|
||||
|
||||
```bash
|
||||
./bin/atcr-appview config init config-appview.yaml
|
||||
```
|
||||
|
||||
### 3. Set the required field
|
||||
|
||||
Edit `config-appview.yaml` and set `server.default_hold_did` to your hold service's DID:
|
||||
|
||||
```yaml
|
||||
server:
|
||||
default_hold_did: "did:web:127.0.0.1:8080" # local dev
|
||||
# default_hold_did: "did:web:hold01.example.com" # production
|
||||
```
|
||||
|
||||
This is the **only required configuration field**. To find a hold's DID, visit its `/.well-known/did.json` endpoint.
|
||||
|
||||
For production, also set your public URL:
|
||||
|
||||
```yaml
|
||||
server:
|
||||
base_url: "https://registry.example.com"
|
||||
default_hold_did: "did:web:hold01.example.com"
|
||||
```
|
||||
|
||||
### 4. Run
|
||||
|
||||
```bash
|
||||
docker run -d \
|
||||
-v ./config-appview.yaml:/config.yaml:ro \
|
||||
-v atcr-data:/var/lib/atcr \
|
||||
-p 5000:5000 \
|
||||
atcr-appview serve --config /config.yaml
|
||||
```
|
||||
|
||||
### 5. Verify
|
||||
|
||||
```bash
|
||||
curl http://localhost:5000/v2/
|
||||
# Should return: {}
|
||||
|
||||
curl http://localhost:5000/health
|
||||
# Should return: {"status":"ok"}
|
||||
```
|
||||
|
||||
### Minimal Configuration
|
||||
## Configuration
|
||||
|
||||
At minimum, you must set:
|
||||
AppView uses YAML configuration with environment variable overrides. The generated `config-appview.yaml` is the canonical reference — every field is commented inline with its purpose and default value.
|
||||
|
||||
### Config loading priority (highest wins)
|
||||
|
||||
1. Environment variables (`ATCR_` prefix)
|
||||
2. YAML config file (`--config`)
|
||||
3. Built-in defaults
|
||||
|
||||
### Environment variable convention
|
||||
|
||||
YAML paths map to env vars with `ATCR_` prefix and `_` separators:
|
||||
|
||||
```
|
||||
server.default_hold_did → ATCR_SERVER_DEFAULT_HOLD_DID
|
||||
server.base_url → ATCR_SERVER_BASE_URL
|
||||
ui.database_path → ATCR_UI_DATABASE_PATH
|
||||
jetstream.backfill_enabled → ATCR_JETSTREAM_BACKFILL_ENABLED
|
||||
```
|
||||
|
||||
### Config sections overview
|
||||
|
||||
| Section | Purpose | Notes |
|
||||
|---------|---------|-------|
|
||||
| `server` | Listen address, public URL, hold DID, OAuth key, branding | Only `default_hold_did` is required |
|
||||
| `ui` | Database path, theme, libSQL sync | All have defaults; auto-creates DB on first run |
|
||||
| `auth` | JWT signing key/cert paths | Auto-generated on first run |
|
||||
| `jetstream` | Real-time ATProto event streaming, backfill sync | Runs automatically; backfill enabled by default |
|
||||
| `health` | Hold health check interval and cache TTL | Sensible defaults (15m) |
|
||||
| `log_shipper` | Remote log shipping (Victoria, OpenSearch, Loki) | Disabled by default |
|
||||
| `legal` | Terms/privacy page customization | Optional |
|
||||
| `credential_helper` | Credential helper download source | Optional |
|
||||
|
||||
### Auto-generated files
|
||||
|
||||
On first run, AppView auto-generates these under `/var/lib/atcr/`:
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `ui.db` | SQLite database (OAuth sessions, stars, pull counts, device approvals) |
|
||||
| `auth/private-key.pem` | RSA private key for signing registry JWTs |
|
||||
| `auth/private-key.crt` | X.509 certificate for JWT verification |
|
||||
| `oauth/client.key` | P-256 private key for OAuth client authentication |
|
||||
|
||||
**Persist `/var/lib/atcr/` across restarts.** Losing the auth keys invalidates all active sessions; losing the database loses OAuth state and UI data.
|
||||
|
||||
## Deployment
|
||||
|
||||
### Docker (recommended)
|
||||
|
||||
`Dockerfile.appview` builds a minimal scratch image (~30MB) containing:
|
||||
- Static `atcr-appview` binary (CGO-enabled with embedded SQLite)
|
||||
- `healthcheck` binary for container health checks
|
||||
- CA certificates and timezone data
|
||||
|
||||
**Port:** `5000` (HTTP)
|
||||
|
||||
**Volume:** `/var/lib/atcr` (auth keys, database, OAuth keys)
|
||||
|
||||
**Health check:** `GET /health` returns `{"status":"ok"}`
|
||||
|
||||
```bash
|
||||
# Required: Default hold service for blob storage
|
||||
ATCR_DEFAULT_HOLD_DID=did:web:127.0.0.1:8080
|
||||
|
||||
# Recommended for production
|
||||
ATCR_BASE_URL=https://registry.example.com
|
||||
ATCR_HTTP_ADDR=:5000
|
||||
docker run -d \
|
||||
--name atcr-appview \
|
||||
-v ./config-appview.yaml:/config.yaml:ro \
|
||||
-v atcr-data:/var/lib/atcr \
|
||||
-p 5000:5000 \
|
||||
--health-cmd '/healthcheck http://localhost:5000/health' \
|
||||
--health-interval 30s \
|
||||
--restart unless-stopped \
|
||||
atcr-appview serve --config /config.yaml
|
||||
```
|
||||
|
||||
See **Configuration Reference** below for all options.
|
||||
### Production with reverse proxy
|
||||
|
||||
## Configuration Reference
|
||||
AppView serves HTTP on port 5000. For production, put a reverse proxy in front for HTTPS termination. The repository includes a working Caddy + Docker Compose setup at [`deploy/docker-compose.prod.yml`](../deploy/docker-compose.prod.yml) that runs AppView, Hold, and Caddy together with automatic TLS.
|
||||
|
||||
AppView is configured entirely via environment variables. Load them with:
|
||||
```bash
|
||||
source .env.appview
|
||||
./bin/atcr-appview serve
|
||||
A minimal production compose override:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
atcr-appview:
|
||||
image: atcr-appview:latest
|
||||
command: ["serve", "--config", "/config.yaml"]
|
||||
environment:
|
||||
ATCR_SERVER_BASE_URL: https://registry.example.com
|
||||
ATCR_SERVER_DEFAULT_HOLD_DID: did:web:hold.example.com
|
||||
volumes:
|
||||
- ./config-appview.yaml:/config.yaml:ro
|
||||
- atcr-appview-data:/var/lib/atcr
|
||||
healthcheck:
|
||||
test: ["CMD", "/healthcheck", "http://localhost:5000/health"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
|
||||
volumes:
|
||||
atcr-appview-data:
|
||||
```
|
||||
|
||||
Or via Docker Compose (recommended).
|
||||
### Systemd (bare metal)
|
||||
|
||||
### Server Configuration
|
||||
|
||||
#### `ATCR_HTTP_ADDR`
|
||||
- **Default:** `:5000`
|
||||
- **Description:** HTTP listen address for the registry API and web UI
|
||||
- **Example:** `:5000`, `:8080`, `0.0.0.0:5000`
|
||||
|
||||
#### `ATCR_BASE_URL`
|
||||
- **Default:** Auto-detected from `ATCR_HTTP_ADDR` (e.g., `http://127.0.0.1:5000`)
|
||||
- **Description:** Public URL for the AppView service. Used to generate OAuth redirect URIs and JWT realm claims.
|
||||
- **Development:** Auto-detection works fine (`http://127.0.0.1:5000`)
|
||||
- **Production:** Set to your public URL (e.g., `https://atcr.example.com`)
|
||||
- **Example:** `https://atcr.io`, `http://127.0.0.1:5000`
|
||||
|
||||
#### `ATCR_SERVICE_NAME`
|
||||
- **Default:** Derived from `ATCR_BASE_URL` hostname, or `atcr.io`
|
||||
- **Description:** Service name used for JWT `service` and `issuer` fields. Controls token scope.
|
||||
- **Example:** `atcr.io`, `registry.example.com`
|
||||
|
||||
#### `ATCR_DEBUG_ADDR`
|
||||
- **Default:** `:5001`
|
||||
- **Description:** Debug listen address for pprof debugging endpoints
|
||||
- **Example:** `:5001`, `:6060`
|
||||
|
||||
### Storage Configuration
|
||||
|
||||
#### `ATCR_DEFAULT_HOLD_DID` ⚠️ REQUIRED
|
||||
- **Default:** None (required)
|
||||
- **Description:** DID of the default hold service for blob storage. Used when users don't have their own hold configured in their sailor profile. AppView routes all blob operations to this hold.
|
||||
- **Format:** `did:web:hostname[:port]`
|
||||
- **Docker Compose:** `did:web:atcr-hold:8080` (internal Docker network)
|
||||
- **Local dev:** `did:web:127.0.0.1:8080`
|
||||
- **Production:** `did:web:hold01.atcr.io`
|
||||
- **Note:** This hold must be reachable from AppView. To find a hold's DID, visit `https://hold-url/.well-known/did.json`
|
||||
|
||||
### Authentication Configuration
|
||||
|
||||
#### `ATCR_AUTH_KEY_PATH`
|
||||
- **Default:** `/var/lib/atcr/auth/private-key.pem`
|
||||
- **Description:** Path to JWT signing private key (RSA). Auto-generated if missing.
|
||||
- **Note:** Keep this secure - it signs all registry JWTs issued to Docker clients
|
||||
|
||||
#### `ATCR_AUTH_CERT_PATH`
|
||||
- **Default:** `/var/lib/atcr/auth/private-key.crt`
|
||||
- **Description:** Path to JWT signing certificate. Auto-generated if missing.
|
||||
- **Note:** Paired with `ATCR_AUTH_KEY_PATH`
|
||||
|
||||
#### `ATCR_TOKEN_EXPIRATION`
|
||||
- **Default:** `300` (5 minutes)
|
||||
- **Description:** JWT token expiration in seconds. Registry JWTs are short-lived for security.
|
||||
- **Recommendation:** Keep between 300-900 seconds (5-15 minutes)
|
||||
|
||||
### Web UI Configuration
|
||||
|
||||
#### `ATCR_UI_ENABLED`
|
||||
- **Default:** `true`
|
||||
- **Description:** Enable the web interface. Set to `false` to run registry API only (no web UI, no database).
|
||||
- **Use case:** API-only deployments where you don't need the browsing interface
|
||||
|
||||
#### `ATCR_UI_DATABASE_PATH`
|
||||
- **Default:** `/var/lib/atcr/ui.db`
|
||||
- **Description:** SQLite database path for UI data (OAuth sessions, stars, pull counts, repository metadata)
|
||||
- **Note:** For multi-instance deployments, use PostgreSQL (see production docs)
|
||||
|
||||
### Logging Configuration
|
||||
|
||||
#### `ATCR_LOG_LEVEL`
|
||||
- **Default:** `info`
|
||||
- **Options:** `debug`, `info`, `warn`, `error`
|
||||
- **Description:** Log verbosity level
|
||||
- **Development:** Use `debug` for detailed troubleshooting
|
||||
- **Production:** Use `info` or `warn`
|
||||
|
||||
#### `ATCR_LOG_FORMATTER`
|
||||
- **Default:** `text`
|
||||
- **Options:** `text`, `json`
|
||||
- **Description:** Log output format
|
||||
- **Production:** Use `json` for structured logging (easier to parse with log aggregators)
|
||||
|
||||
### Hold Health Check Configuration
|
||||
|
||||
AppView periodically checks if hold services are reachable and caches results to display health indicators in the UI.
|
||||
|
||||
#### `ATCR_HEALTH_CHECK_INTERVAL`
|
||||
- **Default:** `15m`
|
||||
- **Description:** How often to check health of hold endpoints in the background
|
||||
- **Format:** Duration string (e.g., `5m`, `15m`, `30m`, `1h`)
|
||||
- **Recommendation:** 15-30 minutes for production
|
||||
|
||||
#### `ATCR_HEALTH_CACHE_TTL`
|
||||
- **Default:** `15m`
|
||||
- **Description:** How long to cache health check results before re-checking
|
||||
- **Format:** Duration string (e.g., `15m`, `30m`, `1h`)
|
||||
- **Note:** Should be >= `ATCR_HEALTH_CHECK_INTERVAL` for efficiency
|
||||
|
||||
### Jetstream Configuration (ATProto Event Streaming)
|
||||
|
||||
Jetstream provides real-time indexing of ATProto records (manifests, tags) into the AppView database for the web UI.
|
||||
|
||||
#### `JETSTREAM_URL`
|
||||
- **Default:** `wss://jetstream2.us-west.bsky.network/subscribe`
|
||||
- **Description:** Jetstream WebSocket URL for real-time ATProto events
|
||||
- **Note:** Connects to Bluesky's public Jetstream by default
|
||||
|
||||
#### `ATCR_BACKFILL_ENABLED`
|
||||
- **Default:** `false`
|
||||
- **Description:** Enable periodic sync of historical ATProto records. Set to `true` for production to ensure database completeness.
|
||||
- **Recommendation:** Enable for production AppView instances
|
||||
|
||||
#### `ATCR_RELAY_ENDPOINT`
|
||||
- **Default:** `https://relay1.us-east.bsky.network`
|
||||
- **Description:** ATProto relay endpoint for backfill sync API
|
||||
- **Note:** Used when `ATCR_BACKFILL_ENABLED=true`
|
||||
|
||||
#### `ATCR_BACKFILL_INTERVAL`
|
||||
- **Default:** `1h`
|
||||
- **Description:** How often to run backfill sync
|
||||
- **Format:** Duration string (e.g., `30m`, `1h`, `2h`, `24h`)
|
||||
|
||||
### Legacy Configuration
|
||||
|
||||
#### `TEST_MODE`
|
||||
- **Default:** `false`
|
||||
- **Description:** Enable test mode (skips some validations). Do not use in production.
|
||||
|
||||
## Web Interface Features
|
||||
|
||||
The AppView web UI provides:
|
||||
|
||||
- **Home page** - Featured repositories and recent pushes feed
|
||||
- **Repository pages** - View tags, manifests, pull instructions, health status
|
||||
- **Search** - Find repositories by owner handle or repository name
|
||||
- **User profiles** - View a user's repositories and activity
|
||||
- **Stars** - Favorite repositories (requires OAuth login)
|
||||
- **Pull counts** - Track image pull statistics
|
||||
- **Multi-arch support** - Display platform-specific manifests (linux/amd64, linux/arm64)
|
||||
- **Health indicators** - Real-time hold service reachability status
|
||||
- **Install scripts** - Host credential helper installation scripts at `/install.sh`
|
||||
For non-Docker deployments, see the systemd service templates in [`deploy/upcloud/`](../deploy/upcloud/) which include security hardening (dedicated user, filesystem protection, private tmp).
|
||||
|
||||
## Deployment Scenarios
|
||||
|
||||
### Public Registry (like atcr.io)
|
||||
### Public Registry
|
||||
|
||||
Open to all ATProto users:
|
||||
|
||||
```bash
|
||||
# AppView config
|
||||
ATCR_BASE_URL=https://registry.example.com
|
||||
ATCR_DEFAULT_HOLD_DID=did:web:hold01.example.com
|
||||
ATCR_UI_ENABLED=true
|
||||
ATCR_BACKFILL_ENABLED=true
|
||||
|
||||
# Hold config (linked hold service)
|
||||
HOLD_PUBLIC=true # Allow public pulls
|
||||
HOLD_ALLOW_ALL_CREW=true # Allow all authenticated users to push
|
||||
```yaml
|
||||
# config-appview.yaml
|
||||
server:
|
||||
base_url: "https://registry.example.com"
|
||||
default_hold_did: "did:web:hold01.example.com"
|
||||
jetstream:
|
||||
backfill_enabled: true
|
||||
```
|
||||
|
||||
The linked hold service should have `server.public: true` and `registration.allow_all_crew: true`.
|
||||
|
||||
### Private Organizational Registry
|
||||
|
||||
Restricted to crew members only:
|
||||
|
||||
```bash
|
||||
# AppView config
|
||||
ATCR_BASE_URL=https://registry.internal.example.com
|
||||
ATCR_DEFAULT_HOLD_DID=did:web:hold.internal.example.com
|
||||
ATCR_UI_ENABLED=true
|
||||
|
||||
# Hold config (linked hold service)
|
||||
HOLD_PUBLIC=false # Require auth for pulls
|
||||
HOLD_ALLOW_ALL_CREW=false # Only owner + explicit crew can push
|
||||
HOLD_OWNER=did:plc:your-org-did # Organization DID
|
||||
```yaml
|
||||
# config-appview.yaml
|
||||
server:
|
||||
base_url: "https://registry.internal.example.com"
|
||||
default_hold_did: "did:web:hold.internal.example.com"
|
||||
```
|
||||
|
||||
### Development/Testing
|
||||
The linked hold service should have `server.public: false` and `registration.allow_all_crew: false`, with an explicit `registration.owner_did` set to the organization's DID.
|
||||
|
||||
Local Docker Compose setup:
|
||||
### Local Development
|
||||
|
||||
```bash
|
||||
# AppView config
|
||||
ATCR_HTTP_ADDR=:5000
|
||||
ATCR_DEFAULT_HOLD_DID=did:web:atcr-hold:8080
|
||||
ATCR_LOG_LEVEL=debug
|
||||
|
||||
# Hold config (linked hold service)
|
||||
STORAGE_DRIVER=filesystem
|
||||
STORAGE_ROOT_DIR=/tmp/atcr-hold
|
||||
HOLD_PUBLIC=true
|
||||
HOLD_ALLOW_ALL_CREW=true
|
||||
```yaml
|
||||
# config-appview.yaml
|
||||
log_level: debug
|
||||
server:
|
||||
default_hold_did: "did:web:127.0.0.1:8080"
|
||||
test_mode: true # allows HTTP for DID resolution
|
||||
```
|
||||
|
||||
## Production Deployment
|
||||
Run a hold service locally with Minio for S3-compatible storage. See [hold.md](hold.md) for hold setup.
|
||||
|
||||
For production deployments with:
|
||||
- Multiple AppView instances (load balancing)
|
||||
- PostgreSQL database (instead of SQLite)
|
||||
- SSL/TLS certificates
|
||||
- Systemd service files
|
||||
- Log rotation
|
||||
- Monitoring
|
||||
## Web Interface
|
||||
|
||||
See **[deploy/README.md](https://tangled.org/@evan.jarrett.net/at-container-registry/blob/main/deploy/README.md)** for comprehensive production deployment guide.
|
||||
The AppView web UI provides:
|
||||
|
||||
### Quick Production Checklist
|
||||
|
||||
Before going to production:
|
||||
|
||||
- [ ] Set `ATCR_BASE_URL` to your public HTTPS URL
|
||||
- [ ] Set `ATCR_DEFAULT_HOLD_DID` to a production hold service
|
||||
- [ ] Enable Jetstream backfill (`ATCR_BACKFILL_ENABLED=true`)
|
||||
- [ ] Use `ATCR_LOG_FORMATTER=json` for structured logging
|
||||
- [ ] Secure JWT keys (`ATCR_AUTH_KEY_PATH`, `ATCR_AUTH_CERT_PATH`)
|
||||
- [ ] Configure SSL/TLS termination (nginx/Caddy/Cloudflare)
|
||||
- [ ] Set up database backups (if using SQLite, consider PostgreSQL)
|
||||
- [ ] Monitor hold health checks
|
||||
- [ ] Test OAuth flow end-to-end
|
||||
- [ ] Verify Docker push/pull works
|
||||
|
||||
## Configuration Files Reference
|
||||
|
||||
- **[.env.appview.example](https://tangled.org/@evan.jarrett.net/at-container-registry/blob/main/.env.appview.example)** - All available environment variables with documentation
|
||||
- **[deploy/.env.prod.template](https://tangled.org/@evan.jarrett.net/at-container-registry/blob/main/deploy/.env.prod.template)** - Production configuration template
|
||||
- **[deploy/README.md](https://tangled.org/@evan.jarrett.net/at-container-registry/blob/main/deploy/README.md)** - Production deployment guide
|
||||
- **[Hold Service Documentation](https://atcr.io/r/evan.jarrett.net/atcr-hold)** - Storage backend setup
|
||||
- **Home page** - Featured repositories and recent pushes
|
||||
- **Repository pages** - Tags, manifests, pull instructions, health status, vulnerability scan results
|
||||
- **Search** - Find repositories by owner handle or repository name
|
||||
- **User profiles** - View a user's repositories and starred images
|
||||
- **Stars** - Favorite repositories (requires login)
|
||||
- **Pull counts** - Image pull statistics
|
||||
- **Multi-arch support** - Platform-specific manifests (linux/amd64, linux/arm64, etc.)
|
||||
- **Health indicators** - Real-time hold service reachability
|
||||
- **Device management** - Approve and revoke Docker credential helper pairings
|
||||
- **Settings** - Choose default hold, view crew memberships, storage usage
|
||||
|
||||
493
docs/hold.md
493
docs/hold.md
@@ -1,403 +1,190 @@
|
||||
# ATCR Hold Service
|
||||
|
||||
> The storage backend component of ATCR (ATProto Container Registry)
|
||||
Hold Service is the BYOS (Bring Your Own Storage) blob storage backend for ATCR. It stores container image layers in your own S3-compatible storage (AWS S3, Storj, Minio, UpCloud, etc.) and generates presigned URLs so clients transfer data directly to/from S3. Each hold runs an embedded ATProto PDS with its own DID, repository, and crew-based access control.
|
||||
|
||||
## Overview
|
||||
Hold Service is one component of the ATCR ecosystem:
|
||||
|
||||
**Hold Service** is the storage backend component of ATCR. It enables BYOS (Bring Your Own Storage) - users can store their own container image layers in their own S3, Storj, Minio, or filesystem storage. Each hold runs as a full ATProto user with an embedded PDS, exposing both standard ATProto sync endpoints and custom XRPC endpoints for OCI multipart blob uploads.
|
||||
1. **[AppView](https://atcr.io/r/evan.jarrett.net/atcr-appview)** — Registry API + web interface
|
||||
2. **Hold Service** (this component) — Storage backend with embedded PDS
|
||||
3. **Credential Helper** — Client-side tool for ATProto OAuth authentication
|
||||
|
||||
### What Hold Service Does
|
||||
|
||||
Hold Service is the storage layer that:
|
||||
|
||||
- **Bring Your Own Storage (BYOS)** - Store your own container image layers in your own S3, Storj, Minio, or filesystem
|
||||
- **Embedded ATProto PDS** - Each hold is a full ATProto user with its own DID, repository, and identity
|
||||
- **Custom XRPC Endpoints** - OCI-compatible multipart upload endpoints (`io.atcr.hold.*`) for blob operations
|
||||
- **Presigned URL Generation** - Creates time-limited S3 URLs for direct client-to-storage transfers (~99% bandwidth reduction)
|
||||
- **Crew Management** - Controls access via captain and crew records stored in the hold's embedded PDS
|
||||
- **Standard ATProto Sync** - Exposes com.atproto.sync.* endpoints for repository synchronization and firehose
|
||||
- **Multi-Backend Support** - Works with S3, Storj, Minio, filesystem, Azure, GCS via distribution's driver system
|
||||
- **Bluesky Integration** - Optional: Posts container image push notifications from the hold's identity to Bluesky
|
||||
|
||||
### The ATCR Ecosystem
|
||||
|
||||
Hold Service is the **storage backend** of a multi-component architecture:
|
||||
|
||||
1. **[AppView](https://atcr.io/r/evan.jarrett.net/atcr-appview)** - Registry API + web interface
|
||||
2. **Hold Service** (this component) - Storage backend with embedded PDS
|
||||
3. **Credential Helper** - Client-side tool for ATProto OAuth authentication
|
||||
|
||||
**Data flow:**
|
||||
```
|
||||
Docker Client → AppView (resolves identity) → User's PDS (stores manifest)
|
||||
↓
|
||||
Hold Service (generates presigned URL)
|
||||
↓
|
||||
S3/Storj/etc. (client uploads/downloads blobs directly)
|
||||
Docker Client --> AppView (resolves identity) --> User's PDS (stores manifest)
|
||||
|
|
||||
Hold Service (generates presigned URL)
|
||||
|
|
||||
S3/Storj/etc. (client uploads/downloads directly)
|
||||
```
|
||||
|
||||
Manifests (small JSON metadata) live in users' ATProto PDS, while blobs (large binary layers) live in hold services. AppView orchestrates the routing, and hold services provide presigned URLs to eliminate bandwidth bottlenecks.
|
||||
Manifests (small JSON metadata) live in users' ATProto PDS. Blobs (large binary layers) live in hold services. AppView orchestrates the routing.
|
||||
|
||||
## When to Run Your Own Hold
|
||||
|
||||
Most users can push to the default hold at **https://hold01.atcr.io** - you don't need to run your own hold.
|
||||
Most users can push to the default hold at **https://hold01.atcr.io** — you don't need to run your own.
|
||||
|
||||
**Run your own hold if you want to:**
|
||||
- Control where your container layer data is stored (own S3 bucket, Storj, etc.)
|
||||
Run your own hold if you want to:
|
||||
- Control where your container layer data is stored (own S3 bucket, geographic region)
|
||||
- Manage access for a team or organization via crew membership
|
||||
- Reduce bandwidth costs by using presigned URLs for direct S3 transfers
|
||||
- Run a shared hold for a community or project
|
||||
- Maintain data sovereignty (keep blobs in specific geographic regions)
|
||||
- Use a CDN pull zone for faster downloads
|
||||
|
||||
**Prerequisites:**
|
||||
- S3-compatible storage (AWS S3, Storj, Minio, UpCloud, etc.) OR filesystem storage
|
||||
- (Optional) Domain name with SSL/TLS certificates for production
|
||||
- ATProto DID for hold owner (get from: `https://bsky.social/xrpc/com.atproto.identity.resolveHandle?handle=yourhandle.bsky.social`)
|
||||
**Prerequisites:** S3-compatible storage with a bucket already created, and a domain with TLS for production.
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Using Docker Compose
|
||||
|
||||
The fastest way to run Hold service with S3 storage:
|
||||
### 1. Generate Configuration
|
||||
|
||||
```bash
|
||||
# Clone repository
|
||||
git clone https://tangled.org/@evan.jarrett.net/at-container-registry
|
||||
cd atcr
|
||||
# Build the hold binary
|
||||
go build -o bin/atcr-hold ./cmd/hold
|
||||
|
||||
# Copy and configure environment
|
||||
cp .env.hold.example .env.hold
|
||||
# Edit .env.hold - set HOLD_PUBLIC_URL, HOLD_OWNER, S3 credentials (see Configuration below)
|
||||
|
||||
# Start hold service
|
||||
docker-compose -f docker-compose.hold.yml up -d
|
||||
|
||||
# Verify
|
||||
curl http://localhost:8080/.well-known/did.json
|
||||
# Generate a fully-commented config file with all defaults
|
||||
./bin/atcr-hold config init config-hold.yaml
|
||||
```
|
||||
|
||||
### Minimal Configuration
|
||||
|
||||
At minimum, you must set:
|
||||
Or generate config from Docker without building locally:
|
||||
|
||||
```bash
|
||||
# Required: Public URL (generates did:web identity)
|
||||
HOLD_PUBLIC_URL=https://hold.example.com
|
||||
|
||||
# Required: Your ATProto DID (for captain record)
|
||||
HOLD_OWNER=did:plc:your-did-here
|
||||
|
||||
# Required: Storage driver type
|
||||
STORAGE_DRIVER=s3
|
||||
|
||||
# Required for S3: Credentials and bucket
|
||||
AWS_ACCESS_KEY_ID=your-access-key
|
||||
AWS_SECRET_ACCESS_KEY=your-secret-key
|
||||
S3_BUCKET=your-bucket-name
|
||||
|
||||
# Recommended: Database directory for embedded PDS
|
||||
HOLD_DATABASE_DIR=/var/lib/atcr-hold
|
||||
docker run --rm -i $(docker build -q -f Dockerfile.hold .) config init > config-hold.yaml
|
||||
```
|
||||
|
||||
See **Configuration Reference** below for all options.
|
||||
The generated file documents every option with inline comments. Edit only what you need.
|
||||
|
||||
## Configuration Reference
|
||||
### 2. Minimal Configuration
|
||||
|
||||
Hold Service is configured entirely via environment variables. Load them with:
|
||||
```bash
|
||||
source .env.hold
|
||||
./bin/atcr-hold
|
||||
Only three things need to be set — everything else has sensible defaults:
|
||||
|
||||
```yaml
|
||||
storage:
|
||||
access_key: "YOUR_S3_ACCESS_KEY"
|
||||
secret_key: "YOUR_S3_SECRET_KEY"
|
||||
bucket: "your-bucket-name"
|
||||
endpoint: "https://gateway.storjshare.io" # omit for AWS S3
|
||||
|
||||
server:
|
||||
public_url: "https://hold.example.com"
|
||||
|
||||
registration:
|
||||
owner_did: "did:plc:your-did-here"
|
||||
```
|
||||
|
||||
Or via Docker Compose (recommended).
|
||||
- **`server.public_url`** — Your hold's public HTTPS URL. This becomes the hold's `did:web` identity.
|
||||
- **`storage.bucket`** — S3 bucket name (must already exist).
|
||||
- **`registration.owner_did`** — Your ATProto DID. Creates you as captain (admin) on first boot. Get yours from: `https://bsky.social/xrpc/com.atproto.identity.resolveHandle?handle=yourhandle.bsky.social`
|
||||
|
||||
### Server Configuration
|
||||
|
||||
#### `HOLD_PUBLIC_URL` ⚠️ REQUIRED
|
||||
- **Default:** None (required)
|
||||
- **Description:** Public URL of this hold service. Used to generate the hold's did:web identity. The hostname becomes the hold's DID.
|
||||
- **Format:** `https://hold.example.com` or `http://127.0.0.1:8080` (development)
|
||||
- **Example:** `https://hold01.atcr.io` → DID is `did:web:hold01.atcr.io`
|
||||
- **Note:** This URL must be reachable by AppView and Docker clients
|
||||
|
||||
#### `HOLD_SERVER_ADDR`
|
||||
- **Default:** `:8080`
|
||||
- **Description:** HTTP listen address for XRPC endpoints
|
||||
- **Example:** `:8080`, `:9000`, `0.0.0.0:8080`
|
||||
|
||||
#### `HOLD_PUBLIC`
|
||||
- **Default:** `false`
|
||||
- **Description:** Allow public blob reads (pulls) without authentication. Writes always require crew membership.
|
||||
- **Use cases:**
|
||||
- `true`: Public registry (anyone can pull, authenticated users can push if crew)
|
||||
- `false`: Private registry (authentication required for both push and pull)
|
||||
|
||||
### Storage Configuration
|
||||
|
||||
#### `STORAGE_DRIVER`
|
||||
- **Default:** `s3`
|
||||
- **Options:** `s3`, `filesystem`
|
||||
- **Description:** Storage backend type. S3 enables presigned URLs for direct client-to-storage transfers (~99% bandwidth reduction). Filesystem stores blobs locally (development/testing).
|
||||
|
||||
#### S3 Storage (when `STORAGE_DRIVER=s3`)
|
||||
|
||||
##### `AWS_ACCESS_KEY_ID` ⚠️ REQUIRED for S3
|
||||
- **Description:** S3 access key ID for authentication
|
||||
- **Example:** `AKIAIOSFODNN7EXAMPLE`
|
||||
|
||||
##### `AWS_SECRET_ACCESS_KEY` ⚠️ REQUIRED for S3
|
||||
- **Description:** S3 secret access key for authentication
|
||||
- **Example:** `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY`
|
||||
|
||||
##### `AWS_REGION`
|
||||
- **Default:** `us-east-1`
|
||||
- **Description:** S3 region
|
||||
- **AWS regions:** `us-east-1`, `us-west-2`, `eu-west-1`, etc.
|
||||
- **UpCloud regions:** `us-chi1`, `us-nyc1`, `de-fra1`, `uk-lon1`, `sg-sin1`
|
||||
|
||||
##### `S3_BUCKET` ⚠️ REQUIRED for S3
|
||||
- **Description:** S3 bucket name where blobs will be stored
|
||||
- **Example:** `atcr-blobs`, `my-company-registry-blobs`
|
||||
- **Note:** Bucket must already exist
|
||||
|
||||
##### `S3_ENDPOINT`
|
||||
- **Default:** None (uses AWS S3)
|
||||
- **Description:** S3-compatible endpoint URL for non-AWS providers
|
||||
- **Storj:** `https://gateway.storjshare.io`
|
||||
- **UpCloud:** `https://[bucket-id].upcloudobjects.com`
|
||||
- **Minio:** `http://minio:9000`
|
||||
- **Note:** Leave empty for AWS S3
|
||||
|
||||
#### Filesystem Storage (when `STORAGE_DRIVER=filesystem`)
|
||||
|
||||
##### `STORAGE_ROOT_DIR`
|
||||
- **Default:** `/var/lib/atcr/hold`
|
||||
- **Description:** Directory path where blobs will be stored on local filesystem
|
||||
- **Use case:** Development, testing, or single-server deployments
|
||||
- **Note:** Presigned URLs are not available with filesystem driver (hold proxies all blob transfers)
|
||||
|
||||
### Embedded PDS Configuration
|
||||
|
||||
#### `HOLD_DATABASE_DIR`
|
||||
- **Default:** `/var/lib/atcr-hold`
|
||||
- **Description:** Directory path for embedded PDS carstore (SQLite database). Carstore creates `db.sqlite3` inside this directory.
|
||||
- **Note:** This must be a directory path, NOT a file path. If empty, embedded PDS is disabled (not recommended - hold authorization requires PDS).
|
||||
|
||||
#### `HOLD_KEY_PATH`
|
||||
- **Default:** `{HOLD_DATABASE_DIR}/signing.key`
|
||||
- **Description:** Path to hold's signing key (secp256k1). Auto-generated on first run if missing.
|
||||
- **Note:** Keep this secure - it's used to sign ATProto commits in the hold's repository
|
||||
|
||||
### Access Control
|
||||
|
||||
#### `HOLD_OWNER`
|
||||
- **Default:** None
|
||||
- **Description:** Your ATProto DID. Used to create the captain record and add you as the first crew member with admin role.
|
||||
- **Get your DID:** `https://bsky.social/xrpc/com.atproto.identity.resolveHandle?handle=yourhandle.bsky.social`
|
||||
- **Example:** `did:plc:abc123xyz789`
|
||||
- **Note:** If set, the hold will initialize with your DID as owner on first run
|
||||
|
||||
#### `HOLD_ALLOW_ALL_CREW`
|
||||
- **Default:** `false`
|
||||
- **Description:** Allow any authenticated ATCR user to write to this hold (treat all as crew)
|
||||
- **Security model:**
|
||||
- `true`: Any authenticated user can push images (useful for shared/community holds)
|
||||
- `false`: Only hold owner and explicit crew members can push (verified via crew records in hold's PDS)
|
||||
- **Use cases:**
|
||||
- Public registry: `HOLD_PUBLIC=true, HOLD_ALLOW_ALL_CREW=true`
|
||||
- ATProto users only: `HOLD_PUBLIC=false, HOLD_ALLOW_ALL_CREW=true`
|
||||
- Private hold: `HOLD_PUBLIC=false, HOLD_ALLOW_ALL_CREW=false` (default)
|
||||
|
||||
### Bluesky Integration
|
||||
|
||||
#### `HOLD_BLUESKY_POSTS_ENABLED`
|
||||
- **Default:** `false`
|
||||
- **Description:** Create Bluesky posts when users push container images. Posts include image name, tag, size, and layer count.
|
||||
- **Note:** Posts are created from the hold's embedded PDS identity (did:web). Requires hold to be crawled by Bluesky relay.
|
||||
- **Enable relay crawl:** `./deploy/request-crawl.sh hold.example.com`
|
||||
|
||||
#### `HOLD_PROFILE_AVATAR`
|
||||
- **Default:** `https://imgs.blue/evan.jarrett.net/1TpTOdtS60GdJWBYEqtK22y688jajbQ9a5kbYRFtwuqrkBAE`
|
||||
- **Description:** URL to download avatar image for hold's Bluesky profile. Downloaded and uploaded as blob during bootstrap.
|
||||
- **Note:** Avatar is stored in hold's PDS and displayed on Bluesky profile
|
||||
|
||||
### Advanced Configuration
|
||||
|
||||
#### `TEST_MODE`
|
||||
- **Default:** `false`
|
||||
- **Description:** Enable test mode (skips some validations). Do not use in production.
|
||||
|
||||
#### `DISABLE_PRESIGNED_URLS`
|
||||
- **Default:** `false`
|
||||
- **Description:** Force proxy mode even with S3 configured (for testing). Disables presigned URL generation and routes all blob transfers through the hold service.
|
||||
- **Use case:** Testing, debugging, or environments where presigned URLs don't work
|
||||
|
||||
## XRPC Endpoints
|
||||
|
||||
Hold Service exposes two types of XRPC endpoints:
|
||||
|
||||
### ATProto Sync Endpoints (Standard)
|
||||
- `GET /.well-known/did.json` - DID document (did:web resolution)
|
||||
- `GET /xrpc/com.atproto.sync.getRepo` - Download full repository as CAR file
|
||||
- `GET /xrpc/com.atproto.sync.getBlob` - Get blob or presigned download URL
|
||||
- `GET /xrpc/com.atproto.sync.subscribeRepos` - WebSocket firehose for real-time events
|
||||
- `GET /xrpc/com.atproto.sync.listRepos` - List all repositories (single-user PDS)
|
||||
- `GET /xrpc/com.atproto.repo.describeRepo` - Repository metadata
|
||||
- `GET /xrpc/com.atproto.repo.getRecord` - Get record by collection and rkey
|
||||
- `GET /xrpc/com.atproto.repo.listRecords` - List records in collection
|
||||
- `POST /xrpc/com.atproto.repo.deleteRecord` - Delete record (owner/crew admin only)
|
||||
|
||||
### OCI Multipart Upload Endpoints (Custom)
|
||||
- `POST /xrpc/io.atcr.hold.initiateUpload` - Start multipart upload session
|
||||
- `POST /xrpc/io.atcr.hold.getPartUploadUrl` - Get presigned URL for uploading a part
|
||||
- `PUT /xrpc/io.atcr.hold.uploadPart` - Direct buffered part upload (alternative to presigned URLs)
|
||||
- `POST /xrpc/io.atcr.hold.completeUpload` - Finalize multipart upload
|
||||
- `POST /xrpc/io.atcr.hold.abortUpload` - Cancel multipart upload
|
||||
- `POST /xrpc/io.atcr.hold.notifyManifest` - Notify hold of manifest upload (creates layer records, Bluesky posts)
|
||||
|
||||
## Authorization Model
|
||||
|
||||
Hold Service uses crew membership records in its embedded PDS for access control:
|
||||
|
||||
### Read Access (Blob Downloads)
|
||||
|
||||
**Public Hold** (`HOLD_PUBLIC=true`):
|
||||
- Anonymous users: ✅ Allowed
|
||||
- Authenticated users: ✅ Allowed
|
||||
|
||||
**Private Hold** (`HOLD_PUBLIC=false`):
|
||||
- Anonymous users: ❌ Forbidden
|
||||
- Authenticated users with crew membership: ✅ Allowed
|
||||
- Crew must have `blob:read` permission
|
||||
|
||||
### Write Access (Blob Uploads)
|
||||
|
||||
Regardless of `HOLD_PUBLIC` setting:
|
||||
- Hold owner (from captain record): ✅ Allowed
|
||||
- Crew members with `blob:write` permission: ✅ Allowed
|
||||
- Non-crew authenticated users: Depends on `HOLD_ALLOW_ALL_CREW`
|
||||
- `HOLD_ALLOW_ALL_CREW=true`: ✅ Allowed
|
||||
- `HOLD_ALLOW_ALL_CREW=false`: ❌ Forbidden
|
||||
|
||||
### Authentication Method
|
||||
|
||||
AppView uses **service tokens** from user's PDS to authenticate with hold service:
|
||||
1. AppView calls user's PDS: `com.atproto.server.getServiceAuth` with hold DID
|
||||
2. User's PDS returns a service token scoped to the hold DID
|
||||
3. AppView includes service token in XRPC requests to hold
|
||||
4. Hold validates token and checks crew membership in its embedded PDS
|
||||
|
||||
## Deployment Scenarios
|
||||
|
||||
### Personal Hold (Single User)
|
||||
|
||||
Your own storage for your images:
|
||||
### 3. Build and Run with Docker
|
||||
|
||||
```bash
|
||||
# Hold config
|
||||
HOLD_PUBLIC_URL=https://hold.alice.com
|
||||
HOLD_OWNER=did:plc:alice-did
|
||||
HOLD_PUBLIC=false # Private (only you can pull)
|
||||
HOLD_ALLOW_ALL_CREW=false # Only you can push
|
||||
HOLD_DATABASE_DIR=/var/lib/atcr-hold
|
||||
# Build the image
|
||||
docker build -f Dockerfile.hold -t atcr-hold:latest .
|
||||
|
||||
# S3 storage
|
||||
STORAGE_DRIVER=s3
|
||||
AWS_ACCESS_KEY_ID=your-key
|
||||
AWS_SECRET_ACCESS_KEY=your-secret
|
||||
S3_BUCKET=alice-container-registry
|
||||
S3_ENDPOINT=https://gateway.storjshare.io # Using Storj
|
||||
# Run it
|
||||
docker run -d \
|
||||
--name atcr-hold \
|
||||
-p 8080:8080 \
|
||||
-v $(pwd)/config-hold.yaml:/config.yaml:ro \
|
||||
-v atcr-hold-data:/var/lib/atcr-hold \
|
||||
atcr-hold:latest serve --config /config.yaml
|
||||
```
|
||||
|
||||
### Shared Hold (Team/Organization)
|
||||
- **`/var/lib/atcr-hold`** — Persistent volume for the embedded PDS (carstore database + signing keys). Back this up.
|
||||
- **Port 8080** — Default listen address. Put a reverse proxy (Caddy, nginx) in front for TLS.
|
||||
- The image is built `FROM scratch` — the binary includes SQLite statically linked.
|
||||
- Optional: `docker build --build-arg BILLING_ENABLED=true` to include Stripe billing support.
|
||||
|
||||
Shared storage for a team with crew members:
|
||||
## Configuration
|
||||
|
||||
Config loads in layers: **defaults → YAML file → environment variables**. Later layers override earlier ones.
|
||||
|
||||
All YAML fields can be overridden with environment variables using the `HOLD_` prefix and `_` path separators. For example, `server.public_url` becomes `HOLD_SERVER_PUBLIC_URL`.
|
||||
|
||||
S3 credentials also accept standard AWS environment variable names: `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_REGION`, `S3_BUCKET`, `S3_ENDPOINT`.
|
||||
|
||||
For the complete configuration reference with all options and defaults, see [`config-hold.example.yaml`](../config-hold.example.yaml) or run `atcr-hold config init`.
|
||||
|
||||
## Access Control
|
||||
|
||||
| Setting | Who can pull | Who can push |
|
||||
|---|---|---|
|
||||
| `server.public: true` | Anyone | Captain + crew with `blob:write` |
|
||||
| `server.public: false` (default) | Crew with `blob:read` | Captain + crew with `blob:write` |
|
||||
| + `registration.allow_all_crew: true` | (per above) | Any authenticated user |
|
||||
|
||||
The captain (set via `registration.owner_did`) has all permissions implicitly. `blob:write` implies `blob:read`.
|
||||
|
||||
Authentication uses ATProto service tokens: AppView requests a token from the user's PDS scoped to the hold's DID, then includes it in XRPC requests. The hold validates the token and checks crew membership.
|
||||
|
||||
See [BYOS.md](BYOS.md) for the full authorization model.
|
||||
|
||||
## Optional Subsystems
|
||||
|
||||
| Subsystem | Default | Config key | Notes |
|
||||
|---|---|---|---|
|
||||
| Admin panel | Enabled | `admin.enabled` | Web UI for crew, settings, and storage management |
|
||||
| Quotas | Disabled | `quota.tiers` | Tier-based storage limits (e.g., deckhand=5GB, bosun=50GB) |
|
||||
| Garbage collection | Disabled | `gc.enabled` | Nightly cleanup of orphaned blobs and records |
|
||||
| Vulnerability scanner | Disabled | `scanner.secret` | Requires separate scanner service; see [SBOM_SCANNING.md](SBOM_SCANNING.md) |
|
||||
| Billing (Stripe) | Disabled | Build flag + env | Build with `--build-arg BILLING_ENABLED=true`; see [BILLING.md](BILLING.md) |
|
||||
| Bluesky posts | Disabled | `registration.enable_bluesky_posts` | Posts push notifications from hold's identity |
|
||||
|
||||
## Hold Identity
|
||||
|
||||
**did:web (default)** — Derived from `server.public_url` with zero setup. `https://hold.example.com` becomes `did:web:hold.example.com`. The DID document is served at `/.well-known/did.json`. Tied to domain ownership — if you lose the domain, you lose the identity.
|
||||
|
||||
**did:plc (portable)** — Set `database.did_method: plc` in config. Registered with plc.directory. Survives domain changes. Requires a rotation key (auto-generated at `{database.path}/rotation.key`). Use `database.did` to adopt an existing DID for recovery or migration.
|
||||
|
||||
## Verification
|
||||
|
||||
After starting your hold, verify it's working:
|
||||
|
||||
```bash
|
||||
# Hold config
|
||||
HOLD_PUBLIC_URL=https://hold.acme.corp
|
||||
HOLD_OWNER=did:plc:acme-org-did
|
||||
HOLD_PUBLIC=false # Private reads (crew only)
|
||||
HOLD_ALLOW_ALL_CREW=false # Explicit crew membership required
|
||||
HOLD_DATABASE_DIR=/var/lib/atcr-hold
|
||||
# Health check — should return {"version":"..."}
|
||||
curl https://hold.example.com/xrpc/_health
|
||||
|
||||
# S3 storage
|
||||
STORAGE_DRIVER=s3
|
||||
AWS_ACCESS_KEY_ID=your-key
|
||||
AWS_SECRET_ACCESS_KEY=your-secret
|
||||
S3_BUCKET=acme-registry-blobs
|
||||
# DID document — should return valid JSON with service endpoints
|
||||
curl https://hold.example.com/.well-known/did.json
|
||||
|
||||
# Captain record — should show your owner DID
|
||||
curl "https://hold.example.com/xrpc/com.atproto.repo.listRecords?repo=HOLD_DID&collection=io.atcr.hold.captain"
|
||||
|
||||
# Crew records
|
||||
curl "https://hold.example.com/xrpc/com.atproto.repo.listRecords?repo=HOLD_DID&collection=io.atcr.hold.crew"
|
||||
```
|
||||
|
||||
Then add crew members via XRPC or hold PDS records.
|
||||
Replace `HOLD_DID` with your hold's DID (from the `/.well-known/did.json` response).
|
||||
|
||||
### Public Hold (Community Registry)
|
||||
## Docker Compose
|
||||
|
||||
Open storage allowing anyone to push and pull:
|
||||
```yaml
|
||||
services:
|
||||
atcr-hold:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile.hold
|
||||
command: ["serve", "--config", "/config.yaml"]
|
||||
volumes:
|
||||
- ./config-hold.yaml:/config.yaml:ro
|
||||
- atcr-hold-data:/var/lib/atcr-hold
|
||||
ports:
|
||||
- "8080:8080"
|
||||
healthcheck:
|
||||
test: ["CMD", "/healthcheck", "http://localhost:8080/xrpc/_health"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
|
||||
```bash
|
||||
# Hold config
|
||||
HOLD_PUBLIC_URL=https://hold.community.io
|
||||
HOLD_OWNER=did:plc:community-did
|
||||
HOLD_PUBLIC=true # Public reads (anyone can pull)
|
||||
HOLD_ALLOW_ALL_CREW=true # Any authenticated user can push
|
||||
HOLD_DATABASE_DIR=/var/lib/atcr-hold
|
||||
|
||||
# S3 storage
|
||||
STORAGE_DRIVER=s3
|
||||
AWS_ACCESS_KEY_ID=your-key
|
||||
AWS_SECRET_ACCESS_KEY=your-secret
|
||||
S3_BUCKET=community-registry-blobs
|
||||
volumes:
|
||||
atcr-hold-data:
|
||||
```
|
||||
|
||||
### Development/Testing
|
||||
For production with TLS termination, see [`deploy/docker-compose.prod.yml`](../deploy/docker-compose.prod.yml) which includes a Caddy reverse proxy.
|
||||
|
||||
Local filesystem storage for testing:
|
||||
## Further Reading
|
||||
|
||||
```bash
|
||||
# Hold config
|
||||
HOLD_PUBLIC_URL=http://127.0.0.1:8080
|
||||
HOLD_OWNER=did:plc:your-test-did
|
||||
HOLD_PUBLIC=true
|
||||
HOLD_ALLOW_ALL_CREW=true
|
||||
HOLD_DATABASE_DIR=/tmp/atcr-hold
|
||||
|
||||
# Filesystem storage
|
||||
STORAGE_DRIVER=filesystem
|
||||
STORAGE_ROOT_DIR=/tmp/atcr-hold-blobs
|
||||
```
|
||||
|
||||
## Production Deployment
|
||||
|
||||
For production deployments with:
|
||||
- SSL/TLS certificates
|
||||
- S3 storage with presigned URLs
|
||||
- Proper access control
|
||||
- Systemd service files
|
||||
- Monitoring
|
||||
|
||||
See **[deploy/README.md](https://tangled.org/@evan.jarrett.net/at-container-registry/blob/main/deploy/README.md)** for comprehensive production deployment guide.
|
||||
|
||||
### Quick Production Checklist
|
||||
|
||||
Before going to production:
|
||||
|
||||
- [ ] Set `HOLD_PUBLIC_URL` to your public HTTPS URL
|
||||
- [ ] Set `HOLD_OWNER` to your ATProto DID
|
||||
- [ ] Configure S3 storage (`STORAGE_DRIVER=s3`)
|
||||
- [ ] Set `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `S3_BUCKET`, `S3_ENDPOINT`
|
||||
- [ ] Set `HOLD_DATABASE_DIR` to persistent directory
|
||||
- [ ] Configure `HOLD_PUBLIC` and `HOLD_ALLOW_ALL_CREW` for desired access model
|
||||
- [ ] Configure SSL/TLS termination (Caddy/nginx/Cloudflare)
|
||||
- [ ] Verify DID document: `curl https://hold.example.com/.well-known/did.json`
|
||||
- [ ] Test presigned URLs: Check logs for "presigned URL" messages during push
|
||||
- [ ] Monitor crew membership: `curl https://hold.example.com/xrpc/com.atproto.repo.listRecords?repo={holdDID}&collection=io.atcr.hold.crew`
|
||||
- [ ] (Optional) Enable Bluesky posts: `HOLD_BLUESKY_POSTS_ENABLED=true`
|
||||
- [ ] (Optional) Request relay crawl: `./deploy/request-crawl.sh hold.example.com`
|
||||
|
||||
## Configuration Files Reference
|
||||
|
||||
- **[.env.hold.example](https://tangled.org/@evan.jarrett.net/at-container-registry/blob/main/.env.hold.example)** - All available environment variables with documentation
|
||||
- **[deploy/.env.prod.template](https://tangled.org/@evan.jarrett.net/at-container-registry/blob/main/deploy/.env.prod.template)** - Production configuration template (includes both AppView and Hold)
|
||||
- **[deploy/README.md](https://tangled.org/@evan.jarrett.net/at-container-registry/blob/main/deploy/README.md)** - Production deployment guide
|
||||
- **[AppView Documentation](https://atcr.io/r/evan.jarrett.net/atcr-appview)** - Registry API server setup
|
||||
- **[BYOS Architecture](https://tangled.org/@evan.jarrett.net/at-container-registry/blob/main/docs/BYOS.md)** - Bring Your Own Storage technical design
|
||||
- [`config-hold.example.yaml`](../config-hold.example.yaml) — Complete configuration reference with inline comments
|
||||
- [BYOS.md](BYOS.md) — Bring Your Own Storage architecture and authorization model
|
||||
- [HOLD_XRPC_ENDPOINTS.md](HOLD_XRPC_ENDPOINTS.md) — XRPC endpoint reference
|
||||
- [BILLING.md](BILLING.md) — Stripe billing integration
|
||||
- [QUOTAS.md](QUOTAS.md) — Quota management
|
||||
- [SBOM_SCANNING.md](SBOM_SCANNING.md) — Vulnerability scanning
|
||||
|
||||
@@ -35,7 +35,7 @@ type ProviderRequest struct {
|
||||
// ProviderResponse is the response format to Gatekeeper.
|
||||
type ProviderResponse struct {
|
||||
SystemError string `json:"system_error,omitempty"`
|
||||
Responses []map[string]interface{} `json:"responses"`
|
||||
Responses []map[string]any `json:"responses"`
|
||||
}
|
||||
|
||||
// VerificationResult holds the result of verifying a single image.
|
||||
@@ -110,7 +110,7 @@ func (s *Server) handleProvide(w http.ResponseWriter, r *http.Request) {
|
||||
log.Printf("INFO: received verification request for %d images", len(req.Values))
|
||||
|
||||
// Verify each image
|
||||
responses := make([]map[string]interface{}, 0, len(req.Values))
|
||||
responses := make([]map[string]any, 0, len(req.Values))
|
||||
for _, image := range req.Values {
|
||||
result := s.verifyImage(r.Context(), image)
|
||||
responses = append(responses, structToMap(result))
|
||||
@@ -186,9 +186,9 @@ func (s *Server) handleReady(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// structToMap converts a struct to a map for JSON encoding.
|
||||
func structToMap(v interface{}) map[string]interface{} {
|
||||
func structToMap(v any) map[string]any {
|
||||
data, _ := json.Marshal(v)
|
||||
var m map[string]interface{}
|
||||
var m map[string]any
|
||||
json.Unmarshal(data, &m)
|
||||
return m
|
||||
}
|
||||
|
||||
@@ -196,7 +196,7 @@ type VerifierResult struct {
|
||||
Name string
|
||||
Type string
|
||||
Message string
|
||||
Extensions map[string]interface{}
|
||||
Extensions map[string]any
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user