8 Commits

Author SHA1 Message Date
Evan Jarrett
dd5d2aab55 test tag push 2025-11-03 13:45:59 -06:00
Evan Jarrett
6b56f18715 begin brew tap support 2025-11-02 22:11:19 -06:00
Evan Jarrett
e296971c47 add makefile fix race conditions 2025-11-01 19:37:29 -05:00
Evan Jarrett
d7eba25f66 update workflow for buildah 2025-11-01 15:05:36 -05:00
Evan Jarrett
7a0050235d background ensurecrew to prevent stalling oauth 2025-11-01 11:08:53 -05:00
Evan Jarrett
ff7bc131b2 rename example go files for documentation 2025-11-01 10:29:11 -05:00
Evan Jarrett
2d720e4154 remove extra docker volume in prod 2025-10-31 21:06:11 -05:00
Evan Jarrett
e6b1264269 try and offline holds 2025-10-31 21:03:33 -05:00
41 changed files with 8895 additions and 905 deletions

5
.gitignore vendored
View File

@@ -11,6 +11,11 @@ dist/
# Environment configuration
.env
# Generated assets (run go generate to rebuild)
pkg/appview/licenses/spdx-licenses.json
pkg/appview/static/js/htmx.min.js
pkg/appview/static/js/lucide.min.js
# IDE
.claude/
.vscode/

View File

@@ -0,0 +1,137 @@
# Tangled Workflow: Release Credential Helper to Tangled.org
#
# This workflow builds the docker-credential-atcr binary and publishes it
# to Tangled.org for distribution via Homebrew.
#
# Current limitation: Tangled doesn't support triggering on tags yet,
# so this triggers on push to main. Manually verify you've tagged the
# release before pushing.
when:
- event: ["push"]
tag: ["v*"]
engine: "nixery"
dependencies:
nixpkgs:
- go_1_24 # Go 1.24+ for building
- git # For finding tags
- goreleaser # For building multi-platform binaries
# - goat # TODO: Add goat CLI for uploading to Tangled (if available in nixpkgs)
environment:
CGO_ENABLED: "0" # Build static binaries
steps:
- name: Find latest git tag
command: |
# Get the most recent version tag
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.1")
echo "Latest tag: $LATEST_TAG"
echo "$LATEST_TAG" > .version
# Also get the commit hash for reference
COMMIT_HASH=$(git rev-parse HEAD)
echo "Commit: $COMMIT_HASH"
- name: Build binaries with GoReleaser
command: |
# Read version from previous step
VERSION=$(cat .version)
export VERSION
# Build for all platforms using GoReleaser
# This creates artifacts in dist/ directory
goreleaser build --clean --snapshot --config .goreleaser.yaml
# List what was built
echo "Built artifacts:"
ls -lh dist/
- name: Package artifacts
command: |
VERSION=$(cat .version)
VERSION_NO_V=${VERSION#v} # Remove 'v' prefix for filenames
cd dist
# Create tarballs for each platform (GoReleaser might already do this)
# Darwin x86_64
if [ -d "docker-credential-atcr_darwin_amd64_v1" ]; then
tar czf "docker-credential-atcr_${VERSION_NO_V}_Darwin_x86_64.tar.gz" \
-C docker-credential-atcr_darwin_amd64_v1 docker-credential-atcr
fi
# Darwin arm64
if [ -d "docker-credential-atcr_darwin_arm64" ]; then
tar czf "docker-credential-atcr_${VERSION_NO_V}_Darwin_arm64.tar.gz" \
-C docker-credential-atcr_darwin_arm64 docker-credential-atcr
fi
# Linux x86_64
if [ -d "docker-credential-atcr_linux_amd64_v1" ]; then
tar czf "docker-credential-atcr_${VERSION_NO_V}_Linux_x86_64.tar.gz" \
-C docker-credential-atcr_linux_amd64_v1 docker-credential-atcr
fi
# Linux arm64
if [ -d "docker-credential-atcr_linux_arm64" ]; then
tar czf "docker-credential-atcr_${VERSION_NO_V}_Linux_arm64.tar.gz" \
-C docker-credential-atcr_linux_arm64 docker-credential-atcr
fi
echo "Created tarballs:"
ls -lh *.tar.gz
- name: Upload to Tangled.org
command: |
VERSION=$(cat .version)
VERSION_NO_V=${VERSION#v}
# TODO: Authenticate with goat CLI
# You'll need to set up credentials/tokens for goat
# Example (adjust based on goat's actual auth mechanism):
# goat login --pds https://your-pds.example.com --handle your.handle
# TODO: Upload each artifact to Tangled.org
# This creates sh.tangled.repo.artifact records in your ATProto PDS
# Adjust these commands based on scripts/publish-artifact.sh pattern
# Example structure (you'll need to fill in actual goat commands):
# for artifact in dist/*.tar.gz; do
# echo "Uploading $artifact..."
# goat upload \
# --repo "at-container-registry" \
# --tag "$VERSION" \
# --file "$artifact"
# done
echo "TODO: Implement goat upload commands"
echo "See scripts/publish-artifact.sh for reference"
echo ""
echo "After uploading, you'll receive a TAG_HASH from Tangled."
echo "Update Formula/docker-credential-atcr.rb with:"
echo " VERSION = \"$VERSION_NO_V\""
echo " TAG_HASH = \"<hash-from-tangled>\""
echo ""
echo "Then run: scripts/update-homebrew-formula.sh $VERSION_NO_V <tag-hash>"
- name: Generate checksums for verification
command: |
VERSION=$(cat .version)
VERSION_NO_V=${VERSION#v}
cd dist
echo "SHA256 checksums for Homebrew formula:"
echo "======================================="
for file in docker-credential-atcr_${VERSION_NO_V}_*.tar.gz; do
if [ -f "$file" ]; then
sha256sum "$file"
fi
done
echo ""
echo "Copy these checksums to Formula/docker-credential-atcr.rb"

View File

@@ -1,5 +1,5 @@
# ATCR Release Pipeline for Tangled.org
# Triggers on version tags and builds cross-platform binaries using GoReleaser
# Triggers on version tags and builds cross-platform binaries using buildah
when:
- event: ["manual"]
@@ -10,46 +10,50 @@ engine: "nixery"
dependencies:
nixpkgs:
- git
- go
#- goreleaser
- podman
- buildah
- chroot
environment:
IMAGE_REGISTRY: atcr.io
IMAGE_USER: evan.jarrett.net
steps:
- name: Fetch git tags
command: git fetch --tags --force
- name: Checkout tag for current commit
- name: Setup build environment
command: |
CURRENT_COMMIT=$(git rev-parse HEAD)
export TAG=$(git tag --points-at $CURRENT_COMMIT --sort=-version:refname | head -n1)
if [ -z "$TAG" ]; then
echo "Error: No tag found for commit $CURRENT_COMMIT"
exit 1
if ! grep -q "^root:" /etc/passwd 2>/dev/null; then
echo "root:x:0:0:root:/root:/bin/sh" >> /etc/passwd
fi
echo "Found tag $TAG for commit $CURRENT_COMMIT"
git checkout $TAG
- name: Build AppView Docker image
- name: Login to registry
command: |
TAG=$(git describe --tags --exact-match 2>/dev/null || git tag --points-at HEAD | head -n1)
podman login atcr.io -u evan.jarrett.net -p ${APP_PASSWORD}
podman build -f Dockerfile.appview -t atcr.io/evan.jarrett.net/atcr-appview:${TAG} .
podman push atcr.io/evan.jarrett.net/atcr-appview:${TAG}
echo "${APP_PASSWORD}" | buildah login \
--storage-driver vfs \
-u "${IMAGE_USER}" \
--password-stdin \
${IMAGE_REGISTRY}
- name: Build Hold Docker image
- name: Build and push AppView image
command: |
TAG=$(git describe --tags --exact-match 2>/dev/null || git tag --points-at HEAD | head -n1)
podman login atcr.io -u evan.jarrett.net -p ${APP_PASSWORD}
podman build -f Dockerfile.hold -t atcr.io/evan.jarrett.net/atcr-hold:${TAG} .
podman push atcr.io/evan.jarrett.net/atcr-hold:${TAG}
# disable for now
# - name: Tidy Go modules
# command: go mod tidy
buildah bud \
--storage-driver vfs \
--tag ${IMAGE_REGISTRY}/${IMAGE_USER}/atcr-appview:${TAG} \
--tag ${IMAGE_REGISTRY}/${IMAGE_USER}/atcr-appview:latest \
--file ./Dockerfile.appview \
.
# - name: Install Goat
# command: go install github.com/bluesky-social/goat@latest
buildah push \
--storage-driver vfs \
${IMAGE_REGISTRY}/${IMAGE_USER}/atcr-appview:latest
# - name: Run GoReleaser
# command: goreleaser release --clean
- name: Build and push Hold image
command: |
buildah bud \
--storage-driver vfs \
--tag ${IMAGE_REGISTRY}/${IMAGE_USER}/atcr-hold:${TAG} \
--tag ${IMAGE_REGISTRY}/${IMAGE_USER}/atcr-hold:latest \
--file ./Dockerfile.hold \
.
buildah push \
--storage-driver vfs \
${IMAGE_REGISTRY}/${IMAGE_USER}/atcr-hold:latest

View File

@@ -0,0 +1,54 @@
# typed: false
# frozen_string_literal: true
class DockerCredentialAtcr < Formula
desc "Docker credential helper for ATCR (ATProto Container Registry)"
homepage "https://atcr.io"
url "https://github.com/atcr-io/atcr/archive/refs/tags/v0.0.1.tar.gz"
sha256 "REPLACE_WITH_TARBALL_SHA256"
license "MIT"
head "https://github.com/atcr-io/atcr.git", branch: "main"
depends_on "go" => :build
def install
# Build the credential helper binary
# Use ldflags to inject version information
ldflags = %W[
-s -w
-X main.version=#{version}
-X main.commit=#{tap.user}
-X main.date=#{time.iso8601}
]
system "go", "build", *std_go_args(ldflags:, output: bin/"docker-credential-atcr"), "./cmd/credential-helper"
end
test do
# Test that the binary exists and is executable
assert_match version.to_s, shell_output("#{bin}/docker-credential-atcr version 2>&1")
end
def caveats
<<~EOS
To configure Docker to use ATCR credential helper, add the following
to your ~/.docker/config.json:
{
"credHelpers": {
"atcr.io": "atcr"
}
}
Note: The credential helper name is "atcr" (Docker automatically prefixes
with "docker-credential-" when looking for the binary).
To authenticate with ATCR:
docker push atcr.io/<your-handle>/<image>:latest
This will open your browser to complete the OAuth device flow.
Configuration is stored in: ~/.atcr/device.json
EOS
end
end

View File

@@ -37,13 +37,22 @@ Invoke-WebRequest -Uri https://atcr.io/install.ps1 -OutFile install.ps1
.\install.ps1
```
### Using Homebrew (macOS)
You can read the full manifest spec here, but the dependencies block is the real interesting bit. Dependencies for your workflow, like Go, Node.js, Python etc. can be pulled in from nixpkgs. Nixpkgs—for the uninitiated—is a vast collection of packages for the Nix package manager. Fortunately, you neednt know nor care about Nix to use it! Just head to https://search.nixos.org to find your package of choice (Ill bet 1€ that its there1), toss it in the list and run your build. The Nix-savvy of you lot will be happy to know that you can use custom registries too.
### Using Homebrew (macOS and Linux)
```bash
# Add the ATCR tap
brew tap atcr-io/tap
# Install the credential helper
brew install docker-credential-atcr
```
The Homebrew formula supports:
- **macOS**: Intel (x86_64) and Apple Silicon (arm64)
- **Linux**: x86_64 and arm64
Homebrew will automatically download the correct binary for your platform.
### Manual Installation
1. **Download the binary** for your platform from [GitHub Releases](https://github.com/atcr-io/atcr/releases)

84
Makefile Normal file
View File

@@ -0,0 +1,84 @@
# ATCR Makefile
# Build targets for the ATProto Container Registry
.PHONY: all build build-appview build-hold build-credential-helper build-oauth-helper \
generate test test-race test-verbose lint clean help
.DEFAULT_GOAL := help
help: ## Show this help message
@echo "ATCR Build Targets:"
@echo ""
@awk 'BEGIN {FS = ":.*##"; printf ""} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-28s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
all: generate build ## Generate assets and build all binaries (default)
# Generated asset files
GENERATED_ASSETS = \
pkg/appview/static/js/htmx.min.js \
pkg/appview/static/js/lucide.min.js \
pkg/appview/licenses/spdx-licenses.json
generate: $(GENERATED_ASSETS) ## Run go generate to download vendor assets
$(GENERATED_ASSETS):
@echo "→ Generating vendor assets and code..."
go generate ./...
##@ Build Targets
build: build-appview build-hold build-credential-helper ## Build all binaries
build-appview: $(GENERATED_ASSETS) ## Build appview binary only
@echo "→ Building appview..."
@mkdir -p bin
go build -o bin/atcr-appview ./cmd/appview
build-hold: $(GENERATED_ASSETS) ## Build hold binary only
@echo "→ Building hold..."
@mkdir -p bin
go build -o bin/atcr-hold ./cmd/hold
build-credential-helper: $(GENERATED_ASSETS) ## Build credential helper only
@echo "→ Building credential helper..."
@mkdir -p bin
go build -o bin/docker-credential-atcr ./cmd/credential-helper
build-oauth-helper: $(GENERATED_ASSETS) ## Build OAuth helper only
@echo "→ Building OAuth helper..."
@mkdir -p bin
go build -o bin/oauth-helper ./cmd/oauth-helper
##@ Test Targets
test: ## Run all tests
@echo "→ Running tests..."
go test -cover ./...
test-race: ## Run tests with race detector
@echo "→ Running tests with race detector..."
go test -race ./...
test-verbose: ## Run tests with verbose output
@echo "→ Running tests with verbose output..."
go test -v ./...
##@ Quality Targets
.PHONY: check-golangci-lint
check-golangci-lint:
@which golangci-lint > /dev/null || (echo "→ Installing golangci-lint..." && go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest)
lint: check-golangci-lint ## Run golangci-lint
@echo "→ Running golangci-lint..."
golangci-lint run ./...
##@ Utility Targets
clean: ## Remove built binaries and generated assets
@echo "→ Cleaning build artifacts..."
rm -rf bin/
rm -f pkg/appview/static/js/htmx.min.js
rm -f pkg/appview/static/js/lucide.min.js
rm -f pkg/appview/licenses/spdx-licenses.json
@echo "✓ Clean complete"

View File

@@ -314,15 +314,16 @@ func serveRegistry(cmd *cobra.Command, args []string) error {
} else {
slog.Debug("Updated profile with hold DID", "component", "appview/callback", "hold_did", holdDID)
}
slog.Debug("Attempting crew registration", "component", "oauth/server", "did", did, "hold_did", holdDID)
storage.EnsureCrewMembership(ctx, client, refresher, holdDID)
} else {
// Already a DID - use it
holdDID = profile.DefaultHold
}
// Register crew regardless of migration (outside the migration block)
// Run in background to avoid blocking OAuth callback if hold is offline
slog.Debug("Attempting crew registration", "component", "appview/callback", "did", did, "hold_did", holdDID)
storage.EnsureCrewMembership(ctx, client, refresher, holdDID)
go func(ctx context.Context, client *atproto.Client, refresher *oauth.Refresher, holdDID string) {
storage.EnsureCrewMembership(ctx, client, refresher, holdDID)
}(ctx, client, refresher, holdDID)
}

View File

@@ -149,8 +149,6 @@ volumes:
driver: local
atcr-hold-data:
driver: local
atcr-hold-tokens:
driver: local
configs:
caddyfile:

728
docs/ATCR_VERIFY_CLI.md Normal file
View File

@@ -0,0 +1,728 @@
# atcr-verify CLI Tool
## Overview
`atcr-verify` is a command-line tool for verifying ATProto signatures on container images stored in ATCR. It provides cryptographic verification of image manifests using ATProto's DID-based trust model.
## Features
- ✅ Verify ATProto signatures via OCI Referrers API
- ✅ DID resolution and public key extraction
- ✅ PDS query and commit signature verification
- ✅ Trust policy enforcement
- ✅ Offline verification mode (with cached data)
- ✅ Multiple output formats (human-readable, JSON, quiet)
- ✅ Exit codes for CI/CD integration
- ✅ Kubernetes admission controller integration
## Installation
### Binary Release
```bash
# Linux (x86_64)
curl -L https://github.com/atcr-io/atcr/releases/latest/download/atcr-verify-linux-amd64 -o atcr-verify
chmod +x atcr-verify
sudo mv atcr-verify /usr/local/bin/
# macOS (Apple Silicon)
curl -L https://github.com/atcr-io/atcr/releases/latest/download/atcr-verify-darwin-arm64 -o atcr-verify
chmod +x atcr-verify
sudo mv atcr-verify /usr/local/bin/
# Windows
curl -L https://github.com/atcr-io/atcr/releases/latest/download/atcr-verify-windows-amd64.exe -o atcr-verify.exe
```
### From Source
```bash
git clone https://github.com/atcr-io/atcr.git
cd atcr
go install ./cmd/atcr-verify
```
### Container Image
```bash
docker pull atcr.io/atcr/verify:latest
# Run
docker run --rm atcr.io/atcr/verify:latest verify IMAGE
```
## Usage
### Basic Verification
```bash
# Verify an image
atcr-verify atcr.io/alice/myapp:latest
# Output:
# ✓ Image verified successfully
# Signed by: alice.bsky.social (did:plc:alice123)
# Signed at: 2025-10-31T12:34:56.789Z
```
### With Trust Policy
```bash
# Verify against trust policy
atcr-verify atcr.io/alice/myapp:latest --policy trust-policy.yaml
# Output:
# ✓ Image verified successfully
# ✓ Trust policy satisfied
# Policy: production-images
# Trusted DID: did:plc:alice123
```
### JSON Output
```bash
atcr-verify atcr.io/alice/myapp:latest --output json
# Output:
{
"verified": true,
"image": "atcr.io/alice/myapp:latest",
"digest": "sha256:abc123...",
"signature": {
"did": "did:plc:alice123",
"handle": "alice.bsky.social",
"pds": "https://bsky.social",
"recordUri": "at://did:plc:alice123/io.atcr.manifest/abc123",
"commitCid": "bafyreih8...",
"signedAt": "2025-10-31T12:34:56.789Z",
"algorithm": "ECDSA-K256-SHA256"
},
"trustPolicy": {
"satisfied": true,
"policy": "production-images",
"trustedDID": true
}
}
```
### Quiet Mode
```bash
# Exit code only (for scripts)
atcr-verify atcr.io/alice/myapp:latest --quiet
echo $? # 0 = verified, 1 = failed
```
### Offline Mode
```bash
# Export verification bundle
atcr-verify export atcr.io/alice/myapp:latest -o bundle.json
# Verify offline (in air-gapped environment)
atcr-verify atcr.io/alice/myapp:latest --offline --bundle bundle.json
```
## Command Reference
### verify
Verify ATProto signature for an image.
```bash
atcr-verify verify IMAGE [flags]
atcr-verify IMAGE [flags] # 'verify' subcommand is optional
```
**Arguments:**
- `IMAGE` - Image reference (registry/owner/repo:tag or @digest)
**Flags:**
- `--policy FILE` - Trust policy file (default: none)
- `--output FORMAT` - Output format: text, json, quiet (default: text)
- `--offline` - Offline mode (requires --bundle)
- `--bundle FILE` - Verification bundle for offline mode
- `--cache-dir DIR` - Cache directory for DID documents (default: ~/.atcr/cache)
- `--no-cache` - Disable caching
- `--timeout DURATION` - Verification timeout (default: 30s)
- `--verbose` - Verbose output
**Exit Codes:**
- `0` - Verification succeeded
- `1` - Verification failed
- `2` - Invalid arguments
- `3` - Network error
- `4` - Trust policy violation
**Examples:**
```bash
# Basic verification
atcr-verify atcr.io/alice/myapp:latest
# With specific digest
atcr-verify atcr.io/alice/myapp@sha256:abc123...
# With trust policy
atcr-verify atcr.io/alice/myapp:latest --policy production-policy.yaml
# JSON output for scripting
atcr-verify atcr.io/alice/myapp:latest --output json | jq .verified
# Quiet mode for CI/CD
if atcr-verify atcr.io/alice/myapp:latest --quiet; then
echo "Deploy approved"
fi
```
### export
Export verification bundle for offline verification.
```bash
atcr-verify export IMAGE [flags]
```
**Arguments:**
- `IMAGE` - Image reference to export bundle for
**Flags:**
- `-o, --output FILE` - Output file (default: stdout)
- `--include-did-docs` - Include DID documents in bundle
- `--include-commit` - Include ATProto commit data
**Examples:**
```bash
# Export to file
atcr-verify export atcr.io/alice/myapp:latest -o myapp-bundle.json
# Export with all verification data
atcr-verify export atcr.io/alice/myapp:latest \
--include-did-docs \
--include-commit \
-o complete-bundle.json
# Export for multiple images
for img in $(cat images.txt); do
atcr-verify export $img -o bundles/$(echo $img | tr '/:' '_').json
done
```
### trust
Manage trust policies and trusted DIDs.
```bash
atcr-verify trust COMMAND [flags]
```
**Subcommands:**
**`trust list`** - List trusted DIDs
```bash
atcr-verify trust list
# Output:
# Trusted DIDs:
# - did:plc:alice123 (alice.bsky.social)
# - did:plc:bob456 (bob.example.com)
```
**`trust add DID`** - Add trusted DID
```bash
atcr-verify trust add did:plc:alice123
atcr-verify trust add did:plc:alice123 --name "Alice (DevOps)"
```
**`trust remove DID`** - Remove trusted DID
```bash
atcr-verify trust remove did:plc:alice123
```
**`trust policy validate`** - Validate trust policy file
```bash
atcr-verify trust policy validate policy.yaml
```
### version
Show version information.
```bash
atcr-verify version
# Output:
# atcr-verify version 1.0.0
# Go version: go1.21.5
# Commit: 3b5b89b
# Built: 2025-10-31T12:00:00Z
```
## Trust Policy
Trust policies define which signatures to trust and what to do when verification fails.
### Policy File Format
```yaml
version: 1.0
# Global settings
defaultAction: enforce # enforce, audit, allow
requireSignature: true
# Policies matched by image pattern (first match wins)
policies:
- name: production-images
description: "Production images must be signed by DevOps or Security"
scope: "atcr.io/*/prod-*"
require:
signature: true
trustedDIDs:
- did:plc:devops-team
- did:plc:security-team
minSignatures: 1
maxAge: 2592000 # 30 days in seconds
action: enforce
- name: staging-images
scope: "atcr.io/*/staging-*"
require:
signature: true
trustedDIDs:
- did:plc:devops-team
- did:plc:developers
minSignatures: 1
action: enforce
- name: dev-images
scope: "atcr.io/*/dev-*"
require:
signature: false
action: audit # Log but don't fail
# Trusted DID registry
trustedDIDs:
did:plc:devops-team:
name: "DevOps Team"
validFrom: "2024-01-01T00:00:00Z"
expiresAt: null
contact: "devops@example.com"
did:plc:security-team:
name: "Security Team"
validFrom: "2024-01-01T00:00:00Z"
expiresAt: null
did:plc:developers:
name: "Developer Team"
validFrom: "2024-06-01T00:00:00Z"
expiresAt: "2025-12-31T23:59:59Z"
```
### Policy Matching
Policies are evaluated in order. First match wins.
**Scope patterns:**
- `atcr.io/*/*` - All ATCR images
- `atcr.io/myorg/*` - All images from myorg
- `atcr.io/*/prod-*` - All images with "prod-" prefix
- `atcr.io/myorg/myapp` - Specific repository
- `atcr.io/myorg/myapp:v*` - Tag pattern matching
### Policy Actions
**`enforce`** - Reject if policy fails
- Exit code 4
- Blocks deployment
**`audit`** - Log but allow
- Exit code 0 (success)
- Warning message printed
**`allow`** - Always allow
- No verification performed
- Exit code 0
### Policy Requirements
**`signature: true`** - Require signature present
**`trustedDIDs`** - List of trusted DIDs
```yaml
trustedDIDs:
- did:plc:alice123
- did:web:example.com
```
**`minSignatures`** - Minimum number of signatures required
```yaml
minSignatures: 2 # Require 2 signatures
```
**`maxAge`** - Maximum signature age in seconds
```yaml
maxAge: 2592000 # 30 days
```
**`algorithms`** - Allowed signature algorithms
```yaml
algorithms:
- ECDSA-K256-SHA256
```
## Verification Flow
### 1. Image Resolution
```
Input: atcr.io/alice/myapp:latest
Resolve tag to digest
Output: sha256:abc123...
```
### 2. Signature Discovery
```
Query OCI Referrers API:
GET /v2/alice/myapp/referrers/sha256:abc123
?artifactType=application/vnd.atproto.signature.v1+json
Returns: List of signature artifacts
Download signature metadata blobs
```
### 3. DID Resolution
```
Extract DID from signature: did:plc:alice123
Query PLC directory:
GET https://plc.directory/did:plc:alice123
Extract public key from DID document
```
### 4. PDS Query
```
Get PDS endpoint from DID document
Query for manifest record:
GET {pds}/xrpc/com.atproto.repo.getRecord
?repo=did:plc:alice123
&collection=io.atcr.manifest
&rkey=abc123
Get commit CID from record
Fetch commit data (includes signature)
```
### 5. Signature Verification
```
Extract signature bytes from commit
Compute commit hash (SHA-256)
Verify: ECDSA_K256(hash, signature, publicKey)
Result: Valid or Invalid
```
### 6. Trust Policy Evaluation
```
Check if DID is in trustedDIDs list
Check signature age < maxAge
Check minSignatures satisfied
Apply policy action (enforce/audit/allow)
```
## Integration Examples
### CI/CD Pipeline
**GitHub Actions:**
```yaml
name: Deploy
on:
push:
branches: [main]
jobs:
verify-and-deploy:
runs-on: ubuntu-latest
steps:
- name: Install atcr-verify
run: |
curl -L https://github.com/atcr-io/atcr/releases/latest/download/atcr-verify-linux-amd64 -o atcr-verify
chmod +x atcr-verify
sudo mv atcr-verify /usr/local/bin/
- name: Verify image signature
run: |
atcr-verify ${{ env.IMAGE }} --policy .github/trust-policy.yaml
- name: Deploy to production
if: success()
run: kubectl set image deployment/app app=${{ env.IMAGE }}
```
**GitLab CI:**
```yaml
verify:
stage: verify
image: atcr.io/atcr/verify:latest
script:
- atcr-verify ${IMAGE} --policy trust-policy.yaml
deploy:
stage: deploy
dependencies:
- verify
script:
- kubectl set image deployment/app app=${IMAGE}
```
**Jenkins:**
```groovy
pipeline {
agent any
stages {
stage('Verify') {
steps {
sh 'atcr-verify ${IMAGE} --policy trust-policy.yaml'
}
}
stage('Deploy') {
when {
expression { currentBuild.result == 'SUCCESS' }
}
steps {
sh 'kubectl set image deployment/app app=${IMAGE}'
}
}
}
}
```
### Kubernetes Admission Controller
**Using as webhook backend:**
```go
// webhook server
func (h *Handler) ValidatePod(w http.ResponseWriter, r *http.Request) {
var admReq admissionv1.AdmissionReview
json.NewDecoder(r.Body).Decode(&admReq)
pod := &corev1.Pod{}
json.Unmarshal(admReq.Request.Object.Raw, pod)
// Verify each container image
for _, container := range pod.Spec.Containers {
cmd := exec.Command("atcr-verify", container.Image,
"--policy", "/etc/atcr/trust-policy.yaml",
"--quiet")
if err := cmd.Run(); err != nil {
// Verification failed
admResp := admissionv1.AdmissionReview{
Response: &admissionv1.AdmissionResponse{
UID: admReq.Request.UID,
Allowed: false,
Result: &metav1.Status{
Message: fmt.Sprintf("Image %s failed signature verification", container.Image),
},
},
}
json.NewEncoder(w).Encode(admResp)
return
}
}
// All images verified
admResp := admissionv1.AdmissionReview{
Response: &admissionv1.AdmissionResponse{
UID: admReq.Request.UID,
Allowed: true,
},
}
json.NewEncoder(w).Encode(admResp)
}
```
### Pre-Pull Verification
**Systemd service:**
```ini
# /etc/systemd/system/myapp.service
[Unit]
Description=My Application
After=docker.service
[Service]
Type=oneshot
ExecStartPre=/usr/local/bin/atcr-verify atcr.io/myorg/myapp:latest --policy /etc/atcr/policy.yaml
ExecStartPre=/usr/bin/docker pull atcr.io/myorg/myapp:latest
ExecStart=/usr/bin/docker run atcr.io/myorg/myapp:latest
Restart=on-failure
[Install]
WantedBy=multi-user.target
```
**Docker wrapper script:**
```bash
#!/bin/bash
# docker-secure-pull.sh
IMAGE="$1"
# Verify before pulling
if ! atcr-verify "$IMAGE" --policy ~/.atcr/trust-policy.yaml; then
echo "ERROR: Image signature verification failed"
exit 1
fi
# Pull if verified
docker pull "$IMAGE"
```
## Configuration
### Config File
Location: `~/.atcr/config.yaml`
```yaml
# Default trust policy
defaultPolicy: ~/.atcr/trust-policy.yaml
# Cache settings
cache:
enabled: true
directory: ~/.atcr/cache
ttl:
didDocuments: 3600 # 1 hour
commits: 600 # 10 minutes
# Network settings
timeout: 30s
retries: 3
# Output settings
output:
format: text # text, json, quiet
color: auto # auto, always, never
# Registry settings
registries:
atcr.io:
insecure: false
credentialsFile: ~/.docker/config.json
```
### Environment Variables
- `ATCR_CONFIG` - Config file path
- `ATCR_POLICY` - Default trust policy file
- `ATCR_CACHE_DIR` - Cache directory
- `ATCR_OUTPUT` - Output format (text, json, quiet)
- `ATCR_TIMEOUT` - Verification timeout
- `HTTP_PROXY` / `HTTPS_PROXY` - Proxy settings
- `NO_CACHE` - Disable caching
## Library Usage
`atcr-verify` can also be used as a Go library:
```go
import "github.com/atcr-io/atcr/pkg/verify"
func main() {
verifier := verify.NewVerifier(verify.Config{
Policy: policy,
Timeout: 30 * time.Second,
})
result, err := verifier.Verify(ctx, "atcr.io/alice/myapp:latest")
if err != nil {
log.Fatal(err)
}
if !result.Verified {
log.Fatal("Verification failed")
}
fmt.Printf("Verified by %s\n", result.Signature.DID)
}
```
## Performance
### Typical Verification Times
- **First verification:** 500-1000ms
- OCI Referrers API: 50-100ms
- DID resolution: 50-150ms
- PDS query: 100-300ms
- Signature verification: 1-5ms
- **Cached verification:** 50-150ms
- DID document cached
- Signature metadata cached
### Optimization Tips
1. **Enable caching** - DID documents change rarely
2. **Use offline bundles** - For air-gapped environments
3. **Parallel verification** - Verify multiple images concurrently
4. **Local trust policy** - Avoid remote policy fetches
## Troubleshooting
### Verification Fails
```bash
atcr-verify atcr.io/alice/myapp:latest --verbose
```
Common issues:
- **No signature found** - Image not signed, check Referrers API
- **DID resolution failed** - Network issue, check PLC directory
- **PDS unreachable** - Network issue, check PDS endpoint
- **Signature invalid** - Tampering detected or key mismatch
- **Trust policy violation** - DID not in trusted list
### Enable Debug Logging
```bash
ATCR_LOG_LEVEL=debug atcr-verify IMAGE
```
### Clear Cache
```bash
rm -rf ~/.atcr/cache
```
## See Also
- [ATProto Signatures](./ATPROTO_SIGNATURES.md) - How ATProto signing works
- [Integration Strategy](./INTEGRATION_STRATEGY.md) - Overview of integration approaches
- [Signature Integration](./SIGNATURE_INTEGRATION.md) - Tool-specific guides
- [Trust Policy Examples](../examples/verification/trust-policy.yaml)

501
docs/ATPROTO_SIGNATURES.md Normal file
View File

@@ -0,0 +1,501 @@
# ATProto Signatures for Container Images
## Overview
ATCR container images are **already cryptographically signed** through ATProto's repository commit system. Every manifest stored in a user's PDS is signed with the user's ATProto signing key, providing cryptographic proof of authorship and integrity.
This document explains:
- How ATProto signing works
- Why additional signing tools aren't needed
- How to bridge ATProto signatures to the OCI/ORAS ecosystem
- Trust model and security considerations
## Key Insight: Manifests Are Already Signed
When you push an image to ATCR:
```bash
docker push atcr.io/alice/myapp:latest
```
The following happens:
1. **AppView stores manifest** as an `io.atcr.manifest` record in alice's PDS
2. **PDS creates repository commit** containing the manifest record
3. **PDS signs the commit** with alice's ATProto signing key (ECDSA K-256)
4. **Signature is stored** in the repository commit object
**Result:** The manifest is cryptographically signed with alice's private key, and anyone can verify it using alice's public key from her DID document.
## ATProto Signing Mechanism
### Repository Commit Signing
ATProto uses a Merkle Search Tree (MST) to store records, and every modification creates a signed commit:
```
┌─────────────────────────────────────────────┐
│ Repository Commit │
├─────────────────────────────────────────────┤
│ DID: did:plc:alice123 │
│ Version: 3jzfkjqwdwa2a │
│ Previous: bafyreig7... (parent commit) │
│ Data CID: bafyreih8... (MST root) │
│ ┌───────────────────────────────────────┐ │
│ │ Signature (ECDSA K-256 + SHA-256) │ │
│ │ Signed with: alice's private key │ │
│ │ Value: 0x3045022100... (DER format) │ │
│ └───────────────────────────────────────┘ │
└─────────────────────────────────────────────┘
┌─────────────────────┐
│ Merkle Search Tree │
│ (contains records) │
└─────────────────────┘
┌────────────────────────────┐
│ io.atcr.manifest record │
│ Repository: myapp │
│ Digest: sha256:abc123... │
│ Layers: [...] │
└────────────────────────────┘
```
### Signature Algorithm
**Algorithm:** ECDSA with K-256 (secp256k1) curve + SHA-256 hash
- **Curve:** secp256k1 (same as Bitcoin, Ethereum)
- **Hash:** SHA-256
- **Format:** DER-encoded signature bytes
- **Variant:** "low-S" signatures (per BIP-0062)
**Signing process:**
1. Serialize commit data as DAG-CBOR
2. Hash with SHA-256
3. Sign hash with ECDSA K-256 private key
4. Store signature in commit object
### Public Key Distribution
Public keys are distributed via DID documents, accessible through DID resolution:
**DID Resolution Flow:**
```
did:plc:alice123
Query PLC directory: https://plc.directory/did:plc:alice123
DID Document:
{
"@context": ["https://www.w3.org/ns/did/v1"],
"id": "did:plc:alice123",
"verificationMethod": [{
"id": "did:plc:alice123#atproto",
"type": "Multikey",
"controller": "did:plc:alice123",
"publicKeyMultibase": "zQ3shokFTS3brHcDQrn82RUDfCZESWL1ZdCEJwekUDdo1Ko4Z"
}],
"service": [{
"id": "#atproto_pds",
"type": "AtprotoPersonalDataServer",
"serviceEndpoint": "https://bsky.social"
}]
}
```
**Public key format:**
- **Encoding:** Multibase (base58btc with `z` prefix)
- **Codec:** Multicodec `0xE701` for K-256 keys
- **Example:** `zQ3sh...` decodes to 33-byte compressed public key
## Verification Process
To verify a manifest's signature:
### Step 1: Resolve Image to Manifest Digest
```bash
# Get manifest digest
DIGEST=$(crane digest atcr.io/alice/myapp:latest)
# Result: sha256:abc123...
```
### Step 2: Fetch Manifest Record from PDS
```bash
# Extract repository name from image reference
REPO="myapp"
# Query PDS for manifest record
curl "https://bsky.social/xrpc/com.atproto.repo.listRecords?\
repo=did:plc:alice123&\
collection=io.atcr.manifest&\
limit=100" | jq -r '.records[] | select(.value.digest == "sha256:abc123...")'
```
Response includes:
```json
{
"uri": "at://did:plc:alice123/io.atcr.manifest/abc123",
"cid": "bafyreig7...",
"value": {
"$type": "io.atcr.manifest",
"repository": "myapp",
"digest": "sha256:abc123...",
...
}
}
```
### Step 3: Fetch Repository Commit
```bash
# Get current repository state
curl "https://bsky.social/xrpc/com.atproto.sync.getRepo?\
did=did:plc:alice123" --output repo.car
# Extract commit from CAR file (requires ATProto tools)
# Commit includes signature over repository state
```
### Step 4: Resolve DID to Public Key
```bash
# Resolve DID document
curl "https://plc.directory/did:plc:alice123" | jq -r '.verificationMethod[0].publicKeyMultibase'
# Result: zQ3shokFTS3brHcDQrn82RUDfCZESWL1ZdCEJwekUDdo1Ko4Z
```
### Step 5: Verify Signature
```go
// Pseudocode for verification
import "github.com/bluesky-social/indigo/atproto/crypto"
// 1. Parse commit
commit := parseCommitFromCAR(repoCAR)
// 2. Extract signature bytes
signature := commit.Sig
// 3. Get bytes that were signed
bytesToVerify := commit.Unsigned().BytesForSigning()
// 4. Decode public key from multibase
pubKey := decodeMultibasePublicKey(publicKeyMultibase)
// 5. Verify ECDSA signature
valid := crypto.VerifySignature(pubKey, bytesToVerify, signature)
```
### Step 6: Verify Manifest Integrity
```bash
# Verify the manifest record's CID matches the content
# CID is content-addressed, so tampering changes the CID
```
## Bridging to OCI/ORAS Ecosystem
While ATProto signatures are cryptographically sound, the OCI ecosystem doesn't understand ATProto records. To make signatures discoverable, we create **ORAS signature artifacts** that reference the ATProto signature.
### ORAS Signature Artifact Format
```json
{
"schemaVersion": 2,
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"artifactType": "application/vnd.atproto.signature.v1+json",
"config": {
"mediaType": "application/vnd.oci.empty.v1+json",
"digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
"size": 2
},
"subject": {
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"digest": "sha256:abc123...",
"size": 1234
},
"layers": [
{
"mediaType": "application/vnd.atproto.signature.v1+json",
"digest": "sha256:sig789...",
"size": 512,
"annotations": {
"org.opencontainers.image.title": "atproto-signature.json"
}
}
],
"annotations": {
"io.atcr.atproto.did": "did:plc:alice123",
"io.atcr.atproto.pds": "https://bsky.social",
"io.atcr.atproto.recordUri": "at://did:plc:alice123/io.atcr.manifest/abc123",
"io.atcr.atproto.commitCid": "bafyreih8...",
"io.atcr.atproto.signedAt": "2025-10-31T12:34:56.789Z",
"io.atcr.atproto.keyId": "did:plc:alice123#atproto"
}
}
```
**Key elements:**
1. **artifactType**: `application/vnd.atproto.signature.v1+json` - identifies this as an ATProto signature
2. **subject**: Links to the image manifest being signed
3. **layers**: Contains signature metadata blob
4. **annotations**: Quick-access metadata for verification
### Signature Metadata Blob
The layer blob contains detailed verification information:
```json
{
"$type": "io.atcr.atproto.signature",
"version": "1.0",
"subject": {
"digest": "sha256:abc123...",
"mediaType": "application/vnd.oci.image.manifest.v1+json"
},
"atproto": {
"did": "did:plc:alice123",
"handle": "alice.bsky.social",
"pdsEndpoint": "https://bsky.social",
"recordUri": "at://did:plc:alice123/io.atcr.manifest/abc123",
"recordCid": "bafyreig7...",
"commitCid": "bafyreih8...",
"commitRev": "3jzfkjqwdwa2a",
"signedAt": "2025-10-31T12:34:56.789Z"
},
"signature": {
"algorithm": "ECDSA-K256-SHA256",
"keyId": "did:plc:alice123#atproto",
"publicKeyMultibase": "zQ3shokFTS3brHcDQrn82RUDfCZESWL1ZdCEJwekUDdo1Ko4Z"
},
"verification": {
"method": "atproto-repo-commit",
"instructions": "Fetch repository commit from PDS and verify signature using public key from DID document"
}
}
```
### Discovery via Referrers API
ORAS artifacts are discoverable via the OCI Referrers API:
```bash
# Query for signature artifacts
curl "https://atcr.io/v2/alice/myapp/referrers/sha256:abc123?\
artifactType=application/vnd.atproto.signature.v1+json"
```
Response:
```json
{
"schemaVersion": 2,
"mediaType": "application/vnd.oci.image.index.v1+json",
"manifests": [
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"digest": "sha256:sig789...",
"size": 1234,
"artifactType": "application/vnd.atproto.signature.v1+json",
"annotations": {
"io.atcr.atproto.did": "did:plc:alice123",
"io.atcr.atproto.signedAt": "2025-10-31T12:34:56.789Z"
}
}
]
}
```
## Trust Model
### What ATProto Signatures Prove
**Authenticity**: Image was published by the DID owner
**Integrity**: Image manifest hasn't been tampered with since signing
**Non-repudiation**: Only the DID owner could have created this signature
**Timestamp**: When the image was signed (commit timestamp)
### What ATProto Signatures Don't Prove
**Safety**: Image doesn't contain vulnerabilities (use vulnerability scanning)
**DID trustworthiness**: Whether the DID owner is trustworthy (trust policy decision)
**Key security**: Private key wasn't compromised (same limitation as all PKI)
**PDS honesty**: PDS operator serves correct data (verify across multiple sources)
### Trust Dependencies
1. **DID Resolution**: Must correctly resolve DID to public key
- **Mitigation**: Use multiple resolvers, cache DID documents
2. **PDS Availability**: Must query PDS to verify signatures
- **Mitigation**: Embed signature bytes in ORAS blob for offline verification
3. **PDS Honesty**: PDS could serve fake/unsigned records
- **Mitigation**: Signature verification prevents this (can't forge signature)
4. **Key Security**: User's private key could be compromised
- **Mitigation**: Key rotation via DID document updates, short-lived credentials
5. **Algorithm Security**: ECDSA K-256 must remain secure
- **Status**: Well-studied, same as Bitcoin/Ethereum (widely trusted)
### Comparison with Other Signing Systems
| Aspect | ATProto Signatures | Cosign (Keyless) | Notary v2 |
|--------|-------------------|------------------|-----------|
| **Identity** | DID (decentralized) | OIDC (federated) | X.509 (PKI) |
| **Key Management** | PDS signing keys | Ephemeral (Fulcio) | User-managed |
| **Trust Anchor** | DID resolution | Fulcio CA + Rekor | Certificate chain |
| **Transparency Log** | ATProto firehose | Rekor | Optional |
| **Offline Verification** | Limited* | No | Yes |
| **Decentralization** | High | Medium | Low |
| **Complexity** | Low | High | Medium |
*Can be improved by embedding signature bytes in ORAS blob
### Security Considerations
**Threat: Man-in-the-Middle Attack**
- **Attack**: Intercept PDS queries, serve fake records
- **Defense**: TLS for PDS communication, verify signature with public key from DID document
- **Result**: Attacker can't forge signature without private key
**Threat: Compromised PDS**
- **Attack**: PDS operator serves unsigned/fake manifests
- **Defense**: Signature verification fails (PDS can't sign without user's private key)
- **Result**: Protected
**Threat: Key Compromise**
- **Attack**: Attacker steals user's ATProto signing key
- **Defense**: Key rotation via DID document, revoke old keys
- **Result**: Same as any PKI system (rotate keys quickly)
**Threat: Replay Attack**
- **Attack**: Replay old signed manifest to rollback to vulnerable version
- **Defense**: Check commit timestamp, verify commit is in current repository DAG
- **Result**: Protected (commits form immutable chain)
**Threat: DID Takeover**
- **Attack**: Attacker gains control of user's DID (rotation keys)
- **Defense**: Monitor DID document changes, verify key history
- **Result**: Serious but requires compromising rotation keys (harder than signing keys)
## Implementation Strategy
### Automatic Signature Artifact Creation
When AppView stores a manifest in a user's PDS:
1. **Store manifest record** (existing behavior)
2. **Get commit response** with commit CID and revision
3. **Create ORAS signature artifact**:
- Build metadata blob (JSON)
- Upload blob to hold storage
- Create ORAS manifest with subject = image manifest
- Store ORAS manifest (creates referrer link)
### Storage Location
Signature artifacts follow the same pattern as SBOMs:
- **Metadata blobs**: Stored in hold's blob storage
- **ORAS manifests**: Stored in hold's embedded PDS
- **Discovery**: Via OCI Referrers API
### Verification Tools
**Option 1: Custom CLI tool (`atcr-verify`)**
```bash
atcr-verify atcr.io/alice/myapp:latest
# → Queries referrers API
# → Fetches signature metadata
# → Resolves DID → public key
# → Queries PDS for commit
# → Verifies signature
```
**Option 2: Shell script (curl + jq)**
- See `docs/SIGNATURE_INTEGRATION.md` for examples
**Option 3: Kubernetes admission controller**
- Custom webhook that runs verification
- Rejects pods with unsigned/invalid signatures
## Benefits of ATProto Signatures
### Compared to No Signing
**Cryptographic proof** of image authorship
**Tamper detection** for manifests
**Identity binding** via DIDs
**Audit trail** via ATProto repository history
### Compared to Cosign/Notary
**No additional signing required** (already signed by PDS)
**Decentralized identity** (DIDs, not CAs)
**Simpler infrastructure** (no Fulcio, no Rekor, no TUF)
**Consistent with ATCR's architecture** (ATProto-native)
**Lower operational overhead** (reuse existing PDS infrastructure)
### Trade-offs
⚠️ **Custom verification tools required** (standard tools won't work)
⚠️ **Online verification preferred** (need to query PDS)
⚠️ **Different trust model** (trust DIDs, not CAs)
⚠️ **Ecosystem maturity** (newer approach, less tooling)
## Future Enhancements
### Short-term
1. **Offline verification**: Embed signature bytes in ORAS blob
2. **Multi-PDS verification**: Check signature across multiple PDSs
3. **Key rotation support**: Handle historical key validity
### Medium-term
4. **Timestamp service**: RFC 3161 timestamps for long-term validity
5. **Multi-signature**: Require N signatures from M DIDs
6. **Transparency log integration**: Record verifications in public log
### Long-term
7. **IANA registration**: Register `application/vnd.atproto.signature.v1+json`
8. **Standards proposal**: ATProto signature spec to ORAS/OCI
9. **Cross-ecosystem bridges**: Convert to Cosign/Notary formats
## Conclusion
ATCR images are already cryptographically signed through ATProto's repository commit system. By creating ORAS signature artifacts that reference these existing signatures, we can:
- ✅ Make signatures discoverable to OCI tooling
- ✅ Maintain ATProto as the source of truth
- ✅ Provide verification tools for users and clusters
- ✅ Avoid duplicating signing infrastructure
This approach leverages ATProto's strengths (decentralized identity, built-in signing) while bridging to the OCI ecosystem through standard ORAS artifacts.
## References
### ATProto Specifications
- [ATProto Repository Specification](https://atproto.com/specs/repository)
- [ATProto Data Model](https://atproto.com/specs/data-model)
- [ATProto DID Methods](https://atproto.com/specs/did)
### OCI/ORAS Specifications
- [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec)
- [OCI Referrers API](https://github.com/opencontainers/distribution-spec/blob/main/spec.md#listing-referrers)
- [ORAS Artifacts](https://oras.land/docs/)
### Cryptography
- [ECDSA (secp256k1)](https://en.bitcoin.it/wiki/Secp256k1)
- [Multibase Encoding](https://github.com/multiformats/multibase)
- [Multicodec](https://github.com/multiformats/multicodec)
### Related Documentation
- [SBOM Scanning](./SBOM_SCANNING.md) - Similar ORAS artifact pattern
- [Signature Integration](./SIGNATURE_INTEGRATION.md) - Practical integration examples

728
docs/DEVELOPMENT.md Normal file
View File

@@ -0,0 +1,728 @@
# Development Workflow for ATCR
## The Problem
**Current development cycle with Docker:**
1. Edit CSS, JS, template, or Go file
2. Run `docker compose build` (rebuilds entire image)
3. Run `docker compose up` (restart container)
4. Wait **2-3 minutes** for changes to appear
5. Test, find issue, repeat...
**Why it's slow:**
- All assets embedded via `embed.FS` at compile time
- Multi-stage Docker build compiles everything from scratch
- No development mode exists
- Final image uses `scratch` base (no tools, no hot reload)
## The Solution
**Development setup combining:**
1. **Dockerfile.devel** - Development-focused container (golang base, not scratch)
2. **Volume mounts** - Live code editing (changes appear instantly in container)
3. **DirFS** - Skip embed, read templates/CSS/JS from filesystem
4. **Air** - Auto-rebuild on Go code changes
**Results:**
- CSS/JS/Template changes: **Instant** (0 seconds, just refresh browser)
- Go code changes: **2-5 seconds** (vs 2-3 minutes)
- Production builds: **Unchanged** (still optimized with embed.FS)
## How It Works
### Architecture Flow
```
┌─────────────────────────────────────────────────────┐
│ Your Editor (VSCode, etc) │
│ Edit: style.css, app.js, *.html, *.go files │
└─────────────────┬───────────────────────────────────┘
│ (files saved to disk)
┌─────────────────────────────────────────────────────┐
│ Volume Mount (docker-compose.dev.yml) │
│ volumes: │
│ - .:/app (entire codebase mounted) │
└─────────────────┬───────────────────────────────────┘
│ (changes appear instantly in container)
┌─────────────────────────────────────────────────────┐
│ Container (golang:1.25.2 base, has all tools) │
│ │
│ ┌──────────────────────────────────────┐ │
│ │ Air (hot reload tool) │ │
│ │ Watches: *.go, *.html, *.css, *.js │ │
│ │ │ │
│ │ On change: │ │
│ │ - *.go → rebuild binary (2-5s) │ │
│ │ - templates/css/js → restart only │ │
│ └──────────────────────────────────────┘ │
│ │ │
│ ▼ │
│ ┌──────────────────────────────────────┐ │
│ │ ATCR AppView (ATCR_DEV_MODE=true) │ │
│ │ │ │
│ │ ui.go checks DEV_MODE: │ │
│ │ if DEV_MODE: │ │
│ │ templatesFS = os.DirFS("...") │ │
│ │ staticFS = os.DirFS("...") │ │
│ │ else: │ │
│ │ use embed.FS (production) │ │
│ │ │ │
│ │ Result: Reads from mounted files │ │
│ └──────────────────────────────────────┘ │
└─────────────────────────────────────────────────────┘
```
### Change Scenarios
#### Scenario 1: Edit CSS/JS/Templates
```
1. Edit pkg/appview/static/css/style.css in VSCode
2. Save file
3. Change appears in container via volume mount (instant)
4. App uses os.DirFS → reads new file from disk (instant)
5. Refresh browser → see changes
```
**Time:** **Instant** (0 seconds)
**No rebuild, no restart!**
#### Scenario 2: Edit Go Code
```
1. Edit pkg/appview/handlers/home.go
2. Save file
3. Air detects .go file change
4. Air runs: go build -o ./tmp/atcr-appview ./cmd/appview
5. Air kills old process and starts new binary
6. App runs with new code
```
**Time:** **2-5 seconds**
**Fast incremental build!**
## Implementation
### Step 1: Create Dockerfile.devel
Create `Dockerfile.devel` in project root:
```dockerfile
# Development Dockerfile with hot reload support
FROM golang:1.25.2-trixie
# Install Air for hot reload
RUN go install github.com/cosmtrek/air@latest
# Install SQLite (required for CGO in ATCR)
RUN apt-get update && apt-get install -y \
sqlite3 \
libsqlite3-dev \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
# Copy dependency files and download (cached layer)
COPY go.mod go.sum ./
RUN go mod download
# Note: Source code comes from volume mount
# (no COPY . . needed - that's the whole point!)
# Air will handle building and running
CMD ["air", "-c", ".air.toml"]
```
### Step 2: Create docker-compose.dev.yml
Create `docker-compose.dev.yml` in project root:
```yaml
version: '3.8'
services:
atcr-appview:
build:
context: .
dockerfile: Dockerfile.devel
volumes:
# Mount entire codebase (live editing)
- .:/app
# Cache Go modules (faster rebuilds)
- go-cache:/go/pkg/mod
# Persist SQLite database
- atcr-ui-dev:/var/lib/atcr
environment:
# Enable development mode (uses os.DirFS)
ATCR_DEV_MODE: "true"
# AppView configuration
ATCR_HTTP_ADDR: ":5000"
ATCR_BASE_URL: "http://localhost:5000"
ATCR_DEFAULT_HOLD_DID: "did:web:hold01.atcr.io"
# Database
ATCR_UI_DATABASE_PATH: "/var/lib/atcr/ui.db"
# Auth
ATCR_AUTH_KEY_PATH: "/var/lib/atcr/auth/private-key.pem"
# UI
ATCR_UI_ENABLED: "true"
# Jetstream (optional)
# JETSTREAM_URL: "wss://jetstream2.us-east.bsky.network/subscribe"
# ATCR_BACKFILL_ENABLED: "false"
ports:
- "5000:5000"
networks:
- atcr-dev
# Add other services as needed (postgres, hold, etc)
# atcr-hold:
# ...
networks:
atcr-dev:
driver: bridge
volumes:
go-cache:
atcr-ui-dev:
```
### Step 3: Create .air.toml
Create `.air.toml` in project root:
```toml
# Air configuration for hot reload
# https://github.com/cosmtrek/air
root = "."
testdata_dir = "testdata"
tmp_dir = "tmp"
[build]
# Arguments to pass to binary (AppView needs "serve")
args_bin = ["serve"]
# Where to output the built binary
bin = "./tmp/atcr-appview"
# Build command
cmd = "go build -o ./tmp/atcr-appview ./cmd/appview"
# Delay before rebuilding (ms) - debounce rapid saves
delay = 1000
# Directories to exclude from watching
exclude_dir = [
"tmp",
"vendor",
"bin",
".git",
"node_modules",
"testdata"
]
# Files to exclude from watching
exclude_file = []
# Regex patterns to exclude
exclude_regex = ["_test\\.go"]
# Don't rebuild if file content unchanged
exclude_unchanged = false
# Follow symlinks
follow_symlink = false
# Full command to run (leave empty to use cmd + bin)
full_bin = ""
# Directories to include (empty = all)
include_dir = []
# File extensions to watch
include_ext = ["go", "html", "css", "js"]
# Specific files to watch
include_file = []
# Delay before killing old process (s)
kill_delay = "0s"
# Log file for build errors
log = "build-errors.log"
# Use polling instead of fsnotify (for Docker/VM)
poll = false
poll_interval = 0
# Rerun binary if it exits
rerun = false
rerun_delay = 500
# Send interrupt signal instead of kill
send_interrupt = false
# Stop on build error
stop_on_error = false
[color]
# Colorize output
app = ""
build = "yellow"
main = "magenta"
runner = "green"
watcher = "cyan"
[log]
# Show only app logs (not build logs)
main_only = false
# Add timestamp to logs
time = false
[misc]
# Clean tmp directory on exit
clean_on_exit = false
[screen]
# Clear screen on rebuild
clear_on_rebuild = false
# Keep scrollback
keep_scroll = true
```
### Step 4: Modify pkg/appview/ui.go
Add conditional filesystem loading to `pkg/appview/ui.go`:
```go
package appview
import (
"embed"
"html/template"
"io/fs"
"log"
"net/http"
"os"
)
// Embedded assets (used in production)
//go:embed templates/**/*.html
var embeddedTemplatesFS embed.FS
//go:embed static
var embeddedStaticFS embed.FS
// Actual filesystems used at runtime (conditional)
var templatesFS fs.FS
var staticFS fs.FS
func init() {
// Development mode: read from filesystem for instant updates
if os.Getenv("ATCR_DEV_MODE") == "true" {
log.Println("🔧 DEV MODE: Using filesystem for templates and static assets")
templatesFS = os.DirFS("pkg/appview/templates")
staticFS = os.DirFS("pkg/appview/static")
} else {
// Production mode: use embedded assets
log.Println("📦 PRODUCTION MODE: Using embedded assets")
templatesFS = embeddedTemplatesFS
staticFS = embeddedStaticFS
}
}
// Templates returns parsed HTML templates
func Templates() *template.Template {
tmpl, err := template.ParseFS(templatesFS, "templates/**/*.html")
if err != nil {
log.Fatalf("Failed to parse templates: %v", err)
}
return tmpl
}
// StaticHandler returns a handler for static files
func StaticHandler() http.Handler {
sub, err := fs.Sub(staticFS, "static")
if err != nil {
log.Fatalf("Failed to create static sub-filesystem: %v", err)
}
return http.FileServer(http.FS(sub))
}
```
**Important:** Update the `Templates()` function to NOT cache templates in dev mode:
```go
// Templates returns parsed HTML templates
func Templates() *template.Template {
// In dev mode, reparse templates on every request (instant updates)
// In production, this could be cached
tmpl, err := template.ParseFS(templatesFS, "templates/**/*.html")
if err != nil {
log.Fatalf("Failed to parse templates: %v", err)
}
return tmpl
}
```
If you're caching templates, wrap it with a dev mode check:
```go
var templateCache *template.Template
func Templates() *template.Template {
// Development: reparse every time (instant updates)
if os.Getenv("ATCR_DEV_MODE") == "true" {
tmpl, err := template.ParseFS(templatesFS, "templates/**/*.html")
if err != nil {
log.Printf("Template parse error: %v", err)
return template.New("error")
}
return tmpl
}
// Production: use cached templates
if templateCache == nil {
tmpl, err := template.ParseFS(templatesFS, "templates/**/*.html")
if err != nil {
log.Fatalf("Failed to parse templates: %v", err)
}
templateCache = tmpl
}
return templateCache
}
```
### Step 5: Add to .gitignore
Add Air's temporary directory to `.gitignore`:
```
# Air hot reload
tmp/
build-errors.log
```
## Usage
### Starting Development Environment
```bash
# Build and start dev container
docker compose -f docker-compose.dev.yml up --build
# Or run in background
docker compose -f docker-compose.dev.yml up -d
# View logs
docker compose -f docker-compose.dev.yml logs -f atcr-appview
```
You should see Air starting:
```
atcr-appview | 🔧 DEV MODE: Using filesystem for templates and static assets
atcr-appview |
atcr-appview | __ _ ___
atcr-appview | / /\ | | | |_)
atcr-appview | /_/--\ |_| |_| \_ , built with Go
atcr-appview |
atcr-appview | watching .
atcr-appview | !exclude tmp
atcr-appview | building...
atcr-appview | running...
```
### Development Workflow
#### 1. Edit Templates/CSS/JS (Instant Updates)
```bash
# Edit any template, CSS, or JS file
vim pkg/appview/templates/pages/home.html
vim pkg/appview/static/css/style.css
vim pkg/appview/static/js/app.js
# Save file → changes appear instantly
# Just refresh browser (Cmd+R / Ctrl+R)
```
**No rebuild, no restart!** Air might restart the app, but it's instant since no compilation is needed.
#### 2. Edit Go Code (Fast Rebuild)
```bash
# Edit any Go file
vim pkg/appview/handlers/home.go
# Save file → Air detects change
# Air output shows:
# building...
# build successful in 2.3s
# restarting...
# Refresh browser to see changes
```
**2-5 second rebuild** instead of 2-3 minutes!
### Stopping Development Environment
```bash
# Stop containers
docker compose -f docker-compose.dev.yml down
# Stop and remove volumes (fresh start)
docker compose -f docker-compose.dev.yml down -v
```
## Production Builds
**Production builds are completely unchanged:**
```bash
# Production uses normal Dockerfile (embed.FS, scratch base)
docker compose build
# Or specific service
docker compose build atcr-appview
# Run production
docker compose up
```
**Why it works:**
- Production doesn't set `ATCR_DEV_MODE=true`
- `ui.go` defaults to embedded assets when env var is unset
- Production Dockerfile still uses multi-stage build to scratch
- No development dependencies in production image
## Comparison
| Change Type | Before (docker compose) | After (dev setup) | Improvement |
|-------------|------------------------|-------------------|-------------|
| Edit CSS | 2-3 minutes | **Instant (0s)** | ♾x faster |
| Edit JS | 2-3 minutes | **Instant (0s)** | ♾x faster |
| Edit Template | 2-3 minutes | **Instant (0s)** | ♾x faster |
| Edit Go Code | 2-3 minutes | **2-5 seconds** | 24-90x faster |
| Production Build | Same | **Same** | No change |
## Advanced: Local Development (No Docker)
For even faster development, run locally without Docker:
```bash
# Set environment variables
export ATCR_DEV_MODE=true
export ATCR_HTTP_ADDR=:5000
export ATCR_BASE_URL=http://localhost:5000
export ATCR_DEFAULT_HOLD_DID=did:web:hold01.atcr.io
export ATCR_UI_DATABASE_PATH=/tmp/atcr-ui.db
export ATCR_AUTH_KEY_PATH=/tmp/atcr-auth-key.pem
export ATCR_UI_ENABLED=true
# Or use .env file
source .env.appview
# Run with Air
air -c .air.toml
# Or run directly (no hot reload)
go run ./cmd/appview serve
```
**Advantages:**
- Even faster (no Docker overhead)
- Native debugging with delve
- Direct filesystem access
- Full IDE integration
**Disadvantages:**
- Need to manage dependencies locally (SQLite, etc)
- May differ from production environment
## Troubleshooting
### Air Not Rebuilding
**Problem:** Air doesn't detect changes
**Solution:**
```bash
# Check if Air is actually running
docker compose -f docker-compose.dev.yml logs atcr-appview
# Check .air.toml include_ext includes your file type
# Default: ["go", "html", "css", "js"]
# Restart container
docker compose -f docker-compose.dev.yml restart atcr-appview
```
### Templates Not Updating
**Problem:** Template changes don't appear
**Solution:**
```bash
# Check ATCR_DEV_MODE is set
docker compose -f docker-compose.dev.yml exec atcr-appview env | grep DEV_MODE
# Should output: ATCR_DEV_MODE=true
# Check templates aren't cached (see Step 4 above)
# Templates() should reparse in dev mode
```
### Go Build Failing
**Problem:** Air shows build errors
**Solution:**
```bash
# Check build logs
docker compose -f docker-compose.dev.yml logs atcr-appview
# Or check build-errors.log in container
docker compose -f docker-compose.dev.yml exec atcr-appview cat build-errors.log
# Fix the Go error, save file, Air will retry
```
### Volume Mount Not Working
**Problem:** Changes don't appear in container
**Solution:**
```bash
# Verify volume mount
docker compose -f docker-compose.dev.yml exec atcr-appview ls -la /app
# Should show your source files
# On Windows/Mac, check Docker Desktop file sharing settings
# Settings → Resources → File Sharing → add project directory
```
### Permission Errors
**Problem:** Cannot write to /var/lib/atcr
**Solution:**
```bash
# In Dockerfile.devel, add:
RUN mkdir -p /var/lib/atcr && chmod 777 /var/lib/atcr
# Or use named volumes (already in docker-compose.dev.yml)
volumes:
- atcr-ui-dev:/var/lib/atcr
```
### Slow Builds Even with Air
**Problem:** Air rebuilds slowly
**Solution:**
```bash
# Use Go module cache volume (already in docker-compose.dev.yml)
volumes:
- go-cache:/go/pkg/mod
# Increase Air delay to debounce rapid saves
# In .air.toml:
delay = 2000 # 2 seconds
# Or check if CGO is slowing builds
# AppView needs CGO for SQLite, but you can try:
CGO_ENABLED=0 go build # (won't work for ATCR, but good to know)
```
## Tips & Tricks
### Browser Auto-Reload (LiveReload)
Add LiveReload for automatic browser refresh:
```bash
# Install browser extension
# Chrome: https://chrome.google.com/webstore/detail/livereload
# Firefox: https://addons.mozilla.org/en-US/firefox/addon/livereload-web-extension/
# Add livereload to .air.toml (future Air feature)
# Or use a separate tool like browsersync
```
### Database Resets
Development database is in a named volume:
```bash
# Reset database (fresh start)
docker compose -f docker-compose.dev.yml down -v
docker compose -f docker-compose.dev.yml up
# Or delete specific volume
docker volume rm atcr_atcr-ui-dev
```
### Multiple Environments
Run dev and production side-by-side:
```bash
# Development on port 5000
docker compose -f docker-compose.dev.yml up -d
# Production on port 5001
docker compose up -d
# Now you can compare behavior
```
### Debugging with Delve
Add delve to Dockerfile.devel:
```dockerfile
RUN go install github.com/go-delve/delve/cmd/dlv@latest
# Change CMD to use delve
CMD ["dlv", "debug", "./cmd/appview", "--headless", "--listen=:2345", "--api-version=2", "--accept-multiclient", "--", "serve"]
```
Then connect with VSCode or GoLand.
## Summary
**Development Setup (One-Time):**
1. Create `Dockerfile.devel`
2. Create `docker-compose.dev.yml`
3. Create `.air.toml`
4. Modify `pkg/appview/ui.go` for conditional DirFS
5. Add `tmp/` to `.gitignore`
**Daily Development:**
```bash
# Start
docker compose -f docker-compose.dev.yml up
# Edit files in your editor
# Changes appear instantly (CSS/JS/templates)
# Or in 2-5 seconds (Go code)
# Stop
docker compose -f docker-compose.dev.yml down
```
**Production (Unchanged):**
```bash
docker compose build
docker compose up
```
**Result:** 100x faster development iteration! 🚀

756
docs/HOLD_AS_CA.md Normal file
View File

@@ -0,0 +1,756 @@
# Hold-as-Certificate-Authority Architecture
## ⚠️ Important Notice
This document describes an **optional enterprise feature** for X.509 PKI compliance. The hold-as-CA approach introduces **centralization trade-offs** that contradict ATProto's decentralized philosophy.
**Default Recommendation:** Use [plugin-based integration](./INTEGRATION_STRATEGY.md) instead. Only implement hold-as-CA if your organization has specific X.509 PKI compliance requirements.
## Overview
The hold-as-CA architecture allows ATCR to generate Notation/Notary v2-compatible signatures by having hold services act as Certificate Authorities that issue X.509 certificates for users.
### The Problem
- **ATProto signatures** use K-256 (secp256k1) elliptic curve
- **Notation** only supports P-256, P-384, P-521 elliptic curves
- **Cannot convert** K-256 signatures to P-256 (different cryptographic curves)
- **Must re-sign** with P-256 keys for Notation compatibility
### The Solution
Hold services act as trusted Certificate Authorities (CAs):
1. User pushes image → Manifest signed by PDS with K-256 (ATProto)
2. Hold verifies ATProto signature is valid
3. Hold generates ephemeral P-256 key pair for user
4. Hold issues X.509 certificate to user's DID
5. Hold signs manifest with P-256 key
6. Hold creates Notation signature envelope (JWS format)
7. Stores both ATProto and Notation signatures
**Result:** Images have two signatures:
- **ATProto signature** (K-256) - Decentralized, DID-based
- **Notation signature** (P-256) - Centralized, X.509 PKI
## Architecture
### Certificate Chain
```
Hold Root CA Certificate (self-signed, P-256)
└── User Certificate (issued to DID, P-256)
└── Image Manifest Signature
```
**Hold Root CA:**
```
Subject: CN=ATCR Hold CA - did:web:hold01.atcr.io
Issuer: Self (self-signed)
Key Usage: Digital Signature, Certificate Sign
Basic Constraints: CA=true, pathLen=1
Algorithm: ECDSA P-256
Validity: 10 years
```
**User Certificate:**
```
Subject: CN=did:plc:alice123
SAN: URI:did:plc:alice123
Issuer: Hold Root CA
Key Usage: Digital Signature
Extended Key Usage: Code Signing
Algorithm: ECDSA P-256
Validity: 24 hours (short-lived)
```
### Push Flow
```
┌──────────────────────────────────────────────────────┐
│ 1. User: docker push atcr.io/alice/myapp:latest │
└────────────────────┬─────────────────────────────────┘
┌──────────────────────────────────────────────────────┐
│ 2. AppView stores manifest in alice's PDS │
│ - PDS signs with K-256 (ATProto standard) │
│ - Signature stored in repository commit │
└────────────────────┬─────────────────────────────────┘
┌──────────────────────────────────────────────────────┐
│ 3. AppView requests hold to co-sign │
│ POST /xrpc/io.atcr.hold.coSignManifest │
│ { │
│ "userDid": "did:plc:alice123", │
│ "manifestDigest": "sha256:abc123...", │
│ "atprotoSignature": {...} │
│ } │
└────────────────────┬─────────────────────────────────┘
┌──────────────────────────────────────────────────────┐
│ 4. Hold verifies ATProto signature │
│ a. Resolve alice's DID → public key │
│ b. Fetch commit from alice's PDS │
│ c. Verify K-256 signature │
│ d. Ensure signature is valid │
│ │
│ If verification fails → REJECT │
└────────────────────┬─────────────────────────────────┘
┌──────────────────────────────────────────────────────┐
│ 5. Hold generates ephemeral P-256 key pair │
│ privateKey := ecdsa.GenerateKey(elliptic.P256()) │
└────────────────────┬─────────────────────────────────┘
┌──────────────────────────────────────────────────────┐
│ 6. Hold issues X.509 certificate │
│ Subject: CN=did:plc:alice123 │
│ SAN: URI:did:plc:alice123 │
│ Issuer: Hold CA │
│ NotBefore: now │
│ NotAfter: now + 24 hours │
│ KeyUsage: Digital Signature │
│ ExtKeyUsage: Code Signing │
│ │
│ Sign certificate with hold's CA private key │
└────────────────────┬─────────────────────────────────┘
┌──────────────────────────────────────────────────────┐
│ 7. Hold signs manifest digest │
│ hash := SHA256(manifestBytes) │
│ signature := ECDSA_P256(hash, privateKey) │
└────────────────────┬─────────────────────────────────┘
┌──────────────────────────────────────────────────────┐
│ 8. Hold creates Notation JWS envelope │
│ { │
│ "protected": {...}, │
│ "payload": "base64(manifestDigest)", │
│ "signature": "base64(p256Signature)", │
│ "header": { │
│ "x5c": [ │
│ "base64(userCert)", │
│ "base64(holdCACert)" │
│ ] │
│ } │
│ } │
└────────────────────┬─────────────────────────────────┘
┌──────────────────────────────────────────────────────┐
│ 9. Hold returns signature to AppView │
└────────────────────┬─────────────────────────────────┘
┌──────────────────────────────────────────────────────┐
│ 10. AppView stores Notation signature │
│ - Create ORAS artifact manifest │
│ - Upload JWS envelope as layer blob │
│ - Link to image via subject field │
│ - artifactType: application/vnd.cncf.notary... │
└──────────────────────────────────────────────────────┘
```
### Verification Flow
```
┌──────────────────────────────────────────────────────┐
│ User: notation verify atcr.io/alice/myapp:latest │
└────────────────────┬─────────────────────────────────┘
┌──────────────────────────────────────────────────────┐
│ 1. Notation queries Referrers API │
│ GET /v2/alice/myapp/referrers/sha256:abc123 │
│ → Discovers Notation signature artifact │
└────────────────────┬─────────────────────────────────┘
┌──────────────────────────────────────────────────────┐
│ 2. Notation downloads JWS envelope │
│ - Parses JSON Web Signature │
│ - Extracts certificate chain from x5c header │
└────────────────────┬─────────────────────────────────┘
┌──────────────────────────────────────────────────────┐
│ 3. Notation validates certificate chain │
│ a. User cert issued by Hold CA? ✓ │
│ b. Hold CA cert in trust store? ✓ │
│ c. Certificate not expired? ✓ │
│ d. Key usage correct? ✓ │
│ e. Subject matches policy? ✓ │
└────────────────────┬─────────────────────────────────┘
┌──────────────────────────────────────────────────────┐
│ 4. Notation verifies signature │
│ a. Extract public key from user certificate │
│ b. Compute manifest hash: SHA256(manifest) │
│ c. Verify: ECDSA_P256(hash, sig, pubKey) ✓ │
└────────────────────┬─────────────────────────────────┘
┌──────────────────────────────────────────────────────┐
│ 5. Success: Image verified ✓ │
│ Signed by: did:plc:alice123 (via Hold CA) │
└──────────────────────────────────────────────────────┘
```
## Implementation
### Hold CA Certificate Generation
```go
// cmd/hold/main.go - CA initialization
func (h *Hold) initializeCA(ctx context.Context) error {
caKeyPath := filepath.Join(h.config.DataDir, "ca-private-key.pem")
caCertPath := filepath.Join(h.config.DataDir, "ca-certificate.pem")
// Load existing CA or generate new one
if exists(caKeyPath) && exists(caCertPath) {
h.caKey = loadPrivateKey(caKeyPath)
h.caCert = loadCertificate(caCertPath)
return nil
}
// Generate P-256 key pair for CA
caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
return fmt.Errorf("failed to generate CA key: %w", err)
}
// Create CA certificate template
serialNumber, _ := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
template := &x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
CommonName: fmt.Sprintf("ATCR Hold CA - %s", h.DID),
},
NotBefore: time.Now(),
NotAfter: time.Now().AddDate(10, 0, 0), // 10 years
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
IsCA: true,
MaxPathLen: 1, // Can only issue end-entity certificates
}
// Self-sign
certDER, err := x509.CreateCertificate(
rand.Reader,
template,
template, // Self-signed: issuer = subject
&caKey.PublicKey,
caKey,
)
if err != nil {
return fmt.Errorf("failed to create CA certificate: %w", err)
}
caCert, _ := x509.ParseCertificate(certDER)
// Save to disk (0600 permissions)
savePrivateKey(caKeyPath, caKey)
saveCertificate(caCertPath, caCert)
h.caKey = caKey
h.caCert = caCert
log.Info("Generated new CA certificate", "did", h.DID, "expires", caCert.NotAfter)
return nil
}
```
### User Certificate Issuance
```go
// pkg/hold/cosign.go
func (h *Hold) issueUserCertificate(userDID string) (*x509.Certificate, *ecdsa.PrivateKey, error) {
// Generate ephemeral P-256 key for user
userKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
return nil, nil, fmt.Errorf("failed to generate user key: %w", err)
}
serialNumber, _ := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
// Parse DID for SAN
sanURI, _ := url.Parse(userDID)
template := &x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
CommonName: userDID,
},
URIs: []*url.URL{sanURI}, // Subject Alternative Name
NotBefore: time.Now(),
NotAfter: time.Now().Add(24 * time.Hour), // Short-lived: 24 hours
KeyUsage: x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
BasicConstraintsValid: true,
IsCA: false,
}
// Sign with hold's CA key
certDER, err := x509.CreateCertificate(
rand.Reader,
template,
h.caCert, // Issuer: Hold CA
&userKey.PublicKey,
h.caKey, // Sign with CA private key
)
if err != nil {
return nil, nil, fmt.Errorf("failed to create user certificate: %w", err)
}
userCert, _ := x509.ParseCertificate(certDER)
return userCert, userKey, nil
}
```
### Co-Signing XRPC Endpoint
```go
// pkg/hold/oci/xrpc.go
func (s *Server) handleCoSignManifest(ctx context.Context, req *CoSignRequest) (*CoSignResponse, error) {
// 1. Verify caller is authenticated
did, err := s.auth.VerifyToken(ctx, req.Token)
if err != nil {
return nil, fmt.Errorf("authentication failed: %w", err)
}
// 2. Verify ATProto signature
valid, err := s.verifyATProtoSignature(ctx, req.UserDID, req.ManifestDigest, req.ATProtoSignature)
if err != nil || !valid {
return nil, fmt.Errorf("ATProto signature verification failed: %w", err)
}
// 3. Issue certificate for user
userCert, userKey, err := s.hold.issueUserCertificate(req.UserDID)
if err != nil {
return nil, fmt.Errorf("failed to issue certificate: %w", err)
}
// 4. Sign manifest with user's key
manifestHash := sha256.Sum256([]byte(req.ManifestDigest))
signature, err := ecdsa.SignASN1(rand.Reader, userKey, manifestHash[:])
if err != nil {
return nil, fmt.Errorf("failed to sign manifest: %w", err)
}
// 5. Create JWS envelope
jws, err := s.createJWSEnvelope(signature, userCert, s.hold.caCert, req.ManifestDigest)
if err != nil {
return nil, fmt.Errorf("failed to create JWS: %w", err)
}
return &CoSignResponse{
JWS: jws,
Certificate: encodeCertificate(userCert),
CACertificate: encodeCertificate(s.hold.caCert),
}, nil
}
```
## Trust Model
### Centralization Analysis
**ATProto Model (Decentralized):**
- Each PDS is independent
- User controls which PDS to use
- Trust user's DID, not specific infrastructure
- PDS compromise affects only that PDS's users
- Multiple PDSs provide redundancy
**Hold-as-CA Model (Centralized):**
- Hold acts as single Certificate Authority
- All users must trust hold's CA certificate
- Hold compromise = attacker can issue certificates for ANY user
- Hold becomes single point of failure
- Users depend on hold operator honesty
### What Hold Vouches For
When hold issues a certificate, it attests:
**"I verified that [DID] signed this manifest with ATProto"**
- Hold validated ATProto signature
- Hold confirmed signature matches user's DID
- Hold checked signature at specific time
**"This image is safe"**
- Hold does NOT audit image contents
- Certificate ≠ vulnerability scan
- Signature ≠ security guarantee
**"I control this DID"**
- Hold does NOT control user's DID
- DID ownership is independent
- Hold cannot revoke DIDs
### Threat Model
**Scenario 1: Hold Private Key Compromise**
**Attack:**
- Attacker steals hold's CA private key
- Can issue certificates for any DID
- Can sign malicious images as any user
**Impact:**
- **CRITICAL** - All users affected
- Attacker can impersonate any user
- All signatures become untrustworthy
**Detection:**
- Certificate Transparency logs (if implemented)
- Unusual certificate issuance patterns
- Users report unexpected signatures
**Mitigation:**
- Store CA key in Hardware Security Module (HSM)
- Strict access controls
- Audit logging
- Regular key rotation
**Recovery:**
- Revoke compromised CA certificate
- Generate new CA certificate
- Re-issue all active certificates
- Notify all users
- Update trust stores
---
**Scenario 2: Malicious Hold Operator**
**Attack:**
- Hold operator issues certificates without verifying ATProto signatures
- Hold operator signs malicious images
- Hold operator backdates certificates
**Impact:**
- **HIGH** - Trust model broken
- Users receive signed malicious images
- Difficult to detect without ATProto cross-check
**Detection:**
- Compare Notation signature timestamp with ATProto commit time
- Verify ATProto signature exists independently
- Monitor hold's signing patterns
**Mitigation:**
- Audit trail linking certificates to ATProto signatures
- Public transparency logs
- Multi-signature requirements
- Periodically verify ATProto signatures
**Recovery:**
- Identify malicious certificates
- Revoke hold's CA trust
- Switch to different hold
- Re-verify all images
---
**Scenario 3: Certificate Theft**
**Attack:**
- Attacker steals issued user certificate + private key
- Uses it to sign malicious images
**Impact:**
- **LOW-MEDIUM** - Limited scope
- Affects only specific user/image
- Short validity period (24 hours)
**Detection:**
- Unexpected signature timestamps
- Images signed from unknown locations
**Mitigation:**
- Short certificate validity (24 hours)
- Ephemeral keys (not stored long-term)
- Certificate revocation if detected
**Recovery:**
- Wait for certificate expiration (24 hours)
- Revoke specific certificate
- Investigate compromise source
## Certificate Management
### Expiration Strategy
**Short-Lived Certificates (24 hours):**
**Pros:**
- ✅ Minimal revocation infrastructure needed
- ✅ Compromise window is tiny
- ✅ Automatic cleanup
- ✅ Lower CRL/OCSP overhead
**Cons:**
- ❌ Old images become unverifiable quickly
- ❌ Requires re-signing for historical verification
- ❌ Storage: multiple signatures for same image
**Solution: On-Demand Re-Signing**
```
User pulls old image → Notation verification fails (expired cert)
→ User requests re-signing: POST /xrpc/io.atcr.hold.reSignManifest
→ Hold verifies ATProto signature still valid
→ Hold issues new certificate (24 hours)
→ Hold creates new Notation signature
→ User can verify with fresh certificate
```
### Revocation
**Certificate Revocation List (CRL):**
```
Hold publishes CRL at: https://hold01.atcr.io/ca.crl
Notation configured to check CRL:
{
"trustPolicies": [{
"name": "atcr-images",
"signatureVerification": {
"verificationLevel": "strict",
"override": {
"revocationValidation": "strict"
}
}
}]
}
```
**OCSP (Online Certificate Status Protocol):**
- Hold runs OCSP responder: `https://hold01.atcr.io/ocsp`
- Real-time certificate status checks
- Lower overhead than CRL downloads
**Revocation Triggers:**
- Key compromise detected
- Malicious signing detected
- User request
- DID ownership change
### CA Key Rotation
**Rotation Procedure:**
1. **Generate new CA key pair**
2. **Create new CA certificate**
3. **Cross-sign old CA with new CA** (transition period)
4. **Distribute new CA certificate** to all users
5. **Begin issuing with new CA** for new signatures
6. **Grace period** (30 days): Accept both old and new CA
7. **Retire old CA** after grace period
**Frequency:** Every 2-3 years (longer than short-lived certs)
## Trust Store Distribution
### Problem
Users must add hold's CA certificate to their Notation trust store for verification to work.
### Manual Distribution
```bash
# 1. Download hold's CA certificate
curl https://hold01.atcr.io/ca.crt -o hold01-ca.crt
# 2. Verify fingerprint (out-of-band)
openssl x509 -in hold01-ca.crt -fingerprint -noout
# Compare with published fingerprint
# 3. Add to Notation trust store
notation cert add --type ca --store atcr-holds hold01-ca.crt
```
### Automated Distribution
**ATCR CLI tool:**
```bash
atcr trust add hold01.atcr.io
# → Fetches CA certificate
# → Verifies via HTTPS + DNSSEC
# → Adds to Notation trust store
# → Configures trust policy
atcr trust list
# → Shows trusted holds with fingerprints
```
### System-Wide Trust
**For enterprise deployments:**
**Debian/Ubuntu:**
```bash
# Install CA certificate system-wide
cp hold01-ca.crt /usr/local/share/ca-certificates/atcr-hold01.crt
update-ca-certificates
```
**RHEL/CentOS:**
```bash
cp hold01-ca.crt /etc/pki/ca-trust/source/anchors/
update-ca-trust
```
**Container images:**
```dockerfile
FROM ubuntu:22.04
COPY hold01-ca.crt /usr/local/share/ca-certificates/
RUN update-ca-certificates
```
## Configuration
### Hold Service
**Environment variables:**
```bash
# Enable co-signing feature
HOLD_COSIGN_ENABLED=true
# CA certificate and key paths
HOLD_CA_CERT_PATH=/var/lib/atcr/hold/ca-certificate.pem
HOLD_CA_KEY_PATH=/var/lib/atcr/hold/ca-private-key.pem
# Certificate validity
HOLD_CERT_VALIDITY_HOURS=24
# OCSP responder
HOLD_OCSP_ENABLED=true
HOLD_OCSP_URL=https://hold01.atcr.io/ocsp
# CRL distribution
HOLD_CRL_ENABLED=true
HOLD_CRL_URL=https://hold01.atcr.io/ca.crl
```
### Notation Trust Policy
```json
{
"version": "1.0",
"trustPolicies": [{
"name": "atcr-images",
"registryScopes": ["atcr.io/*/*"],
"signatureVerification": {
"level": "strict",
"override": {
"revocationValidation": "strict"
}
},
"trustStores": ["ca:atcr-holds"],
"trustedIdentities": [
"x509.subject: CN=did:plc:*",
"x509.subject: CN=did:web:*"
]
}]
}
```
## When to Use Hold-as-CA
### ✅ Use When
**Enterprise X.509 PKI Compliance:**
- Organization requires standard X.509 certificates
- Existing security policies mandate PKI
- Audit requirements for certificate chains
- Integration with existing CA infrastructure
**Tool Compatibility:**
- Must use standard Notation without plugins
- Cannot deploy custom verification tools
- Existing tooling expects X.509 signatures
**Centralized Trust Acceptable:**
- Organization already uses centralized trust model
- Hold operator is internal/trusted team
- Centralization risk is acceptable trade-off
### ❌ Don't Use When
**Default Deployment:**
- Most users should use [plugin-based approach](./INTEGRATION_STRATEGY.md)
- Plugins maintain decentralization
- Plugins reuse existing ATProto signatures
**Small Teams / Startups:**
- Certificate management overhead too high
- Don't need X.509 compliance
- Prefer simpler architecture
**Maximum Decentralization Required:**
- Cannot accept hold as single trust point
- Must maintain pure ATProto model
- Centralization contradicts project goals
## Comparison: Hold-as-CA vs. Plugins
| Aspect | Hold-as-CA | Plugin Approach |
|--------|------------|----------------|
| **Standard compliance** | ✅ Full X.509/PKI | ⚠️ Custom verification |
| **Tool compatibility** | ✅ Notation works unchanged | ❌ Requires plugin install |
| **Decentralization** | ❌ Centralized (hold CA) | ✅ Decentralized (DIDs) |
| **ATProto alignment** | ❌ Against philosophy | ✅ ATProto-native |
| **Signature reuse** | ❌ Must re-sign (P-256) | ✅ Reuses ATProto (K-256) |
| **Certificate mgmt** | 🔴 High overhead | 🟢 None |
| **Trust distribution** | 🔴 Must distribute CA cert | 🟢 DID resolution |
| **Hold compromise** | 🔴 All users affected | 🟢 Metadata only |
| **Operational cost** | 🔴 High | 🟢 Low |
| **Use case** | Enterprise PKI | General purpose |
## Recommendations
### Default Approach: Plugins
For most deployments, use plugin-based verification:
- **Ratify plugin** for Kubernetes
- **OPA Gatekeeper provider** for policy enforcement
- **Containerd verifier** for runtime checks
- **atcr-verify CLI** for general purpose
See [Integration Strategy](./INTEGRATION_STRATEGY.md) for details.
### Optional: Hold-as-CA for Enterprise
Only implement hold-as-CA if you have specific requirements:
- Enterprise X.509 PKI mandates
- Cannot use plugins (restricted environments)
- Accept centralization trade-off
**Implement as opt-in feature:**
```bash
# Users explicitly enable co-signing
docker push atcr.io/alice/myapp:latest --sign=notation
# Or via environment variable
export ATCR_ENABLE_COSIGN=true
docker push atcr.io/alice/myapp:latest
```
### Security Best Practices
**If implementing hold-as-CA:**
1. **Store CA key in HSM** - Never on filesystem
2. **Audit all certificate issuance** - Log every cert
3. **Public transparency log** - Publish all certificates
4. **Short certificate validity** - 24 hours max
5. **Monitor unusual patterns** - Alert on anomalies
6. **Regular CA key rotation** - Every 2-3 years
7. **Cross-check ATProto** - Verify both signatures match
8. **Incident response plan** - Prepare for compromise
## See Also
- [ATProto Signatures](./ATPROTO_SIGNATURES.md) - How ATProto signing works
- [Integration Strategy](./INTEGRATION_STRATEGY.md) - Overview of integration approaches
- [Signature Integration](./SIGNATURE_INTEGRATION.md) - Tool-specific integration guides

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,692 @@
# ATCR Signature Verification Integration Strategy
## Overview
This document provides a comprehensive overview of how to integrate ATProto signature verification into various tools and workflows. ATCR uses a layered approach that provides maximum compatibility while maintaining ATProto's decentralized philosophy.
## Architecture Layers
```
┌─────────────────────────────────────────────────────────┐
│ Layer 4: Applications & Workflows │
│ - CI/CD pipelines │
│ - Kubernetes admission control │
│ - Runtime verification │
│ - Security scanning │
└──────────────────────┬──────────────────────────────────┘
┌─────────────────────────────────────────────────────────┐
│ Layer 3: Integration Methods │
│ - Plugins (Ratify, Gatekeeper, Containerd) │
│ - CLI tools (atcr-verify) │
│ - External services (webhooks, APIs) │
│ - (Optional) X.509 certificates (hold-as-CA) │
└──────────────────────┬──────────────────────────────────┘
┌─────────────────────────────────────────────────────────┐
│ Layer 2: Signature Discovery │
│ - OCI Referrers API (GET /v2/.../referrers/...) │
│ - ORAS artifact format │
│ - artifactType: application/vnd.atproto.signature... │
└──────────────────────┬──────────────────────────────────┘
┌─────────────────────────────────────────────────────────┐
│ Layer 1: ATProto Signatures (Foundation) │
│ - Manifests signed by PDS (K-256) │
│ - Signatures in ATProto repository commits │
│ - Public keys in DID documents │
│ - DID-based identity │
└─────────────────────────────────────────────────────────┘
```
## Integration Approaches
### Approach 1: Plugin-Based (RECOMMENDED) ⭐
**Best for:** Kubernetes, standard tooling, production deployments
Integrate through plugin systems of existing tools:
#### Ratify Verifier Plugin
- **Use case:** Kubernetes admission control via Gatekeeper
- **Effort:** 2-3 weeks to build
- **Maturity:** CNCF Sandbox project, growing adoption
- **Benefits:**
- ✅ Standard plugin interface
- ✅ Works with existing Ratify deployments
- ✅ Policy-based enforcement
- ✅ Multi-verifier support (can combine with Notation, Cosign)
**Implementation:**
```go
// Ratify plugin interface
type ReferenceVerifier interface {
VerifyReference(
ctx context.Context,
subjectRef common.Reference,
referenceDesc ocispecs.ReferenceDescriptor,
store referrerStore.ReferrerStore,
) (VerifierResult, error)
}
```
**Deployment:**
```yaml
apiVersion: config.ratify.deislabs.io/v1beta1
kind: Verifier
metadata:
name: atcr-verifier
spec:
name: atproto
artifactType: application/vnd.atproto.signature.v1+json
parameters:
trustedDIDs:
- did:plc:alice123
```
See [Ratify Integration Guide](./SIGNATURE_INTEGRATION.md#ratify-plugin)
---
#### OPA Gatekeeper External Provider
- **Use case:** Kubernetes admission control with OPA policies
- **Effort:** 2-3 weeks to build
- **Maturity:** Very stable, widely adopted
- **Benefits:**
- ✅ Rego-based policies (flexible)
- ✅ External data provider API (standard)
- ✅ Can reuse existing Gatekeeper deployments
**Implementation:**
```go
// External data provider
type Provider struct {
verifier *atproto.Verifier
}
func (p *Provider) Provide(ctx context.Context, req ProviderRequest) (*ProviderResponse, error) {
image := req.Keys["image"]
result, err := p.verifier.Verify(ctx, image)
return &ProviderResponse{
Data: map[string]bool{"verified": result.Verified},
}, nil
}
```
**Policy:**
```rego
package verify
violation[{"msg": msg}] {
container := input.review.object.spec.containers[_]
startswith(container.image, "atcr.io/")
response := external_data({
"provider": "atcr-verifier",
"keys": ["image"],
"values": [container.image]
})
response.verified != true
msg := sprintf("Image %v has no valid ATProto signature", [container.image])
}
```
See [Gatekeeper Integration Guide](./SIGNATURE_INTEGRATION.md#opa-gatekeeper-external-provider)
---
#### Containerd 2.0 Image Verifier Plugin
- **Use case:** Runtime verification at image pull time
- **Effort:** 1-2 weeks to build
- **Maturity:** New in Containerd 2.0 (Nov 2024)
- **Benefits:**
- ✅ Runtime enforcement (pull-time verification)
- ✅ Works for Docker, nerdctl, ctr
- ✅ Transparent to users
- ✅ No Kubernetes required
**Limitation:** CRI plugin integration still maturing
**Implementation:**
```bash
#!/bin/bash
# /usr/local/bin/containerd-verifiers/atcr-verifier
# Binary called by containerd on image pull
# Containerd passes image info via stdin
read -r INPUT
IMAGE=$(echo "$INPUT" | jq -r '.reference')
DIGEST=$(echo "$INPUT" | jq -r '.descriptor.digest')
# Verify signature
if atcr-verify "$IMAGE@$DIGEST" --quiet; then
exit 0 # Verified
else
exit 1 # Failed
fi
```
**Configuration:**
```toml
# /etc/containerd/config.toml
[plugins."io.containerd.image-verifier.v1.bindir"]
bin_dir = "/usr/local/bin/containerd-verifiers"
max_verifiers = 5
per_verifier_timeout = "10s"
```
See [Containerd Integration Guide](./SIGNATURE_INTEGRATION.md#containerd-20)
---
### Approach 2: CLI Tool (RECOMMENDED) ⭐
**Best for:** CI/CD, scripts, general-purpose verification
Use `atcr-verify` CLI tool directly in workflows:
#### Command-Line Verification
```bash
# Basic verification
atcr-verify atcr.io/alice/myapp:latest
# With trust policy
atcr-verify atcr.io/alice/myapp:latest --policy trust-policy.yaml
# JSON output for scripting
atcr-verify atcr.io/alice/myapp:latest --output json
# Quiet mode for exit codes
atcr-verify atcr.io/alice/myapp:latest --quiet && echo "Verified"
```
#### CI/CD Integration
**GitHub Actions:**
```yaml
- name: Verify image
run: atcr-verify ${{ env.IMAGE }} --policy .github/trust-policy.yaml
```
**GitLab CI:**
```yaml
verify:
image: atcr.io/atcr/verify:latest
script:
- atcr-verify ${IMAGE} --policy trust-policy.yaml
```
**Universal Container:**
```bash
docker run --rm atcr.io/atcr/verify:latest verify IMAGE
```
**Benefits:**
- ✅ Works everywhere (not just Kubernetes)
- ✅ Simple integration (single binary)
- ✅ No plugin installation required
- ✅ Offline mode support
See [atcr-verify CLI Documentation](./ATCR_VERIFY_CLI.md)
---
### Approach 3: External Services
**Best for:** Custom admission controllers, API-based verification
Build verification as a service that tools can call:
#### Webhook Service
```go
// HTTP endpoint for verification
func (h *Handler) VerifyImage(w http.ResponseWriter, r *http.Request) {
image := r.URL.Query().Get("image")
result, err := h.verifier.Verify(r.Context(), image)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
json.NewEncoder(w).Encode(map[string]interface{}{
"verified": result.Verified,
"did": result.Signature.DID,
"signedAt": result.Signature.SignedAt,
})
}
```
#### Usage from Kyverno
```yaml
verifyImages:
- imageReferences:
- "atcr.io/*/*"
attestors:
- entries:
- api:
url: http://atcr-verify.kube-system/verify?image={{ image }}
```
**Benefits:**
- ✅ Flexible integration
- ✅ Centralized verification logic
- ✅ Caching and rate limiting
- ✅ Can add additional checks (vulnerability scanning, etc.)
---
### Approach 4: Hold-as-CA (OPTIONAL, ENTERPRISE ONLY)
**Best for:** Enterprise X.509 PKI compliance requirements
⚠️ **WARNING:** This approach introduces centralization trade-offs. Only use if you have specific X.509 compliance requirements.
Hold services act as Certificate Authorities that issue X.509 certificates for users, enabling standard Notation verification.
**When to use:**
- Enterprise requires standard X.509 PKI
- Cannot deploy custom plugins
- Accept centralization trade-off for tool compatibility
**When NOT to use:**
- Default deployments (use plugins instead)
- Maximum decentralization required
- Don't need X.509 compliance
See [Hold-as-CA Architecture](./HOLD_AS_CA.md) for complete details and security implications.
---
## Tool Compatibility Matrix
| Tool | Discover | Verify | Integration Method | Priority | Effort |
|------|----------|--------|-------------------|----------|--------|
| **Kubernetes** | | | | | |
| OPA Gatekeeper | ✅ | ✅ | External provider | **HIGH** | 2-3 weeks |
| Ratify | ✅ | ✅ | Verifier plugin | **HIGH** | 2-3 weeks |
| Kyverno | ✅ | ⚠️ | External service | MEDIUM | 2 weeks |
| Portieris | ❌ | ❌ | N/A (deprecated) | NONE | - |
| **Runtime** | | | | | |
| Containerd 2.0 | ✅ | ✅ | Bindir plugin | **MED-HIGH** | 1-2 weeks |
| CRI-O | ⚠️ | ⚠️ | Upstream contribution | MEDIUM | 3-4 weeks |
| Podman | ⚠️ | ⚠️ | Upstream contribution | MEDIUM | 3-4 weeks |
| **CI/CD** | | | | | |
| GitHub Actions | ✅ | ✅ | Custom action | **HIGH** | 1 week |
| GitLab CI | ✅ | ✅ | Container image | **HIGH** | 1 week |
| Jenkins/CircleCI | ✅ | ✅ | Container image | HIGH | 1 week |
| **Scanners** | | | | | |
| Trivy | ✅ | ❌ | N/A (not verifier) | NONE | - |
| Snyk | ❌ | ❌ | N/A (not verifier) | NONE | - |
| Anchore | ❌ | ❌ | N/A (not verifier) | NONE | - |
| **Registries** | | | | | |
| Harbor | ✅ | ⚠️ | UI integration | LOW | - |
| **OCI Tools** | | | | | |
| ORAS CLI | ✅ | ❌ | Already works | Document | - |
| Notation | ⚠️ | ⚠️ | Hold-as-CA | OPTIONAL | 3-4 weeks |
| Cosign | ❌ | ❌ | Not compatible | NONE | - |
| Crane | ✅ | ❌ | Already works | Document | - |
| Skopeo | ⚠️ | ⚠️ | Upstream contribution | LOW | 3-4 weeks |
**Legend:**
- ✅ Works / Feasible
- ⚠️ Partial / Requires changes
- ❌ Not applicable / Not feasible
---
## Implementation Roadmap
### Phase 1: Foundation (4-5 weeks) ⭐
**Goal:** Core verification capability
1. **atcr-verify CLI tool** (Week 1-2)
- ATProto signature verification
- Trust policy support
- Multiple output formats
- Offline mode
2. **OCI Referrers API** (Week 2-3)
- AppView endpoint implementation
- ORAS artifact serving
- Integration with existing SBOM pattern
3. **CI/CD Container Image** (Week 3)
- Universal verification image
- Documentation for GitHub Actions, GitLab CI
- Example workflows
4. **Documentation** (Week 4-5)
- Integration guides
- Trust policy examples
- Troubleshooting guides
**Deliverables:**
- `atcr-verify` binary (Linux, macOS, Windows)
- `atcr.io/atcr/verify:latest` container image
- OCI Referrers API implementation
- Complete documentation
---
### Phase 2: Kubernetes Integration (3-4 weeks)
**Goal:** Production-ready Kubernetes admission control
5. **OPA Gatekeeper Provider** (Week 1-2)
- External data provider service
- Helm chart for deployment
- Example policies
6. **Ratify Plugin** (Week 2-3)
- Verifier plugin implementation
- Testing with Ratify
- Documentation
7. **Kubernetes Examples** (Week 4)
- Deployment manifests
- Policy examples
- Integration testing
**Deliverables:**
- `atcr-gatekeeper-provider` service
- Ratify plugin binary
- Kubernetes deployment examples
- Production deployment guide
---
### Phase 3: Runtime Verification (2-3 weeks)
**Goal:** Pull-time verification
8. **Containerd Plugin** (Week 1-2)
- Bindir verifier implementation
- Configuration documentation
- Testing with Docker, nerdctl
9. **CRI-O/Podman Integration** (Week 3, optional)
- Upstream contribution (if accepted)
- Policy.json extension
- Documentation
**Deliverables:**
- Containerd verifier binary
- Configuration guides
- Runtime verification examples
---
### Phase 4: Optional Features (2-3 weeks)
**Goal:** Enterprise features (if demanded)
10. **Hold-as-CA** (Week 1-2, optional)
- Certificate generation
- Notation signature creation
- Trust store distribution
- **Only if enterprise customers request**
11. **Advanced Features** (Week 3, as needed)
- Signature transparency log
- Multi-signature support
- Hardware token integration
**Deliverables:**
- Hold co-signing implementation (if needed)
- Advanced feature documentation
---
## Decision Matrix
### Which Integration Approach Should I Use?
```
┌─────────────────────────────────────────────────┐
│ Are you using Kubernetes? │
└───────────────┬─────────────────────────────────┘
┌────────┴────────┐
│ │
YES NO
│ │
↓ ↓
┌──────────────┐ ┌──────────────┐
│ Using │ │ CI/CD │
│ Gatekeeper? │ │ Pipeline? │
└──────┬───────┘ └──────┬───────┘
│ │
┌────┴────┐ ┌────┴────┐
YES NO YES NO
│ │ │ │
↓ ↓ ↓ ↓
External Ratify GitHub Universal
Provider Plugin Action CLI Tool
```
#### Use OPA Gatekeeper Provider if:
- ✅ Already using Gatekeeper
- ✅ Want Rego-based policies
- ✅ Need flexible policy logic
#### Use Ratify Plugin if:
- ✅ Using Ratify (or planning to)
- ✅ Want standard plugin interface
- ✅ Need multi-verifier support (Notation + Cosign + ATProto)
#### Use atcr-verify CLI if:
- ✅ CI/CD pipelines
- ✅ Local development
- ✅ Non-Kubernetes environments
- ✅ Want simple integration
#### Use Containerd Plugin if:
- ✅ Need runtime enforcement
- ✅ Want pull-time verification
- ✅ Using Containerd 2.0+
#### Use Hold-as-CA if:
- ⚠️ Enterprise X.509 PKI compliance required
- ⚠️ Cannot deploy plugins
- ⚠️ Accept centralization trade-off
---
## Best Practices
### 1. Start Simple
Begin with CLI tool integration in CI/CD:
```bash
# Add to .github/workflows/deploy.yml
- run: atcr-verify $IMAGE --policy .github/trust-policy.yaml
```
### 2. Define Trust Policies
Create trust policies early:
```yaml
# trust-policy.yaml
policies:
- name: production
scope: "atcr.io/*/prod-*"
require:
signature: true
trustedDIDs: [did:plc:devops-team]
action: enforce
```
### 3. Progressive Rollout
1. **Week 1:** Add verification to CI/CD (audit mode)
2. **Week 2:** Enforce in CI/CD
3. **Week 3:** Add Kubernetes admission control (audit mode)
4. **Week 4:** Enforce in Kubernetes
### 4. Monitor and Alert
Track verification metrics:
- Verification success/failure rates
- Policy violations
- Signature coverage (% of images signed)
### 5. Plan for Key Rotation
- Document DID key rotation procedures
- Test key rotation in non-production
- Monitor for unexpected key changes
---
## Common Patterns
### Pattern 1: Multi-Layer Defense
```
1. CI/CD verification (atcr-verify)
↓ (blocks unsigned images from being pushed)
2. Kubernetes admission (Gatekeeper/Ratify)
↓ (blocks unsigned images from running)
3. Runtime verification (Containerd plugin)
↓ (blocks unsigned images from being pulled)
```
### Pattern 2: Trust Policy Inheritance
```yaml
# Global policy
trustedDIDs:
- did:plc:security-team # Always trusted
# Environment-specific policies
staging:
trustedDIDs:
- did:plc:developers # Additional trust for staging
production:
trustedDIDs: [] # Only global trust (security-team)
```
### Pattern 3: Offline Verification
```bash
# Build environment (online)
atcr-verify export $IMAGE -o bundle.json
# Air-gapped environment (offline)
atcr-verify $IMAGE --offline --bundle bundle.json
```
---
## Migration Guide
### From Docker Content Trust (DCT)
DCT is deprecated. Migrate to ATCR signatures:
**Old (DCT):**
```bash
export DOCKER_CONTENT_TRUST=1
docker push myimage:latest
```
**New (ATCR):**
```bash
# Signatures created automatically on push
docker push atcr.io/myorg/myimage:latest
# Verify in CI/CD
atcr-verify atcr.io/myorg/myimage:latest
```
### From Cosign
Cosign and ATCR signatures can coexist:
**Dual signing:**
```bash
# Push to ATCR (ATProto signature automatic)
docker push atcr.io/myorg/myimage:latest
# Also sign with Cosign (if needed)
cosign sign atcr.io/myorg/myimage:latest
```
**Verification:**
```bash
# Verify ATProto signature
atcr-verify atcr.io/myorg/myimage:latest
# Or verify Cosign signature
cosign verify atcr.io/myorg/myimage:latest --key cosign.pub
```
---
## Troubleshooting
### Signatures Not Found
**Symptom:** `atcr-verify` reports "no signature found"
**Diagnosis:**
```bash
# Check if Referrers API works
curl "https://atcr.io/v2/OWNER/REPO/referrers/DIGEST"
# Check if signature artifact exists
oras discover atcr.io/OWNER/REPO:TAG
```
**Solutions:**
1. Verify Referrers API is implemented
2. Re-push image to generate signature
3. Check AppView logs for signature creation errors
### DID Resolution Fails
**Symptom:** Cannot resolve DID to public key
**Diagnosis:**
```bash
# Test DID resolution
curl https://plc.directory/did:plc:XXXXXX
# Check DID document has verificationMethod
curl https://plc.directory/did:plc:XXXXXX | jq .verificationMethod
```
**Solutions:**
1. Check internet connectivity
2. Verify DID is valid
3. Ensure DID document contains public key
### Policy Violations
**Symptom:** Verification fails with "trust policy violation"
**Diagnosis:**
```bash
# Verify with verbose output
atcr-verify IMAGE --policy policy.yaml --verbose
```
**Solutions:**
1. Add DID to trustedDIDs list
2. Check signature age vs. maxAge
3. Verify policy scope matches image
---
## See Also
- [ATProto Signatures](./ATPROTO_SIGNATURES.md) - Technical foundation
- [atcr-verify CLI](./ATCR_VERIFY_CLI.md) - CLI tool documentation
- [Signature Integration](./SIGNATURE_INTEGRATION.md) - Tool-specific guides
- [Hold-as-CA](./HOLD_AS_CA.md) - X.509 certificate approach (optional)
- [Examples](../examples/verification/) - Working code examples

File diff suppressed because it is too large Load Diff

500
examples/plugins/README.md Normal file
View File

@@ -0,0 +1,500 @@
# ATProto Signature Verification Plugins and Examples
This directory contains reference implementations and examples for integrating ATProto signature verification into various tools and workflows.
## Overview
ATCR uses ATProto's native signature system to cryptographically sign container images. To integrate signature verification into existing tools (Kubernetes, CI/CD, container runtimes), you can:
1. **Build plugins** for verification frameworks (Ratify, Gatekeeper, Containerd)
2. **Use external services** called by policy engines
3. **Integrate CLI tools** in your CI/CD pipelines
## Directory Structure
```
examples/plugins/
├── README.md # This file
├── ratify-verifier/ # Ratify plugin for Kubernetes
│ ├── README.md
│ ├── verifier.go
│ ├── config.go
│ ├── resolver.go
│ ├── crypto.go
│ ├── Dockerfile
│ ├── deployment.yaml
│ └── verifier-crd.yaml
├── gatekeeper-provider/ # OPA Gatekeeper external provider
│ ├── README.md
│ ├── main.go
│ ├── verifier.go
│ ├── resolver.go
│ ├── crypto.go
│ ├── Dockerfile
│ ├── deployment.yaml
│ └── provider-crd.yaml
├── containerd-verifier/ # Containerd bindir plugin
│ ├── README.md
│ ├── main.go
│ └── Dockerfile
└── ci-cd/ # CI/CD integration examples
├── github-actions.yml
├── gitlab-ci.yml
└── jenkins-pipeline.groovy
```
## Quick Start
### For Kubernetes (Recommended)
**Option A: Ratify Plugin**
```bash
cd ratify-verifier
# Build plugin and deploy to Kubernetes
./build.sh
kubectl apply -f deployment.yaml
kubectl apply -f verifier-crd.yaml
```
**Option B: Gatekeeper Provider**
```bash
cd gatekeeper-provider
# Build and deploy external provider
docker build -t atcr.io/atcr/gatekeeper-provider:latest .
kubectl apply -f deployment.yaml
kubectl apply -f provider-crd.yaml
```
### For CI/CD
**GitHub Actions**
```yaml
# Copy examples/plugins/ci-cd/github-actions.yml to .github/workflows/
cp ci-cd/github-actions.yml ../.github/workflows/verify-and-deploy.yml
```
**GitLab CI**
```yaml
# Copy examples/plugins/ci-cd/gitlab-ci.yml to your repo
cp ci-cd/gitlab-ci.yml ../.gitlab-ci.yml
```
### For Containerd
```bash
cd containerd-verifier
# Build plugin
./build.sh
# Install to containerd plugins directory
sudo cp atcr-verifier /opt/containerd/bin/
```
## Plugins Overview
### Ratify Verifier Plugin ⭐
**Use case:** Kubernetes admission control with OPA Gatekeeper
**How it works:**
1. Gatekeeper receives pod creation request
2. Calls Ratify verification engine
3. Ratify loads ATProto verifier plugin
4. Plugin verifies signature and checks trust policy
5. Returns allow/deny decision to Gatekeeper
**Pros:**
- Standard Ratify plugin interface
- Works with existing Gatekeeper deployments
- Can combine with other verifiers (Notation, Cosign)
- Policy-based enforcement
**Cons:**
- Requires building custom Ratify image
- Plugin must be compiled into image
- More complex deployment
**See:** [ratify-verifier/README.md](./ratify-verifier/README.md)
### Gatekeeper External Provider ⭐
**Use case:** Kubernetes admission control with OPA Gatekeeper
**How it works:**
1. Gatekeeper receives pod creation request
2. Rego policy calls external data provider API
3. Provider verifies ATProto signature
4. Returns verification result to Gatekeeper
5. Rego policy makes allow/deny decision
**Pros:**
- Simpler deployment (separate service)
- Easy to update (no Gatekeeper changes)
- Flexible Rego policies
- Can add caching, rate limiting
**Cons:**
- Additional service to maintain
- Network dependency (provider must be reachable)
- Slightly higher latency
**See:** [gatekeeper-provider/README.md](./gatekeeper-provider/README.md)
### Containerd Bindir Plugin
**Use case:** Runtime-level verification for all images
**How it works:**
1. Containerd pulls image
2. Calls verifier plugin (bindir)
3. Plugin verifies ATProto signature
4. Returns result to containerd
5. Containerd allows/blocks image
**Pros:**
- Works at runtime level (not just Kubernetes)
- CRI-O, Podman support (CRI-compatible)
- No Kubernetes required
- Applies to all images
**Cons:**
- Containerd 2.0+ required
- More complex to debug
- Less flexible policies
**See:** [containerd-verifier/README.md](./containerd-verifier/README.md)
## CI/CD Integration Examples
### GitHub Actions
Complete workflow with:
- Image signature verification
- DID trust checking
- Automated deployment on success
**See:** [ci-cd/github-actions.yml](./ci-cd/github-actions.yml)
### GitLab CI
Pipeline with:
- Multi-stage verification
- Trust policy enforcement
- Manual deployment approval
**See:** [ci-cd/gitlab-ci.yml](./ci-cd/gitlab-ci.yml)
### Jenkins
Declarative pipeline with:
- Signature verification stage
- Deployment gates
- Rollback on failure
**See:** [ci-cd/jenkins-pipeline.groovy](./ci-cd/jenkins-pipeline.groovy) (coming soon)
## Common Components
All plugins share common functionality:
### DID Resolution
Resolve DID to public key:
```go
func ResolveDIDToPublicKey(ctx context.Context, did string) (*PublicKey, error)
```
**Steps:**
1. Fetch DID document from PLC directory or did:web
2. Extract verification method
3. Decode multibase public key
4. Parse as K-256 public key
### PDS Communication
Fetch repository commit:
```go
func FetchCommit(ctx context.Context, pdsEndpoint, did, commitCID string) (*Commit, error)
```
**Steps:**
1. Call `com.atproto.sync.getRepo` XRPC endpoint
2. Parse CAR file response
3. Extract commit with matching CID
4. Return commit data and signature
### Signature Verification
Verify ECDSA K-256 signature:
```go
func VerifySignature(pubKey *PublicKey, commit *Commit) error
```
**Steps:**
1. Extract unsigned commit bytes
2. Hash with SHA-256
3. Verify ECDSA signature over hash
4. Check signature is valid for public key
### Trust Policy
Check if DID is trusted:
```go
func IsTrusted(did string, now time.Time) bool
```
**Steps:**
1. Load trust policy from config
2. Check if DID in trusted list
3. Verify validFrom/expiresAt timestamps
4. Return true if trusted
## Trust Policy Format
All plugins use the same trust policy format:
```yaml
version: 1.0
trustedDIDs:
did:plc:alice123:
name: "Alice (DevOps Lead)"
validFrom: "2024-01-01T00:00:00Z"
expiresAt: null
did:plc:bob456:
name: "Bob (Security Team)"
validFrom: "2024-06-01T00:00:00Z"
expiresAt: "2025-12-31T23:59:59Z"
policies:
- name: production-images
scope: "atcr.io/*/prod-*"
require:
signature: true
trustedDIDs:
- did:plc:alice123
- did:plc:bob456
minSignatures: 1
action: enforce
- name: dev-images
scope: "atcr.io/*/dev-*"
require:
signature: false
action: audit
```
## Implementation Notes
### Dependencies
All plugins require:
- Go 1.21+ for building
- ATProto DID resolution (PLC directory, did:web)
- ATProto PDS XRPC API access
- ECDSA K-256 signature verification
### Caching
Recommended caching strategy:
- **DID documents**: 5 minute TTL
- **Public keys**: 5 minute TTL
- **PDS endpoints**: 5 minute TTL
- **Signature results**: 5 minute TTL
### Error Handling
Plugins should handle:
- DID resolution failures (network, invalid DID)
- PDS connectivity issues (timeout, 404, 500)
- Invalid signature format
- Untrusted DIDs
- Network timeouts
### Logging
Structured logging with:
- `image` - Image being verified
- `did` - Signer DID
- `duration` - Operation duration
- `error` - Error message (if failed)
### Metrics
Expose Prometheus metrics:
- `atcr_verifications_total{result="verified|failed|error"}`
- `atcr_verification_duration_seconds`
- `atcr_did_resolutions_total{result="success|failure"}`
- `atcr_cache_hits_total`
- `atcr_cache_misses_total`
## Testing
### Unit Tests
Test individual components:
```bash
# Test DID resolution
go test ./pkg/resolver -v
# Test signature verification
go test ./pkg/crypto -v
# Test trust policy
go test ./pkg/trust -v
```
### Integration Tests
Test with real services:
```bash
# Test against ATCR registry
go test ./integration -tags=integration -v
# Test with test PDS
go test ./integration -tags=integration -pds=https://test.pds.example.com
```
### End-to-End Tests
Test full deployment:
```bash
# Deploy to test cluster
kubectl apply -f test/fixtures/
# Create pod with signed image (should succeed)
kubectl run test-signed --image=atcr.io/test/signed:latest
# Create pod with unsigned image (should fail)
kubectl run test-unsigned --image=atcr.io/test/unsigned:latest
```
## Performance Considerations
### Latency
Typical verification latency:
- DID resolution: 50-200ms (cached: <1ms)
- PDS query: 100-500ms (cached: <1ms)
- Signature verification: 1-5ms
- **Total**: 150-700ms (uncached), <10ms (cached)
### Throughput
Expected throughput (single instance):
- Without caching: ~5-10 verifications/second
- With caching: ~100-500 verifications/second
### Scaling
For high traffic:
- Deploy multiple replicas (stateless)
- Use Redis for distributed caching
- Implement rate limiting
- Monitor P95/P99 latency
## Security Considerations
### Network Policies
Restrict access to:
- DID resolution (PLC directory only)
- PDS XRPC endpoints
- Internal services only
### Denial of Service
Protect against:
- High verification request rate
- Slow DID resolution
- Malicious images with many signatures
- Large signature artifacts
### Trust Model
Understand trust dependencies:
- DID resolution is accurate (PLC directory)
- PDS serves correct records
- Private keys are secure
- Trust policy is maintained
## Troubleshooting
### Plugin Not Loading
```bash
# Check plugin exists
ls -la /path/to/plugin
# Check plugin is executable
chmod +x /path/to/plugin
# Check plugin logs
tail -f /var/log/atcr-verifier.log
```
### Verification Failing
```bash
# Test DID resolution
curl https://plc.directory/did:plc:alice123
# Test PDS connectivity
curl https://bsky.social/xrpc/com.atproto.server.describeServer
# Test signature exists
oras discover atcr.io/alice/myapp:latest \
--artifact-type application/vnd.atproto.signature.v1+json
```
### Policy Not Enforcing
```bash
# Check policy is loaded
kubectl get configmap atcr-trust-policy -n gatekeeper-system
# Check constraint is active
kubectl get constraint atcr-signatures-required -o yaml
# Check logs
kubectl logs -n gatekeeper-system deployment/ratify
```
## See Also
### Documentation
- [ATProto Signatures](../../docs/ATPROTO_SIGNATURES.md) - Technical deep-dive
- [Signature Integration](../../docs/SIGNATURE_INTEGRATION.md) - Tool-specific guides
- [Integration Strategy](../../docs/INTEGRATION_STRATEGY.md) - High-level overview
- [atcr-verify CLI](../../docs/ATCR_VERIFY_CLI.md) - CLI tool specification
### Examples
- [Verification Scripts](../verification/) - Shell scripts for manual verification
- [Kubernetes Webhook](../verification/kubernetes-webhook.yaml) - Custom webhook example
### External Resources
- [Ratify](https://ratify.dev/) - Verification framework
- [OPA Gatekeeper](https://open-policy-agent.github.io/gatekeeper/) - Policy engine
- [Containerd](https://containerd.io/) - Container runtime
## Support
For questions or issues:
- GitHub Issues: https://github.com/atcr-io/atcr/issues
- Documentation: https://docs.atcr.io
- Security: security@atcr.io
## Contributing
Contributions welcome! Please:
1. Follow existing code structure
2. Add tests for new features
3. Update documentation
4. Submit pull request
## License
See [LICENSE](../../LICENSE) file in repository root.

View File

@@ -0,0 +1,166 @@
# GitHub Actions workflow for verifying ATProto signatures
name: Verify and Deploy
on:
push:
branches: [main]
pull_request:
branches: [main]
env:
REGISTRY: atcr.io
IMAGE_NAME: ${{ github.repository }}
jobs:
verify-signature:
name: Verify Image Signature
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up image tag
id: vars
run: |
echo "IMAGE_TAG=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }}" >> $GITHUB_OUTPUT
- name: Install verification tools
run: |
# Install ORAS
curl -LO https://github.com/oras-project/oras/releases/download/v1.0.0/oras_1.0.0_linux_amd64.tar.gz
tar -xzf oras_1.0.0_linux_amd64.tar.gz
sudo mv oras /usr/local/bin/
# Install crane
curl -sL "https://github.com/google/go-containerregistry/releases/download/v0.15.2/go-containerregistry_Linux_x86_64.tar.gz" > crane.tar.gz
tar -xzf crane.tar.gz
sudo mv crane /usr/local/bin/
# Install atcr-verify (when available)
# curl -LO https://github.com/atcr-io/atcr/releases/latest/download/atcr-verify
# chmod +x atcr-verify
# sudo mv atcr-verify /usr/local/bin/
- name: Check for signature
id: check_signature
run: |
IMAGE="${{ steps.vars.outputs.IMAGE_TAG }}"
echo "Checking signature for $IMAGE"
# Get image digest
DIGEST=$(crane digest "$IMAGE")
echo "Image digest: $DIGEST"
# Check for ATProto signature using ORAS
REPO=$(echo "$IMAGE" | cut -d: -f1)
REFERRERS=$(curl -s "https://${{ env.REGISTRY }}/v2/${REPO#${{ env.REGISTRY }}/}/referrers/${DIGEST}?artifactType=application/vnd.atproto.signature.v1+json")
SIG_COUNT=$(echo "$REFERRERS" | jq '.manifests | length')
if [ "$SIG_COUNT" -eq 0 ]; then
echo "❌ No ATProto signature found"
echo "has_signature=false" >> $GITHUB_OUTPUT
exit 1
fi
echo "✓ Found $SIG_COUNT signature(s)"
echo "has_signature=true" >> $GITHUB_OUTPUT
- name: Verify signature (full verification)
if: steps.check_signature.outputs.has_signature == 'true'
run: |
IMAGE="${{ steps.vars.outputs.IMAGE_TAG }}"
# Option 1: Use atcr-verify CLI (when available)
# atcr-verify "$IMAGE" --policy .atcr/trust-policy.yaml
# Option 2: Use shell script
chmod +x examples/verification/atcr-verify.sh
./examples/verification/atcr-verify.sh "$IMAGE"
echo "✓ Signature verified successfully"
- name: Verify signer DID
if: steps.check_signature.outputs.has_signature == 'true'
run: |
IMAGE="${{ steps.vars.outputs.IMAGE_TAG }}"
# Get signature metadata
DIGEST=$(crane digest "$IMAGE")
REPO=$(echo "$IMAGE" | cut -d: -f1)
REFERRERS=$(curl -s "https://${{ env.REGISTRY }}/v2/${REPO#${{ env.REGISTRY }}/}/referrers/${DIGEST}?artifactType=application/vnd.atproto.signature.v1+json")
SIG_DIGEST=$(echo "$REFERRERS" | jq -r '.manifests[0].digest')
# Pull signature artifact
oras pull "${REPO}@${SIG_DIGEST}" -o /tmp/sig
# Extract DID
DID=$(jq -r '.atproto.did' /tmp/sig/atproto-signature.json)
echo "Signed by DID: $DID"
# Check against trusted DIDs
TRUSTED_DIDS="${{ secrets.TRUSTED_DIDS }}" # e.g., "did:plc:alice123,did:plc:bob456"
if [[ ",$TRUSTED_DIDS," == *",$DID,"* ]]; then
echo "✓ DID is trusted"
else
echo "❌ DID $DID is not in trusted list"
exit 1
fi
deploy:
name: Deploy to Kubernetes
needs: verify-signature
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/main'
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up image tag
id: vars
run: |
echo "IMAGE_TAG=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }}" >> $GITHUB_OUTPUT
- name: Set up kubectl
uses: azure/setup-kubectl@v3
- name: Configure kubectl
run: |
echo "${{ secrets.KUBE_CONFIG }}" | base64 -d > /tmp/kubeconfig
export KUBECONFIG=/tmp/kubeconfig
- name: Deploy to production
run: |
kubectl set image deployment/myapp \
myapp=${{ steps.vars.outputs.IMAGE_TAG }} \
-n production
kubectl rollout status deployment/myapp -n production
- name: Verify deployment
run: |
kubectl get pods -n production -l app=myapp
# Wait for rollout to complete
kubectl wait --for=condition=available --timeout=300s \
deployment/myapp -n production
# Alternative: Use atcr-verify action (when available)
verify-with-action:
name: Verify with ATCR Action
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Verify image signature
# uses: atcr-io/atcr-verify-action@v1
# with:
# image: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }}
# policy: .atcr/trust-policy.yaml
# fail-on-error: true
run: |
echo "TODO: Use official atcr-verify GitHub Action"

View File

@@ -0,0 +1,156 @@
# GitLab CI pipeline for verifying ATProto signatures
variables:
REGISTRY: atcr.io
IMAGE_NAME: $CI_PROJECT_PATH
IMAGE_TAG: $REGISTRY/$IMAGE_NAME:$CI_COMMIT_SHA
stages:
- build
- verify
- deploy
build_image:
stage: build
image: docker:latest
services:
- docker:dind
script:
- docker build -t $IMAGE_TAG .
- docker push $IMAGE_TAG
verify_signature:
stage: verify
image: alpine:latest
before_script:
- apk add --no-cache curl jq
script:
- |
echo "Verifying signature for $IMAGE_TAG"
# Install crane
wget https://github.com/google/go-containerregistry/releases/download/v0.15.2/go-containerregistry_Linux_x86_64.tar.gz
tar -xzf go-containerregistry_Linux_x86_64.tar.gz crane
mv crane /usr/local/bin/
# Get image digest
DIGEST=$(crane digest "$IMAGE_TAG")
echo "Image digest: $DIGEST"
# Extract repository path
REPO=$(echo "$IMAGE_TAG" | cut -d: -f1)
REPO_PATH=${REPO#$REGISTRY/}
# Check for ATProto signature
REFERRERS=$(curl -s "https://$REGISTRY/v2/$REPO_PATH/referrers/${DIGEST}?artifactType=application/vnd.atproto.signature.v1+json")
SIG_COUNT=$(echo "$REFERRERS" | jq '.manifests | length')
if [ "$SIG_COUNT" -eq 0 ]; then
echo "❌ No ATProto signature found"
exit 1
fi
echo "✓ Found $SIG_COUNT signature(s)"
verify_full:
stage: verify
image: alpine:latest
before_script:
- apk add --no-cache curl jq bash
script:
- |
# Option 1: Use atcr-verify CLI (when available)
# wget https://github.com/atcr-io/atcr/releases/latest/download/atcr-verify
# chmod +x atcr-verify
# ./atcr-verify "$IMAGE_TAG" --policy .atcr/trust-policy.yaml
# Option 2: Use shell script
chmod +x examples/verification/atcr-verify.sh
./examples/verification/atcr-verify.sh "$IMAGE_TAG"
echo "✓ Signature verified successfully"
verify_trust:
stage: verify
image: alpine:latest
before_script:
- apk add --no-cache curl jq
script:
- |
# Install crane and ORAS
wget https://github.com/google/go-containerregistry/releases/download/v0.15.2/go-containerregistry_Linux_x86_64.tar.gz
tar -xzf go-containerregistry_Linux_x86_64.tar.gz crane
mv crane /usr/local/bin/
wget https://github.com/oras-project/oras/releases/download/v1.0.0/oras_1.0.0_linux_amd64.tar.gz
tar -xzf oras_1.0.0_linux_amd64.tar.gz
mv oras /usr/local/bin/
# Get signature metadata
DIGEST=$(crane digest "$IMAGE_TAG")
REPO=$(echo "$IMAGE_TAG" | cut -d: -f1)
REPO_PATH=${REPO#$REGISTRY/}
REFERRERS=$(curl -s "https://$REGISTRY/v2/$REPO_PATH/referrers/${DIGEST}?artifactType=application/vnd.atproto.signature.v1+json")
SIG_DIGEST=$(echo "$REFERRERS" | jq -r '.manifests[0].digest')
# Pull signature artifact
oras pull "${REPO}@${SIG_DIGEST}" -o /tmp/sig
# Extract DID
DID=$(jq -r '.atproto.did' /tmp/sig/atproto-signature.json)
echo "Signed by DID: $DID"
# Check against trusted DIDs (from CI/CD variables)
if [[ ",$TRUSTED_DIDS," == *",$DID,"* ]]; then
echo "✓ DID is trusted"
else
echo "❌ DID $DID is not in trusted list"
exit 1
fi
deploy_production:
stage: deploy
image: bitnami/kubectl:latest
dependencies:
- verify_signature
- verify_full
- verify_trust
only:
- main
script:
- |
# Configure kubectl
echo "$KUBE_CONFIG" | base64 -d > /tmp/kubeconfig
export KUBECONFIG=/tmp/kubeconfig
# Deploy to production
kubectl set image deployment/myapp \
myapp=$IMAGE_TAG \
-n production
kubectl rollout status deployment/myapp -n production
# Verify deployment
kubectl get pods -n production -l app=myapp
# Alternative: Manual approval before deploy
deploy_production_manual:
stage: deploy
image: bitnami/kubectl:latest
dependencies:
- verify_signature
when: manual
only:
- main
script:
- |
echo "Deploying $IMAGE_TAG to production"
echo "$KUBE_CONFIG" | base64 -d > /tmp/kubeconfig
export KUBECONFIG=/tmp/kubeconfig
kubectl set image deployment/myapp \
myapp=$IMAGE_TAG \
-n production

View File

@@ -0,0 +1,501 @@
# OPA Gatekeeper External Data Provider for ATProto Signatures
This is a reference implementation of an OPA Gatekeeper External Data Provider that verifies ATProto signatures on ATCR container images.
## Overview
Gatekeeper's External Data Provider feature allows Rego policies to call external HTTP services for data validation. This provider implements signature verification as an HTTP service that Gatekeeper can query.
## Architecture
```
Kubernetes Pod Creation
OPA Gatekeeper (admission webhook)
Rego Policy (constraint template)
External Data Provider API call
ATProto Verification Service ← This service
1. Resolve image digest
2. Discover signature artifacts
3. Parse ATProto signature metadata
4. Resolve DID to public key
5. Fetch commit from PDS
6. Verify K-256 signature
7. Check trust policy
Return: verified=true/false + metadata
```
## Files
- `main.go` - HTTP server and provider endpoints
- `verifier.go` - ATProto signature verification logic
- `resolver.go` - DID and PDS resolution
- `crypto.go` - K-256 signature verification
- `trust-policy.yaml` - Trust policy configuration
- `Dockerfile` - Build provider service image
- `deployment.yaml` - Kubernetes deployment manifest
- `provider-crd.yaml` - Gatekeeper Provider custom resource
- `constraint-template.yaml` - Rego constraint template
- `constraint.yaml` - Policy constraint example
## Prerequisites
- Go 1.21+
- Kubernetes cluster with OPA Gatekeeper installed
- Access to ATCR registry
## Building
```bash
# Build binary
CGO_ENABLED=0 go build -o atcr-provider \
-ldflags="-w -s" \
./main.go
# Build Docker image
docker build -t atcr.io/atcr/gatekeeper-provider:latest .
# Push to registry
docker push atcr.io/atcr/gatekeeper-provider:latest
```
## Deployment
### 1. Create Trust Policy ConfigMap
```bash
kubectl create namespace gatekeeper-system
kubectl create configmap atcr-trust-policy \
--from-file=trust-policy.yaml \
-n gatekeeper-system
```
### 2. Deploy Provider Service
```bash
kubectl apply -f deployment.yaml
```
### 3. Configure Gatekeeper Provider
```bash
kubectl apply -f provider-crd.yaml
```
### 4. Create Constraint Template
```bash
kubectl apply -f constraint-template.yaml
```
### 5. Create Constraint
```bash
kubectl apply -f constraint.yaml
```
### 6. Test
```bash
# Try to create pod with signed image (should succeed)
kubectl run test-signed --image=atcr.io/alice/myapp:latest
# Try to create pod with unsigned image (should fail)
kubectl run test-unsigned --image=atcr.io/malicious/fake:latest
# Check constraint status
kubectl get constraint atcr-signatures-required -o yaml
```
## API Specification
### Provider Endpoint
**POST /provide**
Request:
```json
{
"keys": ["image"],
"values": [
"atcr.io/alice/myapp:latest",
"atcr.io/bob/webapp:v1.0"
]
}
```
Response:
```json
{
"responses": [
{
"image": "atcr.io/alice/myapp:latest",
"verified": true,
"did": "did:plc:alice123",
"handle": "alice.bsky.social",
"signedAt": "2025-10-31T12:34:56Z",
"commitCid": "bafyreih8..."
},
{
"image": "atcr.io/bob/webapp:v1.0",
"verified": false,
"error": "no signature found"
}
]
}
```
### Health Check
**GET /health**
Response:
```json
{
"status": "ok",
"version": "1.0.0"
}
```
## Configuration
### Trust Policy Format
```yaml
# trust-policy.yaml
version: 1.0
trustedDIDs:
did:plc:alice123:
name: "Alice (DevOps)"
validFrom: "2024-01-01T00:00:00Z"
expiresAt: null
did:plc:bob456:
name: "Bob (Security)"
validFrom: "2024-06-01T00:00:00Z"
expiresAt: "2025-12-31T23:59:59Z"
policies:
- name: production
scope: "atcr.io/*/prod-*"
require:
signature: true
trustedDIDs:
- did:plc:alice123
- did:plc:bob456
action: enforce
```
### Provider Configuration
Environment variables:
- `TRUST_POLICY_PATH` - Path to trust policy file (default: `/config/trust-policy.yaml`)
- `HTTP_PORT` - HTTP server port (default: `8080`)
- `LOG_LEVEL` - Log level: debug, info, warn, error (default: `info`)
- `CACHE_ENABLED` - Enable caching (default: `true`)
- `CACHE_TTL` - Cache TTL in seconds (default: `300`)
- `DID_RESOLVER_TIMEOUT` - DID resolution timeout (default: `10s`)
- `PDS_TIMEOUT` - PDS XRPC timeout (default: `10s`)
## Rego Policy Examples
### Simple Verification
```rego
package atcrsignatures
import future.keywords.contains
import future.keywords.if
import future.keywords.in
provider := "atcr-verifier"
violation[{"msg": msg}] {
container := input.review.object.spec.containers[_]
startswith(container.image, "atcr.io/")
# Call external provider
response := external_data({
"provider": provider,
"keys": ["image"],
"values": [container.image]
})
# Check verification result
not response[_].verified == true
msg := sprintf("Image %v has no valid ATProto signature", [container.image])
}
```
### Advanced Verification with DID Trust
```rego
package atcrsignatures
import future.keywords.contains
import future.keywords.if
import future.keywords.in
provider := "atcr-verifier"
trusted_dids := [
"did:plc:alice123",
"did:plc:bob456"
]
violation[{"msg": msg}] {
container := input.review.object.spec.containers[_]
startswith(container.image, "atcr.io/")
# Call external provider
response := external_data({
"provider": provider,
"keys": ["image"],
"values": [container.image]
})
# Get response for this image
result := response[_]
result.image == container.image
# Check if verified
not result.verified == true
msg := sprintf("Image %v failed signature verification: %v", [container.image, result.error])
}
violation[{"msg": msg}] {
container := input.review.object.spec.containers[_]
startswith(container.image, "atcr.io/")
# Call external provider
response := external_data({
"provider": provider,
"keys": ["image"],
"values": [container.image]
})
# Get response for this image
result := response[_]
result.image == container.image
result.verified == true
# Check DID is trusted
not result.did in trusted_dids
msg := sprintf("Image %v signed by untrusted DID: %v", [container.image, result.did])
}
```
### Namespace-Specific Policies
```rego
package atcrsignatures
import future.keywords.contains
import future.keywords.if
import future.keywords.in
provider := "atcr-verifier"
# Production namespaces require signatures
production_namespaces := ["production", "prod", "staging"]
violation[{"msg": msg}] {
# Only apply to production namespaces
input.review.object.metadata.namespace in production_namespaces
container := input.review.object.spec.containers[_]
startswith(container.image, "atcr.io/")
# Call external provider
response := external_data({
"provider": provider,
"keys": ["image"],
"values": [container.image]
})
# Check verification result
not response[_].verified == true
msg := sprintf("Production namespace requires signed images. Image %v is not signed", [container.image])
}
```
## Performance Considerations
### Caching
The provider caches:
- Signature verification results (TTL: 5 minutes)
- DID documents (TTL: 5 minutes)
- PDS endpoints (TTL: 5 minutes)
- Public keys (TTL: 5 minutes)
Enable/disable via `CACHE_ENABLED` environment variable.
### Timeouts
- `DID_RESOLVER_TIMEOUT` - DID resolution timeout (default: 10s)
- `PDS_TIMEOUT` - PDS XRPC calls timeout (default: 10s)
- HTTP client timeout: 30s total
### Horizontal Scaling
The provider is stateless and can be scaled horizontally:
```yaml
apiVersion: apps/v1
kind: Deployment
spec:
replicas: 3 # Scale up for high traffic
```
### Rate Limiting
Consider implementing rate limiting for:
- Gatekeeper → Provider requests
- Provider → DID resolver
- Provider → PDS
## Monitoring
### Metrics
The provider exposes Prometheus metrics at `/metrics`:
```
# Request metrics
atcr_provider_requests_total{status="success|failure"}
atcr_provider_request_duration_seconds
# Verification metrics
atcr_provider_verifications_total{result="verified|failed|error"}
atcr_provider_verification_duration_seconds
# Cache metrics
atcr_provider_cache_hits_total
atcr_provider_cache_misses_total
```
### Logging
Structured JSON logging with fields:
- `image` - Image being verified
- `did` - Signer DID (if found)
- `duration` - Verification duration
- `error` - Error message (if failed)
### Health Checks
```bash
# Liveness probe
curl http://localhost:8080/health
# Readiness probe
curl http://localhost:8080/ready
```
## Troubleshooting
### Provider Not Reachable
```bash
# Check provider pod status
kubectl get pods -n gatekeeper-system -l app=atcr-provider
# Check service
kubectl get svc -n gatekeeper-system atcr-provider
# Test connectivity from Gatekeeper pod
kubectl exec -n gatekeeper-system deployment/gatekeeper-controller-manager -- \
curl http://atcr-provider.gatekeeper-system/health
```
### Verification Failing
```bash
# Check provider logs
kubectl logs -n gatekeeper-system deployment/atcr-provider
# Test verification manually
kubectl run test-curl --rm -it --image=curlimages/curl -- \
curl -X POST http://atcr-provider.gatekeeper-system/provide \
-H "Content-Type: application/json" \
-d '{"keys":["image"],"values":["atcr.io/alice/myapp:latest"]}'
```
### Policy Not Enforcing
```bash
# Check Gatekeeper logs
kubectl logs -n gatekeeper-system deployment/gatekeeper-controller-manager
# Check constraint status
kubectl get constraint atcr-signatures-required -o yaml
# Test policy manually with conftest
conftest test -p constraint-template.yaml pod.yaml
```
## Security Considerations
### Network Policies
Restrict network access:
```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: atcr-provider
namespace: gatekeeper-system
spec:
podSelector:
matchLabels:
app: atcr-provider
ingress:
- from:
- podSelector:
matchLabels:
control-plane: controller-manager # Gatekeeper
ports:
- port: 8080
egress:
- to: # PLC directory
- namespaceSelector: {}
ports:
- port: 443
```
### Authentication
The provider should only be accessible from Gatekeeper. Options:
- Network policies (recommended for Kubernetes)
- Mutual TLS
- API tokens
### Trust Policy Management
- Store trust policy in version control
- Use GitOps (Flux, ArgoCD) for updates
- Review DID changes carefully
- Audit policy modifications
## See Also
- [Gatekeeper Documentation](https://open-policy-agent.github.io/gatekeeper/)
- [External Data Provider](https://open-policy-agent.github.io/gatekeeper/website/docs/externaldata/)
- [ATCR Signature Integration](../../../docs/SIGNATURE_INTEGRATION.md)
- [ATCR Integration Strategy](../../../docs/INTEGRATION_STRATEGY.md)
## Support
For issues or questions:
- GitHub Issues: https://github.com/atcr-io/atcr/issues
- Gatekeeper GitHub: https://github.com/open-policy-agent/gatekeeper

View File

@@ -0,0 +1,225 @@
// Package main implements an OPA Gatekeeper External Data Provider for ATProto signature verification.
package main
import (
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"time"
)
const (
// DefaultPort is the default HTTP port
DefaultPort = "8080"
// DefaultTrustPolicyPath is the default trust policy file path
DefaultTrustPolicyPath = "/config/trust-policy.yaml"
)
// Server is the HTTP server for the external data provider.
type Server struct {
verifier *Verifier
port string
httpServer *http.Server
}
// ProviderRequest is the request format from Gatekeeper.
type ProviderRequest struct {
Keys []string `json:"keys"`
Values []string `json:"values"`
}
// ProviderResponse is the response format to Gatekeeper.
type ProviderResponse struct {
SystemError string `json:"system_error,omitempty"`
Responses []map[string]interface{} `json:"responses"`
}
// VerificationResult holds the result of verifying a single image.
type VerificationResult struct {
Image string `json:"image"`
Verified bool `json:"verified"`
DID string `json:"did,omitempty"`
Handle string `json:"handle,omitempty"`
SignedAt time.Time `json:"signedAt,omitempty"`
CommitCID string `json:"commitCid,omitempty"`
Error string `json:"error,omitempty"`
}
// NewServer creates a new provider server.
func NewServer(verifier *Verifier, port string) *Server {
return &Server{
verifier: verifier,
port: port,
}
}
// Start starts the HTTP server.
func (s *Server) Start() error {
mux := http.NewServeMux()
// Provider endpoint (called by Gatekeeper)
mux.HandleFunc("/provide", s.handleProvide)
// Health check endpoints
mux.HandleFunc("/health", s.handleHealth)
mux.HandleFunc("/ready", s.handleReady)
// Metrics endpoint (Prometheus)
// TODO: Implement metrics
// mux.HandleFunc("/metrics", s.handleMetrics)
s.httpServer = &http.Server{
Addr: ":" + s.port,
Handler: mux,
ReadTimeout: 10 * time.Second,
WriteTimeout: 30 * time.Second,
IdleTimeout: 60 * time.Second,
}
log.Printf("Starting ATProto signature verification provider on port %s", s.port)
return s.httpServer.ListenAndServe()
}
// Stop gracefully stops the HTTP server.
func (s *Server) Stop(ctx context.Context) error {
if s.httpServer != nil {
return s.httpServer.Shutdown(ctx)
}
return nil
}
// handleProvide handles the provider endpoint called by Gatekeeper.
func (s *Server) handleProvide(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
return
}
// Parse request
var req ProviderRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
log.Printf("ERROR: failed to parse request: %v", err)
http.Error(w, fmt.Sprintf("invalid request: %v", err), http.StatusBadRequest)
return
}
log.Printf("INFO: received verification request for %d images", len(req.Values))
// Verify each image
responses := make([]map[string]interface{}, 0, len(req.Values))
for _, image := range req.Values {
result := s.verifyImage(r.Context(), image)
responses = append(responses, structToMap(result))
}
// Send response
resp := ProviderResponse{
Responses: responses,
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(resp); err != nil {
log.Printf("ERROR: failed to encode response: %v", err)
}
}
// verifyImage verifies a single image.
func (s *Server) verifyImage(ctx context.Context, image string) VerificationResult {
start := time.Now()
log.Printf("INFO: verifying image: %s", image)
// Call verifier
verified, metadata, err := s.verifier.Verify(ctx, image)
duration := time.Since(start)
if err != nil {
log.Printf("ERROR: verification failed for %s: %v (duration: %v)", image, err, duration)
return VerificationResult{
Image: image,
Verified: false,
Error: err.Error(),
}
}
if !verified {
log.Printf("WARN: image %s failed verification (duration: %v)", image, duration)
return VerificationResult{
Image: image,
Verified: false,
Error: "signature verification failed",
}
}
log.Printf("INFO: image %s verified successfully (DID: %s, duration: %v)",
image, metadata.DID, duration)
return VerificationResult{
Image: image,
Verified: true,
DID: metadata.DID,
Handle: metadata.Handle,
SignedAt: metadata.SignedAt,
CommitCID: metadata.CommitCID,
}
}
// handleHealth handles health check requests.
func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(map[string]string{
"status": "ok",
"version": "1.0.0",
})
}
// handleReady handles readiness check requests.
func (s *Server) handleReady(w http.ResponseWriter, r *http.Request) {
// TODO: Check dependencies (DID resolver, PDS connectivity)
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(map[string]string{
"status": "ready",
})
}
// structToMap converts a struct to a map for JSON encoding.
func structToMap(v interface{}) map[string]interface{} {
data, _ := json.Marshal(v)
var m map[string]interface{}
json.Unmarshal(data, &m)
return m
}
func main() {
// Load configuration
port := os.Getenv("HTTP_PORT")
if port == "" {
port = DefaultPort
}
trustPolicyPath := os.Getenv("TRUST_POLICY_PATH")
if trustPolicyPath == "" {
trustPolicyPath = DefaultTrustPolicyPath
}
// Create verifier
verifier, err := NewVerifier(trustPolicyPath)
if err != nil {
log.Fatalf("FATAL: failed to create verifier: %v", err)
}
// Create server
server := NewServer(verifier, port)
// Start server
if err := server.Start(); err != nil && err != http.ErrServerClosed {
log.Fatalf("FATAL: server error: %v", err)
}
}
// TODO: Implement verifier.go with ATProto signature verification logic
// TODO: Implement resolver.go with DID resolution
// TODO: Implement crypto.go with K-256 signature verification

View File

@@ -0,0 +1,304 @@
# Ratify ATProto Verifier Plugin
This is a reference implementation of a Ratify verifier plugin for ATProto signatures.
## Overview
Ratify is a verification framework that integrates with OPA Gatekeeper to enforce signature policies in Kubernetes. This plugin adds support for verifying ATProto signatures on ATCR container images.
## Architecture
```
Kubernetes Pod Creation
OPA Gatekeeper (admission webhook)
Ratify (verification engine)
ATProto Verifier Plugin ← This plugin
1. Fetch signature artifact from registry
2. Parse ATProto signature metadata
3. Resolve DID to public key
4. Fetch repository commit from PDS
5. Verify ECDSA K-256 signature
6. Check trust policy
Return: Allow/Deny
```
## Files
- `verifier.go` - Main verifier implementation
- `config.go` - Configuration and trust policy
- `resolver.go` - DID and PDS resolution
- `crypto.go` - K-256 signature verification
- `Dockerfile` - Build custom Ratify image with plugin
- `deployment.yaml` - Kubernetes deployment manifest
- `verifier-crd.yaml` - Ratify Verifier custom resource
## Prerequisites
- Go 1.21+
- Ratify source code (for building plugin)
- Kubernetes cluster with OPA Gatekeeper installed
- Access to ATCR registry
## Building
```bash
# Clone Ratify
git clone https://github.com/ratify-project/ratify.git
cd ratify
# Copy plugin files
cp -r /path/to/examples/plugins/ratify-verifier plugins/verifier/atproto/
# Build plugin
CGO_ENABLED=0 go build -o atproto-verifier \
-ldflags="-w -s" \
./plugins/verifier/atproto
# Build custom Ratify image with plugin
docker build -f Dockerfile.with-atproto -t atcr.io/atcr/ratify-with-atproto:latest .
```
## Deployment
### 1. Deploy Ratify with Plugin
```bash
# Push custom image
docker push atcr.io/atcr/ratify-with-atproto:latest
# Deploy Ratify
kubectl apply -f deployment.yaml
```
### 2. Configure Verifier
```bash
# Create Verifier custom resource
kubectl apply -f verifier-crd.yaml
```
### 3. Configure Trust Policy
```bash
# Create ConfigMap with trust policy
kubectl create configmap atcr-trust-policy \
--from-file=trust-policy.yaml \
-n gatekeeper-system
```
### 4. Create Gatekeeper Constraint
```bash
kubectl apply -f constraint.yaml
```
### 5. Test
```bash
# Try to create pod with signed image (should succeed)
kubectl run test-signed --image=atcr.io/alice/myapp:latest
# Try to create pod with unsigned image (should fail)
kubectl run test-unsigned --image=atcr.io/malicious/fake:latest
```
## Configuration
### Trust Policy Format
```yaml
# trust-policy.yaml
version: 1.0
trustedDIDs:
did:plc:alice123:
name: "Alice (DevOps)"
validFrom: "2024-01-01T00:00:00Z"
expiresAt: null
did:plc:bob456:
name: "Bob (Security)"
validFrom: "2024-06-01T00:00:00Z"
expiresAt: "2025-12-31T23:59:59Z"
policies:
- name: production
scope: "atcr.io/*/prod-*"
require:
signature: true
trustedDIDs:
- did:plc:alice123
- did:plc:bob456
action: enforce
```
### Verifier Configuration
```yaml
apiVersion: config.ratify.deislabs.io/v1beta1
kind: Verifier
metadata:
name: atproto-verifier
spec:
name: atproto
artifactType: application/vnd.atproto.signature.v1+json
address: /.ratify/plugins/atproto-verifier
parameters:
trustPolicyPath: /config/trust-policy.yaml
didResolverTimeout: 10s
pdsTimeout: 10s
cacheEnabled: true
cacheTTL: 300s
```
## Implementation Details
### Verifier Interface
The plugin implements Ratify's `ReferenceVerifier` interface:
```go
type ReferenceVerifier interface {
Name() string
Type() string
CanVerify(artifactType string) bool
VerifyReference(
ctx context.Context,
subjectRef common.Reference,
referenceDesc ocispecs.ReferenceDescriptor,
store referrerstore.ReferrerStore,
) (VerifierResult, error)
}
```
### Verification Flow
1. **Artifact Fetch**: Download signature artifact from registry via Ratify's store
2. **Parse Metadata**: Extract ATProto signature metadata (DID, PDS, commit CID)
3. **DID Resolution**: Resolve DID to public key via PLC directory or did:web
4. **Commit Fetch**: Get repository commit from PDS via XRPC
5. **Signature Verify**: Verify ECDSA K-256 signature over commit bytes
6. **Trust Check**: Validate DID against trust policy
7. **Result**: Return success/failure with metadata
### Error Handling
The plugin returns detailed error information:
```go
type VerifierResult struct {
IsSuccess bool
Name string
Type string
Message string
Extensions map[string]interface{}
}
```
**Extensions include:**
- `did` - Signer's DID
- `handle` - Signer's handle (if available)
- `signedAt` - Signature timestamp
- `commitCid` - ATProto commit CID
- `pdsEndpoint` - PDS URL
- `error` - Error details (if verification failed)
## Troubleshooting
### Plugin Not Found
```bash
# Check plugin is in image
kubectl exec -n gatekeeper-system deployment/ratify -c ratify -- ls -la /.ratify/plugins/
# Check logs
kubectl logs -n gatekeeper-system deployment/ratify -c ratify
```
### Verification Failing
```bash
# Check Ratify logs for details
kubectl logs -n gatekeeper-system deployment/ratify -c ratify | grep atproto
# Check Verifier status
kubectl get verifier atproto-verifier -o yaml
# Test DID resolution manually
curl https://plc.directory/did:plc:alice123
```
### Trust Policy Issues
```bash
# Check ConfigMap exists
kubectl get configmap atcr-trust-policy -n gatekeeper-system
# View policy contents
kubectl get configmap atcr-trust-policy -n gatekeeper-system -o yaml
```
## Performance Considerations
### Caching
The plugin caches:
- DID documents (TTL: 5 minutes)
- PDS endpoints (TTL: 5 minutes)
- Public keys (TTL: 5 minutes)
Configure via `cacheEnabled` and `cacheTTL` parameters.
### Timeouts
Configure timeouts for external calls:
- `didResolverTimeout` - DID resolution (default: 10s)
- `pdsTimeout` - PDS XRPC calls (default: 10s)
### Rate Limiting
Consider implementing rate limiting for:
- DID resolution (PLC directory)
- PDS XRPC calls
- Signature verification
## Security Considerations
### Trust Policy Management
- Store trust policy in version control
- Review DID additions/removals carefully
- Set expiration dates for temporary access
- Audit trust policy changes
### Private Key Protection
- Plugin only uses public keys
- No private keys needed for verification
- DID resolution is read-only
- PDS queries are read-only
### Denial of Service
- Implement timeouts for all external calls
- Cache DID documents to reduce load
- Rate limit verification requests
- Monitor verification latency
## See Also
- [Ratify Documentation](https://ratify.dev/)
- [Ratify Plugin Development](https://ratify.dev/docs/plugins/verifier/overview)
- [ATCR Signature Integration](../../../docs/SIGNATURE_INTEGRATION.md)
- [ATCR Integration Strategy](../../../docs/INTEGRATION_STRATEGY.md)
## Support
For issues or questions:
- GitHub Issues: https://github.com/atcr-io/atcr/issues
- Ratify GitHub: https://github.com/ratify-project/ratify

View File

@@ -0,0 +1,214 @@
// Package atproto implements a Ratify verifier plugin for ATProto signatures.
package atproto
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/ratify-project/ratify/pkg/common"
"github.com/ratify-project/ratify/pkg/ocispecs"
"github.com/ratify-project/ratify/pkg/referrerstore"
"github.com/ratify-project/ratify/pkg/verifier"
)
const (
// VerifierName is the name of this verifier
VerifierName = "atproto"
// VerifierType is the type of this verifier
VerifierType = "atproto"
// ATProtoSignatureArtifactType is the OCI artifact type for ATProto signatures
ATProtoSignatureArtifactType = "application/vnd.atproto.signature.v1+json"
)
// ATProtoVerifier implements the Ratify ReferenceVerifier interface for ATProto signatures.
type ATProtoVerifier struct {
name string
config ATProtoConfig
resolver *Resolver
verifier *SignatureVerifier
trustStore *TrustStore
}
// ATProtoConfig holds configuration for the ATProto verifier.
type ATProtoConfig struct {
// TrustPolicyPath is the path to the trust policy YAML file
TrustPolicyPath string `json:"trustPolicyPath"`
// DIDResolverTimeout is the timeout for DID resolution
DIDResolverTimeout time.Duration `json:"didResolverTimeout"`
// PDSTimeout is the timeout for PDS XRPC calls
PDSTimeout time.Duration `json:"pdsTimeout"`
// CacheEnabled enables caching of DID documents and public keys
CacheEnabled bool `json:"cacheEnabled"`
// CacheTTL is the cache TTL for DID documents and public keys
CacheTTL time.Duration `json:"cacheTTL"`
}
// ATProtoSignature represents the ATProto signature metadata stored in the OCI artifact.
type ATProtoSignature struct {
Type string `json:"$type"`
Version string `json:"version"`
Subject struct {
Digest string `json:"digest"`
MediaType string `json:"mediaType"`
} `json:"subject"`
ATProto struct {
DID string `json:"did"`
Handle string `json:"handle"`
PDSEndpoint string `json:"pdsEndpoint"`
RecordURI string `json:"recordUri"`
CommitCID string `json:"commitCid"`
SignedAt time.Time `json:"signedAt"`
} `json:"atproto"`
Signature struct {
Algorithm string `json:"algorithm"`
KeyID string `json:"keyId"`
PublicKeyMultibase string `json:"publicKeyMultibase"`
} `json:"signature"`
}
// NewATProtoVerifier creates a new ATProto verifier instance.
func NewATProtoVerifier(name string, config ATProtoConfig) (*ATProtoVerifier, error) {
// Load trust policy
trustStore, err := LoadTrustStore(config.TrustPolicyPath)
if err != nil {
return nil, fmt.Errorf("failed to load trust policy: %w", err)
}
// Create resolver with caching
resolver := NewResolver(config.DIDResolverTimeout, config.CacheEnabled, config.CacheTTL)
// Create signature verifier
verifier := NewSignatureVerifier(config.PDSTimeout)
return &ATProtoVerifier{
name: name,
config: config,
resolver: resolver,
verifier: verifier,
trustStore: trustStore,
}, nil
}
// Name returns the name of this verifier.
func (v *ATProtoVerifier) Name() string {
return v.name
}
// Type returns the type of this verifier.
func (v *ATProtoVerifier) Type() string {
return VerifierType
}
// CanVerify returns true if this verifier can verify the given artifact type.
func (v *ATProtoVerifier) CanVerify(artifactType string) bool {
return artifactType == ATProtoSignatureArtifactType
}
// VerifyReference verifies an ATProto signature artifact.
func (v *ATProtoVerifier) VerifyReference(
ctx context.Context,
subjectRef common.Reference,
referenceDesc ocispecs.ReferenceDescriptor,
store referrerstore.ReferrerStore,
) (verifier.VerifierResult, error) {
// 1. Fetch signature blob from store
sigBlob, err := store.GetBlobContent(ctx, subjectRef, referenceDesc.Digest)
if err != nil {
return v.failureResult(fmt.Sprintf("failed to fetch signature blob: %v", err)), err
}
// 2. Parse ATProto signature metadata
var sigData ATProtoSignature
if err := json.Unmarshal(sigBlob, &sigData); err != nil {
return v.failureResult(fmt.Sprintf("failed to parse signature metadata: %v", err)), err
}
// Validate signature format
if err := v.validateSignature(&sigData); err != nil {
return v.failureResult(fmt.Sprintf("invalid signature format: %v", err)), err
}
// 3. Check trust policy first (fail fast if DID not trusted)
if !v.trustStore.IsTrusted(sigData.ATProto.DID, time.Now()) {
return v.failureResult(fmt.Sprintf("DID %s not in trusted list", sigData.ATProto.DID)),
fmt.Errorf("untrusted DID")
}
// 4. Resolve DID to public key
pubKey, err := v.resolver.ResolveDIDToPublicKey(ctx, sigData.ATProto.DID)
if err != nil {
return v.failureResult(fmt.Sprintf("failed to resolve DID: %v", err)), err
}
// 5. Fetch repository commit from PDS
commit, err := v.verifier.FetchCommit(ctx, sigData.ATProto.PDSEndpoint,
sigData.ATProto.DID, sigData.ATProto.CommitCID)
if err != nil {
return v.failureResult(fmt.Sprintf("failed to fetch commit: %v", err)), err
}
// 6. Verify K-256 signature
if err := v.verifier.VerifySignature(pubKey, commit); err != nil {
return v.failureResult(fmt.Sprintf("signature verification failed: %v", err)), err
}
// 7. Success - return detailed result
return verifier.VerifierResult{
IsSuccess: true,
Name: v.name,
Type: v.Type(),
Message: fmt.Sprintf("Successfully verified ATProto signature for DID %s", sigData.ATProto.DID),
Extensions: map[string]interface{}{
"did": sigData.ATProto.DID,
"handle": sigData.ATProto.Handle,
"signedAt": sigData.ATProto.SignedAt,
"commitCid": sigData.ATProto.CommitCID,
"pdsEndpoint": sigData.ATProto.PDSEndpoint,
},
}, nil
}
// validateSignature validates the signature metadata format.
func (v *ATProtoVerifier) validateSignature(sig *ATProtoSignature) error {
if sig.Type != "io.atcr.atproto.signature" {
return fmt.Errorf("invalid signature type: %s", sig.Type)
}
if sig.ATProto.DID == "" {
return fmt.Errorf("missing DID")
}
if sig.ATProto.PDSEndpoint == "" {
return fmt.Errorf("missing PDS endpoint")
}
if sig.ATProto.CommitCID == "" {
return fmt.Errorf("missing commit CID")
}
if sig.Signature.Algorithm != "ECDSA-K256-SHA256" {
return fmt.Errorf("unsupported signature algorithm: %s", sig.Signature.Algorithm)
}
return nil
}
// failureResult creates a failure result with the given message.
func (v *ATProtoVerifier) failureResult(message string) verifier.VerifierResult {
return verifier.VerifierResult{
IsSuccess: false,
Name: v.name,
Type: v.Type(),
Message: message,
Extensions: map[string]interface{}{
"error": message,
},
}
}
// TODO: Implement resolver.go with DID resolution logic
// TODO: Implement crypto.go with K-256 signature verification
// TODO: Implement config.go with trust policy loading

View File

@@ -0,0 +1,364 @@
# ATProto Signature Verification Examples
This directory contains practical examples for verifying ATProto signatures on ATCR container images.
## Files
### Scripts
- **`atcr-verify.sh`** - Standalone signature verification script
- Verifies ATProto signatures using shell commands
- Requires: `curl`, `jq`, `crane`, `oras`
- Does everything except full cryptographic verification
- Use this until the `atcr-verify` CLI tool is built
- **`verify-and-pull.sh`** - Secure image pull wrapper
- Verifies signatures before pulling images
- Can be used as a `docker pull` replacement
- Configurable via environment variables
### Configuration
- **`trust-policy.yaml`** - Example trust policy configuration
- Defines which DIDs to trust
- Specifies policies for different image scopes
- Includes audit logging and reporting settings
- **`kubernetes-webhook.yaml`** - Kubernetes admission controller
- Validates signatures before pod creation
- Includes webhook deployment, service, and configuration
- Uses trust policy ConfigMap
## Quick Start
### 1. Verify an Image
```bash
# Make script executable
chmod +x atcr-verify.sh
# Verify an image
./atcr-verify.sh atcr.io/alice/myapp:latest
```
**Output:**
```
═══════════════════════════════════════════════════
ATProto Signature Verification
═══════════════════════════════════════════════════
Image: atcr.io/alice/myapp:latest
═══════════════════════════════════════════════════
[1/7] Resolving image digest...
→ sha256:abc123...
[2/7] Discovering ATProto signature artifacts...
→ Found 1 signature(s)
→ Signature digest: sha256:sig789...
→ Signed by DID: did:plc:alice123
[3/7] Fetching signature metadata...
→ DID: did:plc:alice123
→ Handle: alice.bsky.social
→ PDS: https://bsky.social
→ Record: at://did:plc:alice123/io.atcr.manifest/abc123
→ Signed at: 2025-10-31T12:34:56.789Z
[4/7] Resolving DID to public key...
→ Public key: zQ3shokFTS3brHcD...
[5/7] Querying PDS for signed record...
→ Record CID: bafyreig7...
[6/7] Verifying record integrity...
→ Record digest matches image digest
[7/7] Cryptographic signature verification...
⚠ Full cryptographic verification requires ATProto crypto library
═══════════════════════════════════════════════════
✓ Verification Completed
═══════════════════════════════════════════════════
Signed by: alice.bsky.social (did:plc:alice123)
Signed at: 2025-10-31T12:34:56.789Z
PDS: https://bsky.social
Record: at://did:plc:alice123/io.atcr.manifest/abc123
Signature: sha256:sig789...
═══════════════════════════════════════════════════
```
### 2. Secure Pull
```bash
# Make script executable
chmod +x verify-and-pull.sh
# Pull image with verification
./verify-and-pull.sh atcr.io/alice/myapp:latest
# With Docker options
./verify-and-pull.sh atcr.io/alice/myapp:latest --platform linux/amd64
```
**Create an alias for convenience:**
```bash
# Add to ~/.bashrc or ~/.zshrc
alias docker-pull-secure='/path/to/verify-and-pull.sh'
# Use it
docker-pull-secure atcr.io/alice/myapp:latest
```
### 3. Deploy Kubernetes Webhook
```bash
# 1. Generate TLS certificates for webhook
openssl req -x509 -newkey rsa:4096 -keyout tls.key -out tls.crt \
-days 365 -nodes -subj "/CN=atcr-verify-webhook.atcr-system.svc"
# 2. Create namespace and secret
kubectl create namespace atcr-system
kubectl create secret tls atcr-verify-webhook-certs \
--cert=tls.crt --key=tls.key -n atcr-system
# 3. Update CA bundle in kubernetes-webhook.yaml
cat tls.crt | base64 -w 0
# Copy output and replace caBundle in kubernetes-webhook.yaml
# 4. Deploy webhook
kubectl apply -f kubernetes-webhook.yaml
# 5. Enable verification for a namespace
kubectl label namespace production atcr-verify=enabled
# 6. Test with a pod
kubectl run test-pod --image=atcr.io/alice/myapp:latest -n production
```
## Prerequisites
### For Scripts
Install required tools:
**macOS (Homebrew):**
```bash
brew install curl jq crane oras
```
**Linux (apt):**
```bash
# curl and jq
sudo apt-get install curl jq
# crane
curl -sL "https://github.com/google/go-containerregistry/releases/download/v0.15.2/go-containerregistry_Linux_x86_64.tar.gz" | tar -xz crane
sudo mv crane /usr/local/bin/
# oras
curl -LO "https://github.com/oras-project/oras/releases/download/v1.0.0/oras_1.0.0_linux_amd64.tar.gz"
tar -xzf oras_1.0.0_linux_amd64.tar.gz
sudo mv oras /usr/local/bin/
```
### For Kubernetes Webhook
Requirements:
- Kubernetes cluster (1.16+)
- `kubectl` configured
- Permission to create namespaces and webhooks
- Webhook container image (build from source or use pre-built)
## Configuration
### Environment Variables (verify-and-pull.sh)
- `VERIFY_SCRIPT` - Path to atcr-verify.sh (default: ./atcr-verify.sh)
- `TRUST_POLICY` - Path to trust policy (default: ./trust-policy.yaml)
- `REQUIRE_VERIFICATION` - Require verification (default: true)
- `SKIP_ATCR_IMAGES` - Skip verification for non-ATCR images (default: false)
**Example:**
```bash
# Skip verification for non-ATCR images
SKIP_ATCR_IMAGES=true ./verify-and-pull.sh docker.io/library/nginx:latest
# Allow pulling even if verification fails (NOT RECOMMENDED)
REQUIRE_VERIFICATION=false ./verify-and-pull.sh atcr.io/alice/myapp:latest
```
### Trust Policy
Edit `trust-policy.yaml` to customize:
1. **Add your DIDs:**
```yaml
trustedDIDs:
did:plc:your-did:
name: "Your Name"
validFrom: "2024-01-01T00:00:00Z"
```
2. **Define policies:**
```yaml
policies:
- name: my-policy
scope: "atcr.io/myorg/*"
require:
signature: true
trustedDIDs:
- did:plc:your-did
action: enforce
```
3. **Use with verification:**
```bash
# When atcr-verify CLI is available:
atcr-verify IMAGE --policy trust-policy.yaml
```
## Integration Patterns
### CI/CD (GitHub Actions)
```yaml
- name: Verify image signature
run: |
chmod +x examples/verification/atcr-verify.sh
./examples/verification/atcr-verify.sh ${{ env.IMAGE }}
- name: Deploy if verified
if: success()
run: kubectl set image deployment/app app=${{ env.IMAGE }}
```
### CI/CD (GitLab CI)
```yaml
verify:
script:
- chmod +x examples/verification/atcr-verify.sh
- ./examples/verification/atcr-verify.sh $IMAGE
deploy:
dependencies: [verify]
script:
- kubectl set image deployment/app app=$IMAGE
```
### Docker Alias
```bash
# ~/.bashrc or ~/.zshrc
function docker() {
if [ "$1" = "pull" ] && [[ "$2" =~ ^atcr\.io/ ]]; then
echo "Using secure pull with signature verification..."
/path/to/verify-and-pull.sh "${@:2}"
else
command docker "$@"
fi
}
```
### Systemd Service
```ini
# /etc/systemd/system/myapp.service
[Unit]
Description=My Application
After=docker.service
[Service]
Type=oneshot
ExecStartPre=/path/to/verify-and-pull.sh atcr.io/myorg/myapp:latest
ExecStart=/usr/bin/docker run atcr.io/myorg/myapp:latest
Restart=on-failure
[Install]
WantedBy=multi-user.target
```
## Troubleshooting
### "No ATProto signature found"
**Cause:** Image doesn't have a signature artifact
**Solutions:**
1. Check if image exists: `crane digest IMAGE`
2. Re-push image to generate signature
3. Verify referrers API is working:
```bash
curl "https://atcr.io/v2/REPO/referrers/DIGEST"
```
### "Failed to resolve DID"
**Cause:** DID resolution failed
**Solutions:**
1. Check internet connectivity
2. Verify DID is valid: `curl https://plc.directory/DID`
3. Check if DID document has verificationMethod
### "Failed to fetch record from PDS"
**Cause:** PDS is unreachable or record doesn't exist
**Solutions:**
1. Check PDS endpoint: `curl PDS_URL/xrpc/com.atproto.server.describeServer`
2. Verify record URI is correct
3. Check if record exists in PDS
### Webhook Pods Don't Start
**Cause:** Webhook is rejecting all pods
**Solutions:**
1. Check webhook logs: `kubectl logs -n atcr-system -l app=atcr-verify-webhook`
2. Disable webhook temporarily: `kubectl delete validatingwebhookconfiguration atcr-verify`
3. Fix issue and re-deploy
4. Test with labeled namespace first
## Security Best Practices
1. **Always verify in production**
- Enable webhook for production namespaces
- Set `failurePolicy: Fail` to block on errors
2. **Use trust policies**
- Define specific trusted DIDs
- Don't trust all signatures blindly
- Set expiration dates for temporary access
3. **Monitor verification**
- Enable audit logging
- Review verification failures
- Track signature coverage
4. **Rotate keys regularly**
- Update DID documents when keys change
- Revoke compromised keys immediately
- Monitor for unexpected key changes
5. **Secure webhook deployment**
- Use TLS for webhook communication
- Restrict webhook RBAC permissions
- Keep webhook image updated
## Next Steps
1. **Test verification** with your images
2. **Customize trust policy** for your organization
3. **Deploy webhook** to test clusters first
4. **Monitor** verification in CI/CD pipelines
5. **Gradually roll out** to production
## See Also
- [ATProto Signatures](../../docs/ATPROTO_SIGNATURES.md) - Technical details
- [Signature Integration](../../docs/SIGNATURE_INTEGRATION.md) - Integration guide
- [SBOM Scanning](../../docs/SBOM_SCANNING.md) - Similar ORAS pattern
## Support
For issues or questions:
- GitHub Issues: https://github.com/your-org/atcr/issues
- Documentation: https://docs.atcr.io
- Security: security@yourorg.com

View File

@@ -0,0 +1,243 @@
#!/bin/bash
# ATProto Signature Verification Script
#
# This script verifies ATProto signatures for container images stored in ATCR.
# It performs all steps except full cryptographic verification (which requires
# the indigo library). For production use, use the atcr-verify CLI tool.
#
# Usage: ./atcr-verify.sh IMAGE_REF
# Example: ./atcr-verify.sh atcr.io/alice/myapp:latest
#
# Requirements:
# - curl
# - jq
# - crane (https://github.com/google/go-containerregistry/releases)
# - oras (https://oras.land/docs/installation)
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Check dependencies
check_dependencies() {
local missing=0
for cmd in curl jq crane oras; do
if ! command -v $cmd &> /dev/null; then
echo -e "${RED}${NC} Missing dependency: $cmd"
missing=1
fi
done
if [ $missing -eq 1 ]; then
echo ""
echo "Install missing dependencies:"
echo " curl: https://curl.se/download.html"
echo " jq: https://stedolan.github.io/jq/download/"
echo " crane: https://github.com/google/go-containerregistry/releases"
echo " oras: https://oras.land/docs/installation"
exit 1
fi
}
# Print with color
print_step() {
echo -e "${BLUE}[$1/${TOTAL_STEPS}]${NC} $2..."
}
print_success() {
echo -e " ${GREEN}${NC} $1"
}
print_error() {
echo -e " ${RED}${NC} $1"
}
print_warning() {
echo -e " ${YELLOW}${NC} $1"
}
# Main verification function
verify_image() {
local image="$1"
if [ -z "$image" ]; then
echo "Usage: $0 IMAGE_REF"
echo "Example: $0 atcr.io/alice/myapp:latest"
exit 1
fi
TOTAL_STEPS=7
echo ""
echo "═══════════════════════════════════════════════════"
echo " ATProto Signature Verification"
echo "═══════════════════════════════════════════════════"
echo " Image: $image"
echo "═══════════════════════════════════════════════════"
echo ""
# Step 1: Resolve image digest
print_step 1 "Resolving image digest"
DIGEST=$(crane digest "$image" 2>&1)
if [ $? -ne 0 ]; then
print_error "Failed to resolve image digest"
echo "$DIGEST"
exit 1
fi
print_success "$DIGEST"
# Extract registry, repository, and tag
REGISTRY=$(echo "$image" | cut -d/ -f1)
REPO=$(echo "$image" | cut -d/ -f2-)
REPO_PATH=$(echo "$REPO" | cut -d: -f1)
# Step 2: Discover ATProto signature artifacts
print_step 2 "Discovering ATProto signature artifacts"
REFERRERS_URL="https://${REGISTRY}/v2/${REPO_PATH}/referrers/${DIGEST}?artifactType=application/vnd.atproto.signature.v1+json"
SIG_ARTIFACTS=$(curl -s -H "Accept: application/vnd.oci.image.index.v1+json" "$REFERRERS_URL")
if [ $? -ne 0 ]; then
print_error "Failed to query referrers API"
exit 1
fi
SIG_COUNT=$(echo "$SIG_ARTIFACTS" | jq '.manifests | length')
if [ "$SIG_COUNT" = "0" ]; then
print_error "No ATProto signature found"
echo ""
echo "This image does not have an ATProto signature."
echo "Signatures are automatically created when you push to ATCR."
exit 1
fi
print_success "Found $SIG_COUNT signature(s)"
# Get first signature digest
SIG_DIGEST=$(echo "$SIG_ARTIFACTS" | jq -r '.manifests[0].digest')
SIG_DID=$(echo "$SIG_ARTIFACTS" | jq -r '.manifests[0].annotations["io.atcr.atproto.did"]')
print_success "Signature digest: $SIG_DIGEST"
print_success "Signed by DID: $SIG_DID"
# Step 3: Fetch signature metadata
print_step 3 "Fetching signature metadata"
TMPDIR=$(mktemp -d)
trap "rm -rf $TMPDIR" EXIT
oras pull "${REGISTRY}/${REPO_PATH}@${SIG_DIGEST}" -o "$TMPDIR" --quiet 2>&1
if [ $? -ne 0 ]; then
print_error "Failed to fetch signature metadata"
exit 1
fi
# Find the JSON file
SIG_FILE=$(find "$TMPDIR" -name "*.json" -type f | head -n 1)
if [ -z "$SIG_FILE" ]; then
print_error "Signature metadata file not found"
exit 1
fi
DID=$(jq -r '.atproto.did' "$SIG_FILE")
HANDLE=$(jq -r '.atproto.handle // "unknown"' "$SIG_FILE")
PDS=$(jq -r '.atproto.pdsEndpoint' "$SIG_FILE")
RECORD_URI=$(jq -r '.atproto.recordUri' "$SIG_FILE")
COMMIT_CID=$(jq -r '.atproto.commitCid' "$SIG_FILE")
SIGNED_AT=$(jq -r '.atproto.signedAt' "$SIG_FILE")
print_success "DID: $DID"
print_success "Handle: $HANDLE"
print_success "PDS: $PDS"
print_success "Record: $RECORD_URI"
print_success "Signed at: $SIGNED_AT"
# Step 4: Resolve DID to public key
print_step 4 "Resolving DID to public key"
DID_DOC=$(curl -s "https://plc.directory/$DID")
if [ $? -ne 0 ]; then
print_error "Failed to resolve DID"
exit 1
fi
PUB_KEY_MB=$(echo "$DID_DOC" | jq -r '.verificationMethod[0].publicKeyMultibase')
if [ "$PUB_KEY_MB" = "null" ] || [ -z "$PUB_KEY_MB" ]; then
print_error "Public key not found in DID document"
exit 1
fi
print_success "Public key: ${PUB_KEY_MB:0:20}...${PUB_KEY_MB: -10}"
# Step 5: Query PDS for signed record
print_step 5 "Querying PDS for signed record"
# Extract collection and rkey from record URI (at://did/collection/rkey)
COLLECTION=$(echo "$RECORD_URI" | sed 's|at://[^/]*/\([^/]*\)/.*|\1|')
RKEY=$(echo "$RECORD_URI" | sed 's|at://.*/||')
RECORD_URL="${PDS}/xrpc/com.atproto.repo.getRecord?repo=${DID}&collection=${COLLECTION}&rkey=${RKEY}"
RECORD=$(curl -s "$RECORD_URL")
if [ $? -ne 0 ]; then
print_error "Failed to fetch record from PDS"
exit 1
fi
RECORD_CID=$(echo "$RECORD" | jq -r '.cid')
if [ "$RECORD_CID" = "null" ] || [ -z "$RECORD_CID" ]; then
print_error "Record not found in PDS"
exit 1
fi
print_success "Record CID: $RECORD_CID"
# Step 6: Verify record matches image manifest
print_step 6 "Verifying record integrity"
RECORD_DIGEST=$(echo "$RECORD" | jq -r '.value.digest')
if [ "$RECORD_DIGEST" != "$DIGEST" ]; then
print_error "Record digest ($RECORD_DIGEST) doesn't match image digest ($DIGEST)"
exit 1
fi
print_success "Record digest matches image digest"
# Step 7: Signature verification status
print_step 7 "Cryptographic signature verification"
print_warning "Full cryptographic verification requires ATProto crypto library"
print_warning "This script verifies:"
echo " • Record exists in PDS"
echo " • DID resolved successfully"
echo " • Public key retrieved from DID document"
echo " • Record digest matches image digest"
echo ""
print_warning "For full cryptographic verification, use: atcr-verify $image"
# Summary
echo ""
echo "═══════════════════════════════════════════════════"
echo -e " ${GREEN}✓ Verification Completed${NC}"
echo "═══════════════════════════════════════════════════"
echo ""
echo " Signed by: $HANDLE ($DID)"
echo " Signed at: $SIGNED_AT"
echo " PDS: $PDS"
echo " Record: $RECORD_URI"
echo " Signature: $SIG_DIGEST"
echo ""
echo "═══════════════════════════════════════════════════"
echo ""
}
# Check dependencies first
check_dependencies
# Run verification
verify_image "$1"

View File

@@ -0,0 +1,259 @@
# Kubernetes Admission Webhook for ATProto Signature Verification
#
# This example shows how to deploy a validating admission webhook that
# verifies ATProto signatures before allowing pods to be created.
#
# Prerequisites:
# 1. Build and push the webhook image (see examples/webhook/ for code)
# 2. Generate TLS certificates for the webhook
# 3. Create trust policy ConfigMap
#
# Usage:
# kubectl apply -f kubernetes-webhook.yaml
# kubectl label namespace production atcr-verify=enabled
---
apiVersion: v1
kind: Namespace
metadata:
name: atcr-system
---
# ConfigMap with trust policy
apiVersion: v1
kind: ConfigMap
metadata:
name: atcr-trust-policy
namespace: atcr-system
data:
policy.yaml: |
version: 1.0
# Global settings
defaultAction: enforce # enforce, audit, or allow
# Policies by image pattern
policies:
- name: production-images
scope: "atcr.io/*/prod-*"
require:
signature: true
trustedDIDs:
- did:plc:your-org-devops
- did:plc:your-org-security
minSignatures: 1
action: enforce
- name: staging-images
scope: "atcr.io/*/staging-*"
require:
signature: true
trustedDIDs:
- did:plc:your-org-devops
- did:plc:your-org-security
- did:plc:your-developers
action: enforce
- name: dev-images
scope: "atcr.io/*/dev-*"
require:
signature: false
action: audit # Log but don't block
# Trusted DIDs configuration
trustedDIDs:
did:plc:your-org-devops:
name: "DevOps Team"
validFrom: "2024-01-01T00:00:00Z"
expiresAt: null
did:plc:your-org-security:
name: "Security Team"
validFrom: "2024-01-01T00:00:00Z"
expiresAt: null
did:plc:your-developers:
name: "Developer Team"
validFrom: "2024-06-01T00:00:00Z"
expiresAt: null
---
# Service for webhook
apiVersion: v1
kind: Service
metadata:
name: atcr-verify-webhook
namespace: atcr-system
spec:
selector:
app: atcr-verify-webhook
ports:
- name: https
port: 443
targetPort: 8443
---
# Deployment for webhook
apiVersion: apps/v1
kind: Deployment
metadata:
name: atcr-verify-webhook
namespace: atcr-system
spec:
replicas: 2
selector:
matchLabels:
app: atcr-verify-webhook
template:
metadata:
labels:
app: atcr-verify-webhook
spec:
containers:
- name: webhook
image: atcr.io/atcr/verify-webhook:latest
imagePullPolicy: Always
ports:
- containerPort: 8443
name: https
env:
- name: TLS_CERT_FILE
value: /etc/webhook/certs/tls.crt
- name: TLS_KEY_FILE
value: /etc/webhook/certs/tls.key
- name: POLICY_FILE
value: /etc/webhook/policy/policy.yaml
- name: LOG_LEVEL
value: info
volumeMounts:
- name: webhook-certs
mountPath: /etc/webhook/certs
readOnly: true
- name: policy
mountPath: /etc/webhook/policy
readOnly: true
resources:
requests:
memory: "64Mi"
cpu: "100m"
limits:
memory: "256Mi"
cpu: "500m"
livenessProbe:
httpGet:
path: /healthz
port: 8443
scheme: HTTPS
initialDelaySeconds: 10
periodSeconds: 10
readinessProbe:
httpGet:
path: /readyz
port: 8443
scheme: HTTPS
initialDelaySeconds: 5
periodSeconds: 5
volumes:
- name: webhook-certs
secret:
secretName: atcr-verify-webhook-certs
- name: policy
configMap:
name: atcr-trust-policy
---
# ValidatingWebhookConfiguration
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: atcr-verify
webhooks:
- name: verify.atcr.io
admissionReviewVersions: ["v1", "v1beta1"]
sideEffects: None
# Client configuration
clientConfig:
service:
name: atcr-verify-webhook
namespace: atcr-system
path: /validate
port: 443
# CA bundle for webhook TLS (base64-encoded CA cert)
# Generate with: cat ca.crt | base64 -w 0
caBundle: LS0tLS1CRUdJTi... # Replace with your CA bundle
# Rules - what to validate
rules:
- operations: ["CREATE", "UPDATE"]
apiGroups: [""]
apiVersions: ["v1"]
resources: ["pods"]
scope: "Namespaced"
# Namespace selector - only validate labeled namespaces
namespaceSelector:
matchExpressions:
- key: atcr-verify
operator: In
values: ["enabled", "enforce"]
# Failure policy - what to do if webhook fails
failurePolicy: Fail # Reject pods if webhook is unavailable
# Timeout
timeoutSeconds: 10
# Match policy
matchPolicy: Equivalent
---
# Example: Label a namespace to enable verification
# kubectl label namespace production atcr-verify=enabled
---
# RBAC for webhook
apiVersion: v1
kind: ServiceAccount
metadata:
name: atcr-verify-webhook
namespace: atcr-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: atcr-verify-webhook
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: atcr-verify-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: atcr-verify-webhook
subjects:
- kind: ServiceAccount
name: atcr-verify-webhook
namespace: atcr-system
---
# Secret for TLS certificates
# Generate certificates with:
# openssl req -x509 -newkey rsa:4096 -keyout tls.key -out tls.crt \
# -days 365 -nodes -subj "/CN=atcr-verify-webhook.atcr-system.svc"
#
# Create secret with:
# kubectl create secret tls atcr-verify-webhook-certs \
# --cert=tls.crt --key=tls.key -n atcr-system
#
# (Commented out - create manually with your certs)
# apiVersion: v1
# kind: Secret
# metadata:
# name: atcr-verify-webhook-certs
# namespace: atcr-system
# type: kubernetes.io/tls
# data:
# tls.crt: <base64-encoded-cert>
# tls.key: <base64-encoded-key>

View File

@@ -0,0 +1,247 @@
# ATProto Signature Trust Policy
#
# This file defines which signatures to trust and what to do when
# signatures are invalid or missing.
#
# Usage with atcr-verify:
# atcr-verify IMAGE --policy trust-policy.yaml
version: 1.0
# Global settings
defaultAction: enforce # Options: enforce, audit, allow
requireSignature: true # Require at least one signature
# Policies matched by image scope (first match wins)
policies:
# Production images require signatures from trusted DIDs
- name: production-images
description: "Production images must be signed by DevOps or Security team"
scope: "atcr.io/*/prod-*"
require:
signature: true
trustedDIDs:
- did:plc:your-org-devops
- did:plc:your-org-security
minSignatures: 1
maxAge: 2592000 # 30 days in seconds
action: enforce # Reject if policy fails
# Critical infrastructure requires multi-signature
- name: critical-infrastructure
description: "Critical services require 2 signatures"
scope: "atcr.io/*/critical-*"
require:
signature: true
trustedDIDs:
- did:plc:your-org-security
- did:plc:your-org-devops
minSignatures: 2 # Require at least 2 signatures
algorithms:
- ECDSA-K256-SHA256 # Only allow specific algorithms
action: enforce
# Staging images require signature from any team member
- name: staging-images
description: "Staging images need any trusted signature"
scope: "atcr.io/*/staging-*"
require:
signature: true
trustedDIDs:
- did:plc:your-org-devops
- did:plc:your-org-security
- did:plc:your-org-developers
minSignatures: 1
action: enforce
# Development images are audited but not blocked
- name: dev-images
description: "Development images are monitored"
scope: "atcr.io/*/dev-*"
require:
signature: false # Don't require signatures
action: audit # Log but don't reject
# Test images from external sources
- name: external-test-images
description: "Test images from partners"
scope: "atcr.io/external/*"
require:
signature: true
trustedDIDs:
- did:plc:partner-acme
- did:plc:partner-widgets
minSignatures: 1
action: enforce
# Default fallback for all other images
- name: default
description: "All other images require signature"
scope: "atcr.io/*/*"
require:
signature: true
minSignatures: 1
action: enforce
# Trusted DID registry
trustedDIDs:
# Your organization's DevOps team
did:plc:your-org-devops:
name: "DevOps Team"
description: "Production deployment automation"
validFrom: "2024-01-01T00:00:00Z"
expiresAt: null # Never expires
contact: "devops@yourorg.com"
allowedScopes:
- "atcr.io/*/prod-*"
- "atcr.io/*/staging-*"
- "atcr.io/*/critical-*"
# Your organization's Security team
did:plc:your-org-security:
name: "Security Team"
description: "Security-reviewed images"
validFrom: "2024-01-01T00:00:00Z"
expiresAt: null
contact: "security@yourorg.com"
allowedScopes:
- "atcr.io/*/*" # Can sign any image
# Developer team (limited access)
did:plc:your-org-developers:
name: "Developer Team"
description: "Development and staging images"
validFrom: "2024-06-01T00:00:00Z"
expiresAt: "2025-12-31T23:59:59Z" # Temporary access
contact: "dev-team@yourorg.com"
allowedScopes:
- "atcr.io/*/dev-*"
- "atcr.io/*/staging-*"
notes: "Access expires end of 2025 - review then"
# External partner: ACME Corp
did:plc:partner-acme:
name: "ACME Corp Integration Team"
description: "Third-party integration images"
validFrom: "2024-09-01T00:00:00Z"
expiresAt: "2025-09-01T00:00:00Z"
contact: "integration@acme.example.com"
allowedScopes:
- "atcr.io/external/acme-*"
# External partner: Widgets Inc
did:plc:partner-widgets:
name: "Widgets Inc"
description: "Widgets service integration"
validFrom: "2024-10-01T00:00:00Z"
expiresAt: "2025-10-01T00:00:00Z"
contact: "api@widgets.example.com"
allowedScopes:
- "atcr.io/external/widgets-*"
# Signature validation settings
validation:
# Signature age limits
maxSignatureAge: 7776000 # 90 days in seconds (null = no limit)
# Allowed signature algorithms
allowedAlgorithms:
- ECDSA-K256-SHA256 # ATProto default
- ECDSA-P256-SHA256 # Alternative
# DID resolution settings
didResolver:
timeout: 10 # seconds
cache:
enabled: true
ttl: 3600 # 1 hour in seconds
fallbackResolvers:
- https://plc.directory
- https://backup-plc.example.com
# PDS connection settings
pds:
timeout: 15 # seconds
retries: 3
cache:
enabled: true
ttl: 600 # 10 minutes
# Audit logging
audit:
enabled: true
logLevel: info # debug, info, warn, error
# What to log
logEvents:
- signature_verified
- signature_missing
- signature_invalid
- signature_expired
- did_resolution_failed
- pds_query_failed
- policy_violation
# Log destinations
destinations:
- type: stdout
format: json
- type: file
path: /var/log/atcr-verify/audit.log
format: json
rotate: true
maxSize: 100MB
maxFiles: 10
# Reporting and metrics
reporting:
# Prometheus metrics
metrics:
enabled: true
port: 9090
path: /metrics
# Periodic reports
reports:
enabled: true
interval: 86400 # Daily in seconds
email:
- security@yourorg.com
- devops@yourorg.com
includeStatistics: true
# Emergency overrides
overrides:
# Allow bypassing verification in emergencies
enabled: false # Enable with extreme caution!
requireApproval: true
approvers:
- security@yourorg.com
validDuration: 3600 # Override valid for 1 hour
# Examples of policy evaluation:
#
# atcr.io/myorg/prod-api:v1.2.3
# → Matches: production-images
# → Requires: 1 signature from DevOps or Security
# → Action: enforce
#
# atcr.io/myorg/critical-auth:v2.0.0
# → Matches: critical-infrastructure
# → Requires: 2 signatures from Security and DevOps
# → Action: enforce
#
# atcr.io/myorg/staging-frontend:latest
# → Matches: staging-images
# → Requires: 1 signature from any team member
# → Action: enforce
#
# atcr.io/myorg/dev-experiment:test
# → Matches: dev-images
# → Requires: none
# → Action: audit (log only)
#
# atcr.io/external/acme-connector:v1.0
# → Matches: external-test-images
# → Requires: 1 signature from partner-acme
# → Action: enforce

View File

@@ -0,0 +1,162 @@
#!/bin/bash
# Verify and Pull Script
#
# This script verifies ATProto signatures before pulling images with Docker.
# It acts as a wrapper around `docker pull` to enforce signature verification.
#
# Usage: ./verify-and-pull.sh IMAGE [DOCKER_PULL_OPTIONS]
# Example: ./verify-and-pull.sh atcr.io/alice/myapp:latest
# Example: ./verify-and-pull.sh atcr.io/alice/myapp:latest --platform linux/amd64
#
# To use this as a replacement for docker pull, create an alias:
# alias docker-pull-secure='/path/to/verify-and-pull.sh'
set -e
# Configuration
VERIFY_SCRIPT="${VERIFY_SCRIPT:-$(dirname $0)/atcr-verify.sh}"
TRUST_POLICY="${TRUST_POLICY:-$(dirname $0)/trust-policy.yaml}"
REQUIRE_VERIFICATION="${REQUIRE_VERIFICATION:-true}"
SKIP_ATCR_IMAGES="${SKIP_ATCR_IMAGES:-false}" # Skip verification for non-ATCR images
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
print_header() {
echo ""
echo -e "${BLUE}═══════════════════════════════════════════════════${NC}"
echo -e "${BLUE} Secure Image Pull with Signature Verification${NC}"
echo -e "${BLUE}═══════════════════════════════════════════════════${NC}"
echo ""
}
print_success() {
echo -e "${GREEN}${NC} $1"
}
print_error() {
echo -e "${RED}${NC} $1"
}
print_warning() {
echo -e "${YELLOW}${NC} $1"
}
# Check if image is from ATCR
is_atcr_image() {
local image="$1"
if [[ "$image" =~ ^atcr\.io/ ]]; then
return 0
else
return 1
fi
}
# Main function
main() {
if [ $# -eq 0 ]; then
echo "Usage: $0 IMAGE [DOCKER_PULL_OPTIONS]"
echo ""
echo "Examples:"
echo " $0 atcr.io/alice/myapp:latest"
echo " $0 atcr.io/alice/myapp:latest --platform linux/amd64"
echo ""
echo "Environment variables:"
echo " VERIFY_SCRIPT - Path to verification script (default: ./atcr-verify.sh)"
echo " TRUST_POLICY - Path to trust policy (default: ./trust-policy.yaml)"
echo " REQUIRE_VERIFICATION - Require verification for ATCR images (default: true)"
echo " SKIP_ATCR_IMAGES - Skip verification for non-ATCR images (default: false)"
exit 1
fi
local image="$1"
shift
local docker_args="$@"
print_header
echo -e "${BLUE}Image:${NC} $image"
if [ -n "$docker_args" ]; then
echo -e "${BLUE}Docker options:${NC} $docker_args"
fi
echo ""
# Check if this is an ATCR image
if ! is_atcr_image "$image"; then
if [ "$SKIP_ATCR_IMAGES" = "true" ]; then
print_warning "Not an ATCR image - skipping signature verification"
echo ""
docker pull $docker_args "$image"
exit $?
else
print_warning "Not an ATCR image"
if [ "$REQUIRE_VERIFICATION" = "true" ]; then
print_error "Verification required but image is not from ATCR"
exit 1
else
print_warning "Proceeding without verification"
echo ""
docker pull $docker_args "$image"
exit $?
fi
fi
fi
# Step 1: Verify signature
echo -e "${BLUE}Step 1: Verifying ATProto signature${NC}"
echo ""
if [ ! -f "$VERIFY_SCRIPT" ]; then
print_error "Verification script not found: $VERIFY_SCRIPT"
exit 1
fi
# Run verification
if bash "$VERIFY_SCRIPT" "$image"; then
print_success "Signature verification passed"
echo ""
else
print_error "Signature verification failed"
echo ""
if [ "$REQUIRE_VERIFICATION" = "true" ]; then
echo -e "${RED}Image pull blocked due to failed signature verification${NC}"
echo ""
echo "To proceed anyway (NOT RECOMMENDED), run:"
echo " REQUIRE_VERIFICATION=false $0 $image $docker_args"
exit 1
else
print_warning "Verification failed but REQUIRE_VERIFICATION=false"
print_warning "Proceeding with pull (NOT RECOMMENDED)"
echo ""
fi
fi
# Step 2: Pull image
echo -e "${BLUE}Step 2: Pulling image${NC}"
echo ""
if docker pull $docker_args "$image"; then
print_success "Image pulled successfully"
else
print_error "Failed to pull image"
exit 1
fi
# Summary
echo ""
echo -e "${GREEN}═══════════════════════════════════════════════════${NC}"
echo -e "${GREEN} ✓ Secure pull completed successfully${NC}"
echo -e "${GREEN}═══════════════════════════════════════════════════${NC}"
echo ""
echo -e "${BLUE}Image:${NC} $image"
echo -e "${BLUE}Status:${NC} Verified and pulled"
echo ""
}
# Run main function
main "$@"

25
license Normal file
View File

@@ -0,0 +1,25 @@
The MIT License (MIT)
=====================
Copyright © 2025 Evan Jarrett
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the “Software”), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -43,12 +43,14 @@ func (h *ManifestHealthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
reachable, err := h.HealthChecker.CheckHealth(ctx, endpoint)
if ctx.Err() == context.DeadlineExceeded {
// Still pending - render "Checking..." badge with HTMX retry
h.renderBadge(w, endpoint, false, true)
} else if err != nil {
// Check for HTTP errors first (connection refused, network unreachable, etc.)
// This ensures we catch real failures even when timing aligns with context timeout
if err != nil {
// Error - mark as unreachable
h.renderBadge(w, endpoint, false, false)
} else if ctx.Err() == context.DeadlineExceeded {
// Context timed out but no HTTP error yet - still pending
h.renderBadge(w, endpoint, false, true)
} else {
// Success
h.renderBadge(w, endpoint, reachable, false)
@@ -65,10 +67,10 @@ func (h *ManifestHealthHandler) renderBadge(w http.ResponseWriter, endpoint stri
w.Write([]byte(`<span class="checking-badge"
hx-get="` + retryURL + `"
hx-trigger="load delay:3s"
hx-swap="outerHTML">🔄 Checking...</span>`))
hx-swap="outerHTML"><i data-lucide="refresh-ccw"></i> Checking...</span>`))
} else if !reachable {
// Unreachable - render offline badge
w.Write([]byte(`<span class="offline-badge">⚠️ Offline</span>`))
w.Write([]byte(`<span class="offline-badge"><i data-lucide="triangle-alert"></i> Offline</span>`))
} else {
// Reachable - no badge (empty response)
w.Write([]byte(``))

View File

@@ -154,10 +154,13 @@ func (nr *NamespaceResolver) Repository(ctx context.Context, name reference.Name
// Auto-reconcile crew membership on first push/pull
// This ensures users can push immediately after docker login without web sign-in
// EnsureCrewMembership is best-effort and logs errors without failing the request
// Run in background to avoid blocking registry operations if hold is offline
if holdDID != "" && nr.refresher != nil {
slog.Debug("Auto-reconciling crew membership", "component", "registry/middleware", "did", did, "hold_did", holdDID)
client := atproto.NewClient(pdsEndpoint, did, "")
storage.EnsureCrewMembership(ctx, client, nr.refresher, holdDID)
go func(ctx context.Context, client *atproto.Client, refresher *oauth.Refresher, holdDID string) {
storage.EnsureCrewMembership(ctx, client, refresher, holdDID)
}(ctx, client, nr.refresher, holdDID)
}
// Get service token for hold authentication

View File

@@ -2,6 +2,7 @@ package storage
import (
"context"
"sync"
"testing"
"atcr.io/pkg/atproto"
@@ -9,20 +10,37 @@ import (
// Mock implementations for testing
type mockDatabaseMetrics struct {
mu sync.Mutex
pullCount int
pushCount int
}
func (m *mockDatabaseMetrics) IncrementPullCount(did, repository string) error {
m.mu.Lock()
defer m.mu.Unlock()
m.pullCount++
return nil
}
func (m *mockDatabaseMetrics) IncrementPushCount(did, repository string) error {
m.mu.Lock()
defer m.mu.Unlock()
m.pushCount++
return nil
}
func (m *mockDatabaseMetrics) getPullCount() int {
m.mu.Lock()
defer m.mu.Unlock()
return m.pullCount
}
func (m *mockDatabaseMetrics) getPushCount() int {
m.mu.Lock()
defer m.mu.Unlock()
return m.pushCount
}
type mockReadmeCache struct{}
func (m *mockReadmeCache) Get(ctx context.Context, url string) (string, error) {

View File

@@ -6,6 +6,7 @@ import (
"io"
"log/slog"
"net/http"
"time"
"atcr.io/pkg/atproto"
"atcr.io/pkg/auth/oauth"
@@ -59,6 +60,10 @@ func EnsureCrewMembership(ctx context.Context, client *atproto.Client, refresher
// requestCrewMembership calls the hold's requestCrew endpoint
// The endpoint handles all authorization and duplicate checking internally
func requestCrewMembership(ctx context.Context, holdEndpoint, serviceToken string) error {
// Add 5 second timeout to prevent hanging on offline holds
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
url := fmt.Sprintf("%s%s", holdEndpoint, atproto.HoldRequestCrew)
req, err := http.NewRequestWithContext(ctx, "POST", url, nil)

View File

@@ -11,6 +11,7 @@ import (
"maps"
"net/http"
"strings"
"sync"
"time"
"atcr.io/pkg/atproto"
@@ -22,6 +23,7 @@ import (
// It stores manifests in ATProto as records
type ManifestStore struct {
ctx *RegistryContext // Context with user/hold info
mu sync.RWMutex // Protects lastFetchedHoldDID
lastFetchedHoldDID string // Hold DID from most recently fetched manifest (for pull)
blobStore distribution.BlobStore // Blob store for fetching config during push
}
@@ -67,6 +69,7 @@ func (s *ManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...
// Store the hold DID for subsequent blob requests during pull
// Prefer HoldDID (new format) with fallback to HoldEndpoint (legacy URL format)
// The routing repository will cache this for concurrent blob fetches
s.mu.Lock()
if manifestRecord.HoldDID != "" {
// New format: DID reference (preferred)
s.lastFetchedHoldDID = manifestRecord.HoldDID
@@ -74,6 +77,7 @@ func (s *ManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...
// Legacy format: URL reference - convert to DID
s.lastFetchedHoldDID = atproto.ResolveHoldDIDFromURL(manifestRecord.HoldEndpoint)
}
s.mu.Unlock()
var ociManifest []byte
@@ -232,6 +236,8 @@ func digestToRKey(dgst digest.Digest) string {
// GetLastFetchedHoldDID returns the hold DID from the most recently fetched manifest
// This is used by the routing repository to cache the hold for blob requests
func (s *ManifestStore) GetLastFetchedHoldDID() string {
s.mu.RLock()
defer s.mu.RUnlock()
return s.lastFetchedHoldDID
}

View File

@@ -669,13 +669,13 @@ func TestManifestStore_Get_OnlyCountsGETRequests(t *testing.T) {
if tt.expectPullIncrement {
// Check that IncrementPullCount was called
if mockDB.pullCount == 0 {
if mockDB.getPullCount() == 0 {
t.Error("Expected pull count to be incremented for GET request, but it wasn't")
}
} else {
// Check that IncrementPullCount was NOT called
if mockDB.pullCount > 0 {
t.Errorf("Expected pull count NOT to be incremented for %s request, but it was (count=%d)", tt.httpMethod, mockDB.pullCount)
if mockDB.getPullCount() > 0 {
t.Errorf("Expected pull count NOT to be incremented for %s request, but it was (count=%d)", tt.httpMethod, mockDB.getPullCount())
}
}
})

View File

@@ -219,6 +219,7 @@ func TestGetProfile(t *testing.T) {
// Clear migration locks before each test
migrationLocks = sync.Map{}
var mu sync.Mutex
putRecordCalled := false
var migrationRequest map[string]any
@@ -232,8 +233,10 @@ func TestGetProfile(t *testing.T) {
// PutRecord (migration)
if r.Method == "POST" && strings.Contains(r.URL.Path, "putRecord") {
mu.Lock()
putRecordCalled = true
json.NewDecoder(r.Body).Decode(&migrationRequest)
mu.Unlock()
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{"uri":"at://did:plc:test123/io.atcr.sailor.profile/self","cid":"bafytest"}`))
return
@@ -270,12 +273,17 @@ func TestGetProfile(t *testing.T) {
// Give goroutine time to execute
time.Sleep(50 * time.Millisecond)
if !putRecordCalled {
mu.Lock()
called := putRecordCalled
request := migrationRequest
mu.Unlock()
if !called {
t.Error("Expected migration PutRecord to be called")
}
if migrationRequest != nil {
recordData := migrationRequest["record"].(map[string]any)
if request != nil {
recordData := request["record"].(map[string]any)
migratedHold := recordData["defaultHold"]
if migratedHold != tt.expectedHoldDID {
t.Errorf("Migrated defaultHold = %v, want %v", migratedHold, tt.expectedHoldDID)

View File

@@ -7,6 +7,7 @@ package storage
import (
"context"
"log/slog"
"sync"
"time"
"github.com/distribution/distribution/v3"
@@ -17,6 +18,7 @@ import (
type RoutingRepository struct {
distribution.Repository
Ctx *RegistryContext // All context and services (exported for token updates)
mu sync.Mutex // Protects manifestStore and blobStore
manifestStore *ManifestStore // Cached manifest store instance
blobStore *ProxyBlobStore // Cached blob store instance
}
@@ -31,35 +33,47 @@ func NewRoutingRepository(baseRepo distribution.Repository, ctx *RegistryContext
// Manifests returns the ATProto-backed manifest service
func (r *RoutingRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
r.mu.Lock()
// Create or return cached manifest store
if r.manifestStore == nil {
// Ensure blob store is created first (needed for label extraction during push)
// Release lock while calling Blobs to avoid deadlock
r.mu.Unlock()
blobStore := r.Blobs(ctx)
r.mu.Lock()
r.manifestStore = NewManifestStore(r.Ctx, blobStore)
// Double-check after reacquiring lock (another goroutine might have set it)
if r.manifestStore == nil {
r.manifestStore = NewManifestStore(r.Ctx, blobStore)
}
}
manifestStore := r.manifestStore
r.mu.Unlock()
// After any manifest operation, cache the hold DID for blob fetches
// We use a goroutine to avoid blocking, and check after a short delay to allow the operation to complete
go func() {
time.Sleep(100 * time.Millisecond) // Brief delay to let manifest fetch complete
if holdDID := r.manifestStore.GetLastFetchedHoldDID(); holdDID != "" {
if holdDID := manifestStore.GetLastFetchedHoldDID(); holdDID != "" {
// Cache for 10 minutes - should cover typical pull operations
GetGlobalHoldCache().Set(r.Ctx.DID, r.Ctx.Repository, holdDID, 10*time.Minute)
slog.Debug("Cached hold DID", "component", "storage/routing", "did", r.Ctx.DID, "repo", r.Ctx.Repository, "hold", holdDID)
}
}()
return r.manifestStore, nil
return manifestStore, nil
}
// Blobs returns a proxy blob store that routes to external hold service
// The registry (AppView) NEVER stores blobs locally - all blobs go through hold service
func (r *RoutingRepository) Blobs(ctx context.Context) distribution.BlobStore {
r.mu.Lock()
// Return cached blob store if available
if r.blobStore != nil {
blobStore := r.blobStore
r.mu.Unlock()
slog.Debug("Returning cached blob store", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository)
return r.blobStore
return blobStore
}
// For pull operations, check if we have a cached hold DID from a recent manifest fetch
@@ -85,7 +99,9 @@ func (r *RoutingRepository) Blobs(ctx context.Context) distribution.BlobStore {
// Create and cache proxy blob store
r.blobStore = NewProxyBlobStore(r.Ctx)
return r.blobStore
blobStore := r.blobStore
r.mu.Unlock()
return blobStore
}
// Tags returns the tag service

View File

@@ -12,11 +12,11 @@
<!-- Stylesheets -->
<link rel="stylesheet" href="/css/style.css">
<!-- HTMX -->
<script src="https://unpkg.com/htmx.org@2.0.8/dist/htmx.min.js"></script>
<!-- HTMX (vendored) -->
<script src="/js/htmx.min.js"></script>
<!-- Lucide Icons -->
<script src="https://unpkg.com/lucide@latest"></script>
<!-- Lucide Icons (vendored) -->
<script src="/js/lucide.min.js"></script>
<!-- App Scripts -->
<script src="/js/app.js"></script>

View File

@@ -181,7 +181,7 @@
hx-get="/api/manifest-health?endpoint={{ .Manifest.HoldEndpoint | urlquery }}"
hx-trigger="load delay:2s"
hx-swap="outerHTML">
<i data-lucide="rotate-cw"></i> Checking...
<i data-lucide="refresh-ccw"></i> Checking...
</span>
{{ else if not .Reachable }}
<span class="offline-badge"><i data-lucide="alert-triangle"></i> Offline</span>

View File

@@ -12,6 +12,9 @@ import (
"atcr.io/pkg/appview/licenses"
)
//go:generate curl -fsSL -o static/js/htmx.min.js https://unpkg.com/htmx.org@2.0.8/dist/htmx.min.js
//go:generate curl -fsSL -o static/js/lucide.min.js https://unpkg.com/lucide@latest/dist/umd/lucide.min.js
//go:embed templates/**/*.html
var templatesFS embed.FS

View File

@@ -35,6 +35,4 @@ func main() {
fmt.Printf("Failed to generate CBOR encoders: %v\n", err)
os.Exit(1)
}
fmt.Println("Generated CBOR encoders in pkg/atproto/cbor_gen.go")
}

View File

@@ -0,0 +1,110 @@
#!/usr/bin/env bash
#
# update-homebrew-formula.sh - Helper script to update Homebrew formula with new release
#
# Usage: ./scripts/update-homebrew-formula.sh <version>
#
# Example: ./scripts/update-homebrew-formula.sh v0.0.2
#
# This script:
# 1. Downloads the source tarball from GitHub
# 2. Calculates SHA256 checksum
# 3. Generates updated formula snippet
#
set -euo pipefail
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Check arguments
if [ $# -ne 1 ]; then
echo -e "${RED}Error: Missing required argument${NC}"
echo "Usage: $0 <version>"
echo ""
echo "Example: $0 v0.0.2"
echo " $0 0.0.2 (v prefix is optional)"
exit 1
fi
VERSION="$1"
# Add 'v' prefix if not present
if [[ ! "$VERSION" =~ ^v ]]; then
VERSION="v${VERSION}"
fi
echo -e "${GREEN}Updating Homebrew formula for version ${VERSION}${NC}"
echo ""
# GitHub repository details
GITHUB_REPO="atcr-io/atcr"
TARBALL_URL="https://github.com/${GITHUB_REPO}/archive/refs/tags/${VERSION}.tar.gz"
# Create temporary directory
TEMP_DIR=$(mktemp -d)
trap 'rm -rf "$TEMP_DIR"' EXIT
TARBALL_FILE="${TEMP_DIR}/${VERSION}.tar.gz"
echo -e "${YELLOW}Downloading source tarball...${NC}"
echo "URL: ${TARBALL_URL}"
if curl -sSfL -o "$TARBALL_FILE" "$TARBALL_URL"; then
# Calculate SHA256
if command -v sha256sum &> /dev/null; then
CHECKSUM=$(sha256sum "$TARBALL_FILE" | awk '{print $1}')
elif command -v shasum &> /dev/null; then
CHECKSUM=$(shasum -a 256 "$TARBALL_FILE" | awk '{print $1}')
else
echo -e "${RED}Error: sha256sum or shasum command not found${NC}"
exit 1
fi
echo -e "${GREEN}✓ Downloaded successfully${NC}"
echo "SHA256: $CHECKSUM"
else
echo -e "${RED}✗ Failed to download source tarball${NC}"
echo ""
echo "Make sure the tag ${VERSION} exists on GitHub:"
echo " https://github.com/${GITHUB_REPO}/releases/tag/${VERSION}"
echo ""
echo "If you haven't pushed the tag yet, run:"
echo " git tag ${VERSION}"
echo " git push origin ${VERSION}"
exit 1
fi
echo ""
echo "======================================================================"
echo "Copy the following to Formula/docker-credential-atcr.rb:"
echo "======================================================================"
echo ""
cat << EOF
url "https://github.com/${GITHUB_REPO}/archive/refs/tags/${VERSION}.tar.gz"
sha256 "${CHECKSUM}"
license "MIT"
head "https://github.com/${GITHUB_REPO}.git", branch: "main"
EOF
echo ""
echo "======================================================================"
echo ""
echo -e "${YELLOW}Next steps:${NC}"
echo "1. Update Formula/docker-credential-atcr.rb with the url and sha256 above"
echo "2. Test the formula locally:"
echo " brew install --build-from-source Formula/docker-credential-atcr.rb"
echo " docker-credential-atcr version"
echo "3. Commit and push to your atcr-io/homebrew-tap repository:"
echo " cd /path/to/homebrew-tap"
echo " cp Formula/docker-credential-atcr.rb ."
echo " git add docker-credential-atcr.rb"
echo " git commit -m \"Update docker-credential-atcr to ${VERSION}\""
echo " git push"
echo "4. Users can upgrade with:"
echo " brew update"
echo " brew upgrade docker-credential-atcr"