Compare commits
2 Commits
test
...
vulnerabil
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3b5b89b378 | ||
|
|
8c5f9da2cf |
@@ -111,6 +111,33 @@ HOLD_DATABASE_DIR=/var/lib/atcr-hold
|
||||
#
|
||||
HOLD_OWNER=did:plc:your-did-here
|
||||
|
||||
# ==============================================================================
|
||||
# Scanner Configuration (SBOM & Vulnerability Scanning)
|
||||
# ==============================================================================
|
||||
|
||||
# Enable automatic SBOM generation and vulnerability scanning on image push
|
||||
# Default: true
|
||||
HOLD_SBOM_ENABLED=true
|
||||
|
||||
# Number of concurrent scanner worker threads
|
||||
# Default: 2
|
||||
HOLD_SBOM_WORKERS=2
|
||||
|
||||
# Enable vulnerability scanning with Grype
|
||||
# If false, only SBOM generation (Syft) will run
|
||||
# Default: true
|
||||
HOLD_VULN_ENABLED=true
|
||||
|
||||
# Path to Grype vulnerability database
|
||||
# Database is auto-downloaded and cached at this location
|
||||
# Default: /var/lib/atcr-hold/grype-db
|
||||
# HOLD_VULN_DB_PATH=/var/lib/atcr-hold/grype-db
|
||||
|
||||
# How often to update vulnerability database
|
||||
# Examples: 24h, 12h, 48h
|
||||
# Default: 24h
|
||||
# HOLD_VULN_DB_UPDATE_INTERVAL=24h
|
||||
|
||||
# ==============================================================================
|
||||
# Logging Configuration
|
||||
# ==============================================================================
|
||||
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -11,11 +11,6 @@ dist/
|
||||
# Environment configuration
|
||||
.env
|
||||
|
||||
# Generated assets (run go generate to rebuild)
|
||||
pkg/appview/licenses/spdx-licenses.json
|
||||
pkg/appview/static/js/htmx.min.js
|
||||
pkg/appview/static/js/lucide.min.js
|
||||
|
||||
# IDE
|
||||
.claude/
|
||||
.vscode/
|
||||
|
||||
@@ -1,137 +0,0 @@
|
||||
# Tangled Workflow: Release Credential Helper to Tangled.org
|
||||
#
|
||||
# This workflow builds the docker-credential-atcr binary and publishes it
|
||||
# to Tangled.org for distribution via Homebrew.
|
||||
#
|
||||
# Current limitation: Tangled doesn't support triggering on tags yet,
|
||||
# so this triggers on push to main. Manually verify you've tagged the
|
||||
# release before pushing.
|
||||
|
||||
when:
|
||||
- event: ["push"]
|
||||
tag: ["v*"]
|
||||
|
||||
engine: "nixery"
|
||||
|
||||
dependencies:
|
||||
nixpkgs:
|
||||
- go_1_24 # Go 1.24+ for building
|
||||
- git # For finding tags
|
||||
- goreleaser # For building multi-platform binaries
|
||||
# - goat # TODO: Add goat CLI for uploading to Tangled (if available in nixpkgs)
|
||||
|
||||
environment:
|
||||
CGO_ENABLED: "0" # Build static binaries
|
||||
|
||||
steps:
|
||||
- name: Find latest git tag
|
||||
command: |
|
||||
# Get the most recent version tag
|
||||
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.1")
|
||||
echo "Latest tag: $LATEST_TAG"
|
||||
echo "$LATEST_TAG" > .version
|
||||
|
||||
# Also get the commit hash for reference
|
||||
COMMIT_HASH=$(git rev-parse HEAD)
|
||||
echo "Commit: $COMMIT_HASH"
|
||||
|
||||
- name: Build binaries with GoReleaser
|
||||
command: |
|
||||
# Read version from previous step
|
||||
VERSION=$(cat .version)
|
||||
export VERSION
|
||||
|
||||
# Build for all platforms using GoReleaser
|
||||
# This creates artifacts in dist/ directory
|
||||
goreleaser build --clean --snapshot --config .goreleaser.yaml
|
||||
|
||||
# List what was built
|
||||
echo "Built artifacts:"
|
||||
ls -lh dist/
|
||||
|
||||
- name: Package artifacts
|
||||
command: |
|
||||
VERSION=$(cat .version)
|
||||
VERSION_NO_V=${VERSION#v} # Remove 'v' prefix for filenames
|
||||
|
||||
cd dist
|
||||
|
||||
# Create tarballs for each platform (GoReleaser might already do this)
|
||||
# Darwin x86_64
|
||||
if [ -d "docker-credential-atcr_darwin_amd64_v1" ]; then
|
||||
tar czf "docker-credential-atcr_${VERSION_NO_V}_Darwin_x86_64.tar.gz" \
|
||||
-C docker-credential-atcr_darwin_amd64_v1 docker-credential-atcr
|
||||
fi
|
||||
|
||||
# Darwin arm64
|
||||
if [ -d "docker-credential-atcr_darwin_arm64" ]; then
|
||||
tar czf "docker-credential-atcr_${VERSION_NO_V}_Darwin_arm64.tar.gz" \
|
||||
-C docker-credential-atcr_darwin_arm64 docker-credential-atcr
|
||||
fi
|
||||
|
||||
# Linux x86_64
|
||||
if [ -d "docker-credential-atcr_linux_amd64_v1" ]; then
|
||||
tar czf "docker-credential-atcr_${VERSION_NO_V}_Linux_x86_64.tar.gz" \
|
||||
-C docker-credential-atcr_linux_amd64_v1 docker-credential-atcr
|
||||
fi
|
||||
|
||||
# Linux arm64
|
||||
if [ -d "docker-credential-atcr_linux_arm64" ]; then
|
||||
tar czf "docker-credential-atcr_${VERSION_NO_V}_Linux_arm64.tar.gz" \
|
||||
-C docker-credential-atcr_linux_arm64 docker-credential-atcr
|
||||
fi
|
||||
|
||||
echo "Created tarballs:"
|
||||
ls -lh *.tar.gz
|
||||
|
||||
- name: Upload to Tangled.org
|
||||
command: |
|
||||
VERSION=$(cat .version)
|
||||
VERSION_NO_V=${VERSION#v}
|
||||
|
||||
# TODO: Authenticate with goat CLI
|
||||
# You'll need to set up credentials/tokens for goat
|
||||
# Example (adjust based on goat's actual auth mechanism):
|
||||
# goat login --pds https://your-pds.example.com --handle your.handle
|
||||
|
||||
# TODO: Upload each artifact to Tangled.org
|
||||
# This creates sh.tangled.repo.artifact records in your ATProto PDS
|
||||
# Adjust these commands based on scripts/publish-artifact.sh pattern
|
||||
|
||||
# Example structure (you'll need to fill in actual goat commands):
|
||||
# for artifact in dist/*.tar.gz; do
|
||||
# echo "Uploading $artifact..."
|
||||
# goat upload \
|
||||
# --repo "at-container-registry" \
|
||||
# --tag "$VERSION" \
|
||||
# --file "$artifact"
|
||||
# done
|
||||
|
||||
echo "TODO: Implement goat upload commands"
|
||||
echo "See scripts/publish-artifact.sh for reference"
|
||||
echo ""
|
||||
echo "After uploading, you'll receive a TAG_HASH from Tangled."
|
||||
echo "Update Formula/docker-credential-atcr.rb with:"
|
||||
echo " VERSION = \"$VERSION_NO_V\""
|
||||
echo " TAG_HASH = \"<hash-from-tangled>\""
|
||||
echo ""
|
||||
echo "Then run: scripts/update-homebrew-formula.sh $VERSION_NO_V <tag-hash>"
|
||||
|
||||
- name: Generate checksums for verification
|
||||
command: |
|
||||
VERSION=$(cat .version)
|
||||
VERSION_NO_V=${VERSION#v}
|
||||
|
||||
cd dist
|
||||
|
||||
echo "SHA256 checksums for Homebrew formula:"
|
||||
echo "======================================="
|
||||
|
||||
for file in docker-credential-atcr_${VERSION_NO_V}_*.tar.gz; do
|
||||
if [ -f "$file" ]; then
|
||||
sha256sum "$file"
|
||||
fi
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "Copy these checksums to Formula/docker-credential-atcr.rb"
|
||||
@@ -1,5 +1,5 @@
|
||||
# ATCR Release Pipeline for Tangled.org
|
||||
# Triggers on version tags and builds cross-platform binaries using buildah
|
||||
# Triggers on version tags and builds cross-platform binaries using GoReleaser
|
||||
|
||||
when:
|
||||
- event: ["manual"]
|
||||
@@ -10,50 +10,46 @@ engine: "nixery"
|
||||
|
||||
dependencies:
|
||||
nixpkgs:
|
||||
- buildah
|
||||
- chroot
|
||||
|
||||
environment:
|
||||
IMAGE_REGISTRY: atcr.io
|
||||
IMAGE_USER: evan.jarrett.net
|
||||
- git
|
||||
- go
|
||||
#- goreleaser
|
||||
- podman
|
||||
|
||||
steps:
|
||||
- name: Setup build environment
|
||||
- name: Fetch git tags
|
||||
command: git fetch --tags --force
|
||||
|
||||
- name: Checkout tag for current commit
|
||||
command: |
|
||||
if ! grep -q "^root:" /etc/passwd 2>/dev/null; then
|
||||
echo "root:x:0:0:root:/root:/bin/sh" >> /etc/passwd
|
||||
CURRENT_COMMIT=$(git rev-parse HEAD)
|
||||
export TAG=$(git tag --points-at $CURRENT_COMMIT --sort=-version:refname | head -n1)
|
||||
if [ -z "$TAG" ]; then
|
||||
echo "Error: No tag found for commit $CURRENT_COMMIT"
|
||||
exit 1
|
||||
fi
|
||||
echo "Found tag $TAG for commit $CURRENT_COMMIT"
|
||||
git checkout $TAG
|
||||
|
||||
- name: Login to registry
|
||||
- name: Build AppView Docker image
|
||||
command: |
|
||||
echo "${APP_PASSWORD}" | buildah login \
|
||||
--storage-driver vfs \
|
||||
-u "${IMAGE_USER}" \
|
||||
--password-stdin \
|
||||
${IMAGE_REGISTRY}
|
||||
TAG=$(git describe --tags --exact-match 2>/dev/null || git tag --points-at HEAD | head -n1)
|
||||
podman login atcr.io -u evan.jarrett.net -p ${APP_PASSWORD}
|
||||
podman build -f Dockerfile.appview -t atcr.io/evan.jarrett.net/atcr-appview:${TAG} .
|
||||
podman push atcr.io/evan.jarrett.net/atcr-appview:${TAG}
|
||||
|
||||
- name: Build and push AppView image
|
||||
- name: Build Hold Docker image
|
||||
command: |
|
||||
buildah bud \
|
||||
--storage-driver vfs \
|
||||
--tag ${IMAGE_REGISTRY}/${IMAGE_USER}/atcr-appview:${TAG} \
|
||||
--tag ${IMAGE_REGISTRY}/${IMAGE_USER}/atcr-appview:latest \
|
||||
--file ./Dockerfile.appview \
|
||||
.
|
||||
TAG=$(git describe --tags --exact-match 2>/dev/null || git tag --points-at HEAD | head -n1)
|
||||
podman login atcr.io -u evan.jarrett.net -p ${APP_PASSWORD}
|
||||
podman build -f Dockerfile.hold -t atcr.io/evan.jarrett.net/atcr-hold:${TAG} .
|
||||
podman push atcr.io/evan.jarrett.net/atcr-hold:${TAG}
|
||||
|
||||
# disable for now
|
||||
# - name: Tidy Go modules
|
||||
# command: go mod tidy
|
||||
|
||||
buildah push \
|
||||
--storage-driver vfs \
|
||||
${IMAGE_REGISTRY}/${IMAGE_USER}/atcr-appview:latest
|
||||
# - name: Install Goat
|
||||
# command: go install github.com/bluesky-social/goat@latest
|
||||
|
||||
- name: Build and push Hold image
|
||||
command: |
|
||||
buildah bud \
|
||||
--storage-driver vfs \
|
||||
--tag ${IMAGE_REGISTRY}/${IMAGE_USER}/atcr-hold:${TAG} \
|
||||
--tag ${IMAGE_REGISTRY}/${IMAGE_USER}/atcr-hold:latest \
|
||||
--file ./Dockerfile.hold \
|
||||
.
|
||||
|
||||
buildah push \
|
||||
--storage-driver vfs \
|
||||
${IMAGE_REGISTRY}/${IMAGE_USER}/atcr-hold:latest
|
||||
# - name: Run GoReleaser
|
||||
# command: goreleaser release --clean
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
# typed: false
|
||||
# frozen_string_literal: true
|
||||
|
||||
class DockerCredentialAtcr < Formula
|
||||
desc "Docker credential helper for ATCR (ATProto Container Registry)"
|
||||
homepage "https://atcr.io"
|
||||
url "https://github.com/atcr-io/atcr/archive/refs/tags/v0.0.1.tar.gz"
|
||||
sha256 "REPLACE_WITH_TARBALL_SHA256"
|
||||
license "MIT"
|
||||
head "https://github.com/atcr-io/atcr.git", branch: "main"
|
||||
|
||||
depends_on "go" => :build
|
||||
|
||||
def install
|
||||
# Build the credential helper binary
|
||||
# Use ldflags to inject version information
|
||||
ldflags = %W[
|
||||
-s -w
|
||||
-X main.version=#{version}
|
||||
-X main.commit=#{tap.user}
|
||||
-X main.date=#{time.iso8601}
|
||||
]
|
||||
|
||||
system "go", "build", *std_go_args(ldflags:, output: bin/"docker-credential-atcr"), "./cmd/credential-helper"
|
||||
end
|
||||
|
||||
test do
|
||||
# Test that the binary exists and is executable
|
||||
assert_match version.to_s, shell_output("#{bin}/docker-credential-atcr version 2>&1")
|
||||
end
|
||||
|
||||
def caveats
|
||||
<<~EOS
|
||||
To configure Docker to use ATCR credential helper, add the following
|
||||
to your ~/.docker/config.json:
|
||||
|
||||
{
|
||||
"credHelpers": {
|
||||
"atcr.io": "atcr"
|
||||
}
|
||||
}
|
||||
|
||||
Note: The credential helper name is "atcr" (Docker automatically prefixes
|
||||
with "docker-credential-" when looking for the binary).
|
||||
|
||||
To authenticate with ATCR:
|
||||
docker push atcr.io/<your-handle>/<image>:latest
|
||||
|
||||
This will open your browser to complete the OAuth device flow.
|
||||
|
||||
Configuration is stored in: ~/.atcr/device.json
|
||||
EOS
|
||||
end
|
||||
end
|
||||
@@ -37,22 +37,13 @@ Invoke-WebRequest -Uri https://atcr.io/install.ps1 -OutFile install.ps1
|
||||
.\install.ps1
|
||||
```
|
||||
|
||||
### Using Homebrew (macOS and Linux)
|
||||
|
||||
### Using Homebrew (macOS)
|
||||
You can read the full manifest spec here, but the dependencies block is the real interesting bit. Dependencies for your workflow, like Go, Node.js, Python etc. can be pulled in from nixpkgs. Nixpkgs—for the uninitiated—is a vast collection of packages for the Nix package manager. Fortunately, you needn’t know nor care about Nix to use it! Just head to https://search.nixos.org to find your package of choice (I’ll bet 1€ that it’s there1), toss it in the list and run your build. The Nix-savvy of you lot will be happy to know that you can use custom registries too.
|
||||
```bash
|
||||
# Add the ATCR tap
|
||||
brew tap atcr-io/tap
|
||||
|
||||
# Install the credential helper
|
||||
brew install docker-credential-atcr
|
||||
```
|
||||
|
||||
The Homebrew formula supports:
|
||||
- **macOS**: Intel (x86_64) and Apple Silicon (arm64)
|
||||
- **Linux**: x86_64 and arm64
|
||||
|
||||
Homebrew will automatically download the correct binary for your platform.
|
||||
|
||||
### Manual Installation
|
||||
|
||||
1. **Download the binary** for your platform from [GitHub Releases](https://github.com/atcr-io/atcr/releases)
|
||||
|
||||
84
Makefile
84
Makefile
@@ -1,84 +0,0 @@
|
||||
# ATCR Makefile
|
||||
# Build targets for the ATProto Container Registry
|
||||
|
||||
.PHONY: all build build-appview build-hold build-credential-helper build-oauth-helper \
|
||||
generate test test-race test-verbose lint clean help
|
||||
|
||||
.DEFAULT_GOAL := help
|
||||
|
||||
help: ## Show this help message
|
||||
@echo "ATCR Build Targets:"
|
||||
@echo ""
|
||||
@awk 'BEGIN {FS = ":.*##"; printf ""} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-28s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
|
||||
|
||||
all: generate build ## Generate assets and build all binaries (default)
|
||||
|
||||
# Generated asset files
|
||||
GENERATED_ASSETS = \
|
||||
pkg/appview/static/js/htmx.min.js \
|
||||
pkg/appview/static/js/lucide.min.js \
|
||||
pkg/appview/licenses/spdx-licenses.json
|
||||
|
||||
generate: $(GENERATED_ASSETS) ## Run go generate to download vendor assets
|
||||
|
||||
$(GENERATED_ASSETS):
|
||||
@echo "→ Generating vendor assets and code..."
|
||||
go generate ./...
|
||||
|
||||
##@ Build Targets
|
||||
|
||||
build: build-appview build-hold build-credential-helper ## Build all binaries
|
||||
|
||||
build-appview: $(GENERATED_ASSETS) ## Build appview binary only
|
||||
@echo "→ Building appview..."
|
||||
@mkdir -p bin
|
||||
go build -o bin/atcr-appview ./cmd/appview
|
||||
|
||||
build-hold: $(GENERATED_ASSETS) ## Build hold binary only
|
||||
@echo "→ Building hold..."
|
||||
@mkdir -p bin
|
||||
go build -o bin/atcr-hold ./cmd/hold
|
||||
|
||||
build-credential-helper: $(GENERATED_ASSETS) ## Build credential helper only
|
||||
@echo "→ Building credential helper..."
|
||||
@mkdir -p bin
|
||||
go build -o bin/docker-credential-atcr ./cmd/credential-helper
|
||||
|
||||
build-oauth-helper: $(GENERATED_ASSETS) ## Build OAuth helper only
|
||||
@echo "→ Building OAuth helper..."
|
||||
@mkdir -p bin
|
||||
go build -o bin/oauth-helper ./cmd/oauth-helper
|
||||
|
||||
##@ Test Targets
|
||||
|
||||
test: ## Run all tests
|
||||
@echo "→ Running tests..."
|
||||
go test -cover ./...
|
||||
|
||||
test-race: ## Run tests with race detector
|
||||
@echo "→ Running tests with race detector..."
|
||||
go test -race ./...
|
||||
|
||||
test-verbose: ## Run tests with verbose output
|
||||
@echo "→ Running tests with verbose output..."
|
||||
go test -v ./...
|
||||
|
||||
##@ Quality Targets
|
||||
|
||||
.PHONY: check-golangci-lint
|
||||
check-golangci-lint:
|
||||
@which golangci-lint > /dev/null || (echo "→ Installing golangci-lint..." && go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest)
|
||||
|
||||
lint: check-golangci-lint ## Run golangci-lint
|
||||
@echo "→ Running golangci-lint..."
|
||||
golangci-lint run ./...
|
||||
|
||||
##@ Utility Targets
|
||||
|
||||
clean: ## Remove built binaries and generated assets
|
||||
@echo "→ Cleaning build artifacts..."
|
||||
rm -rf bin/
|
||||
rm -f pkg/appview/static/js/htmx.min.js
|
||||
rm -f pkg/appview/static/js/lucide.min.js
|
||||
rm -f pkg/appview/licenses/spdx-licenses.json
|
||||
@echo "✓ Clean complete"
|
||||
@@ -314,16 +314,15 @@ func serveRegistry(cmd *cobra.Command, args []string) error {
|
||||
} else {
|
||||
slog.Debug("Updated profile with hold DID", "component", "appview/callback", "hold_did", holdDID)
|
||||
}
|
||||
slog.Debug("Attempting crew registration", "component", "oauth/server", "did", did, "hold_did", holdDID)
|
||||
storage.EnsureCrewMembership(ctx, client, refresher, holdDID)
|
||||
} else {
|
||||
// Already a DID - use it
|
||||
holdDID = profile.DefaultHold
|
||||
}
|
||||
// Register crew regardless of migration (outside the migration block)
|
||||
// Run in background to avoid blocking OAuth callback if hold is offline
|
||||
slog.Debug("Attempting crew registration", "component", "appview/callback", "did", did, "hold_did", holdDID)
|
||||
go func(ctx context.Context, client *atproto.Client, refresher *oauth.Refresher, holdDID string) {
|
||||
storage.EnsureCrewMembership(ctx, client, refresher, holdDID)
|
||||
}(ctx, client, refresher, holdDID)
|
||||
storage.EnsureCrewMembership(ctx, client, refresher, holdDID)
|
||||
|
||||
}
|
||||
|
||||
@@ -377,6 +376,12 @@ func serveRegistry(cmd *cobra.Command, args []string) error {
|
||||
slog.Info("UI enabled", "home", "/", "settings", "/settings")
|
||||
}
|
||||
|
||||
// API endpoint for vulnerability details
|
||||
if uiSessionStore != nil {
|
||||
repoHandler := &uihandlers.RepositoryPageHandler{}
|
||||
mainRouter.Get("/api/vulnerabilities", repoHandler.HandleVulnerabilityDetails)
|
||||
}
|
||||
|
||||
// Mount OAuth endpoints
|
||||
mainRouter.Get("/auth/oauth/authorize", oauthServer.ServeAuthorize)
|
||||
mainRouter.Get("/auth/oauth/callback", oauthServer.ServeCallback)
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"atcr.io/pkg/hold"
|
||||
"atcr.io/pkg/hold/oci"
|
||||
"atcr.io/pkg/hold/pds"
|
||||
"atcr.io/pkg/hold/scanner"
|
||||
"atcr.io/pkg/logging"
|
||||
"atcr.io/pkg/s3"
|
||||
|
||||
@@ -111,8 +112,33 @@ func main() {
|
||||
// Create PDS XRPC handler (ATProto endpoints)
|
||||
xrpcHandler = pds.NewXRPCHandler(holdPDS, *s3Service, driver, broadcaster, nil)
|
||||
|
||||
// Initialize scanner queue if scanning is enabled
|
||||
// Use interface type to ensure proper nil checking (avoid typed nil pointer issue)
|
||||
var scanQueue oci.ScanQueue
|
||||
if cfg.Scanner.Enabled {
|
||||
slog.Info("Initializing vulnerability scanner",
|
||||
"workers", cfg.Scanner.Workers,
|
||||
"vulnEnabled", cfg.Scanner.VulnEnabled,
|
||||
"vulnDBPath", cfg.Scanner.VulnDBPath)
|
||||
|
||||
// Create scanner worker
|
||||
scanWorker := scanner.NewWorker(cfg, driver, holdPDS)
|
||||
|
||||
// Create and start scanner queue (buffer size = workers * 2 for some headroom)
|
||||
bufferSize := cfg.Scanner.Workers * 2
|
||||
concreteQueue := scanner.NewQueue(cfg.Scanner.Workers, bufferSize)
|
||||
scanWorker.Start(concreteQueue)
|
||||
|
||||
// Assign to interface variable (ensures proper nil behavior)
|
||||
scanQueue = concreteQueue
|
||||
|
||||
slog.Info("Scanner queue initialized successfully")
|
||||
} else {
|
||||
slog.Info("SBOM/vulnerability scanning disabled")
|
||||
}
|
||||
|
||||
// Create OCI XRPC handler (multipart upload endpoints)
|
||||
ociHandler = oci.NewXRPCHandler(holdPDS, *s3Service, driver, cfg.Server.DisablePresignedURLs, cfg.Registration.EnableBlueskyPosts, nil)
|
||||
ociHandler = oci.NewXRPCHandler(holdPDS, *s3Service, driver, cfg.Server.DisablePresignedURLs, cfg.Registration.EnableBlueskyPosts, nil, scanQueue)
|
||||
}
|
||||
|
||||
// Setup HTTP routes with chi router
|
||||
|
||||
@@ -100,6 +100,40 @@ HOLD_ALLOW_ALL_CREW=false
|
||||
# Default: false
|
||||
HOLD_BLUESKY_POSTS_ENABLED=true
|
||||
|
||||
# ==============================================================================
|
||||
# Scanner Configuration (SBOM & Vulnerability Scanning)
|
||||
# ==============================================================================
|
||||
|
||||
# Enable automatic SBOM generation and vulnerability scanning on image push
|
||||
# When enabled, the hold service will:
|
||||
# 1. Generate SBOM (Software Bill of Materials) using Syft
|
||||
# 2. Scan for vulnerabilities using Grype
|
||||
# 3. Store results as ORAS artifacts (OCI referrers pattern)
|
||||
# 4. Display vulnerability counts on repository pages in AppView
|
||||
#
|
||||
# Default: true
|
||||
HOLD_SBOM_ENABLED=true
|
||||
|
||||
# Number of concurrent scanner worker threads
|
||||
# Increase for faster scanning on multi-core systems
|
||||
# Default: 2
|
||||
HOLD_SBOM_WORKERS=2
|
||||
|
||||
# Enable vulnerability scanning with Grype
|
||||
# If false, only SBOM generation (Syft) will run
|
||||
# Default: true
|
||||
HOLD_VULN_ENABLED=true
|
||||
|
||||
# Path to Grype vulnerability database
|
||||
# Database is auto-downloaded and cached at this location on first run
|
||||
# Default: /var/lib/atcr-hold/grype-db
|
||||
HOLD_VULN_DB_PATH=/var/lib/atcr-hold/grype-db
|
||||
|
||||
# How often to update vulnerability database
|
||||
# Examples: 24h, 12h, 48h
|
||||
# Default: 24h
|
||||
HOLD_VULN_DB_UPDATE_INTERVAL=24h
|
||||
|
||||
# ==============================================================================
|
||||
# S3/UpCloud Object Storage Configuration
|
||||
# ==============================================================================
|
||||
|
||||
@@ -114,6 +114,13 @@ services:
|
||||
S3_ENDPOINT: ${S3_ENDPOINT:-}
|
||||
S3_REGION_ENDPOINT: ${S3_REGION_ENDPOINT:-}
|
||||
|
||||
# Scanner configuration (SBOM & Vulnerability Scanning)
|
||||
HOLD_SBOM_ENABLED: ${HOLD_SBOM_ENABLED:-true}
|
||||
HOLD_SBOM_WORKERS: ${HOLD_SBOM_WORKERS:-2}
|
||||
HOLD_VULN_ENABLED: ${HOLD_VULN_ENABLED:-true}
|
||||
HOLD_VULN_DB_PATH: ${HOLD_VULN_DB_PATH:-/var/lib/atcr-hold/grype-db}
|
||||
HOLD_VULN_DB_UPDATE_INTERVAL: ${HOLD_VULN_DB_UPDATE_INTERVAL:-24h}
|
||||
|
||||
# Logging
|
||||
ATCR_LOG_LEVEL: ${ATCR_LOG_LEVEL:-debug}
|
||||
ATCR_LOG_FORMATTER: ${ATCR_LOG_FORMATTER:-text}
|
||||
@@ -149,6 +156,8 @@ volumes:
|
||||
driver: local
|
||||
atcr-hold-data:
|
||||
driver: local
|
||||
atcr-hold-tokens:
|
||||
driver: local
|
||||
|
||||
configs:
|
||||
caddyfile:
|
||||
|
||||
@@ -50,6 +50,10 @@ services:
|
||||
# STORAGE_ROOT_DIR: /var/lib/atcr/hold
|
||||
TEST_MODE: true
|
||||
# DISABLE_PRESIGNED_URLS: true
|
||||
# Scanner configuration
|
||||
HOLD_SBOM_ENABLED: true
|
||||
HOLD_SBOM_WORKERS: 2
|
||||
HOLD_VULN_ENABLED: true
|
||||
# Logging
|
||||
ATCR_LOG_LEVEL: debug
|
||||
# Storage config comes from env_file (STORAGE_DRIVER, AWS_*, S3_*)
|
||||
|
||||
@@ -1,728 +0,0 @@
|
||||
# atcr-verify CLI Tool
|
||||
|
||||
## Overview
|
||||
|
||||
`atcr-verify` is a command-line tool for verifying ATProto signatures on container images stored in ATCR. It provides cryptographic verification of image manifests using ATProto's DID-based trust model.
|
||||
|
||||
## Features
|
||||
|
||||
- ✅ Verify ATProto signatures via OCI Referrers API
|
||||
- ✅ DID resolution and public key extraction
|
||||
- ✅ PDS query and commit signature verification
|
||||
- ✅ Trust policy enforcement
|
||||
- ✅ Offline verification mode (with cached data)
|
||||
- ✅ Multiple output formats (human-readable, JSON, quiet)
|
||||
- ✅ Exit codes for CI/CD integration
|
||||
- ✅ Kubernetes admission controller integration
|
||||
|
||||
## Installation
|
||||
|
||||
### Binary Release
|
||||
|
||||
```bash
|
||||
# Linux (x86_64)
|
||||
curl -L https://github.com/atcr-io/atcr/releases/latest/download/atcr-verify-linux-amd64 -o atcr-verify
|
||||
chmod +x atcr-verify
|
||||
sudo mv atcr-verify /usr/local/bin/
|
||||
|
||||
# macOS (Apple Silicon)
|
||||
curl -L https://github.com/atcr-io/atcr/releases/latest/download/atcr-verify-darwin-arm64 -o atcr-verify
|
||||
chmod +x atcr-verify
|
||||
sudo mv atcr-verify /usr/local/bin/
|
||||
|
||||
# Windows
|
||||
curl -L https://github.com/atcr-io/atcr/releases/latest/download/atcr-verify-windows-amd64.exe -o atcr-verify.exe
|
||||
```
|
||||
|
||||
### From Source
|
||||
|
||||
```bash
|
||||
git clone https://github.com/atcr-io/atcr.git
|
||||
cd atcr
|
||||
go install ./cmd/atcr-verify
|
||||
```
|
||||
|
||||
### Container Image
|
||||
|
||||
```bash
|
||||
docker pull atcr.io/atcr/verify:latest
|
||||
|
||||
# Run
|
||||
docker run --rm atcr.io/atcr/verify:latest verify IMAGE
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
### Basic Verification
|
||||
|
||||
```bash
|
||||
# Verify an image
|
||||
atcr-verify atcr.io/alice/myapp:latest
|
||||
|
||||
# Output:
|
||||
# ✓ Image verified successfully
|
||||
# Signed by: alice.bsky.social (did:plc:alice123)
|
||||
# Signed at: 2025-10-31T12:34:56.789Z
|
||||
```
|
||||
|
||||
### With Trust Policy
|
||||
|
||||
```bash
|
||||
# Verify against trust policy
|
||||
atcr-verify atcr.io/alice/myapp:latest --policy trust-policy.yaml
|
||||
|
||||
# Output:
|
||||
# ✓ Image verified successfully
|
||||
# ✓ Trust policy satisfied
|
||||
# Policy: production-images
|
||||
# Trusted DID: did:plc:alice123
|
||||
```
|
||||
|
||||
### JSON Output
|
||||
|
||||
```bash
|
||||
atcr-verify atcr.io/alice/myapp:latest --output json
|
||||
|
||||
# Output:
|
||||
{
|
||||
"verified": true,
|
||||
"image": "atcr.io/alice/myapp:latest",
|
||||
"digest": "sha256:abc123...",
|
||||
"signature": {
|
||||
"did": "did:plc:alice123",
|
||||
"handle": "alice.bsky.social",
|
||||
"pds": "https://bsky.social",
|
||||
"recordUri": "at://did:plc:alice123/io.atcr.manifest/abc123",
|
||||
"commitCid": "bafyreih8...",
|
||||
"signedAt": "2025-10-31T12:34:56.789Z",
|
||||
"algorithm": "ECDSA-K256-SHA256"
|
||||
},
|
||||
"trustPolicy": {
|
||||
"satisfied": true,
|
||||
"policy": "production-images",
|
||||
"trustedDID": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Quiet Mode
|
||||
|
||||
```bash
|
||||
# Exit code only (for scripts)
|
||||
atcr-verify atcr.io/alice/myapp:latest --quiet
|
||||
echo $? # 0 = verified, 1 = failed
|
||||
```
|
||||
|
||||
### Offline Mode
|
||||
|
||||
```bash
|
||||
# Export verification bundle
|
||||
atcr-verify export atcr.io/alice/myapp:latest -o bundle.json
|
||||
|
||||
# Verify offline (in air-gapped environment)
|
||||
atcr-verify atcr.io/alice/myapp:latest --offline --bundle bundle.json
|
||||
```
|
||||
|
||||
## Command Reference
|
||||
|
||||
### verify
|
||||
|
||||
Verify ATProto signature for an image.
|
||||
|
||||
```bash
|
||||
atcr-verify verify IMAGE [flags]
|
||||
atcr-verify IMAGE [flags] # 'verify' subcommand is optional
|
||||
```
|
||||
|
||||
**Arguments:**
|
||||
- `IMAGE` - Image reference (registry/owner/repo:tag or @digest)
|
||||
|
||||
**Flags:**
|
||||
- `--policy FILE` - Trust policy file (default: none)
|
||||
- `--output FORMAT` - Output format: text, json, quiet (default: text)
|
||||
- `--offline` - Offline mode (requires --bundle)
|
||||
- `--bundle FILE` - Verification bundle for offline mode
|
||||
- `--cache-dir DIR` - Cache directory for DID documents (default: ~/.atcr/cache)
|
||||
- `--no-cache` - Disable caching
|
||||
- `--timeout DURATION` - Verification timeout (default: 30s)
|
||||
- `--verbose` - Verbose output
|
||||
|
||||
**Exit Codes:**
|
||||
- `0` - Verification succeeded
|
||||
- `1` - Verification failed
|
||||
- `2` - Invalid arguments
|
||||
- `3` - Network error
|
||||
- `4` - Trust policy violation
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Basic verification
|
||||
atcr-verify atcr.io/alice/myapp:latest
|
||||
|
||||
# With specific digest
|
||||
atcr-verify atcr.io/alice/myapp@sha256:abc123...
|
||||
|
||||
# With trust policy
|
||||
atcr-verify atcr.io/alice/myapp:latest --policy production-policy.yaml
|
||||
|
||||
# JSON output for scripting
|
||||
atcr-verify atcr.io/alice/myapp:latest --output json | jq .verified
|
||||
|
||||
# Quiet mode for CI/CD
|
||||
if atcr-verify atcr.io/alice/myapp:latest --quiet; then
|
||||
echo "Deploy approved"
|
||||
fi
|
||||
```
|
||||
|
||||
### export
|
||||
|
||||
Export verification bundle for offline verification.
|
||||
|
||||
```bash
|
||||
atcr-verify export IMAGE [flags]
|
||||
```
|
||||
|
||||
**Arguments:**
|
||||
- `IMAGE` - Image reference to export bundle for
|
||||
|
||||
**Flags:**
|
||||
- `-o, --output FILE` - Output file (default: stdout)
|
||||
- `--include-did-docs` - Include DID documents in bundle
|
||||
- `--include-commit` - Include ATProto commit data
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Export to file
|
||||
atcr-verify export atcr.io/alice/myapp:latest -o myapp-bundle.json
|
||||
|
||||
# Export with all verification data
|
||||
atcr-verify export atcr.io/alice/myapp:latest \
|
||||
--include-did-docs \
|
||||
--include-commit \
|
||||
-o complete-bundle.json
|
||||
|
||||
# Export for multiple images
|
||||
for img in $(cat images.txt); do
|
||||
atcr-verify export $img -o bundles/$(echo $img | tr '/:' '_').json
|
||||
done
|
||||
```
|
||||
|
||||
### trust
|
||||
|
||||
Manage trust policies and trusted DIDs.
|
||||
|
||||
```bash
|
||||
atcr-verify trust COMMAND [flags]
|
||||
```
|
||||
|
||||
**Subcommands:**
|
||||
|
||||
**`trust list`** - List trusted DIDs
|
||||
```bash
|
||||
atcr-verify trust list
|
||||
|
||||
# Output:
|
||||
# Trusted DIDs:
|
||||
# - did:plc:alice123 (alice.bsky.social)
|
||||
# - did:plc:bob456 (bob.example.com)
|
||||
```
|
||||
|
||||
**`trust add DID`** - Add trusted DID
|
||||
```bash
|
||||
atcr-verify trust add did:plc:alice123
|
||||
atcr-verify trust add did:plc:alice123 --name "Alice (DevOps)"
|
||||
```
|
||||
|
||||
**`trust remove DID`** - Remove trusted DID
|
||||
```bash
|
||||
atcr-verify trust remove did:plc:alice123
|
||||
```
|
||||
|
||||
**`trust policy validate`** - Validate trust policy file
|
||||
```bash
|
||||
atcr-verify trust policy validate policy.yaml
|
||||
```
|
||||
|
||||
### version
|
||||
|
||||
Show version information.
|
||||
|
||||
```bash
|
||||
atcr-verify version
|
||||
|
||||
# Output:
|
||||
# atcr-verify version 1.0.0
|
||||
# Go version: go1.21.5
|
||||
# Commit: 3b5b89b
|
||||
# Built: 2025-10-31T12:00:00Z
|
||||
```
|
||||
|
||||
## Trust Policy
|
||||
|
||||
Trust policies define which signatures to trust and what to do when verification fails.
|
||||
|
||||
### Policy File Format
|
||||
|
||||
```yaml
|
||||
version: 1.0
|
||||
|
||||
# Global settings
|
||||
defaultAction: enforce # enforce, audit, allow
|
||||
requireSignature: true
|
||||
|
||||
# Policies matched by image pattern (first match wins)
|
||||
policies:
|
||||
- name: production-images
|
||||
description: "Production images must be signed by DevOps or Security"
|
||||
scope: "atcr.io/*/prod-*"
|
||||
require:
|
||||
signature: true
|
||||
trustedDIDs:
|
||||
- did:plc:devops-team
|
||||
- did:plc:security-team
|
||||
minSignatures: 1
|
||||
maxAge: 2592000 # 30 days in seconds
|
||||
action: enforce
|
||||
|
||||
- name: staging-images
|
||||
scope: "atcr.io/*/staging-*"
|
||||
require:
|
||||
signature: true
|
||||
trustedDIDs:
|
||||
- did:plc:devops-team
|
||||
- did:plc:developers
|
||||
minSignatures: 1
|
||||
action: enforce
|
||||
|
||||
- name: dev-images
|
||||
scope: "atcr.io/*/dev-*"
|
||||
require:
|
||||
signature: false
|
||||
action: audit # Log but don't fail
|
||||
|
||||
# Trusted DID registry
|
||||
trustedDIDs:
|
||||
did:plc:devops-team:
|
||||
name: "DevOps Team"
|
||||
validFrom: "2024-01-01T00:00:00Z"
|
||||
expiresAt: null
|
||||
contact: "devops@example.com"
|
||||
|
||||
did:plc:security-team:
|
||||
name: "Security Team"
|
||||
validFrom: "2024-01-01T00:00:00Z"
|
||||
expiresAt: null
|
||||
|
||||
did:plc:developers:
|
||||
name: "Developer Team"
|
||||
validFrom: "2024-06-01T00:00:00Z"
|
||||
expiresAt: "2025-12-31T23:59:59Z"
|
||||
```
|
||||
|
||||
### Policy Matching
|
||||
|
||||
Policies are evaluated in order. First match wins.
|
||||
|
||||
**Scope patterns:**
|
||||
- `atcr.io/*/*` - All ATCR images
|
||||
- `atcr.io/myorg/*` - All images from myorg
|
||||
- `atcr.io/*/prod-*` - All images with "prod-" prefix
|
||||
- `atcr.io/myorg/myapp` - Specific repository
|
||||
- `atcr.io/myorg/myapp:v*` - Tag pattern matching
|
||||
|
||||
### Policy Actions
|
||||
|
||||
**`enforce`** - Reject if policy fails
|
||||
- Exit code 4
|
||||
- Blocks deployment
|
||||
|
||||
**`audit`** - Log but allow
|
||||
- Exit code 0 (success)
|
||||
- Warning message printed
|
||||
|
||||
**`allow`** - Always allow
|
||||
- No verification performed
|
||||
- Exit code 0
|
||||
|
||||
### Policy Requirements
|
||||
|
||||
**`signature: true`** - Require signature present
|
||||
|
||||
**`trustedDIDs`** - List of trusted DIDs
|
||||
```yaml
|
||||
trustedDIDs:
|
||||
- did:plc:alice123
|
||||
- did:web:example.com
|
||||
```
|
||||
|
||||
**`minSignatures`** - Minimum number of signatures required
|
||||
```yaml
|
||||
minSignatures: 2 # Require 2 signatures
|
||||
```
|
||||
|
||||
**`maxAge`** - Maximum signature age in seconds
|
||||
```yaml
|
||||
maxAge: 2592000 # 30 days
|
||||
```
|
||||
|
||||
**`algorithms`** - Allowed signature algorithms
|
||||
```yaml
|
||||
algorithms:
|
||||
- ECDSA-K256-SHA256
|
||||
```
|
||||
|
||||
## Verification Flow
|
||||
|
||||
### 1. Image Resolution
|
||||
|
||||
```
|
||||
Input: atcr.io/alice/myapp:latest
|
||||
↓
|
||||
Resolve tag to digest
|
||||
↓
|
||||
Output: sha256:abc123...
|
||||
```
|
||||
|
||||
### 2. Signature Discovery
|
||||
|
||||
```
|
||||
Query OCI Referrers API:
|
||||
GET /v2/alice/myapp/referrers/sha256:abc123
|
||||
?artifactType=application/vnd.atproto.signature.v1+json
|
||||
↓
|
||||
Returns: List of signature artifacts
|
||||
↓
|
||||
Download signature metadata blobs
|
||||
```
|
||||
|
||||
### 3. DID Resolution
|
||||
|
||||
```
|
||||
Extract DID from signature: did:plc:alice123
|
||||
↓
|
||||
Query PLC directory:
|
||||
GET https://plc.directory/did:plc:alice123
|
||||
↓
|
||||
Extract public key from DID document
|
||||
```
|
||||
|
||||
### 4. PDS Query
|
||||
|
||||
```
|
||||
Get PDS endpoint from DID document
|
||||
↓
|
||||
Query for manifest record:
|
||||
GET {pds}/xrpc/com.atproto.repo.getRecord
|
||||
?repo=did:plc:alice123
|
||||
&collection=io.atcr.manifest
|
||||
&rkey=abc123
|
||||
↓
|
||||
Get commit CID from record
|
||||
↓
|
||||
Fetch commit data (includes signature)
|
||||
```
|
||||
|
||||
### 5. Signature Verification
|
||||
|
||||
```
|
||||
Extract signature bytes from commit
|
||||
↓
|
||||
Compute commit hash (SHA-256)
|
||||
↓
|
||||
Verify: ECDSA_K256(hash, signature, publicKey)
|
||||
↓
|
||||
Result: Valid or Invalid
|
||||
```
|
||||
|
||||
### 6. Trust Policy Evaluation
|
||||
|
||||
```
|
||||
Check if DID is in trustedDIDs list
|
||||
↓
|
||||
Check signature age < maxAge
|
||||
↓
|
||||
Check minSignatures satisfied
|
||||
↓
|
||||
Apply policy action (enforce/audit/allow)
|
||||
```
|
||||
|
||||
## Integration Examples
|
||||
|
||||
### CI/CD Pipeline
|
||||
|
||||
**GitHub Actions:**
|
||||
```yaml
|
||||
name: Deploy
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
|
||||
jobs:
|
||||
verify-and-deploy:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Install atcr-verify
|
||||
run: |
|
||||
curl -L https://github.com/atcr-io/atcr/releases/latest/download/atcr-verify-linux-amd64 -o atcr-verify
|
||||
chmod +x atcr-verify
|
||||
sudo mv atcr-verify /usr/local/bin/
|
||||
|
||||
- name: Verify image signature
|
||||
run: |
|
||||
atcr-verify ${{ env.IMAGE }} --policy .github/trust-policy.yaml
|
||||
|
||||
- name: Deploy to production
|
||||
if: success()
|
||||
run: kubectl set image deployment/app app=${{ env.IMAGE }}
|
||||
```
|
||||
|
||||
**GitLab CI:**
|
||||
```yaml
|
||||
verify:
|
||||
stage: verify
|
||||
image: atcr.io/atcr/verify:latest
|
||||
script:
|
||||
- atcr-verify ${IMAGE} --policy trust-policy.yaml
|
||||
|
||||
deploy:
|
||||
stage: deploy
|
||||
dependencies:
|
||||
- verify
|
||||
script:
|
||||
- kubectl set image deployment/app app=${IMAGE}
|
||||
```
|
||||
|
||||
**Jenkins:**
|
||||
```groovy
|
||||
pipeline {
|
||||
agent any
|
||||
|
||||
stages {
|
||||
stage('Verify') {
|
||||
steps {
|
||||
sh 'atcr-verify ${IMAGE} --policy trust-policy.yaml'
|
||||
}
|
||||
}
|
||||
|
||||
stage('Deploy') {
|
||||
when {
|
||||
expression { currentBuild.result == 'SUCCESS' }
|
||||
}
|
||||
steps {
|
||||
sh 'kubectl set image deployment/app app=${IMAGE}'
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Kubernetes Admission Controller
|
||||
|
||||
**Using as webhook backend:**
|
||||
|
||||
```go
|
||||
// webhook server
|
||||
func (h *Handler) ValidatePod(w http.ResponseWriter, r *http.Request) {
|
||||
var admReq admissionv1.AdmissionReview
|
||||
json.NewDecoder(r.Body).Decode(&admReq)
|
||||
|
||||
pod := &corev1.Pod{}
|
||||
json.Unmarshal(admReq.Request.Object.Raw, pod)
|
||||
|
||||
// Verify each container image
|
||||
for _, container := range pod.Spec.Containers {
|
||||
cmd := exec.Command("atcr-verify", container.Image,
|
||||
"--policy", "/etc/atcr/trust-policy.yaml",
|
||||
"--quiet")
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
// Verification failed
|
||||
admResp := admissionv1.AdmissionReview{
|
||||
Response: &admissionv1.AdmissionResponse{
|
||||
UID: admReq.Request.UID,
|
||||
Allowed: false,
|
||||
Result: &metav1.Status{
|
||||
Message: fmt.Sprintf("Image %s failed signature verification", container.Image),
|
||||
},
|
||||
},
|
||||
}
|
||||
json.NewEncoder(w).Encode(admResp)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// All images verified
|
||||
admResp := admissionv1.AdmissionReview{
|
||||
Response: &admissionv1.AdmissionResponse{
|
||||
UID: admReq.Request.UID,
|
||||
Allowed: true,
|
||||
},
|
||||
}
|
||||
json.NewEncoder(w).Encode(admResp)
|
||||
}
|
||||
```
|
||||
|
||||
### Pre-Pull Verification
|
||||
|
||||
**Systemd service:**
|
||||
```ini
|
||||
# /etc/systemd/system/myapp.service
|
||||
[Unit]
|
||||
Description=My Application
|
||||
After=docker.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStartPre=/usr/local/bin/atcr-verify atcr.io/myorg/myapp:latest --policy /etc/atcr/policy.yaml
|
||||
ExecStartPre=/usr/bin/docker pull atcr.io/myorg/myapp:latest
|
||||
ExecStart=/usr/bin/docker run atcr.io/myorg/myapp:latest
|
||||
Restart=on-failure
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
**Docker wrapper script:**
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# docker-secure-pull.sh
|
||||
|
||||
IMAGE="$1"
|
||||
|
||||
# Verify before pulling
|
||||
if ! atcr-verify "$IMAGE" --policy ~/.atcr/trust-policy.yaml; then
|
||||
echo "ERROR: Image signature verification failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Pull if verified
|
||||
docker pull "$IMAGE"
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Config File
|
||||
|
||||
Location: `~/.atcr/config.yaml`
|
||||
|
||||
```yaml
|
||||
# Default trust policy
|
||||
defaultPolicy: ~/.atcr/trust-policy.yaml
|
||||
|
||||
# Cache settings
|
||||
cache:
|
||||
enabled: true
|
||||
directory: ~/.atcr/cache
|
||||
ttl:
|
||||
didDocuments: 3600 # 1 hour
|
||||
commits: 600 # 10 minutes
|
||||
|
||||
# Network settings
|
||||
timeout: 30s
|
||||
retries: 3
|
||||
|
||||
# Output settings
|
||||
output:
|
||||
format: text # text, json, quiet
|
||||
color: auto # auto, always, never
|
||||
|
||||
# Registry settings
|
||||
registries:
|
||||
atcr.io:
|
||||
insecure: false
|
||||
credentialsFile: ~/.docker/config.json
|
||||
```
|
||||
|
||||
### Environment Variables
|
||||
|
||||
- `ATCR_CONFIG` - Config file path
|
||||
- `ATCR_POLICY` - Default trust policy file
|
||||
- `ATCR_CACHE_DIR` - Cache directory
|
||||
- `ATCR_OUTPUT` - Output format (text, json, quiet)
|
||||
- `ATCR_TIMEOUT` - Verification timeout
|
||||
- `HTTP_PROXY` / `HTTPS_PROXY` - Proxy settings
|
||||
- `NO_CACHE` - Disable caching
|
||||
|
||||
## Library Usage
|
||||
|
||||
`atcr-verify` can also be used as a Go library:
|
||||
|
||||
```go
|
||||
import "github.com/atcr-io/atcr/pkg/verify"
|
||||
|
||||
func main() {
|
||||
verifier := verify.NewVerifier(verify.Config{
|
||||
Policy: policy,
|
||||
Timeout: 30 * time.Second,
|
||||
})
|
||||
|
||||
result, err := verifier.Verify(ctx, "atcr.io/alice/myapp:latest")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !result.Verified {
|
||||
log.Fatal("Verification failed")
|
||||
}
|
||||
|
||||
fmt.Printf("Verified by %s\n", result.Signature.DID)
|
||||
}
|
||||
```
|
||||
|
||||
## Performance
|
||||
|
||||
### Typical Verification Times
|
||||
|
||||
- **First verification:** 500-1000ms
|
||||
- OCI Referrers API: 50-100ms
|
||||
- DID resolution: 50-150ms
|
||||
- PDS query: 100-300ms
|
||||
- Signature verification: 1-5ms
|
||||
|
||||
- **Cached verification:** 50-150ms
|
||||
- DID document cached
|
||||
- Signature metadata cached
|
||||
|
||||
### Optimization Tips
|
||||
|
||||
1. **Enable caching** - DID documents change rarely
|
||||
2. **Use offline bundles** - For air-gapped environments
|
||||
3. **Parallel verification** - Verify multiple images concurrently
|
||||
4. **Local trust policy** - Avoid remote policy fetches
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Verification Fails
|
||||
|
||||
```bash
|
||||
atcr-verify atcr.io/alice/myapp:latest --verbose
|
||||
```
|
||||
|
||||
Common issues:
|
||||
- **No signature found** - Image not signed, check Referrers API
|
||||
- **DID resolution failed** - Network issue, check PLC directory
|
||||
- **PDS unreachable** - Network issue, check PDS endpoint
|
||||
- **Signature invalid** - Tampering detected or key mismatch
|
||||
- **Trust policy violation** - DID not in trusted list
|
||||
|
||||
### Enable Debug Logging
|
||||
|
||||
```bash
|
||||
ATCR_LOG_LEVEL=debug atcr-verify IMAGE
|
||||
```
|
||||
|
||||
### Clear Cache
|
||||
|
||||
```bash
|
||||
rm -rf ~/.atcr/cache
|
||||
```
|
||||
|
||||
## See Also
|
||||
|
||||
- [ATProto Signatures](./ATPROTO_SIGNATURES.md) - How ATProto signing works
|
||||
- [Integration Strategy](./INTEGRATION_STRATEGY.md) - Overview of integration approaches
|
||||
- [Signature Integration](./SIGNATURE_INTEGRATION.md) - Tool-specific guides
|
||||
- [Trust Policy Examples](../examples/verification/trust-policy.yaml)
|
||||
@@ -1,501 +0,0 @@
|
||||
# ATProto Signatures for Container Images
|
||||
|
||||
## Overview
|
||||
|
||||
ATCR container images are **already cryptographically signed** through ATProto's repository commit system. Every manifest stored in a user's PDS is signed with the user's ATProto signing key, providing cryptographic proof of authorship and integrity.
|
||||
|
||||
This document explains:
|
||||
- How ATProto signing works
|
||||
- Why additional signing tools aren't needed
|
||||
- How to bridge ATProto signatures to the OCI/ORAS ecosystem
|
||||
- Trust model and security considerations
|
||||
|
||||
## Key Insight: Manifests Are Already Signed
|
||||
|
||||
When you push an image to ATCR:
|
||||
|
||||
```bash
|
||||
docker push atcr.io/alice/myapp:latest
|
||||
```
|
||||
|
||||
The following happens:
|
||||
|
||||
1. **AppView stores manifest** as an `io.atcr.manifest` record in alice's PDS
|
||||
2. **PDS creates repository commit** containing the manifest record
|
||||
3. **PDS signs the commit** with alice's ATProto signing key (ECDSA K-256)
|
||||
4. **Signature is stored** in the repository commit object
|
||||
|
||||
**Result:** The manifest is cryptographically signed with alice's private key, and anyone can verify it using alice's public key from her DID document.
|
||||
|
||||
## ATProto Signing Mechanism
|
||||
|
||||
### Repository Commit Signing
|
||||
|
||||
ATProto uses a Merkle Search Tree (MST) to store records, and every modification creates a signed commit:
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ Repository Commit │
|
||||
├─────────────────────────────────────────────┤
|
||||
│ DID: did:plc:alice123 │
|
||||
│ Version: 3jzfkjqwdwa2a │
|
||||
│ Previous: bafyreig7... (parent commit) │
|
||||
│ Data CID: bafyreih8... (MST root) │
|
||||
│ ┌───────────────────────────────────────┐ │
|
||||
│ │ Signature (ECDSA K-256 + SHA-256) │ │
|
||||
│ │ Signed with: alice's private key │ │
|
||||
│ │ Value: 0x3045022100... (DER format) │ │
|
||||
│ └───────────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────────┘
|
||||
│
|
||||
↓
|
||||
┌─────────────────────┐
|
||||
│ Merkle Search Tree │
|
||||
│ (contains records) │
|
||||
└─────────────────────┘
|
||||
│
|
||||
↓
|
||||
┌────────────────────────────┐
|
||||
│ io.atcr.manifest record │
|
||||
│ Repository: myapp │
|
||||
│ Digest: sha256:abc123... │
|
||||
│ Layers: [...] │
|
||||
└────────────────────────────┘
|
||||
```
|
||||
|
||||
### Signature Algorithm
|
||||
|
||||
**Algorithm:** ECDSA with K-256 (secp256k1) curve + SHA-256 hash
|
||||
- **Curve:** secp256k1 (same as Bitcoin, Ethereum)
|
||||
- **Hash:** SHA-256
|
||||
- **Format:** DER-encoded signature bytes
|
||||
- **Variant:** "low-S" signatures (per BIP-0062)
|
||||
|
||||
**Signing process:**
|
||||
1. Serialize commit data as DAG-CBOR
|
||||
2. Hash with SHA-256
|
||||
3. Sign hash with ECDSA K-256 private key
|
||||
4. Store signature in commit object
|
||||
|
||||
### Public Key Distribution
|
||||
|
||||
Public keys are distributed via DID documents, accessible through DID resolution:
|
||||
|
||||
**DID Resolution Flow:**
|
||||
```
|
||||
did:plc:alice123
|
||||
↓
|
||||
Query PLC directory: https://plc.directory/did:plc:alice123
|
||||
↓
|
||||
DID Document:
|
||||
{
|
||||
"@context": ["https://www.w3.org/ns/did/v1"],
|
||||
"id": "did:plc:alice123",
|
||||
"verificationMethod": [{
|
||||
"id": "did:plc:alice123#atproto",
|
||||
"type": "Multikey",
|
||||
"controller": "did:plc:alice123",
|
||||
"publicKeyMultibase": "zQ3shokFTS3brHcDQrn82RUDfCZESWL1ZdCEJwekUDdo1Ko4Z"
|
||||
}],
|
||||
"service": [{
|
||||
"id": "#atproto_pds",
|
||||
"type": "AtprotoPersonalDataServer",
|
||||
"serviceEndpoint": "https://bsky.social"
|
||||
}]
|
||||
}
|
||||
```
|
||||
|
||||
**Public key format:**
|
||||
- **Encoding:** Multibase (base58btc with `z` prefix)
|
||||
- **Codec:** Multicodec `0xE701` for K-256 keys
|
||||
- **Example:** `zQ3sh...` decodes to 33-byte compressed public key
|
||||
|
||||
## Verification Process
|
||||
|
||||
To verify a manifest's signature:
|
||||
|
||||
### Step 1: Resolve Image to Manifest Digest
|
||||
|
||||
```bash
|
||||
# Get manifest digest
|
||||
DIGEST=$(crane digest atcr.io/alice/myapp:latest)
|
||||
# Result: sha256:abc123...
|
||||
```
|
||||
|
||||
### Step 2: Fetch Manifest Record from PDS
|
||||
|
||||
```bash
|
||||
# Extract repository name from image reference
|
||||
REPO="myapp"
|
||||
|
||||
# Query PDS for manifest record
|
||||
curl "https://bsky.social/xrpc/com.atproto.repo.listRecords?\
|
||||
repo=did:plc:alice123&\
|
||||
collection=io.atcr.manifest&\
|
||||
limit=100" | jq -r '.records[] | select(.value.digest == "sha256:abc123...")'
|
||||
```
|
||||
|
||||
Response includes:
|
||||
```json
|
||||
{
|
||||
"uri": "at://did:plc:alice123/io.atcr.manifest/abc123",
|
||||
"cid": "bafyreig7...",
|
||||
"value": {
|
||||
"$type": "io.atcr.manifest",
|
||||
"repository": "myapp",
|
||||
"digest": "sha256:abc123...",
|
||||
...
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Step 3: Fetch Repository Commit
|
||||
|
||||
```bash
|
||||
# Get current repository state
|
||||
curl "https://bsky.social/xrpc/com.atproto.sync.getRepo?\
|
||||
did=did:plc:alice123" --output repo.car
|
||||
|
||||
# Extract commit from CAR file (requires ATProto tools)
|
||||
# Commit includes signature over repository state
|
||||
```
|
||||
|
||||
### Step 4: Resolve DID to Public Key
|
||||
|
||||
```bash
|
||||
# Resolve DID document
|
||||
curl "https://plc.directory/did:plc:alice123" | jq -r '.verificationMethod[0].publicKeyMultibase'
|
||||
# Result: zQ3shokFTS3brHcDQrn82RUDfCZESWL1ZdCEJwekUDdo1Ko4Z
|
||||
```
|
||||
|
||||
### Step 5: Verify Signature
|
||||
|
||||
```go
|
||||
// Pseudocode for verification
|
||||
import "github.com/bluesky-social/indigo/atproto/crypto"
|
||||
|
||||
// 1. Parse commit
|
||||
commit := parseCommitFromCAR(repoCAR)
|
||||
|
||||
// 2. Extract signature bytes
|
||||
signature := commit.Sig
|
||||
|
||||
// 3. Get bytes that were signed
|
||||
bytesToVerify := commit.Unsigned().BytesForSigning()
|
||||
|
||||
// 4. Decode public key from multibase
|
||||
pubKey := decodeMultibasePublicKey(publicKeyMultibase)
|
||||
|
||||
// 5. Verify ECDSA signature
|
||||
valid := crypto.VerifySignature(pubKey, bytesToVerify, signature)
|
||||
```
|
||||
|
||||
### Step 6: Verify Manifest Integrity
|
||||
|
||||
```bash
|
||||
# Verify the manifest record's CID matches the content
|
||||
# CID is content-addressed, so tampering changes the CID
|
||||
```
|
||||
|
||||
## Bridging to OCI/ORAS Ecosystem
|
||||
|
||||
While ATProto signatures are cryptographically sound, the OCI ecosystem doesn't understand ATProto records. To make signatures discoverable, we create **ORAS signature artifacts** that reference the ATProto signature.
|
||||
|
||||
### ORAS Signature Artifact Format
|
||||
|
||||
```json
|
||||
{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"artifactType": "application/vnd.atproto.signature.v1+json",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.oci.empty.v1+json",
|
||||
"digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
|
||||
"size": 2
|
||||
},
|
||||
"subject": {
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"digest": "sha256:abc123...",
|
||||
"size": 1234
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.atproto.signature.v1+json",
|
||||
"digest": "sha256:sig789...",
|
||||
"size": 512,
|
||||
"annotations": {
|
||||
"org.opencontainers.image.title": "atproto-signature.json"
|
||||
}
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"io.atcr.atproto.did": "did:plc:alice123",
|
||||
"io.atcr.atproto.pds": "https://bsky.social",
|
||||
"io.atcr.atproto.recordUri": "at://did:plc:alice123/io.atcr.manifest/abc123",
|
||||
"io.atcr.atproto.commitCid": "bafyreih8...",
|
||||
"io.atcr.atproto.signedAt": "2025-10-31T12:34:56.789Z",
|
||||
"io.atcr.atproto.keyId": "did:plc:alice123#atproto"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Key elements:**
|
||||
|
||||
1. **artifactType**: `application/vnd.atproto.signature.v1+json` - identifies this as an ATProto signature
|
||||
2. **subject**: Links to the image manifest being signed
|
||||
3. **layers**: Contains signature metadata blob
|
||||
4. **annotations**: Quick-access metadata for verification
|
||||
|
||||
### Signature Metadata Blob
|
||||
|
||||
The layer blob contains detailed verification information:
|
||||
|
||||
```json
|
||||
{
|
||||
"$type": "io.atcr.atproto.signature",
|
||||
"version": "1.0",
|
||||
"subject": {
|
||||
"digest": "sha256:abc123...",
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json"
|
||||
},
|
||||
"atproto": {
|
||||
"did": "did:plc:alice123",
|
||||
"handle": "alice.bsky.social",
|
||||
"pdsEndpoint": "https://bsky.social",
|
||||
"recordUri": "at://did:plc:alice123/io.atcr.manifest/abc123",
|
||||
"recordCid": "bafyreig7...",
|
||||
"commitCid": "bafyreih8...",
|
||||
"commitRev": "3jzfkjqwdwa2a",
|
||||
"signedAt": "2025-10-31T12:34:56.789Z"
|
||||
},
|
||||
"signature": {
|
||||
"algorithm": "ECDSA-K256-SHA256",
|
||||
"keyId": "did:plc:alice123#atproto",
|
||||
"publicKeyMultibase": "zQ3shokFTS3brHcDQrn82RUDfCZESWL1ZdCEJwekUDdo1Ko4Z"
|
||||
},
|
||||
"verification": {
|
||||
"method": "atproto-repo-commit",
|
||||
"instructions": "Fetch repository commit from PDS and verify signature using public key from DID document"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Discovery via Referrers API
|
||||
|
||||
ORAS artifacts are discoverable via the OCI Referrers API:
|
||||
|
||||
```bash
|
||||
# Query for signature artifacts
|
||||
curl "https://atcr.io/v2/alice/myapp/referrers/sha256:abc123?\
|
||||
artifactType=application/vnd.atproto.signature.v1+json"
|
||||
```
|
||||
|
||||
Response:
|
||||
```json
|
||||
{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.index.v1+json",
|
||||
"manifests": [
|
||||
{
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"digest": "sha256:sig789...",
|
||||
"size": 1234,
|
||||
"artifactType": "application/vnd.atproto.signature.v1+json",
|
||||
"annotations": {
|
||||
"io.atcr.atproto.did": "did:plc:alice123",
|
||||
"io.atcr.atproto.signedAt": "2025-10-31T12:34:56.789Z"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Trust Model
|
||||
|
||||
### What ATProto Signatures Prove
|
||||
|
||||
✅ **Authenticity**: Image was published by the DID owner
|
||||
✅ **Integrity**: Image manifest hasn't been tampered with since signing
|
||||
✅ **Non-repudiation**: Only the DID owner could have created this signature
|
||||
✅ **Timestamp**: When the image was signed (commit timestamp)
|
||||
|
||||
### What ATProto Signatures Don't Prove
|
||||
|
||||
❌ **Safety**: Image doesn't contain vulnerabilities (use vulnerability scanning)
|
||||
❌ **DID trustworthiness**: Whether the DID owner is trustworthy (trust policy decision)
|
||||
❌ **Key security**: Private key wasn't compromised (same limitation as all PKI)
|
||||
❌ **PDS honesty**: PDS operator serves correct data (verify across multiple sources)
|
||||
|
||||
### Trust Dependencies
|
||||
|
||||
1. **DID Resolution**: Must correctly resolve DID to public key
|
||||
- **Mitigation**: Use multiple resolvers, cache DID documents
|
||||
|
||||
2. **PDS Availability**: Must query PDS to verify signatures
|
||||
- **Mitigation**: Embed signature bytes in ORAS blob for offline verification
|
||||
|
||||
3. **PDS Honesty**: PDS could serve fake/unsigned records
|
||||
- **Mitigation**: Signature verification prevents this (can't forge signature)
|
||||
|
||||
4. **Key Security**: User's private key could be compromised
|
||||
- **Mitigation**: Key rotation via DID document updates, short-lived credentials
|
||||
|
||||
5. **Algorithm Security**: ECDSA K-256 must remain secure
|
||||
- **Status**: Well-studied, same as Bitcoin/Ethereum (widely trusted)
|
||||
|
||||
### Comparison with Other Signing Systems
|
||||
|
||||
| Aspect | ATProto Signatures | Cosign (Keyless) | Notary v2 |
|
||||
|--------|-------------------|------------------|-----------|
|
||||
| **Identity** | DID (decentralized) | OIDC (federated) | X.509 (PKI) |
|
||||
| **Key Management** | PDS signing keys | Ephemeral (Fulcio) | User-managed |
|
||||
| **Trust Anchor** | DID resolution | Fulcio CA + Rekor | Certificate chain |
|
||||
| **Transparency Log** | ATProto firehose | Rekor | Optional |
|
||||
| **Offline Verification** | Limited* | No | Yes |
|
||||
| **Decentralization** | High | Medium | Low |
|
||||
| **Complexity** | Low | High | Medium |
|
||||
|
||||
*Can be improved by embedding signature bytes in ORAS blob
|
||||
|
||||
### Security Considerations
|
||||
|
||||
**Threat: Man-in-the-Middle Attack**
|
||||
- **Attack**: Intercept PDS queries, serve fake records
|
||||
- **Defense**: TLS for PDS communication, verify signature with public key from DID document
|
||||
- **Result**: Attacker can't forge signature without private key
|
||||
|
||||
**Threat: Compromised PDS**
|
||||
- **Attack**: PDS operator serves unsigned/fake manifests
|
||||
- **Defense**: Signature verification fails (PDS can't sign without user's private key)
|
||||
- **Result**: Protected
|
||||
|
||||
**Threat: Key Compromise**
|
||||
- **Attack**: Attacker steals user's ATProto signing key
|
||||
- **Defense**: Key rotation via DID document, revoke old keys
|
||||
- **Result**: Same as any PKI system (rotate keys quickly)
|
||||
|
||||
**Threat: Replay Attack**
|
||||
- **Attack**: Replay old signed manifest to rollback to vulnerable version
|
||||
- **Defense**: Check commit timestamp, verify commit is in current repository DAG
|
||||
- **Result**: Protected (commits form immutable chain)
|
||||
|
||||
**Threat: DID Takeover**
|
||||
- **Attack**: Attacker gains control of user's DID (rotation keys)
|
||||
- **Defense**: Monitor DID document changes, verify key history
|
||||
- **Result**: Serious but requires compromising rotation keys (harder than signing keys)
|
||||
|
||||
## Implementation Strategy
|
||||
|
||||
### Automatic Signature Artifact Creation
|
||||
|
||||
When AppView stores a manifest in a user's PDS:
|
||||
|
||||
1. **Store manifest record** (existing behavior)
|
||||
2. **Get commit response** with commit CID and revision
|
||||
3. **Create ORAS signature artifact**:
|
||||
- Build metadata blob (JSON)
|
||||
- Upload blob to hold storage
|
||||
- Create ORAS manifest with subject = image manifest
|
||||
- Store ORAS manifest (creates referrer link)
|
||||
|
||||
### Storage Location
|
||||
|
||||
Signature artifacts follow the same pattern as SBOMs:
|
||||
- **Metadata blobs**: Stored in hold's blob storage
|
||||
- **ORAS manifests**: Stored in hold's embedded PDS
|
||||
- **Discovery**: Via OCI Referrers API
|
||||
|
||||
### Verification Tools
|
||||
|
||||
**Option 1: Custom CLI tool (`atcr-verify`)**
|
||||
```bash
|
||||
atcr-verify atcr.io/alice/myapp:latest
|
||||
# → Queries referrers API
|
||||
# → Fetches signature metadata
|
||||
# → Resolves DID → public key
|
||||
# → Queries PDS for commit
|
||||
# → Verifies signature
|
||||
```
|
||||
|
||||
**Option 2: Shell script (curl + jq)**
|
||||
- See `docs/SIGNATURE_INTEGRATION.md` for examples
|
||||
|
||||
**Option 3: Kubernetes admission controller**
|
||||
- Custom webhook that runs verification
|
||||
- Rejects pods with unsigned/invalid signatures
|
||||
|
||||
## Benefits of ATProto Signatures
|
||||
|
||||
### Compared to No Signing
|
||||
|
||||
✅ **Cryptographic proof** of image authorship
|
||||
✅ **Tamper detection** for manifests
|
||||
✅ **Identity binding** via DIDs
|
||||
✅ **Audit trail** via ATProto repository history
|
||||
|
||||
### Compared to Cosign/Notary
|
||||
|
||||
✅ **No additional signing required** (already signed by PDS)
|
||||
✅ **Decentralized identity** (DIDs, not CAs)
|
||||
✅ **Simpler infrastructure** (no Fulcio, no Rekor, no TUF)
|
||||
✅ **Consistent with ATCR's architecture** (ATProto-native)
|
||||
✅ **Lower operational overhead** (reuse existing PDS infrastructure)
|
||||
|
||||
### Trade-offs
|
||||
|
||||
⚠️ **Custom verification tools required** (standard tools won't work)
|
||||
⚠️ **Online verification preferred** (need to query PDS)
|
||||
⚠️ **Different trust model** (trust DIDs, not CAs)
|
||||
⚠️ **Ecosystem maturity** (newer approach, less tooling)
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
### Short-term
|
||||
|
||||
1. **Offline verification**: Embed signature bytes in ORAS blob
|
||||
2. **Multi-PDS verification**: Check signature across multiple PDSs
|
||||
3. **Key rotation support**: Handle historical key validity
|
||||
|
||||
### Medium-term
|
||||
|
||||
4. **Timestamp service**: RFC 3161 timestamps for long-term validity
|
||||
5. **Multi-signature**: Require N signatures from M DIDs
|
||||
6. **Transparency log integration**: Record verifications in public log
|
||||
|
||||
### Long-term
|
||||
|
||||
7. **IANA registration**: Register `application/vnd.atproto.signature.v1+json`
|
||||
8. **Standards proposal**: ATProto signature spec to ORAS/OCI
|
||||
9. **Cross-ecosystem bridges**: Convert to Cosign/Notary formats
|
||||
|
||||
## Conclusion
|
||||
|
||||
ATCR images are already cryptographically signed through ATProto's repository commit system. By creating ORAS signature artifacts that reference these existing signatures, we can:
|
||||
|
||||
- ✅ Make signatures discoverable to OCI tooling
|
||||
- ✅ Maintain ATProto as the source of truth
|
||||
- ✅ Provide verification tools for users and clusters
|
||||
- ✅ Avoid duplicating signing infrastructure
|
||||
|
||||
This approach leverages ATProto's strengths (decentralized identity, built-in signing) while bridging to the OCI ecosystem through standard ORAS artifacts.
|
||||
|
||||
## References
|
||||
|
||||
### ATProto Specifications
|
||||
- [ATProto Repository Specification](https://atproto.com/specs/repository)
|
||||
- [ATProto Data Model](https://atproto.com/specs/data-model)
|
||||
- [ATProto DID Methods](https://atproto.com/specs/did)
|
||||
|
||||
### OCI/ORAS Specifications
|
||||
- [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec)
|
||||
- [OCI Referrers API](https://github.com/opencontainers/distribution-spec/blob/main/spec.md#listing-referrers)
|
||||
- [ORAS Artifacts](https://oras.land/docs/)
|
||||
|
||||
### Cryptography
|
||||
- [ECDSA (secp256k1)](https://en.bitcoin.it/wiki/Secp256k1)
|
||||
- [Multibase Encoding](https://github.com/multiformats/multibase)
|
||||
- [Multicodec](https://github.com/multiformats/multicodec)
|
||||
|
||||
### Related Documentation
|
||||
- [SBOM Scanning](./SBOM_SCANNING.md) - Similar ORAS artifact pattern
|
||||
- [Signature Integration](./SIGNATURE_INTEGRATION.md) - Practical integration examples
|
||||
@@ -1,728 +0,0 @@
|
||||
# Development Workflow for ATCR
|
||||
|
||||
## The Problem
|
||||
|
||||
**Current development cycle with Docker:**
|
||||
1. Edit CSS, JS, template, or Go file
|
||||
2. Run `docker compose build` (rebuilds entire image)
|
||||
3. Run `docker compose up` (restart container)
|
||||
4. Wait **2-3 minutes** for changes to appear
|
||||
5. Test, find issue, repeat...
|
||||
|
||||
**Why it's slow:**
|
||||
- All assets embedded via `embed.FS` at compile time
|
||||
- Multi-stage Docker build compiles everything from scratch
|
||||
- No development mode exists
|
||||
- Final image uses `scratch` base (no tools, no hot reload)
|
||||
|
||||
## The Solution
|
||||
|
||||
**Development setup combining:**
|
||||
1. **Dockerfile.devel** - Development-focused container (golang base, not scratch)
|
||||
2. **Volume mounts** - Live code editing (changes appear instantly in container)
|
||||
3. **DirFS** - Skip embed, read templates/CSS/JS from filesystem
|
||||
4. **Air** - Auto-rebuild on Go code changes
|
||||
|
||||
**Results:**
|
||||
- CSS/JS/Template changes: **Instant** (0 seconds, just refresh browser)
|
||||
- Go code changes: **2-5 seconds** (vs 2-3 minutes)
|
||||
- Production builds: **Unchanged** (still optimized with embed.FS)
|
||||
|
||||
## How It Works
|
||||
|
||||
### Architecture Flow
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ Your Editor (VSCode, etc) │
|
||||
│ Edit: style.css, app.js, *.html, *.go files │
|
||||
└─────────────────┬───────────────────────────────────┘
|
||||
│ (files saved to disk)
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ Volume Mount (docker-compose.dev.yml) │
|
||||
│ volumes: │
|
||||
│ - .:/app (entire codebase mounted) │
|
||||
└─────────────────┬───────────────────────────────────┘
|
||||
│ (changes appear instantly in container)
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ Container (golang:1.25.2 base, has all tools) │
|
||||
│ │
|
||||
│ ┌──────────────────────────────────────┐ │
|
||||
│ │ Air (hot reload tool) │ │
|
||||
│ │ Watches: *.go, *.html, *.css, *.js │ │
|
||||
│ │ │ │
|
||||
│ │ On change: │ │
|
||||
│ │ - *.go → rebuild binary (2-5s) │ │
|
||||
│ │ - templates/css/js → restart only │ │
|
||||
│ └──────────────────────────────────────┘ │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ ┌──────────────────────────────────────┐ │
|
||||
│ │ ATCR AppView (ATCR_DEV_MODE=true) │ │
|
||||
│ │ │ │
|
||||
│ │ ui.go checks DEV_MODE: │ │
|
||||
│ │ if DEV_MODE: │ │
|
||||
│ │ templatesFS = os.DirFS("...") │ │
|
||||
│ │ staticFS = os.DirFS("...") │ │
|
||||
│ │ else: │ │
|
||||
│ │ use embed.FS (production) │ │
|
||||
│ │ │ │
|
||||
│ │ Result: Reads from mounted files │ │
|
||||
│ └──────────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### Change Scenarios
|
||||
|
||||
#### Scenario 1: Edit CSS/JS/Templates
|
||||
```
|
||||
1. Edit pkg/appview/static/css/style.css in VSCode
|
||||
2. Save file
|
||||
3. Change appears in container via volume mount (instant)
|
||||
4. App uses os.DirFS → reads new file from disk (instant)
|
||||
5. Refresh browser → see changes
|
||||
```
|
||||
**Time:** **Instant** (0 seconds)
|
||||
**No rebuild, no restart!**
|
||||
|
||||
#### Scenario 2: Edit Go Code
|
||||
```
|
||||
1. Edit pkg/appview/handlers/home.go
|
||||
2. Save file
|
||||
3. Air detects .go file change
|
||||
4. Air runs: go build -o ./tmp/atcr-appview ./cmd/appview
|
||||
5. Air kills old process and starts new binary
|
||||
6. App runs with new code
|
||||
```
|
||||
**Time:** **2-5 seconds**
|
||||
**Fast incremental build!**
|
||||
|
||||
## Implementation
|
||||
|
||||
### Step 1: Create Dockerfile.devel
|
||||
|
||||
Create `Dockerfile.devel` in project root:
|
||||
|
||||
```dockerfile
|
||||
# Development Dockerfile with hot reload support
|
||||
FROM golang:1.25.2-trixie
|
||||
|
||||
# Install Air for hot reload
|
||||
RUN go install github.com/air-verse/air@latest
|
||||
|
||||
# Install SQLite (required for CGO in ATCR)
|
||||
RUN apt-get update && apt-get install -y \
|
||||
sqlite3 \
|
||||
libsqlite3-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy dependency files and download (cached layer)
|
||||
COPY go.mod go.sum ./
|
||||
RUN go mod download
|
||||
|
||||
# Note: Source code comes from volume mount
|
||||
# (no COPY . . needed - that's the whole point!)
|
||||
|
||||
# Air will handle building and running
|
||||
CMD ["air", "-c", ".air.toml"]
|
||||
```
|
||||
|
||||
### Step 2: Create docker-compose.dev.yml
|
||||
|
||||
Create `docker-compose.dev.yml` in project root:
|
||||
|
||||
```yaml
|
||||
version: '3.8'
|
||||
|
||||
services:
|
||||
atcr-appview:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile.devel
|
||||
volumes:
|
||||
# Mount entire codebase (live editing)
|
||||
- .:/app
|
||||
# Cache Go modules (faster rebuilds)
|
||||
- go-cache:/go/pkg/mod
|
||||
# Persist SQLite database
|
||||
- atcr-ui-dev:/var/lib/atcr
|
||||
environment:
|
||||
# Enable development mode (uses os.DirFS)
|
||||
ATCR_DEV_MODE: "true"
|
||||
|
||||
# AppView configuration
|
||||
ATCR_HTTP_ADDR: ":5000"
|
||||
ATCR_BASE_URL: "http://localhost:5000"
|
||||
ATCR_DEFAULT_HOLD_DID: "did:web:hold01.atcr.io"
|
||||
|
||||
# Database
|
||||
ATCR_UI_DATABASE_PATH: "/var/lib/atcr/ui.db"
|
||||
|
||||
# Auth
|
||||
ATCR_AUTH_KEY_PATH: "/var/lib/atcr/auth/private-key.pem"
|
||||
|
||||
# UI
|
||||
ATCR_UI_ENABLED: "true"
|
||||
|
||||
# Jetstream (optional)
|
||||
# JETSTREAM_URL: "wss://jetstream2.us-east.bsky.network/subscribe"
|
||||
# ATCR_BACKFILL_ENABLED: "false"
|
||||
ports:
|
||||
- "5000:5000"
|
||||
networks:
|
||||
- atcr-dev
|
||||
|
||||
# Add other services as needed (postgres, hold, etc)
|
||||
# atcr-hold:
|
||||
# ...
|
||||
|
||||
networks:
|
||||
atcr-dev:
|
||||
driver: bridge
|
||||
|
||||
volumes:
|
||||
go-cache:
|
||||
atcr-ui-dev:
|
||||
```
|
||||
|
||||
### Step 3: Create .air.toml
|
||||
|
||||
Create `.air.toml` in project root:
|
||||
|
||||
```toml
|
||||
# Air configuration for hot reload
|
||||
# https://github.com/air-verse/air (formerly cosmtrek/air)
|
||||
|
||||
root = "."
|
||||
testdata_dir = "testdata"
|
||||
tmp_dir = "tmp"
|
||||
|
||||
[build]
|
||||
# Arguments to pass to binary (AppView needs "serve")
|
||||
args_bin = ["serve"]
|
||||
|
||||
# Where to output the built binary
|
||||
bin = "./tmp/atcr-appview"
|
||||
|
||||
# Build command
|
||||
cmd = "go build -o ./tmp/atcr-appview ./cmd/appview"
|
||||
|
||||
# Delay before rebuilding (ms) - debounce rapid saves
|
||||
delay = 1000
|
||||
|
||||
# Directories to exclude from watching
|
||||
exclude_dir = [
|
||||
"tmp",
|
||||
"vendor",
|
||||
"bin",
|
||||
".git",
|
||||
"node_modules",
|
||||
"testdata"
|
||||
]
|
||||
|
||||
# Files to exclude from watching
|
||||
exclude_file = []
|
||||
|
||||
# Regex patterns to exclude
|
||||
exclude_regex = ["_test\\.go"]
|
||||
|
||||
# Don't rebuild if file content unchanged
|
||||
exclude_unchanged = false
|
||||
|
||||
# Follow symlinks
|
||||
follow_symlink = false
|
||||
|
||||
# Full command to run (leave empty to use cmd + bin)
|
||||
full_bin = ""
|
||||
|
||||
# Directories to include (empty = all)
|
||||
include_dir = []
|
||||
|
||||
# File extensions to watch
|
||||
include_ext = ["go", "html", "css", "js"]
|
||||
|
||||
# Specific files to watch
|
||||
include_file = []
|
||||
|
||||
# Delay before killing old process (s)
|
||||
kill_delay = "0s"
|
||||
|
||||
# Log file for build errors
|
||||
log = "build-errors.log"
|
||||
|
||||
# Use polling instead of fsnotify (for Docker/VM)
|
||||
poll = false
|
||||
poll_interval = 0
|
||||
|
||||
# Rerun binary if it exits
|
||||
rerun = false
|
||||
rerun_delay = 500
|
||||
|
||||
# Send interrupt signal instead of kill
|
||||
send_interrupt = false
|
||||
|
||||
# Stop on build error
|
||||
stop_on_error = false
|
||||
|
||||
[color]
|
||||
# Colorize output
|
||||
app = ""
|
||||
build = "yellow"
|
||||
main = "magenta"
|
||||
runner = "green"
|
||||
watcher = "cyan"
|
||||
|
||||
[log]
|
||||
# Show only app logs (not build logs)
|
||||
main_only = false
|
||||
|
||||
# Add timestamp to logs
|
||||
time = false
|
||||
|
||||
[misc]
|
||||
# Clean tmp directory on exit
|
||||
clean_on_exit = false
|
||||
|
||||
[screen]
|
||||
# Clear screen on rebuild
|
||||
clear_on_rebuild = false
|
||||
|
||||
# Keep scrollback
|
||||
keep_scroll = true
|
||||
```
|
||||
|
||||
### Step 4: Modify pkg/appview/ui.go
|
||||
|
||||
Add conditional filesystem loading to `pkg/appview/ui.go`:
|
||||
|
||||
```go
|
||||
package appview
|
||||
|
||||
import (
|
||||
"embed"
|
||||
"html/template"
|
||||
"io/fs"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Embedded assets (used in production)
|
||||
//go:embed templates/**/*.html
|
||||
var embeddedTemplatesFS embed.FS
|
||||
|
||||
//go:embed static
|
||||
var embeddedStaticFS embed.FS
|
||||
|
||||
// Actual filesystems used at runtime (conditional)
|
||||
var templatesFS fs.FS
|
||||
var staticFS fs.FS
|
||||
|
||||
func init() {
|
||||
// Development mode: read from filesystem for instant updates
|
||||
if os.Getenv("ATCR_DEV_MODE") == "true" {
|
||||
log.Println("🔧 DEV MODE: Using filesystem for templates and static assets")
|
||||
templatesFS = os.DirFS("pkg/appview")
|
||||
staticFS = os.DirFS("pkg/appview")
|
||||
} else {
|
||||
// Production mode: use embedded assets
|
||||
log.Println("📦 PRODUCTION MODE: Using embedded assets")
|
||||
templatesFS = embeddedTemplatesFS
|
||||
staticFS = embeddedStaticFS
|
||||
}
|
||||
}
|
||||
|
||||
// Templates returns parsed HTML templates
|
||||
func Templates() *template.Template {
|
||||
tmpl, err := template.ParseFS(templatesFS, "templates/**/*.html")
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to parse templates: %v", err)
|
||||
}
|
||||
return tmpl
|
||||
}
|
||||
|
||||
// StaticHandler returns a handler for static files
|
||||
func StaticHandler() http.Handler {
|
||||
sub, err := fs.Sub(staticFS, "static")
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create static sub-filesystem: %v", err)
|
||||
}
|
||||
return http.FileServer(http.FS(sub))
|
||||
}
|
||||
```
|
||||
|
||||
**Important:** Update the `Templates()` function to NOT cache templates in dev mode:
|
||||
|
||||
```go
|
||||
// Templates returns parsed HTML templates
|
||||
func Templates() *template.Template {
|
||||
// In dev mode, reparse templates on every request (instant updates)
|
||||
// In production, this could be cached
|
||||
tmpl, err := template.ParseFS(templatesFS, "templates/**/*.html")
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to parse templates: %v", err)
|
||||
}
|
||||
return tmpl
|
||||
}
|
||||
```
|
||||
|
||||
If you're caching templates, wrap it with a dev mode check:
|
||||
|
||||
```go
|
||||
var templateCache *template.Template
|
||||
|
||||
func Templates() *template.Template {
|
||||
// Development: reparse every time (instant updates)
|
||||
if os.Getenv("ATCR_DEV_MODE") == "true" {
|
||||
tmpl, err := template.ParseFS(templatesFS, "templates/**/*.html")
|
||||
if err != nil {
|
||||
log.Printf("Template parse error: %v", err)
|
||||
return template.New("error")
|
||||
}
|
||||
return tmpl
|
||||
}
|
||||
|
||||
// Production: use cached templates
|
||||
if templateCache == nil {
|
||||
tmpl, err := template.ParseFS(templatesFS, "templates/**/*.html")
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to parse templates: %v", err)
|
||||
}
|
||||
templateCache = tmpl
|
||||
}
|
||||
return templateCache
|
||||
}
|
||||
```
|
||||
|
||||
### Step 5: Add to .gitignore
|
||||
|
||||
Add Air's temporary directory to `.gitignore`:
|
||||
|
||||
```
|
||||
# Air hot reload
|
||||
tmp/
|
||||
build-errors.log
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
### Starting Development Environment
|
||||
|
||||
```bash
|
||||
# Build and start dev container
|
||||
docker compose -f docker-compose.dev.yml up --build
|
||||
|
||||
# Or run in background
|
||||
docker compose -f docker-compose.dev.yml up -d
|
||||
|
||||
# View logs
|
||||
docker compose -f docker-compose.dev.yml logs -f atcr-appview
|
||||
```
|
||||
|
||||
You should see Air starting:
|
||||
|
||||
```
|
||||
atcr-appview | 🔧 DEV MODE: Using filesystem for templates and static assets
|
||||
atcr-appview |
|
||||
atcr-appview | __ _ ___
|
||||
atcr-appview | / /\ | | | |_)
|
||||
atcr-appview | /_/--\ |_| |_| \_ , built with Go
|
||||
atcr-appview |
|
||||
atcr-appview | watching .
|
||||
atcr-appview | !exclude tmp
|
||||
atcr-appview | building...
|
||||
atcr-appview | running...
|
||||
```
|
||||
|
||||
### Development Workflow
|
||||
|
||||
#### 1. Edit Templates/CSS/JS (Instant Updates)
|
||||
|
||||
```bash
|
||||
# Edit any template, CSS, or JS file
|
||||
vim pkg/appview/templates/pages/home.html
|
||||
vim pkg/appview/static/css/style.css
|
||||
vim pkg/appview/static/js/app.js
|
||||
|
||||
# Save file → changes appear instantly
|
||||
# Just refresh browser (Cmd+R / Ctrl+R)
|
||||
```
|
||||
|
||||
**No rebuild, no restart!** Air might restart the app, but it's instant since no compilation is needed.
|
||||
|
||||
#### 2. Edit Go Code (Fast Rebuild)
|
||||
|
||||
```bash
|
||||
# Edit any Go file
|
||||
vim pkg/appview/handlers/home.go
|
||||
|
||||
# Save file → Air detects change
|
||||
# Air output shows:
|
||||
# building...
|
||||
# build successful in 2.3s
|
||||
# restarting...
|
||||
|
||||
# Refresh browser to see changes
|
||||
```
|
||||
|
||||
**2-5 second rebuild** instead of 2-3 minutes!
|
||||
|
||||
### Stopping Development Environment
|
||||
|
||||
```bash
|
||||
# Stop containers
|
||||
docker compose -f docker-compose.dev.yml down
|
||||
|
||||
# Stop and remove volumes (fresh start)
|
||||
docker compose -f docker-compose.dev.yml down -v
|
||||
```
|
||||
|
||||
## Production Builds
|
||||
|
||||
**Production builds are completely unchanged:**
|
||||
|
||||
```bash
|
||||
# Production uses normal Dockerfile (embed.FS, scratch base)
|
||||
docker compose build
|
||||
|
||||
# Or specific service
|
||||
docker compose build atcr-appview
|
||||
|
||||
# Run production
|
||||
docker compose up
|
||||
```
|
||||
|
||||
**Why it works:**
|
||||
- Production doesn't set `ATCR_DEV_MODE=true`
|
||||
- `ui.go` defaults to embedded assets when env var is unset
|
||||
- Production Dockerfile still uses multi-stage build to scratch
|
||||
- No development dependencies in production image
|
||||
|
||||
## Comparison
|
||||
|
||||
| Change Type | Before (docker compose) | After (dev setup) | Improvement |
|
||||
|-------------|------------------------|-------------------|-------------|
|
||||
| Edit CSS | 2-3 minutes | **Instant (0s)** | ♾️x faster |
|
||||
| Edit JS | 2-3 minutes | **Instant (0s)** | ♾️x faster |
|
||||
| Edit Template | 2-3 minutes | **Instant (0s)** | ♾️x faster |
|
||||
| Edit Go Code | 2-3 minutes | **2-5 seconds** | 24-90x faster |
|
||||
| Production Build | Same | **Same** | No change |
|
||||
|
||||
## Advanced: Local Development (No Docker)
|
||||
|
||||
For even faster development, run locally without Docker:
|
||||
|
||||
```bash
|
||||
# Set environment variables
|
||||
export ATCR_DEV_MODE=true
|
||||
export ATCR_HTTP_ADDR=:5000
|
||||
export ATCR_BASE_URL=http://localhost:5000
|
||||
export ATCR_DEFAULT_HOLD_DID=did:web:hold01.atcr.io
|
||||
export ATCR_UI_DATABASE_PATH=/tmp/atcr-ui.db
|
||||
export ATCR_AUTH_KEY_PATH=/tmp/atcr-auth-key.pem
|
||||
export ATCR_UI_ENABLED=true
|
||||
|
||||
# Or use .env file
|
||||
source .env.appview
|
||||
|
||||
# Run with Air
|
||||
air -c .air.toml
|
||||
|
||||
# Or run directly (no hot reload)
|
||||
go run ./cmd/appview serve
|
||||
```
|
||||
|
||||
**Advantages:**
|
||||
- Even faster (no Docker overhead)
|
||||
- Native debugging with delve
|
||||
- Direct filesystem access
|
||||
- Full IDE integration
|
||||
|
||||
**Disadvantages:**
|
||||
- Need to manage dependencies locally (SQLite, etc)
|
||||
- May differ from production environment
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Air Not Rebuilding
|
||||
|
||||
**Problem:** Air doesn't detect changes
|
||||
|
||||
**Solution:**
|
||||
```bash
|
||||
# Check if Air is actually running
|
||||
docker compose -f docker-compose.dev.yml logs atcr-appview
|
||||
|
||||
# Check .air.toml include_ext includes your file type
|
||||
# Default: ["go", "html", "css", "js"]
|
||||
|
||||
# Restart container
|
||||
docker compose -f docker-compose.dev.yml restart atcr-appview
|
||||
```
|
||||
|
||||
### Templates Not Updating
|
||||
|
||||
**Problem:** Template changes don't appear
|
||||
|
||||
**Solution:**
|
||||
```bash
|
||||
# Check ATCR_DEV_MODE is set
|
||||
docker compose -f docker-compose.dev.yml exec atcr-appview env | grep DEV_MODE
|
||||
|
||||
# Should output: ATCR_DEV_MODE=true
|
||||
|
||||
# Check templates aren't cached (see Step 4 above)
|
||||
# Templates() should reparse in dev mode
|
||||
```
|
||||
|
||||
### Go Build Failing
|
||||
|
||||
**Problem:** Air shows build errors
|
||||
|
||||
**Solution:**
|
||||
```bash
|
||||
# Check build logs
|
||||
docker compose -f docker-compose.dev.yml logs atcr-appview
|
||||
|
||||
# Or check build-errors.log in container
|
||||
docker compose -f docker-compose.dev.yml exec atcr-appview cat build-errors.log
|
||||
|
||||
# Fix the Go error, save file, Air will retry
|
||||
```
|
||||
|
||||
### Volume Mount Not Working
|
||||
|
||||
**Problem:** Changes don't appear in container
|
||||
|
||||
**Solution:**
|
||||
```bash
|
||||
# Verify volume mount
|
||||
docker compose -f docker-compose.dev.yml exec atcr-appview ls -la /app
|
||||
|
||||
# Should show your source files
|
||||
|
||||
# On Windows/Mac, check Docker Desktop file sharing settings
|
||||
# Settings → Resources → File Sharing → add project directory
|
||||
```
|
||||
|
||||
### Permission Errors
|
||||
|
||||
**Problem:** Cannot write to /var/lib/atcr
|
||||
|
||||
**Solution:**
|
||||
```bash
|
||||
# In Dockerfile.devel, add:
|
||||
RUN mkdir -p /var/lib/atcr && chmod 777 /var/lib/atcr
|
||||
|
||||
# Or use named volumes (already in docker-compose.dev.yml)
|
||||
volumes:
|
||||
- atcr-ui-dev:/var/lib/atcr
|
||||
```
|
||||
|
||||
### Slow Builds Even with Air
|
||||
|
||||
**Problem:** Air rebuilds slowly
|
||||
|
||||
**Solution:**
|
||||
```bash
|
||||
# Use Go module cache volume (already in docker-compose.dev.yml)
|
||||
volumes:
|
||||
- go-cache:/go/pkg/mod
|
||||
|
||||
# Increase Air delay to debounce rapid saves
|
||||
# In .air.toml:
|
||||
delay = 2000 # 2 seconds
|
||||
|
||||
# Or check if CGO is slowing builds
|
||||
# AppView needs CGO for SQLite, but you can try:
|
||||
CGO_ENABLED=0 go build # (won't work for ATCR, but good to know)
|
||||
```
|
||||
|
||||
## Tips & Tricks
|
||||
|
||||
### Browser Auto-Reload (LiveReload)
|
||||
|
||||
Add LiveReload for automatic browser refresh:
|
||||
|
||||
```bash
|
||||
# Install browser extension
|
||||
# Chrome: https://chrome.google.com/webstore/detail/livereload
|
||||
# Firefox: https://addons.mozilla.org/en-US/firefox/addon/livereload-web-extension/
|
||||
|
||||
# Add livereload to .air.toml (future Air feature)
|
||||
# Or use a separate tool like browsersync
|
||||
```
|
||||
|
||||
### Database Resets
|
||||
|
||||
Development database is in a named volume:
|
||||
|
||||
```bash
|
||||
# Reset database (fresh start)
|
||||
docker compose -f docker-compose.dev.yml down -v
|
||||
docker compose -f docker-compose.dev.yml up
|
||||
|
||||
# Or delete specific volume
|
||||
docker volume rm atcr_atcr-ui-dev
|
||||
```
|
||||
|
||||
### Multiple Environments
|
||||
|
||||
Run dev and production side-by-side:
|
||||
|
||||
```bash
|
||||
# Development on port 5000
|
||||
docker compose -f docker-compose.dev.yml up -d
|
||||
|
||||
# Production on port 5001
|
||||
docker compose up -d
|
||||
|
||||
# Now you can compare behavior
|
||||
```
|
||||
|
||||
### Debugging with Delve
|
||||
|
||||
Add delve to Dockerfile.devel:
|
||||
|
||||
```dockerfile
|
||||
RUN go install github.com/go-delve/delve/cmd/dlv@latest
|
||||
|
||||
# Change CMD to use delve
|
||||
CMD ["dlv", "debug", "./cmd/appview", "--headless", "--listen=:2345", "--api-version=2", "--accept-multiclient", "--", "serve"]
|
||||
```
|
||||
|
||||
Then connect with VSCode or GoLand.
|
||||
|
||||
## Summary
|
||||
|
||||
**Development Setup (One-Time):**
|
||||
1. Create `Dockerfile.devel`
|
||||
2. Create `docker-compose.dev.yml`
|
||||
3. Create `.air.toml`
|
||||
4. Modify `pkg/appview/ui.go` for conditional DirFS
|
||||
5. Add `tmp/` to `.gitignore`
|
||||
|
||||
**Daily Development:**
|
||||
```bash
|
||||
# Start
|
||||
docker compose -f docker-compose.dev.yml up
|
||||
|
||||
# Edit files in your editor
|
||||
# Changes appear instantly (CSS/JS/templates)
|
||||
# Or in 2-5 seconds (Go code)
|
||||
|
||||
# Stop
|
||||
docker compose -f docker-compose.dev.yml down
|
||||
```
|
||||
|
||||
**Production (Unchanged):**
|
||||
```bash
|
||||
docker compose build
|
||||
docker compose up
|
||||
```
|
||||
|
||||
**Result:** 100x faster development iteration! 🚀
|
||||
@@ -1,756 +0,0 @@
|
||||
# Hold-as-Certificate-Authority Architecture
|
||||
|
||||
## ⚠️ Important Notice
|
||||
|
||||
This document describes an **optional enterprise feature** for X.509 PKI compliance. The hold-as-CA approach introduces **centralization trade-offs** that contradict ATProto's decentralized philosophy.
|
||||
|
||||
**Default Recommendation:** Use [plugin-based integration](./INTEGRATION_STRATEGY.md) instead. Only implement hold-as-CA if your organization has specific X.509 PKI compliance requirements.
|
||||
|
||||
## Overview
|
||||
|
||||
The hold-as-CA architecture allows ATCR to generate Notation/Notary v2-compatible signatures by having hold services act as Certificate Authorities that issue X.509 certificates for users.
|
||||
|
||||
### The Problem
|
||||
|
||||
- **ATProto signatures** use K-256 (secp256k1) elliptic curve
|
||||
- **Notation** only supports P-256, P-384, P-521 elliptic curves
|
||||
- **Cannot convert** K-256 signatures to P-256 (different cryptographic curves)
|
||||
- **Must re-sign** with P-256 keys for Notation compatibility
|
||||
|
||||
### The Solution
|
||||
|
||||
Hold services act as trusted Certificate Authorities (CAs):
|
||||
|
||||
1. User pushes image → Manifest signed by PDS with K-256 (ATProto)
|
||||
2. Hold verifies ATProto signature is valid
|
||||
3. Hold generates ephemeral P-256 key pair for user
|
||||
4. Hold issues X.509 certificate to user's DID
|
||||
5. Hold signs manifest with P-256 key
|
||||
6. Hold creates Notation signature envelope (JWS format)
|
||||
7. Stores both ATProto and Notation signatures
|
||||
|
||||
**Result:** Images have two signatures:
|
||||
- **ATProto signature** (K-256) - Decentralized, DID-based
|
||||
- **Notation signature** (P-256) - Centralized, X.509 PKI
|
||||
|
||||
## Architecture
|
||||
|
||||
### Certificate Chain
|
||||
|
||||
```
|
||||
Hold Root CA Certificate (self-signed, P-256)
|
||||
└── User Certificate (issued to DID, P-256)
|
||||
└── Image Manifest Signature
|
||||
```
|
||||
|
||||
**Hold Root CA:**
|
||||
```
|
||||
Subject: CN=ATCR Hold CA - did:web:hold01.atcr.io
|
||||
Issuer: Self (self-signed)
|
||||
Key Usage: Digital Signature, Certificate Sign
|
||||
Basic Constraints: CA=true, pathLen=1
|
||||
Algorithm: ECDSA P-256
|
||||
Validity: 10 years
|
||||
```
|
||||
|
||||
**User Certificate:**
|
||||
```
|
||||
Subject: CN=did:plc:alice123
|
||||
SAN: URI:did:plc:alice123
|
||||
Issuer: Hold Root CA
|
||||
Key Usage: Digital Signature
|
||||
Extended Key Usage: Code Signing
|
||||
Algorithm: ECDSA P-256
|
||||
Validity: 24 hours (short-lived)
|
||||
```
|
||||
|
||||
### Push Flow
|
||||
|
||||
```
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ 1. User: docker push atcr.io/alice/myapp:latest │
|
||||
└────────────────────┬─────────────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ 2. AppView stores manifest in alice's PDS │
|
||||
│ - PDS signs with K-256 (ATProto standard) │
|
||||
│ - Signature stored in repository commit │
|
||||
└────────────────────┬─────────────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ 3. AppView requests hold to co-sign │
|
||||
│ POST /xrpc/io.atcr.hold.coSignManifest │
|
||||
│ { │
|
||||
│ "userDid": "did:plc:alice123", │
|
||||
│ "manifestDigest": "sha256:abc123...", │
|
||||
│ "atprotoSignature": {...} │
|
||||
│ } │
|
||||
└────────────────────┬─────────────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ 4. Hold verifies ATProto signature │
|
||||
│ a. Resolve alice's DID → public key │
|
||||
│ b. Fetch commit from alice's PDS │
|
||||
│ c. Verify K-256 signature │
|
||||
│ d. Ensure signature is valid │
|
||||
│ │
|
||||
│ If verification fails → REJECT │
|
||||
└────────────────────┬─────────────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ 5. Hold generates ephemeral P-256 key pair │
|
||||
│ privateKey := ecdsa.GenerateKey(elliptic.P256()) │
|
||||
└────────────────────┬─────────────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ 6. Hold issues X.509 certificate │
|
||||
│ Subject: CN=did:plc:alice123 │
|
||||
│ SAN: URI:did:plc:alice123 │
|
||||
│ Issuer: Hold CA │
|
||||
│ NotBefore: now │
|
||||
│ NotAfter: now + 24 hours │
|
||||
│ KeyUsage: Digital Signature │
|
||||
│ ExtKeyUsage: Code Signing │
|
||||
│ │
|
||||
│ Sign certificate with hold's CA private key │
|
||||
└────────────────────┬─────────────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ 7. Hold signs manifest digest │
|
||||
│ hash := SHA256(manifestBytes) │
|
||||
│ signature := ECDSA_P256(hash, privateKey) │
|
||||
└────────────────────┬─────────────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ 8. Hold creates Notation JWS envelope │
|
||||
│ { │
|
||||
│ "protected": {...}, │
|
||||
│ "payload": "base64(manifestDigest)", │
|
||||
│ "signature": "base64(p256Signature)", │
|
||||
│ "header": { │
|
||||
│ "x5c": [ │
|
||||
│ "base64(userCert)", │
|
||||
│ "base64(holdCACert)" │
|
||||
│ ] │
|
||||
│ } │
|
||||
│ } │
|
||||
└────────────────────┬─────────────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ 9. Hold returns signature to AppView │
|
||||
└────────────────────┬─────────────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ 10. AppView stores Notation signature │
|
||||
│ - Create ORAS artifact manifest │
|
||||
│ - Upload JWS envelope as layer blob │
|
||||
│ - Link to image via subject field │
|
||||
│ - artifactType: application/vnd.cncf.notary... │
|
||||
└──────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### Verification Flow
|
||||
|
||||
```
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ User: notation verify atcr.io/alice/myapp:latest │
|
||||
└────────────────────┬─────────────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ 1. Notation queries Referrers API │
|
||||
│ GET /v2/alice/myapp/referrers/sha256:abc123 │
|
||||
│ → Discovers Notation signature artifact │
|
||||
└────────────────────┬─────────────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ 2. Notation downloads JWS envelope │
|
||||
│ - Parses JSON Web Signature │
|
||||
│ - Extracts certificate chain from x5c header │
|
||||
└────────────────────┬─────────────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ 3. Notation validates certificate chain │
|
||||
│ a. User cert issued by Hold CA? ✓ │
|
||||
│ b. Hold CA cert in trust store? ✓ │
|
||||
│ c. Certificate not expired? ✓ │
|
||||
│ d. Key usage correct? ✓ │
|
||||
│ e. Subject matches policy? ✓ │
|
||||
└────────────────────┬─────────────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ 4. Notation verifies signature │
|
||||
│ a. Extract public key from user certificate │
|
||||
│ b. Compute manifest hash: SHA256(manifest) │
|
||||
│ c. Verify: ECDSA_P256(hash, sig, pubKey) ✓ │
|
||||
└────────────────────┬─────────────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────────────────┐
|
||||
│ 5. Success: Image verified ✓ │
|
||||
│ Signed by: did:plc:alice123 (via Hold CA) │
|
||||
└──────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Implementation
|
||||
|
||||
### Hold CA Certificate Generation
|
||||
|
||||
```go
|
||||
// cmd/hold/main.go - CA initialization
|
||||
func (h *Hold) initializeCA(ctx context.Context) error {
|
||||
caKeyPath := filepath.Join(h.config.DataDir, "ca-private-key.pem")
|
||||
caCertPath := filepath.Join(h.config.DataDir, "ca-certificate.pem")
|
||||
|
||||
// Load existing CA or generate new one
|
||||
if exists(caKeyPath) && exists(caCertPath) {
|
||||
h.caKey = loadPrivateKey(caKeyPath)
|
||||
h.caCert = loadCertificate(caCertPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Generate P-256 key pair for CA
|
||||
caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate CA key: %w", err)
|
||||
}
|
||||
|
||||
// Create CA certificate template
|
||||
serialNumber, _ := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
|
||||
|
||||
template := &x509.Certificate{
|
||||
SerialNumber: serialNumber,
|
||||
Subject: pkix.Name{
|
||||
CommonName: fmt.Sprintf("ATCR Hold CA - %s", h.DID),
|
||||
},
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().AddDate(10, 0, 0), // 10 years
|
||||
|
||||
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
|
||||
BasicConstraintsValid: true,
|
||||
IsCA: true,
|
||||
MaxPathLen: 1, // Allows at most one subordinate CA level (for end-entity-only, use MaxPathLen: 0 with MaxPathLenZero: true)
|
||||
}
|
||||
|
||||
// Self-sign
|
||||
certDER, err := x509.CreateCertificate(
|
||||
rand.Reader,
|
||||
template,
|
||||
template, // Self-signed: issuer = subject
|
||||
&caKey.PublicKey,
|
||||
caKey,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create CA certificate: %w", err)
|
||||
}
|
||||
|
||||
caCert, _ := x509.ParseCertificate(certDER)
|
||||
|
||||
// Save to disk (0600 permissions)
|
||||
savePrivateKey(caKeyPath, caKey)
|
||||
saveCertificate(caCertPath, caCert)
|
||||
|
||||
h.caKey = caKey
|
||||
h.caCert = caCert
|
||||
|
||||
log.Info("Generated new CA certificate", "did", h.DID, "expires", caCert.NotAfter)
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
### User Certificate Issuance
|
||||
|
||||
```go
|
||||
// pkg/hold/cosign.go
|
||||
func (h *Hold) issueUserCertificate(userDID string) (*x509.Certificate, *ecdsa.PrivateKey, error) {
|
||||
// Generate ephemeral P-256 key for user
|
||||
userKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to generate user key: %w", err)
|
||||
}
|
||||
|
||||
serialNumber, _ := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
|
||||
|
||||
// Parse DID for SAN
|
||||
sanURI, _ := url.Parse(userDID)
|
||||
|
||||
template := &x509.Certificate{
|
||||
SerialNumber: serialNumber,
|
||||
Subject: pkix.Name{
|
||||
CommonName: userDID,
|
||||
},
|
||||
URIs: []*url.URL{sanURI}, // Subject Alternative Name
|
||||
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().Add(24 * time.Hour), // Short-lived: 24 hours
|
||||
|
||||
KeyUsage: x509.KeyUsageDigitalSignature,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
|
||||
BasicConstraintsValid: true,
|
||||
IsCA: false,
|
||||
}
|
||||
|
||||
// Sign with hold's CA key
|
||||
certDER, err := x509.CreateCertificate(
|
||||
rand.Reader,
|
||||
template,
|
||||
h.caCert, // Issuer: Hold CA
|
||||
&userKey.PublicKey,
|
||||
h.caKey, // Sign with CA private key
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create user certificate: %w", err)
|
||||
}
|
||||
|
||||
userCert, _ := x509.ParseCertificate(certDER)
|
||||
|
||||
return userCert, userKey, nil
|
||||
}
|
||||
```
|
||||
|
||||
### Co-Signing XRPC Endpoint
|
||||
|
||||
```go
|
||||
// pkg/hold/oci/xrpc.go
|
||||
func (s *Server) handleCoSignManifest(ctx context.Context, req *CoSignRequest) (*CoSignResponse, error) {
|
||||
// 1. Verify caller is authenticated
|
||||
did, err := s.auth.VerifyToken(ctx, req.Token)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("authentication failed: %w", err)
|
||||
}
|
||||
|
||||
// 2. Verify ATProto signature
|
||||
valid, err := s.verifyATProtoSignature(ctx, req.UserDID, req.ManifestDigest, req.ATProtoSignature)
|
||||
if err != nil || !valid {
|
||||
return nil, fmt.Errorf("ATProto signature verification failed: %w", err)
|
||||
}
|
||||
|
||||
// 3. Issue certificate for user
|
||||
userCert, userKey, err := s.hold.issueUserCertificate(req.UserDID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to issue certificate: %w", err)
|
||||
}
|
||||
|
||||
// 4. Sign manifest with user's key
|
||||
manifestHash := sha256.Sum256([]byte(req.ManifestDigest))
|
||||
signature, err := ecdsa.SignASN1(rand.Reader, userKey, manifestHash[:])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to sign manifest: %w", err)
|
||||
}
|
||||
|
||||
// 5. Create JWS envelope
|
||||
jws, err := s.createJWSEnvelope(signature, userCert, s.hold.caCert, req.ManifestDigest)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create JWS: %w", err)
|
||||
}
|
||||
|
||||
return &CoSignResponse{
|
||||
JWS: jws,
|
||||
Certificate: encodeCertificate(userCert),
|
||||
CACertificate: encodeCertificate(s.hold.caCert),
|
||||
}, nil
|
||||
}
|
||||
```
|
||||
|
||||
## Trust Model
|
||||
|
||||
### Centralization Analysis
|
||||
|
||||
**ATProto Model (Decentralized):**
|
||||
- Each PDS is independent
|
||||
- User controls which PDS to use
|
||||
- Trust user's DID, not specific infrastructure
|
||||
- PDS compromise affects only that PDS's users
|
||||
- Multiple PDSs provide redundancy
|
||||
|
||||
**Hold-as-CA Model (Centralized):**
|
||||
- Hold acts as single Certificate Authority
|
||||
- All users must trust hold's CA certificate
|
||||
- Hold compromise = attacker can issue certificates for ANY user
|
||||
- Hold becomes single point of failure
|
||||
- Users depend on hold operator honesty
|
||||
|
||||
### What Hold Vouches For
|
||||
|
||||
When hold issues a certificate, it attests:
|
||||
|
||||
✅ **"I verified that [DID] signed this manifest with ATProto"**
|
||||
- Hold validated ATProto signature
|
||||
- Hold confirmed signature matches user's DID
|
||||
- Hold checked signature at specific time
|
||||
|
||||
❌ **"This image is safe"**
|
||||
- Hold does NOT audit image contents
|
||||
- Certificate ≠ vulnerability scan
|
||||
- Signature ≠ security guarantee
|
||||
|
||||
❌ **"I control this DID"**
|
||||
- Hold does NOT control user's DID
|
||||
- DID ownership is independent
|
||||
- Hold cannot revoke DIDs
|
||||
|
||||
### Threat Model
|
||||
|
||||
**Scenario 1: Hold Private Key Compromise**
|
||||
|
||||
**Attack:**
|
||||
- Attacker steals hold's CA private key
|
||||
- Can issue certificates for any DID
|
||||
- Can sign malicious images as any user
|
||||
|
||||
**Impact:**
|
||||
- **CRITICAL** - All users affected
|
||||
- Attacker can impersonate any user
|
||||
- All signatures become untrustworthy
|
||||
|
||||
**Detection:**
|
||||
- Certificate Transparency logs (if implemented)
|
||||
- Unusual certificate issuance patterns
|
||||
- Users report unexpected signatures
|
||||
|
||||
**Mitigation:**
|
||||
- Store CA key in Hardware Security Module (HSM)
|
||||
- Strict access controls
|
||||
- Audit logging
|
||||
- Regular key rotation
|
||||
|
||||
**Recovery:**
|
||||
- Revoke compromised CA certificate
|
||||
- Generate new CA certificate
|
||||
- Re-issue all active certificates
|
||||
- Notify all users
|
||||
- Update trust stores
|
||||
|
||||
---
|
||||
|
||||
**Scenario 2: Malicious Hold Operator**
|
||||
|
||||
**Attack:**
|
||||
- Hold operator issues certificates without verifying ATProto signatures
|
||||
- Hold operator signs malicious images
|
||||
- Hold operator backdates certificates
|
||||
|
||||
**Impact:**
|
||||
- **HIGH** - Trust model broken
|
||||
- Users receive signed malicious images
|
||||
- Difficult to detect without ATProto cross-check
|
||||
|
||||
**Detection:**
|
||||
- Compare Notation signature timestamp with ATProto commit time
|
||||
- Verify ATProto signature exists independently
|
||||
- Monitor hold's signing patterns
|
||||
|
||||
**Mitigation:**
|
||||
- Audit trail linking certificates to ATProto signatures
|
||||
- Public transparency logs
|
||||
- Multi-signature requirements
|
||||
- Periodically verify ATProto signatures
|
||||
|
||||
**Recovery:**
|
||||
- Identify malicious certificates
|
||||
- Revoke hold's CA trust
|
||||
- Switch to different hold
|
||||
- Re-verify all images
|
||||
|
||||
---
|
||||
|
||||
**Scenario 3: Certificate Theft**
|
||||
|
||||
**Attack:**
|
||||
- Attacker steals issued user certificate + private key
|
||||
- Uses it to sign malicious images
|
||||
|
||||
**Impact:**
|
||||
- **LOW-MEDIUM** - Limited scope
|
||||
- Affects only specific user/image
|
||||
- Short validity period (24 hours)
|
||||
|
||||
**Detection:**
|
||||
- Unexpected signature timestamps
|
||||
- Images signed from unknown locations
|
||||
|
||||
**Mitigation:**
|
||||
- Short certificate validity (24 hours)
|
||||
- Ephemeral keys (not stored long-term)
|
||||
- Certificate revocation if detected
|
||||
|
||||
**Recovery:**
|
||||
- Wait for certificate expiration (24 hours)
|
||||
- Revoke specific certificate
|
||||
- Investigate compromise source
|
||||
|
||||
## Certificate Management
|
||||
|
||||
### Expiration Strategy
|
||||
|
||||
**Short-Lived Certificates (24 hours):**
|
||||
|
||||
**Pros:**
|
||||
- ✅ Minimal revocation infrastructure needed
|
||||
- ✅ Compromise window is tiny
|
||||
- ✅ Automatic cleanup
|
||||
- ✅ Lower CRL/OCSP overhead
|
||||
|
||||
**Cons:**
|
||||
- ❌ Old images become unverifiable quickly
|
||||
- ❌ Requires re-signing for historical verification
|
||||
- ❌ Storage: multiple signatures for same image
|
||||
|
||||
**Solution: On-Demand Re-Signing**
|
||||
```
|
||||
User pulls old image → Notation verification fails (expired cert)
|
||||
→ User requests re-signing: POST /xrpc/io.atcr.hold.reSignManifest
|
||||
→ Hold verifies ATProto signature still valid
|
||||
→ Hold issues new certificate (24 hours)
|
||||
→ Hold creates new Notation signature
|
||||
→ User can verify with fresh certificate
|
||||
```
|
||||
|
||||
### Revocation
|
||||
|
||||
**Certificate Revocation List (CRL):**
|
||||
```
|
||||
Hold publishes CRL at: https://hold01.atcr.io/ca.crl
|
||||
|
||||
Notation configured to check CRL:
|
||||
{
|
||||
"trustPolicies": [{
|
||||
"name": "atcr-images",
|
||||
"signatureVerification": {
|
||||
"verificationLevel": "strict",
|
||||
"override": {
|
||||
"revocationValidation": "strict"
|
||||
}
|
||||
}
|
||||
}]
|
||||
}
|
||||
```
|
||||
|
||||
**OCSP (Online Certificate Status Protocol):**
|
||||
- Hold runs OCSP responder: `https://hold01.atcr.io/ocsp`
|
||||
- Real-time certificate status checks
|
||||
- Lower overhead than CRL downloads
|
||||
|
||||
**Revocation Triggers:**
|
||||
- Key compromise detected
|
||||
- Malicious signing detected
|
||||
- User request
|
||||
- DID ownership change
|
||||
|
||||
### CA Key Rotation
|
||||
|
||||
**Rotation Procedure:**
|
||||
|
||||
1. **Generate new CA key pair**
|
||||
2. **Create new CA certificate**
|
||||
3. **Cross-sign old CA with new CA** (transition period)
|
||||
4. **Distribute new CA certificate** to all users
|
||||
5. **Begin issuing with new CA** for new signatures
|
||||
6. **Grace period** (30 days): Accept both old and new CA
|
||||
7. **Retire old CA** after grace period
|
||||
|
||||
**Frequency:** Every 2-3 years (longer than short-lived certs)
|
||||
|
||||
## Trust Store Distribution
|
||||
|
||||
### Problem
|
||||
|
||||
Users must add hold's CA certificate to their Notation trust store for verification to work.
|
||||
|
||||
### Manual Distribution
|
||||
|
||||
```bash
|
||||
# 1. Download hold's CA certificate
|
||||
curl https://hold01.atcr.io/ca.crt -o hold01-ca.crt
|
||||
|
||||
# 2. Verify fingerprint (out-of-band)
|
||||
openssl x509 -in hold01-ca.crt -fingerprint -noout
|
||||
# Compare with published fingerprint
|
||||
|
||||
# 3. Add to Notation trust store
|
||||
notation cert add --type ca --store atcr-holds hold01-ca.crt
|
||||
```
|
||||
|
||||
### Automated Distribution
|
||||
|
||||
**ATCR CLI tool:**
|
||||
```bash
|
||||
atcr trust add hold01.atcr.io
|
||||
# → Fetches CA certificate
|
||||
# → Verifies via HTTPS + DNSSEC
|
||||
# → Adds to Notation trust store
|
||||
# → Configures trust policy
|
||||
|
||||
atcr trust list
|
||||
# → Shows trusted holds with fingerprints
|
||||
```
|
||||
|
||||
### System-Wide Trust
|
||||
|
||||
**For enterprise deployments:**
|
||||
|
||||
**Debian/Ubuntu:**
|
||||
```bash
|
||||
# Install CA certificate system-wide
|
||||
cp hold01-ca.crt /usr/local/share/ca-certificates/atcr-hold01.crt
|
||||
update-ca-certificates
|
||||
```
|
||||
|
||||
**RHEL/CentOS:**
|
||||
```bash
|
||||
cp hold01-ca.crt /etc/pki/ca-trust/source/anchors/
|
||||
update-ca-trust
|
||||
```
|
||||
|
||||
**Container images:**
|
||||
```dockerfile
|
||||
FROM ubuntu:22.04
|
||||
COPY hold01-ca.crt /usr/local/share/ca-certificates/
|
||||
RUN update-ca-certificates
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Hold Service
|
||||
|
||||
**Environment variables:**
|
||||
```bash
|
||||
# Enable co-signing feature
|
||||
HOLD_COSIGN_ENABLED=true
|
||||
|
||||
# CA certificate and key paths
|
||||
HOLD_CA_CERT_PATH=/var/lib/atcr/hold/ca-certificate.pem
|
||||
HOLD_CA_KEY_PATH=/var/lib/atcr/hold/ca-private-key.pem
|
||||
|
||||
# Certificate validity
|
||||
HOLD_CERT_VALIDITY_HOURS=24
|
||||
|
||||
# OCSP responder
|
||||
HOLD_OCSP_ENABLED=true
|
||||
HOLD_OCSP_URL=https://hold01.atcr.io/ocsp
|
||||
|
||||
# CRL distribution
|
||||
HOLD_CRL_ENABLED=true
|
||||
HOLD_CRL_URL=https://hold01.atcr.io/ca.crl
|
||||
```
|
||||
|
||||
### Notation Trust Policy
|
||||
|
||||
```json
|
||||
{
|
||||
"version": "1.0",
|
||||
"trustPolicies": [{
|
||||
"name": "atcr-images",
|
||||
"registryScopes": ["atcr.io/*/*"],
|
||||
"signatureVerification": {
|
||||
"level": "strict",
|
||||
"override": {
|
||||
"revocationValidation": "strict"
|
||||
}
|
||||
},
|
||||
"trustStores": ["ca:atcr-holds"],
|
||||
"trustedIdentities": [
|
||||
"x509.subject: CN=did:plc:*",
|
||||
"x509.subject: CN=did:web:*"
|
||||
]
|
||||
}]
|
||||
}
|
||||
```
|
||||
|
||||
## When to Use Hold-as-CA
|
||||
|
||||
### ✅ Use When
|
||||
|
||||
**Enterprise X.509 PKI Compliance:**
|
||||
- Organization requires standard X.509 certificates
|
||||
- Existing security policies mandate PKI
|
||||
- Audit requirements for certificate chains
|
||||
- Integration with existing CA infrastructure
|
||||
|
||||
**Tool Compatibility:**
|
||||
- Must use standard Notation without plugins
|
||||
- Cannot deploy custom verification tools
|
||||
- Existing tooling expects X.509 signatures
|
||||
|
||||
**Centralized Trust Acceptable:**
|
||||
- Organization already uses centralized trust model
|
||||
- Hold operator is internal/trusted team
|
||||
- Centralization risk is acceptable trade-off
|
||||
|
||||
### ❌ Don't Use When
|
||||
|
||||
**Default Deployment:**
|
||||
- Most users should use [plugin-based approach](./INTEGRATION_STRATEGY.md)
|
||||
- Plugins maintain decentralization
|
||||
- Plugins reuse existing ATProto signatures
|
||||
|
||||
**Small Teams / Startups:**
|
||||
- Certificate management overhead too high
|
||||
- Don't need X.509 compliance
|
||||
- Prefer simpler architecture
|
||||
|
||||
**Maximum Decentralization Required:**
|
||||
- Cannot accept hold as single trust point
|
||||
- Must maintain pure ATProto model
|
||||
- Centralization contradicts project goals
|
||||
|
||||
## Comparison: Hold-as-CA vs. Plugins
|
||||
|
||||
| Aspect | Hold-as-CA | Plugin Approach |
|
||||
|--------|------------|----------------|
|
||||
| **Standard compliance** | ✅ Full X.509/PKI | ⚠️ Custom verification |
|
||||
| **Tool compatibility** | ✅ Notation works unchanged | ❌ Requires plugin install |
|
||||
| **Decentralization** | ❌ Centralized (hold CA) | ✅ Decentralized (DIDs) |
|
||||
| **ATProto alignment** | ❌ Against philosophy | ✅ ATProto-native |
|
||||
| **Signature reuse** | ❌ Must re-sign (P-256) | ✅ Reuses ATProto (K-256) |
|
||||
| **Certificate mgmt** | 🔴 High overhead | 🟢 None |
|
||||
| **Trust distribution** | 🔴 Must distribute CA cert | 🟢 DID resolution |
|
||||
| **Hold compromise** | 🔴 All users affected | 🟢 Metadata only |
|
||||
| **Operational cost** | 🔴 High | 🟢 Low |
|
||||
| **Use case** | Enterprise PKI | General purpose |
|
||||
|
||||
## Recommendations
|
||||
|
||||
### Default Approach: Plugins
|
||||
|
||||
For most deployments, use plugin-based verification:
|
||||
- **Ratify plugin** for Kubernetes
|
||||
- **OPA Gatekeeper provider** for policy enforcement
|
||||
- **Containerd verifier** for runtime checks
|
||||
- **atcr-verify CLI** for general purpose
|
||||
|
||||
See [Integration Strategy](./INTEGRATION_STRATEGY.md) for details.
|
||||
|
||||
### Optional: Hold-as-CA for Enterprise
|
||||
|
||||
Only implement hold-as-CA if you have specific requirements:
|
||||
- Enterprise X.509 PKI mandates
|
||||
- Cannot use plugins (restricted environments)
|
||||
- Accept centralization trade-off
|
||||
|
||||
**Implement as opt-in feature:**
|
||||
```bash
|
||||
# Users explicitly enable co-signing
|
||||
docker push atcr.io/alice/myapp:latest --sign=notation
|
||||
|
||||
# Or via environment variable
|
||||
export ATCR_ENABLE_COSIGN=true
|
||||
docker push atcr.io/alice/myapp:latest
|
||||
```
|
||||
|
||||
### Security Best Practices
|
||||
|
||||
**If implementing hold-as-CA:**
|
||||
|
||||
1. **Store CA key in HSM** - Never on filesystem
|
||||
2. **Audit all certificate issuance** - Log every cert
|
||||
3. **Public transparency log** - Publish all certificates
|
||||
4. **Short certificate validity** - 24 hours max
|
||||
5. **Monitor unusual patterns** - Alert on anomalies
|
||||
6. **Regular CA key rotation** - Every 2-3 years
|
||||
7. **Cross-check ATProto** - Verify both signatures match
|
||||
8. **Incident response plan** - Prepare for compromise
|
||||
|
||||
## See Also
|
||||
|
||||
- [ATProto Signatures](./ATPROTO_SIGNATURES.md) - How ATProto signing works
|
||||
- [Integration Strategy](./INTEGRATION_STRATEGY.md) - Overview of integration approaches
|
||||
- [Signature Integration](./SIGNATURE_INTEGRATION.md) - Tool-specific integration guides
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,692 +0,0 @@
|
||||
# ATCR Signature Verification Integration Strategy
|
||||
|
||||
## Overview
|
||||
|
||||
This document provides a comprehensive overview of how to integrate ATProto signature verification into various tools and workflows. ATCR uses a layered approach that provides maximum compatibility while maintaining ATProto's decentralized philosophy.
|
||||
|
||||
## Architecture Layers
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────┐
|
||||
│ Layer 4: Applications & Workflows │
|
||||
│ - CI/CD pipelines │
|
||||
│ - Kubernetes admission control │
|
||||
│ - Runtime verification │
|
||||
│ - Security scanning │
|
||||
└──────────────────────┬──────────────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────────────────────┐
|
||||
│ Layer 3: Integration Methods │
|
||||
│ - Plugins (Ratify, Gatekeeper, Containerd) │
|
||||
│ - CLI tools (atcr-verify) │
|
||||
│ - External services (webhooks, APIs) │
|
||||
│ - (Optional) X.509 certificates (hold-as-CA) │
|
||||
└──────────────────────┬──────────────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────────────────────┐
|
||||
│ Layer 2: Signature Discovery │
|
||||
│ - OCI Referrers API (GET /v2/.../referrers/...) │
|
||||
│ - ORAS artifact format │
|
||||
│ - artifactType: application/vnd.atproto.signature... │
|
||||
└──────────────────────┬──────────────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────────────────────┐
|
||||
│ Layer 1: ATProto Signatures (Foundation) │
|
||||
│ - Manifests signed by PDS (K-256) │
|
||||
│ - Signatures in ATProto repository commits │
|
||||
│ - Public keys in DID documents │
|
||||
│ - DID-based identity │
|
||||
└─────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Integration Approaches
|
||||
|
||||
### Approach 1: Plugin-Based (RECOMMENDED) ⭐
|
||||
|
||||
**Best for:** Kubernetes, standard tooling, production deployments
|
||||
|
||||
Integrate through plugin systems of existing tools:
|
||||
|
||||
#### Ratify Verifier Plugin
|
||||
- **Use case:** Kubernetes admission control via Gatekeeper
|
||||
- **Effort:** 2-3 weeks to build
|
||||
- **Maturity:** CNCF Sandbox project, growing adoption
|
||||
- **Benefits:**
|
||||
- ✅ Standard plugin interface
|
||||
- ✅ Works with existing Ratify deployments
|
||||
- ✅ Policy-based enforcement
|
||||
- ✅ Multi-verifier support (can combine with Notation, Cosign)
|
||||
|
||||
**Implementation:**
|
||||
```go
|
||||
// Ratify plugin interface
|
||||
type ReferenceVerifier interface {
|
||||
VerifyReference(
|
||||
ctx context.Context,
|
||||
subjectRef common.Reference,
|
||||
referenceDesc ocispecs.ReferenceDescriptor,
|
||||
store referrerStore.ReferrerStore,
|
||||
) (VerifierResult, error)
|
||||
}
|
||||
```
|
||||
|
||||
**Deployment:**
|
||||
```yaml
|
||||
apiVersion: config.ratify.deislabs.io/v1beta1
|
||||
kind: Verifier
|
||||
metadata:
|
||||
name: atcr-verifier
|
||||
spec:
|
||||
name: atproto
|
||||
artifactType: application/vnd.atproto.signature.v1+json
|
||||
parameters:
|
||||
trustedDIDs:
|
||||
- did:plc:alice123
|
||||
```
|
||||
|
||||
See [Ratify Integration Guide](./SIGNATURE_INTEGRATION.md#ratify-plugin)
|
||||
|
||||
---
|
||||
|
||||
#### OPA Gatekeeper External Provider
|
||||
- **Use case:** Kubernetes admission control with OPA policies
|
||||
- **Effort:** 2-3 weeks to build
|
||||
- **Maturity:** Very stable, widely adopted
|
||||
- **Benefits:**
|
||||
- ✅ Rego-based policies (flexible)
|
||||
- ✅ External data provider API (standard)
|
||||
- ✅ Can reuse existing Gatekeeper deployments
|
||||
|
||||
**Implementation:**
|
||||
```go
|
||||
// External data provider
|
||||
type Provider struct {
|
||||
verifier *atproto.Verifier
|
||||
}
|
||||
|
||||
func (p *Provider) Provide(ctx context.Context, req ProviderRequest) (*ProviderResponse, error) {
|
||||
image := req.Keys["image"]
|
||||
result, err := p.verifier.Verify(ctx, image)
|
||||
return &ProviderResponse{
|
||||
Data: map[string]bool{"verified": result.Verified},
|
||||
}, nil
|
||||
}
|
||||
```
|
||||
|
||||
**Policy:**
|
||||
```rego
|
||||
package verify
|
||||
|
||||
violation[{"msg": msg}] {
|
||||
container := input.review.object.spec.containers[_]
|
||||
startswith(container.image, "atcr.io/")
|
||||
|
||||
response := external_data({
|
||||
"provider": "atcr-verifier",
|
||||
"keys": ["image"],
|
||||
"values": [container.image]
|
||||
})
|
||||
|
||||
response.verified != true
|
||||
msg := sprintf("Image %v has no valid ATProto signature", [container.image])
|
||||
}
|
||||
```
|
||||
|
||||
See [Gatekeeper Integration Guide](./SIGNATURE_INTEGRATION.md#opa-gatekeeper-external-provider)
|
||||
|
||||
---
|
||||
|
||||
#### Containerd 2.0 Image Verifier Plugin
|
||||
- **Use case:** Runtime verification at image pull time
|
||||
- **Effort:** 1-2 weeks to build
|
||||
- **Maturity:** New in Containerd 2.0 (Nov 2024)
|
||||
- **Benefits:**
|
||||
- ✅ Runtime enforcement (pull-time verification)
|
||||
- ✅ Works for Docker, nerdctl, ctr
|
||||
- ✅ Transparent to users
|
||||
- ✅ No Kubernetes required
|
||||
|
||||
**Limitation:** CRI plugin integration still maturing
|
||||
|
||||
**Implementation:**
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# /usr/local/bin/containerd-verifiers/atcr-verifier
|
||||
# Binary called by containerd on image pull
|
||||
|
||||
# Containerd passes image info via stdin
|
||||
read -r INPUT
|
||||
|
||||
IMAGE=$(echo "$INPUT" | jq -r '.reference')
|
||||
DIGEST=$(echo "$INPUT" | jq -r '.descriptor.digest')
|
||||
|
||||
# Verify signature
|
||||
if atcr-verify "$IMAGE@$DIGEST" --quiet; then
|
||||
exit 0 # Verified
|
||||
else
|
||||
exit 1 # Failed
|
||||
fi
|
||||
```
|
||||
|
||||
**Configuration:**
|
||||
```toml
|
||||
# /etc/containerd/config.toml
|
||||
[plugins."io.containerd.image-verifier.v1.bindir"]
|
||||
bin_dir = "/usr/local/bin/containerd-verifiers"
|
||||
max_verifiers = 5
|
||||
per_verifier_timeout = "10s"
|
||||
```
|
||||
|
||||
See [Containerd Integration Guide](./SIGNATURE_INTEGRATION.md#containerd-20)
|
||||
|
||||
---
|
||||
|
||||
### Approach 2: CLI Tool (RECOMMENDED) ⭐
|
||||
|
||||
**Best for:** CI/CD, scripts, general-purpose verification
|
||||
|
||||
Use `atcr-verify` CLI tool directly in workflows:
|
||||
|
||||
#### Command-Line Verification
|
||||
```bash
|
||||
# Basic verification
|
||||
atcr-verify atcr.io/alice/myapp:latest
|
||||
|
||||
# With trust policy
|
||||
atcr-verify atcr.io/alice/myapp:latest --policy trust-policy.yaml
|
||||
|
||||
# JSON output for scripting
|
||||
atcr-verify atcr.io/alice/myapp:latest --output json
|
||||
|
||||
# Quiet mode for exit codes
|
||||
atcr-verify atcr.io/alice/myapp:latest --quiet && echo "Verified"
|
||||
```
|
||||
|
||||
#### CI/CD Integration
|
||||
|
||||
**GitHub Actions:**
|
||||
```yaml
|
||||
- name: Verify image
|
||||
run: atcr-verify ${{ env.IMAGE }} --policy .github/trust-policy.yaml
|
||||
```
|
||||
|
||||
**GitLab CI:**
|
||||
```yaml
|
||||
verify:
|
||||
image: atcr.io/atcr/verify:latest
|
||||
script:
|
||||
- atcr-verify ${IMAGE} --policy trust-policy.yaml
|
||||
```
|
||||
|
||||
**Universal Container:**
|
||||
```bash
|
||||
docker run --rm atcr.io/atcr/verify:latest verify IMAGE
|
||||
```
|
||||
|
||||
**Benefits:**
|
||||
- ✅ Works everywhere (not just Kubernetes)
|
||||
- ✅ Simple integration (single binary)
|
||||
- ✅ No plugin installation required
|
||||
- ✅ Offline mode support
|
||||
|
||||
See [atcr-verify CLI Documentation](./ATCR_VERIFY_CLI.md)
|
||||
|
||||
---
|
||||
|
||||
### Approach 3: External Services
|
||||
|
||||
**Best for:** Custom admission controllers, API-based verification
|
||||
|
||||
Build verification as a service that tools can call:
|
||||
|
||||
#### Webhook Service
|
||||
```go
|
||||
// HTTP endpoint for verification
|
||||
func (h *Handler) VerifyImage(w http.ResponseWriter, r *http.Request) {
|
||||
image := r.URL.Query().Get("image")
|
||||
|
||||
result, err := h.verifier.Verify(r.Context(), image)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{
|
||||
"verified": result.Verified,
|
||||
"did": result.Signature.DID,
|
||||
"signedAt": result.Signature.SignedAt,
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
#### Usage from Kyverno
|
||||
```yaml
|
||||
verifyImages:
|
||||
- imageReferences:
|
||||
- "atcr.io/*/*"
|
||||
attestors:
|
||||
- entries:
|
||||
- api:
|
||||
url: http://atcr-verify.kube-system/verify?image={{ image }}
|
||||
```
|
||||
|
||||
**Benefits:**
|
||||
- ✅ Flexible integration
|
||||
- ✅ Centralized verification logic
|
||||
- ✅ Caching and rate limiting
|
||||
- ✅ Can add additional checks (vulnerability scanning, etc.)
|
||||
|
||||
---
|
||||
|
||||
### Approach 4: Hold-as-CA (OPTIONAL, ENTERPRISE ONLY)
|
||||
|
||||
**Best for:** Enterprise X.509 PKI compliance requirements
|
||||
|
||||
⚠️ **WARNING:** This approach introduces centralization trade-offs. Only use if you have specific X.509 compliance requirements.
|
||||
|
||||
Hold services act as Certificate Authorities that issue X.509 certificates for users, enabling standard Notation verification.
|
||||
|
||||
**When to use:**
|
||||
- Enterprise requires standard X.509 PKI
|
||||
- Cannot deploy custom plugins
|
||||
- Accept centralization trade-off for tool compatibility
|
||||
|
||||
**When NOT to use:**
|
||||
- Default deployments (use plugins instead)
|
||||
- Maximum decentralization required
|
||||
- Don't need X.509 compliance
|
||||
|
||||
See [Hold-as-CA Architecture](./HOLD_AS_CA.md) for complete details and security implications.
|
||||
|
||||
---
|
||||
|
||||
## Tool Compatibility Matrix
|
||||
|
||||
| Tool | Discover | Verify | Integration Method | Priority | Effort |
|
||||
|------|----------|--------|-------------------|----------|--------|
|
||||
| **Kubernetes** | | | | | |
|
||||
| OPA Gatekeeper | ✅ | ✅ | External provider | **HIGH** | 2-3 weeks |
|
||||
| Ratify | ✅ | ✅ | Verifier plugin | **HIGH** | 2-3 weeks |
|
||||
| Kyverno | ✅ | ⚠️ | External service | MEDIUM | 2 weeks |
|
||||
| Portieris | ❌ | ❌ | N/A (deprecated) | NONE | - |
|
||||
| **Runtime** | | | | | |
|
||||
| Containerd 2.0 | ✅ | ✅ | Bindir plugin | **MED-HIGH** | 1-2 weeks |
|
||||
| CRI-O | ⚠️ | ⚠️ | Upstream contribution | MEDIUM | 3-4 weeks |
|
||||
| Podman | ⚠️ | ⚠️ | Upstream contribution | MEDIUM | 3-4 weeks |
|
||||
| **CI/CD** | | | | | |
|
||||
| GitHub Actions | ✅ | ✅ | Custom action | **HIGH** | 1 week |
|
||||
| GitLab CI | ✅ | ✅ | Container image | **HIGH** | 1 week |
|
||||
| Jenkins/CircleCI | ✅ | ✅ | Container image | HIGH | 1 week |
|
||||
| **Scanners** | | | | | |
|
||||
| Trivy | ✅ | ❌ | N/A (not verifier) | NONE | - |
|
||||
| Snyk | ❌ | ❌ | N/A (not verifier) | NONE | - |
|
||||
| Anchore | ❌ | ❌ | N/A (not verifier) | NONE | - |
|
||||
| **Registries** | | | | | |
|
||||
| Harbor | ✅ | ⚠️ | UI integration | LOW | - |
|
||||
| **OCI Tools** | | | | | |
|
||||
| ORAS CLI | ✅ | ❌ | Already works | Document | - |
|
||||
| Notation | ⚠️ | ⚠️ | Hold-as-CA | OPTIONAL | 3-4 weeks |
|
||||
| Cosign | ❌ | ❌ | Not compatible | NONE | - |
|
||||
| Crane | ✅ | ❌ | Already works | Document | - |
|
||||
| Skopeo | ⚠️ | ⚠️ | Upstream contribution | LOW | 3-4 weeks |
|
||||
|
||||
**Legend:**
|
||||
- ✅ Works / Feasible
|
||||
- ⚠️ Partial / Requires changes
|
||||
- ❌ Not applicable / Not feasible
|
||||
|
||||
---
|
||||
|
||||
## Implementation Roadmap
|
||||
|
||||
### Phase 1: Foundation (4-5 weeks) ⭐
|
||||
|
||||
**Goal:** Core verification capability
|
||||
|
||||
1. **atcr-verify CLI tool** (Week 1-2)
|
||||
- ATProto signature verification
|
||||
- Trust policy support
|
||||
- Multiple output formats
|
||||
- Offline mode
|
||||
|
||||
2. **OCI Referrers API** (Week 2-3)
|
||||
- AppView endpoint implementation
|
||||
- ORAS artifact serving
|
||||
- Integration with existing SBOM pattern
|
||||
|
||||
3. **CI/CD Container Image** (Week 3)
|
||||
- Universal verification image
|
||||
- Documentation for GitHub Actions, GitLab CI
|
||||
- Example workflows
|
||||
|
||||
4. **Documentation** (Week 4-5)
|
||||
- Integration guides
|
||||
- Trust policy examples
|
||||
- Troubleshooting guides
|
||||
|
||||
**Deliverables:**
|
||||
- `atcr-verify` binary (Linux, macOS, Windows)
|
||||
- `atcr.io/atcr/verify:latest` container image
|
||||
- OCI Referrers API implementation
|
||||
- Complete documentation
|
||||
|
||||
---
|
||||
|
||||
### Phase 2: Kubernetes Integration (3-4 weeks)
|
||||
|
||||
**Goal:** Production-ready Kubernetes admission control
|
||||
|
||||
5. **OPA Gatekeeper Provider** (Week 1-2)
|
||||
- External data provider service
|
||||
- Helm chart for deployment
|
||||
- Example policies
|
||||
|
||||
6. **Ratify Plugin** (Week 2-3)
|
||||
- Verifier plugin implementation
|
||||
- Testing with Ratify
|
||||
- Documentation
|
||||
|
||||
7. **Kubernetes Examples** (Week 4)
|
||||
- Deployment manifests
|
||||
- Policy examples
|
||||
- Integration testing
|
||||
|
||||
**Deliverables:**
|
||||
- `atcr-gatekeeper-provider` service
|
||||
- Ratify plugin binary
|
||||
- Kubernetes deployment examples
|
||||
- Production deployment guide
|
||||
|
||||
---
|
||||
|
||||
### Phase 3: Runtime Verification (2-3 weeks)
|
||||
|
||||
**Goal:** Pull-time verification
|
||||
|
||||
8. **Containerd Plugin** (Week 1-2)
|
||||
- Bindir verifier implementation
|
||||
- Configuration documentation
|
||||
- Testing with Docker, nerdctl
|
||||
|
||||
9. **CRI-O/Podman Integration** (Week 3, optional)
|
||||
- Upstream contribution (if accepted)
|
||||
- Policy.json extension
|
||||
- Documentation
|
||||
|
||||
**Deliverables:**
|
||||
- Containerd verifier binary
|
||||
- Configuration guides
|
||||
- Runtime verification examples
|
||||
|
||||
---
|
||||
|
||||
### Phase 4: Optional Features (2-3 weeks)
|
||||
|
||||
**Goal:** Enterprise features (if demanded)
|
||||
|
||||
10. **Hold-as-CA** (Week 1-2, optional)
|
||||
- Certificate generation
|
||||
- Notation signature creation
|
||||
- Trust store distribution
|
||||
- **Only if enterprise customers request**
|
||||
|
||||
11. **Advanced Features** (Week 3, as needed)
|
||||
- Signature transparency log
|
||||
- Multi-signature support
|
||||
- Hardware token integration
|
||||
|
||||
**Deliverables:**
|
||||
- Hold co-signing implementation (if needed)
|
||||
- Advanced feature documentation
|
||||
|
||||
---
|
||||
|
||||
## Decision Matrix
|
||||
|
||||
### Which Integration Approach Should I Use?
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────┐
|
||||
│ Are you using Kubernetes? │
|
||||
└───────────────┬─────────────────────────────────┘
|
||||
│
|
||||
┌────────┴────────┐
|
||||
│ │
|
||||
YES NO
|
||||
│ │
|
||||
↓ ↓
|
||||
┌──────────────┐ ┌──────────────┐
|
||||
│ Using │ │ CI/CD │
|
||||
│ Gatekeeper? │ │ Pipeline? │
|
||||
└──────┬───────┘ └──────┬───────┘
|
||||
│ │
|
||||
┌────┴────┐ ┌────┴────┐
|
||||
YES NO YES NO
|
||||
│ │ │ │
|
||||
↓ ↓ ↓ ↓
|
||||
External Ratify GitHub Universal
|
||||
Provider Plugin Action CLI Tool
|
||||
```
|
||||
|
||||
#### Use OPA Gatekeeper Provider if:
|
||||
- ✅ Already using Gatekeeper
|
||||
- ✅ Want Rego-based policies
|
||||
- ✅ Need flexible policy logic
|
||||
|
||||
#### Use Ratify Plugin if:
|
||||
- ✅ Using Ratify (or planning to)
|
||||
- ✅ Want standard plugin interface
|
||||
- ✅ Need multi-verifier support (Notation + Cosign + ATProto)
|
||||
|
||||
#### Use atcr-verify CLI if:
|
||||
- ✅ CI/CD pipelines
|
||||
- ✅ Local development
|
||||
- ✅ Non-Kubernetes environments
|
||||
- ✅ Want simple integration
|
||||
|
||||
#### Use Containerd Plugin if:
|
||||
- ✅ Need runtime enforcement
|
||||
- ✅ Want pull-time verification
|
||||
- ✅ Using Containerd 2.0+
|
||||
|
||||
#### Use Hold-as-CA if:
|
||||
- ⚠️ Enterprise X.509 PKI compliance required
|
||||
- ⚠️ Cannot deploy plugins
|
||||
- ⚠️ Accept centralization trade-off
|
||||
|
||||
---
|
||||
|
||||
## Best Practices
|
||||
|
||||
### 1. Start Simple
|
||||
|
||||
Begin with CLI tool integration in CI/CD:
|
||||
```bash
|
||||
# Add to .github/workflows/deploy.yml
|
||||
- run: atcr-verify $IMAGE --policy .github/trust-policy.yaml
|
||||
```
|
||||
|
||||
### 2. Define Trust Policies
|
||||
|
||||
Create trust policies early:
|
||||
```yaml
|
||||
# trust-policy.yaml
|
||||
policies:
|
||||
- name: production
|
||||
scope: "atcr.io/*/prod-*"
|
||||
require:
|
||||
signature: true
|
||||
trustedDIDs: [did:plc:devops-team]
|
||||
action: enforce
|
||||
```
|
||||
|
||||
### 3. Progressive Rollout
|
||||
|
||||
1. **Week 1:** Add verification to CI/CD (audit mode)
|
||||
2. **Week 2:** Enforce in CI/CD
|
||||
3. **Week 3:** Add Kubernetes admission control (audit mode)
|
||||
4. **Week 4:** Enforce in Kubernetes
|
||||
|
||||
### 4. Monitor and Alert
|
||||
|
||||
Track verification metrics:
|
||||
- Verification success/failure rates
|
||||
- Policy violations
|
||||
- Signature coverage (% of images signed)
|
||||
|
||||
### 5. Plan for Key Rotation
|
||||
|
||||
- Document DID key rotation procedures
|
||||
- Test key rotation in non-production
|
||||
- Monitor for unexpected key changes
|
||||
|
||||
---
|
||||
|
||||
## Common Patterns
|
||||
|
||||
### Pattern 1: Multi-Layer Defense
|
||||
|
||||
```
|
||||
1. CI/CD verification (atcr-verify)
|
||||
↓ (blocks unsigned images from being pushed)
|
||||
2. Kubernetes admission (Gatekeeper/Ratify)
|
||||
↓ (blocks unsigned images from running)
|
||||
3. Runtime verification (Containerd plugin)
|
||||
↓ (blocks unsigned images from being pulled)
|
||||
```
|
||||
|
||||
### Pattern 2: Trust Policy Inheritance
|
||||
|
||||
```yaml
|
||||
# Global policy
|
||||
trustedDIDs:
|
||||
- did:plc:security-team # Always trusted
|
||||
|
||||
# Environment-specific policies
|
||||
staging:
|
||||
trustedDIDs:
|
||||
- did:plc:developers # Additional trust for staging
|
||||
|
||||
production:
|
||||
trustedDIDs: [] # Only global trust (security-team)
|
||||
```
|
||||
|
||||
### Pattern 3: Offline Verification
|
||||
|
||||
```bash
|
||||
# Build environment (online)
|
||||
atcr-verify export $IMAGE -o bundle.json
|
||||
|
||||
# Air-gapped environment (offline)
|
||||
atcr-verify $IMAGE --offline --bundle bundle.json
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Migration Guide
|
||||
|
||||
### From Docker Content Trust (DCT)
|
||||
|
||||
DCT is deprecated. Migrate to ATCR signatures:
|
||||
|
||||
**Old (DCT):**
|
||||
```bash
|
||||
export DOCKER_CONTENT_TRUST=1
|
||||
docker push myimage:latest
|
||||
```
|
||||
|
||||
**New (ATCR):**
|
||||
```bash
|
||||
# Signatures created automatically on push
|
||||
docker push atcr.io/myorg/myimage:latest
|
||||
|
||||
# Verify in CI/CD
|
||||
atcr-verify atcr.io/myorg/myimage:latest
|
||||
```
|
||||
|
||||
### From Cosign
|
||||
|
||||
Cosign and ATCR signatures can coexist:
|
||||
|
||||
**Dual signing:**
|
||||
```bash
|
||||
# Push to ATCR (ATProto signature automatic)
|
||||
docker push atcr.io/myorg/myimage:latest
|
||||
|
||||
# Also sign with Cosign (if needed)
|
||||
cosign sign atcr.io/myorg/myimage:latest
|
||||
```
|
||||
|
||||
**Verification:**
|
||||
```bash
|
||||
# Verify ATProto signature
|
||||
atcr-verify atcr.io/myorg/myimage:latest
|
||||
|
||||
# Or verify Cosign signature
|
||||
cosign verify atcr.io/myorg/myimage:latest --key cosign.pub
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Signatures Not Found
|
||||
|
||||
**Symptom:** `atcr-verify` reports "no signature found"
|
||||
|
||||
**Diagnosis:**
|
||||
```bash
|
||||
# Check if Referrers API works
|
||||
curl "https://atcr.io/v2/OWNER/REPO/referrers/DIGEST"
|
||||
|
||||
# Check if signature artifact exists
|
||||
oras discover atcr.io/OWNER/REPO:TAG
|
||||
```
|
||||
|
||||
**Solutions:**
|
||||
1. Verify Referrers API is implemented
|
||||
2. Re-push image to generate signature
|
||||
3. Check AppView logs for signature creation errors
|
||||
|
||||
### DID Resolution Fails
|
||||
|
||||
**Symptom:** Cannot resolve DID to public key
|
||||
|
||||
**Diagnosis:**
|
||||
```bash
|
||||
# Test DID resolution
|
||||
curl https://plc.directory/did:plc:XXXXXX
|
||||
|
||||
# Check DID document has verificationMethod
|
||||
curl https://plc.directory/did:plc:XXXXXX | jq .verificationMethod
|
||||
```
|
||||
|
||||
**Solutions:**
|
||||
1. Check internet connectivity
|
||||
2. Verify DID is valid
|
||||
3. Ensure DID document contains public key
|
||||
|
||||
### Policy Violations
|
||||
|
||||
**Symptom:** Verification fails with "trust policy violation"
|
||||
|
||||
**Diagnosis:**
|
||||
```bash
|
||||
# Verify with verbose output
|
||||
atcr-verify IMAGE --policy policy.yaml --verbose
|
||||
```
|
||||
|
||||
**Solutions:**
|
||||
1. Add DID to trustedDIDs list
|
||||
2. Check signature age vs. maxAge
|
||||
3. Verify policy scope matches image
|
||||
|
||||
---
|
||||
|
||||
## See Also
|
||||
|
||||
- [ATProto Signatures](./ATPROTO_SIGNATURES.md) - Technical foundation
|
||||
- [atcr-verify CLI](./ATCR_VERIFY_CLI.md) - CLI tool documentation
|
||||
- [Signature Integration](./SIGNATURE_INTEGRATION.md) - Tool-specific guides
|
||||
- [Hold-as-CA](./HOLD_AS_CA.md) - X.509 certificate approach (optional)
|
||||
- [Examples](../examples/verification/) - Working code examples
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,500 +0,0 @@
|
||||
# ATProto Signature Verification Plugins and Examples
|
||||
|
||||
This directory contains reference implementations and examples for integrating ATProto signature verification into various tools and workflows.
|
||||
|
||||
## Overview
|
||||
|
||||
ATCR uses ATProto's native signature system to cryptographically sign container images. To integrate signature verification into existing tools (Kubernetes, CI/CD, container runtimes), you can:
|
||||
|
||||
1. **Build plugins** for verification frameworks (Ratify, Gatekeeper, Containerd)
|
||||
2. **Use external services** called by policy engines
|
||||
3. **Integrate CLI tools** in your CI/CD pipelines
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```
|
||||
examples/plugins/
|
||||
├── README.md # This file
|
||||
├── ratify-verifier/ # Ratify plugin for Kubernetes
|
||||
│ ├── README.md
|
||||
│ ├── verifier.go
|
||||
│ ├── config.go
|
||||
│ ├── resolver.go
|
||||
│ ├── crypto.go
|
||||
│ ├── Dockerfile
|
||||
│ ├── deployment.yaml
|
||||
│ └── verifier-crd.yaml
|
||||
├── gatekeeper-provider/ # OPA Gatekeeper external provider
|
||||
│ ├── README.md
|
||||
│ ├── main.go
|
||||
│ ├── verifier.go
|
||||
│ ├── resolver.go
|
||||
│ ├── crypto.go
|
||||
│ ├── Dockerfile
|
||||
│ ├── deployment.yaml
|
||||
│ └── provider-crd.yaml
|
||||
├── containerd-verifier/ # Containerd bindir plugin
|
||||
│ ├── README.md
|
||||
│ ├── main.go
|
||||
│ └── Dockerfile
|
||||
└── ci-cd/ # CI/CD integration examples
|
||||
├── github-actions.yml
|
||||
├── gitlab-ci.yml
|
||||
└── jenkins-pipeline.groovy
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
### For Kubernetes (Recommended)
|
||||
|
||||
**Option A: Ratify Plugin**
|
||||
```bash
|
||||
cd ratify-verifier
|
||||
# Build plugin and deploy to Kubernetes
|
||||
./build.sh
|
||||
kubectl apply -f deployment.yaml
|
||||
kubectl apply -f verifier-crd.yaml
|
||||
```
|
||||
|
||||
**Option B: Gatekeeper Provider**
|
||||
```bash
|
||||
cd gatekeeper-provider
|
||||
# Build and deploy external provider
|
||||
docker build -t atcr.io/atcr/gatekeeper-provider:latest .
|
||||
kubectl apply -f deployment.yaml
|
||||
kubectl apply -f provider-crd.yaml
|
||||
```
|
||||
|
||||
### For CI/CD
|
||||
|
||||
**GitHub Actions**
|
||||
```yaml
|
||||
# Copy examples/plugins/ci-cd/github-actions.yml to .github/workflows/
|
||||
cp ci-cd/github-actions.yml ../.github/workflows/verify-and-deploy.yml
|
||||
```
|
||||
|
||||
**GitLab CI**
|
||||
```yaml
|
||||
# Copy examples/plugins/ci-cd/gitlab-ci.yml to your repo
|
||||
cp ci-cd/gitlab-ci.yml ../.gitlab-ci.yml
|
||||
```
|
||||
|
||||
### For Containerd
|
||||
|
||||
```bash
|
||||
cd containerd-verifier
|
||||
# Build plugin
|
||||
./build.sh
|
||||
# Install to containerd plugins directory
|
||||
sudo cp atcr-verifier /opt/containerd/bin/
|
||||
```
|
||||
|
||||
## Plugins Overview
|
||||
|
||||
### Ratify Verifier Plugin ⭐
|
||||
|
||||
**Use case:** Kubernetes admission control with OPA Gatekeeper
|
||||
|
||||
**How it works:**
|
||||
1. Gatekeeper receives pod creation request
|
||||
2. Calls Ratify verification engine
|
||||
3. Ratify loads ATProto verifier plugin
|
||||
4. Plugin verifies signature and checks trust policy
|
||||
5. Returns allow/deny decision to Gatekeeper
|
||||
|
||||
**Pros:**
|
||||
- Standard Ratify plugin interface
|
||||
- Works with existing Gatekeeper deployments
|
||||
- Can combine with other verifiers (Notation, Cosign)
|
||||
- Policy-based enforcement
|
||||
|
||||
**Cons:**
|
||||
- Requires building custom Ratify image
|
||||
- Plugin must be compiled into image
|
||||
- More complex deployment
|
||||
|
||||
**See:** [ratify-verifier/README.md](./ratify-verifier/README.md)
|
||||
|
||||
### Gatekeeper External Provider ⭐
|
||||
|
||||
**Use case:** Kubernetes admission control with OPA Gatekeeper
|
||||
|
||||
**How it works:**
|
||||
1. Gatekeeper receives pod creation request
|
||||
2. Rego policy calls external data provider API
|
||||
3. Provider verifies ATProto signature
|
||||
4. Returns verification result to Gatekeeper
|
||||
5. Rego policy makes allow/deny decision
|
||||
|
||||
**Pros:**
|
||||
- Simpler deployment (separate service)
|
||||
- Easy to update (no Gatekeeper changes)
|
||||
- Flexible Rego policies
|
||||
- Can add caching, rate limiting
|
||||
|
||||
**Cons:**
|
||||
- Additional service to maintain
|
||||
- Network dependency (provider must be reachable)
|
||||
- Slightly higher latency
|
||||
|
||||
**See:** [gatekeeper-provider/README.md](./gatekeeper-provider/README.md)
|
||||
|
||||
### Containerd Bindir Plugin
|
||||
|
||||
**Use case:** Runtime-level verification for all images
|
||||
|
||||
**How it works:**
|
||||
1. Containerd pulls image
|
||||
2. Calls verifier plugin (bindir)
|
||||
3. Plugin verifies ATProto signature
|
||||
4. Returns result to containerd
|
||||
5. Containerd allows/blocks image
|
||||
|
||||
**Pros:**
|
||||
- Works at runtime level (not just Kubernetes)
|
||||
- CRI-O, Podman support (CRI-compatible)
|
||||
- No Kubernetes required
|
||||
- Applies to all images
|
||||
|
||||
**Cons:**
|
||||
- Containerd 2.0+ required
|
||||
- More complex to debug
|
||||
- Less flexible policies
|
||||
|
||||
**See:** [containerd-verifier/README.md](./containerd-verifier/README.md)
|
||||
|
||||
## CI/CD Integration Examples
|
||||
|
||||
### GitHub Actions
|
||||
|
||||
Complete workflow with:
|
||||
- Image signature verification
|
||||
- DID trust checking
|
||||
- Automated deployment on success
|
||||
|
||||
**See:** [ci-cd/github-actions.yml](./ci-cd/github-actions.yml)
|
||||
|
||||
### GitLab CI
|
||||
|
||||
Pipeline with:
|
||||
- Multi-stage verification
|
||||
- Trust policy enforcement
|
||||
- Manual deployment approval
|
||||
|
||||
**See:** [ci-cd/gitlab-ci.yml](./ci-cd/gitlab-ci.yml)
|
||||
|
||||
### Jenkins
|
||||
|
||||
Declarative pipeline with:
|
||||
- Signature verification stage
|
||||
- Deployment gates
|
||||
- Rollback on failure
|
||||
|
||||
**See:** [ci-cd/jenkins-pipeline.groovy](./ci-cd/jenkins-pipeline.groovy) (coming soon)
|
||||
|
||||
## Common Components
|
||||
|
||||
All plugins share common functionality:
|
||||
|
||||
### DID Resolution
|
||||
|
||||
Resolve DID to public key:
|
||||
```go
|
||||
func ResolveDIDToPublicKey(ctx context.Context, did string) (*PublicKey, error)
|
||||
```
|
||||
|
||||
**Steps:**
|
||||
1. Fetch DID document from PLC directory or did:web
|
||||
2. Extract verification method
|
||||
3. Decode multibase public key
|
||||
4. Parse as K-256 public key
|
||||
|
||||
### PDS Communication
|
||||
|
||||
Fetch repository commit:
|
||||
```go
|
||||
func FetchCommit(ctx context.Context, pdsEndpoint, did, commitCID string) (*Commit, error)
|
||||
```
|
||||
|
||||
**Steps:**
|
||||
1. Call `com.atproto.sync.getRepo` XRPC endpoint
|
||||
2. Parse CAR file response
|
||||
3. Extract commit with matching CID
|
||||
4. Return commit data and signature
|
||||
|
||||
### Signature Verification
|
||||
|
||||
Verify ECDSA K-256 signature:
|
||||
```go
|
||||
func VerifySignature(pubKey *PublicKey, commit *Commit) error
|
||||
```
|
||||
|
||||
**Steps:**
|
||||
1. Extract unsigned commit bytes
|
||||
2. Hash with SHA-256
|
||||
3. Verify ECDSA signature over hash
|
||||
4. Check signature is valid for public key
|
||||
|
||||
### Trust Policy
|
||||
|
||||
Check if DID is trusted:
|
||||
```go
|
||||
func IsTrusted(did string, now time.Time) bool
|
||||
```
|
||||
|
||||
**Steps:**
|
||||
1. Load trust policy from config
|
||||
2. Check if DID in trusted list
|
||||
3. Verify validFrom/expiresAt timestamps
|
||||
4. Return true if trusted
|
||||
|
||||
## Trust Policy Format
|
||||
|
||||
All plugins use the same trust policy format:
|
||||
|
||||
```yaml
|
||||
version: 1.0
|
||||
|
||||
trustedDIDs:
|
||||
did:plc:alice123:
|
||||
name: "Alice (DevOps Lead)"
|
||||
validFrom: "2024-01-01T00:00:00Z"
|
||||
expiresAt: null
|
||||
|
||||
did:plc:bob456:
|
||||
name: "Bob (Security Team)"
|
||||
validFrom: "2024-06-01T00:00:00Z"
|
||||
expiresAt: "2025-12-31T23:59:59Z"
|
||||
|
||||
policies:
|
||||
- name: production-images
|
||||
scope: "atcr.io/*/prod-*"
|
||||
require:
|
||||
signature: true
|
||||
trustedDIDs:
|
||||
- did:plc:alice123
|
||||
- did:plc:bob456
|
||||
minSignatures: 1
|
||||
action: enforce
|
||||
|
||||
- name: dev-images
|
||||
scope: "atcr.io/*/dev-*"
|
||||
require:
|
||||
signature: false
|
||||
action: audit
|
||||
```
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
### Dependencies
|
||||
|
||||
All plugins require:
|
||||
- Go 1.21+ for building
|
||||
- ATProto DID resolution (PLC directory, did:web)
|
||||
- ATProto PDS XRPC API access
|
||||
- ECDSA K-256 signature verification
|
||||
|
||||
### Caching
|
||||
|
||||
Recommended caching strategy:
|
||||
- **DID documents**: 5 minute TTL
|
||||
- **Public keys**: 5 minute TTL
|
||||
- **PDS endpoints**: 5 minute TTL
|
||||
- **Signature results**: 5 minute TTL
|
||||
|
||||
### Error Handling
|
||||
|
||||
Plugins should handle:
|
||||
- DID resolution failures (network, invalid DID)
|
||||
- PDS connectivity issues (timeout, 404, 500)
|
||||
- Invalid signature format
|
||||
- Untrusted DIDs
|
||||
- Network timeouts
|
||||
|
||||
### Logging
|
||||
|
||||
Structured logging with:
|
||||
- `image` - Image being verified
|
||||
- `did` - Signer DID
|
||||
- `duration` - Operation duration
|
||||
- `error` - Error message (if failed)
|
||||
|
||||
### Metrics
|
||||
|
||||
Expose Prometheus metrics:
|
||||
- `atcr_verifications_total{result="verified|failed|error"}`
|
||||
- `atcr_verification_duration_seconds`
|
||||
- `atcr_did_resolutions_total{result="success|failure"}`
|
||||
- `atcr_cache_hits_total`
|
||||
- `atcr_cache_misses_total`
|
||||
|
||||
## Testing
|
||||
|
||||
### Unit Tests
|
||||
|
||||
Test individual components:
|
||||
```bash
|
||||
# Test DID resolution
|
||||
go test ./pkg/resolver -v
|
||||
|
||||
# Test signature verification
|
||||
go test ./pkg/crypto -v
|
||||
|
||||
# Test trust policy
|
||||
go test ./pkg/trust -v
|
||||
```
|
||||
|
||||
### Integration Tests
|
||||
|
||||
Test with real services:
|
||||
```bash
|
||||
# Test against ATCR registry
|
||||
go test ./integration -tags=integration -v
|
||||
|
||||
# Test with test PDS
|
||||
go test ./integration -tags=integration -pds=https://test.pds.example.com
|
||||
```
|
||||
|
||||
### End-to-End Tests
|
||||
|
||||
Test full deployment:
|
||||
```bash
|
||||
# Deploy to test cluster
|
||||
kubectl apply -f test/fixtures/
|
||||
|
||||
# Create pod with signed image (should succeed)
|
||||
kubectl run test-signed --image=atcr.io/test/signed:latest
|
||||
|
||||
# Create pod with unsigned image (should fail)
|
||||
kubectl run test-unsigned --image=atcr.io/test/unsigned:latest
|
||||
```
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
### Latency
|
||||
|
||||
Typical verification latency:
|
||||
- DID resolution: 50-200ms (cached: <1ms)
|
||||
- PDS query: 100-500ms (cached: <1ms)
|
||||
- Signature verification: 1-5ms
|
||||
- **Total**: 150-700ms (uncached), <10ms (cached)
|
||||
|
||||
### Throughput
|
||||
|
||||
Expected throughput (single instance):
|
||||
- Without caching: ~5-10 verifications/second
|
||||
- With caching: ~100-500 verifications/second
|
||||
|
||||
### Scaling
|
||||
|
||||
For high traffic:
|
||||
- Deploy multiple replicas (stateless)
|
||||
- Use Redis for distributed caching
|
||||
- Implement rate limiting
|
||||
- Monitor P95/P99 latency
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Network Policies
|
||||
|
||||
Restrict access to:
|
||||
- DID resolution (PLC directory only)
|
||||
- PDS XRPC endpoints
|
||||
- Internal services only
|
||||
|
||||
### Denial of Service
|
||||
|
||||
Protect against:
|
||||
- High verification request rate
|
||||
- Slow DID resolution
|
||||
- Malicious images with many signatures
|
||||
- Large signature artifacts
|
||||
|
||||
### Trust Model
|
||||
|
||||
Understand trust dependencies:
|
||||
- DID resolution is accurate (PLC directory)
|
||||
- PDS serves correct records
|
||||
- Private keys are secure
|
||||
- Trust policy is maintained
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Plugin Not Loading
|
||||
|
||||
```bash
|
||||
# Check plugin exists
|
||||
ls -la /path/to/plugin
|
||||
|
||||
# Check plugin is executable
|
||||
chmod +x /path/to/plugin
|
||||
|
||||
# Check plugin logs
|
||||
tail -f /var/log/atcr-verifier.log
|
||||
```
|
||||
|
||||
### Verification Failing
|
||||
|
||||
```bash
|
||||
# Test DID resolution
|
||||
curl https://plc.directory/did:plc:alice123
|
||||
|
||||
# Test PDS connectivity
|
||||
curl https://bsky.social/xrpc/com.atproto.server.describeServer
|
||||
|
||||
# Test signature exists
|
||||
oras discover atcr.io/alice/myapp:latest \
|
||||
--artifact-type application/vnd.atproto.signature.v1+json
|
||||
```
|
||||
|
||||
### Policy Not Enforcing
|
||||
|
||||
```bash
|
||||
# Check policy is loaded
|
||||
kubectl get configmap atcr-trust-policy -n gatekeeper-system
|
||||
|
||||
# Check constraint is active
|
||||
kubectl get constraint atcr-signatures-required -o yaml
|
||||
|
||||
# Check logs
|
||||
kubectl logs -n gatekeeper-system deployment/ratify
|
||||
```
|
||||
|
||||
## See Also
|
||||
|
||||
### Documentation
|
||||
|
||||
- [ATProto Signatures](../../docs/ATPROTO_SIGNATURES.md) - Technical deep-dive
|
||||
- [Signature Integration](../../docs/SIGNATURE_INTEGRATION.md) - Tool-specific guides
|
||||
- [Integration Strategy](../../docs/INTEGRATION_STRATEGY.md) - High-level overview
|
||||
- [atcr-verify CLI](../../docs/ATCR_VERIFY_CLI.md) - CLI tool specification
|
||||
|
||||
### Examples
|
||||
|
||||
- [Verification Scripts](../verification/) - Shell scripts for manual verification
|
||||
- [Kubernetes Webhook](../verification/kubernetes-webhook.yaml) - Custom webhook example
|
||||
|
||||
### External Resources
|
||||
|
||||
- [Ratify](https://ratify.dev/) - Verification framework
|
||||
- [OPA Gatekeeper](https://open-policy-agent.github.io/gatekeeper/) - Policy engine
|
||||
- [Containerd](https://containerd.io/) - Container runtime
|
||||
|
||||
## Support
|
||||
|
||||
For questions or issues:
|
||||
- GitHub Issues: https://github.com/atcr-io/atcr/issues
|
||||
- Documentation: https://docs.atcr.io
|
||||
- Security: security@atcr.io
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions welcome! Please:
|
||||
1. Follow existing code structure
|
||||
2. Add tests for new features
|
||||
3. Update documentation
|
||||
4. Submit pull request
|
||||
|
||||
## License
|
||||
|
||||
See [LICENSE](../../LICENSE) file in repository root.
|
||||
@@ -1,166 +0,0 @@
|
||||
# GitHub Actions workflow for verifying ATProto signatures
|
||||
|
||||
name: Verify and Deploy
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
env:
|
||||
REGISTRY: atcr.io
|
||||
IMAGE_NAME: ${{ github.repository }}
|
||||
|
||||
jobs:
|
||||
verify-signature:
|
||||
name: Verify Image Signature
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up image tag
|
||||
id: vars
|
||||
run: |
|
||||
echo "IMAGE_TAG=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Install verification tools
|
||||
run: |
|
||||
# Install ORAS
|
||||
curl -LO https://github.com/oras-project/oras/releases/download/v1.0.0/oras_1.0.0_linux_amd64.tar.gz
|
||||
tar -xzf oras_1.0.0_linux_amd64.tar.gz
|
||||
sudo mv oras /usr/local/bin/
|
||||
|
||||
# Install crane
|
||||
curl -sL "https://github.com/google/go-containerregistry/releases/download/v0.15.2/go-containerregistry_Linux_x86_64.tar.gz" > crane.tar.gz
|
||||
tar -xzf crane.tar.gz
|
||||
sudo mv crane /usr/local/bin/
|
||||
|
||||
# Install atcr-verify (when available)
|
||||
# curl -LO https://github.com/atcr-io/atcr/releases/latest/download/atcr-verify
|
||||
# chmod +x atcr-verify
|
||||
# sudo mv atcr-verify /usr/local/bin/
|
||||
|
||||
- name: Check for signature
|
||||
id: check_signature
|
||||
run: |
|
||||
IMAGE="${{ steps.vars.outputs.IMAGE_TAG }}"
|
||||
echo "Checking signature for $IMAGE"
|
||||
|
||||
# Get image digest
|
||||
DIGEST=$(crane digest "$IMAGE")
|
||||
echo "Image digest: $DIGEST"
|
||||
|
||||
# Check for ATProto signature using ORAS
|
||||
REPO=$(echo "$IMAGE" | cut -d: -f1)
|
||||
REFERRERS=$(curl -s "https://${{ env.REGISTRY }}/v2/${REPO#${{ env.REGISTRY }}/}/referrers/${DIGEST}?artifactType=application/vnd.atproto.signature.v1+json")
|
||||
|
||||
SIG_COUNT=$(echo "$REFERRERS" | jq '.manifests | length')
|
||||
|
||||
if [ "$SIG_COUNT" -eq 0 ]; then
|
||||
echo "❌ No ATProto signature found"
|
||||
echo "has_signature=false" >> $GITHUB_OUTPUT
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ Found $SIG_COUNT signature(s)"
|
||||
echo "has_signature=true" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Verify signature (full verification)
|
||||
if: steps.check_signature.outputs.has_signature == 'true'
|
||||
run: |
|
||||
IMAGE="${{ steps.vars.outputs.IMAGE_TAG }}"
|
||||
|
||||
# Option 1: Use atcr-verify CLI (when available)
|
||||
# atcr-verify "$IMAGE" --policy .atcr/trust-policy.yaml
|
||||
|
||||
# Option 2: Use shell script
|
||||
chmod +x examples/verification/atcr-verify.sh
|
||||
./examples/verification/atcr-verify.sh "$IMAGE"
|
||||
|
||||
echo "✓ Signature verified successfully"
|
||||
|
||||
- name: Verify signer DID
|
||||
if: steps.check_signature.outputs.has_signature == 'true'
|
||||
run: |
|
||||
IMAGE="${{ steps.vars.outputs.IMAGE_TAG }}"
|
||||
|
||||
# Get signature metadata
|
||||
DIGEST=$(crane digest "$IMAGE")
|
||||
REPO=$(echo "$IMAGE" | cut -d: -f1)
|
||||
|
||||
REFERRERS=$(curl -s "https://${{ env.REGISTRY }}/v2/${REPO#${{ env.REGISTRY }}/}/referrers/${DIGEST}?artifactType=application/vnd.atproto.signature.v1+json")
|
||||
SIG_DIGEST=$(echo "$REFERRERS" | jq -r '.manifests[0].digest')
|
||||
|
||||
# Pull signature artifact
|
||||
oras pull "${REPO}@${SIG_DIGEST}" -o /tmp/sig
|
||||
|
||||
# Extract DID
|
||||
DID=$(jq -r '.atproto.did' /tmp/sig/atproto-signature.json)
|
||||
echo "Signed by DID: $DID"
|
||||
|
||||
# Check against trusted DIDs
|
||||
TRUSTED_DIDS="${{ secrets.TRUSTED_DIDS }}" # e.g., "did:plc:alice123,did:plc:bob456"
|
||||
|
||||
if [[ ",$TRUSTED_DIDS," == *",$DID,"* ]]; then
|
||||
echo "✓ DID is trusted"
|
||||
else
|
||||
echo "❌ DID $DID is not in trusted list"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
deploy:
|
||||
name: Deploy to Kubernetes
|
||||
needs: verify-signature
|
||||
runs-on: ubuntu-latest
|
||||
if: github.ref == 'refs/heads/main'
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up image tag
|
||||
id: vars
|
||||
run: |
|
||||
echo "IMAGE_TAG=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up kubectl
|
||||
uses: azure/setup-kubectl@v3
|
||||
|
||||
- name: Configure kubectl
|
||||
run: |
|
||||
echo "${{ secrets.KUBE_CONFIG }}" | base64 -d > /tmp/kubeconfig
|
||||
export KUBECONFIG=/tmp/kubeconfig
|
||||
|
||||
- name: Deploy to production
|
||||
run: |
|
||||
kubectl set image deployment/myapp \
|
||||
myapp=${{ steps.vars.outputs.IMAGE_TAG }} \
|
||||
-n production
|
||||
|
||||
kubectl rollout status deployment/myapp -n production
|
||||
|
||||
- name: Verify deployment
|
||||
run: |
|
||||
kubectl get pods -n production -l app=myapp
|
||||
|
||||
# Wait for rollout to complete
|
||||
kubectl wait --for=condition=available --timeout=300s \
|
||||
deployment/myapp -n production
|
||||
|
||||
# Alternative: Use atcr-verify action (when available)
|
||||
verify-with-action:
|
||||
name: Verify with ATCR Action
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Verify image signature
|
||||
# uses: atcr-io/atcr-verify-action@v1
|
||||
# with:
|
||||
# image: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }}
|
||||
# policy: .atcr/trust-policy.yaml
|
||||
# fail-on-error: true
|
||||
run: |
|
||||
echo "TODO: Use official atcr-verify GitHub Action"
|
||||
@@ -1,156 +0,0 @@
|
||||
# GitLab CI pipeline for verifying ATProto signatures
|
||||
|
||||
variables:
|
||||
REGISTRY: atcr.io
|
||||
IMAGE_NAME: $CI_PROJECT_PATH
|
||||
IMAGE_TAG: $REGISTRY/$IMAGE_NAME:$CI_COMMIT_SHA
|
||||
|
||||
stages:
|
||||
- build
|
||||
- verify
|
||||
- deploy
|
||||
|
||||
build_image:
|
||||
stage: build
|
||||
image: docker:latest
|
||||
services:
|
||||
- docker:dind
|
||||
script:
|
||||
- docker build -t $IMAGE_TAG .
|
||||
- docker push $IMAGE_TAG
|
||||
|
||||
verify_signature:
|
||||
stage: verify
|
||||
image: alpine:latest
|
||||
before_script:
|
||||
- apk add --no-cache curl jq
|
||||
script:
|
||||
- |
|
||||
echo "Verifying signature for $IMAGE_TAG"
|
||||
|
||||
# Install crane
|
||||
wget https://github.com/google/go-containerregistry/releases/download/v0.15.2/go-containerregistry_Linux_x86_64.tar.gz
|
||||
tar -xzf go-containerregistry_Linux_x86_64.tar.gz crane
|
||||
mv crane /usr/local/bin/
|
||||
|
||||
# Get image digest
|
||||
DIGEST=$(crane digest "$IMAGE_TAG")
|
||||
echo "Image digest: $DIGEST"
|
||||
|
||||
# Extract repository path
|
||||
REPO=$(echo "$IMAGE_TAG" | cut -d: -f1)
|
||||
REPO_PATH=${REPO#$REGISTRY/}
|
||||
|
||||
# Check for ATProto signature
|
||||
REFERRERS=$(curl -s "https://$REGISTRY/v2/$REPO_PATH/referrers/${DIGEST}?artifactType=application/vnd.atproto.signature.v1+json")
|
||||
|
||||
SIG_COUNT=$(echo "$REFERRERS" | jq '.manifests | length')
|
||||
|
||||
if [ "$SIG_COUNT" -eq 0 ]; then
|
||||
echo "❌ No ATProto signature found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ Found $SIG_COUNT signature(s)"
|
||||
|
||||
verify_full:
|
||||
stage: verify
|
||||
image: alpine:latest
|
||||
before_script:
|
||||
- apk add --no-cache curl jq bash
|
||||
script:
|
||||
- |
|
||||
# Option 1: Use atcr-verify CLI (when available)
|
||||
# wget https://github.com/atcr-io/atcr/releases/latest/download/atcr-verify
|
||||
# chmod +x atcr-verify
|
||||
# ./atcr-verify "$IMAGE_TAG" --policy .atcr/trust-policy.yaml
|
||||
|
||||
# Option 2: Use shell script
|
||||
chmod +x examples/verification/atcr-verify.sh
|
||||
./examples/verification/atcr-verify.sh "$IMAGE_TAG"
|
||||
|
||||
echo "✓ Signature verified successfully"
|
||||
|
||||
verify_trust:
|
||||
stage: verify
|
||||
image: alpine:latest
|
||||
before_script:
|
||||
- apk add --no-cache curl jq
|
||||
script:
|
||||
- |
|
||||
# Install crane and ORAS
|
||||
wget https://github.com/google/go-containerregistry/releases/download/v0.15.2/go-containerregistry_Linux_x86_64.tar.gz
|
||||
tar -xzf go-containerregistry_Linux_x86_64.tar.gz crane
|
||||
mv crane /usr/local/bin/
|
||||
|
||||
wget https://github.com/oras-project/oras/releases/download/v1.0.0/oras_1.0.0_linux_amd64.tar.gz
|
||||
tar -xzf oras_1.0.0_linux_amd64.tar.gz
|
||||
mv oras /usr/local/bin/
|
||||
|
||||
# Get signature metadata
|
||||
DIGEST=$(crane digest "$IMAGE_TAG")
|
||||
REPO=$(echo "$IMAGE_TAG" | cut -d: -f1)
|
||||
REPO_PATH=${REPO#$REGISTRY/}
|
||||
|
||||
REFERRERS=$(curl -s "https://$REGISTRY/v2/$REPO_PATH/referrers/${DIGEST}?artifactType=application/vnd.atproto.signature.v1+json")
|
||||
SIG_DIGEST=$(echo "$REFERRERS" | jq -r '.manifests[0].digest')
|
||||
|
||||
# Pull signature artifact
|
||||
oras pull "${REPO}@${SIG_DIGEST}" -o /tmp/sig
|
||||
|
||||
# Extract DID
|
||||
DID=$(jq -r '.atproto.did' /tmp/sig/atproto-signature.json)
|
||||
echo "Signed by DID: $DID"
|
||||
|
||||
# Check against trusted DIDs (from CI/CD variables)
|
||||
if [[ ",$TRUSTED_DIDS," == *",$DID,"* ]]; then
|
||||
echo "✓ DID is trusted"
|
||||
else
|
||||
echo "❌ DID $DID is not in trusted list"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
deploy_production:
|
||||
stage: deploy
|
||||
image: bitnami/kubectl:latest
|
||||
dependencies:
|
||||
- verify_signature
|
||||
- verify_full
|
||||
- verify_trust
|
||||
only:
|
||||
- main
|
||||
script:
|
||||
- |
|
||||
# Configure kubectl
|
||||
echo "$KUBE_CONFIG" | base64 -d > /tmp/kubeconfig
|
||||
export KUBECONFIG=/tmp/kubeconfig
|
||||
|
||||
# Deploy to production
|
||||
kubectl set image deployment/myapp \
|
||||
myapp=$IMAGE_TAG \
|
||||
-n production
|
||||
|
||||
kubectl rollout status deployment/myapp -n production
|
||||
|
||||
# Verify deployment
|
||||
kubectl get pods -n production -l app=myapp
|
||||
|
||||
# Alternative: Manual approval before deploy
|
||||
deploy_production_manual:
|
||||
stage: deploy
|
||||
image: bitnami/kubectl:latest
|
||||
dependencies:
|
||||
- verify_signature
|
||||
when: manual
|
||||
only:
|
||||
- main
|
||||
script:
|
||||
- |
|
||||
echo "Deploying $IMAGE_TAG to production"
|
||||
|
||||
echo "$KUBE_CONFIG" | base64 -d > /tmp/kubeconfig
|
||||
export KUBECONFIG=/tmp/kubeconfig
|
||||
|
||||
kubectl set image deployment/myapp \
|
||||
myapp=$IMAGE_TAG \
|
||||
-n production
|
||||
@@ -1,501 +0,0 @@
|
||||
# OPA Gatekeeper External Data Provider for ATProto Signatures
|
||||
|
||||
This is a reference implementation of an OPA Gatekeeper External Data Provider that verifies ATProto signatures on ATCR container images.
|
||||
|
||||
## Overview
|
||||
|
||||
Gatekeeper's External Data Provider feature allows Rego policies to call external HTTP services for data validation. This provider implements signature verification as an HTTP service that Gatekeeper can query.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
Kubernetes Pod Creation
|
||||
↓
|
||||
OPA Gatekeeper (admission webhook)
|
||||
↓
|
||||
Rego Policy (constraint template)
|
||||
↓
|
||||
External Data Provider API call
|
||||
↓
|
||||
ATProto Verification Service ← This service
|
||||
↓
|
||||
1. Resolve image digest
|
||||
2. Discover signature artifacts
|
||||
3. Parse ATProto signature metadata
|
||||
4. Resolve DID to public key
|
||||
5. Fetch commit from PDS
|
||||
6. Verify K-256 signature
|
||||
7. Check trust policy
|
||||
↓
|
||||
Return: verified=true/false + metadata
|
||||
```
|
||||
|
||||
## Files
|
||||
|
||||
- `main.go` - HTTP server and provider endpoints
|
||||
- `verifier.go` - ATProto signature verification logic
|
||||
- `resolver.go` - DID and PDS resolution
|
||||
- `crypto.go` - K-256 signature verification
|
||||
- `trust-policy.yaml` - Trust policy configuration
|
||||
- `Dockerfile` - Build provider service image
|
||||
- `deployment.yaml` - Kubernetes deployment manifest
|
||||
- `provider-crd.yaml` - Gatekeeper Provider custom resource
|
||||
- `constraint-template.yaml` - Rego constraint template
|
||||
- `constraint.yaml` - Policy constraint example
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Go 1.21+
|
||||
- Kubernetes cluster with OPA Gatekeeper installed
|
||||
- Access to ATCR registry
|
||||
|
||||
## Building
|
||||
|
||||
```bash
|
||||
# Build binary
|
||||
CGO_ENABLED=0 go build -o atcr-provider \
|
||||
-ldflags="-w -s" \
|
||||
./main.go
|
||||
|
||||
# Build Docker image
|
||||
docker build -t atcr.io/atcr/gatekeeper-provider:latest .
|
||||
|
||||
# Push to registry
|
||||
docker push atcr.io/atcr/gatekeeper-provider:latest
|
||||
```
|
||||
|
||||
## Deployment
|
||||
|
||||
### 1. Create Trust Policy ConfigMap
|
||||
|
||||
```bash
|
||||
kubectl create namespace gatekeeper-system
|
||||
kubectl create configmap atcr-trust-policy \
|
||||
--from-file=trust-policy.yaml \
|
||||
-n gatekeeper-system
|
||||
```
|
||||
|
||||
### 2. Deploy Provider Service
|
||||
|
||||
```bash
|
||||
kubectl apply -f deployment.yaml
|
||||
```
|
||||
|
||||
### 3. Configure Gatekeeper Provider
|
||||
|
||||
```bash
|
||||
kubectl apply -f provider-crd.yaml
|
||||
```
|
||||
|
||||
### 4. Create Constraint Template
|
||||
|
||||
```bash
|
||||
kubectl apply -f constraint-template.yaml
|
||||
```
|
||||
|
||||
### 5. Create Constraint
|
||||
|
||||
```bash
|
||||
kubectl apply -f constraint.yaml
|
||||
```
|
||||
|
||||
### 6. Test
|
||||
|
||||
```bash
|
||||
# Try to create pod with signed image (should succeed)
|
||||
kubectl run test-signed --image=atcr.io/alice/myapp:latest
|
||||
|
||||
# Try to create pod with unsigned image (should fail)
|
||||
kubectl run test-unsigned --image=atcr.io/malicious/fake:latest
|
||||
|
||||
# Check constraint status
|
||||
kubectl get constraint atcr-signatures-required -o yaml
|
||||
```
|
||||
|
||||
## API Specification
|
||||
|
||||
### Provider Endpoint
|
||||
|
||||
**POST /provide**
|
||||
|
||||
Request:
|
||||
```json
|
||||
{
|
||||
"keys": ["image"],
|
||||
"values": [
|
||||
"atcr.io/alice/myapp:latest",
|
||||
"atcr.io/bob/webapp:v1.0"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Response:
|
||||
```json
|
||||
{
|
||||
"responses": [
|
||||
{
|
||||
"image": "atcr.io/alice/myapp:latest",
|
||||
"verified": true,
|
||||
"did": "did:plc:alice123",
|
||||
"handle": "alice.bsky.social",
|
||||
"signedAt": "2025-10-31T12:34:56Z",
|
||||
"commitCid": "bafyreih8..."
|
||||
},
|
||||
{
|
||||
"image": "atcr.io/bob/webapp:v1.0",
|
||||
"verified": false,
|
||||
"error": "no signature found"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Health Check
|
||||
|
||||
**GET /health**
|
||||
|
||||
Response:
|
||||
```json
|
||||
{
|
||||
"status": "ok",
|
||||
"version": "1.0.0"
|
||||
}
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Trust Policy Format
|
||||
|
||||
```yaml
|
||||
# trust-policy.yaml
|
||||
version: 1.0
|
||||
|
||||
trustedDIDs:
|
||||
did:plc:alice123:
|
||||
name: "Alice (DevOps)"
|
||||
validFrom: "2024-01-01T00:00:00Z"
|
||||
expiresAt: null
|
||||
|
||||
did:plc:bob456:
|
||||
name: "Bob (Security)"
|
||||
validFrom: "2024-06-01T00:00:00Z"
|
||||
expiresAt: "2025-12-31T23:59:59Z"
|
||||
|
||||
policies:
|
||||
- name: production
|
||||
scope: "atcr.io/*/prod-*"
|
||||
require:
|
||||
signature: true
|
||||
trustedDIDs:
|
||||
- did:plc:alice123
|
||||
- did:plc:bob456
|
||||
action: enforce
|
||||
```
|
||||
|
||||
### Provider Configuration
|
||||
|
||||
Environment variables:
|
||||
- `TRUST_POLICY_PATH` - Path to trust policy file (default: `/config/trust-policy.yaml`)
|
||||
- `HTTP_PORT` - HTTP server port (default: `8080`)
|
||||
- `LOG_LEVEL` - Log level: debug, info, warn, error (default: `info`)
|
||||
- `CACHE_ENABLED` - Enable caching (default: `true`)
|
||||
- `CACHE_TTL` - Cache TTL in seconds (default: `300`)
|
||||
- `DID_RESOLVER_TIMEOUT` - DID resolution timeout (default: `10s`)
|
||||
- `PDS_TIMEOUT` - PDS XRPC timeout (default: `10s`)
|
||||
|
||||
## Rego Policy Examples
|
||||
|
||||
### Simple Verification
|
||||
|
||||
```rego
|
||||
package atcrsignatures
|
||||
|
||||
import future.keywords.contains
|
||||
import future.keywords.if
|
||||
import future.keywords.in
|
||||
|
||||
provider := "atcr-verifier"
|
||||
|
||||
violation[{"msg": msg}] {
|
||||
container := input.review.object.spec.containers[_]
|
||||
startswith(container.image, "atcr.io/")
|
||||
|
||||
# Call external provider
|
||||
response := external_data({
|
||||
"provider": provider,
|
||||
"keys": ["image"],
|
||||
"values": [container.image]
|
||||
})
|
||||
|
||||
# Check verification result
|
||||
not response[_].verified == true
|
||||
|
||||
msg := sprintf("Image %v has no valid ATProto signature", [container.image])
|
||||
}
|
||||
```
|
||||
|
||||
### Advanced Verification with DID Trust
|
||||
|
||||
```rego
|
||||
package atcrsignatures
|
||||
|
||||
import future.keywords.contains
|
||||
import future.keywords.if
|
||||
import future.keywords.in
|
||||
|
||||
provider := "atcr-verifier"
|
||||
|
||||
trusted_dids := [
|
||||
"did:plc:alice123",
|
||||
"did:plc:bob456"
|
||||
]
|
||||
|
||||
violation[{"msg": msg}] {
|
||||
container := input.review.object.spec.containers[_]
|
||||
startswith(container.image, "atcr.io/")
|
||||
|
||||
# Call external provider
|
||||
response := external_data({
|
||||
"provider": provider,
|
||||
"keys": ["image"],
|
||||
"values": [container.image]
|
||||
})
|
||||
|
||||
# Get response for this image
|
||||
result := response[_]
|
||||
result.image == container.image
|
||||
|
||||
# Check if verified
|
||||
not result.verified == true
|
||||
msg := sprintf("Image %v failed signature verification: %v", [container.image, result.error])
|
||||
}
|
||||
|
||||
violation[{"msg": msg}] {
|
||||
container := input.review.object.spec.containers[_]
|
||||
startswith(container.image, "atcr.io/")
|
||||
|
||||
# Call external provider
|
||||
response := external_data({
|
||||
"provider": provider,
|
||||
"keys": ["image"],
|
||||
"values": [container.image]
|
||||
})
|
||||
|
||||
# Get response for this image
|
||||
result := response[_]
|
||||
result.image == container.image
|
||||
result.verified == true
|
||||
|
||||
# Check DID is trusted
|
||||
not result.did in trusted_dids
|
||||
msg := sprintf("Image %v signed by untrusted DID: %v", [container.image, result.did])
|
||||
}
|
||||
```
|
||||
|
||||
### Namespace-Specific Policies
|
||||
|
||||
```rego
|
||||
package atcrsignatures
|
||||
|
||||
import future.keywords.contains
|
||||
import future.keywords.if
|
||||
import future.keywords.in
|
||||
|
||||
provider := "atcr-verifier"
|
||||
|
||||
# Production namespaces require signatures
|
||||
production_namespaces := ["production", "prod", "staging"]
|
||||
|
||||
violation[{"msg": msg}] {
|
||||
# Only apply to production namespaces
|
||||
input.review.object.metadata.namespace in production_namespaces
|
||||
|
||||
container := input.review.object.spec.containers[_]
|
||||
startswith(container.image, "atcr.io/")
|
||||
|
||||
# Call external provider
|
||||
response := external_data({
|
||||
"provider": provider,
|
||||
"keys": ["image"],
|
||||
"values": [container.image]
|
||||
})
|
||||
|
||||
# Check verification result
|
||||
not response[_].verified == true
|
||||
|
||||
msg := sprintf("Production namespace requires signed images. Image %v is not signed", [container.image])
|
||||
}
|
||||
```
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
### Caching
|
||||
|
||||
The provider caches:
|
||||
- Signature verification results (TTL: 5 minutes)
|
||||
- DID documents (TTL: 5 minutes)
|
||||
- PDS endpoints (TTL: 5 minutes)
|
||||
- Public keys (TTL: 5 minutes)
|
||||
|
||||
Enable/disable via `CACHE_ENABLED` environment variable.
|
||||
|
||||
### Timeouts
|
||||
|
||||
- `DID_RESOLVER_TIMEOUT` - DID resolution timeout (default: 10s)
|
||||
- `PDS_TIMEOUT` - PDS XRPC calls timeout (default: 10s)
|
||||
- HTTP client timeout: 30s total
|
||||
|
||||
### Horizontal Scaling
|
||||
|
||||
The provider is stateless and can be scaled horizontally:
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
spec:
|
||||
replicas: 3 # Scale up for high traffic
|
||||
```
|
||||
|
||||
### Rate Limiting
|
||||
|
||||
Consider implementing rate limiting for:
|
||||
- Gatekeeper → Provider requests
|
||||
- Provider → DID resolver
|
||||
- Provider → PDS
|
||||
|
||||
## Monitoring
|
||||
|
||||
### Metrics
|
||||
|
||||
The provider exposes Prometheus metrics at `/metrics`:
|
||||
|
||||
```
|
||||
# Request metrics
|
||||
atcr_provider_requests_total{status="success|failure"}
|
||||
atcr_provider_request_duration_seconds
|
||||
|
||||
# Verification metrics
|
||||
atcr_provider_verifications_total{result="verified|failed|error"}
|
||||
atcr_provider_verification_duration_seconds
|
||||
|
||||
# Cache metrics
|
||||
atcr_provider_cache_hits_total
|
||||
atcr_provider_cache_misses_total
|
||||
```
|
||||
|
||||
### Logging
|
||||
|
||||
Structured JSON logging with fields:
|
||||
- `image` - Image being verified
|
||||
- `did` - Signer DID (if found)
|
||||
- `duration` - Verification duration
|
||||
- `error` - Error message (if failed)
|
||||
|
||||
### Health Checks
|
||||
|
||||
```bash
|
||||
# Liveness probe
|
||||
curl http://localhost:8080/health
|
||||
|
||||
# Readiness probe
|
||||
curl http://localhost:8080/ready
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Provider Not Reachable
|
||||
|
||||
```bash
|
||||
# Check provider pod status
|
||||
kubectl get pods -n gatekeeper-system -l app=atcr-provider
|
||||
|
||||
# Check service
|
||||
kubectl get svc -n gatekeeper-system atcr-provider
|
||||
|
||||
# Test connectivity from Gatekeeper pod
|
||||
kubectl exec -n gatekeeper-system deployment/gatekeeper-controller-manager -- \
|
||||
curl http://atcr-provider.gatekeeper-system/health
|
||||
```
|
||||
|
||||
### Verification Failing
|
||||
|
||||
```bash
|
||||
# Check provider logs
|
||||
kubectl logs -n gatekeeper-system deployment/atcr-provider
|
||||
|
||||
# Test verification manually
|
||||
kubectl run test-curl --rm -it --image=curlimages/curl -- \
|
||||
curl -X POST http://atcr-provider.gatekeeper-system/provide \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"keys":["image"],"values":["atcr.io/alice/myapp:latest"]}'
|
||||
```
|
||||
|
||||
### Policy Not Enforcing
|
||||
|
||||
```bash
|
||||
# Check Gatekeeper logs
|
||||
kubectl logs -n gatekeeper-system deployment/gatekeeper-controller-manager
|
||||
|
||||
# Check constraint status
|
||||
kubectl get constraint atcr-signatures-required -o yaml
|
||||
|
||||
# Test policy manually with conftest
|
||||
conftest test -p constraint-template.yaml pod.yaml
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Network Policies
|
||||
|
||||
Restrict network access:
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: atcr-provider
|
||||
namespace: gatekeeper-system
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app: atcr-provider
|
||||
ingress:
|
||||
- from:
|
||||
- podSelector:
|
||||
matchLabels:
|
||||
control-plane: controller-manager # Gatekeeper
|
||||
ports:
|
||||
- port: 8080
|
||||
egress:
|
||||
- to: # PLC directory
|
||||
- namespaceSelector: {}
|
||||
ports:
|
||||
- port: 443
|
||||
```
|
||||
|
||||
### Authentication
|
||||
|
||||
The provider should only be accessible from Gatekeeper. Options:
|
||||
- Network policies (recommended for Kubernetes)
|
||||
- Mutual TLS
|
||||
- API tokens
|
||||
|
||||
### Trust Policy Management
|
||||
|
||||
- Store trust policy in version control
|
||||
- Use GitOps (Flux, ArgoCD) for updates
|
||||
- Review DID changes carefully
|
||||
- Audit policy modifications
|
||||
|
||||
## See Also
|
||||
|
||||
- [Gatekeeper Documentation](https://open-policy-agent.github.io/gatekeeper/)
|
||||
- [External Data Provider](https://open-policy-agent.github.io/gatekeeper/website/docs/externaldata/)
|
||||
- [ATCR Signature Integration](../../../docs/SIGNATURE_INTEGRATION.md)
|
||||
- [ATCR Integration Strategy](../../../docs/INTEGRATION_STRATEGY.md)
|
||||
|
||||
## Support
|
||||
|
||||
For issues or questions:
|
||||
- GitHub Issues: https://github.com/atcr-io/atcr/issues
|
||||
- Gatekeeper GitHub: https://github.com/open-policy-agent/gatekeeper
|
||||
@@ -1,225 +0,0 @@
|
||||
// Package main implements an OPA Gatekeeper External Data Provider for ATProto signature verification.
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultPort is the default HTTP port
|
||||
DefaultPort = "8080"
|
||||
|
||||
// DefaultTrustPolicyPath is the default trust policy file path
|
||||
DefaultTrustPolicyPath = "/config/trust-policy.yaml"
|
||||
)
|
||||
|
||||
// Server is the HTTP server for the external data provider.
|
||||
type Server struct {
|
||||
verifier *Verifier
|
||||
port string
|
||||
httpServer *http.Server
|
||||
}
|
||||
|
||||
// ProviderRequest is the request format from Gatekeeper.
|
||||
type ProviderRequest struct {
|
||||
Keys []string `json:"keys"`
|
||||
Values []string `json:"values"`
|
||||
}
|
||||
|
||||
// ProviderResponse is the response format to Gatekeeper.
|
||||
type ProviderResponse struct {
|
||||
SystemError string `json:"system_error,omitempty"`
|
||||
Responses []map[string]interface{} `json:"responses"`
|
||||
}
|
||||
|
||||
// VerificationResult holds the result of verifying a single image.
|
||||
type VerificationResult struct {
|
||||
Image string `json:"image"`
|
||||
Verified bool `json:"verified"`
|
||||
DID string `json:"did,omitempty"`
|
||||
Handle string `json:"handle,omitempty"`
|
||||
SignedAt time.Time `json:"signedAt,omitempty"`
|
||||
CommitCID string `json:"commitCid,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// NewServer creates a new provider server.
|
||||
func NewServer(verifier *Verifier, port string) *Server {
|
||||
return &Server{
|
||||
verifier: verifier,
|
||||
port: port,
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the HTTP server.
|
||||
func (s *Server) Start() error {
|
||||
mux := http.NewServeMux()
|
||||
|
||||
// Provider endpoint (called by Gatekeeper)
|
||||
mux.HandleFunc("/provide", s.handleProvide)
|
||||
|
||||
// Health check endpoints
|
||||
mux.HandleFunc("/health", s.handleHealth)
|
||||
mux.HandleFunc("/ready", s.handleReady)
|
||||
|
||||
// Metrics endpoint (Prometheus)
|
||||
// TODO: Implement metrics
|
||||
// mux.HandleFunc("/metrics", s.handleMetrics)
|
||||
|
||||
s.httpServer = &http.Server{
|
||||
Addr: ":" + s.port,
|
||||
Handler: mux,
|
||||
ReadTimeout: 10 * time.Second,
|
||||
WriteTimeout: 30 * time.Second,
|
||||
IdleTimeout: 60 * time.Second,
|
||||
}
|
||||
|
||||
log.Printf("Starting ATProto signature verification provider on port %s", s.port)
|
||||
return s.httpServer.ListenAndServe()
|
||||
}
|
||||
|
||||
// Stop gracefully stops the HTTP server.
|
||||
func (s *Server) Stop(ctx context.Context) error {
|
||||
if s.httpServer != nil {
|
||||
return s.httpServer.Shutdown(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleProvide handles the provider endpoint called by Gatekeeper.
|
||||
func (s *Server) handleProvide(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPost {
|
||||
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
// Parse request
|
||||
var req ProviderRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
log.Printf("ERROR: failed to parse request: %v", err)
|
||||
http.Error(w, fmt.Sprintf("invalid request: %v", err), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("INFO: received verification request for %d images", len(req.Values))
|
||||
|
||||
// Verify each image
|
||||
responses := make([]map[string]interface{}, 0, len(req.Values))
|
||||
for _, image := range req.Values {
|
||||
result := s.verifyImage(r.Context(), image)
|
||||
responses = append(responses, structToMap(result))
|
||||
}
|
||||
|
||||
// Send response
|
||||
resp := ProviderResponse{
|
||||
Responses: responses,
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
if err := json.NewEncoder(w).Encode(resp); err != nil {
|
||||
log.Printf("ERROR: failed to encode response: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// verifyImage verifies a single image.
|
||||
func (s *Server) verifyImage(ctx context.Context, image string) VerificationResult {
|
||||
start := time.Now()
|
||||
log.Printf("INFO: verifying image: %s", image)
|
||||
|
||||
// Call verifier
|
||||
verified, metadata, err := s.verifier.Verify(ctx, image)
|
||||
duration := time.Since(start)
|
||||
|
||||
if err != nil {
|
||||
log.Printf("ERROR: verification failed for %s: %v (duration: %v)", image, err, duration)
|
||||
return VerificationResult{
|
||||
Image: image,
|
||||
Verified: false,
|
||||
Error: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
if !verified {
|
||||
log.Printf("WARN: image %s failed verification (duration: %v)", image, duration)
|
||||
return VerificationResult{
|
||||
Image: image,
|
||||
Verified: false,
|
||||
Error: "signature verification failed",
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("INFO: image %s verified successfully (DID: %s, duration: %v)",
|
||||
image, metadata.DID, duration)
|
||||
|
||||
return VerificationResult{
|
||||
Image: image,
|
||||
Verified: true,
|
||||
DID: metadata.DID,
|
||||
Handle: metadata.Handle,
|
||||
SignedAt: metadata.SignedAt,
|
||||
CommitCID: metadata.CommitCID,
|
||||
}
|
||||
}
|
||||
|
||||
// handleHealth handles health check requests.
|
||||
func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(map[string]string{
|
||||
"status": "ok",
|
||||
"version": "1.0.0",
|
||||
})
|
||||
}
|
||||
|
||||
// handleReady handles readiness check requests.
|
||||
func (s *Server) handleReady(w http.ResponseWriter, r *http.Request) {
|
||||
// TODO: Check dependencies (DID resolver, PDS connectivity)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(map[string]string{
|
||||
"status": "ready",
|
||||
})
|
||||
}
|
||||
|
||||
// structToMap converts a struct to a map for JSON encoding.
|
||||
func structToMap(v interface{}) map[string]interface{} {
|
||||
data, _ := json.Marshal(v)
|
||||
var m map[string]interface{}
|
||||
json.Unmarshal(data, &m)
|
||||
return m
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Load configuration
|
||||
port := os.Getenv("HTTP_PORT")
|
||||
if port == "" {
|
||||
port = DefaultPort
|
||||
}
|
||||
|
||||
trustPolicyPath := os.Getenv("TRUST_POLICY_PATH")
|
||||
if trustPolicyPath == "" {
|
||||
trustPolicyPath = DefaultTrustPolicyPath
|
||||
}
|
||||
|
||||
// Create verifier
|
||||
verifier, err := NewVerifier(trustPolicyPath)
|
||||
if err != nil {
|
||||
log.Fatalf("FATAL: failed to create verifier: %v", err)
|
||||
}
|
||||
|
||||
// Create server
|
||||
server := NewServer(verifier, port)
|
||||
|
||||
// Start server
|
||||
if err := server.Start(); err != nil && err != http.ErrServerClosed {
|
||||
log.Fatalf("FATAL: server error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Implement verifier.go with ATProto signature verification logic
|
||||
// TODO: Implement resolver.go with DID resolution
|
||||
// TODO: Implement crypto.go with K-256 signature verification
|
||||
@@ -1,304 +0,0 @@
|
||||
# Ratify ATProto Verifier Plugin
|
||||
|
||||
This is a reference implementation of a Ratify verifier plugin for ATProto signatures.
|
||||
|
||||
## Overview
|
||||
|
||||
Ratify is a verification framework that integrates with OPA Gatekeeper to enforce signature policies in Kubernetes. This plugin adds support for verifying ATProto signatures on ATCR container images.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
Kubernetes Pod Creation
|
||||
↓
|
||||
OPA Gatekeeper (admission webhook)
|
||||
↓
|
||||
Ratify (verification engine)
|
||||
↓
|
||||
ATProto Verifier Plugin ← This plugin
|
||||
↓
|
||||
1. Fetch signature artifact from registry
|
||||
2. Parse ATProto signature metadata
|
||||
3. Resolve DID to public key
|
||||
4. Fetch repository commit from PDS
|
||||
5. Verify ECDSA K-256 signature
|
||||
6. Check trust policy
|
||||
↓
|
||||
Return: Allow/Deny
|
||||
```
|
||||
|
||||
## Files
|
||||
|
||||
- `verifier.go` - Main verifier implementation
|
||||
- `config.go` - Configuration and trust policy
|
||||
- `resolver.go` - DID and PDS resolution
|
||||
- `crypto.go` - K-256 signature verification
|
||||
- `Dockerfile` - Build custom Ratify image with plugin
|
||||
- `deployment.yaml` - Kubernetes deployment manifest
|
||||
- `verifier-crd.yaml` - Ratify Verifier custom resource
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Go 1.21+
|
||||
- Ratify source code (for building plugin)
|
||||
- Kubernetes cluster with OPA Gatekeeper installed
|
||||
- Access to ATCR registry
|
||||
|
||||
## Building
|
||||
|
||||
```bash
|
||||
# Clone Ratify
|
||||
git clone https://github.com/ratify-project/ratify.git
|
||||
cd ratify
|
||||
|
||||
# Copy plugin files
|
||||
cp -r /path/to/examples/plugins/ratify-verifier plugins/verifier/atproto/
|
||||
|
||||
# Build plugin
|
||||
CGO_ENABLED=0 go build -o atproto-verifier \
|
||||
-ldflags="-w -s" \
|
||||
./plugins/verifier/atproto
|
||||
|
||||
# Build custom Ratify image with plugin
|
||||
docker build -f Dockerfile.with-atproto -t atcr.io/atcr/ratify-with-atproto:latest .
|
||||
```
|
||||
|
||||
## Deployment
|
||||
|
||||
### 1. Deploy Ratify with Plugin
|
||||
|
||||
```bash
|
||||
# Push custom image
|
||||
docker push atcr.io/atcr/ratify-with-atproto:latest
|
||||
|
||||
# Deploy Ratify
|
||||
kubectl apply -f deployment.yaml
|
||||
```
|
||||
|
||||
### 2. Configure Verifier
|
||||
|
||||
```bash
|
||||
# Create Verifier custom resource
|
||||
kubectl apply -f verifier-crd.yaml
|
||||
```
|
||||
|
||||
### 3. Configure Trust Policy
|
||||
|
||||
```bash
|
||||
# Create ConfigMap with trust policy
|
||||
kubectl create configmap atcr-trust-policy \
|
||||
--from-file=trust-policy.yaml \
|
||||
-n gatekeeper-system
|
||||
```
|
||||
|
||||
### 4. Create Gatekeeper Constraint
|
||||
|
||||
```bash
|
||||
kubectl apply -f constraint.yaml
|
||||
```
|
||||
|
||||
### 5. Test
|
||||
|
||||
```bash
|
||||
# Try to create pod with signed image (should succeed)
|
||||
kubectl run test-signed --image=atcr.io/alice/myapp:latest
|
||||
|
||||
# Try to create pod with unsigned image (should fail)
|
||||
kubectl run test-unsigned --image=atcr.io/malicious/fake:latest
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Trust Policy Format
|
||||
|
||||
```yaml
|
||||
# trust-policy.yaml
|
||||
version: 1.0
|
||||
|
||||
trustedDIDs:
|
||||
did:plc:alice123:
|
||||
name: "Alice (DevOps)"
|
||||
validFrom: "2024-01-01T00:00:00Z"
|
||||
expiresAt: null
|
||||
|
||||
did:plc:bob456:
|
||||
name: "Bob (Security)"
|
||||
validFrom: "2024-06-01T00:00:00Z"
|
||||
expiresAt: "2025-12-31T23:59:59Z"
|
||||
|
||||
policies:
|
||||
- name: production
|
||||
scope: "atcr.io/*/prod-*"
|
||||
require:
|
||||
signature: true
|
||||
trustedDIDs:
|
||||
- did:plc:alice123
|
||||
- did:plc:bob456
|
||||
action: enforce
|
||||
```
|
||||
|
||||
### Verifier Configuration
|
||||
|
||||
```yaml
|
||||
apiVersion: config.ratify.deislabs.io/v1beta1
|
||||
kind: Verifier
|
||||
metadata:
|
||||
name: atproto-verifier
|
||||
spec:
|
||||
name: atproto
|
||||
artifactType: application/vnd.atproto.signature.v1+json
|
||||
address: /.ratify/plugins/atproto-verifier
|
||||
parameters:
|
||||
trustPolicyPath: /config/trust-policy.yaml
|
||||
didResolverTimeout: 10s
|
||||
pdsTimeout: 10s
|
||||
cacheEnabled: true
|
||||
cacheTTL: 300s
|
||||
```
|
||||
|
||||
## Implementation Details
|
||||
|
||||
### Verifier Interface
|
||||
|
||||
The plugin implements Ratify's `ReferenceVerifier` interface:
|
||||
|
||||
```go
|
||||
type ReferenceVerifier interface {
|
||||
Name() string
|
||||
Type() string
|
||||
CanVerify(artifactType string) bool
|
||||
VerifyReference(
|
||||
ctx context.Context,
|
||||
subjectRef common.Reference,
|
||||
referenceDesc ocispecs.ReferenceDescriptor,
|
||||
store referrerstore.ReferrerStore,
|
||||
) (VerifierResult, error)
|
||||
}
|
||||
```
|
||||
|
||||
### Verification Flow
|
||||
|
||||
1. **Artifact Fetch**: Download signature artifact from registry via Ratify's store
|
||||
2. **Parse Metadata**: Extract ATProto signature metadata (DID, PDS, commit CID)
|
||||
3. **DID Resolution**: Resolve DID to public key via PLC directory or did:web
|
||||
4. **Commit Fetch**: Get repository commit from PDS via XRPC
|
||||
5. **Signature Verify**: Verify ECDSA K-256 signature over commit bytes
|
||||
6. **Trust Check**: Validate DID against trust policy
|
||||
7. **Result**: Return success/failure with metadata
|
||||
|
||||
### Error Handling
|
||||
|
||||
The plugin returns detailed error information:
|
||||
|
||||
```go
|
||||
type VerifierResult struct {
|
||||
IsSuccess bool
|
||||
Name string
|
||||
Type string
|
||||
Message string
|
||||
Extensions map[string]interface{}
|
||||
}
|
||||
```
|
||||
|
||||
**Extensions include:**
|
||||
- `did` - Signer's DID
|
||||
- `handle` - Signer's handle (if available)
|
||||
- `signedAt` - Signature timestamp
|
||||
- `commitCid` - ATProto commit CID
|
||||
- `pdsEndpoint` - PDS URL
|
||||
- `error` - Error details (if verification failed)
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Plugin Not Found
|
||||
|
||||
```bash
|
||||
# Check plugin is in image
|
||||
kubectl exec -n gatekeeper-system deployment/ratify -c ratify -- ls -la /.ratify/plugins/
|
||||
|
||||
# Check logs
|
||||
kubectl logs -n gatekeeper-system deployment/ratify -c ratify
|
||||
```
|
||||
|
||||
### Verification Failing
|
||||
|
||||
```bash
|
||||
# Check Ratify logs for details
|
||||
kubectl logs -n gatekeeper-system deployment/ratify -c ratify | grep atproto
|
||||
|
||||
# Check Verifier status
|
||||
kubectl get verifier atproto-verifier -o yaml
|
||||
|
||||
# Test DID resolution manually
|
||||
curl https://plc.directory/did:plc:alice123
|
||||
```
|
||||
|
||||
### Trust Policy Issues
|
||||
|
||||
```bash
|
||||
# Check ConfigMap exists
|
||||
kubectl get configmap atcr-trust-policy -n gatekeeper-system
|
||||
|
||||
# View policy contents
|
||||
kubectl get configmap atcr-trust-policy -n gatekeeper-system -o yaml
|
||||
```
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
### Caching
|
||||
|
||||
The plugin caches:
|
||||
- DID documents (TTL: 5 minutes)
|
||||
- PDS endpoints (TTL: 5 minutes)
|
||||
- Public keys (TTL: 5 minutes)
|
||||
|
||||
Configure via `cacheEnabled` and `cacheTTL` parameters.
|
||||
|
||||
### Timeouts
|
||||
|
||||
Configure timeouts for external calls:
|
||||
- `didResolverTimeout` - DID resolution (default: 10s)
|
||||
- `pdsTimeout` - PDS XRPC calls (default: 10s)
|
||||
|
||||
### Rate Limiting
|
||||
|
||||
Consider implementing rate limiting for:
|
||||
- DID resolution (PLC directory)
|
||||
- PDS XRPC calls
|
||||
- Signature verification
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Trust Policy Management
|
||||
|
||||
- Store trust policy in version control
|
||||
- Review DID additions/removals carefully
|
||||
- Set expiration dates for temporary access
|
||||
- Audit trust policy changes
|
||||
|
||||
### Private Key Protection
|
||||
|
||||
- Plugin only uses public keys
|
||||
- No private keys needed for verification
|
||||
- DID resolution is read-only
|
||||
- PDS queries are read-only
|
||||
|
||||
### Denial of Service
|
||||
|
||||
- Implement timeouts for all external calls
|
||||
- Cache DID documents to reduce load
|
||||
- Rate limit verification requests
|
||||
- Monitor verification latency
|
||||
|
||||
## See Also
|
||||
|
||||
- [Ratify Documentation](https://ratify.dev/)
|
||||
- [Ratify Plugin Development](https://ratify.dev/docs/plugins/verifier/overview)
|
||||
- [ATCR Signature Integration](../../../docs/SIGNATURE_INTEGRATION.md)
|
||||
- [ATCR Integration Strategy](../../../docs/INTEGRATION_STRATEGY.md)
|
||||
|
||||
## Support
|
||||
|
||||
For issues or questions:
|
||||
- GitHub Issues: https://github.com/atcr-io/atcr/issues
|
||||
- Ratify GitHub: https://github.com/ratify-project/ratify
|
||||
@@ -1,214 +0,0 @@
|
||||
// Package atproto implements a Ratify verifier plugin for ATProto signatures.
|
||||
package atproto
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ratify-project/ratify/pkg/common"
|
||||
"github.com/ratify-project/ratify/pkg/ocispecs"
|
||||
"github.com/ratify-project/ratify/pkg/referrerstore"
|
||||
"github.com/ratify-project/ratify/pkg/verifier"
|
||||
)
|
||||
|
||||
const (
|
||||
// VerifierName is the name of this verifier
|
||||
VerifierName = "atproto"
|
||||
|
||||
// VerifierType is the type of this verifier
|
||||
VerifierType = "atproto"
|
||||
|
||||
// ATProtoSignatureArtifactType is the OCI artifact type for ATProto signatures
|
||||
ATProtoSignatureArtifactType = "application/vnd.atproto.signature.v1+json"
|
||||
)
|
||||
|
||||
// ATProtoVerifier implements the Ratify ReferenceVerifier interface for ATProto signatures.
|
||||
type ATProtoVerifier struct {
|
||||
name string
|
||||
config ATProtoConfig
|
||||
resolver *Resolver
|
||||
verifier *SignatureVerifier
|
||||
trustStore *TrustStore
|
||||
}
|
||||
|
||||
// ATProtoConfig holds configuration for the ATProto verifier.
|
||||
type ATProtoConfig struct {
|
||||
// TrustPolicyPath is the path to the trust policy YAML file
|
||||
TrustPolicyPath string `json:"trustPolicyPath"`
|
||||
|
||||
// DIDResolverTimeout is the timeout for DID resolution
|
||||
DIDResolverTimeout time.Duration `json:"didResolverTimeout"`
|
||||
|
||||
// PDSTimeout is the timeout for PDS XRPC calls
|
||||
PDSTimeout time.Duration `json:"pdsTimeout"`
|
||||
|
||||
// CacheEnabled enables caching of DID documents and public keys
|
||||
CacheEnabled bool `json:"cacheEnabled"`
|
||||
|
||||
// CacheTTL is the cache TTL for DID documents and public keys
|
||||
CacheTTL time.Duration `json:"cacheTTL"`
|
||||
}
|
||||
|
||||
// ATProtoSignature represents the ATProto signature metadata stored in the OCI artifact.
|
||||
type ATProtoSignature struct {
|
||||
Type string `json:"$type"`
|
||||
Version string `json:"version"`
|
||||
Subject struct {
|
||||
Digest string `json:"digest"`
|
||||
MediaType string `json:"mediaType"`
|
||||
} `json:"subject"`
|
||||
ATProto struct {
|
||||
DID string `json:"did"`
|
||||
Handle string `json:"handle"`
|
||||
PDSEndpoint string `json:"pdsEndpoint"`
|
||||
RecordURI string `json:"recordUri"`
|
||||
CommitCID string `json:"commitCid"`
|
||||
SignedAt time.Time `json:"signedAt"`
|
||||
} `json:"atproto"`
|
||||
Signature struct {
|
||||
Algorithm string `json:"algorithm"`
|
||||
KeyID string `json:"keyId"`
|
||||
PublicKeyMultibase string `json:"publicKeyMultibase"`
|
||||
} `json:"signature"`
|
||||
}
|
||||
|
||||
// NewATProtoVerifier creates a new ATProto verifier instance.
|
||||
func NewATProtoVerifier(name string, config ATProtoConfig) (*ATProtoVerifier, error) {
|
||||
// Load trust policy
|
||||
trustStore, err := LoadTrustStore(config.TrustPolicyPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load trust policy: %w", err)
|
||||
}
|
||||
|
||||
// Create resolver with caching
|
||||
resolver := NewResolver(config.DIDResolverTimeout, config.CacheEnabled, config.CacheTTL)
|
||||
|
||||
// Create signature verifier
|
||||
verifier := NewSignatureVerifier(config.PDSTimeout)
|
||||
|
||||
return &ATProtoVerifier{
|
||||
name: name,
|
||||
config: config,
|
||||
resolver: resolver,
|
||||
verifier: verifier,
|
||||
trustStore: trustStore,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Name returns the name of this verifier.
|
||||
func (v *ATProtoVerifier) Name() string {
|
||||
return v.name
|
||||
}
|
||||
|
||||
// Type returns the type of this verifier.
|
||||
func (v *ATProtoVerifier) Type() string {
|
||||
return VerifierType
|
||||
}
|
||||
|
||||
// CanVerify returns true if this verifier can verify the given artifact type.
|
||||
func (v *ATProtoVerifier) CanVerify(artifactType string) bool {
|
||||
return artifactType == ATProtoSignatureArtifactType
|
||||
}
|
||||
|
||||
// VerifyReference verifies an ATProto signature artifact.
|
||||
func (v *ATProtoVerifier) VerifyReference(
|
||||
ctx context.Context,
|
||||
subjectRef common.Reference,
|
||||
referenceDesc ocispecs.ReferenceDescriptor,
|
||||
store referrerstore.ReferrerStore,
|
||||
) (verifier.VerifierResult, error) {
|
||||
// 1. Fetch signature blob from store
|
||||
sigBlob, err := store.GetBlobContent(ctx, subjectRef, referenceDesc.Digest)
|
||||
if err != nil {
|
||||
return v.failureResult(fmt.Sprintf("failed to fetch signature blob: %v", err)), err
|
||||
}
|
||||
|
||||
// 2. Parse ATProto signature metadata
|
||||
var sigData ATProtoSignature
|
||||
if err := json.Unmarshal(sigBlob, &sigData); err != nil {
|
||||
return v.failureResult(fmt.Sprintf("failed to parse signature metadata: %v", err)), err
|
||||
}
|
||||
|
||||
// Validate signature format
|
||||
if err := v.validateSignature(&sigData); err != nil {
|
||||
return v.failureResult(fmt.Sprintf("invalid signature format: %v", err)), err
|
||||
}
|
||||
|
||||
// 3. Check trust policy first (fail fast if DID not trusted)
|
||||
if !v.trustStore.IsTrusted(sigData.ATProto.DID, time.Now()) {
|
||||
return v.failureResult(fmt.Sprintf("DID %s not in trusted list", sigData.ATProto.DID)),
|
||||
fmt.Errorf("untrusted DID")
|
||||
}
|
||||
|
||||
// 4. Resolve DID to public key
|
||||
pubKey, err := v.resolver.ResolveDIDToPublicKey(ctx, sigData.ATProto.DID)
|
||||
if err != nil {
|
||||
return v.failureResult(fmt.Sprintf("failed to resolve DID: %v", err)), err
|
||||
}
|
||||
|
||||
// 5. Fetch repository commit from PDS
|
||||
commit, err := v.verifier.FetchCommit(ctx, sigData.ATProto.PDSEndpoint,
|
||||
sigData.ATProto.DID, sigData.ATProto.CommitCID)
|
||||
if err != nil {
|
||||
return v.failureResult(fmt.Sprintf("failed to fetch commit: %v", err)), err
|
||||
}
|
||||
|
||||
// 6. Verify K-256 signature
|
||||
if err := v.verifier.VerifySignature(pubKey, commit); err != nil {
|
||||
return v.failureResult(fmt.Sprintf("signature verification failed: %v", err)), err
|
||||
}
|
||||
|
||||
// 7. Success - return detailed result
|
||||
return verifier.VerifierResult{
|
||||
IsSuccess: true,
|
||||
Name: v.name,
|
||||
Type: v.Type(),
|
||||
Message: fmt.Sprintf("Successfully verified ATProto signature for DID %s", sigData.ATProto.DID),
|
||||
Extensions: map[string]interface{}{
|
||||
"did": sigData.ATProto.DID,
|
||||
"handle": sigData.ATProto.Handle,
|
||||
"signedAt": sigData.ATProto.SignedAt,
|
||||
"commitCid": sigData.ATProto.CommitCID,
|
||||
"pdsEndpoint": sigData.ATProto.PDSEndpoint,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// validateSignature validates the signature metadata format.
|
||||
func (v *ATProtoVerifier) validateSignature(sig *ATProtoSignature) error {
|
||||
if sig.Type != "io.atcr.atproto.signature" {
|
||||
return fmt.Errorf("invalid signature type: %s", sig.Type)
|
||||
}
|
||||
if sig.ATProto.DID == "" {
|
||||
return fmt.Errorf("missing DID")
|
||||
}
|
||||
if sig.ATProto.PDSEndpoint == "" {
|
||||
return fmt.Errorf("missing PDS endpoint")
|
||||
}
|
||||
if sig.ATProto.CommitCID == "" {
|
||||
return fmt.Errorf("missing commit CID")
|
||||
}
|
||||
if sig.Signature.Algorithm != "ECDSA-K256-SHA256" {
|
||||
return fmt.Errorf("unsupported signature algorithm: %s", sig.Signature.Algorithm)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// failureResult creates a failure result with the given message.
|
||||
func (v *ATProtoVerifier) failureResult(message string) verifier.VerifierResult {
|
||||
return verifier.VerifierResult{
|
||||
IsSuccess: false,
|
||||
Name: v.name,
|
||||
Type: v.Type(),
|
||||
Message: message,
|
||||
Extensions: map[string]interface{}{
|
||||
"error": message,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Implement resolver.go with DID resolution logic
|
||||
// TODO: Implement crypto.go with K-256 signature verification
|
||||
// TODO: Implement config.go with trust policy loading
|
||||
@@ -1,364 +0,0 @@
|
||||
# ATProto Signature Verification Examples
|
||||
|
||||
This directory contains practical examples for verifying ATProto signatures on ATCR container images.
|
||||
|
||||
## Files
|
||||
|
||||
### Scripts
|
||||
|
||||
- **`atcr-verify.sh`** - Standalone signature verification script
|
||||
- Verifies ATProto signatures using shell commands
|
||||
- Requires: `curl`, `jq`, `crane`, `oras`
|
||||
- Does everything except full cryptographic verification
|
||||
- Use this until the `atcr-verify` CLI tool is built
|
||||
|
||||
- **`verify-and-pull.sh`** - Secure image pull wrapper
|
||||
- Verifies signatures before pulling images
|
||||
- Can be used as a `docker pull` replacement
|
||||
- Configurable via environment variables
|
||||
|
||||
### Configuration
|
||||
|
||||
- **`trust-policy.yaml`** - Example trust policy configuration
|
||||
- Defines which DIDs to trust
|
||||
- Specifies policies for different image scopes
|
||||
- Includes audit logging and reporting settings
|
||||
|
||||
- **`kubernetes-webhook.yaml`** - Kubernetes admission controller
|
||||
- Validates signatures before pod creation
|
||||
- Includes webhook deployment, service, and configuration
|
||||
- Uses trust policy ConfigMap
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Verify an Image
|
||||
|
||||
```bash
|
||||
# Make script executable
|
||||
chmod +x atcr-verify.sh
|
||||
|
||||
# Verify an image
|
||||
./atcr-verify.sh atcr.io/alice/myapp:latest
|
||||
```
|
||||
|
||||
**Output:**
|
||||
```
|
||||
═══════════════════════════════════════════════════
|
||||
ATProto Signature Verification
|
||||
═══════════════════════════════════════════════════
|
||||
Image: atcr.io/alice/myapp:latest
|
||||
═══════════════════════════════════════════════════
|
||||
|
||||
[1/7] Resolving image digest...
|
||||
→ sha256:abc123...
|
||||
[2/7] Discovering ATProto signature artifacts...
|
||||
→ Found 1 signature(s)
|
||||
→ Signature digest: sha256:sig789...
|
||||
→ Signed by DID: did:plc:alice123
|
||||
[3/7] Fetching signature metadata...
|
||||
→ DID: did:plc:alice123
|
||||
→ Handle: alice.bsky.social
|
||||
→ PDS: https://bsky.social
|
||||
→ Record: at://did:plc:alice123/io.atcr.manifest/abc123
|
||||
→ Signed at: 2025-10-31T12:34:56.789Z
|
||||
[4/7] Resolving DID to public key...
|
||||
→ Public key: zQ3shokFTS3brHcD...
|
||||
[5/7] Querying PDS for signed record...
|
||||
→ Record CID: bafyreig7...
|
||||
[6/7] Verifying record integrity...
|
||||
→ Record digest matches image digest
|
||||
[7/7] Cryptographic signature verification...
|
||||
⚠ Full cryptographic verification requires ATProto crypto library
|
||||
|
||||
═══════════════════════════════════════════════════
|
||||
✓ Verification Completed
|
||||
═══════════════════════════════════════════════════
|
||||
|
||||
Signed by: alice.bsky.social (did:plc:alice123)
|
||||
Signed at: 2025-10-31T12:34:56.789Z
|
||||
PDS: https://bsky.social
|
||||
Record: at://did:plc:alice123/io.atcr.manifest/abc123
|
||||
Signature: sha256:sig789...
|
||||
|
||||
═══════════════════════════════════════════════════
|
||||
```
|
||||
|
||||
### 2. Secure Pull
|
||||
|
||||
```bash
|
||||
# Make script executable
|
||||
chmod +x verify-and-pull.sh
|
||||
|
||||
# Pull image with verification
|
||||
./verify-and-pull.sh atcr.io/alice/myapp:latest
|
||||
|
||||
# With Docker options
|
||||
./verify-and-pull.sh atcr.io/alice/myapp:latest --platform linux/amd64
|
||||
```
|
||||
|
||||
**Create an alias for convenience:**
|
||||
```bash
|
||||
# Add to ~/.bashrc or ~/.zshrc
|
||||
alias docker-pull-secure='/path/to/verify-and-pull.sh'
|
||||
|
||||
# Use it
|
||||
docker-pull-secure atcr.io/alice/myapp:latest
|
||||
```
|
||||
|
||||
### 3. Deploy Kubernetes Webhook
|
||||
|
||||
```bash
|
||||
# 1. Generate TLS certificates for webhook
|
||||
openssl req -x509 -newkey rsa:4096 -keyout tls.key -out tls.crt \
|
||||
-days 365 -nodes -subj "/CN=atcr-verify-webhook.atcr-system.svc"
|
||||
|
||||
# 2. Create namespace and secret
|
||||
kubectl create namespace atcr-system
|
||||
kubectl create secret tls atcr-verify-webhook-certs \
|
||||
--cert=tls.crt --key=tls.key -n atcr-system
|
||||
|
||||
# 3. Update CA bundle in kubernetes-webhook.yaml
|
||||
cat tls.crt | base64 -w 0
|
||||
# Copy output and replace caBundle in kubernetes-webhook.yaml
|
||||
|
||||
# 4. Deploy webhook
|
||||
kubectl apply -f kubernetes-webhook.yaml
|
||||
|
||||
# 5. Enable verification for a namespace
|
||||
kubectl label namespace production atcr-verify=enabled
|
||||
|
||||
# 6. Test with a pod
|
||||
kubectl run test-pod --image=atcr.io/alice/myapp:latest -n production
|
||||
```
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### For Scripts
|
||||
|
||||
Install required tools:
|
||||
|
||||
**macOS (Homebrew):**
|
||||
```bash
|
||||
brew install curl jq crane oras
|
||||
```
|
||||
|
||||
**Linux (apt):**
|
||||
```bash
|
||||
# curl and jq
|
||||
sudo apt-get install curl jq
|
||||
|
||||
# crane
|
||||
curl -sL "https://github.com/google/go-containerregistry/releases/download/v0.15.2/go-containerregistry_Linux_x86_64.tar.gz" | tar -xz crane
|
||||
sudo mv crane /usr/local/bin/
|
||||
|
||||
# oras
|
||||
curl -LO "https://github.com/oras-project/oras/releases/download/v1.0.0/oras_1.0.0_linux_amd64.tar.gz"
|
||||
tar -xzf oras_1.0.0_linux_amd64.tar.gz
|
||||
sudo mv oras /usr/local/bin/
|
||||
```
|
||||
|
||||
### For Kubernetes Webhook
|
||||
|
||||
Requirements:
|
||||
- Kubernetes cluster (1.16+)
|
||||
- `kubectl` configured
|
||||
- Permission to create namespaces and webhooks
|
||||
- Webhook container image (build from source or use pre-built)
|
||||
|
||||
## Configuration
|
||||
|
||||
### Environment Variables (verify-and-pull.sh)
|
||||
|
||||
- `VERIFY_SCRIPT` - Path to atcr-verify.sh (default: atcr-verify.sh in the same directory as the script)
- `TRUST_POLICY` - Path to trust policy (default: trust-policy.yaml in the same directory as the script)
|
||||
- `REQUIRE_VERIFICATION` - Require verification (default: true)
|
||||
- `SKIP_ATCR_IMAGES` - Skip verification for non-ATCR images (default: false)
|
||||
|
||||
**Example:**
|
||||
```bash
|
||||
# Skip verification for non-ATCR images
|
||||
SKIP_ATCR_IMAGES=true ./verify-and-pull.sh docker.io/library/nginx:latest
|
||||
|
||||
# Allow pulling even if verification fails (NOT RECOMMENDED)
|
||||
REQUIRE_VERIFICATION=false ./verify-and-pull.sh atcr.io/alice/myapp:latest
|
||||
```
|
||||
|
||||
### Trust Policy
|
||||
|
||||
Edit `trust-policy.yaml` to customize:
|
||||
|
||||
1. **Add your DIDs:**
|
||||
```yaml
|
||||
trustedDIDs:
|
||||
did:plc:your-did:
|
||||
name: "Your Name"
|
||||
validFrom: "2024-01-01T00:00:00Z"
|
||||
```
|
||||
|
||||
2. **Define policies:**
|
||||
```yaml
|
||||
policies:
|
||||
- name: my-policy
|
||||
scope: "atcr.io/myorg/*"
|
||||
require:
|
||||
signature: true
|
||||
trustedDIDs:
|
||||
- did:plc:your-did
|
||||
action: enforce
|
||||
```
|
||||
|
||||
3. **Use with verification:**
|
||||
```bash
|
||||
# When atcr-verify CLI is available:
|
||||
atcr-verify IMAGE --policy trust-policy.yaml
|
||||
```
|
||||
|
||||
## Integration Patterns
|
||||
|
||||
### CI/CD (GitHub Actions)
|
||||
|
||||
```yaml
|
||||
- name: Verify image signature
|
||||
run: |
|
||||
chmod +x examples/verification/atcr-verify.sh
|
||||
./examples/verification/atcr-verify.sh ${{ env.IMAGE }}
|
||||
|
||||
- name: Deploy if verified
|
||||
if: success()
|
||||
run: kubectl set image deployment/app app=${{ env.IMAGE }}
|
||||
```
|
||||
|
||||
### CI/CD (GitLab CI)
|
||||
|
||||
```yaml
|
||||
verify:
|
||||
script:
|
||||
- chmod +x examples/verification/atcr-verify.sh
|
||||
- ./examples/verification/atcr-verify.sh $IMAGE
|
||||
|
||||
deploy:
|
||||
dependencies: [verify]
|
||||
script:
|
||||
- kubectl set image deployment/app app=$IMAGE
|
||||
```
|
||||
|
||||
### Docker Alias
|
||||
|
||||
```bash
|
||||
# ~/.bashrc or ~/.zshrc
|
||||
function docker() {
|
||||
if [ "$1" = "pull" ] && [[ "$2" =~ ^atcr\.io/ ]]; then
|
||||
echo "Using secure pull with signature verification..."
|
||||
/path/to/verify-and-pull.sh "${@:2}"
|
||||
else
|
||||
command docker "$@"
|
||||
fi
|
||||
}
|
||||
```
|
||||
|
||||
### Systemd Service
|
||||
|
||||
```ini
|
||||
# /etc/systemd/system/myapp.service
|
||||
[Unit]
|
||||
Description=My Application
|
||||
After=docker.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStartPre=/path/to/verify-and-pull.sh atcr.io/myorg/myapp:latest
|
||||
ExecStart=/usr/bin/docker run atcr.io/myorg/myapp:latest
|
||||
Restart=on-failure
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "No ATProto signature found"
|
||||
|
||||
**Cause:** Image doesn't have a signature artifact
|
||||
|
||||
**Solutions:**
|
||||
1. Check if image exists: `crane digest IMAGE`
|
||||
2. Re-push image to generate signature
|
||||
3. Verify referrers API is working:
|
||||
```bash
|
||||
curl "https://atcr.io/v2/REPO/referrers/DIGEST"
|
||||
```
|
||||
|
||||
### "Failed to resolve DID"
|
||||
|
||||
**Cause:** DID resolution failed
|
||||
|
||||
**Solutions:**
|
||||
1. Check internet connectivity
|
||||
2. Verify DID is valid: `curl https://plc.directory/DID`
|
||||
3. Check if DID document has verificationMethod
|
||||
|
||||
### "Failed to fetch record from PDS"
|
||||
|
||||
**Cause:** PDS is unreachable or record doesn't exist
|
||||
|
||||
**Solutions:**
|
||||
1. Check PDS endpoint: `curl PDS_URL/xrpc/com.atproto.server.describeServer`
|
||||
2. Verify record URI is correct
|
||||
3. Check if record exists in PDS
|
||||
|
||||
### Webhook Pods Don't Start
|
||||
|
||||
**Cause:** Webhook is rejecting all pods
|
||||
|
||||
**Solutions:**
|
||||
1. Check webhook logs: `kubectl logs -n atcr-system -l app=atcr-verify-webhook`
|
||||
2. Disable webhook temporarily: `kubectl delete validatingwebhookconfiguration atcr-verify`
|
||||
3. Fix issue and re-deploy
|
||||
4. Test with labeled namespace first
|
||||
|
||||
## Security Best Practices
|
||||
|
||||
1. **Always verify in production**
|
||||
- Enable webhook for production namespaces
|
||||
- Set `failurePolicy: Fail` to block on errors
|
||||
|
||||
2. **Use trust policies**
|
||||
- Define specific trusted DIDs
|
||||
- Don't trust all signatures blindly
|
||||
- Set expiration dates for temporary access
|
||||
|
||||
3. **Monitor verification**
|
||||
- Enable audit logging
|
||||
- Review verification failures
|
||||
- Track signature coverage
|
||||
|
||||
4. **Rotate keys regularly**
|
||||
- Update DID documents when keys change
|
||||
- Revoke compromised keys immediately
|
||||
- Monitor for unexpected key changes
|
||||
|
||||
5. **Secure webhook deployment**
|
||||
- Use TLS for webhook communication
|
||||
- Restrict webhook RBAC permissions
|
||||
- Keep webhook image updated
|
||||
|
||||
## Next Steps
|
||||
|
||||
1. **Test verification** with your images
|
||||
2. **Customize trust policy** for your organization
|
||||
3. **Deploy webhook** to test clusters first
|
||||
4. **Monitor** verification in CI/CD pipelines
|
||||
5. **Gradually roll out** to production
|
||||
|
||||
## See Also
|
||||
|
||||
- [ATProto Signatures](../../docs/ATPROTO_SIGNATURES.md) - Technical details
|
||||
- [Signature Integration](../../docs/SIGNATURE_INTEGRATION.md) - Integration guide
|
||||
- [SBOM Scanning](../../docs/SBOM_SCANNING.md) - Similar ORAS pattern
|
||||
|
||||
## Support
|
||||
|
||||
For issues or questions:
|
||||
- GitHub Issues: https://github.com/your-org/atcr/issues
|
||||
- Documentation: https://docs.atcr.io
|
||||
- Security: security@yourorg.com
|
||||
@@ -1,243 +0,0 @@
|
||||
#!/bin/bash
|
||||
# ATProto Signature Verification Script
|
||||
#
|
||||
# This script verifies ATProto signatures for container images stored in ATCR.
|
||||
# It performs all steps except full cryptographic verification (which requires
|
||||
# the indigo library). For production use, use the atcr-verify CLI tool.
|
||||
#
|
||||
# Usage: ./atcr-verify.sh IMAGE_REF
|
||||
# Example: ./atcr-verify.sh atcr.io/alice/myapp:latest
|
||||
#
|
||||
# Requirements:
|
||||
# - curl
|
||||
# - jq
|
||||
# - crane (https://github.com/google/go-containerregistry/releases)
|
||||
# - oras (https://oras.land/docs/installation)
|
||||
|
||||
set -e
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Verify that every external tool this script shells out to is installed.
# Prints one red error line per missing tool; when anything is missing it
# also prints install hints and exits 1.
check_dependencies() {
    local missing=0
    local cmd

    for cmd in curl jq crane oras; do
        command -v "$cmd" > /dev/null 2>&1 && continue
        echo -e "${RED}✗${NC} Missing dependency: $cmd"
        missing=1
    done

    if [ "$missing" -eq 1 ]; then
        echo ""
        echo "Install missing dependencies:"
        echo "  curl:  https://curl.se/download.html"
        echo "  jq:    https://stedolan.github.io/jq/download/"
        echo "  crane: https://github.com/google/go-containerregistry/releases"
        echo "  oras:  https://oras.land/docs/installation"
        exit 1
    fi
}
|
||||
|
||||
# Colored console output helpers. Color variables (RED/GREEN/YELLOW/BLUE/NC)
# are defined at the top of the script.

# print_step N LABEL — blue "[N/TOTAL_STEPS] LABEL..." progress line.
# Relies on TOTAL_STEPS being set by the caller before the first step.
print_step() { echo -e "${BLUE}[$1/${TOTAL_STEPS}]${NC} $2..."; }

# print_success MSG — indented green arrow line.
print_success() { echo -e "  ${GREEN}→${NC} $1"; }

# print_error MSG — indented red cross line.
print_error() { echo -e "  ${RED}✗${NC} $1"; }

# print_warning MSG — indented yellow warning line.
print_warning() { echo -e "  ${YELLOW}⚠${NC} $1"; }
|
||||
|
||||
# verify_image IMAGE_REF
#
# Runs the 7-step ATProto signature verification pipeline against IMAGE_REF:
#   1. resolve the image digest (crane)
#   2. discover signature artifacts via the OCI referrers API
#   3. pull the signature metadata artifact (oras)
#   4. resolve the signer's DID document to a public key (plc.directory)
#   5. fetch the signed record from the signer's PDS
#   6. check the record's digest against the image digest
#   7. note that full K-256 crypto verification requires the atcr-verify CLI
#
# Exits non-zero as soon as any step fails.
#
# Fix: the script runs under `set -e`, so the original pattern
# `VAR=$(cmd); if [ $? -ne 0 ]` aborted on the failing assignment before the
# diagnostic branch could run. Failure checks now use `if ! VAR=$(cmd)` so the
# intended error messages are actually printed. curl calls use -fsS so HTTP
# error pages fail the step instead of being fed to jq.
verify_image() {
    local image="$1"

    if [ -z "$image" ]; then
        echo "Usage: $0 IMAGE_REF"
        echo "Example: $0 atcr.io/alice/myapp:latest"
        exit 1
    fi

    TOTAL_STEPS=7

    echo ""
    echo "═══════════════════════════════════════════════════"
    echo "  ATProto Signature Verification"
    echo "═══════════════════════════════════════════════════"
    echo "  Image: $image"
    echo "═══════════════════════════════════════════════════"
    echo ""

    # Step 1: Resolve image digest.
    print_step 1 "Resolving image digest"
    if ! DIGEST=$(crane digest "$image" 2>&1); then
        print_error "Failed to resolve image digest"
        echo "$DIGEST"
        exit 1
    fi
    print_success "$DIGEST"

    # Split the reference into registry host and repository path,
    # stripping any :tag suffix (the digest above is what we verify).
    REGISTRY=$(echo "$image" | cut -d/ -f1)
    REPO=$(echo "$image" | cut -d/ -f2-)
    REPO_PATH=$(echo "$REPO" | cut -d: -f1)

    # Step 2: Discover ATProto signature artifacts via the referrers API.
    print_step 2 "Discovering ATProto signature artifacts"
    REFERRERS_URL="https://${REGISTRY}/v2/${REPO_PATH}/referrers/${DIGEST}?artifactType=application/vnd.atproto.signature.v1+json"

    if ! SIG_ARTIFACTS=$(curl -fsS -H "Accept: application/vnd.oci.image.index.v1+json" "$REFERRERS_URL"); then
        print_error "Failed to query referrers API"
        exit 1
    fi

    # `.manifests | length` is 0 for both an empty list and a missing key.
    SIG_COUNT=$(echo "$SIG_ARTIFACTS" | jq '.manifests | length')
    if [ "$SIG_COUNT" = "0" ]; then
        print_error "No ATProto signature found"
        echo ""
        echo "This image does not have an ATProto signature."
        echo "Signatures are automatically created when you push to ATCR."
        exit 1
    fi

    print_success "Found $SIG_COUNT signature(s)"

    # Use the first signature artifact; its annotations carry the signer DID.
    SIG_DIGEST=$(echo "$SIG_ARTIFACTS" | jq -r '.manifests[0].digest')
    SIG_DID=$(echo "$SIG_ARTIFACTS" | jq -r '.manifests[0].annotations["io.atcr.atproto.did"]')
    print_success "Signature digest: $SIG_DIGEST"
    print_success "Signed by DID: $SIG_DID"

    # Step 3: Fetch signature metadata.
    print_step 3 "Fetching signature metadata"

    TMPDIR=$(mktemp -d)
    # Single quotes defer expansion to trap time; the inner quotes keep
    # paths containing spaces intact.
    trap 'rm -rf "$TMPDIR"' EXIT

    if ! oras pull "${REGISTRY}/${REPO_PATH}@${SIG_DIGEST}" -o "$TMPDIR" --quiet 2>&1; then
        print_error "Failed to fetch signature metadata"
        exit 1
    fi

    # The artifact contains a single JSON metadata file; locate it.
    SIG_FILE=$(find "$TMPDIR" -name "*.json" -type f | head -n 1)
    if [ -z "$SIG_FILE" ]; then
        print_error "Signature metadata file not found"
        exit 1
    fi

    DID=$(jq -r '.atproto.did' "$SIG_FILE")
    HANDLE=$(jq -r '.atproto.handle // "unknown"' "$SIG_FILE")
    PDS=$(jq -r '.atproto.pdsEndpoint' "$SIG_FILE")
    RECORD_URI=$(jq -r '.atproto.recordUri' "$SIG_FILE")
    COMMIT_CID=$(jq -r '.atproto.commitCid' "$SIG_FILE")
    SIGNED_AT=$(jq -r '.atproto.signedAt' "$SIG_FILE")

    print_success "DID: $DID"
    print_success "Handle: $HANDLE"
    print_success "PDS: $PDS"
    print_success "Record: $RECORD_URI"
    print_success "Signed at: $SIGNED_AT"

    # Step 4: Resolve DID to public key.
    print_step 4 "Resolving DID to public key"

    if ! DID_DOC=$(curl -fsS "https://plc.directory/$DID"); then
        print_error "Failed to resolve DID"
        exit 1
    fi

    PUB_KEY_MB=$(echo "$DID_DOC" | jq -r '.verificationMethod[0].publicKeyMultibase')
    if [ "$PUB_KEY_MB" = "null" ] || [ -z "$PUB_KEY_MB" ]; then
        print_error "Public key not found in DID document"
        exit 1
    fi

    print_success "Public key: ${PUB_KEY_MB:0:20}...${PUB_KEY_MB: -10}"

    # Step 5: Query PDS for signed record.
    print_step 5 "Querying PDS for signed record"

    # Record URI has the shape at://did/collection/rkey.
    COLLECTION=$(echo "$RECORD_URI" | sed 's|at://[^/]*/\([^/]*\)/.*|\1|')
    RKEY=$(echo "$RECORD_URI" | sed 's|at://.*/||')

    RECORD_URL="${PDS}/xrpc/com.atproto.repo.getRecord?repo=${DID}&collection=${COLLECTION}&rkey=${RKEY}"
    if ! RECORD=$(curl -fsS "$RECORD_URL"); then
        print_error "Failed to fetch record from PDS"
        exit 1
    fi

    RECORD_CID=$(echo "$RECORD" | jq -r '.cid')
    if [ "$RECORD_CID" = "null" ] || [ -z "$RECORD_CID" ]; then
        print_error "Record not found in PDS"
        exit 1
    fi

    print_success "Record CID: $RECORD_CID"

    # Step 6: The signed record must reference exactly this image digest.
    print_step 6 "Verifying record integrity"

    RECORD_DIGEST=$(echo "$RECORD" | jq -r '.value.digest')
    if [ "$RECORD_DIGEST" != "$DIGEST" ]; then
        print_error "Record digest ($RECORD_DIGEST) doesn't match image digest ($DIGEST)"
        exit 1
    fi

    print_success "Record digest matches image digest"

    # Step 7: Signature verification status. Full K-256 verification needs
    # the ATProto crypto library and is delegated to the atcr-verify CLI.
    print_step 7 "Cryptographic signature verification"

    print_warning "Full cryptographic verification requires ATProto crypto library"
    print_warning "This script verifies:"
    echo "  • Record exists in PDS"
    echo "  • DID resolved successfully"
    echo "  • Public key retrieved from DID document"
    echo "  • Record digest matches image digest"
    echo ""
    print_warning "For full cryptographic verification, use: atcr-verify $image"

    # Summary
    echo ""
    echo "═══════════════════════════════════════════════════"
    echo -e "  ${GREEN}✓ Verification Completed${NC}"
    echo "═══════════════════════════════════════════════════"
    echo ""
    echo "  Signed by:  $HANDLE ($DID)"
    echo "  Signed at:  $SIGNED_AT"
    echo "  PDS:        $PDS"
    echo "  Record:     $RECORD_URI"
    echo "  Signature:  $SIG_DIGEST"
    echo ""
    echo "═══════════════════════════════════════════════════"
    echo ""
}
|
||||
|
||||
# Entry point: fail fast on missing tooling, then verify the image
# reference given as the first CLI argument.
check_dependencies
verify_image "$1"
|
||||
@@ -1,259 +0,0 @@
|
||||
# Kubernetes Admission Webhook for ATProto Signature Verification
|
||||
#
|
||||
# This example shows how to deploy a validating admission webhook that
|
||||
# verifies ATProto signatures before allowing pods to be created.
|
||||
#
|
||||
# Prerequisites:
|
||||
# 1. Build and push the webhook image (see examples/webhook/ for code)
|
||||
# 2. Generate TLS certificates for the webhook
|
||||
# 3. Create trust policy ConfigMap
|
||||
#
|
||||
# Usage:
|
||||
# kubectl apply -f kubernetes-webhook.yaml
|
||||
# kubectl label namespace production atcr-verify=enabled
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: atcr-system
|
||||
---
|
||||
# ConfigMap with trust policy
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: atcr-trust-policy
|
||||
namespace: atcr-system
|
||||
data:
|
||||
policy.yaml: |
|
||||
version: 1.0
|
||||
|
||||
# Global settings
|
||||
defaultAction: enforce # enforce, audit, or allow
|
||||
|
||||
# Policies by image pattern
|
||||
policies:
|
||||
- name: production-images
|
||||
scope: "atcr.io/*/prod-*"
|
||||
require:
|
||||
signature: true
|
||||
trustedDIDs:
|
||||
- did:plc:your-org-devops
|
||||
- did:plc:your-org-security
|
||||
minSignatures: 1
|
||||
action: enforce
|
||||
|
||||
- name: staging-images
|
||||
scope: "atcr.io/*/staging-*"
|
||||
require:
|
||||
signature: true
|
||||
trustedDIDs:
|
||||
- did:plc:your-org-devops
|
||||
- did:plc:your-org-security
|
||||
- did:plc:your-developers
|
||||
action: enforce
|
||||
|
||||
- name: dev-images
|
||||
scope: "atcr.io/*/dev-*"
|
||||
require:
|
||||
signature: false
|
||||
action: audit # Log but don't block
|
||||
|
||||
# Trusted DIDs configuration
|
||||
trustedDIDs:
|
||||
did:plc:your-org-devops:
|
||||
name: "DevOps Team"
|
||||
validFrom: "2024-01-01T00:00:00Z"
|
||||
expiresAt: null
|
||||
|
||||
did:plc:your-org-security:
|
||||
name: "Security Team"
|
||||
validFrom: "2024-01-01T00:00:00Z"
|
||||
expiresAt: null
|
||||
|
||||
did:plc:your-developers:
|
||||
name: "Developer Team"
|
||||
validFrom: "2024-06-01T00:00:00Z"
|
||||
expiresAt: null
|
||||
---
|
||||
# Service for webhook
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: atcr-verify-webhook
|
||||
namespace: atcr-system
|
||||
spec:
|
||||
selector:
|
||||
app: atcr-verify-webhook
|
||||
ports:
|
||||
- name: https
|
||||
port: 443
|
||||
targetPort: 8443
|
||||
---
|
||||
# Deployment for webhook
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: atcr-verify-webhook
|
||||
namespace: atcr-system
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: atcr-verify-webhook
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: atcr-verify-webhook
|
||||
spec:
|
||||
containers:
|
||||
- name: webhook
|
||||
image: atcr.io/atcr/verify-webhook:latest
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
name: https
|
||||
env:
|
||||
- name: TLS_CERT_FILE
|
||||
value: /etc/webhook/certs/tls.crt
|
||||
- name: TLS_KEY_FILE
|
||||
value: /etc/webhook/certs/tls.key
|
||||
- name: POLICY_FILE
|
||||
value: /etc/webhook/policy/policy.yaml
|
||||
- name: LOG_LEVEL
|
||||
value: info
|
||||
volumeMounts:
|
||||
- name: webhook-certs
|
||||
mountPath: /etc/webhook/certs
|
||||
readOnly: true
|
||||
- name: policy
|
||||
mountPath: /etc/webhook/policy
|
||||
readOnly: true
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "256Mi"
|
||||
cpu: "500m"
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8443
|
||||
scheme: HTTPS
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: 8443
|
||||
scheme: HTTPS
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
volumes:
|
||||
- name: webhook-certs
|
||||
secret:
|
||||
secretName: atcr-verify-webhook-certs
|
||||
- name: policy
|
||||
configMap:
|
||||
name: atcr-trust-policy
|
||||
---
|
||||
# ValidatingWebhookConfiguration
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: ValidatingWebhookConfiguration
|
||||
metadata:
|
||||
name: atcr-verify
|
||||
webhooks:
|
||||
- name: verify.atcr.io
|
||||
admissionReviewVersions: ["v1", "v1beta1"]
|
||||
sideEffects: None
|
||||
|
||||
# Client configuration
|
||||
clientConfig:
|
||||
service:
|
||||
name: atcr-verify-webhook
|
||||
namespace: atcr-system
|
||||
path: /validate
|
||||
port: 443
|
||||
# CA bundle for webhook TLS (base64-encoded CA cert)
|
||||
# Generate with: cat ca.crt | base64 -w 0
|
||||
caBundle: LS0tLS1CRUdJTi... # Replace with your CA bundle
|
||||
|
||||
# Rules - what to validate
|
||||
rules:
|
||||
- operations: ["CREATE", "UPDATE"]
|
||||
apiGroups: [""]
|
||||
apiVersions: ["v1"]
|
||||
resources: ["pods"]
|
||||
scope: "Namespaced"
|
||||
|
||||
# Namespace selector - only validate labeled namespaces
|
||||
namespaceSelector:
|
||||
matchExpressions:
|
||||
- key: atcr-verify
|
||||
operator: In
|
||||
values: ["enabled", "enforce"]
|
||||
|
||||
# Failure policy - what to do if webhook fails
|
||||
failurePolicy: Fail # Reject pods if webhook is unavailable
|
||||
|
||||
# Timeout
|
||||
timeoutSeconds: 10
|
||||
|
||||
# Match policy
|
||||
matchPolicy: Equivalent
|
||||
---
|
||||
# Example: Label a namespace to enable verification
|
||||
# kubectl label namespace production atcr-verify=enabled
|
||||
---
|
||||
# RBAC for webhook
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: atcr-verify-webhook
|
||||
namespace: atcr-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: atcr-verify-webhook
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["create", "patch"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: atcr-verify-webhook
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: atcr-verify-webhook
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: atcr-verify-webhook
|
||||
namespace: atcr-system
|
||||
---
|
||||
# Secret for TLS certificates
|
||||
# Generate certificates with:
|
||||
# openssl req -x509 -newkey rsa:4096 -keyout tls.key -out tls.crt \
|
||||
# -days 365 -nodes -subj "/CN=atcr-verify-webhook.atcr-system.svc"
|
||||
#
|
||||
# Create secret with:
|
||||
# kubectl create secret tls atcr-verify-webhook-certs \
|
||||
# --cert=tls.crt --key=tls.key -n atcr-system
|
||||
#
|
||||
# (Commented out - create manually with your certs)
|
||||
# apiVersion: v1
|
||||
# kind: Secret
|
||||
# metadata:
|
||||
# name: atcr-verify-webhook-certs
|
||||
# namespace: atcr-system
|
||||
# type: kubernetes.io/tls
|
||||
# data:
|
||||
# tls.crt: <base64-encoded-cert>
|
||||
# tls.key: <base64-encoded-key>
|
||||
@@ -1,247 +0,0 @@
|
||||
# ATProto Signature Trust Policy
|
||||
#
|
||||
# This file defines which signatures to trust and what to do when
|
||||
# signatures are invalid or missing.
|
||||
#
|
||||
# Usage with atcr-verify:
|
||||
# atcr-verify IMAGE --policy trust-policy.yaml
|
||||
|
||||
version: 1.0
|
||||
|
||||
# Global settings
|
||||
defaultAction: enforce # Options: enforce, audit, allow
|
||||
requireSignature: true # Require at least one signature
|
||||
|
||||
# Policies matched by image scope (first match wins)
|
||||
policies:
|
||||
# Production images require signatures from trusted DIDs
|
||||
- name: production-images
|
||||
description: "Production images must be signed by DevOps or Security team"
|
||||
scope: "atcr.io/*/prod-*"
|
||||
require:
|
||||
signature: true
|
||||
trustedDIDs:
|
||||
- did:plc:your-org-devops
|
||||
- did:plc:your-org-security
|
||||
minSignatures: 1
|
||||
maxAge: 2592000 # 30 days in seconds
|
||||
action: enforce # Reject if policy fails
|
||||
|
||||
# Critical infrastructure requires multi-signature
|
||||
- name: critical-infrastructure
|
||||
description: "Critical services require 2 signatures"
|
||||
scope: "atcr.io/*/critical-*"
|
||||
require:
|
||||
signature: true
|
||||
trustedDIDs:
|
||||
- did:plc:your-org-security
|
||||
- did:plc:your-org-devops
|
||||
minSignatures: 2 # Require at least 2 signatures
|
||||
algorithms:
|
||||
- ECDSA-K256-SHA256 # Only allow specific algorithms
|
||||
action: enforce
|
||||
|
||||
# Staging images require signature from any team member
|
||||
- name: staging-images
|
||||
description: "Staging images need any trusted signature"
|
||||
scope: "atcr.io/*/staging-*"
|
||||
require:
|
||||
signature: true
|
||||
trustedDIDs:
|
||||
- did:plc:your-org-devops
|
||||
- did:plc:your-org-security
|
||||
- did:plc:your-org-developers
|
||||
minSignatures: 1
|
||||
action: enforce
|
||||
|
||||
# Development images are audited but not blocked
|
||||
- name: dev-images
|
||||
description: "Development images are monitored"
|
||||
scope: "atcr.io/*/dev-*"
|
||||
require:
|
||||
signature: false # Don't require signatures
|
||||
action: audit # Log but don't reject
|
||||
|
||||
# Test images from external sources
|
||||
- name: external-test-images
|
||||
description: "Test images from partners"
|
||||
scope: "atcr.io/external/*"
|
||||
require:
|
||||
signature: true
|
||||
trustedDIDs:
|
||||
- did:plc:partner-acme
|
||||
- did:plc:partner-widgets
|
||||
minSignatures: 1
|
||||
action: enforce
|
||||
|
||||
# Default fallback for all other images
|
||||
- name: default
|
||||
description: "All other images require signature"
|
||||
scope: "atcr.io/*/*"
|
||||
require:
|
||||
signature: true
|
||||
minSignatures: 1
|
||||
action: enforce
|
||||
|
||||
# Trusted DID registry
|
||||
trustedDIDs:
|
||||
# Your organization's DevOps team
|
||||
did:plc:your-org-devops:
|
||||
name: "DevOps Team"
|
||||
description: "Production deployment automation"
|
||||
validFrom: "2024-01-01T00:00:00Z"
|
||||
expiresAt: null # Never expires
|
||||
contact: "devops@yourorg.com"
|
||||
allowedScopes:
|
||||
- "atcr.io/*/prod-*"
|
||||
- "atcr.io/*/staging-*"
|
||||
- "atcr.io/*/critical-*"
|
||||
|
||||
# Your organization's Security team
|
||||
did:plc:your-org-security:
|
||||
name: "Security Team"
|
||||
description: "Security-reviewed images"
|
||||
validFrom: "2024-01-01T00:00:00Z"
|
||||
expiresAt: null
|
||||
contact: "security@yourorg.com"
|
||||
allowedScopes:
|
||||
- "atcr.io/*/*" # Can sign any image
|
||||
|
||||
# Developer team (limited access)
|
||||
did:plc:your-org-developers:
|
||||
name: "Developer Team"
|
||||
description: "Development and staging images"
|
||||
validFrom: "2024-06-01T00:00:00Z"
|
||||
expiresAt: "2025-12-31T23:59:59Z" # Temporary access
|
||||
contact: "dev-team@yourorg.com"
|
||||
allowedScopes:
|
||||
- "atcr.io/*/dev-*"
|
||||
- "atcr.io/*/staging-*"
|
||||
notes: "Access expires end of 2025 - review then"
|
||||
|
||||
# External partner: ACME Corp
|
||||
did:plc:partner-acme:
|
||||
name: "ACME Corp Integration Team"
|
||||
description: "Third-party integration images"
|
||||
validFrom: "2024-09-01T00:00:00Z"
|
||||
expiresAt: "2025-09-01T00:00:00Z"
|
||||
contact: "integration@acme.example.com"
|
||||
allowedScopes:
|
||||
- "atcr.io/external/acme-*"
|
||||
|
||||
# External partner: Widgets Inc
|
||||
did:plc:partner-widgets:
|
||||
name: "Widgets Inc"
|
||||
description: "Widgets service integration"
|
||||
validFrom: "2024-10-01T00:00:00Z"
|
||||
expiresAt: "2025-10-01T00:00:00Z"
|
||||
contact: "api@widgets.example.com"
|
||||
allowedScopes:
|
||||
- "atcr.io/external/widgets-*"
|
||||
|
||||
# Signature validation settings
|
||||
validation:
|
||||
# Signature age limits
|
||||
maxSignatureAge: 7776000 # 90 days in seconds (null = no limit)
|
||||
|
||||
# Allowed signature algorithms
|
||||
allowedAlgorithms:
|
||||
- ECDSA-K256-SHA256 # ATProto default
|
||||
- ECDSA-P256-SHA256 # Alternative
|
||||
|
||||
# DID resolution settings
|
||||
didResolver:
|
||||
timeout: 10 # seconds
|
||||
cache:
|
||||
enabled: true
|
||||
ttl: 3600 # 1 hour in seconds
|
||||
fallbackResolvers:
|
||||
- https://plc.directory
|
||||
- https://backup-plc.example.com
|
||||
|
||||
# PDS connection settings
|
||||
pds:
|
||||
timeout: 15 # seconds
|
||||
retries: 3
|
||||
cache:
|
||||
enabled: true
|
||||
ttl: 600 # 10 minutes
|
||||
|
||||
# Audit logging
|
||||
audit:
|
||||
enabled: true
|
||||
logLevel: info # debug, info, warn, error
|
||||
|
||||
# What to log
|
||||
logEvents:
|
||||
- signature_verified
|
||||
- signature_missing
|
||||
- signature_invalid
|
||||
- signature_expired
|
||||
- did_resolution_failed
|
||||
- pds_query_failed
|
||||
- policy_violation
|
||||
|
||||
# Log destinations
|
||||
destinations:
|
||||
- type: stdout
|
||||
format: json
|
||||
- type: file
|
||||
path: /var/log/atcr-verify/audit.log
|
||||
format: json
|
||||
rotate: true
|
||||
maxSize: 100MB
|
||||
maxFiles: 10
|
||||
|
||||
# Reporting and metrics
|
||||
reporting:
|
||||
# Prometheus metrics
|
||||
metrics:
|
||||
enabled: true
|
||||
port: 9090
|
||||
path: /metrics
|
||||
|
||||
# Periodic reports
|
||||
reports:
|
||||
enabled: true
|
||||
interval: 86400 # Daily in seconds
|
||||
email:
|
||||
- security@yourorg.com
|
||||
- devops@yourorg.com
|
||||
includeStatistics: true
|
||||
|
||||
# Emergency overrides
|
||||
overrides:
|
||||
# Allow bypassing verification in emergencies
|
||||
enabled: false # Enable with extreme caution!
|
||||
requireApproval: true
|
||||
approvers:
|
||||
- security@yourorg.com
|
||||
validDuration: 3600 # Override valid for 1 hour
|
||||
|
||||
# Examples of policy evaluation:
|
||||
#
|
||||
# atcr.io/myorg/prod-api:v1.2.3
|
||||
# → Matches: production-images
|
||||
# → Requires: 1 signature from DevOps or Security
|
||||
# → Action: enforce
|
||||
#
|
||||
# atcr.io/myorg/critical-auth:v2.0.0
|
||||
# → Matches: critical-infrastructure
|
||||
# → Requires: 2 signatures from Security and DevOps
|
||||
# → Action: enforce
|
||||
#
|
||||
# atcr.io/myorg/staging-frontend:latest
|
||||
# → Matches: staging-images
|
||||
# → Requires: 1 signature from any team member
|
||||
# → Action: enforce
|
||||
#
|
||||
# atcr.io/myorg/dev-experiment:test
|
||||
# → Matches: dev-images
|
||||
# → Requires: none
|
||||
# → Action: audit (log only)
|
||||
#
|
||||
# atcr.io/external/acme-connector:v1.0
|
||||
# → Matches: external-test-images
|
||||
# → Requires: 1 signature from partner-acme
|
||||
# → Action: enforce
|
||||
@@ -1,162 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Verify and Pull Script
|
||||
#
|
||||
# This script verifies ATProto signatures before pulling images with Docker.
|
||||
# It acts as a wrapper around `docker pull` to enforce signature verification.
|
||||
#
|
||||
# Usage: ./verify-and-pull.sh IMAGE [DOCKER_PULL_OPTIONS]
|
||||
# Example: ./verify-and-pull.sh atcr.io/alice/myapp:latest
|
||||
# Example: ./verify-and-pull.sh atcr.io/alice/myapp:latest --platform linux/amd64
|
||||
#
|
||||
# To use this as a replacement for docker pull, create an alias:
|
||||
# alias docker-pull-secure='/path/to/verify-and-pull.sh'
|
||||
|
||||
set -e
|
||||
|
||||
# Configuration
|
||||
VERIFY_SCRIPT="${VERIFY_SCRIPT:-$(dirname $0)/atcr-verify.sh}"
|
||||
TRUST_POLICY="${TRUST_POLICY:-$(dirname $0)/trust-policy.yaml}"
|
||||
REQUIRE_VERIFICATION="${REQUIRE_VERIFICATION:-true}"
|
||||
SKIP_ATCR_IMAGES="${SKIP_ATCR_IMAGES:-false}" # Skip verification for non-ATCR images
|
||||
|
||||
# Colors
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m'
|
||||
|
||||
print_header() {
|
||||
echo ""
|
||||
echo -e "${BLUE}═══════════════════════════════════════════════════${NC}"
|
||||
echo -e "${BLUE} Secure Image Pull with Signature Verification${NC}"
|
||||
echo -e "${BLUE}═══════════════════════════════════════════════════${NC}"
|
||||
echo ""
|
||||
}
|
||||
|
||||
print_success() {
|
||||
echo -e "${GREEN}✓${NC} $1"
|
||||
}
|
||||
|
||||
print_error() {
|
||||
echo -e "${RED}✗${NC} $1"
|
||||
}
|
||||
|
||||
print_warning() {
|
||||
echo -e "${YELLOW}⚠${NC} $1"
|
||||
}
|
||||
|
||||
# Check if image is from ATCR
|
||||
is_atcr_image() {
|
||||
local image="$1"
|
||||
if [[ "$image" =~ ^atcr\.io/ ]]; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Main function
|
||||
main() {
|
||||
if [ $# -eq 0 ]; then
|
||||
echo "Usage: $0 IMAGE [DOCKER_PULL_OPTIONS]"
|
||||
echo ""
|
||||
echo "Examples:"
|
||||
echo " $0 atcr.io/alice/myapp:latest"
|
||||
echo " $0 atcr.io/alice/myapp:latest --platform linux/amd64"
|
||||
echo ""
|
||||
echo "Environment variables:"
|
||||
echo " VERIFY_SCRIPT - Path to verification script (default: ./atcr-verify.sh)"
|
||||
echo " TRUST_POLICY - Path to trust policy (default: ./trust-policy.yaml)"
|
||||
echo " REQUIRE_VERIFICATION - Require verification for ATCR images (default: true)"
|
||||
echo " SKIP_ATCR_IMAGES - Skip verification for non-ATCR images (default: false)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
local image="$1"
|
||||
shift
|
||||
local docker_args="$@"
|
||||
|
||||
print_header
|
||||
|
||||
echo -e "${BLUE}Image:${NC} $image"
|
||||
if [ -n "$docker_args" ]; then
|
||||
echo -e "${BLUE}Docker options:${NC} $docker_args"
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Check if this is an ATCR image
|
||||
if ! is_atcr_image "$image"; then
|
||||
if [ "$SKIP_ATCR_IMAGES" = "true" ]; then
|
||||
print_warning "Not an ATCR image - skipping signature verification"
|
||||
echo ""
|
||||
docker pull $docker_args "$image"
|
||||
exit $?
|
||||
else
|
||||
print_warning "Not an ATCR image"
|
||||
if [ "$REQUIRE_VERIFICATION" = "true" ]; then
|
||||
print_error "Verification required but image is not from ATCR"
|
||||
exit 1
|
||||
else
|
||||
print_warning "Proceeding without verification"
|
||||
echo ""
|
||||
docker pull $docker_args "$image"
|
||||
exit $?
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Step 1: Verify signature
|
||||
echo -e "${BLUE}Step 1: Verifying ATProto signature${NC}"
|
||||
echo ""
|
||||
|
||||
if [ ! -f "$VERIFY_SCRIPT" ]; then
|
||||
print_error "Verification script not found: $VERIFY_SCRIPT"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Run verification
|
||||
if bash "$VERIFY_SCRIPT" "$image"; then
|
||||
print_success "Signature verification passed"
|
||||
echo ""
|
||||
else
|
||||
print_error "Signature verification failed"
|
||||
echo ""
|
||||
|
||||
if [ "$REQUIRE_VERIFICATION" = "true" ]; then
|
||||
echo -e "${RED}Image pull blocked due to failed signature verification${NC}"
|
||||
echo ""
|
||||
echo "To proceed anyway (NOT RECOMMENDED), run:"
|
||||
echo " REQUIRE_VERIFICATION=false $0 $image $docker_args"
|
||||
exit 1
|
||||
else
|
||||
print_warning "Verification failed but REQUIRE_VERIFICATION=false"
|
||||
print_warning "Proceeding with pull (NOT RECOMMENDED)"
|
||||
echo ""
|
||||
fi
|
||||
fi
|
||||
|
||||
# Step 2: Pull image
|
||||
echo -e "${BLUE}Step 2: Pulling image${NC}"
|
||||
echo ""
|
||||
|
||||
if docker pull $docker_args "$image"; then
|
||||
print_success "Image pulled successfully"
|
||||
else
|
||||
print_error "Failed to pull image"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Summary
|
||||
echo ""
|
||||
echo -e "${GREEN}═══════════════════════════════════════════════════${NC}"
|
||||
echo -e "${GREEN} ✓ Secure pull completed successfully${NC}"
|
||||
echo -e "${GREEN}═══════════════════════════════════════════════════${NC}"
|
||||
echo ""
|
||||
echo -e "${BLUE}Image:${NC} $image"
|
||||
echo -e "${BLUE}Status:${NC} Verified and pulled"
|
||||
echo ""
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
322
go.mod
322
go.mod
@@ -1,8 +1,10 @@
|
||||
module atcr.io
|
||||
|
||||
go 1.24.7
|
||||
go 1.24.9
|
||||
|
||||
require (
|
||||
github.com/anchore/grype v0.102.0
|
||||
github.com/anchore/syft v1.36.0
|
||||
github.com/aws/aws-sdk-go v1.55.5
|
||||
github.com/bluesky-social/indigo v0.0.0-20251021193747-543ab1124beb
|
||||
github.com/distribution/distribution/v3 v3.0.0
|
||||
@@ -23,49 +25,191 @@ require (
|
||||
github.com/microcosm-cc/bluemonday v1.0.27
|
||||
github.com/multiformats/go-multihash v0.2.3
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/spf13/cobra v1.8.0
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/spf13/cobra v1.10.1
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/whyrusleeping/cbor-gen v0.3.1
|
||||
github.com/yuin/goldmark v1.7.13
|
||||
go.opentelemetry.io/otel v1.32.0
|
||||
go.opentelemetry.io/otel v1.37.0
|
||||
go.yaml.in/yaml/v4 v4.0.0-rc.2
|
||||
golang.org/x/crypto v0.39.0
|
||||
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028
|
||||
gorm.io/gorm v1.25.9
|
||||
golang.org/x/crypto v0.43.0
|
||||
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da
|
||||
gorm.io/gorm v1.31.0
|
||||
)
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.24.0 // indirect
|
||||
cloud.google.com/go v0.121.3 // indirect
|
||||
cloud.google.com/go/auth v0.16.2 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.7.0 // indirect
|
||||
cloud.google.com/go/iam v1.5.2 // indirect
|
||||
cloud.google.com/go/monitoring v1.24.2 // indirect
|
||||
cloud.google.com/go/storage v1.55.0 // indirect
|
||||
dario.cat/mergo v1.0.2 // indirect
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect
|
||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20250520111509-a70c2aa677fa // indirect
|
||||
github.com/BurntSushi/toml v1.5.0 // indirect
|
||||
github.com/CycloneDX/cyclonedx-go v0.9.3 // indirect
|
||||
github.com/DataDog/zstd v1.5.7 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect
|
||||
github.com/Intevation/gval v1.3.0 // indirect
|
||||
github.com/Intevation/jsonpath v0.2.1 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.4.0 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/Microsoft/hcsshim v0.13.0 // indirect
|
||||
github.com/OneOfOne/xxhash v1.2.8 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.3.0 // indirect
|
||||
github.com/RussellLuo/slidingwindow v0.0.0-20200528002341-535bb99d338b // indirect
|
||||
github.com/STARRY-S/zip v0.2.3 // indirect
|
||||
github.com/acobaugh/osrelease v0.1.0 // indirect
|
||||
github.com/adrg/xdg v0.5.3 // indirect
|
||||
github.com/agext/levenshtein v1.2.3 // indirect
|
||||
github.com/anchore/archiver/v3 v3.5.3-0.20241210171143-5b1d8d1c7c51 // indirect
|
||||
github.com/anchore/clio v0.0.0-20250715152405-a0fa658e5084 // indirect
|
||||
github.com/anchore/fangs v0.0.0-20250716230140-94c22408c232 // indirect
|
||||
github.com/anchore/go-collections v0.0.0-20251016125210-a3c352120e8c // indirect
|
||||
github.com/anchore/go-homedir v0.0.0-20250319154043-c29668562e4d // indirect
|
||||
github.com/anchore/go-logger v0.0.0-20250318195838-07ae343dd722 // indirect
|
||||
github.com/anchore/go-lzo v0.1.0 // indirect
|
||||
github.com/anchore/go-macholibre v0.0.0-20250320151634-807da7ad2331 // indirect
|
||||
github.com/anchore/go-rpmdb v0.0.0-20250516171929-f77691e1faec // indirect
|
||||
github.com/anchore/go-struct-converter v0.0.0-20250211213226-cce56d595160 // indirect
|
||||
github.com/anchore/go-sync v0.0.0-20250714163430-add63db73ad1 // indirect
|
||||
github.com/anchore/go-version v1.2.2-0.20210903204242-51efa5b487c4 // indirect
|
||||
github.com/anchore/packageurl-go v0.1.1-0.20250220190351-d62adb6e1115 // indirect
|
||||
github.com/anchore/stereoscope v0.1.11 // indirect
|
||||
github.com/andybalholm/brotli v1.2.0 // indirect
|
||||
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
|
||||
github.com/aquasecurity/go-pep440-version v0.0.1 // indirect
|
||||
github.com/aquasecurity/go-version v0.0.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.70 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.80.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 // indirect
|
||||
github.com/aws/smithy-go v1.22.4 // indirect
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
|
||||
github.com/aymerick/douceur v0.2.0 // indirect
|
||||
github.com/becheran/wildmatch-go v1.0.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
|
||||
github.com/bitnami/go-version v0.0.0-20250505154626-452e8c5ee607 // indirect
|
||||
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb // indirect
|
||||
github.com/bmatcuk/doublestar/v2 v2.0.4 // indirect
|
||||
github.com/bmatcuk/doublestar/v4 v4.9.1 // indirect
|
||||
github.com/bodgit/plumbing v1.3.0 // indirect
|
||||
github.com/bodgit/sevenzip v1.6.1 // indirect
|
||||
github.com/bodgit/windows v1.0.1 // indirect
|
||||
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/charmbracelet/colorprofile v0.3.1 // indirect
|
||||
github.com/charmbracelet/lipgloss v1.1.0 // indirect
|
||||
github.com/charmbracelet/x/ansi v0.10.1 // indirect
|
||||
github.com/charmbracelet/x/cellbuf v0.0.13 // indirect
|
||||
github.com/charmbracelet/x/term v0.2.1 // indirect
|
||||
github.com/cloudflare/circl v1.6.1 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
|
||||
github.com/containerd/cgroups/v3 v3.0.3 // indirect
|
||||
github.com/containerd/containerd v1.7.28 // indirect
|
||||
github.com/containerd/containerd/api v1.9.0 // indirect
|
||||
github.com/containerd/continuity v0.4.5 // indirect
|
||||
github.com/containerd/errdefs v1.0.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
github.com/containerd/fifo v1.1.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/platforms v0.2.1 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
|
||||
github.com/containerd/ttrpc v1.2.7 // indirect
|
||||
github.com/containerd/typeurl/v2 v2.2.3 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/deitch/magic v0.0.0-20240306090643-c67ab88f10cb // indirect
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
github.com/docker/docker-credential-helpers v0.8.2 // indirect
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
|
||||
github.com/diskfs/go-diskfs v1.7.0 // indirect
|
||||
github.com/docker/cli v28.5.1+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker v28.5.1+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.9.3 // indirect
|
||||
github.com/docker/go-connections v0.6.0 // indirect
|
||||
github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32 // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/elliotchance/phpserialize v1.4.0 // indirect
|
||||
github.com/emirpasic/gods v1.18.1 // indirect
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
|
||||
github.com/facebookincubator/nvdtools v0.1.5 // indirect
|
||||
github.com/fatih/color v1.18.0 // indirect
|
||||
github.com/felixge/fgprof v0.9.5 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.10 // indirect
|
||||
github.com/github/go-spdx/v2 v2.3.4 // indirect
|
||||
github.com/glebarez/go-sqlite v1.22.0 // indirect
|
||||
github.com/glebarez/sqlite v1.11.0 // indirect
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
|
||||
github.com/go-git/go-billy/v5 v5.6.2 // indirect
|
||||
github.com/go-git/go-git/v5 v5.16.3 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.2 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-restruct/restruct v1.2.0-alpha // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||
github.com/goccy/go-yaml v1.18.0 // indirect
|
||||
github.com/gocql/gocql v1.7.0 // indirect
|
||||
github.com/gocsaf/csaf/v3 v3.3.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/gohugoio/hashstructure v0.6.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
|
||||
github.com/golang/snappy v1.0.0 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/go-containerregistry v0.20.6 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/licensecheck v0.3.1 // indirect
|
||||
github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 // indirect
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.15.0 // indirect
|
||||
github.com/gookit/color v1.6.0 // indirect
|
||||
github.com/gorilla/css v1.0.1 // indirect
|
||||
github.com/gorilla/handlers v1.5.2 // indirect
|
||||
github.com/gorilla/mux v1.8.1 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
|
||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
|
||||
github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b // indirect
|
||||
github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.65 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-getter v1.8.2 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.5 // indirect
|
||||
github.com/hashicorp/go-version v1.7.0 // indirect
|
||||
github.com/hashicorp/golang-lru v1.0.2 // indirect
|
||||
github.com/hashicorp/golang-lru/arc/v2 v2.0.6 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||
github.com/hashicorp/hcl/v2 v2.24.0 // indirect
|
||||
github.com/huandu/xstrings v1.5.0 // indirect
|
||||
github.com/iancoleman/strcase v0.3.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/ipfs/bbloom v0.0.4 // indirect
|
||||
github.com/ipfs/go-blockservice v0.5.2 // indirect
|
||||
@@ -86,22 +230,70 @@ require (
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
|
||||
github.com/jackc/pgx/v5 v5.5.0 // indirect
|
||||
github.com/jackc/puddle/v2 v2.2.1 // indirect
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
|
||||
github.com/jbenet/goprocess v0.1.4 // indirect
|
||||
github.com/jinzhu/copier v0.4.0 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/kastenhq/goversion v0.0.0-20230811215019-93b2f8823953 // indirect
|
||||
github.com/kevinburke/ssh_config v1.2.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
|
||||
github.com/klauspost/pgzip v1.2.6 // indirect
|
||||
github.com/knqyf263/go-apk-version v0.0.0-20200609155635-041fdbb8563f // indirect
|
||||
github.com/knqyf263/go-deb-version v0.0.0-20241115132648-6f4aee6ccd23 // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
|
||||
github.com/masahiro331/go-mvn-version v0.0.0-20250131095131-f4974fa13b8a // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
||||
github.com/mholt/archives v0.1.5 // indirect
|
||||
github.com/mikelolasagasti/xz v1.0.1 // indirect
|
||||
github.com/minio/minlz v1.0.1 // indirect
|
||||
github.com/minio/sha256-simd v1.0.1 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/locker v1.0.1 // indirect
|
||||
github.com/moby/sys/mountinfo v0.7.2 // indirect
|
||||
github.com/moby/sys/sequential v0.6.0 // indirect
|
||||
github.com/moby/sys/signal v0.7.1 // indirect
|
||||
github.com/moby/sys/user v0.4.0 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/mr-tron/base58 v1.2.0 // indirect
|
||||
github.com/muesli/termenv v0.16.0 // indirect
|
||||
github.com/multiformats/go-base32 v0.1.0 // indirect
|
||||
github.com/multiformats/go-base36 v0.2.0 // indirect
|
||||
github.com/multiformats/go-multibase v0.2.0 // indirect
|
||||
github.com/multiformats/go-varint v0.0.7 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||
github.com/ncruces/go-strftime v0.1.9 // indirect
|
||||
github.com/nix-community/go-nix v0.0.0-20250101154619-4bdde671e0a1 // indirect
|
||||
github.com/nwaples/rardecode v1.1.3 // indirect
|
||||
github.com/nwaples/rardecode/v2 v2.2.0 // indirect
|
||||
github.com/olekukonko/errors v1.1.0 // indirect
|
||||
github.com/olekukonko/ll v0.0.9 // indirect
|
||||
github.com/olekukonko/tablewriter v1.1.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/opencontainers/runtime-spec v1.2.1 // indirect
|
||||
github.com/opencontainers/selinux v1.12.0 // indirect
|
||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/openvex/go-vex v0.2.7 // indirect
|
||||
github.com/package-url/packageurl-go v0.1.3 // indirect
|
||||
github.com/pandatix/go-cvss v0.6.2 // indirect
|
||||
github.com/pborman/indent v1.2.1 // indirect
|
||||
github.com/pelletier/go-toml v1.9.5 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.22 // indirect
|
||||
github.com/pjbgf/sha1cd v0.4.0 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pkg/profile v1.7.0 // indirect
|
||||
github.com/pkg/xattr v0.4.12 // indirect
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f // indirect
|
||||
github.com/prometheus/client_golang v1.20.5 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
@@ -110,47 +302,107 @@ require (
|
||||
github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect
|
||||
github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect
|
||||
github.com/redis/go-redis/v9 v9.7.3 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/rust-secure-code/go-rustaudit v0.0.0-20250226111315-e20ec32e963c // indirect
|
||||
github.com/sagikazarmark/locafero v0.9.0 // indirect
|
||||
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d // indirect
|
||||
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
|
||||
github.com/sassoftware/go-rpmutils v0.4.0 // indirect
|
||||
github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e // indirect
|
||||
github.com/sergi/go-diff v1.4.0 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect
|
||||
github.com/skeema/knownhosts v1.3.1 // indirect
|
||||
github.com/sorairolake/lzip-go v0.3.8 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spdx/gordf v0.0.0-20250128162952-000978ccd6fb // indirect
|
||||
github.com/spdx/tools-golang v0.5.5 // indirect
|
||||
github.com/spf13/afero v1.15.0 // indirect
|
||||
github.com/spf13/cast v1.9.2 // indirect
|
||||
github.com/spf13/pflag v1.0.9 // indirect
|
||||
github.com/spf13/viper v1.20.1 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/sylabs/sif/v2 v2.22.0 // indirect
|
||||
github.com/sylabs/squashfs v1.0.6 // indirect
|
||||
github.com/therootcompany/xz v1.0.1 // indirect
|
||||
github.com/ulikunitz/xz v0.5.15 // indirect
|
||||
github.com/vbatts/go-mtree v0.6.0 // indirect
|
||||
github.com/vbatts/tar-split v0.12.1 // indirect
|
||||
github.com/vifraa/gopom v1.0.0 // indirect
|
||||
github.com/wagoodman/go-partybus v0.0.0-20230516145632-8ccac152c651 // indirect
|
||||
github.com/wagoodman/go-progress v0.0.0-20230925121702-07e42b3cdba0 // indirect
|
||||
github.com/xanzy/ssh-agent v0.3.3 // indirect
|
||||
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
github.com/zclconf/go-cty v1.16.3 // indirect
|
||||
github.com/zeebo/errs v1.4.0 // indirect
|
||||
gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b // indirect
|
||||
gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 // indirect
|
||||
go.etcd.io/bbolt v1.4.2 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.37.0 // indirect
|
||||
go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.54.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/log v0.8.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.32.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.4.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.26.0 // indirect
|
||||
golang.org/x/net v0.37.0 // indirect
|
||||
golang.org/x/sync v0.15.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/text v0.26.0 // indirect
|
||||
golang.org/x/time v0.6.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
|
||||
google.golang.org/grpc v1.68.0 // indirect
|
||||
google.golang.org/protobuf v1.35.1 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
|
||||
golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc // indirect
|
||||
golang.org/x/mod v0.29.0 // indirect
|
||||
golang.org/x/net v0.46.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.17.0 // indirect
|
||||
golang.org/x/sys v0.37.0 // indirect
|
||||
golang.org/x/term v0.36.0 // indirect
|
||||
golang.org/x/text v0.30.0 // indirect
|
||||
golang.org/x/time v0.14.0 // indirect
|
||||
golang.org/x/tools v0.38.0 // indirect
|
||||
google.golang.org/api v0.242.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250715232539-7130f93afb79 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250715232539-7130f93afb79 // indirect
|
||||
google.golang.org/grpc v1.74.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/warnings.v0 v0.1.2 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
gorm.io/driver/postgres v1.5.7 // indirect
|
||||
lukechampine.com/blake3 v1.2.1 // indirect
|
||||
modernc.org/libc v1.66.10 // indirect
|
||||
modernc.org/mathutil v1.7.1 // indirect
|
||||
modernc.org/memory v1.11.0 // indirect
|
||||
modernc.org/sqlite v1.39.1 // indirect
|
||||
)
|
||||
|
||||
// Pin OpenTelemetry SDK to v1.32.0 for compatibility with distribution/distribution
|
||||
replace (
|
||||
go.opentelemetry.io/otel/sdk => go.opentelemetry.io/otel/sdk v1.32.0
|
||||
go.opentelemetry.io/otel/sdk/log => go.opentelemetry.io/otel/sdk/log v0.8.0
|
||||
go.opentelemetry.io/otel/sdk/metric => go.opentelemetry.io/otel/sdk/metric v1.32.0
|
||||
)
|
||||
|
||||
25
license
25
license
@@ -1,25 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
=====================
|
||||
|
||||
Copyright © 2025 Evan Jarrett
|
||||
|
||||
Permission is hereby granted, free of charge, to any person
|
||||
obtaining a copy of this software and associated documentation
|
||||
files (the “Software”), to deal in the Software without
|
||||
restriction, including without limitation the rights to use,
|
||||
copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the
|
||||
Software is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
||||
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
OTHER DEALINGS IN THE SOFTWARE.
|
||||
@@ -151,13 +151,24 @@ type TagWithPlatforms struct {
|
||||
IsMultiArch bool
|
||||
}
|
||||
|
||||
// VulnerabilitySummary represents vulnerability counts by severity
|
||||
type VulnerabilitySummary struct {
|
||||
Critical int
|
||||
High int
|
||||
Medium int
|
||||
Low int
|
||||
Total int
|
||||
}
|
||||
|
||||
// ManifestWithMetadata extends Manifest with tags and platform information
|
||||
type ManifestWithMetadata struct {
|
||||
Manifest
|
||||
Tags []string
|
||||
Platforms []PlatformInfo
|
||||
PlatformCount int
|
||||
IsManifestList bool
|
||||
Reachable bool // Whether the hold endpoint is reachable
|
||||
Pending bool // Whether health check is still in progress
|
||||
Tags []string
|
||||
Platforms []PlatformInfo
|
||||
PlatformCount int
|
||||
IsManifestList bool
|
||||
Reachable bool // Whether the hold endpoint is reachable
|
||||
Pending bool // Whether health check is still in progress
|
||||
Vulnerabilities *VulnerabilitySummary
|
||||
HasVulnerabilities bool
|
||||
}
|
||||
|
||||
@@ -43,14 +43,12 @@ func (h *ManifestHealthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
|
||||
|
||||
reachable, err := h.HealthChecker.CheckHealth(ctx, endpoint)
|
||||
|
||||
// Check for HTTP errors first (connection refused, network unreachable, etc.)
|
||||
// This ensures we catch real failures even when timing aligns with context timeout
|
||||
if err != nil {
|
||||
if ctx.Err() == context.DeadlineExceeded {
|
||||
// Still pending - render "Checking..." badge with HTMX retry
|
||||
h.renderBadge(w, endpoint, false, true)
|
||||
} else if err != nil {
|
||||
// Error - mark as unreachable
|
||||
h.renderBadge(w, endpoint, false, false)
|
||||
} else if ctx.Err() == context.DeadlineExceeded {
|
||||
// Context timed out but no HTTP error yet - still pending
|
||||
h.renderBadge(w, endpoint, false, true)
|
||||
} else {
|
||||
// Success
|
||||
h.renderBadge(w, endpoint, reachable, false)
|
||||
@@ -67,10 +65,10 @@ func (h *ManifestHealthHandler) renderBadge(w http.ResponseWriter, endpoint stri
|
||||
w.Write([]byte(`<span class="checking-badge"
|
||||
hx-get="` + retryURL + `"
|
||||
hx-trigger="load delay:3s"
|
||||
hx-swap="outerHTML"><i data-lucide="refresh-ccw"></i> Checking...</span>`))
|
||||
hx-swap="outerHTML">🔄 Checking...</span>`))
|
||||
} else if !reachable {
|
||||
// Unreachable - render offline badge
|
||||
w.Write([]byte(`<span class="offline-badge"><i data-lucide="triangle-alert"></i> Offline</span>`))
|
||||
w.Write([]byte(`<span class="offline-badge">⚠️ Offline</span>`))
|
||||
} else {
|
||||
// Reachable - no badge (empty response)
|
||||
w.Write([]byte(``))
|
||||
|
||||
@@ -3,9 +3,14 @@ package handlers
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -30,6 +35,176 @@ type RepositoryPageHandler struct {
|
||||
ReadmeCache *readme.Cache
|
||||
}
|
||||
|
||||
// queryVulnerabilities queries the hold service for vulnerability scan results
|
||||
func (h *RepositoryPageHandler) queryVulnerabilities(ctx context.Context, holdEndpoint string, digest string) (*db.VulnerabilitySummary, error) {
|
||||
// Skip if no hold endpoint
|
||||
if holdEndpoint == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Query referrers endpoint for vulnerability scan results
|
||||
// Match the artifactType used by the scanner in pkg/hold/scanner/storage.go
|
||||
artifactType := "application/vnd.atcr.vulnerabilities+json"
|
||||
|
||||
// Properly encode query parameters (especially the + in the media type)
|
||||
queryParams := url.Values{}
|
||||
queryParams.Set("digest", digest)
|
||||
queryParams.Set("artifactType", artifactType)
|
||||
requestURL := fmt.Sprintf("%s/xrpc/io.atcr.hold.getReferrers?%s", holdEndpoint, queryParams.Encode())
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", requestURL, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
// No scan results found
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return nil, fmt.Errorf("failed to query referrers: %s - %s", resp.Status, string(body))
|
||||
}
|
||||
|
||||
// Parse response
|
||||
var result struct {
|
||||
Referrers []struct {
|
||||
Annotations map[string]string `json:"annotations"`
|
||||
} `json:"referrers"`
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode referrers response: %w", err)
|
||||
}
|
||||
|
||||
slog.Debug("Received referrers response",
|
||||
"digest", digest,
|
||||
"referrerCount", len(result.Referrers))
|
||||
|
||||
// Find the most recent vulnerability scan result
|
||||
if len(result.Referrers) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Parse vulnerability counts from annotations
|
||||
// Match the annotation keys used by the scanner in pkg/hold/scanner/storage.go
|
||||
annotations := result.Referrers[0].Annotations
|
||||
slog.Debug("First referrer annotations",
|
||||
"digest", digest,
|
||||
"annotations", annotations,
|
||||
"annotationsLen", len(annotations))
|
||||
|
||||
summary := &db.VulnerabilitySummary{}
|
||||
|
||||
if critical, ok := annotations["io.atcr.vuln.critical"]; ok {
|
||||
summary.Critical, _ = strconv.Atoi(critical)
|
||||
}
|
||||
if high, ok := annotations["io.atcr.vuln.high"]; ok {
|
||||
summary.High, _ = strconv.Atoi(high)
|
||||
}
|
||||
if medium, ok := annotations["io.atcr.vuln.medium"]; ok {
|
||||
summary.Medium, _ = strconv.Atoi(medium)
|
||||
}
|
||||
if low, ok := annotations["io.atcr.vuln.low"]; ok {
|
||||
summary.Low, _ = strconv.Atoi(low)
|
||||
}
|
||||
if total, ok := annotations["io.atcr.vuln.total"]; ok {
|
||||
summary.Total, _ = strconv.Atoi(total)
|
||||
}
|
||||
|
||||
// If Total is missing or 0, calculate from individual counts
|
||||
if summary.Total == 0 {
|
||||
summary.Total = summary.Critical + summary.High + summary.Medium + summary.Low
|
||||
}
|
||||
|
||||
slog.Debug("Parsed vulnerability summary",
|
||||
"digest", digest,
|
||||
"critical", summary.Critical,
|
||||
"high", summary.High,
|
||||
"medium", summary.Medium,
|
||||
"low", summary.Low,
|
||||
"total", summary.Total)
|
||||
|
||||
return summary, nil
|
||||
}
|
||||
|
||||
// HandleVulnerabilityDetails returns the full vulnerability report for a manifest
|
||||
func (h *RepositoryPageHandler) HandleVulnerabilityDetails(w http.ResponseWriter, r *http.Request) {
|
||||
digest := r.URL.Query().Get("digest")
|
||||
holdEndpoint := r.URL.Query().Get("holdEndpoint")
|
||||
|
||||
if digest == "" || holdEndpoint == "" {
|
||||
http.Error(w, "digest and holdEndpoint required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Query referrers to get the vulnerability report digest
|
||||
artifactType := "application/vnd.atcr.vulnerabilities+json"
|
||||
queryParams := url.Values{}
|
||||
queryParams.Set("digest", digest)
|
||||
queryParams.Set("artifactType", artifactType)
|
||||
requestURL := fmt.Sprintf("%s/xrpc/io.atcr.hold.getReferrers?%s", holdEndpoint, queryParams.Encode())
|
||||
|
||||
req, err := http.NewRequestWithContext(r.Context(), "GET", requestURL, nil)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
http.Error(w, "No vulnerability scan found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
http.Error(w, "Failed to query referrers", resp.StatusCode)
|
||||
return
|
||||
}
|
||||
|
||||
// Parse response - now includes the vulnerability report data directly
|
||||
var result struct {
|
||||
Referrers []struct {
|
||||
Digest string `json:"digest"`
|
||||
Annotations map[string]string `json:"annotations"`
|
||||
ReportData map[string]interface{} `json:"reportData"` // The actual vulnerability report
|
||||
} `json:"referrers"`
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
http.Error(w, "Failed to decode referrers response", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if len(result.Referrers) == 0 {
|
||||
http.Error(w, "No vulnerability scan found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if reportData is included
|
||||
if result.Referrers[0].ReportData == nil {
|
||||
http.Error(w, "Vulnerability report data not available", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
// Return the vulnerability report JSON directly
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(result.Referrers[0].ReportData)
|
||||
}
|
||||
|
||||
func (h *RepositoryPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
handle := chi.URLParam(r, "handle")
|
||||
repository := chi.URLParam(r, "repository")
|
||||
@@ -60,6 +235,44 @@ func (h *RepositoryPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
|
||||
return
|
||||
}
|
||||
|
||||
// Query vulnerability scan results for each manifest (concurrent with 2s timeout)
|
||||
{
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
var mu sync.Mutex
|
||||
|
||||
for i := range manifests {
|
||||
// Skip manifest lists - only query for image manifests
|
||||
if manifests[i].IsManifestList {
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func(idx int) {
|
||||
defer wg.Done()
|
||||
|
||||
vulnerabilities, err := h.queryVulnerabilities(ctx, manifests[idx].Manifest.HoldEndpoint, manifests[idx].Manifest.Digest)
|
||||
if err != nil {
|
||||
slog.Warn("Failed to query vulnerabilities",
|
||||
"digest", manifests[idx].Manifest.Digest,
|
||||
"error", err)
|
||||
return
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
if vulnerabilities != nil && vulnerabilities.Total > 0 {
|
||||
manifests[idx].Vulnerabilities = vulnerabilities
|
||||
manifests[idx].HasVulnerabilities = true
|
||||
}
|
||||
mu.Unlock()
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// Check health status for each manifest's hold endpoint (concurrent with 1s timeout)
|
||||
if h.HealthChecker != nil {
|
||||
// Create context with 1 second deadline for fast-fail
|
||||
|
||||
@@ -107,15 +107,22 @@ func (p *Processor) ProcessManifest(ctx context.Context, did string, recordData
|
||||
// Detect manifest type
|
||||
isManifestList := len(manifestRecord.Manifests) > 0
|
||||
|
||||
// Parse CreatedAt timestamp
|
||||
createdAt, err := time.Parse(time.RFC3339, manifestRecord.CreatedAt)
|
||||
if err != nil {
|
||||
// If parsing fails, use current time
|
||||
createdAt = time.Now()
|
||||
}
|
||||
|
||||
// Prepare manifest for insertion (WITHOUT annotation fields)
|
||||
manifest := &db.Manifest{
|
||||
DID: did,
|
||||
Repository: manifestRecord.Repository,
|
||||
Digest: manifestRecord.Digest,
|
||||
MediaType: manifestRecord.MediaType,
|
||||
SchemaVersion: manifestRecord.SchemaVersion,
|
||||
SchemaVersion: int(manifestRecord.SchemaVersion),
|
||||
HoldEndpoint: manifestRecord.HoldEndpoint,
|
||||
CreatedAt: manifestRecord.CreatedAt,
|
||||
CreatedAt: createdAt,
|
||||
// Annotations removed - stored separately in repository_annotations table
|
||||
}
|
||||
|
||||
|
||||
@@ -148,7 +148,7 @@ func TestProcessManifest_ImageManifest(t *testing.T) {
|
||||
MediaType: "application/vnd.oci.image.manifest.v1+json",
|
||||
SchemaVersion: 2,
|
||||
HoldEndpoint: "did:web:hold01.atcr.io",
|
||||
CreatedAt: time.Now(),
|
||||
CreatedAt: time.Now().String(),
|
||||
Config: &atproto.BlobReference{
|
||||
Digest: "sha256:config123",
|
||||
Size: 1234,
|
||||
@@ -247,7 +247,7 @@ func TestProcessManifest_ManifestList(t *testing.T) {
|
||||
MediaType: "application/vnd.oci.image.index.v1+json",
|
||||
SchemaVersion: 2,
|
||||
HoldEndpoint: "did:web:hold01.atcr.io",
|
||||
CreatedAt: time.Now(),
|
||||
CreatedAt: time.Now().String(),
|
||||
Manifests: []atproto.ManifestReference{
|
||||
{
|
||||
Digest: "sha256:amd64manifest",
|
||||
@@ -471,7 +471,7 @@ func TestProcessManifest_Duplicate(t *testing.T) {
|
||||
MediaType: "application/vnd.oci.image.manifest.v1+json",
|
||||
SchemaVersion: 2,
|
||||
HoldEndpoint: "did:web:hold01.atcr.io",
|
||||
CreatedAt: time.Now(),
|
||||
CreatedAt: time.Now().String(),
|
||||
}
|
||||
|
||||
// Marshal to bytes for ProcessManifest
|
||||
@@ -523,7 +523,7 @@ func TestProcessManifest_EmptyAnnotations(t *testing.T) {
|
||||
MediaType: "application/vnd.oci.image.manifest.v1+json",
|
||||
SchemaVersion: 2,
|
||||
HoldEndpoint: "did:web:hold01.atcr.io",
|
||||
CreatedAt: time.Now(),
|
||||
CreatedAt: time.Now().String(),
|
||||
Annotations: nil,
|
||||
}
|
||||
|
||||
|
||||
@@ -154,13 +154,10 @@ func (nr *NamespaceResolver) Repository(ctx context.Context, name reference.Name
|
||||
// Auto-reconcile crew membership on first push/pull
|
||||
// This ensures users can push immediately after docker login without web sign-in
|
||||
// EnsureCrewMembership is best-effort and logs errors without failing the request
|
||||
// Run in background to avoid blocking registry operations if hold is offline
|
||||
if holdDID != "" && nr.refresher != nil {
|
||||
slog.Debug("Auto-reconciling crew membership", "component", "registry/middleware", "did", did, "hold_did", holdDID)
|
||||
client := atproto.NewClient(pdsEndpoint, did, "")
|
||||
go func(ctx context.Context, client *atproto.Client, refresher *oauth.Refresher, holdDID string) {
|
||||
storage.EnsureCrewMembership(ctx, client, refresher, holdDID)
|
||||
}(ctx, client, nr.refresher, holdDID)
|
||||
storage.EnsureCrewMembership(ctx, client, nr.refresher, holdDID)
|
||||
}
|
||||
|
||||
// Get service token for hold authentication
|
||||
|
||||
@@ -2,7 +2,6 @@ package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"atcr.io/pkg/atproto"
|
||||
@@ -10,37 +9,20 @@ import (
|
||||
|
||||
// Mock implementations for testing
|
||||
type mockDatabaseMetrics struct {
|
||||
mu sync.Mutex
|
||||
pullCount int
|
||||
pushCount int
|
||||
}
|
||||
|
||||
func (m *mockDatabaseMetrics) IncrementPullCount(did, repository string) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.pullCount++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDatabaseMetrics) IncrementPushCount(did, repository string) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.pushCount++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDatabaseMetrics) getPullCount() int {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
return m.pullCount
|
||||
}
|
||||
|
||||
func (m *mockDatabaseMetrics) getPushCount() int {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
return m.pushCount
|
||||
}
|
||||
|
||||
type mockReadmeCache struct{}
|
||||
|
||||
func (m *mockReadmeCache) Get(ctx context.Context, url string) (string, error) {
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"io"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/atproto"
|
||||
"atcr.io/pkg/auth/oauth"
|
||||
@@ -60,10 +59,6 @@ func EnsureCrewMembership(ctx context.Context, client *atproto.Client, refresher
|
||||
// requestCrewMembership calls the hold's requestCrew endpoint
|
||||
// The endpoint handles all authorization and duplicate checking internally
|
||||
func requestCrewMembership(ctx context.Context, holdEndpoint, serviceToken string) error {
|
||||
// Add 5 second timeout to prevent hanging on offline holds
|
||||
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
url := fmt.Sprintf("%s%s", holdEndpoint, atproto.HoldRequestCrew)
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", url, nil)
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"maps"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/atproto"
|
||||
@@ -23,7 +22,6 @@ import (
|
||||
// It stores manifests in ATProto as records
|
||||
type ManifestStore struct {
|
||||
ctx *RegistryContext // Context with user/hold info
|
||||
mu sync.RWMutex // Protects lastFetchedHoldDID
|
||||
lastFetchedHoldDID string // Hold DID from most recently fetched manifest (for pull)
|
||||
blobStore distribution.BlobStore // Blob store for fetching config during push
|
||||
}
|
||||
@@ -69,7 +67,6 @@ func (s *ManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...
|
||||
// Store the hold DID for subsequent blob requests during pull
|
||||
// Prefer HoldDID (new format) with fallback to HoldEndpoint (legacy URL format)
|
||||
// The routing repository will cache this for concurrent blob fetches
|
||||
s.mu.Lock()
|
||||
if manifestRecord.HoldDID != "" {
|
||||
// New format: DID reference (preferred)
|
||||
s.lastFetchedHoldDID = manifestRecord.HoldDID
|
||||
@@ -77,7 +74,6 @@ func (s *ManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...
|
||||
// Legacy format: URL reference - convert to DID
|
||||
s.lastFetchedHoldDID = atproto.ResolveHoldDIDFromURL(manifestRecord.HoldEndpoint)
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
var ociManifest []byte
|
||||
|
||||
@@ -236,8 +232,6 @@ func digestToRKey(dgst digest.Digest) string {
|
||||
// GetLastFetchedHoldDID returns the hold DID from the most recently fetched manifest
|
||||
// This is used by the routing repository to cache the hold for blob requests
|
||||
func (s *ManifestStore) GetLastFetchedHoldDID() string {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.lastFetchedHoldDID
|
||||
}
|
||||
|
||||
@@ -313,6 +307,11 @@ func (s *ManifestStore) notifyHoldAboutManifest(ctx context.Context, manifestRec
|
||||
}
|
||||
|
||||
// Add layers if present
|
||||
slog.Debug("Preparing manifest notification",
|
||||
"repository", s.ctx.Repository,
|
||||
"tag", tag,
|
||||
"manifestLayers", len(manifestRecord.Layers))
|
||||
|
||||
if len(manifestRecord.Layers) > 0 {
|
||||
layers := make([]map[string]any, len(manifestRecord.Layers))
|
||||
for i, layer := range manifestRecord.Layers {
|
||||
@@ -323,16 +322,29 @@ func (s *ManifestStore) notifyHoldAboutManifest(ctx context.Context, manifestRec
|
||||
}
|
||||
}
|
||||
manifestData["layers"] = layers
|
||||
slog.Debug("Added layers to notification", "layerCount", len(layers))
|
||||
} else {
|
||||
slog.Warn("Manifest has no layers",
|
||||
"repository", s.ctx.Repository,
|
||||
"tag", tag,
|
||||
"digest", manifestDigest,
|
||||
"mediaType", manifestRecord.MediaType)
|
||||
}
|
||||
|
||||
notifyReq := map[string]any{
|
||||
"repository": s.ctx.Repository,
|
||||
"tag": tag,
|
||||
"userDid": s.ctx.DID,
|
||||
"userHandle": s.ctx.Handle,
|
||||
"manifest": manifestData,
|
||||
"repository": s.ctx.Repository,
|
||||
"tag": tag,
|
||||
"manifestDigest": manifestDigest,
|
||||
"userDid": s.ctx.DID,
|
||||
"userHandle": s.ctx.Handle,
|
||||
"manifest": manifestData,
|
||||
}
|
||||
|
||||
slog.Debug("Sending manifest notification",
|
||||
"repository", s.ctx.Repository,
|
||||
"holdEndpoint", holdEndpoint,
|
||||
"hasLayers", manifestData["layers"] != nil)
|
||||
|
||||
// Marshal request
|
||||
reqBody, err := json.Marshal(notifyReq)
|
||||
if err != nil {
|
||||
|
||||
@@ -669,13 +669,13 @@ func TestManifestStore_Get_OnlyCountsGETRequests(t *testing.T) {
|
||||
|
||||
if tt.expectPullIncrement {
|
||||
// Check that IncrementPullCount was called
|
||||
if mockDB.getPullCount() == 0 {
|
||||
if mockDB.pullCount == 0 {
|
||||
t.Error("Expected pull count to be incremented for GET request, but it wasn't")
|
||||
}
|
||||
} else {
|
||||
// Check that IncrementPullCount was NOT called
|
||||
if mockDB.getPullCount() > 0 {
|
||||
t.Errorf("Expected pull count NOT to be incremented for %s request, but it was (count=%d)", tt.httpMethod, mockDB.getPullCount())
|
||||
if mockDB.pullCount > 0 {
|
||||
t.Errorf("Expected pull count NOT to be incremented for %s request, but it was (count=%d)", tt.httpMethod, mockDB.pullCount)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
@@ -219,7 +219,6 @@ func TestGetProfile(t *testing.T) {
|
||||
// Clear migration locks before each test
|
||||
migrationLocks = sync.Map{}
|
||||
|
||||
var mu sync.Mutex
|
||||
putRecordCalled := false
|
||||
var migrationRequest map[string]any
|
||||
|
||||
@@ -233,10 +232,8 @@ func TestGetProfile(t *testing.T) {
|
||||
|
||||
// PutRecord (migration)
|
||||
if r.Method == "POST" && strings.Contains(r.URL.Path, "putRecord") {
|
||||
mu.Lock()
|
||||
putRecordCalled = true
|
||||
json.NewDecoder(r.Body).Decode(&migrationRequest)
|
||||
mu.Unlock()
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(`{"uri":"at://did:plc:test123/io.atcr.sailor.profile/self","cid":"bafytest"}`))
|
||||
return
|
||||
@@ -273,17 +270,12 @@ func TestGetProfile(t *testing.T) {
|
||||
// Give goroutine time to execute
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
mu.Lock()
|
||||
called := putRecordCalled
|
||||
request := migrationRequest
|
||||
mu.Unlock()
|
||||
|
||||
if !called {
|
||||
if !putRecordCalled {
|
||||
t.Error("Expected migration PutRecord to be called")
|
||||
}
|
||||
|
||||
if request != nil {
|
||||
recordData := request["record"].(map[string]any)
|
||||
if migrationRequest != nil {
|
||||
recordData := migrationRequest["record"].(map[string]any)
|
||||
migratedHold := recordData["defaultHold"]
|
||||
if migratedHold != tt.expectedHoldDID {
|
||||
t.Errorf("Migrated defaultHold = %v, want %v", migratedHold, tt.expectedHoldDID)
|
||||
|
||||
@@ -7,7 +7,6 @@ package storage
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/distribution/distribution/v3"
|
||||
@@ -18,7 +17,6 @@ import (
|
||||
type RoutingRepository struct {
|
||||
distribution.Repository
|
||||
Ctx *RegistryContext // All context and services (exported for token updates)
|
||||
mu sync.Mutex // Protects manifestStore and blobStore
|
||||
manifestStore *ManifestStore // Cached manifest store instance
|
||||
blobStore *ProxyBlobStore // Cached blob store instance
|
||||
}
|
||||
@@ -33,47 +31,35 @@ func NewRoutingRepository(baseRepo distribution.Repository, ctx *RegistryContext
|
||||
|
||||
// Manifests returns the ATProto-backed manifest service
|
||||
func (r *RoutingRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
|
||||
r.mu.Lock()
|
||||
// Create or return cached manifest store
|
||||
if r.manifestStore == nil {
|
||||
// Ensure blob store is created first (needed for label extraction during push)
|
||||
// Release lock while calling Blobs to avoid deadlock
|
||||
r.mu.Unlock()
|
||||
blobStore := r.Blobs(ctx)
|
||||
r.mu.Lock()
|
||||
|
||||
// Double-check after reacquiring lock (another goroutine might have set it)
|
||||
if r.manifestStore == nil {
|
||||
r.manifestStore = NewManifestStore(r.Ctx, blobStore)
|
||||
}
|
||||
r.manifestStore = NewManifestStore(r.Ctx, blobStore)
|
||||
}
|
||||
manifestStore := r.manifestStore
|
||||
r.mu.Unlock()
|
||||
|
||||
// After any manifest operation, cache the hold DID for blob fetches
|
||||
// We use a goroutine to avoid blocking, and check after a short delay to allow the operation to complete
|
||||
go func() {
|
||||
time.Sleep(100 * time.Millisecond) // Brief delay to let manifest fetch complete
|
||||
if holdDID := manifestStore.GetLastFetchedHoldDID(); holdDID != "" {
|
||||
if holdDID := r.manifestStore.GetLastFetchedHoldDID(); holdDID != "" {
|
||||
// Cache for 10 minutes - should cover typical pull operations
|
||||
GetGlobalHoldCache().Set(r.Ctx.DID, r.Ctx.Repository, holdDID, 10*time.Minute)
|
||||
slog.Debug("Cached hold DID", "component", "storage/routing", "did", r.Ctx.DID, "repo", r.Ctx.Repository, "hold", holdDID)
|
||||
}
|
||||
}()
|
||||
|
||||
return manifestStore, nil
|
||||
return r.manifestStore, nil
|
||||
}
|
||||
|
||||
// Blobs returns a proxy blob store that routes to external hold service
|
||||
// The registry (AppView) NEVER stores blobs locally - all blobs go through hold service
|
||||
func (r *RoutingRepository) Blobs(ctx context.Context) distribution.BlobStore {
|
||||
r.mu.Lock()
|
||||
// Return cached blob store if available
|
||||
if r.blobStore != nil {
|
||||
blobStore := r.blobStore
|
||||
r.mu.Unlock()
|
||||
slog.Debug("Returning cached blob store", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository)
|
||||
return blobStore
|
||||
return r.blobStore
|
||||
}
|
||||
|
||||
// For pull operations, check if we have a cached hold DID from a recent manifest fetch
|
||||
@@ -99,9 +85,7 @@ func (r *RoutingRepository) Blobs(ctx context.Context) distribution.BlobStore {
|
||||
|
||||
// Create and cache proxy blob store
|
||||
r.blobStore = NewProxyBlobStore(r.Ctx)
|
||||
blobStore := r.blobStore
|
||||
r.mu.Unlock()
|
||||
return blobStore
|
||||
return r.blobStore
|
||||
}
|
||||
|
||||
// Tags returns the tag service
|
||||
|
||||
@@ -12,11 +12,11 @@
|
||||
<!-- Stylesheets -->
|
||||
<link rel="stylesheet" href="/css/style.css">
|
||||
|
||||
<!-- HTMX (vendored) -->
|
||||
<script src="/js/htmx.min.js"></script>
|
||||
<!-- HTMX -->
|
||||
<script src="https://unpkg.com/htmx.org@2.0.8/dist/htmx.min.js"></script>
|
||||
|
||||
<!-- Lucide Icons (vendored) -->
|
||||
<script src="/js/lucide.min.js"></script>
|
||||
<!-- Lucide Icons -->
|
||||
<script src="https://unpkg.com/lucide@latest"></script>
|
||||
|
||||
<!-- App Scripts -->
|
||||
<script src="/js/app.js"></script>
|
||||
|
||||
@@ -176,12 +176,39 @@
|
||||
{{ else }}
|
||||
<span class="manifest-type"><i data-lucide="file-text"></i> Image</span>
|
||||
{{ end }}
|
||||
{{ if .HasVulnerabilities }}
|
||||
<div class="vuln-badges-link"
|
||||
onclick="showVulnerabilities('{{ .Manifest.Digest }}', '{{ .Manifest.HoldEndpoint }}')"
|
||||
style="cursor: pointer;"
|
||||
title="Click to view vulnerability details">
|
||||
{{ if gt .Vulnerabilities.Critical 0 }}
|
||||
<span class="vuln-badge vuln-critical" title="Critical vulnerabilities">
|
||||
<i data-lucide="alert-octagon"></i> {{ .Vulnerabilities.Critical }} Critical
|
||||
</span>
|
||||
{{ end }}
|
||||
{{ if gt .Vulnerabilities.High 0 }}
|
||||
<span class="vuln-badge vuln-high" title="High severity vulnerabilities">
|
||||
<i data-lucide="alert-triangle"></i> {{ .Vulnerabilities.High }} High
|
||||
</span>
|
||||
{{ end }}
|
||||
{{ if gt .Vulnerabilities.Medium 0 }}
|
||||
<span class="vuln-badge vuln-medium" title="Medium severity vulnerabilities">
|
||||
<i data-lucide="alert-circle"></i> {{ .Vulnerabilities.Medium }} Medium
|
||||
</span>
|
||||
{{ end }}
|
||||
{{ if gt .Vulnerabilities.Low 0 }}
|
||||
<span class="vuln-badge vuln-low" title="Low severity vulnerabilities">
|
||||
<i data-lucide="info"></i> {{ .Vulnerabilities.Low }} Low
|
||||
</span>
|
||||
{{ end }}
|
||||
</div>
|
||||
{{ end }}
|
||||
{{ if .Pending }}
|
||||
<span class="checking-badge"
|
||||
hx-get="/api/manifest-health?endpoint={{ .Manifest.HoldEndpoint | urlquery }}"
|
||||
hx-trigger="load delay:2s"
|
||||
hx-swap="outerHTML">
|
||||
<i data-lucide="refresh-ccw"></i> Checking...
|
||||
<i data-lucide="rotate-cw"></i> Checking...
|
||||
</span>
|
||||
{{ else if not .Reachable }}
|
||||
<span class="offline-badge"><i data-lucide="alert-triangle"></i> Offline</span>
|
||||
@@ -242,6 +269,22 @@
|
||||
<!-- Modal container for HTMX -->
|
||||
<div id="modal"></div>
|
||||
|
||||
<!-- Vulnerability Details Modal -->
|
||||
<div id="vuln-modal" class="modal-overlay" style="display: none;">
|
||||
<div class="modal-dialog vuln-modal-dialog">
|
||||
<div class="modal-header">
|
||||
<h3>Vulnerability Report</h3>
|
||||
<button class="modal-close" onclick="closeVulnModal()">×</button>
|
||||
</div>
|
||||
<div class="modal-body" id="vuln-modal-body">
|
||||
<div class="loading">Loading vulnerabilities...</div>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button class="btn btn-secondary" onclick="closeVulnModal()">Close</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Manifest Delete Confirmation Modal -->
|
||||
<div id="manifest-delete-modal" class="modal-overlay" style="display: none;">
|
||||
<div class="modal-dialog">
|
||||
@@ -357,7 +400,215 @@
|
||||
.btn-danger:hover {
|
||||
background: #c82333;
|
||||
}
|
||||
|
||||
/* Vulnerability badges */
|
||||
.vuln-badges-link {
|
||||
display: inline-flex;
|
||||
gap: 0.5rem;
|
||||
text-decoration: none;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.vuln-badges-link:hover .vuln-badge {
|
||||
opacity: 0.8;
|
||||
transform: translateY(-1px);
|
||||
transition: all 0.2s ease;
|
||||
}
|
||||
|
||||
.vuln-badge {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 0.25rem;
|
||||
padding: 0.25rem 0.5rem;
|
||||
border-radius: 4px;
|
||||
font-size: 0.75rem;
|
||||
font-weight: 600;
|
||||
color: white;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
.vuln-badge svg {
|
||||
width: 14px;
|
||||
height: 14px;
|
||||
}
|
||||
|
||||
.vuln-critical {
|
||||
background: #dc3545;
|
||||
border: 1px solid #bd2130;
|
||||
}
|
||||
|
||||
.vuln-high {
|
||||
background: #fd7e14;
|
||||
border: 1px solid #dc6502;
|
||||
}
|
||||
|
||||
.vuln-medium {
|
||||
background: #ffc107;
|
||||
border: 1px solid #e0a800;
|
||||
color: #000;
|
||||
}
|
||||
|
||||
.vuln-low {
|
||||
background: #6c757d;
|
||||
border: 1px solid #5a6268;
|
||||
}
|
||||
|
||||
/* Vulnerability modal specific styles */
|
||||
.vuln-modal-dialog {
|
||||
max-width: 900px;
|
||||
max-height: 80vh;
|
||||
}
|
||||
|
||||
.vuln-modal-dialog .modal-body {
|
||||
max-height: 60vh;
|
||||
overflow-y: auto;
|
||||
}
|
||||
|
||||
.vuln-summary {
|
||||
display: flex;
|
||||
gap: 1rem;
|
||||
margin-bottom: 1.5rem;
|
||||
padding: 1rem;
|
||||
background: var(--bg-tertiary, #2a2a2a);
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
.vuln-table {
|
||||
width: 100%;
|
||||
border-collapse: collapse;
|
||||
}
|
||||
|
||||
.vuln-table th {
|
||||
text-align: left;
|
||||
padding: 0.75rem;
|
||||
background: var(--bg-tertiary, #2a2a2a);
|
||||
border-bottom: 2px solid var(--border-color, #333);
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.vuln-table td {
|
||||
padding: 0.75rem;
|
||||
border-bottom: 1px solid var(--border-color, #333);
|
||||
}
|
||||
|
||||
.vuln-table tr:hover {
|
||||
background: var(--bg-hover, #3a3a3a);
|
||||
}
|
||||
|
||||
.vuln-id {
|
||||
font-family: monospace;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.vuln-severity-critical {
|
||||
color: #dc3545;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.vuln-severity-high {
|
||||
color: #fd7e14;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.vuln-severity-medium {
|
||||
color: #ffc107;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.vuln-severity-low {
|
||||
color: #6c757d;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.loading {
|
||||
text-align: center;
|
||||
padding: 2rem;
|
||||
color: var(--text-muted, #999);
|
||||
}
|
||||
</style>
|
||||
|
||||
<script>
|
||||
function showVulnerabilities(digest, holdEndpoint) {
|
||||
const modal = document.getElementById('vuln-modal');
|
||||
const modalBody = document.getElementById('vuln-modal-body');
|
||||
|
||||
// Show modal with loading state
|
||||
modal.style.display = 'flex';
|
||||
modalBody.innerHTML = '<div class="loading">Loading vulnerabilities...</div>';
|
||||
|
||||
// Fetch vulnerability data
|
||||
const url = `/api/vulnerabilities?digest=${encodeURIComponent(digest)}&holdEndpoint=${encodeURIComponent(holdEndpoint)}`;
|
||||
|
||||
fetch(url)
|
||||
.then(response => {
|
||||
if (!response.ok) {
|
||||
throw new Error(`HTTP ${response.status}: ${response.statusText}`);
|
||||
}
|
||||
return response.json();
|
||||
})
|
||||
.then(data => {
|
||||
renderVulnerabilities(data, modalBody);
|
||||
})
|
||||
.catch(error => {
|
||||
modalBody.innerHTML = `<div class="loading" style="color: #dc3545;">Failed to load vulnerabilities: ${error.message}</div>`;
|
||||
});
|
||||
}
|
||||
|
||||
function renderVulnerabilities(data, container) {
|
||||
const summary = data.summary || {};
|
||||
const matches = data.matches || [];
|
||||
|
||||
let html = '';
|
||||
|
||||
// Summary
|
||||
html += '<div class="vuln-summary">';
|
||||
if (summary.critical > 0) html += `<span class="vuln-badge vuln-critical">${summary.critical} Critical</span>`;
|
||||
if (summary.high > 0) html += `<span class="vuln-badge vuln-high">${summary.high} High</span>`;
|
||||
if (summary.medium > 0) html += `<span class="vuln-badge vuln-medium">${summary.medium} Medium</span>`;
|
||||
if (summary.low > 0) html += `<span class="vuln-badge vuln-low">${summary.low} Low</span>`;
|
||||
html += `<span style="margin-left: auto;">Total: ${summary.total || matches.length}</span>`;
|
||||
html += '</div>';
|
||||
|
||||
// Vulnerabilities table
|
||||
if (matches.length === 0) {
|
||||
html += '<p>No vulnerabilities found.</p>';
|
||||
} else {
|
||||
html += '<table class="vuln-table">';
|
||||
html += '<thead><tr><th>CVE</th><th>Severity</th><th>Package</th><th>Installed</th><th>Fixed In</th></tr></thead>';
|
||||
html += '<tbody>';
|
||||
|
||||
matches.forEach(match => {
|
||||
const vuln = match.Vulnerability || {};
|
||||
const pkg = match.Package || {};
|
||||
const severity = (vuln.Metadata?.Severity || 'Unknown').toLowerCase();
|
||||
const severityClass = `vuln-severity-${severity}`;
|
||||
|
||||
html += '<tr>';
|
||||
html += `<td class="vuln-id">${vuln.ID || 'N/A'}</td>`;
|
||||
html += `<td class="${severityClass}">${vuln.Metadata?.Severity || 'Unknown'}</td>`;
|
||||
html += `<td>${pkg.Name || 'N/A'}</td>`;
|
||||
html += `<td>${pkg.Version || 'N/A'}</td>`;
|
||||
html += `<td>${vuln.Fix?.Versions?.join(', ') || 'No fix available'}</td>`;
|
||||
html += '</tr>';
|
||||
});
|
||||
|
||||
html += '</tbody></table>';
|
||||
}
|
||||
|
||||
container.innerHTML = html;
|
||||
}
|
||||
|
||||
function closeVulnModal() {
|
||||
document.getElementById('vuln-modal').style.display = 'none';
|
||||
}
|
||||
|
||||
// Close modal when clicking outside
|
||||
document.getElementById('vuln-modal')?.addEventListener('click', function(e) {
|
||||
if (e.target === this) {
|
||||
closeVulnModal();
|
||||
}
|
||||
});
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
{{ end }}
|
||||
|
||||
@@ -12,9 +12,6 @@ import (
|
||||
"atcr.io/pkg/appview/licenses"
|
||||
)
|
||||
|
||||
//go:generate curl -fsSL -o static/js/htmx.min.js https://unpkg.com/htmx.org@2.0.8/dist/htmx.min.js
|
||||
//go:generate curl -fsSL -o static/js/lucide.min.js https://unpkg.com/lucide@latest/dist/umd/lucide.min.js
|
||||
|
||||
//go:embed templates/**/*.html
|
||||
var templatesFS embed.FS
|
||||
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -266,15 +266,15 @@ func (c *Client) ListRecords(ctx context.Context, collection string, limit int)
|
||||
// ATProtoBlobRef represents a reference to a blob in ATProto's native blob storage
|
||||
// This is different from OCIBlobDescriptor which describes OCI image layers
|
||||
type ATProtoBlobRef struct {
|
||||
Type string `json:"$type"`
|
||||
Ref Link `json:"ref"`
|
||||
MimeType string `json:"mimeType"`
|
||||
Size int64 `json:"size"`
|
||||
Type string `json:"$type" cborgen:"$type"`
|
||||
Ref Link `json:"ref" cborgen:"ref"`
|
||||
MimeType string `json:"mimeType" cborgen:"mimeType"`
|
||||
Size int64 `json:"size" cborgen:"size"`
|
||||
}
|
||||
|
||||
// Link represents an IPFS link to blob content
|
||||
type Link struct {
|
||||
Link string `json:"$link"`
|
||||
Link string `json:"$link" cborgen:"$link"`
|
||||
}
|
||||
|
||||
// UploadBlob uploads binary data to the PDS and returns a blob reference
|
||||
|
||||
@@ -45,6 +45,12 @@ const (
|
||||
// Request: {"repository": "...", "tag": "...", "userDid": "...", "userHandle": "...", "manifest": {...}}
|
||||
// Response: {"success": true, "layersCreated": 5, "postCreated": true, "postUri": "at://..."}
|
||||
HoldNotifyManifest = "/xrpc/io.atcr.hold.notifyManifest"
|
||||
|
||||
// HoldGetReferrers queries for ORAS artifacts that reference a subject manifest (SBOM, signatures, scan reports).
|
||||
// Method: GET
|
||||
// Query: digest={sha256:...}&artifactType={optional-filter}
|
||||
// Response: {"referrers": [{"digest": "...", "artifactType": "...", "annotations": {...}}, ...]}
|
||||
HoldGetReferrers = "/xrpc/io.atcr.hold.getReferrers"
|
||||
)
|
||||
|
||||
// Hold service crew management endpoints (io.atcr.hold.*)
|
||||
|
||||
@@ -25,14 +25,22 @@ import (
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Generate map-style encoders for CrewRecord, CaptainRecord, LayerRecord, and TangledProfileRecord
|
||||
// Generate map-style encoders for all ATProto records and nested types
|
||||
if err := cbg.WriteMapEncodersToFile("cbor_gen.go", "atproto",
|
||||
atproto.CrewRecord{},
|
||||
atproto.CaptainRecord{},
|
||||
atproto.LayerRecord{},
|
||||
atproto.TangledProfileRecord{},
|
||||
atproto.ManifestRecord{},
|
||||
atproto.BlobReference{},
|
||||
atproto.ManifestReference{},
|
||||
atproto.Platform{},
|
||||
atproto.ATProtoBlobRef{},
|
||||
atproto.Link{},
|
||||
); err != nil {
|
||||
fmt.Printf("Failed to generate CBOR encoders: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Println("Generated CBOR encoders in pkg/atproto/cbor_gen.go")
|
||||
}
|
||||
|
||||
@@ -53,109 +53,125 @@ const (
|
||||
// This follows the OCI image manifest specification but stored as an ATProto record
|
||||
type ManifestRecord struct {
|
||||
// Type should be "io.atcr.manifest"
|
||||
Type string `json:"$type"`
|
||||
Type string `json:"$type" cborgen:"$type"`
|
||||
|
||||
// Repository is the name of the repository (e.g., "myapp")
|
||||
Repository string `json:"repository"`
|
||||
Repository string `json:"repository" cborgen:"repository"`
|
||||
|
||||
// Digest is the content digest (e.g., "sha256:abc123...")
|
||||
Digest string `json:"digest"`
|
||||
Digest string `json:"digest" cborgen:"digest"`
|
||||
|
||||
// HoldDID is the DID of the hold service where blobs are stored
|
||||
// This is the primary reference for hold resolution
|
||||
// e.g., "did:web:hold01.atcr.io"
|
||||
HoldDID string `json:"holdDid,omitempty"`
|
||||
HoldDID string `json:"holdDid,omitempty" cborgen:"holdDid,omitempty"`
|
||||
|
||||
// HoldEndpoint is the hold service endpoint URL where blobs are stored (DEPRECATED)
|
||||
// Kept for backward compatibility with manifests created before DID migration
|
||||
// New manifests should use HoldDID instead
|
||||
// This is a historical reference that doesn't change even if user's default hold changes
|
||||
HoldEndpoint string `json:"holdEndpoint,omitempty"`
|
||||
HoldEndpoint string `json:"holdEndpoint,omitempty" cborgen:"holdEndpoint,omitempty"`
|
||||
|
||||
// MediaType is the OCI media type (e.g., "application/vnd.oci.image.manifest.v1+json")
|
||||
MediaType string `json:"mediaType"`
|
||||
MediaType string `json:"mediaType" cborgen:"mediaType"`
|
||||
|
||||
// ArtifactType distinguishes ORAS artifacts (SBOMs, signatures, scan reports)
|
||||
// e.g., "application/spdx+json", "application/vnd.atcr.vulnerabilities+json"
|
||||
// Empty for regular image manifests
|
||||
ArtifactType string `json:"artifactType,omitempty" cborgen:"artifactType,omitempty"`
|
||||
|
||||
// OwnerDID is the DID of the user who owns this manifest (for multi-tenant holds)
|
||||
// Used for ORAS artifacts stored in hold's PDS to track ownership
|
||||
OwnerDID string `json:"ownerDid,omitempty" cborgen:"ownerDid,omitempty"`
|
||||
|
||||
// ScannedAt is the timestamp when this artifact was scanned (for SBOM/vuln artifacts)
|
||||
ScannedAt string `json:"scannedAt,omitempty" cborgen:"scannedAt,omitempty"`
|
||||
|
||||
// ScannerVersion is the version of the scanner that generated this artifact
|
||||
// e.g., "syft-v1.36.0", "grype-v0.102.0"
|
||||
ScannerVersion string `json:"scannerVersion,omitempty" cborgen:"scannerVersion,omitempty"`
|
||||
|
||||
// SchemaVersion is the OCI schema version (typically 2)
|
||||
SchemaVersion int `json:"schemaVersion"`
|
||||
SchemaVersion int64 `json:"schemaVersion" cborgen:"schemaVersion"`
|
||||
|
||||
// Config references the image configuration blob (for image manifests)
|
||||
// Nil for manifest lists/indexes
|
||||
Config *BlobReference `json:"config,omitempty"`
|
||||
Config *BlobReference `json:"config,omitempty" cborgen:"config,omitempty"`
|
||||
|
||||
// Layers references the filesystem layers (for image manifests)
|
||||
// Empty for manifest lists/indexes
|
||||
Layers []BlobReference `json:"layers,omitempty"`
|
||||
Layers []BlobReference `json:"layers,omitempty" cborgen:"layers,omitempty"`
|
||||
|
||||
// Manifests references other manifests (for manifest lists/indexes)
|
||||
// Empty for image manifests
|
||||
Manifests []ManifestReference `json:"manifests,omitempty"`
|
||||
Manifests []ManifestReference `json:"manifests,omitempty" cborgen:"manifests,omitempty"`
|
||||
|
||||
// Annotations contains arbitrary metadata
|
||||
Annotations map[string]string `json:"annotations,omitempty"`
|
||||
Annotations map[string]string `json:"annotations,omitempty" cborgen:"annotations"`
|
||||
|
||||
// Subject references another manifest (for attestations, signatures, etc.)
|
||||
Subject *BlobReference `json:"subject,omitempty"`
|
||||
Subject *BlobReference `json:"subject,omitempty" cborgen:"subject,omitempty"`
|
||||
|
||||
// ManifestBlob is a reference to the manifest blob stored in ATProto blob storage
|
||||
ManifestBlob *ATProtoBlobRef `json:"manifestBlob,omitempty"`
|
||||
ManifestBlob *ATProtoBlobRef `json:"manifestBlob,omitempty" cborgen:"manifestBlob,omitempty"`
|
||||
|
||||
// CreatedAt timestamp
|
||||
CreatedAt time.Time `json:"createdAt"`
|
||||
// CreatedAt timestamp (RFC3339)
|
||||
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
|
||||
}
|
||||
|
||||
// BlobReference represents a reference to a blob (layer or config)
|
||||
// Blobs are stored in S3 and referenced by digest
|
||||
type BlobReference struct {
|
||||
// MediaType of the blob
|
||||
MediaType string `json:"mediaType"`
|
||||
MediaType string `json:"mediaType" cborgen:"mediaType"`
|
||||
|
||||
// Digest is the content digest (e.g., "sha256:abc123...")
|
||||
Digest string `json:"digest"`
|
||||
Digest string `json:"digest" cborgen:"digest"`
|
||||
|
||||
// Size in bytes
|
||||
Size int64 `json:"size"`
|
||||
Size int64 `json:"size" cborgen:"size"`
|
||||
|
||||
// URLs where the blob can be retrieved (S3 URLs)
|
||||
URLs []string `json:"urls,omitempty"`
|
||||
URLs []string `json:"urls,omitempty" cborgen:"urls,omitempty"`
|
||||
|
||||
// Annotations for the blob
|
||||
Annotations map[string]string `json:"annotations,omitempty"`
|
||||
Annotations map[string]string `json:"annotations,omitempty" cborgen:"annotations"`
|
||||
}
|
||||
|
||||
// ManifestReference represents a reference to a manifest in a manifest list/index
|
||||
type ManifestReference struct {
|
||||
// MediaType of the referenced manifest
|
||||
MediaType string `json:"mediaType"`
|
||||
MediaType string `json:"mediaType" cborgen:"mediaType"`
|
||||
|
||||
// Digest is the content digest (e.g., "sha256:abc123...")
|
||||
Digest string `json:"digest"`
|
||||
Digest string `json:"digest" cborgen:"digest"`
|
||||
|
||||
// Size in bytes
|
||||
Size int64 `json:"size"`
|
||||
Size int64 `json:"size" cborgen:"size"`
|
||||
|
||||
// Platform describes the platform/architecture this manifest is for
|
||||
Platform *Platform `json:"platform,omitempty"`
|
||||
Platform *Platform `json:"platform,omitempty" cborgen:"platform,omitempty"`
|
||||
|
||||
// Annotations for the manifest reference
|
||||
Annotations map[string]string `json:"annotations,omitempty"`
|
||||
Annotations map[string]string `json:"annotations,omitempty" cborgen:"annotations"`
|
||||
}
|
||||
|
||||
// Platform describes the platform (OS/architecture) for a manifest
|
||||
type Platform struct {
|
||||
// Architecture is the CPU architecture (e.g., "amd64", "arm64", "arm")
|
||||
Architecture string `json:"architecture"`
|
||||
Architecture string `json:"architecture" cborgen:"architecture"`
|
||||
|
||||
// OS is the operating system (e.g., "linux", "windows", "darwin")
|
||||
OS string `json:"os"`
|
||||
OS string `json:"os" cborgen:"os"`
|
||||
|
||||
// OSVersion is the optional OS version
|
||||
OSVersion string `json:"os.version,omitempty"`
|
||||
OSVersion string `json:"os.version,omitempty" cborgen:"os.version,omitempty"`
|
||||
|
||||
// OSFeatures is an optional list of OS features
|
||||
OSFeatures []string `json:"os.features,omitempty"`
|
||||
OSFeatures []string `json:"os.features,omitempty" cborgen:"os.features,omitempty"`
|
||||
|
||||
// Variant is the optional CPU variant (e.g., "v7" for ARM)
|
||||
Variant string `json:"variant,omitempty"`
|
||||
Variant string `json:"variant,omitempty" cborgen:"variant,omitempty"`
|
||||
}
|
||||
|
||||
// NewManifestRecord creates a new manifest record from OCI manifest JSON
|
||||
@@ -164,6 +180,7 @@ func NewManifestRecord(repository, digest string, ociManifest []byte) (*Manifest
|
||||
var ociData struct {
|
||||
SchemaVersion int `json:"schemaVersion"`
|
||||
MediaType string `json:"mediaType"`
|
||||
ArtifactType string `json:"artifactType,omitempty"`
|
||||
Config json.RawMessage `json:"config,omitempty"`
|
||||
Layers []json.RawMessage `json:"layers,omitempty"`
|
||||
Manifests []json.RawMessage `json:"manifests,omitempty"`
|
||||
@@ -195,10 +212,11 @@ func NewManifestRecord(repository, digest string, ociManifest []byte) (*Manifest
|
||||
Repository: repository,
|
||||
Digest: digest,
|
||||
MediaType: ociData.MediaType,
|
||||
SchemaVersion: ociData.SchemaVersion,
|
||||
ArtifactType: ociData.ArtifactType,
|
||||
SchemaVersion: int64(ociData.SchemaVersion),
|
||||
Annotations: ociData.Annotations,
|
||||
// ManifestBlob will be set by the caller after uploading to blob storage
|
||||
CreatedAt: time.Now(),
|
||||
CreatedAt: time.Now().Format(time.RFC3339),
|
||||
}
|
||||
|
||||
if isManifestList {
|
||||
|
||||
@@ -146,7 +146,7 @@ func TestNewManifestRecord(t *testing.T) {
|
||||
if record.Annotations["org.opencontainers.image.created"] != "2025-01-01T00:00:00Z" {
|
||||
t.Errorf("Annotations missing expected key")
|
||||
}
|
||||
if record.CreatedAt.IsZero() {
|
||||
if record.CreatedAt == "0" {
|
||||
t.Error("CreatedAt should not be zero")
|
||||
}
|
||||
if record.Subject != nil {
|
||||
|
||||
@@ -22,6 +22,7 @@ type Config struct {
|
||||
Server ServerConfig `yaml:"server"`
|
||||
Registration RegistrationConfig `yaml:"registration"`
|
||||
Database DatabaseConfig `yaml:"database"`
|
||||
Scanner ScannerConfig `yaml:"scanner"`
|
||||
}
|
||||
|
||||
// RegistrationConfig defines auto-registration settings
|
||||
@@ -85,6 +86,27 @@ type DatabaseConfig struct {
|
||||
KeyPath string `yaml:"key_path"`
|
||||
}
|
||||
|
||||
// ScannerConfig defines SBOM and vulnerability scanning settings
|
||||
type ScannerConfig struct {
|
||||
// Enabled controls whether SBOM scanning is enabled (from env: HOLD_SBOM_ENABLED)
|
||||
Enabled bool `yaml:"enabled"`
|
||||
|
||||
// Workers is the number of concurrent scan workers (from env: HOLD_SBOM_WORKERS)
|
||||
Workers int `yaml:"workers"`
|
||||
|
||||
// VulnEnabled controls whether vulnerability scanning is enabled (from env: HOLD_VULN_ENABLED)
|
||||
VulnEnabled bool `yaml:"vuln_enabled"`
|
||||
|
||||
// VulnDBPath is the path to store Grype vulnerability database (from env: HOLD_VULN_DB_PATH)
|
||||
// Defaults to {DatabasePath}/grype-db
|
||||
VulnDBPath string `yaml:"vuln_db_path"`
|
||||
|
||||
// VulnDBUpdateInterval is how often to update the vulnerability database (from env: HOLD_VULN_DB_UPDATE_INTERVAL)
|
||||
// Format: duration string (e.g., "24h", "1h30m")
|
||||
// Defaults to 24 hours
|
||||
VulnDBUpdateInterval time.Duration `yaml:"vuln_db_update_interval"`
|
||||
}
|
||||
|
||||
// LoadConfigFromEnv loads all configuration from environment variables
|
||||
func LoadConfigFromEnv() (*Config, error) {
|
||||
cfg := &Config{
|
||||
@@ -121,6 +143,24 @@ func LoadConfigFromEnv() (*Config, error) {
|
||||
cfg.Database.KeyPath = filepath.Join(cfg.Database.Path, "signing.key")
|
||||
}
|
||||
|
||||
// Scanner configuration (optional - enables SBOM/vulnerability scanning)
|
||||
cfg.Scanner.Enabled = os.Getenv("HOLD_SBOM_ENABLED") == "true"
|
||||
cfg.Scanner.Workers = 2 // Default
|
||||
if workersStr := os.Getenv("HOLD_SBOM_WORKERS"); workersStr != "" {
|
||||
var workersInt int
|
||||
if _, err := fmt.Sscanf(workersStr, "%d", &workersInt); err == nil && workersInt > 0 {
|
||||
cfg.Scanner.Workers = workersInt
|
||||
}
|
||||
}
|
||||
cfg.Scanner.VulnEnabled = os.Getenv("HOLD_VULN_ENABLED") == "true"
|
||||
cfg.Scanner.VulnDBPath = getEnvOrDefault("HOLD_VULN_DB_PATH", filepath.Join(cfg.Database.Path, "grype-db"))
|
||||
cfg.Scanner.VulnDBUpdateInterval = 24 * time.Hour // Default
|
||||
if intervalStr := os.Getenv("HOLD_VULN_DB_UPDATE_INTERVAL"); intervalStr != "" {
|
||||
if interval, err := time.ParseDuration(intervalStr); err == nil {
|
||||
cfg.Scanner.VulnDBUpdateInterval = interval
|
||||
}
|
||||
}
|
||||
|
||||
// Storage configuration - build from env vars based on storage type
|
||||
storageType := getEnvOrDefault("STORAGE_DRIVER", "s3")
|
||||
var err error
|
||||
|
||||
@@ -4,7 +4,9 @@ import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"sort"
|
||||
"strings"
|
||||
@@ -387,14 +389,33 @@ func (h *XRPCHandler) CompleteMultipartUploadWithManager(ctx context.Context, up
|
||||
}
|
||||
slog.Debug("Source blob verified", "path", sourcePath)
|
||||
|
||||
// Move from temp to final digest location using driver
|
||||
// Driver handles path management correctly (including S3 prefix)
|
||||
if err := h.driver.Move(ctx, sourcePath, destPath); err != nil {
|
||||
slog.Error("Failed to move blob",
|
||||
// Move from temp to final digest location
|
||||
// Strategy: Use S3 CopyObject directly instead of driver's Move() to avoid
|
||||
// UploadPartCopy issues with S3-compatible services (Storj, MinIO, etc.)
|
||||
//
|
||||
// Fallback order:
|
||||
// 1. S3 CopyObject (server-side, works up to 5GB, universally supported)
|
||||
// 2. Manual copy-and-delete (slower but works for any size/backend)
|
||||
//
|
||||
// Note: We skip driver.Move() because it uses UploadPartCopy for files >100MB,
|
||||
// which many S3-compatible services don't support (returns 501 Not Implemented)
|
||||
|
||||
if err := h.s3CopyObject(ctx, sourcePath, destPath); err != nil {
|
||||
// S3 CopyObject failed (not S3, file too large, or other error)
|
||||
// Fall back to manual copy-and-delete
|
||||
slog.Warn("S3 CopyObject failed, attempting read/write fallback",
|
||||
"source", sourcePath,
|
||||
"dest", destPath,
|
||||
"error", err)
|
||||
return fmt.Errorf("failed to move blob to final location: %w", err)
|
||||
|
||||
if fallbackErr := h.copyAndDelete(ctx, sourcePath, destPath); fallbackErr != nil {
|
||||
slog.Error("All move strategies failed",
|
||||
"source", sourcePath,
|
||||
"dest", destPath,
|
||||
"s3CopyErr", err,
|
||||
"fallbackErr", fallbackErr)
|
||||
return fmt.Errorf("failed to move blob to final location: %w", fallbackErr)
|
||||
}
|
||||
}
|
||||
|
||||
slog.Info("Moved blob to final location",
|
||||
@@ -500,6 +521,114 @@ func normalizeETag(etag string) string {
|
||||
return fmt.Sprintf("\"%s\"", etag)
|
||||
}
|
||||
|
||||
// s3CopyObject uses S3's CopyObject API directly for server-side copy.
|
||||
// This works for objects up to 5GB and is supported by most S3-compatible services.
|
||||
// Falls back to copyAndDelete if S3 is not available or the operation fails.
|
||||
func (h *XRPCHandler) s3CopyObject(ctx context.Context, sourcePath, destPath string) error {
|
||||
// Check if S3 is configured
|
||||
if h.s3Service.Client == nil {
|
||||
return fmt.Errorf("S3 not configured")
|
||||
}
|
||||
|
||||
// Convert paths to S3 keys (remove leading slash and add prefix if configured)
|
||||
sourceKey := strings.TrimPrefix(sourcePath, "/")
|
||||
destKey := strings.TrimPrefix(destPath, "/")
|
||||
|
||||
if h.s3Service.PathPrefix != "" {
|
||||
sourceKey = h.s3Service.PathPrefix + "/" + sourceKey
|
||||
destKey = h.s3Service.PathPrefix + "/" + destKey
|
||||
}
|
||||
|
||||
// Construct the copy source (must be in format: bucket/key)
|
||||
copySource := h.s3Service.Bucket + "/" + sourceKey
|
||||
|
||||
// Perform server-side copy using S3 CopyObject API
|
||||
// This works for objects up to 5GB in a single operation
|
||||
_, err := h.s3Service.Client.CopyObjectWithContext(ctx, &s3.CopyObjectInput{
|
||||
Bucket: &h.s3Service.Bucket,
|
||||
Key: &destKey,
|
||||
CopySource: ©Source,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("S3 CopyObject failed: %w", err)
|
||||
}
|
||||
|
||||
slog.Info("Successfully copied blob using S3 CopyObject (server-side)",
|
||||
"source", sourcePath,
|
||||
"dest", destPath)
|
||||
|
||||
// Delete the source after successful copy
|
||||
if err := h.driver.Delete(ctx, sourcePath); err != nil {
|
||||
slog.Warn("Failed to delete source after S3 copy (destination created successfully)",
|
||||
"source", sourcePath,
|
||||
"error", err)
|
||||
// Don't return error - the copy succeeded, cleanup is best-effort
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// copyAndDelete implements a fallback for Move() when the storage driver doesn't support
|
||||
// efficient server-side copy operations (e.g., S3 UploadPartCopy not available).
|
||||
// It reads from source, writes to destination, then deletes source.
|
||||
func (h *XRPCHandler) copyAndDelete(ctx context.Context, sourcePath, destPath string) error {
|
||||
// Read from source
|
||||
reader, err := h.driver.Reader(ctx, sourcePath, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open source for reading: %w", err)
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
// Create writer for destination
|
||||
writer, err := h.driver.Writer(ctx, destPath, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create destination writer: %w", err)
|
||||
}
|
||||
|
||||
// Copy data from source to destination
|
||||
// Use a buffered approach to avoid loading entire blob into memory
|
||||
buf := make([]byte, 32*1024*1024) // 32MB buffer
|
||||
var totalCopied int64
|
||||
for {
|
||||
n, readErr := reader.Read(buf)
|
||||
if n > 0 {
|
||||
written, writeErr := writer.Write(buf[:n])
|
||||
if writeErr != nil {
|
||||
writer.Cancel(ctx)
|
||||
return fmt.Errorf("failed to write to destination: %w", writeErr)
|
||||
}
|
||||
totalCopied += int64(written)
|
||||
}
|
||||
if readErr != nil {
|
||||
if errors.Is(readErr, io.EOF) {
|
||||
break
|
||||
}
|
||||
writer.Cancel(ctx)
|
||||
return fmt.Errorf("failed to read from source: %w", readErr)
|
||||
}
|
||||
}
|
||||
|
||||
// Commit the destination
|
||||
if err := writer.Commit(ctx); err != nil {
|
||||
return fmt.Errorf("failed to commit destination: %w", err)
|
||||
}
|
||||
|
||||
// Delete the source
|
||||
if err := h.driver.Delete(ctx, sourcePath); err != nil {
|
||||
slog.Warn("Failed to delete source after copy (destination committed successfully)",
|
||||
"source", sourcePath,
|
||||
"error", err)
|
||||
// Don't return error - the copy succeeded, cleanup is best-effort
|
||||
}
|
||||
|
||||
slog.Info("Successfully copied blob using fallback method",
|
||||
"source", sourcePath,
|
||||
"dest", destPath,
|
||||
"size", totalCopied)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// blobPath converts a digest (e.g., "sha256:abc123...") or temp path to a storage path
|
||||
// Distribution stores blobs as: /docker/registry/v2/blobs/{algorithm}/{xx}/{hash}/data
|
||||
// where xx is the first 2 characters of the hash for directory sharding
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
|
||||
"atcr.io/pkg/atproto"
|
||||
"atcr.io/pkg/hold/pds"
|
||||
"atcr.io/pkg/hold/scanner"
|
||||
"atcr.io/pkg/s3"
|
||||
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
|
||||
"github.com/go-chi/chi/v5"
|
||||
@@ -23,10 +24,17 @@ type XRPCHandler struct {
|
||||
pds *pds.HoldPDS
|
||||
httpClient pds.HTTPClient
|
||||
enableBlueskyPosts bool
|
||||
scanQueue ScanQueue // Scanner queue interface (nil if scanning disabled)
|
||||
}
|
||||
|
||||
// ScanQueue is an interface for enqueuing scan jobs
|
||||
// This allows us to pass in a scanner queue without importing the scanner package (avoiding circular deps)
|
||||
type ScanQueue interface {
|
||||
Enqueue(job any) error
|
||||
}
|
||||
|
||||
// NewXRPCHandler creates a new OCI XRPC handler
|
||||
func NewXRPCHandler(holdPDS *pds.HoldPDS, s3Service s3.S3Service, driver storagedriver.StorageDriver, disablePresignedURLs bool, enableBlueskyPosts bool, httpClient pds.HTTPClient) *XRPCHandler {
|
||||
func NewXRPCHandler(holdPDS *pds.HoldPDS, s3Service s3.S3Service, driver storagedriver.StorageDriver, disablePresignedURLs bool, enableBlueskyPosts bool, httpClient pds.HTTPClient, scanQueue ScanQueue) *XRPCHandler {
|
||||
return &XRPCHandler{
|
||||
driver: driver,
|
||||
disablePresignedURLs: disablePresignedURLs,
|
||||
@@ -35,6 +43,7 @@ func NewXRPCHandler(holdPDS *pds.HoldPDS, s3Service s3.S3Service, driver storage
|
||||
pds: holdPDS,
|
||||
httpClient: httpClient,
|
||||
enableBlueskyPosts: enableBlueskyPosts,
|
||||
scanQueue: scanQueue,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -215,11 +224,12 @@ func (h *XRPCHandler) HandleNotifyManifest(w http.ResponseWriter, r *http.Reques
|
||||
|
||||
// Parse request
|
||||
var req struct {
|
||||
Repository string `json:"repository"`
|
||||
Tag string `json:"tag"`
|
||||
UserDID string `json:"userDid"`
|
||||
UserHandle string `json:"userHandle"`
|
||||
Manifest struct {
|
||||
Repository string `json:"repository"`
|
||||
Tag string `json:"tag"`
|
||||
ManifestDigest string `json:"manifestDigest"`
|
||||
UserDID string `json:"userDid"`
|
||||
UserHandle string `json:"userHandle"`
|
||||
Manifest struct {
|
||||
MediaType string `json:"mediaType"`
|
||||
Config struct {
|
||||
Digest string `json:"digest"`
|
||||
@@ -238,6 +248,12 @@ func (h *XRPCHandler) HandleNotifyManifest(w http.ResponseWriter, r *http.Reques
|
||||
return
|
||||
}
|
||||
|
||||
slog.Info("Received manifest notification",
|
||||
"repository", req.Repository,
|
||||
"tag", req.Tag,
|
||||
"layerCount", len(req.Manifest.Layers),
|
||||
"hasConfig", req.Manifest.Config.Digest != "")
|
||||
|
||||
// Verify user DID matches token
|
||||
if req.UserDID != validatedUser.DID {
|
||||
RespondError(w, http.StatusForbidden, "user DID mismatch")
|
||||
@@ -287,10 +303,11 @@ func (h *XRPCHandler) HandleNotifyManifest(w http.ResponseWriter, r *http.Reques
|
||||
var postURI string
|
||||
postCreated := false
|
||||
if postsEnabled {
|
||||
// Extract manifest digest from first layer (or use config digest as fallback)
|
||||
manifestDigest := req.Manifest.Config.Digest
|
||||
if len(req.Manifest.Layers) > 0 {
|
||||
manifestDigest = req.Manifest.Layers[0].Digest
|
||||
// Use the actual manifest digest from the request
|
||||
manifestDigest := req.ManifestDigest
|
||||
if manifestDigest == "" {
|
||||
// Fallback to config digest for backward compatibility (shouldn't happen with updated AppView)
|
||||
manifestDigest = req.Manifest.Config.Digest
|
||||
}
|
||||
|
||||
postURI, err = h.pds.CreateManifestPost(
|
||||
@@ -309,6 +326,57 @@ func (h *XRPCHandler) HandleNotifyManifest(w http.ResponseWriter, r *http.Reques
|
||||
}
|
||||
}
|
||||
|
||||
// Enqueue vulnerability scan if scanner is enabled
|
||||
if h.scanQueue != nil {
|
||||
// Use the actual manifest digest from the request
|
||||
manifestDigest := req.ManifestDigest
|
||||
if manifestDigest == "" {
|
||||
// Fallback to config digest for backward compatibility (shouldn't happen with updated AppView)
|
||||
manifestDigest = req.Manifest.Config.Digest
|
||||
slog.Warn("Manifest digest not provided in notification, using config digest as fallback",
|
||||
"repository", req.Repository, "configDigest", manifestDigest)
|
||||
}
|
||||
|
||||
// Convert request layers to atproto.BlobReference
|
||||
layers := make([]atproto.BlobReference, len(req.Manifest.Layers))
|
||||
for i, layer := range req.Manifest.Layers {
|
||||
layers[i] = atproto.BlobReference{
|
||||
Digest: layer.Digest,
|
||||
Size: layer.Size,
|
||||
MediaType: layer.MediaType,
|
||||
}
|
||||
}
|
||||
|
||||
// Create properly typed scan job
|
||||
scanJob := &scanner.ScanJob{
|
||||
ManifestDigest: manifestDigest,
|
||||
Repository: req.Repository,
|
||||
Tag: req.Tag,
|
||||
UserDID: req.UserDID,
|
||||
UserHandle: req.UserHandle,
|
||||
Config: atproto.BlobReference{
|
||||
Digest: req.Manifest.Config.Digest,
|
||||
Size: req.Manifest.Config.Size,
|
||||
MediaType: "application/vnd.oci.image.config.v1+json",
|
||||
},
|
||||
Layers: layers,
|
||||
}
|
||||
|
||||
slog.Info("Enqueueing scan job",
|
||||
"repository", req.Repository,
|
||||
"layersInJob", len(layers))
|
||||
|
||||
if err := h.scanQueue.Enqueue(scanJob); err != nil {
|
||||
slog.Error("Failed to enqueue scan job", "error", err, "repository", req.Repository)
|
||||
} else {
|
||||
slog.Info("Enqueued vulnerability scan",
|
||||
"repository", req.Repository,
|
||||
"tag", req.Tag,
|
||||
"digest", manifestDigest,
|
||||
"layers", len(req.Manifest.Layers))
|
||||
}
|
||||
}
|
||||
|
||||
// Return response
|
||||
resp := map[string]any{
|
||||
"success": layersCreated > 0 || postCreated,
|
||||
|
||||
@@ -127,7 +127,7 @@ func setupTestOCIHandler(t *testing.T) (*XRPCHandler, context.Context) {
|
||||
|
||||
// Create OCI handler with buffered mode (no S3)
|
||||
mockS3 := s3.S3Service{}
|
||||
handler := NewXRPCHandler(holdPDS, mockS3, driver, true, false, mockClient)
|
||||
handler := NewXRPCHandler(holdPDS, mockS3, driver, true, false, mockClient, nil)
|
||||
|
||||
return handler, ctx
|
||||
}
|
||||
|
||||
@@ -267,6 +267,12 @@ func (p *HoldPDS) ListCollections(ctx context.Context) ([]string, error) {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// CreateManifestRecord creates a manifest record with a specific rkey
|
||||
// Used by the scanner to store ORAS artifacts (SBOMs, vulnerability reports)
|
||||
func (p *HoldPDS) CreateManifestRecord(ctx context.Context, record *atproto.ManifestRecord, rkey string) (string, cid.Cid, error) {
|
||||
return p.repomgr.PutRecord(ctx, p.uid, atproto.ManifestCollection, rkey, record)
|
||||
}
|
||||
|
||||
// Close closes the carstore
|
||||
func (p *HoldPDS) Close() error {
|
||||
// TODO: Close session properly
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"atcr.io/pkg/atproto"
|
||||
"atcr.io/pkg/s3"
|
||||
@@ -171,6 +172,9 @@ func (h *XRPCHandler) RegisterHandlers(r chi.Router) {
|
||||
r.Get(atproto.IdentityResolveHandle, h.HandleResolveHandle)
|
||||
r.Get(atproto.ActorGetProfile, h.HandleGetProfile)
|
||||
r.Get(atproto.ActorGetProfiles, h.HandleGetProfiles)
|
||||
|
||||
// ORAS/Scanner endpoints
|
||||
r.Get(atproto.HoldGetReferrers, h.HandleGetReferrers)
|
||||
})
|
||||
|
||||
// Blob read endpoints (conditional auth based on captain.public)
|
||||
@@ -1415,3 +1419,234 @@ func getProxyURL(publicURL string, digest, did string, operation string) string
|
||||
// Clients should use multipart upload flow via com.atproto.repo.uploadBlob
|
||||
return ""
|
||||
}
|
||||
|
||||
// HandleGetReferrers queries for ORAS artifacts (SBOMs, signatures, scan reports) that reference a subject manifest
|
||||
// GET /xrpc/io.atcr.hold.getReferrers?digest=sha256:abc123&artifactType=application/vnd.atcr.vulnerabilities+json
|
||||
func (h *XRPCHandler) HandleGetReferrers(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
// Parse query parameters
|
||||
digest := r.URL.Query().Get("digest")
|
||||
artifactType := r.URL.Query().Get("artifactType")
|
||||
|
||||
if digest == "" {
|
||||
http.Error(w, "digest parameter required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
slog.Info("Querying referrers", "digest", digest, "artifactType", artifactType)
|
||||
|
||||
// Query all manifest records from the hold's PDS using carstore
|
||||
session, err := h.pds.carstore.ReadOnlySession(h.pds.uid)
|
||||
if err != nil {
|
||||
slog.Error("Failed to create session", "error", err)
|
||||
http.Error(w, fmt.Sprintf("failed to create session: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
head, err := h.pds.carstore.GetUserRepoHead(ctx, h.pds.uid)
|
||||
if err != nil {
|
||||
slog.Error("Failed to get repo head", "error", err)
|
||||
http.Error(w, fmt.Sprintf("failed to get repo head: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if !head.Defined() {
|
||||
// Empty repo, return empty referrers list
|
||||
slog.Info("Empty repo, no referrers found")
|
||||
response := map[string]interface{}{
|
||||
"referrers": []interface{}{},
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(response)
|
||||
return
|
||||
}
|
||||
|
||||
repoHandle, err := repo.OpenRepo(ctx, session, head)
|
||||
if err != nil {
|
||||
slog.Error("Failed to open repo", "error", err)
|
||||
http.Error(w, fmt.Sprintf("failed to open repo: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Filter for referrers with matching subject
|
||||
referrers := []map[string]interface{}{}
|
||||
totalManifests := 0
|
||||
|
||||
// Iterate over all records in the manifest collection
|
||||
err = repoHandle.ForEach(ctx, atproto.ManifestCollection, func(k string, v cid.Cid) error {
|
||||
totalManifests++
|
||||
|
||||
// Get the record bytes directly from the repo
|
||||
_, recBytes, err := repoHandle.GetRecordBytes(ctx, k)
|
||||
if err != nil {
|
||||
slog.Warn("Failed to get record", "key", k, "error", err)
|
||||
return nil // Skip this record
|
||||
}
|
||||
|
||||
// Unmarshal the CBOR bytes into our concrete type
|
||||
var manifest atproto.ManifestRecord
|
||||
if err := manifest.UnmarshalCBOR(bytes.NewReader(*recBytes)); err != nil {
|
||||
slog.Warn("Failed to unmarshal ManifestRecord", "key", k, "error", err)
|
||||
return nil // Skip this record
|
||||
}
|
||||
|
||||
// Debug: log what we found
|
||||
slog.Debug("Checking manifest",
|
||||
"key", k,
|
||||
"digest", manifest.Digest,
|
||||
"hasSubject", manifest.Subject != nil,
|
||||
"subjectDigest", func() string {
|
||||
if manifest.Subject != nil {
|
||||
return manifest.Subject.Digest
|
||||
}
|
||||
return "none"
|
||||
}(),
|
||||
"artifactType", manifest.ArtifactType,
|
||||
"mediaType", manifest.MediaType)
|
||||
|
||||
// Check if this manifest has a subject that matches the requested digest
|
||||
if manifest.Subject != nil && manifest.Subject.Digest == digest {
|
||||
// If artifactType filter is specified, only include matching artifacts
|
||||
if artifactType != "" && manifest.ArtifactType != artifactType {
|
||||
slog.Debug("Skipping referrer due to artifactType mismatch",
|
||||
"key", k,
|
||||
"digest", manifest.Digest,
|
||||
"wantArtifactType", artifactType,
|
||||
"gotArtifactType", manifest.ArtifactType)
|
||||
return nil // Skip this record
|
||||
}
|
||||
|
||||
// Build referrer response
|
||||
referrer := map[string]interface{}{
|
||||
"digest": manifest.Digest,
|
||||
"mediaType": manifest.MediaType,
|
||||
"size": 0, // We don't track manifest size currently
|
||||
"artifactType": manifest.ArtifactType,
|
||||
"annotations": manifest.Annotations,
|
||||
}
|
||||
|
||||
// Add scanner metadata if available
|
||||
if manifest.ScannedAt != "" {
|
||||
referrer["scannedAt"] = manifest.ScannedAt
|
||||
}
|
||||
if manifest.ScannerVersion != "" {
|
||||
referrer["scannerVersion"] = manifest.ScannerVersion
|
||||
}
|
||||
if manifest.OwnerDID != "" {
|
||||
referrer["ownerDid"] = manifest.OwnerDID
|
||||
}
|
||||
|
||||
// Fetch the actual vulnerability report blob data
|
||||
// manifest.Digest is the ORAS manifest digest, we need to:
|
||||
// 1. Fetch the ORAS manifest blob
|
||||
// 2. Parse it to get layers[0].digest (the actual vulnerability report)
|
||||
// 3. Fetch that blob
|
||||
|
||||
// Step 1: Fetch ORAS manifest
|
||||
orasManifestPath := fmt.Sprintf("/docker/registry/v2/blobs/sha256/%s/%s/data",
|
||||
manifest.Digest[7:9], // First 2 chars after "sha256:"
|
||||
manifest.Digest[7:]) // Full hex after "sha256:"
|
||||
|
||||
orasManifestData, err := h.storageDriver.GetContent(ctx, orasManifestPath)
|
||||
if err != nil {
|
||||
slog.Warn("Failed to fetch ORAS manifest blob",
|
||||
"digest", manifest.Digest,
|
||||
"path", orasManifestPath,
|
||||
"error", err)
|
||||
// Continue without the blob data
|
||||
} else {
|
||||
// Step 2: Parse ORAS manifest to get vulnerability report digest
|
||||
var orasManifest struct {
|
||||
Layers []struct {
|
||||
Digest string `json:"digest"`
|
||||
} `json:"layers"`
|
||||
}
|
||||
if err := json.Unmarshal(orasManifestData, &orasManifest); err != nil {
|
||||
slog.Warn("Failed to parse ORAS manifest JSON",
|
||||
"digest", manifest.Digest,
|
||||
"error", err)
|
||||
} else if len(orasManifest.Layers) > 0 {
|
||||
// Step 3: Fetch the vulnerability report blob from layers[0]
|
||||
vulnReportDigest := orasManifest.Layers[0].Digest
|
||||
vulnReportPath := fmt.Sprintf("/docker/registry/v2/blobs/sha256/%s/%s/data",
|
||||
vulnReportDigest[7:9],
|
||||
vulnReportDigest[7:])
|
||||
|
||||
vulnReportData, err := h.storageDriver.GetContent(ctx, vulnReportPath)
|
||||
if err != nil {
|
||||
slog.Warn("Failed to fetch vulnerability report blob",
|
||||
"digest", vulnReportDigest,
|
||||
"path", vulnReportPath,
|
||||
"error", err)
|
||||
} else {
|
||||
// Parse and include the vulnerability report
|
||||
var reportData map[string]interface{}
|
||||
if err := json.Unmarshal(vulnReportData, &reportData); err != nil {
|
||||
slog.Warn("Failed to parse vulnerability report JSON",
|
||||
"digest", vulnReportDigest,
|
||||
"error", err)
|
||||
} else {
|
||||
referrer["reportData"] = reportData
|
||||
slog.Debug("Included vulnerability report data in referrer",
|
||||
"orasDigest", manifest.Digest,
|
||||
"reportDigest", vulnReportDigest,
|
||||
"reportSize", len(vulnReportData))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
slog.Debug("Found matching referrer",
|
||||
"key", k,
|
||||
"digest", manifest.Digest,
|
||||
"artifactType", manifest.ArtifactType,
|
||||
"annotations", manifest.Annotations,
|
||||
"annotationsLen", len(manifest.Annotations))
|
||||
|
||||
referrers = append(referrers, referrer)
|
||||
}
|
||||
|
||||
return nil // Continue iteration
|
||||
})
|
||||
|
||||
if err != nil && err != repo.ErrDoneIterating && !strings.Contains(err.Error(), "done iterating") {
|
||||
slog.Error("Failed to iterate records", "error", err)
|
||||
http.Error(w, fmt.Sprintf("failed to iterate records: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Sort referrers by scannedAt timestamp (descending, most recent first)
|
||||
// This ensures the AppView gets the latest scan result when it takes the first referrer
|
||||
sort.Slice(referrers, func(i, j int) bool {
|
||||
iScanned, iOk := referrers[i]["scannedAt"].(string)
|
||||
jScanned, jOk := referrers[j]["scannedAt"].(string)
|
||||
// If both have scannedAt, compare timestamps (reverse order for descending)
|
||||
if iOk && jOk {
|
||||
return iScanned > jScanned
|
||||
}
|
||||
// If only one has scannedAt, prefer that one
|
||||
if iOk {
|
||||
return true
|
||||
}
|
||||
if jOk {
|
||||
return false
|
||||
}
|
||||
// Neither has scannedAt, maintain original order
|
||||
return false
|
||||
})
|
||||
|
||||
slog.Info("Found referrers",
|
||||
"count", len(referrers),
|
||||
"totalManifests", totalManifests,
|
||||
"digest", digest,
|
||||
"artifactType", artifactType)
|
||||
|
||||
// Return response
|
||||
response := map[string]interface{}{
|
||||
"referrers": referrers,
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(response)
|
||||
}
|
||||
|
||||
270
pkg/hold/scanner/extractor.go
Normal file
270
pkg/hold/scanner/extractor.go
Normal file
@@ -0,0 +1,270 @@
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// extractLayers extracts all image layers from storage to a temporary directory
|
||||
// Returns the directory path and a cleanup function
|
||||
func (w *Worker) extractLayers(ctx context.Context, job *ScanJob) (string, func(), error) {
|
||||
// Create temp directory for extraction
|
||||
// Use the database directory as the base (since we're in a scratch container with no /tmp)
|
||||
scanTmpBase := filepath.Join(w.config.Database.Path, "scanner-tmp")
|
||||
if err := os.MkdirAll(scanTmpBase, 0755); err != nil {
|
||||
return "", nil, fmt.Errorf("failed to create scanner temp base: %w", err)
|
||||
}
|
||||
|
||||
tmpDir, err := os.MkdirTemp(scanTmpBase, "scan-*")
|
||||
if err != nil {
|
||||
return "", nil, fmt.Errorf("failed to create temp directory: %w", err)
|
||||
}
|
||||
|
||||
cleanup := func() {
|
||||
if err := os.RemoveAll(tmpDir); err != nil {
|
||||
slog.Warn("Failed to clean up temp directory", "dir", tmpDir, "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create image directory structure
|
||||
imageDir := filepath.Join(tmpDir, "image")
|
||||
if err := os.MkdirAll(imageDir, 0755); err != nil {
|
||||
cleanup()
|
||||
return "", nil, fmt.Errorf("failed to create image directory: %w", err)
|
||||
}
|
||||
|
||||
// Download and extract config blob
|
||||
slog.Info("Downloading config blob", "digest", job.Config.Digest)
|
||||
configPath := filepath.Join(imageDir, "config.json")
|
||||
if err := w.downloadBlob(ctx, job.Config.Digest, configPath); err != nil {
|
||||
cleanup()
|
||||
return "", nil, fmt.Errorf("failed to download config blob: %w", err)
|
||||
}
|
||||
|
||||
// Validate config is valid JSON
|
||||
configData, err := os.ReadFile(configPath)
|
||||
if err != nil {
|
||||
cleanup()
|
||||
return "", nil, fmt.Errorf("failed to read config: %w", err)
|
||||
}
|
||||
var configObj map[string]interface{}
|
||||
if err := json.Unmarshal(configData, &configObj); err != nil {
|
||||
cleanup()
|
||||
return "", nil, fmt.Errorf("invalid config JSON: %w", err)
|
||||
}
|
||||
|
||||
// Create layers directory for extracted content
|
||||
layersDir := filepath.Join(imageDir, "layers")
|
||||
if err := os.MkdirAll(layersDir, 0755); err != nil {
|
||||
cleanup()
|
||||
return "", nil, fmt.Errorf("failed to create layers directory: %w", err)
|
||||
}
|
||||
|
||||
// Download and extract each layer in order (creating overlayfs-style filesystem)
|
||||
rootfsDir := filepath.Join(imageDir, "rootfs")
|
||||
if err := os.MkdirAll(rootfsDir, 0755); err != nil {
|
||||
cleanup()
|
||||
return "", nil, fmt.Errorf("failed to create rootfs directory: %w", err)
|
||||
}
|
||||
|
||||
for i, layer := range job.Layers {
|
||||
slog.Info("Extracting layer", "index", i, "digest", layer.Digest, "size", layer.Size)
|
||||
|
||||
// Download layer blob to temp file
|
||||
layerPath := filepath.Join(layersDir, fmt.Sprintf("layer-%d.tar.gz", i))
|
||||
if err := w.downloadBlob(ctx, layer.Digest, layerPath); err != nil {
|
||||
cleanup()
|
||||
return "", nil, fmt.Errorf("failed to download layer %d: %w", i, err)
|
||||
}
|
||||
|
||||
// Extract layer on top of rootfs (overlayfs style)
|
||||
if err := w.extractTarGz(layerPath, rootfsDir); err != nil {
|
||||
cleanup()
|
||||
return "", nil, fmt.Errorf("failed to extract layer %d: %w", i, err)
|
||||
}
|
||||
|
||||
// Remove layer tar.gz to save space
|
||||
os.Remove(layerPath)
|
||||
}
|
||||
|
||||
// Check what was extracted
|
||||
entries, err := os.ReadDir(rootfsDir)
|
||||
if err != nil {
|
||||
slog.Warn("Failed to read rootfs directory", "error", err)
|
||||
} else {
|
||||
slog.Info("Successfully extracted image",
|
||||
"layers", len(job.Layers),
|
||||
"rootfs", rootfsDir,
|
||||
"topLevelEntries", len(entries),
|
||||
"sampleEntries", func() []string {
|
||||
var samples []string
|
||||
for i, e := range entries {
|
||||
if i >= 10 {
|
||||
break
|
||||
}
|
||||
samples = append(samples, e.Name())
|
||||
}
|
||||
return samples
|
||||
}())
|
||||
}
|
||||
|
||||
return rootfsDir, cleanup, nil
|
||||
}
|
||||
|
||||
// downloadBlob downloads a blob from storage to a local file
|
||||
func (w *Worker) downloadBlob(ctx context.Context, digest, destPath string) error {
|
||||
// Convert digest to storage path using distribution's sharding scheme
|
||||
// Format: /docker/registry/v2/blobs/sha256/47/4734bc89.../data
|
||||
// where 47 is the first 2 characters of the hash for directory sharding
|
||||
blobPath := blobPathForDigest(digest)
|
||||
|
||||
// Open blob from storage driver
|
||||
reader, err := w.driver.Reader(ctx, blobPath, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open blob %s: %w", digest, err)
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
// Create destination file
|
||||
dest, err := os.Create(destPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create destination file: %w", err)
|
||||
}
|
||||
defer dest.Close()
|
||||
|
||||
// Copy blob data to file
|
||||
if _, err := io.Copy(dest, reader); err != nil {
|
||||
return fmt.Errorf("failed to copy blob data: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractTarGz extracts a tar.gz file to a destination directory (overlayfs style)
|
||||
func (w *Worker) extractTarGz(tarGzPath, destDir string) error {
|
||||
// Open tar.gz file
|
||||
file, err := os.Open(tarGzPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open tar.gz: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Create gzip reader
|
||||
gzr, err := gzip.NewReader(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create gzip reader: %w", err)
|
||||
}
|
||||
defer gzr.Close()
|
||||
|
||||
// Create tar reader
|
||||
tr := tar.NewReader(gzr)
|
||||
|
||||
// Extract each file
|
||||
for {
|
||||
header, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read tar header: %w", err)
|
||||
}
|
||||
|
||||
// Build target path (clean to prevent path traversal)
|
||||
target := filepath.Join(destDir, filepath.Clean(header.Name))
|
||||
|
||||
// Ensure target is within destDir (security check)
|
||||
if !strings.HasPrefix(target, filepath.Clean(destDir)+string(os.PathSeparator)) {
|
||||
slog.Warn("Skipping path outside destination", "path", header.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
switch header.Typeflag {
|
||||
case tar.TypeDir:
|
||||
// Create directory
|
||||
if err := os.MkdirAll(target, os.FileMode(header.Mode)); err != nil {
|
||||
return fmt.Errorf("failed to create directory %s: %w", target, err)
|
||||
}
|
||||
|
||||
case tar.TypeReg:
|
||||
// Create parent directory
|
||||
if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
|
||||
return fmt.Errorf("failed to create parent directory: %w", err)
|
||||
}
|
||||
|
||||
// Create file
|
||||
outFile, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR|os.O_TRUNC, os.FileMode(header.Mode))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create file %s: %w", target, err)
|
||||
}
|
||||
|
||||
// Copy file contents
|
||||
if _, err := io.Copy(outFile, tr); err != nil {
|
||||
outFile.Close()
|
||||
return fmt.Errorf("failed to write file %s: %w", target, err)
|
||||
}
|
||||
outFile.Close()
|
||||
|
||||
case tar.TypeSymlink:
|
||||
// Create symlink
|
||||
if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
|
||||
return fmt.Errorf("failed to create parent directory for symlink: %w", err)
|
||||
}
|
||||
|
||||
// Remove existing file/symlink if it exists
|
||||
os.Remove(target)
|
||||
|
||||
if err := os.Symlink(header.Linkname, target); err != nil {
|
||||
slog.Warn("Failed to create symlink", "target", target, "link", header.Linkname, "error", err)
|
||||
}
|
||||
|
||||
case tar.TypeLink:
|
||||
// Create hard link
|
||||
linkTarget := filepath.Join(destDir, filepath.Clean(header.Linkname))
|
||||
if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
|
||||
return fmt.Errorf("failed to create parent directory for hardlink: %w", err)
|
||||
}
|
||||
|
||||
// Remove existing file if it exists
|
||||
os.Remove(target)
|
||||
|
||||
if err := os.Link(linkTarget, target); err != nil {
|
||||
slog.Warn("Failed to create hardlink", "target", target, "link", linkTarget, "error", err)
|
||||
}
|
||||
|
||||
default:
|
||||
slog.Debug("Skipping unsupported tar entry type", "type", header.Typeflag, "name", header.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// blobPathForDigest converts a digest to a storage path using distribution's sharding scheme
|
||||
// Format: /docker/registry/v2/blobs/sha256/47/4734bc89.../data
|
||||
// where 47 is the first 2 characters of the hash for directory sharding
|
||||
func blobPathForDigest(digest string) string {
|
||||
// Split digest into algorithm and hash
|
||||
parts := strings.SplitN(digest, ":", 2)
|
||||
if len(parts) != 2 {
|
||||
// Fallback for malformed digest
|
||||
return fmt.Sprintf("/docker/registry/v2/blobs/%s/data", digest)
|
||||
}
|
||||
|
||||
algorithm := parts[0]
|
||||
hash := parts[1]
|
||||
|
||||
// Use first 2 characters for sharding
|
||||
if len(hash) < 2 {
|
||||
return fmt.Sprintf("/docker/registry/v2/blobs/%s/%s/data", algorithm, hash)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("/docker/registry/v2/blobs/%s/%s/%s/data", algorithm, hash[:2], hash)
|
||||
}
|
||||
351
pkg/hold/scanner/grype.go
Normal file
351
pkg/hold/scanner/grype.go
Normal file
@@ -0,0 +1,351 @@
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/anchore/grype/grype"
|
||||
"github.com/anchore/grype/grype/db/v6/distribution"
|
||||
"github.com/anchore/grype/grype/db/v6/installation"
|
||||
"github.com/anchore/grype/grype/distro"
|
||||
"github.com/anchore/grype/grype/match"
|
||||
"github.com/anchore/grype/grype/matcher"
|
||||
"github.com/anchore/grype/grype/matcher/dotnet"
|
||||
"github.com/anchore/grype/grype/matcher/golang"
|
||||
"github.com/anchore/grype/grype/matcher/java"
|
||||
"github.com/anchore/grype/grype/matcher/javascript"
|
||||
"github.com/anchore/grype/grype/matcher/python"
|
||||
"github.com/anchore/grype/grype/matcher/ruby"
|
||||
"github.com/anchore/grype/grype/matcher/stock"
|
||||
grypePkg "github.com/anchore/grype/grype/pkg"
|
||||
"github.com/anchore/grype/grype/vulnerability"
|
||||
"github.com/anchore/syft/syft/sbom"
|
||||
)
|
||||
|
||||
// Global vulnerability database (shared across workers)
|
||||
var (
|
||||
vulnDB vulnerability.Provider
|
||||
vulnDBLock sync.RWMutex
|
||||
)
|
||||
|
||||
// scanVulnerabilities scans an SBOM for vulnerabilities using Grype
|
||||
// Returns vulnerability report JSON, digest, summary, and any error
|
||||
func (w *Worker) scanVulnerabilities(ctx context.Context, s *sbom.SBOM) ([]byte, string, VulnerabilitySummary, error) {
|
||||
slog.Info("Scanning for vulnerabilities with Grype")
|
||||
|
||||
// Load vulnerability database (cached globally)
|
||||
store, err := w.loadVulnDatabase(ctx)
|
||||
if err != nil {
|
||||
return nil, "", VulnerabilitySummary{}, fmt.Errorf("failed to load vulnerability database: %w", err)
|
||||
}
|
||||
|
||||
// Create package context from SBOM (need distro for synthesis)
|
||||
var grypeDistro *distro.Distro
|
||||
if s.Artifacts.LinuxDistribution != nil {
|
||||
grypeDistro = distro.FromRelease(s.Artifacts.LinuxDistribution, nil)
|
||||
if grypeDistro != nil {
|
||||
slog.Info("Using distro for package synthesis",
|
||||
"name", grypeDistro.Name(),
|
||||
"version", grypeDistro.Version,
|
||||
"type", grypeDistro.Type,
|
||||
"codename", grypeDistro.Codename)
|
||||
}
|
||||
}
|
||||
|
||||
// Convert Syft packages to Grype packages WITH distro info
|
||||
synthesisConfig := grypePkg.SynthesisConfig{
|
||||
GenerateMissingCPEs: true,
|
||||
Distro: grypePkg.DistroConfig{
|
||||
Override: grypeDistro,
|
||||
},
|
||||
}
|
||||
grypePackages := grypePkg.FromCollection(s.Artifacts.Packages, synthesisConfig)
|
||||
|
||||
slog.Info("Converted packages for vulnerability scanning",
|
||||
"syftPackages", s.Artifacts.Packages.PackageCount(),
|
||||
"grypePackages", len(grypePackages),
|
||||
"distro", func() string {
|
||||
if s.Artifacts.LinuxDistribution != nil {
|
||||
return fmt.Sprintf("%s %s", s.Artifacts.LinuxDistribution.Name, s.Artifacts.LinuxDistribution.Version)
|
||||
}
|
||||
return "none"
|
||||
}())
|
||||
|
||||
// Create matchers
|
||||
matchers := matcher.NewDefaultMatchers(matcher.Config{
|
||||
Java: java.MatcherConfig{},
|
||||
Ruby: ruby.MatcherConfig{},
|
||||
Python: python.MatcherConfig{},
|
||||
Dotnet: dotnet.MatcherConfig{},
|
||||
Javascript: javascript.MatcherConfig{},
|
||||
Golang: golang.MatcherConfig{},
|
||||
Stock: stock.MatcherConfig{},
|
||||
})
|
||||
|
||||
// Create package context with the same distro we used for synthesis
|
||||
pkgContext := grypePkg.Context{
|
||||
Source: &s.Source,
|
||||
Distro: grypeDistro,
|
||||
}
|
||||
|
||||
// Create vulnerability matcher
|
||||
vulnerabilityMatcher := &grype.VulnerabilityMatcher{
|
||||
VulnerabilityProvider: store,
|
||||
Matchers: matchers,
|
||||
NormalizeByCVE: true,
|
||||
}
|
||||
|
||||
// Find vulnerabilities
|
||||
slog.Info("Matching vulnerabilities",
|
||||
"packages", len(grypePackages),
|
||||
"distro", func() string {
|
||||
if grypeDistro != nil {
|
||||
return fmt.Sprintf("%s %s", grypeDistro.Name(), grypeDistro.Version)
|
||||
}
|
||||
return "none"
|
||||
}())
|
||||
allMatches, _, err := vulnerabilityMatcher.FindMatches(grypePackages, pkgContext)
|
||||
if err != nil {
|
||||
return nil, "", VulnerabilitySummary{}, fmt.Errorf("failed to find vulnerabilities: %w", err)
|
||||
}
|
||||
|
||||
slog.Info("Vulnerability matching complete",
|
||||
"totalMatches", allMatches.Count())
|
||||
|
||||
// If we found 0 matches, log some diagnostic info
|
||||
if allMatches.Count() == 0 {
|
||||
slog.Warn("No vulnerability matches found - this may indicate an issue",
|
||||
"distro", func() string {
|
||||
if grypeDistro != nil {
|
||||
return fmt.Sprintf("%s %s", grypeDistro.Name(), grypeDistro.Version)
|
||||
}
|
||||
return "none"
|
||||
}(),
|
||||
"packages", len(grypePackages),
|
||||
"databaseBuilt", func() string {
|
||||
vulnDBLock.RLock()
|
||||
defer vulnDBLock.RUnlock()
|
||||
if vulnDB == nil {
|
||||
return "not loaded"
|
||||
}
|
||||
// We can't easily get the build date here without exposing internal state
|
||||
return "loaded"
|
||||
}())
|
||||
}
|
||||
|
||||
// Count vulnerabilities by severity
|
||||
summary := w.countVulnerabilitiesBySeverity(*allMatches)
|
||||
|
||||
slog.Info("Vulnerability scan complete",
|
||||
"critical", summary.Critical,
|
||||
"high", summary.High,
|
||||
"medium", summary.Medium,
|
||||
"low", summary.Low,
|
||||
"total", summary.Total)
|
||||
|
||||
// Create vulnerability report JSON
|
||||
report := map[string]interface{}{
|
||||
"matches": allMatches.Sorted(),
|
||||
"source": s.Source,
|
||||
"distro": s.Artifacts.LinuxDistribution,
|
||||
"descriptor": map[string]interface{}{
|
||||
"name": "grype",
|
||||
"version": "v0.102.0", // TODO: Get actual Grype version
|
||||
},
|
||||
"summary": summary,
|
||||
}
|
||||
|
||||
// Encode report to JSON
|
||||
reportJSON, err := json.MarshalIndent(report, "", " ")
|
||||
if err != nil {
|
||||
return nil, "", VulnerabilitySummary{}, fmt.Errorf("failed to encode vulnerability report: %w", err)
|
||||
}
|
||||
|
||||
// Calculate digest
|
||||
hash := sha256.Sum256(reportJSON)
|
||||
digest := fmt.Sprintf("sha256:%x", hash)
|
||||
|
||||
slog.Info("Vulnerability report generated", "size", len(reportJSON), "digest", digest)
|
||||
|
||||
// Upload report blob to storage
|
||||
if err := w.uploadBlob(ctx, digest, reportJSON); err != nil {
|
||||
return nil, "", VulnerabilitySummary{}, fmt.Errorf("failed to upload vulnerability report: %w", err)
|
||||
}
|
||||
|
||||
return reportJSON, digest, summary, nil
|
||||
}
|
||||
|
||||
// loadVulnDatabase loads the Grype vulnerability database (with caching)
|
||||
func (w *Worker) loadVulnDatabase(ctx context.Context) (vulnerability.Provider, error) {
|
||||
// Check if database is already loaded
|
||||
vulnDBLock.RLock()
|
||||
if vulnDB != nil {
|
||||
vulnDBLock.RUnlock()
|
||||
return vulnDB, nil
|
||||
}
|
||||
vulnDBLock.RUnlock()
|
||||
|
||||
// Acquire write lock to load database
|
||||
vulnDBLock.Lock()
|
||||
defer vulnDBLock.Unlock()
|
||||
|
||||
// Check again (another goroutine might have loaded it)
|
||||
if vulnDB != nil {
|
||||
return vulnDB, nil
|
||||
}
|
||||
|
||||
slog.Info("Loading Grype vulnerability database", "path", w.config.Scanner.VulnDBPath)
|
||||
|
||||
// Ensure database directory exists
|
||||
if err := ensureDir(w.config.Scanner.VulnDBPath); err != nil {
|
||||
return nil, fmt.Errorf("failed to create vulnerability database directory: %w", err)
|
||||
}
|
||||
|
||||
// Configure database distribution
|
||||
distConfig := distribution.DefaultConfig()
|
||||
|
||||
// Configure database installation
|
||||
installConfig := installation.Config{
|
||||
DBRootDir: w.config.Scanner.VulnDBPath,
|
||||
ValidateAge: true,
|
||||
ValidateChecksum: true,
|
||||
MaxAllowedBuiltAge: w.config.Scanner.VulnDBUpdateInterval,
|
||||
}
|
||||
|
||||
// Load database (should already be downloaded by initializeVulnDatabase)
|
||||
store, status, err := grype.LoadVulnerabilityDB(distConfig, installConfig, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load vulnerability database (status=%v): %w (hint: database may still be downloading)", status, err)
|
||||
}
|
||||
|
||||
slog.Info("Vulnerability database loaded",
|
||||
"status", status,
|
||||
"built", status.Built,
|
||||
"location", status.Path,
|
||||
"schemaVersion", status.SchemaVersion)
|
||||
|
||||
// Check database file size to verify it has content
|
||||
if stat, err := os.Stat(status.Path); err == nil {
|
||||
slog.Info("Vulnerability database file stats",
|
||||
"size", stat.Size(),
|
||||
"sizeMB", stat.Size()/1024/1024)
|
||||
}
|
||||
|
||||
// Cache database globally
|
||||
vulnDB = store
|
||||
|
||||
slog.Info("Vulnerability database loaded successfully")
|
||||
return vulnDB, nil
|
||||
}
|
||||
|
||||
// countVulnerabilitiesBySeverity counts vulnerabilities by severity level
|
||||
func (w *Worker) countVulnerabilitiesBySeverity(matches match.Matches) VulnerabilitySummary {
|
||||
summary := VulnerabilitySummary{}
|
||||
|
||||
for m := range matches.Enumerate() {
|
||||
summary.Total++
|
||||
|
||||
// Get severity from vulnerability metadata
|
||||
if m.Vulnerability.Metadata != nil {
|
||||
severity := m.Vulnerability.Metadata.Severity
|
||||
switch severity {
|
||||
case "Critical":
|
||||
summary.Critical++
|
||||
case "High":
|
||||
summary.High++
|
||||
case "Medium":
|
||||
summary.Medium++
|
||||
case "Low":
|
||||
summary.Low++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return summary
|
||||
}
|
||||
|
||||
// initializeVulnDatabase downloads and initializes the vulnerability database on startup
|
||||
func (w *Worker) initializeVulnDatabase(ctx context.Context) error {
|
||||
slog.Info("Initializing vulnerability database", "path", w.config.Scanner.VulnDBPath)
|
||||
|
||||
// Ensure database directory exists
|
||||
if err := ensureDir(w.config.Scanner.VulnDBPath); err != nil {
|
||||
return fmt.Errorf("failed to create vulnerability database directory: %w", err)
|
||||
}
|
||||
|
||||
// Create temp directory for Grype downloads (scratch container has no /tmp)
|
||||
tmpDir := filepath.Join(w.config.Database.Path, "tmp")
|
||||
if err := ensureDir(tmpDir); err != nil {
|
||||
return fmt.Errorf("failed to create temp directory: %w", err)
|
||||
}
|
||||
|
||||
// Set TMPDIR environment variable so Grype uses our temp directory
|
||||
oldTmpDir := os.Getenv("TMPDIR")
|
||||
os.Setenv("TMPDIR", tmpDir)
|
||||
defer func() {
|
||||
if oldTmpDir != "" {
|
||||
os.Setenv("TMPDIR", oldTmpDir)
|
||||
} else {
|
||||
os.Unsetenv("TMPDIR")
|
||||
}
|
||||
}()
|
||||
|
||||
// Configure database distribution
|
||||
distConfig := distribution.DefaultConfig()
|
||||
|
||||
// Configure database installation
|
||||
installConfig := installation.Config{
|
||||
DBRootDir: w.config.Scanner.VulnDBPath,
|
||||
ValidateAge: true,
|
||||
ValidateChecksum: true,
|
||||
MaxAllowedBuiltAge: w.config.Scanner.VulnDBUpdateInterval,
|
||||
}
|
||||
|
||||
// Create distribution client for downloading
|
||||
downloader, err := distribution.NewClient(distConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create database downloader: %w", err)
|
||||
}
|
||||
|
||||
// Create curator to manage database
|
||||
curator, err := installation.NewCurator(installConfig, downloader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create database curator: %w", err)
|
||||
}
|
||||
|
||||
// Check if database already exists
|
||||
status := curator.Status()
|
||||
if !status.Built.IsZero() && status.Error == nil {
|
||||
slog.Info("Vulnerability database already exists", "built", status.Built, "schema", status.SchemaVersion)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Download database (this may take several minutes)
|
||||
slog.Info("Downloading vulnerability database (this may take 5-10 minutes)...")
|
||||
updated, err := curator.Update()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to download vulnerability database: %w", err)
|
||||
}
|
||||
|
||||
if updated {
|
||||
slog.Info("Vulnerability database downloaded successfully")
|
||||
} else {
|
||||
slog.Info("Vulnerability database is up to date")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureDir creates a directory if it doesn't exist
|
||||
func ensureDir(path string) error {
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create directory %s: %w", path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
67
pkg/hold/scanner/job.go
Normal file
67
pkg/hold/scanner/job.go
Normal file
@@ -0,0 +1,67 @@
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/atproto"
|
||||
)
|
||||
|
||||
// ScanJob represents a vulnerability scanning job for a container image.
// Jobs are enqueued (see Queue.Enqueue) when a manifest is pushed and are
// consumed by scanner worker goroutines.
type ScanJob struct {
	// ManifestDigest is the digest of the manifest to scan
	ManifestDigest string

	// Repository is the repository name (e.g., "alice/myapp")
	Repository string

	// Tag is the tag name (e.g., "latest")
	Tag string

	// UserDID is the DID of the user who owns this image
	UserDID string

	// UserHandle is the handle of the user (for display)
	UserHandle string

	// Config is the image config blob descriptor
	Config atproto.BlobReference

	// Layers are the image layer blob descriptors (in order)
	Layers []atproto.BlobReference

	// EnqueuedAt is when this job was enqueued
	// NOTE(review): not populated by the map path of Queue.Enqueue — confirm
	// whether callers constructing *ScanJob directly are expected to set it.
	EnqueuedAt time.Time
}
|
||||
|
||||
// ScanResult represents the result of a vulnerability scan, produced by the
// worker function and consumed by the queue's result handler for logging.
type ScanResult struct {
	// Job is the original scan job
	Job *ScanJob

	// VulnerabilitiesJSON is the raw Grype JSON output
	VulnerabilitiesJSON []byte

	// Summary contains vulnerability counts by severity
	Summary VulnerabilitySummary

	// SBOMDigest is the digest of the SBOM blob (if SBOM was generated)
	SBOMDigest string

	// VulnDigest is the digest of the vulnerability report blob
	VulnDigest string

	// ScannedAt is when the scan completed
	ScannedAt time.Time

	// ScannerVersion is the version of the scanner used
	ScannerVersion string
}
|
||||
|
||||
// VulnerabilitySummary contains counts of vulnerabilities by severity.
// NOTE(review): Total is not necessarily Critical+High+Medium+Low — whether
// it includes other severities (negligible/unknown) depends on the producer;
// confirm against the Grype scanning code.
type VulnerabilitySummary struct {
	Critical int `json:"critical"`
	High     int `json:"high"`
	Medium   int `json:"medium"`
	Low      int `json:"low"`
	Total    int `json:"total"`
}
|
||||
226
pkg/hold/scanner/queue.go
Normal file
226
pkg/hold/scanner/queue.go
Normal file
@@ -0,0 +1,226 @@
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"sync"
|
||||
|
||||
"atcr.io/pkg/atproto"
|
||||
)
|
||||
|
||||
// Queue manages a pool of workers for scanning container images. Jobs flow
// in through the buffered jobs channel; completed scans are passed on the
// results channel to a dedicated result-handler goroutine for logging.
type Queue struct {
	jobs    chan *ScanJob      // pending scan jobs (buffered)
	results chan *ScanResult   // completed scan results (buffered)
	workers int                // number of worker goroutines launched by Start
	wg      sync.WaitGroup     // tracks workers AND the result handler
	ctx     context.Context    // cancelled by Shutdown to stop goroutines
	cancel  context.CancelFunc // cancels ctx
}
|
||||
|
||||
// NewQueue creates a new scanner queue with the specified number of workers
|
||||
func NewQueue(workers int, bufferSize int) *Queue {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
return &Queue{
|
||||
jobs: make(chan *ScanJob, bufferSize),
|
||||
results: make(chan *ScanResult, bufferSize),
|
||||
workers: workers,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the worker pool.
// The workerFunc is called for each job to perform the actual scanning.
// It launches q.workers worker goroutines plus one result-handler goroutine,
// all registered on q.wg so that Shutdown can wait for them.
func (q *Queue) Start(workerFunc func(context.Context, *ScanJob) (*ScanResult, error)) {
	slog.Info("Starting scanner worker pool", "workers", q.workers)

	for i := 0; i < q.workers; i++ {
		q.wg.Add(1)
		go q.worker(i, workerFunc)
	}

	// Start result handler goroutine (also counted in q.wg)
	q.wg.Add(1)
	go q.resultHandler()
}
|
||||
|
||||
// worker processes jobs from the queue until the context is cancelled or the
// jobs channel is closed. A job whose workerFunc returns an error is logged
// and dropped — there is no retry.
func (q *Queue) worker(id int, workerFunc func(context.Context, *ScanJob) (*ScanResult, error)) {
	defer q.wg.Done()

	slog.Info("Scanner worker started", "worker_id", id)

	for {
		select {
		case <-q.ctx.Done():
			slog.Info("Scanner worker shutting down", "worker_id", id)
			return

		case job, ok := <-q.jobs:
			if !ok {
				// Channel closed by Shutdown: exit after draining.
				slog.Info("Scanner worker: jobs channel closed", "worker_id", id)
				return
			}

			slog.Info("Scanner worker processing job",
				"worker_id", id,
				"repository", job.Repository,
				"tag", job.Tag,
				"digest", job.ManifestDigest)

			result, err := workerFunc(q.ctx, job)
			if err != nil {
				// Best-effort: failed jobs are only logged, not requeued.
				slog.Error("Scanner worker failed to process job",
					"worker_id", id,
					"repository", job.Repository,
					"tag", job.Tag,
					"error", err)
				continue
			}

			// Send result to results channel; abandon the send if the queue
			// is shutting down while the channel is full.
			select {
			case q.results <- result:
				slog.Info("Scanner worker completed job",
					"worker_id", id,
					"repository", job.Repository,
					"tag", job.Tag,
					"vulnerabilities", result.Summary.Total)
			case <-q.ctx.Done():
				return
			}
		}
	}
}
|
||||
|
||||
// resultHandler processes scan results (for logging and metrics). It exits
// only when the queue context is cancelled or the results channel is closed
// — Shutdown must therefore cancel/close before waiting on q.wg.
func (q *Queue) resultHandler() {
	defer q.wg.Done()

	for {
		select {
		case <-q.ctx.Done():
			return

		case result, ok := <-q.results:
			if !ok {
				return
			}

			// Log the result; no further processing happens here.
			slog.Info("Scan completed",
				"repository", result.Job.Repository,
				"tag", result.Job.Tag,
				"digest", result.Job.ManifestDigest,
				"critical", result.Summary.Critical,
				"high", result.Summary.High,
				"medium", result.Summary.Medium,
				"low", result.Summary.Low,
				"total", result.Summary.Total,
				"scanner", result.ScannerVersion)
		}
	}
}
|
||||
|
||||
// Enqueue adds a job to the queue
|
||||
func (q *Queue) Enqueue(jobAny any) error {
|
||||
// Type assert to ScanJob (can be map or struct from HandleNotifyManifest)
|
||||
var job *ScanJob
|
||||
|
||||
switch v := jobAny.(type) {
|
||||
case *ScanJob:
|
||||
job = v
|
||||
case map[string]interface{}:
|
||||
// Convert map to ScanJob (from HandleNotifyManifest)
|
||||
job = &ScanJob{
|
||||
ManifestDigest: v["manifestDigest"].(string),
|
||||
Repository: v["repository"].(string),
|
||||
Tag: v["tag"].(string),
|
||||
UserDID: v["userDID"].(string),
|
||||
UserHandle: v["userHandle"].(string),
|
||||
}
|
||||
|
||||
// Parse config blob reference
|
||||
if configMap, ok := v["config"].(map[string]interface{}); ok {
|
||||
job.Config = atproto.BlobReference{
|
||||
Digest: configMap["digest"].(string),
|
||||
Size: convertToInt64(configMap["size"]),
|
||||
MediaType: configMap["mediaType"].(string),
|
||||
}
|
||||
}
|
||||
|
||||
// Parse layers
|
||||
if layersSlice, ok := v["layers"].([]interface{}); ok {
|
||||
slog.Info("Parsing layers from scan job",
|
||||
"layersFound", len(layersSlice))
|
||||
job.Layers = make([]atproto.BlobReference, len(layersSlice))
|
||||
for i, layerAny := range layersSlice {
|
||||
if layerMap, ok := layerAny.(map[string]interface{}); ok {
|
||||
job.Layers[i] = atproto.BlobReference{
|
||||
Digest: layerMap["digest"].(string),
|
||||
Size: convertToInt64(layerMap["size"]),
|
||||
MediaType: layerMap["mediaType"].(string),
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
slog.Warn("No layers found in scan job map",
|
||||
"layersType", fmt.Sprintf("%T", v["layers"]),
|
||||
"layersValue", v["layers"])
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("invalid job type: %T", jobAny)
|
||||
}
|
||||
|
||||
select {
|
||||
case q.jobs <- job:
|
||||
slog.Info("Enqueued scan job",
|
||||
"repository", job.Repository,
|
||||
"tag", job.Tag,
|
||||
"digest", job.ManifestDigest)
|
||||
return nil
|
||||
case <-q.ctx.Done():
|
||||
return q.ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// Shutdown gracefully shuts down the queue, waiting for all workers to finish
|
||||
func (q *Queue) Shutdown() {
|
||||
slog.Info("Shutting down scanner queue")
|
||||
|
||||
// Close the jobs channel to signal no more jobs
|
||||
close(q.jobs)
|
||||
|
||||
// Wait for all workers to finish
|
||||
q.wg.Wait()
|
||||
|
||||
// Close results channel
|
||||
close(q.results)
|
||||
|
||||
// Cancel context
|
||||
q.cancel()
|
||||
|
||||
slog.Info("Scanner queue shut down complete")
|
||||
}
|
||||
|
||||
// Len returns the number of jobs currently in the queue
|
||||
func (q *Queue) Len() int {
|
||||
return len(q.jobs)
|
||||
}
|
||||
|
||||
// convertToInt64 coerces a numeric interface{} value to int64. It accepts
// float64 (the type encoding/json produces for numbers), int64, and int;
// any other type — including nil — yields 0.
func convertToInt64(v interface{}) int64 {
	if f, ok := v.(float64); ok {
		return int64(f)
	}
	if i, ok := v.(int64); ok {
		return i
	}
	if i, ok := v.(int); ok {
		return int64(i)
	}
	return 0
}
|
||||
123
pkg/hold/scanner/storage.go
Normal file
123
pkg/hold/scanner/storage.go
Normal file
@@ -0,0 +1,123 @@
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/atproto"
|
||||
)
|
||||
|
||||
// storeResults uploads scan results and creates ORAS manifest records in the
// hold's PDS. The vulnerability report is published as an OCI artifact whose
// "subject" is the scanned image manifest (the OCI referrers pattern).
// Returns nil without storing anything when vulnerability scanning is
// disabled in config.
//
// NOTE(review): the sbomDigest parameter is accepted but never used in this
// body — confirm whether an SBOM referrer artifact was intended here too.
func (w *Worker) storeResults(ctx context.Context, job *ScanJob, sbomDigest, vulnDigest string, vulnJSON []byte, summary VulnerabilitySummary) error {
	if !w.config.Scanner.VulnEnabled {
		slog.Info("Vulnerability scanning disabled, skipping result storage")
		return nil
	}

	slog.Info("Storing scan results as ORAS artifact",
		"repository", job.Repository,
		"subjectDigest", job.ManifestDigest,
		"vulnDigest", vulnDigest)

	// Create ORAS manifest for vulnerability report
	orasManifest := map[string]interface{}{
		"schemaVersion": 2,
		"mediaType":     "application/vnd.oci.image.manifest.v1+json",
		"artifactType":  "application/vnd.atcr.vulnerabilities+json",
		"config": map[string]interface{}{
			"mediaType": "application/vnd.oci.empty.v1+json",
			// sha256 of the canonical empty JSON object "{}"
			"digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a", // Empty JSON object
			"size":   2,
		},
		"subject": map[string]interface{}{
			"mediaType": "application/vnd.oci.image.manifest.v1+json",
			"digest":    job.ManifestDigest,
			"size":      0, // We don't have the size, but it's optional
		},
		"layers": []map[string]interface{}{
			{
				"mediaType": "application/json",
				"digest":    vulnDigest,
				"size":      len(vulnJSON),
				"annotations": map[string]string{
					"org.opencontainers.image.title": "vulnerability-report.json",
				},
			},
		},
		// Severity counts are exposed as annotations so consumers can read
		// them without fetching the report blob.
		"annotations": map[string]string{
			"io.atcr.vuln.critical":       fmt.Sprintf("%d", summary.Critical),
			"io.atcr.vuln.high":           fmt.Sprintf("%d", summary.High),
			"io.atcr.vuln.medium":         fmt.Sprintf("%d", summary.Medium),
			"io.atcr.vuln.low":            fmt.Sprintf("%d", summary.Low),
			"io.atcr.vuln.total":          fmt.Sprintf("%d", summary.Total),
			"io.atcr.vuln.scannedAt":      time.Now().Format(time.RFC3339),
			"io.atcr.vuln.scannerVersion": w.getScannerVersion(),
		},
	}

	// Encode ORAS manifest to JSON
	orasManifestJSON, err := json.Marshal(orasManifest)
	if err != nil {
		return fmt.Errorf("failed to encode ORAS manifest: %w", err)
	}

	// Calculate ORAS manifest digest (content-addressed, like any manifest)
	orasDigest := fmt.Sprintf("sha256:%x", sha256Bytes(orasManifestJSON))

	// Upload ORAS manifest blob to storage
	if err := w.uploadBlob(ctx, orasDigest, orasManifestJSON); err != nil {
		return fmt.Errorf("failed to upload ORAS manifest blob: %w", err)
	}

	// Create manifest record in hold's PDS
	if err := w.createManifestRecord(ctx, job, orasDigest, orasManifestJSON, summary); err != nil {
		return fmt.Errorf("failed to create manifest record: %w", err)
	}

	slog.Info("Successfully stored scan results", "orasDigest", orasDigest)
	return nil
}
|
||||
|
||||
// createManifestRecord creates an ORAS manifest record in the hold's PDS,
// keyed by the manifest digest (hex portion) in the manifest collection.
//
// NOTE(review): the summary parameter is accepted but not referenced in this
// body — confirm whether severity counts were meant to be stored on the
// record as well. Also assumes orasDigest starts with "sha256:" (true for
// the caller in storeResults); a digest without that prefix would be sliced
// incorrectly here.
func (w *Worker) createManifestRecord(ctx context.Context, job *ScanJob, orasDigest string, orasManifestJSON []byte, summary VulnerabilitySummary) error {
	// Create ManifestRecord from ORAS manifest
	record, err := atproto.NewManifestRecord(job.Repository, orasDigest, orasManifestJSON)
	if err != nil {
		return fmt.Errorf("failed to create manifest record: %w", err)
	}

	// Set SBOM/vulnerability specific fields
	record.OwnerDID = job.UserDID
	record.ScannedAt = time.Now().Format(time.RFC3339)
	record.ScannerVersion = w.getScannerVersion()

	// Add hold DID (this ORAS artifact is stored in the hold's PDS)
	record.HoldDID = w.pds.DID()

	// Convert digest to record key (remove "sha256:" prefix)
	rkey := orasDigest[len("sha256:"):]

	// Store record in hold's PDS
	slog.Info("Creating manifest record in hold's PDS",
		"collection", atproto.ManifestCollection,
		"rkey", rkey,
		"ownerDid", job.UserDID)

	_, _, err = w.pds.CreateManifestRecord(ctx, record, rkey)
	if err != nil {
		return fmt.Errorf("failed to put record in PDS: %w", err)
	}

	slog.Info("Manifest record created successfully", "uri", fmt.Sprintf("at://%s/%s/%s", w.pds.DID(), atproto.ManifestCollection, rkey))
	return nil
}
|
||||
|
||||
// sha256Bytes calculates SHA256 hash of byte slice
|
||||
func sha256Bytes(data []byte) []byte {
|
||||
hash := sha256.Sum256(data)
|
||||
return hash[:]
|
||||
}
|
||||
128
pkg/hold/scanner/syft.go
Normal file
128
pkg/hold/scanner/syft.go
Normal file
@@ -0,0 +1,128 @@
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
|
||||
"github.com/anchore/syft/syft"
|
||||
"github.com/anchore/syft/syft/format"
|
||||
"github.com/anchore/syft/syft/format/spdxjson"
|
||||
"github.com/anchore/syft/syft/sbom"
|
||||
"github.com/anchore/syft/syft/source/directorysource"
|
||||
)
|
||||
|
||||
// generateSBOM generates an SBOM using Syft from an extracted image
// directory, encodes it as SPDX JSON, uploads the JSON blob to registry
// storage, and returns the SBOM object, the encoded JSON, its sha256 digest
// (as "sha256:<hex>"), and any error.
func (w *Worker) generateSBOM(ctx context.Context, imageDir string) (*sbom.SBOM, []byte, string, error) {
	slog.Info("Generating SBOM with Syft", "imageDir", imageDir)

	// Check if directory exists and is accessible before invoking Syft.
	entries, err := os.ReadDir(imageDir)
	if err != nil {
		return nil, nil, "", fmt.Errorf("failed to read image directory: %w", err)
	}
	// Log up to 20 entry names to aid debugging of extraction problems.
	slog.Info("Image directory contents",
		"path", imageDir,
		"entries", len(entries),
		"sampleFiles", func() []string {
			var samples []string
			for i, e := range entries {
				if i >= 20 {
					break
				}
				samples = append(samples, e.Name())
			}
			return samples
		}())

	// Create Syft source from directory
	src, err := directorysource.NewFromPath(imageDir)
	if err != nil {
		return nil, nil, "", fmt.Errorf("failed to create Syft source: %w", err)
	}
	defer src.Close()

	// Generate SBOM (nil config uses Syft's defaults)
	slog.Info("Running Syft cataloging")
	sbomResult, err := syft.CreateSBOM(ctx, src, nil)
	if err != nil {
		return nil, nil, "", fmt.Errorf("failed to generate SBOM: %w", err)
	}

	if sbomResult == nil {
		return nil, nil, "", fmt.Errorf("Syft returned nil SBOM")
	}

	slog.Info("SBOM generated",
		"packages", sbomResult.Artifacts.Packages.PackageCount(),
		"distro", func() string {
			if sbomResult.Artifacts.LinuxDistribution != nil {
				return fmt.Sprintf("%s %s", sbomResult.Artifacts.LinuxDistribution.Name, sbomResult.Artifacts.LinuxDistribution.Version)
			}
			return "none"
		}())

	// Encode SBOM to SPDX JSON format
	encoder, err := spdxjson.NewFormatEncoderWithConfig(spdxjson.DefaultEncoderConfig())
	if err != nil {
		return nil, nil, "", fmt.Errorf("failed to create SPDX encoder: %w", err)
	}

	sbomJSON, err := format.Encode(*sbomResult, encoder)
	if err != nil {
		return nil, nil, "", fmt.Errorf("failed to encode SBOM to SPDX JSON: %w", err)
	}

	// Calculate content digest of the encoded SBOM
	hash := sha256.Sum256(sbomJSON)
	digest := fmt.Sprintf("sha256:%x", hash)

	slog.Info("SBOM encoded", "format", "spdx-json", "size", len(sbomJSON), "digest", digest)

	// Upload SBOM blob to storage
	if err := w.uploadBlob(ctx, digest, sbomJSON); err != nil {
		return nil, nil, "", fmt.Errorf("failed to upload SBOM blob: %w", err)
	}

	return sbomResult, sbomJSON, digest, nil
}
|
||||
|
||||
// uploadBlob uploads a blob to storage
|
||||
func (w *Worker) uploadBlob(ctx context.Context, digest string, data []byte) error {
|
||||
// Convert digest to storage path (same format as distribution uses)
|
||||
// Path format: /docker/registry/v2/blobs/sha256/ab/abcd1234.../data
|
||||
algorithm := "sha256"
|
||||
digestHex := digest[len("sha256:"):]
|
||||
if len(digestHex) < 2 {
|
||||
return fmt.Errorf("invalid digest: %s", digest)
|
||||
}
|
||||
|
||||
blobPath := fmt.Sprintf("/docker/registry/v2/blobs/%s/%s/%s/data",
|
||||
algorithm,
|
||||
digestHex[:2],
|
||||
digestHex)
|
||||
|
||||
slog.Info("Uploading blob to storage", "digest", digest, "size", len(data), "path", blobPath)
|
||||
|
||||
// Write blob to storage
|
||||
writer, err := w.driver.Writer(ctx, blobPath, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create storage writer: %w", err)
|
||||
}
|
||||
defer writer.Close()
|
||||
|
||||
if _, err := writer.Write(data); err != nil {
|
||||
writer.Cancel(ctx)
|
||||
return fmt.Errorf("failed to write blob data: %w", err)
|
||||
}
|
||||
|
||||
if err := writer.Commit(ctx); err != nil {
|
||||
return fmt.Errorf("failed to commit blob: %w", err)
|
||||
}
|
||||
|
||||
slog.Info("Successfully uploaded blob", "digest", digest)
|
||||
return nil
|
||||
}
|
||||
116
pkg/hold/scanner/worker.go
Normal file
116
pkg/hold/scanner/worker.go
Normal file
@@ -0,0 +1,116 @@
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/hold"
|
||||
"atcr.io/pkg/hold/pds"
|
||||
"github.com/distribution/distribution/v3/registry/storage/driver"
|
||||
)
|
||||
|
||||
// Worker performs vulnerability scanning on container images: it extracts
// layers from registry storage, generates an SBOM with Syft, optionally
// scans it with Grype, and stores results in the hold's PDS.
type Worker struct {
	config *hold.Config         // hold configuration; Scanner.VulnEnabled gates Grype
	driver driver.StorageDriver // registry blob storage backend
	pds    *pds.HoldPDS         // hold's PDS client for manifest records
	queue  *Queue               // job queue; assigned in Start
}
|
||||
|
||||
// NewWorker creates a new scanner worker
|
||||
func NewWorker(config *hold.Config, driver driver.StorageDriver, pds *pds.HoldPDS) *Worker {
|
||||
return &Worker{
|
||||
config: config,
|
||||
driver: driver,
|
||||
pds: pds,
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the worker pool and initializes the vulnerability database.
// Database initialization runs asynchronously so workers can start serving
// immediately; a failed initialization is only logged, and scanning remains
// effectively disabled until the database becomes available.
func (w *Worker) Start(queue *Queue) {
	w.queue = queue

	// Initialize vulnerability database on startup if scanning is enabled
	if w.config.Scanner.VulnEnabled {
		go func() {
			ctx := context.Background()
			if err := w.initializeVulnDatabase(ctx); err != nil {
				slog.Error("Failed to initialize vulnerability database", "error", err)
				slog.Warn("Vulnerability scanning will be disabled until database is available")
			}
		}()
	}

	// Hook this worker's processJob into the queue's worker pool.
	queue.Start(w.processJob)
}
|
||||
|
||||
// processJob processes a single scan job end to end:
//
//  1. extract the image layers from registry storage into a temp directory
//  2. generate an SBOM with Syft and upload it
//  3. scan the SBOM with Grype (when enabled in config)
//  4. store the results as an ORAS artifact and PDS record
//
// It returns the assembled ScanResult, or an error if any step fails. When
// vulnerability scanning is disabled, vulnJSON/vulnDigest/summary stay at
// their zero values in the result.
func (w *Worker) processJob(ctx context.Context, job *ScanJob) (*ScanResult, error) {
	slog.Info("Processing scan job",
		"repository", job.Repository,
		"tag", job.Tag,
		"digest", job.ManifestDigest,
		"layers", len(job.Layers))

	startTime := time.Now()

	// Step 1: Extract image layers from storage
	slog.Info("Extracting image layers", "repository", job.Repository)
	imageDir, cleanup, err := w.extractLayers(ctx, job)
	if err != nil {
		return nil, fmt.Errorf("failed to extract layers: %w", err)
	}
	// cleanup removes the extracted temp directory when the job finishes.
	defer cleanup()

	// Step 2: Generate SBOM with Syft
	slog.Info("Generating SBOM", "repository", job.Repository)
	sbomResult, _, sbomDigest, err := w.generateSBOM(ctx, imageDir)
	if err != nil {
		return nil, fmt.Errorf("failed to generate SBOM: %w", err)
	}

	// Step 3: Scan SBOM with Grype (if enabled)
	var vulnJSON []byte
	var vulnDigest string
	var summary VulnerabilitySummary

	if w.config.Scanner.VulnEnabled {
		slog.Info("Scanning for vulnerabilities", "repository", job.Repository)
		vulnJSON, vulnDigest, summary, err = w.scanVulnerabilities(ctx, sbomResult)
		if err != nil {
			return nil, fmt.Errorf("failed to scan vulnerabilities: %w", err)
		}
	}

	// Step 4: Upload results to storage and create ORAS manifests
	slog.Info("Storing scan results", "repository", job.Repository)
	err = w.storeResults(ctx, job, sbomDigest, vulnDigest, vulnJSON, summary)
	if err != nil {
		return nil, fmt.Errorf("failed to store results: %w", err)
	}

	duration := time.Since(startTime)
	slog.Info("Scan job completed",
		"repository", job.Repository,
		"tag", job.Tag,
		"duration", duration,
		"vulnerabilities", summary.Total)

	return &ScanResult{
		Job:                 job,
		VulnerabilitiesJSON: vulnJSON,
		Summary:             summary,
		SBOMDigest:          sbomDigest,
		VulnDigest:          vulnDigest,
		ScannedAt:           time.Now(),
		ScannerVersion:      w.getScannerVersion(),
	}, nil
}
|
||||
|
||||
// getScannerVersion returns the version string for the scanner
|
||||
func (w *Worker) getScannerVersion() string {
|
||||
// TODO: Get actual Syft and Grype versions dynamically
|
||||
return "syft-v1.36.0/grype-v0.102.0"
|
||||
}
|
||||
@@ -1,110 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# update-homebrew-formula.sh - Helper script to update Homebrew formula with new release
|
||||
#
|
||||
# Usage: ./scripts/update-homebrew-formula.sh <version>
|
||||
#
|
||||
# Example: ./scripts/update-homebrew-formula.sh v0.0.2
|
||||
#
|
||||
# This script:
|
||||
# 1. Downloads the source tarball from GitHub
|
||||
# 2. Calculates SHA256 checksum
|
||||
# 3. Generates updated formula snippet
|
||||
#
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Check arguments
|
||||
if [ $# -ne 1 ]; then
|
||||
echo -e "${RED}Error: Missing required argument${NC}"
|
||||
echo "Usage: $0 <version>"
|
||||
echo ""
|
||||
echo "Example: $0 v0.0.2"
|
||||
echo " $0 0.0.2 (v prefix is optional)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
VERSION="$1"
|
||||
|
||||
# Add 'v' prefix if not present
|
||||
if [[ ! "$VERSION" =~ ^v ]]; then
|
||||
VERSION="v${VERSION}"
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}Updating Homebrew formula for version ${VERSION}${NC}"
|
||||
echo ""
|
||||
|
||||
# GitHub repository details
|
||||
GITHUB_REPO="atcr-io/atcr"
|
||||
TARBALL_URL="https://github.com/${GITHUB_REPO}/archive/refs/tags/${VERSION}.tar.gz"
|
||||
|
||||
# Create temporary directory
|
||||
TEMP_DIR=$(mktemp -d)
|
||||
trap 'rm -rf "$TEMP_DIR"' EXIT
|
||||
|
||||
TARBALL_FILE="${TEMP_DIR}/${VERSION}.tar.gz"
|
||||
|
||||
echo -e "${YELLOW}Downloading source tarball...${NC}"
|
||||
echo "URL: ${TARBALL_URL}"
|
||||
|
||||
if curl -sSfL -o "$TARBALL_FILE" "$TARBALL_URL"; then
|
||||
# Calculate SHA256
|
||||
if command -v sha256sum &> /dev/null; then
|
||||
CHECKSUM=$(sha256sum "$TARBALL_FILE" | awk '{print $1}')
|
||||
elif command -v shasum &> /dev/null; then
|
||||
CHECKSUM=$(shasum -a 256 "$TARBALL_FILE" | awk '{print $1}')
|
||||
else
|
||||
echo -e "${RED}Error: sha256sum or shasum command not found${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}✓ Downloaded successfully${NC}"
|
||||
echo "SHA256: $CHECKSUM"
|
||||
else
|
||||
echo -e "${RED}✗ Failed to download source tarball${NC}"
|
||||
echo ""
|
||||
echo "Make sure the tag ${VERSION} exists on GitHub:"
|
||||
echo " https://github.com/${GITHUB_REPO}/releases/tag/${VERSION}"
|
||||
echo ""
|
||||
echo "If you haven't pushed the tag yet, run:"
|
||||
echo " git tag ${VERSION}"
|
||||
echo " git push origin ${VERSION}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "======================================================================"
|
||||
echo "Copy the following to Formula/docker-credential-atcr.rb:"
|
||||
echo "======================================================================"
|
||||
echo ""
|
||||
|
||||
cat << EOF
|
||||
url "https://github.com/${GITHUB_REPO}/archive/refs/tags/${VERSION}.tar.gz"
|
||||
sha256 "${CHECKSUM}"
|
||||
license "MIT"
|
||||
head "https://github.com/${GITHUB_REPO}.git", branch: "main"
|
||||
EOF
|
||||
|
||||
echo ""
|
||||
echo "======================================================================"
|
||||
echo ""
|
||||
echo -e "${YELLOW}Next steps:${NC}"
|
||||
echo "1. Update Formula/docker-credential-atcr.rb with the url and sha256 above"
|
||||
echo "2. Test the formula locally:"
|
||||
echo " brew install --build-from-source Formula/docker-credential-atcr.rb"
|
||||
echo " docker-credential-atcr version"
|
||||
echo "3. Commit and push to your atcr-io/homebrew-tap repository:"
|
||||
echo " cd /path/to/homebrew-tap"
|
||||
echo " cp Formula/docker-credential-atcr.rb ."
|
||||
echo " git add docker-credential-atcr.rb"
|
||||
echo " git commit -m \"Update docker-credential-atcr to ${VERSION}\""
|
||||
echo " git push"
|
||||
echo "4. Users can upgrade with:"
|
||||
echo " brew update"
|
||||
echo " brew upgrade docker-credential-atcr"
|
||||
Reference in New Issue
Block a user