migrate envs to use yaml configs

This commit is contained in:
Evan Jarrett
2026-02-10 22:11:21 -06:00
parent 914328dbf1
commit 9723de0bcd
11 changed files with 20 additions and 705 deletions

View File

@@ -4,7 +4,7 @@ tmp_dir = "tmp"
[build]
pre_cmd = ["go generate ./pkg/hold/..."]
cmd = "go build -buildvcs=false -o ./tmp/atcr-hold ./cmd/hold"
entrypoint = ["./tmp/atcr-hold" , "serve"]
entrypoint = ["./tmp/atcr-hold", "serve", "--config", "config-hold.example.yaml"]
include_ext = ["go", "html", "css", "js"]
exclude_dir = ["bin", "tmp", "vendor", "deploy", "docs", ".git", "dist", "pkg/appview", "node_modules"]
exclude_regex = ["_test\\.go$", "cbor_gen\\.go$", "\\.min\\.js$", "public/css/style\\.css$", "public/icons\\.svg$"]

View File

@@ -8,7 +8,7 @@ poll_interval = 500
# Pre-build: generate assets if missing (each string is a shell command)
pre_cmd = ["go generate ./pkg/appview/..."]
cmd = "go build -buildvcs=false -o ./tmp/atcr-appview ./cmd/appview"
entrypoint = ["./tmp/atcr-appview", "serve"]
entrypoint = ["./tmp/atcr-appview", "serve", "--config", "config-appview.example.yaml"]
include_ext = ["go", "html", "css", "js"]
exclude_dir = ["bin", "tmp", "vendor", "deploy", "docs", ".git", "dist", "node_modules", "pkg/hold"]
exclude_regex = ["_test\\.go$", "cbor_gen\\.go$", "\\.min\\.js$", "public/css/style\\.css$", "public/icons\\.svg$"]

View File

@@ -84,4 +84,4 @@ legal:
# Organization name for Terms of Service and Privacy Policy. Defaults to server.client_name.
company_name: ""
# Governing law jurisdiction for legal terms.
jurisdiction: State of Texas, United States
jurisdiction: ""

View File

@@ -1,216 +0,0 @@
# ATCR Production Environment Configuration
# Copy this file to .env and fill in your values
#
# Usage:
# 1. cp deploy/.env.prod.template .env
# 2. Edit .env with your configuration
# 3. systemctl restart atcr
#
# NOTE: This file is loaded by docker-compose.prod.yml
# ==============================================================================
# Domain Configuration
# ==============================================================================
# Main AppView domain (registry API + web UI)
# REQUIRED: Update with your domain
APPVIEW_DOMAIN=atcr.io
# ==============================================================================
# Hold Service Configuration
# ==============================================================================
# Hold service domain (REQUIRED)
# The hostname where the hold service will be accessible
# Used by docker-compose.prod.yml to derive:
# - HOLD_PUBLIC_URL: https://${HOLD_DOMAIN}
# - ATCR_DEFAULT_HOLD_DID: did:web:${HOLD_DOMAIN}
# Example: hold01.atcr.io
HOLD_DOMAIN=hold01.atcr.io
# Your ATProto DID (REQUIRED for hold registration)
# Get your DID from: https://bsky.social/xrpc/com.atproto.identity.resolveHandle?handle=yourhandle.bsky.social
# Example: did:plc:abc123xyz789
HOLD_OWNER=did:plc:pddp4xt5lgnv2qsegbzzs4xg
# Directory path for embedded PDS carstore (SQLite database)
# Default: /var/lib/atcr-hold
# If empty, embedded PDS is disabled
#
# Note: This should be a directory path, NOT a file path
# Carstore creates db.sqlite3 inside this directory
#
# The embedded PDS makes the hold a proper ATProto user with:
# - did:web identity (derived from HOLD_DOMAIN)
# - DID document at /.well-known/did.json
# - XRPC endpoints for crew management
# - ATProto blob endpoints (wraps existing presigned URL logic)
#
# Example: For HOLD_DOMAIN=hold01.atcr.io, the hold becomes did:web:hold01.atcr.io
HOLD_DATABASE_DIR=/var/lib/atcr-hold
# Path to signing key (auto-generated on first run if missing)
# Default: {HOLD_DATABASE_DIR}/signing.key
# HOLD_KEY_PATH=/var/lib/atcr-hold/signing.key
# Allow public blob reads (pulls) without authentication
# - true: Anyone can pull images (read-only)
# - false: Only authenticated users can pull
# Default: false (private)
HOLD_PUBLIC=false
# Allow all authenticated users to write to this hold
# This setting controls write permissions for authenticated ATCR users
#
# - true: Any authenticated ATCR user can push images (treat all as crew)
# Useful for shared/community holds where you want to allow
# multiple users to push without explicit crew membership.
# Users must still authenticate via ATProto OAuth.
#
# - false: Only hold owner and explicit crew members can push (default)
# Write access requires io.atcr.hold.crew record in owner's PDS.
# Most secure option for production holds.
#
# Read permissions are controlled by HOLD_PUBLIC (above).
#
# Security model:
# Read: HOLD_PUBLIC=true → anonymous + authenticated users
# HOLD_PUBLIC=false → authenticated users only
# Write: HOLD_ALLOW_ALL_CREW=true → all authenticated users
# HOLD_ALLOW_ALL_CREW=false → owner + crew only (verified via PDS)
#
# Use cases:
# - Public registry: HOLD_PUBLIC=true, HOLD_ALLOW_ALL_CREW=true
# - ATProto users only: HOLD_PUBLIC=false, HOLD_ALLOW_ALL_CREW=true
# - Private hold (default): HOLD_PUBLIC=false, HOLD_ALLOW_ALL_CREW=false
#
# Default: false
HOLD_ALLOW_ALL_CREW=false
# Enable Bluesky posts when manifests are pushed
# When enabled, the hold service creates Bluesky posts announcing new container
# image pushes. Posts include image name, tag, size, and layer count.
#
# - true: Create Bluesky posts for manifest uploads
# - false: Silent operation (no Bluesky posts)
#
# Note: This requires the hold owner to have OAuth credentials for posting.
# See docs/BLUESKY_MANIFEST_POSTS.md for setup instructions.
#
# Default: false
HOLD_BLUESKY_POSTS_ENABLED=true
# ==============================================================================
# S3/UpCloud Object Storage Configuration (REQUIRED)
# ==============================================================================
# S3 is the only supported storage backend. Presigned URLs are used for direct
# client ↔ S3 transfers, eliminating the hold service as a bandwidth bottleneck.
# S3 Access Credentials
# Get these from UpCloud Object Storage console
AWS_ACCESS_KEY_ID=
AWS_SECRET_ACCESS_KEY=
# S3 Region (for distribution S3 driver)
# For third-party S3 providers (UpCloud, Storj, Minio), this value is ignored
# when S3_ENDPOINT is set, but must be a valid AWS region to pass validation.
# Default: us-east-1
AWS_REGION=us-east-1
# S3 Bucket Name
# Create this bucket in UpCloud Object Storage
# Example: atcr-blobs
S3_BUCKET=atcr
# S3 Endpoint
# Get this from UpCloud Console → Storage → Object Storage → Your bucket → "S3 endpoint"
# Format: https://[bucket-id].upcloudobjects.com
# Example: https://6vmss.upcloudobjects.com
#
# NOTE: Use the bucket-specific endpoint, NOT a custom domain
# Custom domains break presigned URL generation
S3_ENDPOINT=https://6vmss.upcloudobjects.com
# ==============================================================================
# AppView Configuration
# ==============================================================================
# Default hold service DID (derived from HOLD_DOMAIN in docker-compose.prod.yml)
# Uncomment to override if you want to use a different hold service as the default
# ATCR_DEFAULT_HOLD_DID=did:web:some-other-hold.example.com
# OAuth client display name (shown in authorization screens)
# Default: AT Container Registry
# ATCR_CLIENT_NAME=AT Container Registry
# Short brand name for page titles and metadata
# Used in meta tags, page titles, and UI text
# Default: ATCR
# ATCR_CLIENT_SHORT_NAME=ATCR
# ==============================================================================
# Legal Page Customization
# ==============================================================================
# Company/organization name displayed in legal pages (Terms, Privacy)
# Default: AT Container Registry
ATCR_LEGAL_COMPANY_NAME=AT Container Registry
# Governing law jurisdiction for legal terms
# Default: State of Texas, United States
ATCR_LEGAL_JURISDICTION=State of Texas, United States
# ==============================================================================
# Logging Configuration
# ==============================================================================
# Log level: debug, info, warn, error
# Default: info
ATCR_LOG_LEVEL=debug
# Log formatter: text, json
# Default: text
ATCR_LOG_FORMATTER=text
# ==============================================================================
# Jetstream Configuration (ATProto event streaming)
# ==============================================================================
# Jetstream WebSocket URL for real-time ATProto events
# Default: wss://jetstream2.us-west.bsky.network/subscribe
JETSTREAM_URL=wss://jetstream2.us-west.bsky.network/subscribe
# Enable backfill worker to sync historical records
# Default: true (recommended for production)
ATCR_BACKFILL_ENABLED=true
# ATProto relay endpoint for backfill sync API
# Default: https://relay1.us-east.bsky.network
ATCR_RELAY_ENDPOINT=https://relay1.us-east.bsky.network
# ==============================================================================
# CHECKLIST
# ==============================================================================
#
# Before starting ATCR, ensure you have:
#
# ☐ Set APPVIEW_DOMAIN (e.g., atcr.io)
# ☐ Set HOLD_DOMAIN (e.g., hold01.atcr.io)
# ☐ Set HOLD_OWNER (your ATProto DID)
# ☐ Set HOLD_DATABASE_DIR (default: /var/lib/atcr-hold) - enables embedded PDS
# ☐ Set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
# ☐ Set S3_BUCKET (created in UpCloud Object Storage)
# ☐ Set S3_ENDPOINT (UpCloud bucket endpoint, e.g., https://6vmss.upcloudobjects.com)
# ☐ Configured DNS records:
# - A record: atcr.io → server IP
# - A record: hold01.atcr.io → server IP
# - CNAME: blobs.atcr.io → [bucket].upcloudobjects.com
# ☐ Disabled Cloudflare proxy (gray cloud, not orange)
# ☐ Waited for DNS propagation (check with: dig atcr.io)
#
# After starting:
# ☐ Complete hold OAuth registration (run: /opt/atcr/get-hold-oauth.sh)
# ☐ Verify hold PDS: curl https://hold01.atcr.io/.well-known/did.json
# ☐ Test registry: docker pull atcr.io/test/image
# ☐ Monitor logs: /opt/atcr/logs.sh

View File

@@ -44,30 +44,16 @@ services:
image: atcr-appview:latest
container_name: atcr-appview
restart: unless-stopped
command: ["serve", "--config", "/config.yaml"]
# Base config: config-appview.example.yaml
# Env vars below override config file values for this deployment
environment:
# Server configuration
ATCR_HTTP_ADDR: :5000
ATCR_BASE_URL: https://${APPVIEW_DOMAIN:-atcr.io}
# Storage configuration (derived from HOLD_DOMAIN)
ATCR_DEFAULT_HOLD_DID: ${ATCR_DEFAULT_HOLD_DID:-did:web:${HOLD_DOMAIN:-hold01.atcr.io}}
# Authentication
ATCR_AUTH_KEY_PATH: /var/lib/atcr/auth/private-key.pem
ATCR_AUTH_CERT_PATH: /var/lib/atcr/auth/private-key.crt
# UI configuration
ATCR_UI_DATABASE_PATH: /var/lib/atcr/ui.db
# Logging
ATCR_LOG_LEVEL: ${ATCR_LOG_LEVEL:-info}
ATCR_LOG_FORMATTER: ${ATCR_LOG_FORMATTER:-text}
# Jetstream configuration
JETSTREAM_URL: ${JETSTREAM_URL:-wss://jetstream2.us-west.bsky.network/subscribe}
ATCR_BACKFILL_ENABLED: ${ATCR_BACKFILL_ENABLED:-true}
ATCR_RELAY_ENDPOINT: ${ATCR_RELAY_ENDPOINT:-https://relay1.us-east.bsky.network}
volumes:
- ./config-appview.yaml:/config.yaml:ro
# Persistent data: auth keys, UI database, OAuth tokens, Jetstream cache
- atcr-appview-data:/var/lib/atcr
networks:
@@ -86,31 +72,22 @@ services:
image: atcr-hold:latest
container_name: atcr-hold
restart: unless-stopped
command: ["serve", "--config", "/config.yaml"]
# Base config: config-hold.example.yaml
# Env vars below override config file values for this deployment
environment:
HOLD_ADMIN_ENABLED: true
# Hold service configuration (derived from HOLD_DOMAIN)
HOLD_PUBLIC_URL: ${HOLD_PUBLIC_URL:-https://${HOLD_DOMAIN:-hold01.atcr.io}}
HOLD_SERVER_ADDR: :8080
HOLD_ALLOW_ALL_CREW: ${HOLD_ALLOW_ALL_CREW:-false}
HOLD_PUBLIC: ${HOLD_PUBLIC:-false}
HOLD_OWNER: ${HOLD_OWNER:-}
HOLD_BLUESKY_POSTS_ENABLED: ${HOLD_BLUESKY_POSTS_ENABLED:-true}
# Embedded PDS configuration
HOLD_DATABASE_DIR: ${HOLD_DATABASE_DIR:-/var/lib/atcr-hold}
# HOLD_KEY_PATH: ${HOLD_KEY_PATH} # Optional, defaults to {HOLD_DATABASE_DIR}/signing.key
# S3/UpCloud Object Storage configuration (REQUIRED)
# S3/UpCloud Object Storage (REQUIRED)
AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID:-}
AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY:-}
AWS_REGION: ${AWS_REGION:-us-east-1}
S3_BUCKET: ${S3_BUCKET:-atcr-blobs}
S3_ENDPOINT: ${S3_ENDPOINT:-}
# Logging
ATCR_LOG_LEVEL: ${ATCR_LOG_LEVEL:-debug}
ATCR_LOG_FORMATTER: ${ATCR_LOG_FORMATTER:-text}
HOLD_LOG_LEVEL: ${ATCR_LOG_LEVEL:-info}
volumes:
- ./config-hold.yaml:/config.yaml:ro
# PDS data (carstore SQLite + signing keys)
- atcr-hold-data:/var/lib/atcr-hold
- ./quotas.yaml:/quotas.yaml:ro

View File

@@ -1,344 +0,0 @@
#!/bin/bash
#
# ATCR UpCloud Initialization Script for Rocky Linux
#
# This script sets up ATCR on a fresh Rocky Linux instance.
# Paste this into UpCloud's "User data" field when creating a server.
#
# What it does:
# - Updates system packages
# - Creates 2GB swap file (for 1GB RAM instances)
# - Installs Docker and Docker Compose
# - Creates directory structure
# - Clones ATCR repository
# - Creates systemd service for auto-start
# - Builds and starts containers
#
# Post-deployment:
# 1. Edit /opt/atcr/.env with your configuration
# 2. Run: systemctl restart atcr
# 3. Check logs: docker logs atcr-hold (for OAuth URL)
# 4. Complete hold registration via OAuth
set -euo pipefail
# Configuration
ATCR_DIR="/opt/atcr"
ATCR_REPO="https://tangled.org/evan.jarrett.net/at-container-registry" # UPDATE THIS
ATCR_BRANCH="main"
# Simple logging without colors (for cloud-init log compatibility)
log_info() {
echo "[INFO] $1"
}
log_warn() {
echo "[WARN] $1"
}
log_error() {
echo "[ERROR] $1"
}
# Function to check if command exists
command_exists() {
command -v "$1" >/dev/null 2>&1
}
log_info "Starting ATCR deployment on Rocky Linux..."
# Update system packages
log_info "Updating system packages..."
dnf update -y
# Install required packages
log_info "Installing prerequisites..."
dnf install -y \
git \
wget \
curl \
nano \
vim
log_info "Required ports: HTTP (80), HTTPS (443), SSH (22)"
# Create swap file for instances with limited RAM
if [ ! -f /swapfile ]; then
log_info "Creating 2GB swap file (allows builds on 1GB RAM instances)..."
dd if=/dev/zero of=/swapfile bs=1M count=2048 status=progress
chmod 600 /swapfile
mkswap /swapfile
swapon /swapfile
# Make swap permanent
echo '/swapfile none swap sw 0 0' >> /etc/fstab
log_info "Swap file created and enabled"
free -h
else
log_info "Swap file already exists"
fi
# Install Docker
if ! command_exists docker; then
log_info "Installing Docker..."
# Add Docker repository
dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# Install Docker
dnf install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
# Start and enable Docker
systemctl enable --now docker
log_info "Docker installed successfully"
else
log_info "Docker already installed"
fi
# Verify Docker Compose
if ! docker compose version >/dev/null 2>&1; then
log_error "Docker Compose plugin not found. Please install manually."
exit 1
fi
log_info "Docker Compose version: $(docker compose version)"
# Create ATCR directory
log_info "Creating ATCR directory: $ATCR_DIR"
mkdir -p "$ATCR_DIR"
cd "$ATCR_DIR"
# Clone repository or create minimal structure
if [ -n "$ATCR_REPO" ] && [ "$ATCR_REPO" != "https://tangled.org/evan.jarrett.net/at-container-registry" ]; then
log_info "Cloning ATCR repository..."
git clone -b "$ATCR_BRANCH" "$ATCR_REPO" .
else
log_warn "ATCR_REPO not configured. You'll need to manually copy files to $ATCR_DIR"
log_warn "Required files:"
log_warn " - deploy/docker-compose.prod.yml"
log_warn " - deploy/.env.prod.template"
log_warn " - Dockerfile.appview"
log_warn " - Dockerfile.hold"
fi
# Create .env file from template if it doesn't exist
if [ -f "deploy/.env.prod.template" ] && [ ! -f "$ATCR_DIR/.env" ]; then
log_info "Creating .env file from template..."
cp deploy/.env.prod.template "$ATCR_DIR/.env"
log_warn "IMPORTANT: Edit $ATCR_DIR/.env with your configuration!"
fi
# Create systemd services (caddy, appview, hold)
log_info "Creating systemd services..."
# Caddy service (reverse proxy for both appview and hold)
cat > /etc/systemd/system/atcr-caddy.service <<'EOF'
[Unit]
Description=ATCR Caddy Reverse Proxy
Requires=docker.service
After=docker.service network-online.target
Wants=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
WorkingDirectory=/opt/atcr
EnvironmentFile=/opt/atcr/.env
# Start caddy container
ExecStart=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml up -d caddy
# Stop caddy container
ExecStop=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml stop caddy
# Restart caddy container
ExecReload=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml restart caddy
# Always restart on failure
Restart=on-failure
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
# AppView service (registry + web UI)
cat > /etc/systemd/system/atcr-appview.service <<'EOF'
[Unit]
Description=ATCR AppView (Registry + Web UI)
Requires=docker.service atcr-caddy.service
After=docker.service network-online.target atcr-caddy.service
Wants=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
WorkingDirectory=/opt/atcr
EnvironmentFile=/opt/atcr/.env
# Start appview container
ExecStart=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml up -d atcr-appview
# Stop appview container
ExecStop=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml stop atcr-appview
# Restart appview container
ExecReload=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml restart atcr-appview
# Always restart on failure
Restart=on-failure
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
# Hold service (storage backend)
cat > /etc/systemd/system/atcr-hold.service <<'EOF'
[Unit]
Description=ATCR Hold (Storage Service)
Requires=docker.service atcr-caddy.service
After=docker.service network-online.target atcr-caddy.service
Wants=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
WorkingDirectory=/opt/atcr
EnvironmentFile=/opt/atcr/.env
# Start hold container
ExecStart=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml up -d atcr-hold
# Stop hold container
ExecStop=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml stop atcr-hold
# Restart hold container
ExecReload=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml restart atcr-hold
# Always restart on failure
Restart=on-failure
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
# Reload systemd
log_info "Reloading systemd daemon..."
systemctl daemon-reload
# Enable all services (but don't start yet - user needs to configure .env)
systemctl enable atcr-caddy.service
systemctl enable atcr-appview.service
systemctl enable atcr-hold.service
log_info "Systemd services created and enabled"
# Create helper scripts
log_info "Creating helper scripts..."
# Script to rebuild and restart
cat > "$ATCR_DIR/rebuild.sh" <<'EOF'
#!/bin/bash
set -e
cd /opt/atcr
docker compose -f deploy/docker-compose.prod.yml build
docker compose -f deploy/docker-compose.prod.yml up -d
docker compose -f deploy/docker-compose.prod.yml logs -f
EOF
chmod +x "$ATCR_DIR/rebuild.sh"
# Script to view logs
cat > "$ATCR_DIR/logs.sh" <<'EOF'
#!/bin/bash
cd /opt/atcr
docker compose -f deploy/docker-compose.prod.yml logs -f "$@"
EOF
chmod +x "$ATCR_DIR/logs.sh"
log_info "Helper scripts created in $ATCR_DIR"
# Print completion message
cat <<'EOF'
================================================================================
ATCR Installation Complete!
================================================================================
NEXT STEPS:
1. Configure environment variables:
nano /opt/atcr/.env
Required settings:
- AWS_ACCESS_KEY_ID (UpCloud S3 credentials)
- AWS_SECRET_ACCESS_KEY
Pre-configured (verify these are correct):
- APPVIEW_DOMAIN=atcr.io
- HOLD_DOMAIN=hold01.atcr.io
- HOLD_OWNER=did:plc:pddp4xt5lgnv2qsegbzzs4xg
- S3_BUCKET=atcr
- S3_ENDPOINT=https://blobs.atcr.io
2. Configure UpCloud Cloud Firewall (in control panel):
Allow: TCP 22 (SSH)
Allow: TCP 80 (HTTP)
Allow: TCP 443 (HTTPS)
Drop: Everything else
3. Configure DNS (Cloudflare - DNS-only mode):
EOF
echo " A atcr.io → $(curl -s ifconfig.me || echo '[server-ip]') (gray cloud)"
echo " A hold01.atcr.io → $(curl -s ifconfig.me || echo '[server-ip]') (gray cloud)"
echo " CNAME blobs.atcr.io → atcr.us-chi1.upcloudobjects.com (gray cloud)"
cat <<'EOF'
4. Start ATCR services:
systemctl start atcr-caddy atcr-appview atcr-hold
5. Check status:
systemctl status atcr-caddy
systemctl status atcr-appview
systemctl status atcr-hold
docker ps
/opt/atcr/logs.sh
Helper Scripts:
/opt/atcr/rebuild.sh - Rebuild and restart containers
/opt/atcr/logs.sh [service] - View logs (e.g., logs.sh atcr-hold)
Service Management:
systemctl start atcr-caddy - Start Caddy reverse proxy
systemctl start atcr-appview - Start AppView (registry + UI)
systemctl start atcr-hold - Start Hold (storage service)
systemctl stop atcr-appview - Stop AppView only
systemctl stop atcr-hold - Stop Hold only
systemctl stop atcr-caddy - Stop all (stops reverse proxy)
systemctl restart atcr-appview - Restart AppView
systemctl restart atcr-hold - Restart Hold
systemctl status atcr-caddy - Check Caddy status
systemctl status atcr-appview - Check AppView status
systemctl status atcr-hold - Check Hold status
Documentation:
https://tangled.org/evan.jarrett.net/at-container-registry
IMPORTANT:
- Edit /opt/atcr/.env with S3 credentials before starting!
- Configure UpCloud cloud firewall (see step 2)
- DNS must be configured and propagated
- Cloudflare proxy must be DISABLED (gray cloud)
- Complete hold OAuth registration before first push
EOF
log_info "Installation complete. Follow the next steps above."

View File

@@ -1,41 +0,0 @@
# ATCR Hold Service Quota Configuration
# Copy this file to quotas.yaml to enable quota enforcement.
# If quotas.yaml doesn't exist, quotas are disabled (unlimited for all users).
# Tiers define quota levels using nautical crew ranks.
# Each tier has a quota limit specified in human-readable format.
# Supported units: B, KB, MB, GB, TB, PB (case-insensitive)
tiers:
# Entry-level crew - starter tier for new users (free)
swabbie:
quota: 2GB
# Standard crew - for regular users
deckhand:
quota: 5GB
# Mid-level crew - for regular contributors
bosun:
quota: 10GB
# Senior crew - for power users or trusted contributors
#quartermaster:
# quota: 50GB
# You can add custom tiers with any name:
# admiral:
# quota: 1TB
defaults:
# Default tier assigned to new crew members who don't have an explicit tier.
# This tier must exist in the tiers section above.
new_crew_tier: swabbie
# Notes:
# - The hold captain (owner) always has unlimited quota regardless of tiers.
# - Crew members can be assigned a specific tier in their crew record.
# - If a crew member's tier doesn't exist in config, they fall back to the default.
# - Quota is calculated per-user by summing unique blob sizes (deduplicated).
# - Quota is checked when pushing manifests (after blobs are already uploaded).
# - Billing configuration (Stripe prices, descriptions) goes in a separate
# top-level "billing:" section. See billing documentation for details.

View File

@@ -1,55 +0,0 @@
#!/bin/bash
#
# Request crawl for a PDS from the Bluesky relay
#
# Usage: ./request-crawl.sh <hostname> [relay-url]
# Example: ./request-crawl.sh hold01.atcr.io
#
set -e
DEFAULT_RELAY="https://bsky.network/xrpc/com.atproto.sync.requestCrawl"
# Parse arguments
HOSTNAME="${1:-}"
RELAY_URL="${2:-$DEFAULT_RELAY}"
# Validate hostname
if [ -z "$HOSTNAME" ]; then
echo "Error: hostname is required" >&2
echo "" >&2
echo "Usage: $0 <hostname> [relay-url]" >&2
echo "Example: $0 hold01.atcr.io" >&2
echo "" >&2
echo "Options:" >&2
echo " hostname Hostname of the PDS to request crawl for (required)" >&2
echo " relay-url Relay URL to send crawl request to (default: $DEFAULT_RELAY)" >&2
exit 1
fi
# Log what we're doing
echo "Requesting crawl for hostname: $HOSTNAME"
echo "Sending to relay: $RELAY_URL"
# Make the request
RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "$RELAY_URL" \
-H "Content-Type: application/json" \
-d "{\"hostname\":\"$HOSTNAME\"}")
# Split response and status code
HTTP_BODY=$(echo "$RESPONSE" | head -n -1)
HTTP_CODE=$(echo "$RESPONSE" | tail -n 1)
# Check response
if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ]; then
echo "✅ Success! Crawl requested for $HOSTNAME"
if [ -n "$HTTP_BODY" ]; then
echo "Response: $HTTP_BODY"
fi
else
echo "❌ Failed with status $HTTP_CODE" >&2
if [ -n "$HTTP_BODY" ]; then
echo "Response: $HTTP_BODY" >&2
fi
exit 1
fi

View File

@@ -10,18 +10,14 @@ services:
# Optional: Load from .env.appview file (create from .env.appview.example)
# env_file:
# - .env.appview
# Base config: config-appview.example.yaml (passed via Air entrypoint)
# Env vars below override config file values for local dev
environment:
# ATCR_SERVER_CLIENT_NAME: "Seamark"
# ATCR_SERVER_CLIENT_SHORT_NAME: "Seamark"
# Server configuration
ATCR_SERVER_ADDR: :5000
ATCR_SERVER_DEFAULT_HOLD_DID: did:web:172.28.0.3:8080
ATCR_JETSTREAM_BACKFILL_ENABLED: "true"
# Test mode - fallback to default hold when user's hold is unreachable
ATCR_SERVER_TEST_MODE: "true"
# Logging
ATCR_LOG_LEVEL: debug
# Log shipping (uncomment to enable)
LOG_SHIPPER_BACKEND: victoria
LOG_SHIPPER_URL: http://172.28.0.10:9428
# Limit local Docker logs - real logs go to Victoria Logs
@@ -53,20 +49,18 @@ services:
atcr-hold:
env_file:
- ../atcr-secrets.env # Load S3/Storj credentials from external file
# Base config: config-hold.example.yaml (passed via Air entrypoint)
# Env vars below override config file values for local dev
environment:
HOLD_ADMIN_ENABLED: true
HOLD_SERVER_PUBLIC_URL: http://172.28.0.3:8080
HOLD_REGISTRATION_OWNER_DID: did:plc:pddp4xt5lgnv2qsegbzzs4xg
HOLD_SERVER_PUBLIC: false
HOLD_REGISTRATION_ALLOW_ALL_CREW: true
HOLD_SERVER_TEST_MODE: true
# Stripe billing (only used with -tags billing)
STRIPE_SECRET_KEY: sk_test_
STRIPE_PUBLISHABLE_KEY: pk_test_
STRIPE_WEBHOOK_SECRET: whsec_
# Logging
HOLD_LOG_LEVEL: debug
# Log shipping (uncomment to enable)
LOG_SHIPPER_BACKEND: victoria
LOG_SHIPPER_URL: http://172.28.0.10:9428
# S3 storage config comes from env_file (AWS_*, S3_*)

View File

@@ -181,7 +181,7 @@ func setDefaults(v *viper.Viper) {
// Legal defaults
v.SetDefault("legal.company_name", "")
v.SetDefault("legal.jurisdiction", "State of Texas, United States")
v.SetDefault("legal.jurisdiction", "")
// Log formatter (used by distribution config, not in Config struct)
v.SetDefault("log_formatter", "text")

View File

@@ -163,7 +163,7 @@ func setHoldDefaults(v *viper.Viper) {
// Registration defaults
v.SetDefault("registration.owner_did", "")
v.SetDefault("registration.allow_all_crew", false)
v.SetDefault("registration.profile_avatar_url", "https://imgs.blue/evan.jarrett.net/1TpTOdtS60GdJWBYEqtK22y688jajbQ9a5kbYRFtwuqrkBAE")
v.SetDefault("registration.profile_avatar_url", "https://atcr.io/web-app-manifest-192x192.png")
v.SetDefault("registration.enable_bluesky_posts", false)
// Database defaults
@@ -174,7 +174,7 @@ func setHoldDefaults(v *viper.Viper) {
v.SetDefault("database.libsql_sync_interval", "60s")
// Admin defaults
v.SetDefault("admin.enabled", false)
v.SetDefault("admin.enabled", true)
// Storage defaults
v.SetDefault("storage.access_key", "")