61 Commits

Author SHA1 Message Date
Evan Jarrett
98a2cfea59 improve UI around credential helper authorization. have the hold requestCrawl on restart. Update comments that relay_endpoints must support listreposbycollection 2026-05-08 20:44:04 -05:00
Evan Jarrett
3533f07ecb minor bug fixes, add ability to see starred repos 2026-05-06 21:55:47 -05:00
Evan Jarrett
56f3b2fc2f fix star not being filled in when you star a repo 2026-05-06 21:24:36 -05:00
Evan Jarrett
2d0872790d delete manifests correctly when deleted from the PDS fixes https://tangled.org/did:plc:e3kzdezk5gsirzh7eoqplc64/issues/18 2026-05-06 21:13:00 -05:00
Evan Jarrett
419d05057e fix to artifact diff. add missing file 2026-05-04 23:18:40 -05:00
Evan Jarrett
e1483f7aa5 fix favicon in navbar 2026-05-04 21:56:26 -05:00
Evan Jarrett
b2d6842bb7 clean up old migration code. minor bug fixes with appview ui 2026-05-04 21:52:28 -05:00
Evan Jarrett
1ac8af74d5 fix labeler deployment 2026-05-03 20:49:56 -05:00
Evan Jarrett
c2250c8787 remove binary file 2026-05-03 20:03:52 -05:00
Evan Jarrett
ba05b20364 update goreleaser 2026-05-03 14:51:05 -05:00
Evan Jarrett
18b000d0c1 simplify appview label subs 2026-05-03 10:48:36 -05:00
Evan Jarrett
4328eda814 holds now listen for deletes and labelers for takedowns. GC will defer takedowns for a grace period in case of reversal 2026-05-02 23:31:41 -05:00
Evan Jarrett
7b4a2e22a2 fix more db query issues, improve labeler usage 2026-05-02 22:59:32 -05:00
Evan Jarrett
ab66028151 more labeler improvements. standardize did work between labeler and hold. improve sql race conditions on local-only db 2026-05-02 22:13:53 -05:00
Evan Jarrett
410183f13b first pass at implementing a label service 2026-05-02 11:14:44 -05:00
Evan Jarrett
35aa19e2b2 fix captain record check ttl 2026-05-02 11:11:01 -05:00
Evan Jarrett
13a793ca90 improve admin tooling 2026-04-29 10:37:36 -05:00
Evan Jarrett
9af6eccc9d improvements to how scanning works, and helmchart ui 2026-04-29 10:12:25 -05:00
Evan Jarrett
f9ba8ff62b pass through 429 retry-after from pds 2026-04-29 08:50:54 -05:00
Evan Jarrett
e81681d61b remove go install path for now 2026-04-23 10:47:33 -05:00
Evan Jarrett
dfee21e3d3 fix install instructions 2026-04-23 10:38:55 -05:00
Evan Jarrett
1bff09ecbe fix settings page 2026-04-22 21:49:51 -05:00
Evan Jarrett
a602bf08d1 fix missing icons, update light mode theme colors 2026-04-22 21:41:48 -05:00
Evan Jarrett
8f4fca815b update syft/grype, fix scanner build 2026-04-22 20:56:26 -05:00
Evan Jarrett
9e09401cb3 fix play/pause button on carousel 2026-04-22 20:28:55 -05:00
Evan Jarrett
c7783bf87c more hardening, inline tangled svg into the sprite 2026-04-22 20:13:40 -05:00
Evan Jarrett
267012b41e impeccable:harden on all admin panel 2026-04-21 23:14:05 -05:00
Evan Jarrett
6b6ce093d3 new signup flow 2026-04-21 22:29:23 -05:00
Evan Jarrett
f057f169f0 large list of ui fixes for accessibility/hardening etc. 2026-04-21 21:18:13 -05:00
Evan Jarrett
23484645c0 fix tag store implementation for paginating tags new in distribution 3.1 2026-04-19 21:59:27 -05:00
Evan Jarrett
37cf9d0b86 need to also update the docker images 2026-04-19 21:48:07 -05:00
Evan Jarrett
872b637547 update all go modules 2026-04-19 21:45:25 -05:00
Evan Jarrett
185b0d05cb add back formula into this repo 2026-04-19 18:34:37 -05:00
Evan Jarrett
bbd7882a9d improvements to brew instructions (still not fully implemented) 2026-04-19 18:22:28 -05:00
Evan Jarrett
7a6775d4fa missed new files for jetstream improvements 2026-04-19 18:04:57 -05:00
Evan Jarrett
7c6b8945ed improve backfill and jetstream db connections 2026-04-19 18:01:17 -05:00
Evan Jarrett
38c693acc9 impeccable pass 2026-04-19 17:35:41 -05:00
Evan Jarrett
9809c26281 update fonts 2026-04-14 20:56:53 -05:00
Evan Jarrett
e843b7233c more ui fixes and scanner fixes 2026-04-12 20:48:24 -05:00
Evan Jarrett
2f30c22f0a impeccable fixes, scanner fixes 2026-04-12 20:36:57 -05:00
Evan Jarrett
25628dad2c update the login page 2026-04-11 21:01:31 -05:00
Evan Jarrett
564019d1c3 general appview bugfixes 2026-04-09 10:31:19 -05:00
Evan Jarrett
9033d74a19 fix validation on dids with hyphens 2026-04-07 22:26:21 -05:00
Evan Jarrett
03759713e9 fix 500 when trying to use did 2026-04-07 21:59:34 -05:00
Evan Jarrett
a68477033a use hyphens as the encode for dids 2026-04-07 21:47:51 -05:00
Evan Jarrett
21b6f6301a allow dids on docker login 2026-04-07 21:32:51 -05:00
Evan Jarrett
1865377b52 update deployments 2026-04-07 20:14:06 -05:00
Evan Jarrett
20202aa0df update credential helper to pull latest update from tangled directly 2026-04-04 20:59:50 -05:00
Evan Jarrett
90b0b42766 fix goreleaser again 2026-04-04 14:39:50 -05:00
Evan Jarrett
b54d33ccf8 serialize goreleaser 2026-04-04 14:24:39 -05:00
Evan Jarrett
9bb5ae0e84 split release workflows 2026-04-04 13:43:30 -05:00
Evan Jarrett
2a29ec0fdc fix htmx deletes 2026-04-04 10:59:06 -05:00
Evan Jarrett
23bebee838 fix go releaser 2026-04-04 10:50:33 -05:00
Evan Jarrett
2d202ae409 fix cred-helper workflow 2026-04-03 20:26:13 -05:00
Evan Jarrett
c77a180b63 try matrix builds 2026-04-03 20:10:29 -05:00
Evan Jarrett
fd5bfc3c50 ui fixes for repo page, fix scanner priority, cleanup goreleaser scripts 2026-04-03 16:48:21 -05:00
Evan Jarrett
ca69c0d1c2 ai advisor poc 2026-03-29 22:31:45 -07:00
Evan Jarrett
183d89530d add diff support for layers and vulns 2026-03-29 21:42:55 -07:00
Evan Jarrett
f20170f595 digest page improvements 2026-03-29 13:01:40 -07:00
Evan Jarrett
22cf108a5f fix quirks on repo and digest pages. fix ips not showing in server logs. add basic spam blocking to LB. add setting to configure your oci (docker) client. 2026-03-24 22:24:51 -05:00
Evan Jarrett
23db9be665 add repo page editor. fix deleting all untagged actually deleting all untagged 2026-03-23 21:16:13 -05:00
371 changed files with 32835 additions and 6886 deletions

View File

@@ -6,7 +6,7 @@ pre_cmd = ["go generate ./pkg/hold/..."]
cmd = "go build -buildvcs=false -o ./tmp/atcr-hold ./cmd/hold"
entrypoint = ["./tmp/atcr-hold", "serve", "--config", "config-hold.example.yaml"]
include_ext = ["go", "html", "css", "js"]
exclude_dir = ["bin", "tmp", "vendor", "deploy", "docs", ".git", "dist", "pkg/appview", "node_modules"]
exclude_dir = ["bin", "tmp", "vendor", "deploy", "docs", ".git", "dist", "node_modules", "scanner", "pkg/appview", "pkg/labeler"]
exclude_regex = ["_test\\.go$", "cbor_gen\\.go$", "\\.min\\.js$", "public/css/style\\.css$", "public/icons\\.svg$"]
delay = 3000
stop_on_error = true

25
.air.labeler.toml Normal file
View File

@@ -0,0 +1,25 @@
root = "."
tmp_dir = "tmp"
[build]
cmd = "go build -buildvcs=false -o ./tmp/atcr-labeler ./cmd/labeler"
entrypoint = ["./tmp/atcr-labeler", "serve", "--config", "config-labeler.example.yaml"]
include_ext = ["go", "html", "css", "js"]
exclude_dir = ["bin", "tmp", "vendor", "deploy", "docs", ".git", "dist", "node_modules", "scanner", "pkg/appview", "pkg/hold"]
exclude_regex = ["_test\\.go$", "cbor_gen\\.go$", "\\.min\\.js$", "public/css/style\\.css$", "public/icons\\.svg$"]
delay = 3000
stop_on_error = true
send_interrupt = true
kill_delay = 500
[log]
time = false
[color]
main = "cyan"
watcher = "magenta"
build = "yellow"
runner = "green"
[misc]
clean_on_exit = true

View File

@@ -10,7 +10,7 @@ pre_cmd = ["go generate ./pkg/appview/..."]
cmd = "go build -tags billing -buildvcs=false -o ./tmp/atcr-appview ./cmd/appview"
entrypoint = ["./tmp/atcr-appview", "serve", "--config", "config-appview.example.yaml"]
include_ext = ["go", "html", "css", "js"]
exclude_dir = ["bin", "tmp", "vendor", "deploy", "docs", ".git", "dist", "node_modules", "pkg/hold"]
exclude_dir = ["bin", "tmp", "vendor", "deploy", "docs", ".git", "dist", "node_modules", "scanner", "pkg/hold", "pkg/labeler"]
exclude_regex = ["_test\\.go$", "cbor_gen\\.go$", "\\.min\\.js$", "public/css/style\\.css$", "public/icons\\.svg$"]
delay = 3000
stop_on_error = true

1
.gitignore vendored
View File

@@ -36,3 +36,4 @@ pkg/hold/admin/public/css/style.css
.DS_Store
Thumbs.db
node_modules
.impeccable.md

View File

@@ -45,6 +45,49 @@ archives:
checksum:
name_template: 'checksums.txt'
brews:
- name: docker-credential-atcr
ids:
- credential-helper
repository:
# Tap lives inside the main repo under Formula/. DID-based URL keeps
# this stable across any future Tangled handle/domain rename.
owner: 'did:plc:pddp4xt5lgnv2qsegbzzs4xg'
name: at-container-registry
branch: main
git:
url: 'git@knot1.tangled.sh:did:plc:pddp4xt5lgnv2qsegbzzs4xg/at-container-registry'
private_key: '{{ .Env.TAP_SSH_KEY }}'
directory: Formula
url_template: "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v{{ .Version }}/download/{{ .ArtifactName }}"
homepage: "https://atcr.io"
description: "Docker credential helper for ATCR (ATProto Container Registry)"
license: "MIT"
commit_author:
name: atcr-bot
email: releases@atcr.io
commit_msg_template: "Brew formula update for {{ .ProjectName }} version {{ .Tag }}"
install: |
bin.install "docker-credential-atcr"
test: |
assert_match version.to_s, shell_output("#{bin}/docker-credential-atcr version 2>&1")
caveats: |
To finish setup, run:
docker-credential-atcr configure-docker
This writes the credential helper entry to ~/.docker/config.json. Or
add manually:
{
"credHelpers": {
"atcr.io": "atcr"
}
}
Then push an image to trigger device authorization:
docker push atcr.io/<your-handle>/<image>:latest
Credentials live in ~/.atcr/device.json (0600).
snapshot:
version_template: "{{ incpatch .Version }}-next"
@@ -60,13 +103,7 @@ changelog:
publishers:
- name: atproto-pds
cmd: ./scripts/publish-artifact.sh
env:
- APP_PASSWORD={{ .Env.APP_PASSWORD }}
- REPO_URL={{ .Env.REPO_URL }}
- TAG={{ .Tag }}
- ARTIFACT_PATH={{ abs .ArtifactPath }}
- ARTIFACT_NAME={{ .ArtifactName }}
cmd: ./scripts/publish-artifact.sh "{{ abs .ArtifactPath }}" "{{ .ArtifactName }}"
# Disable since not using GitHub
release:

View File

@@ -5,7 +5,7 @@ when:
branch: ["main"]
engine: kubernetes
image: golang:1.25-trixie
image: golang:1.26-trixie
architecture: amd64
steps:

View File

@@ -0,0 +1,60 @@
# ATCR AppView Release
# Builds multi-arch (amd64 + arm64) container image and publishes a manifest list.
when:
- event: ["push"]
tag: ["v*"]
engine: kubernetes
image: quay.io/buildah/stable:latest
architecture: [amd64, arm64]
environment:
IMAGE_REGISTRY: atcr.io
IMAGE_USER: atcr.io
IMAGE_NAME: appview
DOCKERFILE: ./Dockerfile.appview
steps:
- name: Build image archive
command: |
set -e
mkdir -p /artifacts
buildah bud \
--tag "${IMAGE_NAME}:${TANGLED_ARCHITECTURE}" \
--file "${DOCKERFILE}" \
.
buildah push \
"${IMAGE_NAME}:${TANGLED_ARCHITECTURE}" \
"oci-archive:/artifacts/${IMAGE_NAME}.tar"
final:
architecture: amd64
image: quay.io/buildah/stable:latest
steps:
- name: Login to registry
command: |
echo "${APP_PASSWORD}" | buildah login \
-u "${IMAGE_USER}" \
--password-stdin \
"${IMAGE_REGISTRY}"
- name: Create and push multi-arch manifest
command: |
set -e
FULL="${IMAGE_REGISTRY}/${IMAGE_USER}/${IMAGE_NAME}"
buildah pull "oci-archive:/artifacts/amd64/${IMAGE_NAME}.tar"
buildah pull "oci-archive:/artifacts/arm64/${IMAGE_NAME}.tar"
buildah manifest create "${FULL}:${TANGLED_REF_NAME}"
buildah manifest add "${FULL}:${TANGLED_REF_NAME}" "${IMAGE_NAME}:amd64"
buildah manifest add "${FULL}:${TANGLED_REF_NAME}" "${IMAGE_NAME}:arm64"
buildah manifest push --all \
"${FULL}:${TANGLED_REF_NAME}" \
"docker://${FULL}:${TANGLED_REF_NAME}"
buildah manifest push --all \
"${FULL}:${TANGLED_REF_NAME}" \
"docker://${FULL}:latest"

View File

@@ -1,155 +1,44 @@
# Tangled Workflow: Release Credential Helper
#
# This workflow builds cross-platform binaries for the credential helper.
# Creates tarballs for curl/bash installation and provides instructions
# for updating the Homebrew formula.
# Builds cross-platform binaries using GoReleaser and publishes
# artifacts to the repo owner's PDS as sh.tangled.repo.artifact records.
#
# Triggers on version tags (v*) pushed to the repository.
#
# Required secrets: PUBLISH_APP_PASSWORD (ATProto app password for artifact publishing)
when:
- event: ["manual"]
- event: ["push"]
tag: ["v*"]
engine: "nixery"
dependencies:
nixpkgs:
- go_1_24 # Go 1.24+ for building
- goreleaser # For building multi-platform binaries
- curl # Required by go generate for downloading vendor assets
- gnugrep # Required for tag detection
- gnutar # Required for creating tarballs
- gzip # Required for compressing tarballs
- coreutils # Required for sha256sum
engine: kubernetes
image: golang:1.26-trixie
architecture: amd64
environment:
CGO_ENABLED: "0" # Build static binaries
CGO_ENABLED: "0"
REPO_RKEY: "3m2pjukohu322"
steps:
- name: Get tag for current commit
- name: Install tools
command: |
# Fetch tags (shallow clone doesn't include them by default)
set -e
go install github.com/bluesky-social/goat@latest
# goreleaser v2.14+ requires Go 1.26.1; pinned to v2.13.3 for compatibility.
# NOTE(review): this diff also bumps the image to golang:1.26-trixie — confirm
# whether the pin is still needed now that the image ships Go 1.26.x.
go install github.com/goreleaser/goreleaser/v2@v2.13.3
- name: Build and publish release
command: |
set -e
export PATH="$(go env GOPATH)/bin:$PATH"
git fetch --tags
# Find the tag that points to the current commit
TAG=$(git tag --points-at HEAD | grep -E '^v[0-9]' | head -n1)
# REPO_URL is built from Tangled-provided env vars
export REPO_URL="at://${TANGLED_REPO_DID}/sh.tangled.repo/${REPO_RKEY}"
export APP_PASSWORD="${PUBLISH_APP_PASSWORD}"
if [ -z "$TAG" ]; then
echo "Error: No version tag found for current commit"
echo "Available tags:"
git tag
echo "Current commit:"
git rev-parse HEAD
exit 1
fi
# Log in once here so parallel publish-artifact.sh invocations share the
# same session file instead of racing on goat's auth-session.json.
goat account login -u "${TANGLED_REPO_DID}" -p "${APP_PASSWORD}"
echo "Building version: $TAG"
echo "$TAG" > .version
# Also get the commit hash for reference
COMMIT_HASH=$(git rev-parse HEAD)
echo "Commit: $COMMIT_HASH"
- name: Build binaries with GoReleaser
command: |
VERSION=$(cat .version)
export VERSION
# Build for all platforms using GoReleaser
goreleaser build --clean --snapshot --config .goreleaser.yaml
# List what was built
echo "Built artifacts:"
if [ -d "dist" ]; then
ls -lh dist/
else
echo "Error: dist/ directory was not created by GoReleaser"
exit 1
fi
- name: Package artifacts
command: |
VERSION=$(cat .version)
VERSION_NO_V=${VERSION#v} # Remove 'v' prefix for filenames
cd dist
# Create tarballs for each platform
# GoReleaser creates directories like: credential-helper_{os}_{arch}_v{goversion}
# Darwin x86_64
if [ -d "credential-helper_darwin_amd64_v1" ]; then
tar czf "docker-credential-atcr_${VERSION_NO_V}_Darwin_x86_64.tar.gz" \
-C credential-helper_darwin_amd64_v1 docker-credential-atcr
echo "Created: docker-credential-atcr_${VERSION_NO_V}_Darwin_x86_64.tar.gz"
fi
# Darwin arm64
for dir in credential-helper_darwin_arm64*; do
if [ -d "$dir" ]; then
tar czf "docker-credential-atcr_${VERSION_NO_V}_Darwin_arm64.tar.gz" \
-C "$dir" docker-credential-atcr
echo "Created: docker-credential-atcr_${VERSION_NO_V}_Darwin_arm64.tar.gz"
break
fi
done
# Linux x86_64
if [ -d "credential-helper_linux_amd64_v1" ]; then
tar czf "docker-credential-atcr_${VERSION_NO_V}_Linux_x86_64.tar.gz" \
-C credential-helper_linux_amd64_v1 docker-credential-atcr
echo "Created: docker-credential-atcr_${VERSION_NO_V}_Linux_x86_64.tar.gz"
fi
# Linux arm64
for dir in credential-helper_linux_arm64*; do
if [ -d "$dir" ]; then
tar czf "docker-credential-atcr_${VERSION_NO_V}_Linux_arm64.tar.gz" \
-C "$dir" docker-credential-atcr
echo "Created: docker-credential-atcr_${VERSION_NO_V}_Linux_arm64.tar.gz"
break
fi
done
echo ""
echo "Tarballs ready:"
ls -lh *.tar.gz 2>/dev/null || echo "Warning: No tarballs created"
- name: Generate checksums
command: |
VERSION=$(cat .version)
VERSION_NO_V=${VERSION#v}
cd dist
echo ""
echo "=========================================="
echo "SHA256 Checksums"
echo "=========================================="
echo ""
# Generate checksums file
sha256sum docker-credential-atcr_${VERSION_NO_V}_*.tar.gz 2>/dev/null | tee checksums.txt || echo "No checksums generated"
- name: Next steps
command: |
VERSION=$(cat .version)
echo ""
echo "=========================================="
echo "Release $VERSION is ready!"
echo "=========================================="
echo ""
echo "Distribution tarballs are in: dist/"
echo ""
echo "Next steps:"
echo ""
echo "1. Upload tarballs to your hosting/CDN (or GitHub releases)"
echo ""
echo "2. For Homebrew users, update the formula:"
echo " ./scripts/update-homebrew-formula.sh $VERSION"
echo " # Then update Formula/docker-credential-atcr.rb and push to homebrew-tap"
echo ""
echo "3. For curl/bash installation, users can download directly:"
echo " curl -L <your-cdn>/docker-credential-atcr_<version>_<os>_<arch>.tar.gz | tar xz"
echo " sudo mv docker-credential-atcr /usr/local/bin/"
goreleaser release --clean

View File

@@ -0,0 +1,60 @@
# ATCR Hold Release
# Builds multi-arch (amd64 + arm64) container image and publishes a manifest list.
when:
- event: ["push"]
tag: ["v*"]
engine: kubernetes
image: quay.io/buildah/stable:latest
architecture: [amd64, arm64]
environment:
IMAGE_REGISTRY: atcr.io
IMAGE_USER: atcr.io
IMAGE_NAME: hold
DOCKERFILE: ./Dockerfile.hold
steps:
- name: Build image archive
command: |
set -e
mkdir -p /artifacts
buildah bud \
--tag "${IMAGE_NAME}:${TANGLED_ARCHITECTURE}" \
--file "${DOCKERFILE}" \
.
buildah push \
"${IMAGE_NAME}:${TANGLED_ARCHITECTURE}" \
"oci-archive:/artifacts/${IMAGE_NAME}.tar"
final:
architecture: amd64
image: quay.io/buildah/stable:latest
steps:
- name: Login to registry
command: |
echo "${APP_PASSWORD}" | buildah login \
-u "${IMAGE_USER}" \
--password-stdin \
"${IMAGE_REGISTRY}"
- name: Create and push multi-arch manifest
command: |
set -e
FULL="${IMAGE_REGISTRY}/${IMAGE_USER}/${IMAGE_NAME}"
buildah pull "oci-archive:/artifacts/amd64/${IMAGE_NAME}.tar"
buildah pull "oci-archive:/artifacts/arm64/${IMAGE_NAME}.tar"
buildah manifest create "${FULL}:${TANGLED_REF_NAME}"
buildah manifest add "${FULL}:${TANGLED_REF_NAME}" "${IMAGE_NAME}:amd64"
buildah manifest add "${FULL}:${TANGLED_REF_NAME}" "${IMAGE_NAME}:arm64"
buildah manifest push --all \
"${FULL}:${TANGLED_REF_NAME}" \
"docker://${FULL}:${TANGLED_REF_NAME}"
buildah manifest push --all \
"${FULL}:${TANGLED_REF_NAME}" \
"docker://${FULL}:latest"

View File

@@ -0,0 +1,60 @@
# ATCR Scanner Release
# Builds multi-arch (amd64 + arm64) container image and publishes a manifest list.
when:
- event: ["push"]
tag: ["v*"]
engine: kubernetes
image: quay.io/buildah/stable:latest
architecture: [amd64, arm64]
environment:
IMAGE_REGISTRY: atcr.io
IMAGE_USER: atcr.io
IMAGE_NAME: scanner
DOCKERFILE: ./Dockerfile.scanner
steps:
- name: Build image archive
command: |
set -e
mkdir -p /artifacts
buildah bud \
--tag "${IMAGE_NAME}:${TANGLED_ARCHITECTURE}" \
--file "${DOCKERFILE}" \
.
buildah push \
"${IMAGE_NAME}:${TANGLED_ARCHITECTURE}" \
"oci-archive:/artifacts/${IMAGE_NAME}.tar"
final:
architecture: amd64
image: quay.io/buildah/stable:latest
steps:
- name: Login to registry
command: |
echo "${APP_PASSWORD}" | buildah login \
-u "${IMAGE_USER}" \
--password-stdin \
"${IMAGE_REGISTRY}"
- name: Create and push multi-arch manifest
command: |
set -e
FULL="${IMAGE_REGISTRY}/${IMAGE_USER}/${IMAGE_NAME}"
buildah pull "oci-archive:/artifacts/amd64/${IMAGE_NAME}.tar"
buildah pull "oci-archive:/artifacts/arm64/${IMAGE_NAME}.tar"
buildah manifest create "${FULL}:${TANGLED_REF_NAME}"
buildah manifest add "${FULL}:${TANGLED_REF_NAME}" "${IMAGE_NAME}:amd64"
buildah manifest add "${FULL}:${TANGLED_REF_NAME}" "${IMAGE_NAME}:arm64"
buildah manifest push --all \
"${FULL}:${TANGLED_REF_NAME}" \
"docker://${FULL}:${TANGLED_REF_NAME}"
buildah manifest push --all \
"${FULL}:${TANGLED_REF_NAME}" \
"docker://${FULL}:latest"

View File

@@ -1,44 +0,0 @@
# ATCR Release Pipeline for Tangled.org
# Triggers on version tags and builds cross-platform binaries using buildah
when:
- event: ["push"]
tag: ["v*"]
engine: kubernetes
image: quay.io/buildah/stable:latest
architecture: amd64
environment:
IMAGE_REGISTRY: atcr.io
IMAGE_USER: atcr.io
steps:
- name: Login to registry
command: |
echo "${APP_PASSWORD}" | buildah login \
-u "${IMAGE_USER}" \
--password-stdin \
${IMAGE_REGISTRY}
- name: Build and push AppView image
command: |
buildah bud \
--tag ${IMAGE_REGISTRY}/${IMAGE_USER}/appview:${TANGLED_REF_NAME} \
--tag ${IMAGE_REGISTRY}/${IMAGE_USER}/appview:latest \
--file ./Dockerfile.appview \
.
buildah push \
${IMAGE_REGISTRY}/${IMAGE_USER}/appview:latest
- name: Build and push Hold image
command: |
buildah bud \
--tag ${IMAGE_REGISTRY}/${IMAGE_USER}/hold:${TANGLED_REF_NAME} \
--tag ${IMAGE_REGISTRY}/${IMAGE_USER}/hold:latest \
--file ./Dockerfile.hold \
.
buildah push \
${IMAGE_REGISTRY}/${IMAGE_USER}/hold:latest

View File

@@ -5,7 +5,7 @@ when:
branch: ["main"]
engine: kubernetes
image: golang:1.25-trixie
image: golang:1.26-trixie
architecture: amd64
steps:

View File

@@ -1,6 +1,6 @@
# Production build for ATCR AppView
# Result: ~30MB scratch image with static binary
FROM docker.io/golang:1.25.7-trixie AS builder
FROM docker.io/golang:1.26.2-trixie AS builder
ENV DEBIAN_FRONTEND=noninteractive
@@ -18,8 +18,13 @@ COPY . .
RUN npm ci
RUN go generate ./...
# Legal "Last updated" dates — pass from host (see Makefile docker-appview
# target). Empty falls back to the hardcoded default in legal.go.
ARG PRIVACY_DATE=""
ARG TERMS_DATE=""
RUN CGO_ENABLED=1 go build \
-ldflags="-s -w -linkmode external -extldflags '-static'" \
-ldflags="-s -w -linkmode external -extldflags '-static' -X 'atcr.io/pkg/appview/handlers.privacyLastUpdated=${PRIVACY_DATE}' -X 'atcr.io/pkg/appview/handlers.termsLastUpdated=${TERMS_DATE}'" \
-tags sqlite_omit_load_extension \
-trimpath \
-o atcr-appview ./cmd/appview

View File

@@ -1,7 +1,7 @@
# Development image with Air hot reload
# Build: docker build -f Dockerfile.dev -t atcr-dev .
# Run: docker run -v $(pwd):/app -p 5000:5000 atcr-dev
FROM docker.io/golang:1.25.7-trixie
FROM docker.io/golang:1.26.2-trixie
ARG AIR_CONFIG=.air.toml

View File

@@ -1,4 +1,4 @@
FROM docker.io/golang:1.25.7-trixie AS builder
FROM docker.io/golang:1.26.2-trixie AS builder
# Build argument to enable Stripe billing integration
# Usage: docker build --build-arg BILLING_ENABLED=true -f Dockerfile.hold .

View File

@@ -1,4 +1,4 @@
FROM docker.io/golang:1.25.7-trixie AS builder
FROM docker.io/golang:1.26.2-trixie AS builder
ENV DEBIAN_FRONTEND=noninteractive

View File

@@ -4,28 +4,28 @@
class DockerCredentialAtcr < Formula
desc "Docker credential helper for ATCR (ATProto Container Registry)"
homepage "https://atcr.io"
version "0.0.1"
version "0.1.2"
license "MIT"
on_macos do
on_arm do
url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v0.0.1/download/docker-credential-atcr_0.0.1_Darwin_arm64.tar.gz"
sha256 "REPLACE_WITH_SHA256"
url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v#{version}/download/docker-credential-atcr_#{version}_Darwin_arm64.tar.gz"
sha256 "938f7de369c1a93f8275b91f238ae6ffca2f304f5bd236208b3398574c853b52"
end
on_intel do
url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v0.0.1/download/docker-credential-atcr_0.0.1_Darwin_x86_64.tar.gz"
sha256 "REPLACE_WITH_SHA256"
url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v#{version}/download/docker-credential-atcr_#{version}_Darwin_x86_64.tar.gz"
sha256 "3ce955317f52b049ee6c6348c0f758d36a0ed4d52f60f96b20d68798b5830c88"
end
end
on_linux do
on_arm do
url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v0.0.1/download/docker-credential-atcr_0.0.1_Linux_arm64.tar.gz"
sha256 "REPLACE_WITH_SHA256"
url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v#{version}/download/docker-credential-atcr_#{version}_Linux_arm64.tar.gz"
sha256 "be951a136a79be39935ff57f561cfc8f714fa6abfa1b0f81f9ef383e37e6278f"
end
on_intel do
url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v0.0.1/download/docker-credential-atcr_0.0.1_Linux_x86_64.tar.gz"
sha256 "REPLACE_WITH_SHA256"
url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v#{version}/download/docker-credential-atcr_#{version}_Linux_x86_64.tar.gz"
sha256 "08965d9177ab993c229e20c1c3a9675caa20bae795c57b37eb9c72127360d4c7"
end
end
@@ -39,21 +39,21 @@ class DockerCredentialAtcr < Formula
def caveats
<<~EOS
To configure Docker to use ATCR credential helper, add the following
to your ~/.docker/config.json:
To finish setup, run:
docker-credential-atcr configure-docker
This writes the credential helper entry to ~/.docker/config.json. Or
add manually:
{
"credHelpers": {
"atcr.io": "atcr"
}
}
Or run: docker-credential-atcr configure-docker
To authenticate with ATCR:
Then push an image to trigger device authorization:
docker push atcr.io/<your-handle>/<image>:latest
Configuration is stored in: ~/.atcr/config.json
Credentials live in ~/.atcr/device.json (0600).
EOS
end
end

View File

@@ -8,13 +8,13 @@ The ATCR credential helper enables Docker to authenticate with ATCR registries u
**Linux/macOS:**
```bash
curl -fsSL https://atcr.io/install.sh | bash
curl -fsSL https://atcr.io/static/install.sh | bash
```
Or download and run manually:
```bash
curl -fsSLO https://atcr.io/install.sh
curl -fsSLO https://atcr.io/static/install.sh
chmod +x install.sh
./install.sh
```
@@ -22,7 +22,7 @@ chmod +x install.sh
Custom installation directory:
```bash
INSTALL_DIR=$HOME/.local/bin curl -fsSL https://atcr.io/install.sh | bash
INSTALL_DIR=$HOME/.local/bin curl -fsSL https://atcr.io/static/install.sh | bash
```
**Windows (PowerShell as Administrator):**
@@ -40,8 +40,9 @@ Invoke-WebRequest -Uri https://atcr.io/install.ps1 -OutFile install.ps1
### Using Homebrew (macOS and Linux)
```bash
# Add the ATCR tap
brew tap atcr-io/tap
# Add the ATCR tap (the main repo itself serves as the tap).
# DID-based URL is stable across any future handle rename.
brew tap atcr/tap https://tangled.org/did:plc:pddp4xt5lgnv2qsegbzzs4xg/at-container-registry
# Install the credential helper
brew install docker-credential-atcr
@@ -55,7 +56,7 @@ Homebrew will automatically download the correct binary for your platform.
### Manual Installation
1. **Download the binary** for your platform from [GitHub Releases](https://github.com/atcr-io/atcr/releases)
1. **Download the binary** for your platform from [Tangled tags](https://tangled.org/evan.jarrett.net/at-container-registry/tags)
- Linux amd64: `docker-credential-atcr_VERSION_Linux_x86_64.tar.gz`
- Linux arm64: `docker-credential-atcr_VERSION_Linux_arm64.tar.gz`
@@ -84,13 +85,6 @@ Homebrew will automatically download the correct binary for your platform.
docker-credential-atcr version
```
### From Source (requires Go 1.23+)
```bash
go install atcr.io/cmd/credential-helper@latest
sudo mv $(go env GOPATH)/bin/credential-helper /usr/local/bin/docker-credential-atcr
```
## Configuration
### 1. Configure Docker

View File

@@ -31,10 +31,18 @@ $(GENERATED_ASSETS):
build: build-appview build-hold build-credential-helper ## Build all binaries
# Legal page "Last updated" dates come from the git commit date of the page
# templates. Empty values (e.g., Docker builds without .git) fall back to the
# hardcoded default in legal.go.
LEGAL_PKG := atcr.io/pkg/appview/handlers
PRIVACY_DATE := $(shell git log -1 --format=%cs -- pkg/appview/templates/pages/privacy.html 2>/dev/null)
TERMS_DATE := $(shell git log -1 --format=%cs -- pkg/appview/templates/pages/terms.html 2>/dev/null)
APPVIEW_LDFLAGS := -X '$(LEGAL_PKG).privacyLastUpdated=$(PRIVACY_DATE)' -X '$(LEGAL_PKG).termsLastUpdated=$(TERMS_DATE)'
build-appview: $(GENERATED_ASSETS) ## Build appview binary only
@echo "→ Building appview..."
@mkdir -p bin
go build -o bin/atcr-appview ./cmd/appview
go build -ldflags="$(APPVIEW_LDFLAGS)" -o bin/atcr-appview ./cmd/appview
build-hold: $(GENERATED_ASSETS) ## Build hold binary only
@echo "→ Building hold..."
@@ -69,7 +77,19 @@ test-verbose: ## Run tests with verbose output
.PHONY: check-golangci-lint
check-golangci-lint:
@which golangci-lint > /dev/null || (echo "→ Installing golangci-lint..." && go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest)
@LINT_PKG=github.com/golangci/golangci-lint/v2/cmd/golangci-lint@latest; \
CUR_GO=$$(go version | grep -oE 'go[0-9]+\.[0-9]+' | head -1 | sed 's/^go//'); \
if ! command -v golangci-lint > /dev/null 2>&1; then \
echo "→ Installing golangci-lint..."; \
go install $$LINT_PKG; \
else \
LINT_GO=$$(golangci-lint --version 2>&1 | grep -oE 'built with go[0-9]+\.[0-9]+' | head -1 | sed 's/^built with go//'); \
if [ -n "$$LINT_GO" ] && [ "$$LINT_GO" != "$$CUR_GO" ] && \
[ "$$(printf '%s\n%s\n' $$LINT_GO $$CUR_GO | sort -V | head -1)" = "$$LINT_GO" ]; then \
echo "→ golangci-lint built with go$$LINT_GO but project targets go$$CUR_GO — reinstalling..."; \
go install $$LINT_PKG; \
fi; \
fi
lint: check-golangci-lint ## Run golangci-lint
@echo "→ Running golangci-lint..."
@@ -97,7 +117,10 @@ docker: docker-appview docker-hold docker-scanner ## Build all Docker images
docker-appview: ## Build appview Docker image
@echo "→ Building appview Docker image..."
docker build -f Dockerfile.appview -t atcr.io/atcr.io/appview:latest .
docker build -f Dockerfile.appview \
--build-arg PRIVACY_DATE=$(PRIVACY_DATE) \
--build-arg TERMS_DATE=$(TERMS_DATE) \
-t atcr.io/atcr.io/appview:latest .
docker-hold: ## Build hold Docker image
@echo "→ Building hold Docker image..."

View File

@@ -54,7 +54,7 @@ atcr.io/did:plc:xyz123/myapp:latest
**1. Install credential helper:**
```bash
curl -fsSL https://atcr.io/install.sh | bash
curl -fsSL https://atcr.io/static/install.sh | bash
```
**2. Configure Docker** (add to `~/.docker/config.json`):

View File

@@ -1,12 +1,13 @@
package main
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strconv"
@@ -16,13 +17,10 @@ import (
"github.com/spf13/cobra"
)
// VersionAPIResponse is the response from /api/credential-helper/version
type VersionAPIResponse struct {
Latest string `json:"latest"`
DownloadURLs map[string]string `json:"download_urls"`
Checksums map[string]string `json:"checksums"`
ReleaseNotes string `json:"release_notes,omitempty"`
}
// tangledReleasesBase is the tangled.org path for the credential-helper's
// release repository. /tags/latest issues a 302 redirect to the latest tag,
// and /tags/{version}/download/{artifact-name} serves goreleaser artifacts directly.
const tangledReleasesBase = "https://tangled.org/did:plc:e3kzdezk5gsirzh7eoqplc64"
func newUpdateCmd() *cobra.Command {
cmd := &cobra.Command{
@@ -37,35 +35,23 @@ func newUpdateCmd() *cobra.Command {
func runUpdate(cmd *cobra.Command, args []string) error {
checkOnly, _ := cmd.Flags().GetBool("check")
// Default API URL
apiURL := "https://atcr.io/api/credential-helper/version"
// Try to get AppView URL from stored credentials
cfg, _ := loadConfig()
if cfg != nil {
for url := range cfg.Registries {
apiURL = url + "/api/credential-helper/version"
break
}
}
versionInfo, err := fetchVersionInfo(apiURL)
latest, err := fetchLatestVersion()
if err != nil {
return fmt.Errorf("checking for updates: %w", err)
}
if !isNewerVersion(versionInfo.Latest, version) {
if !isNewerVersion(latest, version) {
fmt.Printf("You're already running the latest version (%s)\n", version)
return nil
}
fmt.Printf("New version available: %s (current: %s)\n", versionInfo.Latest, version)
fmt.Printf("New version available: %s (current: %s)\n", latest, version)
if checkOnly {
return nil
}
if err := performUpdate(versionInfo); err != nil {
if err := performUpdate(latest); err != nil {
return fmt.Errorf("update failed: %w", err)
}
@@ -73,28 +59,42 @@ func runUpdate(cmd *cobra.Command, args []string) error {
return nil
}
// fetchVersionInfo fetches version info from the AppView API
func fetchVersionInfo(apiURL string) (*VersionAPIResponse, error) {
client := &http.Client{
Timeout: 10 * time.Second,
}
// fetchLatestVersion resolves the latest released version by reading the
// redirect Location header of {tangledReleasesBase}/tags/latest.
func fetchLatestVersion() (string, error) {
client := httpClientWithTimeout(10*time.Second, func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
})
resp, err := client.Get(apiURL)
resp, err := client.Get(tangledReleasesBase + "/tags/latest")
if err != nil {
return nil, fmt.Errorf("fetching version info: %w", err)
return "", fmt.Errorf("fetching latest tag: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("version API returned status %d", resp.StatusCode)
switch resp.StatusCode {
case http.StatusMovedPermanently, http.StatusFound, http.StatusSeeOther,
http.StatusTemporaryRedirect, http.StatusPermanentRedirect:
default:
return "", fmt.Errorf("expected redirect from tags/latest, got status %d", resp.StatusCode)
}
var versionInfo VersionAPIResponse
if err := json.NewDecoder(resp.Body).Decode(&versionInfo); err != nil {
return nil, fmt.Errorf("parsing version info: %w", err)
location := resp.Header.Get("Location")
if location == "" {
return "", fmt.Errorf("tags/latest returned redirect with no Location header")
}
return &versionInfo, nil
u, err := url.Parse(location)
if err != nil {
return "", fmt.Errorf("parsing redirect location %q: %w", location, err)
}
tag := path.Base(u.Path)
if !strings.HasPrefix(tag, "v") {
return "", fmt.Errorf("unexpected tag in redirect location %q", location)
}
return tag, nil
}
// isNewerVersion compares two version strings (simple semver comparison)
@@ -130,21 +130,30 @@ func isNewerVersion(newVersion, currentVersion string) bool {
return len(newParts) > len(curParts)
}
// getPlatformKey returns the platform key for the current OS/arch
func getPlatformKey() string {
return fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)
// goreleaserArchiveName returns the archive filename goreleaser publishes for
// the given version and the current platform. The naming template lives in
// .goreleaser.yaml: docker-credential-atcr_{Version}_{Title(OS)}_{Arch} with
// amd64→x86_64 and 386→i386.
func goreleaserArchiveName(version string) string {
versionNoV := strings.TrimPrefix(version, "v")
os := strings.ToUpper(runtime.GOOS[:1]) + runtime.GOOS[1:]
arch := runtime.GOARCH
switch arch {
case "amd64":
arch = "x86_64"
case "386":
arch = "i386"
}
return fmt.Sprintf("docker-credential-atcr_%s_%s_%s.tar.gz", versionNoV, os, arch)
}
// performUpdate downloads and installs the new version
func performUpdate(versionInfo *VersionAPIResponse) error {
platformKey := getPlatformKey()
downloadURL, ok := versionInfo.DownloadURLs[platformKey]
if !ok {
return fmt.Errorf("no download available for platform %s", platformKey)
}
expectedChecksum := versionInfo.Checksums[platformKey]
func performUpdate(latest string) error {
filename := goreleaserArchiveName(latest)
downloadURL := fmt.Sprintf("%s/tags/%s/download/%s", tangledReleasesBase, latest, filename)
fmt.Printf("Downloading update from %s...\n", downloadURL)
@@ -155,34 +164,17 @@ func performUpdate(versionInfo *VersionAPIResponse) error {
defer os.RemoveAll(tmpDir)
archivePath := filepath.Join(tmpDir, "archive.tar.gz")
if strings.HasSuffix(downloadURL, ".zip") {
archivePath = filepath.Join(tmpDir, "archive.zip")
}
if err := downloadFile(downloadURL, archivePath); err != nil {
return fmt.Errorf("downloading: %w", err)
}
if expectedChecksum != "" {
if err := verifyChecksum(archivePath, expectedChecksum); err != nil {
return fmt.Errorf("checksum verification failed: %w", err)
}
fmt.Println("Checksum verified.")
}
binaryPath := filepath.Join(tmpDir, "docker-credential-atcr")
if runtime.GOOS == "windows" {
binaryPath += ".exe"
}
if strings.HasSuffix(archivePath, ".zip") {
if err := extractZip(archivePath, tmpDir); err != nil {
return fmt.Errorf("extracting archive: %w", err)
}
} else {
if err := extractTarGz(archivePath, tmpDir); err != nil {
return fmt.Errorf("extracting archive: %w", err)
}
if err := extractTarGz(archivePath, tmpDir); err != nil {
return fmt.Errorf("extracting archive: %w", err)
}
currentPath, err := os.Executable()
@@ -224,7 +216,7 @@ func performUpdate(versionInfo *VersionAPIResponse) error {
// downloadFile downloads a file from a URL to a local path
func downloadFile(url, destPath string) error {
resp, err := http.Get(url) //nolint:gosec
resp, err := httpClient().Get(url) //nolint:gosec
if err != nil {
return err
}
@@ -244,15 +236,6 @@ func downloadFile(url, destPath string) error {
return err
}
// verifyChecksum verifies the SHA256 checksum of a file
func verifyChecksum(filePath, expected string) error {
if expected == "" {
return nil
}
// Checksums are optional until configured
return nil
}
// extractTarGz extracts a .tar.gz archive
func extractTarGz(archivePath, destDir string) error {
cmd := exec.Command("tar", "-xzf", archivePath, "-C", destDir)
@@ -262,15 +245,6 @@ func extractTarGz(archivePath, destDir string) error {
return nil
}
// extractZip extracts a .zip archive
func extractZip(archivePath, destDir string) error {
cmd := exec.Command("unzip", "-o", archivePath, "-d", destDir)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("unzip failed: %s: %w", string(output), err)
}
return nil
}
// copyFile copies a file from src to dst
func copyFile(src, dst string) error {
input, err := os.ReadFile(src)

View File

@@ -57,7 +57,7 @@ func requestDeviceCode(serverURL string) (*DeviceCodeResponse, string, error) {
deviceName := hostname()
reqBody, _ := json.Marshal(DeviceCodeRequest{DeviceName: deviceName})
resp, err := http.Post(appViewURL+"/auth/device/code", "application/json", bytes.NewReader(reqBody))
resp, err := httpClient().Post(appViewURL+"/auth/device/code", "application/json", bytes.NewReader(reqBody))
if err != nil {
return nil, appViewURL, fmt.Errorf("failed to request device code: %w", err)
}
@@ -88,7 +88,7 @@ func pollDeviceToken(appViewURL string, codeResp *DeviceCodeResponse) (*Account,
time.Sleep(pollInterval)
tokenReqBody, _ := json.Marshal(DeviceTokenRequest{DeviceCode: codeResp.DeviceCode})
tokenResp, err := http.Post(appViewURL+"/auth/device/token", "application/json", bytes.NewReader(tokenReqBody))
tokenResp, err := httpClient().Post(appViewURL+"/auth/device/token", "application/json", bytes.NewReader(tokenReqBody))
if err != nil {
continue
}
@@ -120,9 +120,7 @@ func pollDeviceToken(appViewURL string, codeResp *DeviceCodeResponse) (*Account,
// validateCredentials checks if the credentials are still valid by making a test request
func validateCredentials(appViewURL, handle, deviceSecret string) ValidationResult {
client := &http.Client{
Timeout: 5 * time.Second,
}
client := httpClientWithTimeout(5*time.Second, nil)
tokenURL := appViewURL + "/auth/token?service=" + appViewURL

View File

@@ -0,0 +1,75 @@
package main
import (
"fmt"
"net/http"
"runtime"
"sync"
"time"
)
// userAgent builds the User-Agent value sent on every outgoing HTTP request:
//
//	docker-credential-atcr/<version> (<os>/<arch>; commit <short>)
//
// The shape follows the convention Docker's own clients use, so server-side
// log analyzers parse it with their existing regexes. The commit hash is cut
// to 7 characters (git's short form) so two devices on the same version line
// can still be told apart on the device-approval page.
func userAgent() string {
	shortCommit := commit
	if len(shortCommit) > 7 {
		shortCommit = shortCommit[:7]
	}
	return fmt.Sprintf("docker-credential-atcr/%s (%s/%s; commit %s)",
		version, runtime.GOOS, runtime.GOARCH, shortCommit)
}
// uaTransport decorates another RoundTripper so every request leaving the
// helper carries our User-Agent header unless the caller already set one.
// Installing this as the shared client's transport means future call sites
// cannot forget to set the UA.
type uaTransport struct {
	base http.RoundTripper
}

// RoundTrip implements http.RoundTripper.
func (t *uaTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	next := t.base
	if next == nil {
		next = http.DefaultTransport
	}
	if req.Header.Get("User-Agent") != "" {
		// Caller chose its own UA; pass the request through untouched.
		return next.RoundTrip(req)
	}
	// Never mutate the caller's request: net/http may retry it and the
	// caller could be using the same *Request elsewhere. Clone first.
	withUA := req.Clone(req.Context())
	withUA.Header.Set("User-Agent", userAgent())
	return next.RoundTrip(withUA)
}
// newSharedClient constructs the single UA-tagged client all call sites share.
func newSharedClient() *http.Client {
	return &http.Client{
		Transport: &uaTransport{base: http.DefaultTransport},
	}
}

// sharedHTTPClient lazily builds the process-wide client exactly once.
var sharedHTTPClient = sync.OnceValue(newSharedClient)

// httpClient returns the shared UA-tagged http.Client used for all of the
// helper's outgoing HTTP requests. It deliberately carries no per-request
// timeout — call sites that want one should use httpClientWithTimeout.
func httpClient() *http.Client {
	return sharedHTTPClient()
}
// httpClientWithTimeout returns a fresh client that reuses the shared
// transport — preserving connection pooling and the User-Agent header —
// while scoping a per-client timeout. checkRedirect, when non-nil, is
// installed as the redirect policy for callers such as fetchLatestVersion
// that need to inspect a redirect rather than follow it.
func httpClientWithTimeout(timeout time.Duration, checkRedirect func(*http.Request, []*http.Request) error) *http.Client {
	c := &http.Client{
		Transport:     sharedHTTPClient().Transport,
		Timeout:       timeout,
		CheckRedirect: checkRedirect,
	}
	return c
}

View File

@@ -0,0 +1,70 @@
package main
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
)
// TestUserAgent_Format pins the product prefix, the 7-char commit
// truncation, and that Go's default client UA string never leaks through.
func TestUserAgent_Format(t *testing.T) {
	saved := commit
	commit = "abc1234deadbeef"
	t.Cleanup(func() { commit = saved })

	ua := userAgent()
	check := func(ok bool, format string, args ...any) {
		t.Helper()
		if !ok {
			t.Errorf(format, args...)
		}
	}
	check(strings.HasPrefix(ua, "docker-credential-atcr/"), "UA missing product prefix: %q", ua)
	check(strings.Contains(ua, "commit abc1234)"), "UA should truncate commit to 7 chars, got %q", ua)
	check(!strings.Contains(ua, "Go-http-client"), "UA leaked default Go client string: %q", ua)
}
// TestHTTPClient_SetsUserAgent verifies the shared client stamps our UA on
// requests that don't carry one.
func TestHTTPClient_SetsUserAgent(t *testing.T) {
	var seen string
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		seen = r.Header.Get("User-Agent")
		w.WriteHeader(http.StatusNoContent)
	}))
	t.Cleanup(srv.Close)

	resp, err := httpClient().Get(srv.URL)
	if err != nil {
		t.Fatalf("get: %v", err)
	}
	defer resp.Body.Close()

	if want := userAgent(); seen != want {
		t.Errorf("server saw User-Agent %q, want %q", seen, want)
	}
}
// TestHTTPClient_RespectsExplicitUserAgent verifies a caller-supplied UA is
// never overwritten by the transport.
func TestHTTPClient_RespectsExplicitUserAgent(t *testing.T) {
	var seen string
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		seen = r.Header.Get("User-Agent")
		w.WriteHeader(http.StatusNoContent)
	}))
	t.Cleanup(srv.Close)

	req, err := http.NewRequest(http.MethodGet, srv.URL, nil)
	if err != nil {
		t.Fatalf("new request: %v", err)
	}
	req.Header.Set("User-Agent", "explicit-test/1.0")

	resp, err := httpClient().Do(req)
	if err != nil {
		t.Fatalf("do: %v", err)
	}
	defer resp.Body.Close()

	if seen != "explicit-test/1.0" {
		t.Errorf("explicit UA was overwritten: got %q", seen)
	}
}

View File

@@ -105,7 +105,7 @@ func runGet(cmd *cobra.Command, args []string) error {
}
// Check for updates (cached, non-blocking)
checkAndNotifyUpdate(appViewURL)
checkAndNotifyUpdate()
// Return credentials for Docker
creds := Credentials{
@@ -200,7 +200,7 @@ func runList(cmd *cobra.Command, args []string) error {
}
// checkAndNotifyUpdate checks for updates in the background and notifies the user
func checkAndNotifyUpdate(appViewURL string) {
func checkAndNotifyUpdate() {
cache := loadUpdateCheckCache()
if cache != nil && cache.Current == version {
// Cache is fresh and for current version
@@ -214,21 +214,19 @@ func checkAndNotifyUpdate(appViewURL string) {
}
}
// Fetch version info
apiURL := appViewURL + "/api/credential-helper/version"
versionInfo, err := fetchVersionInfo(apiURL)
latest, err := fetchLatestVersion()
if err != nil {
return // Silently fail
}
saveUpdateCheckCache(&UpdateCheckCache{
CheckedAt: timeNow(),
Latest: versionInfo.Latest,
Latest: latest,
Current: version,
})
if isNewerVersion(versionInfo.Latest, version) {
fmt.Fprintf(os.Stderr, "\nUpdate available: %s (current: %s)\n", versionInfo.Latest, version)
if isNewerVersion(latest, version) {
fmt.Fprintf(os.Stderr, "\nUpdate available: %s (current: %s)\n", latest, version)
fmt.Fprintf(os.Stderr, "Run: docker-credential-atcr update\n\n")
}
}

View File

@@ -5,12 +5,11 @@ import (
"fmt"
"log/slog"
"atcr.io/pkg/atproto/did"
"atcr.io/pkg/auth/oauth"
"atcr.io/pkg/hold"
"atcr.io/pkg/hold/pds"
"github.com/bluesky-social/indigo/atproto/atcrypto"
didplc "github.com/did-method-plc/go-didplc"
"github.com/spf13/cobra"
)
@@ -21,144 +20,210 @@ var plcCmd = &cobra.Command{
var plcConfigFile string
var (
plcAddRotationKeyFirst bool
plcAddRotationKeyLast bool
)
var plcAddRotationKeyCmd = &cobra.Command{
Use: "add-rotation-key <multibase-key>",
Use: "add-rotation-key [multibase-key]",
Short: "Add a rotation key to this hold's PLC identity",
Long: `Add an additional rotation key to the hold's did:plc document.
The key must be a multibase-encoded private key (K-256 or P-256, starting with 'z').
If a multibase-encoded private key (K-256 or P-256, starting with 'z') is supplied as
the positional argument, that key is added. If no argument is given, a fresh K-256
keypair is generated and the private half is printed to stdout. Save it offline as
your recovery key, since it will not be shown again.
By default the new key is inserted at the highest priority position (--first), which
allows it to override ops signed by lower-priority keys within PLC's 72-hour recovery
window. Pass --last to append at the lowest priority instead.
The hold's configured rotation key is used to sign the PLC update.
atcr-hold plc add-rotation-key --config config.yaml z...`,
Args: cobra.ExactArgs(1),
atcr-hold plc add-rotation-key --config config.yaml # generate + print
atcr-hold plc add-rotation-key --config config.yaml --last # append, low priority
atcr-hold plc add-rotation-key --config config.yaml z... # use supplied key`,
Args: cobra.MaximumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
firstSet := cmd.Flags().Changed("first")
lastSet := cmd.Flags().Changed("last")
if firstSet && lastSet {
return fmt.Errorf("--first and --last are mutually exclusive")
}
prepend := !plcAddRotationKeyLast
cfg, err := hold.LoadConfig(plcConfigFile)
if err != nil {
return fmt.Errorf("failed to load config: %w", err)
}
if cfg.Database.DIDMethod != "plc" {
return fmt.Errorf("this command only works with did:plc (database.did_method is %q)", cfg.Database.DIDMethod)
}
ctx := context.Background()
// Resolve the hold's DID
holdDID, err := pds.LoadOrCreateDID(ctx, pds.DIDConfig{
DID: cfg.Database.DID,
DIDMethod: cfg.Database.DIDMethod,
PublicURL: cfg.Server.PublicURL,
DBPath: cfg.Database.Path,
SigningKeyPath: cfg.Database.KeyPath,
RotationKey: cfg.Database.RotationKey,
PLCDirectoryURL: cfg.Database.PLCDirectoryURL,
})
holdDID, rotationKey, signingKey, err := loadHoldPLCIdentity(ctx, cfg)
if err != nil {
return fmt.Errorf("failed to resolve hold DID: %w", err)
return err
}
// Parse the rotation key from config (required for signing PLC updates)
if cfg.Database.RotationKey == "" {
return fmt.Errorf("database.rotation_key must be set to sign PLC updates")
}
rotationKey, err := atcrypto.ParsePrivateMultibase(cfg.Database.RotationKey)
if err != nil {
return fmt.Errorf("failed to parse rotation_key from config: %w", err)
}
// Parse the new key to add (K-256 or P-256)
newKey, err := atcrypto.ParsePrivateMultibase(args[0])
if err != nil {
return fmt.Errorf("failed to parse key argument: %w", err)
}
newKeyPub, err := newKey.PublicKey()
if err != nil {
return fmt.Errorf("failed to get public key from argument: %w", err)
}
newKeyDIDKey := newKeyPub.DIDKey()
// Load signing key for verification methods
keyPath := cfg.Database.KeyPath
if keyPath == "" {
keyPath = cfg.Database.Path + "/signing.key"
}
signingKey, err := oauth.GenerateOrLoadPDSKey(keyPath)
if err != nil {
return fmt.Errorf("failed to load signing key: %w", err)
}
// Fetch current PLC state
plcDirectoryURL := cfg.Database.PLCDirectoryURL
if plcDirectoryURL == "" {
plcDirectoryURL = "https://plc.directory"
}
client := &didplc.Client{DirectoryURL: plcDirectoryURL}
opLog, err := client.OpLog(ctx, holdDID)
if err != nil {
return fmt.Errorf("failed to fetch PLC op log: %w", err)
}
if len(opLog) == 0 {
return fmt.Errorf("empty op log for %s", holdDID)
}
lastEntry := opLog[len(opLog)-1]
lastOp := lastEntry.Regular
if lastOp == nil {
return fmt.Errorf("last PLC operation is not a regular op")
}
// Check if key already present
for _, k := range lastOp.RotationKeys {
if k == newKeyDIDKey {
fmt.Printf("Key %s is already a rotation key for %s\n", newKeyDIDKey, holdDID)
return nil
var newKey atcrypto.PrivateKeyExportable
if len(args) == 1 {
newKey, err = atcrypto.ParsePrivateMultibase(args[0])
if err != nil {
return fmt.Errorf("failed to parse key argument: %w", err)
}
}
// Build updated rotation keys: keep existing, append new
rotationKeys := make([]string, len(lastOp.RotationKeys))
copy(rotationKeys, lastOp.RotationKeys)
rotationKeys = append(rotationKeys, newKeyDIDKey)
// Build update: preserve everything else from current state
sigPub, err := signingKey.PublicKey()
res, err := did.AddRotationKey(ctx, did.AddRotationKeyOptions{
DID: holdDID,
PLCDirectoryURL: cfg.Database.PLCDirectoryURL,
RotationKey: rotationKey,
SigningKey: signingKey,
VerificationKeyName: "atproto",
NewKey: newKey,
Prepend: prepend,
})
if err != nil {
return fmt.Errorf("failed to get signing public key: %w", err)
return err
}
prevCID := lastEntry.AsOperation().CID().String()
op := &didplc.RegularOp{
Type: "plc_operation",
RotationKeys: rotationKeys,
VerificationMethods: map[string]string{
"atproto": sigPub.DIDKey(),
},
AlsoKnownAs: lastOp.AlsoKnownAs,
Services: lastOp.Services,
Prev: &prevCID,
if res.AlreadyPresent {
fmt.Printf("Key %s is already a rotation key for %s (priority %d of %d)\n",
res.NewKeyDIDKey, holdDID, res.ExistingAt, res.TotalKeys)
return nil
}
if err := op.Sign(rotationKey); err != nil {
return fmt.Errorf("failed to sign PLC update: %w", err)
}
if err := client.Submit(ctx, holdDID, op); err != nil {
return fmt.Errorf("failed to submit PLC update: %w", err)
if res.Generated {
fmt.Println("=========================================================================")
fmt.Println("GENERATED NEW ROTATION KEY. SAVE THIS NOW. IT WILL NOT BE SHOWN AGAIN.")
fmt.Println("Store it offline (password manager, paper, hardware token).")
fmt.Println()
fmt.Printf("Private key (multibase): %s\n", res.NewKey.Multibase())
fmt.Printf("Public key (did:key): %s\n", res.NewKeyDIDKey)
fmt.Println("=========================================================================")
}
slog.Info("Added rotation key to PLC identity",
"did", holdDID,
"new_key", newKeyDIDKey,
"total_rotation_keys", len(rotationKeys),
"new_key", res.NewKeyDIDKey,
"priority", res.InsertedAt,
"total_rotation_keys", res.TotalKeys,
"generated", res.Generated,
)
fmt.Printf("Added rotation key %s to %s\n", newKeyDIDKey, holdDID)
fmt.Printf("Added rotation key %s to %s (priority %d of %d)\n",
res.NewKeyDIDKey, holdDID, res.InsertedAt, res.TotalKeys)
return nil
},
}
// plcListRotationKeysCmd prints the rotation keys from the hold's did:plc
// document in priority order, tagging the key that matches the locally
// configured database.rotation_key as LOCAL.
var plcListRotationKeysCmd = &cobra.Command{
	Use:   "list-rotation-keys",
	Short: "List rotation keys in this hold's PLC document",
	Long: `Fetch the hold's did:plc document from the PLC directory and print its
rotation keys in priority order (index 0 is highest priority and can override
ops signed by lower-priority keys within PLC's 72-hour recovery window).
The key matching the local database.rotation_key is marked as LOCAL.`,
	Args: cobra.NoArgs,
	RunE: func(cmd *cobra.Command, args []string) error {
		cfg, err := hold.LoadConfig(plcConfigFile)
		if err != nil {
			return fmt.Errorf("failed to load config: %w", err)
		}
		// Rotation keys only exist for PLC-managed identities.
		if cfg.Database.DIDMethod != "plc" {
			return fmt.Errorf("this command only works with did:plc (database.did_method is %q)", cfg.Database.DIDMethod)
		}
		ctx := context.Background()
		holdDID, err := did.LoadOrCreate(ctx, cfg.DIDConfig())
		if err != nil {
			return fmt.Errorf("failed to resolve hold DID: %w", err)
		}
		// Unlike add-rotation-key, the local rotation key is optional here:
		// it is only used to mark the matching PLC key as LOCAL in the output.
		var localRotationKey atcrypto.PrivateKey
		if cfg.Database.RotationKey != "" {
			localRotationKey, err = atcrypto.ParsePrivateMultibase(cfg.Database.RotationKey)
			if err != nil {
				return fmt.Errorf("failed to parse rotation_key from config: %w", err)
			}
		}
		res, err := did.ListRotationKeys(ctx, did.ListRotationKeysOptions{
			DID:              holdDID,
			PLCDirectoryURL:  cfg.Database.PLCDirectoryURL,
			LocalRotationKey: localRotationKey,
		})
		if err != nil {
			return err
		}
		printRotationKeys(res)
		return nil
	},
}
// loadHoldPLCIdentity resolves the hold's DID and loads the rotation and
// signing keys PLC subcommands need. It requires database.rotation_key to be
// set, since without it a PLC update cannot be signed. (Commands that treat
// the rotation key as optional, like list-rotation-keys, load it themselves.)
func loadHoldPLCIdentity(ctx context.Context, cfg *hold.Config) (string, atcrypto.PrivateKey, *atcrypto.PrivateKeyK256, error) {
	holdDID, err := did.LoadOrCreate(ctx, cfg.DIDConfig())
	if err != nil {
		return "", nil, nil, fmt.Errorf("failed to resolve hold DID: %w", err)
	}

	if cfg.Database.RotationKey == "" {
		return "", nil, nil, fmt.Errorf("database.rotation_key must be set to sign PLC updates")
	}
	rotKey, err := atcrypto.ParsePrivateMultibase(cfg.Database.RotationKey)
	if err != nil {
		return "", nil, nil, fmt.Errorf("failed to parse rotation_key from config: %w", err)
	}

	// Default the signing-key location to a file beside the database.
	signingKeyPath := cfg.Database.KeyPath
	if signingKeyPath == "" {
		signingKeyPath = cfg.Database.Path + "/signing.key"
	}
	signKey, err := oauth.GenerateOrLoadPDSKey(signingKeyPath)
	if err != nil {
		return "", nil, nil, fmt.Errorf("failed to load signing key: %w", err)
	}
	return holdDID, rotKey, signKey, nil
}
// printRotationKeys renders a ListRotationKeysResult for `list-rotation-keys`:
// one line per key in priority order, with a priority marker, a LOCAL tag on
// the key matching database.rotation_key, and a trailing warning when the
// local key is absent from the PLC document entirely.
func printRotationKeys(res *did.ListRotationKeysResult) {
	fmt.Printf("DID: %s\n", res.DID)
	fmt.Printf("PLC directory: %s\n", res.Directory)
	fmt.Printf("Rotation keys (%d):\n", len(res.Keys))

	last := len(res.Keys) - 1
	for i, key := range res.Keys {
		var marker string
		if i == 0 && last == 0 {
			marker = "(only key)"
		} else if i == 0 {
			marker = "(highest priority)"
		} else if i == last {
			marker = "(lowest priority)"
		}
		localTag := ""
		if res.LocalDIDKey != "" && key == res.LocalDIDKey {
			localTag = " [LOCAL — database.rotation_key]"
		}
		fmt.Printf(" [%d] %s %s%s\n", i, key, marker, localTag)
	}

	if res.LocalDIDKey != "" && !res.LocalPresent {
		fmt.Printf("\nWARNING: local rotation_key (%s) is NOT present in the PLC document.\n", res.LocalDIDKey)
		fmt.Println("This service cannot sign PLC updates. Possible compromise or out-of-band rotation.")
	}
}
// init wires the plc subcommands and their flags into the plc command tree.
func init() {
	plcCmd.AddCommand(plcAddRotationKeyCmd)
	plcCmd.AddCommand(plcListRotationKeysCmd)
	plcCmd.PersistentFlags().StringVarP(&plcConfigFile, "config", "c", "", "path to YAML configuration file")
	addFlags := plcAddRotationKeyCmd.Flags()
	addFlags.BoolVar(&plcAddRotationKeyFirst, "first", true, "insert at highest priority (default)")
	addFlags.BoolVar(&plcAddRotationKeyLast, "last", false, "insert at lowest priority")
}

View File

@@ -6,6 +6,7 @@ import (
"log/slog"
"os"
"atcr.io/pkg/atproto/did"
"atcr.io/pkg/hold"
holddb "atcr.io/pkg/hold/db"
"atcr.io/pkg/hold/pds"
@@ -39,7 +40,7 @@ The CAR is written to stdout, so redirect to a file:
}
defer cleanup()
if err := holdPDS.ExportToCAR(ctx, os.Stdout); err != nil {
if err := holdPDS.RepomgrRef().ReadRepo(ctx, holdPDS.UID(), "", os.Stdout); err != nil {
return fmt.Errorf("failed to export: %w", err)
}
@@ -105,15 +106,7 @@ func init() {
// openHoldPDS creates a HoldPDS from config for offline CLI operations.
// Returns the PDS and a cleanup function that must be deferred.
func openHoldPDS(ctx context.Context, cfg *hold.Config) (*pds.HoldPDS, func(), error) {
holdDID, err := pds.LoadOrCreateDID(ctx, pds.DIDConfig{
DID: cfg.Database.DID,
DIDMethod: cfg.Database.DIDMethod,
PublicURL: cfg.Server.PublicURL,
DBPath: cfg.Database.Path,
SigningKeyPath: cfg.Database.KeyPath,
RotationKey: cfg.Database.RotationKey,
PLCDirectoryURL: cfg.Database.PLCDirectoryURL,
})
holdDID, err := did.LoadOrCreate(ctx, cfg.DIDConfig())
if err != nil {
return nil, nil, fmt.Errorf("failed to resolve hold DID: %w", err)
}

68
cmd/hold/scan_backfill.go Normal file
View File

@@ -0,0 +1,68 @@
package main
import (
"context"
"fmt"
"atcr.io/pkg/hold"
"github.com/spf13/cobra"
)
// scanBackfillConfigFile holds the --config flag value for scan-backfill.
var scanBackfillConfigFile string

// scanBackfillCmd is an offline maintenance command that rewrites legacy
// io.atcr.hold.scan records (written before the status field existed) so
// every record carries an explicit status. Idempotent; see Long for details.
var scanBackfillCmd = &cobra.Command{
	Use:   "scan-backfill",
	Short: "Rewrite legacy scan records to use the status field (offline)",
	Long: `Walks every io.atcr.hold.scan record on this hold and assigns a status
("skipped" or "failed") to records that pre-date the status field.
A legacy record is one with an empty status, no SBOM blob, and zero
vulnerability counts. Layer media types decide the rewrite:
- helm.chart.content / in-toto / dsse.envelope → status="skipped"
- everything else → status="failed"
The tool is idempotent and preserves each record's original scannedAt.
This subcommand opens the hold's CAR store directly, so the running hold
service must be stopped first (otherwise the embedded PDS holds an exclusive
lock). For zero-downtime backfill on a production hold, hit the admin
endpoint POST /admin/api/scan-backfill instead.`,
	Args: cobra.NoArgs,
	RunE: func(cmd *cobra.Command, args []string) error {
		cfg, err := hold.LoadConfig(scanBackfillConfigFile)
		if err != nil {
			return fmt.Errorf("load config: %w", err)
		}
		ctx := context.Background()
		// openHoldPDS opens the CAR store directly — requires the hold
		// service to be stopped (see Long above in the original help text).
		holdPDS, cleanup, err := openHoldPDS(ctx, cfg)
		if err != nil {
			return err
		}
		defer cleanup()
		// Per-record progress goes to stderr so stdout stays a clean summary.
		logf := func(format string, args ...any) {
			fmt.Fprintf(cmd.ErrOrStderr(), "  "+format+"\n", args...)
		}
		res, err := holdPDS.BackfillScanStatus(ctx, logf, nil)
		if err != nil {
			return fmt.Errorf("backfill: %w", err)
		}
		out := cmd.OutOrStdout()
		fmt.Fprintf(out, "Backfill complete:\n")
		fmt.Fprintf(out, "  scanned: %d\n", res.Scanned)
		fmt.Fprintf(out, "  already-tagged: %d\n", res.AlreadyTagged)
		fmt.Fprintf(out, "  → skipped: %d\n", res.MarkedSkipped)
		fmt.Fprintf(out, "  → failed: %d\n", res.MarkedFailed)
		fmt.Fprintf(out, "  rewritten: %d\n", res.Rewritten)
		return nil
	},
}
func init() {
scanBackfillCmd.Flags().StringVarP(&scanBackfillConfigFile, "config", "c", "", "path to YAML configuration file")
rootCmd.AddCommand(scanBackfillCmd)
}

822
cmd/image-advisor/main.go Normal file
View File

@@ -0,0 +1,822 @@
// image-advisor fetches OCI image config, SBOM, and vulnerability data from
// the ATCR system and outputs a structured markdown report suitable for LLM
// analysis of container image improvements.
//
// Usage:
//
// go run ./cmd/image-advisor --url https://seamark.dev/r/therobbiedavis.com/listenarr --tag latest
// go run ./cmd/image-advisor --url https://seamark.dev/r/therobbiedavis.com/listenarr --digest sha256:abc...
// go run ./cmd/image-advisor --url https://seamark.dev/r/therobbiedavis.com/listenarr --platform linux/arm64
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"
"sort"
"strings"
"time"
"atcr.io/pkg/atproto"
)
// OCI config types (full config, not just history)

// ociFullConfig mirrors the OCI image config document: platform fields,
// the container runtime config, per-layer history, and rootfs diff IDs.
type ociFullConfig struct {
	Architecture string             `json:"architecture"`
	OS           string             `json:"os"`
	Config       ociContainerConfig `json:"config"`
	History      []ociHistoryEntry  `json:"history"`
	RootFS       ociRootFS          `json:"rootfs"`
}

// ociContainerConfig is the runtime configuration embedded in the image
// config. The capitalized JSON keys match the OCI image-spec field names.
type ociContainerConfig struct {
	Env          []string            `json:"Env"`
	Cmd          []string            `json:"Cmd"`
	Entrypoint   []string            `json:"Entrypoint"`
	WorkingDir   string              `json:"WorkingDir"`
	ExposedPorts map[string]struct{} `json:"ExposedPorts"`
	Labels       map[string]string   `json:"Labels"`
	User         string              `json:"User"`
	Volumes      map[string]struct{} `json:"Volumes"`
}

// ociHistoryEntry is one entry of the image's layer history; per the OCI
// image spec, EmptyLayer marks steps that produced no filesystem layer.
type ociHistoryEntry struct {
	Created    string `json:"created"`
	CreatedBy  string `json:"created_by"`
	EmptyLayer bool   `json:"empty_layer"`
	Comment    string `json:"comment"`
}

// ociRootFS lists the uncompressed layer digests (diff IDs).
type ociRootFS struct {
	Type    string   `json:"type"`
	DiffIDs []string `json:"diff_ids"`
}
// Grype vulnerability report types

// grypeReport is the subset of grype's JSON vulnerability report this tool
// decodes.
type grypeReport struct {
	Matches []grypeMatch `json:"matches"`
}

// grypeMatch pairs one vulnerability with the package it was matched in.
// NOTE(review): these tags are capitalized; encoding/json matches field
// names case-insensitively, so lowercase keys in the report still decode —
// confirm the actual key casing against a real grype report before changing.
type grypeMatch struct {
	Vulnerability grypeVuln    `json:"Vulnerability"`
	Package       grypePackage `json:"Package"`
}

// grypeVuln carries the vulnerability ID, severity metadata, and fix info.
type grypeVuln struct {
	ID       string        `json:"ID"`
	Metadata grypeMetadata `json:"Metadata"`
	Fix      grypeFix      `json:"Fix"`
}

// grypeMetadata holds the severity rating string.
type grypeMetadata struct {
	Severity string `json:"Severity"`
}

// grypeFix describes any fixed versions and the fix state.
type grypeFix struct {
	Versions []string `json:"Versions"`
	State    string   `json:"State"`
}

// grypePackage identifies the affected package and its ecosystem type.
type grypePackage struct {
	Name    string `json:"Name"`
	Version string `json:"Version"`
	Type    string `json:"Type"`
}
// SPDX SBOM types

// spdxDocument is the subset of an SPDX JSON SBOM this tool reads.
type spdxDocument struct {
	Packages []spdxPackage `json:"packages"`
}

// spdxPackage is one SBOM package entry: identifier, name, version,
// supplier, and the concluded license.
type spdxPackage struct {
	SPDXID           string `json:"SPDXID"`
	Name             string `json:"name"`
	VersionInfo      string `json:"versionInfo"`
	Supplier         string `json:"supplier"`
	LicenseConcluded string `json:"licenseConcluded"`
}
// reportData holds all fetched data for markdown generation
type reportData struct {
	Handle     string
	Repository string
	Tag        string // empty when the lookup was done by --digest
	Digest     string
	Platform   string
	HoldURL    string
	ScannedAt  string // taken from the scan record when available

	Config    *ociFullConfig
	ConfigErr string // non-empty when the config fetch failed

	Layers []atproto.BlobReference // from manifest record

	VulnReport *grypeReport
	VulnErr    string // non-empty when no vulnerability report is available

	ScanRecord *atproto.ScanRecord

	SBOM    *spdxDocument
	SBOMErr string // non-empty when no SBOM is available
}
// main drives the image-advisor pipeline: parse flags, resolve the atproto
// identity and hold DID, resolve tag → manifest digest, select a platform
// from an index manifest if needed, fetch config/scan/SBOM data best-effort,
// and emit the markdown prompt on stdout (progress goes to stderr).
func main() {
	registryURL := flag.String("url", "", "Registry URL (e.g., https://seamark.dev/r/therobbiedavis.com/listenarr)")
	tag := flag.String("tag", "latest", "Image tag to look up")
	digest := flag.String("digest", "", "Manifest digest (overrides --tag)")
	platform := flag.String("platform", "linux/amd64", "Platform to select from manifest index (os/arch)")
	holdURL := flag.String("hold", "https://us-chi1.cove.seamark.dev", "Hold service URL")
	flag.Parse()

	if *registryURL == "" {
		fmt.Fprintln(os.Stderr, "error: --url is required")
		flag.Usage()
		os.Exit(1)
	}

	// One deadline covers every network fetch below.
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	handle, repository, err := parseRegistryURL(*registryURL)
	if err != nil {
		log.Fatalf("Failed to parse URL: %v", err)
	}

	fmt.Fprintf(os.Stderr, "Resolving identity for %s...\n", handle)
	did, resolvedHandle, pdsEndpoint, err := atproto.ResolveIdentity(ctx, handle)
	if err != nil {
		log.Fatalf("Failed to resolve identity %q: %v", handle, err)
	}
	fmt.Fprintf(os.Stderr, "  DID: %s\n  Handle: %s\n  PDS: %s\n", did, resolvedHandle, pdsEndpoint)

	// Resolve hold DID
	fmt.Fprintf(os.Stderr, "Resolving hold DID for %s...\n", *holdURL)
	holdDID, err := atproto.ResolveHoldDID(ctx, *holdURL)
	if err != nil {
		log.Fatalf("Failed to resolve hold DID: %v", err)
	}
	fmt.Fprintf(os.Stderr, "  Hold DID: %s\n", holdDID)

	// Resolve manifest digest: an explicit --digest wins; otherwise the tag
	// record on the user's PDS is consulted.
	manifestDigest := *digest
	tagName := *tag
	if manifestDigest == "" {
		fmt.Fprintf(os.Stderr, "Looking up tag %q for %s/%s...\n", tagName, resolvedHandle, repository)
		tagRecord, err := fetchTagRecord(ctx, pdsEndpoint, did, repository, tagName)
		if err != nil {
			log.Fatalf("Failed to fetch tag record: %v", err)
		}
		manifestDigest, err = tagRecord.GetManifestDigest()
		if err != nil {
			log.Fatalf("Failed to get manifest digest from tag: %v", err)
		}
		fmt.Fprintf(os.Stderr, "  Digest: %s\n", manifestDigest)
	} else {
		tagName = "" // digest lookup: the default tag name is not meaningful
	}

	// Fetch manifest record
	fmt.Fprintf(os.Stderr, "Fetching manifest record...\n")
	manifest, err := fetchManifestRecord(ctx, pdsEndpoint, did, manifestDigest)
	if err != nil {
		log.Fatalf("Failed to fetch manifest record: %v", err)
	}

	// Handle manifest index: list the platforms, pick the requested one, and
	// re-fetch the per-platform child manifest.
	if len(manifest.Manifests) > 0 {
		fmt.Fprintf(os.Stderr, "Manifest is an index with %d platforms:\n", len(manifest.Manifests))
		for _, m := range manifest.Manifests {
			if m.Platform != nil {
				p := m.Platform
				platStr := p.OS + "/" + p.Architecture
				if p.Variant != "" {
					platStr += "/" + p.Variant
				}
				fmt.Fprintf(os.Stderr, "  - %s (%s)\n", platStr, truncate(m.Digest, 24))
			}
		}
		child, err := selectPlatform(manifest.Manifests, *platform)
		if err != nil {
			// Fall back to the first entry rather than aborting.
			fmt.Fprintf(os.Stderr, "Warning: %v, using first platform\n", err)
			child = &manifest.Manifests[0]
		}
		manifestDigest = child.Digest
		if child.Platform != nil {
			*platform = child.Platform.OS + "/" + child.Platform.Architecture
			if child.Platform.Variant != "" {
				*platform += "/" + child.Platform.Variant
			}
		}
		fmt.Fprintf(os.Stderr, "Selected platform %s → %s\n", *platform, truncate(manifestDigest, 24))
		// Re-fetch the child manifest record
		manifest, err = fetchManifestRecord(ctx, pdsEndpoint, did, manifestDigest)
		if err != nil {
			log.Fatalf("Failed to fetch child manifest: %v", err)
		}
	}

	report := &reportData{
		Handle:     resolvedHandle,
		Repository: repository,
		Tag:        tagName,
		Digest:     manifestDigest,
		Platform:   *platform,
		HoldURL:    *holdURL,
		Layers:     manifest.Layers,
	}

	// Fetch image config (best-effort: a failure is recorded in the report,
	// not fatal).
	fmt.Fprintf(os.Stderr, "Fetching image config...\n")
	config, err := fetchFullImageConfig(ctx, *holdURL, manifestDigest)
	if err != nil {
		fmt.Fprintf(os.Stderr, "  Warning: %v\n", err)
		report.ConfigErr = err.Error()
	} else {
		report.Config = config
	}

	// Fetch scan data (scan record + SBOM blob + vuln blob)
	fmt.Fprintf(os.Stderr, "Fetching scan data...\n")
	scanRecord, sbom, vulnReport, scanErr := fetchScanData(ctx, *holdURL, holdDID, manifestDigest)
	if scanErr != nil {
		fmt.Fprintf(os.Stderr, "  Warning: %v\n", scanErr)
		report.VulnErr = scanErr.Error()
		report.SBOMErr = scanErr.Error()
	} else {
		report.ScanRecord = scanRecord
		report.ScannedAt = scanRecord.ScannedAt
		if vulnReport != nil {
			report.VulnReport = vulnReport
		} else {
			report.VulnErr = "No vulnerability report blob available"
		}
		if sbom != nil {
			report.SBOM = sbom
		} else {
			report.SBOMErr = "No SBOM blob available"
		}
	}

	fmt.Fprintf(os.Stderr, "Generating prompt...\n")
	generatePrompt(os.Stdout, report)
}
// parseRegistryURL extracts the account handle and repository name from a
// registry web URL of the form https://domain/r/<handle>/<repository>.
// The "r/" path prefix and a single trailing slash are optional; the
// repository may itself contain slashes (e.g. "team/app").
//
// Returns an error when the URL cannot be parsed or when either the handle
// or repository component is missing or empty (e.g. "https://domain/r//x").
func parseRegistryURL(rawURL string) (handle, repository string, err error) {
	u, err := url.Parse(rawURL)
	if err != nil {
		return "", "", fmt.Errorf("invalid URL: %w", err)
	}
	path := strings.TrimPrefix(u.Path, "/")
	path = strings.TrimPrefix(path, "r/")
	path = strings.TrimSuffix(path, "/")
	parts := strings.SplitN(path, "/", 2)
	// Reject missing or empty components so callers never receive an empty
	// handle or repository without an error.
	if len(parts) < 2 || parts[0] == "" || parts[1] == "" {
		return "", "", fmt.Errorf("URL must be in format: https://domain/r/<handle>/<repository>")
	}
	return parts[0], parts[1], nil
}
// fetchTagRecord looks up a tag record on the account's PDS via the
// com.atproto.repo.getRecord XRPC endpoint and decodes the record value.
func fetchTagRecord(ctx context.Context, pdsEndpoint, did, repository, tag string) (*atproto.TagRecord, error) {
	recordKey := atproto.RepositoryTagToRKey(repository, tag)
	endpoint := fmt.Sprintf("%s/xrpc/com.atproto.repo.getRecord?repo=%s&collection=%s&rkey=%s",
		strings.TrimSuffix(pdsEndpoint, "/"),
		url.QueryEscape(did),
		url.QueryEscape(atproto.TagCollection),
		url.QueryEscape(recordKey),
	)
	resp, err := httpGet(ctx, endpoint)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("tag %q not found (HTTP %d)", tag, resp.StatusCode)
	}
	// getRecord wraps the record in an envelope; only "value" is needed.
	var envelope struct {
		Value json.RawMessage `json:"value"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&envelope); err != nil {
		return nil, fmt.Errorf("parse response: %w", err)
	}
	record := new(atproto.TagRecord)
	if err := json.Unmarshal(envelope.Value, record); err != nil {
		return nil, fmt.Errorf("parse tag record: %w", err)
	}
	return record, nil
}
// fetchManifestRecord fetches a manifest record from the account's PDS via
// com.atproto.repo.getRecord. The record key is the digest without its
// "sha256:" prefix.
func fetchManifestRecord(ctx context.Context, pdsEndpoint, did, digest string) (*atproto.ManifestRecord, error) {
	recordKey := strings.TrimPrefix(digest, "sha256:")
	endpoint := fmt.Sprintf("%s/xrpc/com.atproto.repo.getRecord?repo=%s&collection=%s&rkey=%s",
		strings.TrimSuffix(pdsEndpoint, "/"),
		url.QueryEscape(did),
		url.QueryEscape(atproto.ManifestCollection),
		url.QueryEscape(recordKey),
	)
	resp, err := httpGet(ctx, endpoint)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("manifest not found (HTTP %d)", resp.StatusCode)
	}
	// getRecord wraps the record in an envelope; only "value" is needed.
	var envelope struct {
		Value json.RawMessage `json:"value"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&envelope); err != nil {
		return nil, fmt.Errorf("parse response: %w", err)
	}
	record := new(atproto.ManifestRecord)
	if err := json.Unmarshal(envelope.Value, record); err != nil {
		return nil, fmt.Errorf("parse manifest record: %w", err)
	}
	return record, nil
}
// selectPlatform returns the manifest entry whose platform matches the
// requested "os/arch[/variant]" string. When no variant is requested, the
// first os/arch match wins regardless of its variant. Entries without
// platform metadata are skipped. Returns an error when nothing matches.
func selectPlatform(manifests []atproto.ManifestReference, platform string) (*atproto.ManifestReference, error) {
	segments := strings.Split(platform, "/")
	wantOS := segments[0]
	var wantArch, wantVariant string
	if len(segments) > 1 {
		wantArch = segments[1]
	}
	if len(segments) > 2 {
		wantVariant = segments[2]
	}
	for i := range manifests {
		candidate := manifests[i].Platform
		if candidate == nil {
			continue
		}
		if candidate.OS != wantOS || candidate.Architecture != wantArch {
			continue
		}
		if wantVariant != "" && candidate.Variant != wantVariant {
			continue
		}
		return &manifests[i], nil
	}
	return nil, fmt.Errorf("no platform matching %s found", platform)
}
// fetchFullImageConfig retrieves the OCI image config for a manifest from
// the hold's image-config endpoint. The endpoint wraps the raw OCI config
// JSON in a {"configJson": "..."} envelope, so the payload is decoded twice:
// once for the envelope, once for the config itself.
func fetchFullImageConfig(ctx context.Context, holdURL, manifestDigest string) (*ociFullConfig, error) {
	endpoint := fmt.Sprintf("%s%s?digest=%s",
		strings.TrimSuffix(holdURL, "/"),
		atproto.HoldGetImageConfig,
		url.QueryEscape(manifestDigest),
	)
	resp, err := httpGet(ctx, endpoint)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("image config not found (HTTP %d)", resp.StatusCode)
	}
	var envelope struct {
		ConfigJSON string `json:"configJson"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&envelope); err != nil {
		return nil, fmt.Errorf("parse response: %w", err)
	}
	config := new(ociFullConfig)
	if err := json.Unmarshal([]byte(envelope.ConfigJSON), config); err != nil {
		return nil, fmt.Errorf("parse OCI config: %w", err)
	}
	return config, nil
}
// fetchScanData retrieves vulnerability scan results for a manifest from the
// hold: the scan record itself, plus — best-effort — the SBOM and Grype
// vulnerability-report blobs that the record references.
//
// A missing or unreadable scan record is a hard error. Missing or unparsable
// blobs are soft failures: a warning is printed to stderr and the
// corresponding return value is nil, while the scan record is still returned.
func fetchScanData(ctx context.Context, holdURL, holdDID, manifestDigest string) (*atproto.ScanRecord, *spdxDocument, *grypeReport, error) {
	// Scan records are keyed by the digest without its algorithm prefix.
	rkey := strings.TrimPrefix(manifestDigest, "sha256:")
	// Fetch scan record
	scanURL := fmt.Sprintf("%s/xrpc/com.atproto.repo.getRecord?repo=%s&collection=%s&rkey=%s",
		strings.TrimSuffix(holdURL, "/"),
		url.QueryEscape(holdDID),
		url.QueryEscape(atproto.ScanCollection),
		url.QueryEscape(rkey),
	)
	resp, err := httpGet(ctx, scanURL)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("fetch scan record: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, nil, nil, fmt.Errorf("no scan record found (HTTP %d)", resp.StatusCode)
	}
	// getRecord wraps the record in an envelope; only "value" is needed.
	var envelope struct {
		Value json.RawMessage `json:"value"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&envelope); err != nil {
		return nil, nil, nil, fmt.Errorf("parse scan response: %w", err)
	}
	var scanRecord atproto.ScanRecord
	if err := json.Unmarshal(envelope.Value, &scanRecord); err != nil {
		return nil, nil, nil, fmt.Errorf("parse scan record: %w", err)
	}
	// Fetch SBOM blob (best-effort: any failure leaves sbom nil).
	var sbom *spdxDocument
	if scanRecord.SbomBlob != nil && scanRecord.SbomBlob.Ref.String() != "" {
		blobURL := fmt.Sprintf("%s/xrpc/com.atproto.sync.getBlob?did=%s&cid=%s",
			strings.TrimSuffix(holdURL, "/"),
			url.QueryEscape(holdDID),
			url.QueryEscape(scanRecord.SbomBlob.Ref.String()),
		)
		blobResp, err := httpGet(ctx, blobURL)
		if err == nil {
			// Deferred until function return; at most two blob responses are
			// open at once, so this is bounded.
			defer blobResp.Body.Close()
			if blobResp.StatusCode == http.StatusOK {
				var doc spdxDocument
				if err := json.NewDecoder(blobResp.Body).Decode(&doc); err == nil {
					sbom = &doc
				} else {
					fmt.Fprintf(os.Stderr, " Warning: failed to parse SBOM: %v\n", err)
				}
			}
		}
	}
	// Fetch vuln report blob (same best-effort pattern as the SBOM above).
	var vulnReport *grypeReport
	if scanRecord.VulnReportBlob != nil && scanRecord.VulnReportBlob.Ref.String() != "" {
		blobURL := fmt.Sprintf("%s/xrpc/com.atproto.sync.getBlob?did=%s&cid=%s",
			strings.TrimSuffix(holdURL, "/"),
			url.QueryEscape(holdDID),
			url.QueryEscape(scanRecord.VulnReportBlob.Ref.String()),
		)
		blobResp, err := httpGet(ctx, blobURL)
		if err == nil {
			defer blobResp.Body.Close()
			if blobResp.StatusCode == http.StatusOK {
				var report grypeReport
				if err := json.NewDecoder(blobResp.Body).Decode(&report); err == nil {
					vulnReport = &report
				} else {
					fmt.Fprintf(os.Stderr, " Warning: failed to parse vuln report: %v\n", err)
				}
			}
		}
	}
	return &scanRecord, sbom, vulnReport, nil
}
// httpGet issues a context-aware GET request via the process-wide default
// HTTP client. The caller owns the response and must close its body.
// Cancellation and deadlines are controlled through ctx.
func httpGet(ctx context.Context, rawURL string) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, rawURL, nil)
	if err != nil {
		return nil, fmt.Errorf("build request: %w", err)
	}
	return http.DefaultClient.Do(req)
}
// --- Output generation ---
// generatePrompt writes an LLM prompt for the image advisor to w: a strict
// "raw YAML only" system instruction followed by a compact fact block built
// from the report (image identity, OCI config, layer history, vulnerability
// and SBOM summaries). All map iterations and sort tie-breaks are ordered so
// identical inputs always produce byte-identical prompts.
func generatePrompt(w io.Writer, r *reportData) {
	// System instruction
	fmt.Fprintln(w, `Respond ONLY with raw YAML. No markdown fences, no explanation, no preamble.
Analyze the container image data below. Output a list of actionable suggestions sorted by impact (highest first).
schema:
suggestions:
- action: "<specific actionable step>"
category: vulnerability|size|cache|security|best-practice
impact: high|medium|low
effort: low|medium|high
cves_fixed: <int or 0>
size_saved_mb: <int or 0>
detail: "<one sentence with specific package names, versions, or commands>"
---`)
	// Compact data block - no markdown formatting, just facts
	ref := r.Handle + "/" + r.Repository
	if r.Tag != "" {
		ref += ":" + r.Tag
	}
	totalSize := int64(0)
	for _, l := range r.Layers {
		totalSize += l.Size
	}
	fmt.Fprintf(w, "\nimage: %s\ndigest: %s\nplatform: %s\ntotal_size: %s\nlayers: %d\n",
		ref, r.Digest, r.Platform, humanSize(totalSize), len(r.Layers))
	// Config
	if r.Config != nil {
		c := r.Config.Config
		user := c.User
		if user == "" {
			// An empty USER in an OCI config means the image runs as root.
			user = "root"
		}
		fmt.Fprintf(w, "user: %s\n", user)
		if c.WorkingDir != "" {
			fmt.Fprintf(w, "workdir: %s\n", c.WorkingDir)
		}
		if len(c.Entrypoint) > 0 {
			fmt.Fprintf(w, "entrypoint: %s\n", strings.Join(c.Entrypoint, " "))
		}
		if len(c.Cmd) > 0 {
			fmt.Fprintf(w, "cmd: %s\n", strings.Join(c.Cmd, " "))
		}
		if len(c.ExposedPorts) > 0 {
			ports := make([]string, 0, len(c.ExposedPorts))
			for p := range c.ExposedPorts {
				ports = append(ports, p)
			}
			// Map iteration order is random; sort for deterministic output.
			sort.Strings(ports)
			fmt.Fprintf(w, "ports: %s\n", strings.Join(ports, ","))
		}
		if len(c.Env) > 0 {
			fmt.Fprintln(w, "env:")
			for _, env := range c.Env {
				parts := strings.SplitN(env, "=", 2)
				// Mask values of credential-looking variables.
				if shouldRedact(parts[0]) {
					fmt.Fprintf(w, " - %s=[REDACTED]\n", parts[0])
				} else {
					fmt.Fprintf(w, " - %s\n", env)
				}
			}
		}
		if len(c.Labels) > 0 {
			fmt.Fprintln(w, "labels:")
			keys := make([]string, 0, len(c.Labels))
			for k := range c.Labels {
				keys = append(keys, k)
			}
			sort.Strings(keys)
			for _, k := range keys {
				v := c.Labels[k]
				// Clamp long label values to keep the prompt compact.
				if len(v) > 80 {
					v = v[:77] + "..."
				}
				fmt.Fprintf(w, " %s: %s\n", k, v)
			}
		}
		// History as compact list
		fmt.Fprintln(w, "history:")
		layerIdx := 0
		for _, h := range r.Config.History {
			cmd := cleanCommand(h.CreatedBy)
			if len(cmd) > 100 {
				cmd = cmd[:97] + "..."
			}
			// Non-empty history entries consume layers in order; annotate
			// each with its layer size while layers remain.
			if !h.EmptyLayer && layerIdx < len(r.Layers) {
				fmt.Fprintf(w, " - [%s] %s\n", humanSize(r.Layers[layerIdx].Size), cmd)
				layerIdx++
			} else {
				fmt.Fprintf(w, " - %s\n", cmd)
			}
		}
	}
	// Vuln summary
	if r.ScanRecord != nil {
		sr := r.ScanRecord
		fmt.Fprintf(w, "vulns: {critical: %d, high: %d, medium: %d, low: %d, total: %d}\n",
			sr.Critical, sr.High, sr.Medium, sr.Low, sr.Total)
	}
	// Fixable vulns - compact list
	if r.VulnReport != nil {
		// Group by package: name -> {version, type, fixes[], cves[]}
		type pkgInfo struct {
			version string
			typ     string
			fixes   map[string]bool
			cves    []string
			maxSev  int
		}
		pkgs := map[string]*pkgInfo{}
		for _, m := range r.VulnReport.Matches {
			sev := m.Vulnerability.Metadata.Severity
			// Only Critical/High findings make the fixable list.
			if sev != "Critical" && sev != "High" {
				continue
			}
			key := m.Package.Name
			p, ok := pkgs[key]
			if !ok {
				p = &pkgInfo{version: m.Package.Version, typ: m.Package.Type, fixes: map[string]bool{}, maxSev: 5}
				pkgs[key] = p
			}
			p.cves = append(p.cves, m.Vulnerability.ID)
			for _, f := range m.Vulnerability.Fix.Versions {
				p.fixes[f] = true
			}
			if s := severityOrder(sev); s < p.maxSev {
				p.maxSev = s
			}
		}
		if len(pkgs) > 0 {
			fmt.Fprintln(w, "fixable_critical_high:")
			// Sort by severity, then CVE count, then name (deterministic ties).
			type entry struct {
				name string
				info *pkgInfo
			}
			sorted := make([]entry, 0, len(pkgs))
			for n, p := range pkgs {
				sorted = append(sorted, entry{n, p})
			}
			sort.Slice(sorted, func(i, j int) bool {
				if sorted[i].info.maxSev != sorted[j].info.maxSev {
					return sorted[i].info.maxSev < sorted[j].info.maxSev
				}
				if len(sorted[i].info.cves) != len(sorted[j].info.cves) {
					return len(sorted[i].info.cves) > len(sorted[j].info.cves)
				}
				return sorted[i].name < sorted[j].name
			})
			for _, e := range sorted {
				fixes := make([]string, 0, len(e.info.fixes))
				for f := range e.info.fixes {
					fixes = append(fixes, f)
				}
				sort.Strings(fixes)
				fmt.Fprintf(w, " - pkg: %s@%s (%s) cves: %d fix: %s\n",
					e.name, e.info.version, e.info.typ, len(e.info.cves), strings.Join(fixes, ","))
			}
		}
		// Unfixable counts
		unfixable := map[string]int{}
		for _, m := range r.VulnReport.Matches {
			if len(m.Vulnerability.Fix.Versions) == 0 {
				unfixable[m.Vulnerability.Metadata.Severity]++
			}
		}
		if len(unfixable) > 0 {
			fmt.Fprintf(w, "unfixable:")
			// Fixed severity order keeps this line deterministic.
			for _, sev := range []string{"Critical", "High", "Medium", "Low", "Negligible", "Unknown"} {
				if c, ok := unfixable[sev]; ok {
					fmt.Fprintf(w, " %s=%d", strings.ToLower(sev), c)
				}
			}
			fmt.Fprintln(w)
		}
	}
	// SBOM summary - just type counts
	if r.SBOM != nil {
		typeCounts := map[string]int{}
		total := 0
		for _, p := range r.SBOM.Packages {
			// Skip SPDX document/root pseudo-packages.
			if strings.HasPrefix(p.SPDXID, "SPDXRef-DocumentRoot") || p.SPDXID == "SPDXRef-DOCUMENT" {
				continue
			}
			total++
			pkgType := extractPackageType(p.Supplier)
			if pkgType == "" {
				pkgType = "other"
			}
			typeCounts[pkgType]++
		}
		fmt.Fprintf(w, "sbom_packages: %d", total)
		// Map iteration order is random; emit counts in sorted key order.
		types := make([]string, 0, len(typeCounts))
		for t := range typeCounts {
			types = append(types, t)
		}
		sort.Strings(types)
		for _, t := range types {
			fmt.Fprintf(w, " %s=%d", t, typeCounts[t])
		}
		fmt.Fprintln(w)
		// Top vulnerable packages
		if r.VulnReport != nil {
			vulnPkgs := map[string]int{}
			for _, m := range r.VulnReport.Matches {
				vulnPkgs[m.Package.Name]++
			}
			type pv struct {
				name  string
				count int
			}
			sorted := make([]pv, 0, len(vulnPkgs))
			for n, c := range vulnPkgs {
				sorted = append(sorted, pv{n, c})
			}
			// Descending by count, name as deterministic tie-break.
			sort.Slice(sorted, func(i, j int) bool {
				if sorted[i].count != sorted[j].count {
					return sorted[i].count > sorted[j].count
				}
				return sorted[i].name < sorted[j].name
			})
			// Top 10 only
			if len(sorted) > 10 {
				sorted = sorted[:10]
			}
			fmt.Fprintln(w, "top_vulnerable_packages:")
			for _, p := range sorted {
				fmt.Fprintf(w, " - %s: %d\n", p.name, p.count)
			}
		}
	}
}
// --- Helpers ---
// severityOrder maps a Grype severity name to a sort rank, where 0 is the
// most severe. Unrecognized names rank last (5).
func severityOrder(s string) int {
	ranks := map[string]int{
		"Critical":   0,
		"High":       1,
		"Medium":     2,
		"Low":        3,
		"Negligible": 4,
	}
	if rank, ok := ranks[s]; ok {
		return rank
	}
	return 5
}
// humanSize renders a byte count as a human-readable string using binary
// (1024-based) units, with one decimal place for KB and above and a plain
// integer for anything under 1 KB.
func humanSize(bytes int64) string {
	const kb = int64(1024)
	scales := []struct {
		limit int64
		label string
	}{
		{kb * kb * kb, "GB"},
		{kb * kb, "MB"},
		{kb, "KB"},
	}
	for _, s := range scales {
		if bytes >= s.limit {
			return fmt.Sprintf("%.1f %s", float64(bytes)/float64(s.limit), s.label)
		}
	}
	return fmt.Sprintf("%d B", bytes)
}
// cleanCommand strips the shell and Docker no-op prefixes that image history
// entries carry, then trims surrounding whitespace.
func cleanCommand(cmd string) string {
	// Order matters: "#(nop) " follows "/bin/sh -c " in Docker history lines.
	for _, prefix := range []string{"/bin/sh -c ", "#(nop) "} {
		cmd = strings.TrimPrefix(cmd, prefix)
	}
	return strings.TrimSpace(cmd)
}
// shouldRedact reports whether an environment variable name looks like it
// holds a credential and should be masked in generated output. A name is
// flagged when it ends with a sensitive suffix (e.g. GITHUB_TOKEN,
// AWS_SECRET_ACCESS_KEY) or is exactly a sensitive word on its own
// (e.g. PASSWORD, TOKEN). Matching is case-insensitive and deliberately
// errs on the side of redacting more.
func shouldRedact(envName string) bool {
	upper := strings.ToUpper(envName)
	for _, word := range []string{"KEY", "SECRET", "PASSWORD", "TOKEN", "CREDENTIALS", "API_KEY"} {
		// Bare names like PASSWORD are as sensitive as SOME_PASSWORD.
		if upper == word || strings.HasSuffix(upper, "_"+word) {
			return true
		}
	}
	return false
}
// truncate shortens s to its first n bytes and appends "..." when s is
// longer than n; shorter strings are returned unchanged. Note the result
// can therefore be up to n+3 bytes long.
func truncate(s string, n int) string {
	if len(s) > n {
		return s[:n] + "..."
	}
	return s
}
// extractPackageType guesses a package ecosystem from an SPDX supplier
// string. Distinctive substrings (e.g. "npmjs", "rubygems", "golang") are
// matched anywhere in the string; short ambiguous tokens ("npm", "go",
// "java", "rust") are only matched as whole words, so suppliers such as
// "Google LLC" or "javascript" are not misclassified as Go or Java.
// Returns "" when no ecosystem can be inferred.
func extractPackageType(supplier string) string {
	s := strings.ToLower(supplier)
	// hasWord reports whether token appears as a standalone word in s,
	// treating any non-alphanumeric rune as a separator (s is lowercase).
	hasWord := func(token string) bool {
		words := strings.FieldsFunc(s, func(r rune) bool {
			return (r < 'a' || r > 'z') && (r < '0' || r > '9')
		})
		for _, w := range words {
			if w == token {
				return true
			}
		}
		return false
	}
	switch {
	case strings.Contains(s, "npmjs") || hasWord("npm"):
		return "npm"
	case strings.Contains(s, "pypi") || strings.Contains(s, "python"):
		return "python"
	case strings.Contains(s, "rubygems"):
		return "gem"
	case strings.Contains(s, "golang") || hasWord("go"):
		return "go"
	case strings.Contains(s, "debian") || strings.Contains(s, "ubuntu"):
		return "deb"
	case strings.Contains(s, "alpine"):
		return "apk"
	case strings.Contains(s, "redhat") || strings.Contains(s, "fedora") || strings.Contains(s, "centos"):
		return "rpm"
	case strings.Contains(s, "maven") || hasWord("java"):
		return "java"
	case strings.Contains(s, "nuget") || strings.Contains(s, ".net"):
		return "nuget"
	case strings.Contains(s, "cargo") || hasWord("rust"):
		return "rust"
	default:
		return ""
	}
}

83
cmd/labeler/main.go Normal file
View File

@@ -0,0 +1,83 @@
package main
import (
"fmt"
"os"
"github.com/spf13/cobra"
"atcr.io/pkg/labeler"
)
// configFile holds the --config flag value used by the serve command.
var configFile string

// rootCmd is the top-level command for the labeler binary.
var rootCmd = &cobra.Command{
	Use:   "atcr-labeler",
	Short: "ATCR Labeler Service - ATProto content moderation",
}

// serveCmd starts the long-running labeler service using the configuration
// file given via --config.
var serveCmd = &cobra.Command{
	Use:   "serve",
	Short: "Start the labeler service",
	Long: `Start the ATCR labeler service with admin UI and subscribeLabels endpoint.
Configuration is loaded from the appview config YAML (labeler section).
Use --config to specify the config file path.`,
	Args: cobra.NoArgs,
	RunE: func(cmd *cobra.Command, args []string) error {
		// Load config, construct the server, then block in Serve.
		cfg, err := labeler.LoadConfig(configFile)
		if err != nil {
			return fmt.Errorf("failed to load config: %w", err)
		}
		server, err := labeler.NewServer(cfg)
		if err != nil {
			return fmt.Errorf("failed to initialize labeler: %w", err)
		}
		return server.Serve()
	},
}

// configCmd groups configuration-related subcommands.
var configCmd = &cobra.Command{
	Use:   "config",
	Short: "Configuration management commands",
}

// configInitCmd prints an example YAML configuration to stdout, or writes it
// to a file when a path argument is supplied.
var configInitCmd = &cobra.Command{
	Use:   "init [path]",
	Short: "Generate an example configuration file",
	Long:  `Generate an example YAML configuration file with all available options.`,
	Args:  cobra.MaximumNArgs(1),
	RunE: func(cmd *cobra.Command, args []string) error {
		yamlBytes, err := labeler.ExampleYAML()
		if err != nil {
			return fmt.Errorf("failed to generate example config: %w", err)
		}
		if len(args) == 1 {
			// Write to the given path; the status note goes to stderr so
			// stdout stays clean for piping.
			if err := os.WriteFile(args[0], yamlBytes, 0644); err != nil {
				return fmt.Errorf("failed to write config file: %w", err)
			}
			fmt.Fprintf(os.Stderr, "Wrote example config to %s\n", args[0])
			return nil
		}
		// No path argument: dump the example config to stdout.
		fmt.Print(string(yamlBytes))
		return nil
	},
}

// init wires flags and the command tree together.
func init() {
	serveCmd.Flags().StringVarP(&configFile, "config", "c", "", "path to YAML configuration file")
	configCmd.AddCommand(configInitCmd)
	rootCmd.AddCommand(serveCmd)
	rootCmd.AddCommand(configCmd)
	rootCmd.AddCommand(plcCmd) // defined in plc.go
}

// main executes the root command; cobra reports the error itself, so only
// the exit code is set here.
func main() {
	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}

225
cmd/labeler/plc.go Normal file
View File

@@ -0,0 +1,225 @@
package main
import (
"context"
"fmt"
"log/slog"
"atcr.io/pkg/atproto/did"
"atcr.io/pkg/auth/oauth"
"atcr.io/pkg/labeler"
"github.com/bluesky-social/indigo/atproto/atcrypto"
"github.com/spf13/cobra"
)
// plcCmd groups PLC directory management subcommands.
var plcCmd = &cobra.Command{
	Use:   "plc",
	Short: "PLC directory management commands",
}

// plcConfigFile holds the --config flag shared by all plc subcommands.
var plcConfigFile string

// Flag storage for add-rotation-key.
// NOTE(review): plcAddRotationKeyFirst is registered as a flag but its value
// is never read — only Changed("first") is consulted below, so --first=false
// behaves the same as --first. Confirm this is intended.
var (
	plcAddRotationKeyFirst bool
	plcAddRotationKeyLast  bool
)

// plcAddRotationKeyCmd adds (or generates) a rotation key on the labeler's
// did:plc document, signing the PLC update with the configured rotation key.
var plcAddRotationKeyCmd = &cobra.Command{
	Use:   "add-rotation-key [multibase-key]",
	Short: "Add a rotation key to this labeler's PLC identity",
	Long: `Add an additional rotation key to the labeler's did:plc document.
If a multibase-encoded private key (K-256 or P-256, starting with 'z') is supplied as
the positional argument, that key is added. If no argument is given, a fresh K-256
keypair is generated and the private half is printed to stdout. Save it offline as
your recovery key, since it will not be shown again.
By default the new key is inserted at the highest priority position (--first), which
allows it to override ops signed by lower-priority keys within PLC's 72-hour recovery
window. Pass --last to append at the lowest priority instead.
The labeler's configured rotation key is used to sign the PLC update.
atcr-labeler plc add-rotation-key --config config.yaml # generate + print
atcr-labeler plc add-rotation-key --config config.yaml --last # append, low priority
atcr-labeler plc add-rotation-key --config config.yaml z... # use supplied key`,
	Args: cobra.MaximumNArgs(1),
	RunE: func(cmd *cobra.Command, args []string) error {
		// --first and --last are mutually exclusive; detect explicit use of both.
		firstSet := cmd.Flags().Changed("first")
		lastSet := cmd.Flags().Changed("last")
		if firstSet && lastSet {
			return fmt.Errorf("--first and --last are mutually exclusive")
		}
		// Default is prepend (highest priority); --last switches to append.
		prepend := !plcAddRotationKeyLast
		cfg, err := labeler.LoadConfig(plcConfigFile)
		if err != nil {
			return fmt.Errorf("failed to load config: %w", err)
		}
		// PLC operations only make sense for did:plc identities.
		if cfg.Labeler.DIDMethod != "plc" {
			return fmt.Errorf("this command only works with did:plc (labeler.did_method is %q)", cfg.Labeler.DIDMethod)
		}
		ctx := context.Background()
		labelerDID, rotationKey, signingKey, err := loadLabelerPLCIdentity(ctx, cfg)
		if err != nil {
			return err
		}
		// Optional positional argument: a caller-supplied private key. When
		// absent, newKey stays nil and AddRotationKey generates a fresh one.
		var newKey atcrypto.PrivateKeyExportable
		if len(args) == 1 {
			newKey, err = atcrypto.ParsePrivateMultibase(args[0])
			if err != nil {
				return fmt.Errorf("failed to parse key argument: %w", err)
			}
		}
		res, err := did.AddRotationKey(ctx, did.AddRotationKeyOptions{
			DID:                 labelerDID,
			PLCDirectoryURL:     cfg.PLCDirectoryURL(),
			RotationKey:         rotationKey,
			SigningKey:          signingKey,
			VerificationKeyName: "atproto_label",
			NewKey:              newKey,
			Prepend:             prepend,
		})
		if err != nil {
			return err
		}
		// Idempotent case: the key is already in the document; nothing changed.
		if res.AlreadyPresent {
			fmt.Printf("Key %s is already a rotation key for %s (priority %d of %d)\n",
				res.NewKeyDIDKey, labelerDID, res.ExistingAt, res.TotalKeys)
			return nil
		}
		// A generated private key is shown exactly once; the user must save it.
		if res.Generated {
			fmt.Println("=========================================================================")
			fmt.Println("GENERATED NEW ROTATION KEY. SAVE THIS NOW. IT WILL NOT BE SHOWN AGAIN.")
			fmt.Println("Store it offline (password manager, paper, hardware token).")
			fmt.Println()
			fmt.Printf("Private key (multibase): %s\n", res.NewKey.Multibase())
			fmt.Printf("Public key (did:key): %s\n", res.NewKeyDIDKey)
			fmt.Println("=========================================================================")
		}
		slog.Info("Added rotation key to PLC identity",
			"did", labelerDID,
			"new_key", res.NewKeyDIDKey,
			"priority", res.InsertedAt,
			"total_rotation_keys", res.TotalKeys,
			"generated", res.Generated,
		)
		fmt.Printf("Added rotation key %s to %s (priority %d of %d)\n",
			res.NewKeyDIDKey, labelerDID, res.InsertedAt, res.TotalKeys)
		return nil
	},
}
// plcListRotationKeysCmd prints the rotation keys of the labeler's did:plc
// document in priority order, marking the locally-configured key.
var plcListRotationKeysCmd = &cobra.Command{
	Use:   "list-rotation-keys",
	Short: "List rotation keys in this labeler's PLC document",
	Long: `Fetch the labeler's did:plc document from the PLC directory and print its
rotation keys in priority order (index 0 is highest priority and can override
ops signed by lower-priority keys within PLC's 72-hour recovery window).
The key matching the local labeler.rotation_key is marked as LOCAL.`,
	Args: cobra.NoArgs,
	RunE: func(cmd *cobra.Command, args []string) error {
		cfg, err := labeler.LoadConfig(plcConfigFile)
		if err != nil {
			return fmt.Errorf("failed to load config: %w", err)
		}
		// PLC operations only make sense for did:plc identities.
		if cfg.Labeler.DIDMethod != "plc" {
			return fmt.Errorf("this command only works with did:plc (labeler.did_method is %q)", cfg.Labeler.DIDMethod)
		}
		ctx := context.Background()
		labelerDID, _, _, err := loadLabelerPLCIdentity(ctx, cfg)
		if err != nil {
			return err
		}
		// The local rotation key is optional here; when present it lets the
		// listing mark which directory key belongs to this instance.
		var localRotationKey atcrypto.PrivateKey
		if cfg.Labeler.RotationKey != "" {
			localRotationKey, err = atcrypto.ParsePrivateMultibase(cfg.Labeler.RotationKey)
			if err != nil {
				return fmt.Errorf("failed to parse rotation_key from config: %w", err)
			}
		}
		res, err := did.ListRotationKeys(ctx, did.ListRotationKeysOptions{
			DID:              labelerDID,
			PLCDirectoryURL:  cfg.PLCDirectoryURL(),
			LocalRotationKey: localRotationKey,
		})
		if err != nil {
			return err
		}
		printRotationKeys(res)
		return nil
	},
}

// loadLabelerPLCIdentity is the shared "load DID + rotation key + signing key" helper
// used by every PLC command. Mirrors loadHoldPLCIdentity over in cmd/hold/plc.go.
// Returns the labeler DID, the parsed rotation key (required for signing PLC
// updates), and the PDS signing key loaded or generated at the configured path.
func loadLabelerPLCIdentity(ctx context.Context, cfg *labeler.Config) (string, atcrypto.PrivateKey, *atcrypto.PrivateKeyK256, error) {
	labelerDID, _, err := labeler.LoadIdentity(ctx, cfg)
	if err != nil {
		return "", nil, nil, err
	}
	// The rotation key authorizes PLC document updates; without it no PLC
	// command can proceed.
	if cfg.Labeler.RotationKey == "" {
		return "", nil, nil, fmt.Errorf("labeler.rotation_key must be set to sign PLC updates")
	}
	rotationKey, err := atcrypto.ParsePrivateMultibase(cfg.Labeler.RotationKey)
	if err != nil {
		return "", nil, nil, fmt.Errorf("failed to parse rotation_key from config: %w", err)
	}
	signingKey, err := oauth.GenerateOrLoadPDSKey(cfg.SigningKeyPath())
	if err != nil {
		return "", nil, nil, fmt.Errorf("failed to load signing key: %w", err)
	}
	return labelerDID, rotationKey, signingKey, nil
}

// printRotationKeys is the shared CLI output for list-rotation-keys, kept identical to
// the hold version since the formatting is service-agnostic.
func printRotationKeys(res *did.ListRotationKeysResult) {
	fmt.Printf("DID: %s\n", res.DID)
	fmt.Printf("PLC directory: %s\n", res.Directory)
	fmt.Printf("Rotation keys (%d):\n", len(res.Keys))
	for i, k := range res.Keys {
		// Annotate the priority extremes for readability.
		marker := ""
		switch {
		case len(res.Keys) == 1:
			marker = "(only key)"
		case i == 0:
			marker = "(highest priority)"
		case i == len(res.Keys)-1:
			marker = "(lowest priority)"
		}
		localTag := ""
		if res.LocalDIDKey != "" && k == res.LocalDIDKey {
			localTag = " [LOCAL — labeler.rotation_key]"
		}
		fmt.Printf(" [%d] %s %s%s\n", i, k, marker, localTag)
	}
	// Warn loudly when the configured local key is absent from the directory:
	// this instance can no longer sign PLC updates.
	if res.LocalDIDKey != "" && !res.LocalPresent {
		fmt.Printf("\nWARNING: local rotation_key (%s) is NOT present in the PLC document.\n", res.LocalDIDKey)
		fmt.Println("This service cannot sign PLC updates. Possible compromise or out-of-band rotation.")
	}
}

// init wires the plc subcommands and their flags.
func init() {
	plcCmd.PersistentFlags().StringVarP(&plcConfigFile, "config", "c", "", "path to YAML configuration file")
	plcAddRotationKeyCmd.Flags().BoolVar(&plcAddRotationKeyFirst, "first", true, "insert at highest priority (default)")
	plcAddRotationKeyCmd.Flags().BoolVar(&plcAddRotationKeyLast, "last", false, "insert at lowest priority")
	plcCmd.AddCommand(plcAddRotationKeyCmd)
	plcCmd.AddCommand(plcListRotationKeysCmd)
}

View File

@@ -29,8 +29,6 @@ server:
default_hold_did: ""
# Allows HTTP (not HTTPS) for DID resolution and uses transition:generic OAuth scope.
test_mode: false
# Path to P-256 private key for OAuth client authentication. Auto-generated on first run.
oauth_key_path: /var/lib/atcr/oauth/client.key
# Display name shown on OAuth authorization screens.
client_name: AT Container Registry
# Short name used in page titles and browser tabs.
@@ -52,6 +50,8 @@ ui:
libsql_auth_token: ""
# How often to sync with remote libSQL server. Default: 60s.
libsql_sync_interval: 1m0s
# Source code URL displayed in the footer "Source" link. Defaults to the upstream ATCR project.
source_url: https://tangled.org/evan.jarrett.net/at-container-registry
# Health check and cache settings.
health:
# How long to cache hold health check results.
@@ -70,16 +70,13 @@ jetstream:
backfill_enabled: true
# How often to re-run backfill to catch missed events. Set to 0 to only backfill on startup.
backfill_interval: 24h0m0s
# Relay endpoints for backfill, tried in order on failure.
# Endpoints used for backfill. MUST support com.atproto.sync.listReposByCollection. Tried in order on failure.
relay_endpoints:
- https://relay1.us-east.bsky.network
- https://relay1.us-west.bsky.network
- https://zlay.waow.tech
# JWT authentication settings.
auth:
# RSA private key for signing registry JWTs issued to Docker clients.
key_path: /var/lib/atcr/auth/private-key.pem
# X.509 certificate matching the JWT signing key.
# X.509 certificate matching the JWT signing key (auto-generated on each boot from the JWT key in the database).
cert_path: /var/lib/atcr/auth/private-key.crt
# Credential helper download settings.
credential_helper:
@@ -91,6 +88,14 @@ legal:
company_name: ""
# Governing law jurisdiction for legal terms.
jurisdiction: ""
# AI-powered image advisor settings.
ai:
# Anthropic API key for AI Image Advisor. Also reads CLAUDE_API_KEY env var as fallback.
api_key: ""
# ATProto labeler for content moderation (DMCA takedowns).
labeler:
# DID or URL of the ATProto labeler (e.g., did:web:labeler.atcr.io). Empty disables label filtering.
did: ""
# Stripe billing integration (requires -tags billing build).
billing:
# Stripe secret key. Can also be set via STRIPE_SECRET_KEY env var (takes precedence). Billing is enabled automatically when set.
@@ -100,9 +105,9 @@ billing:
# ISO 4217 currency code (e.g. "usd").
currency: usd
# Redirect URL after successful checkout. Use {base_url} placeholder.
success_url: '{base_url}/settings#storage'
success_url: '{base_url}/settings/billing'
# Redirect URL after cancelled checkout. Use {base_url} placeholder.
cancel_url: '{base_url}/settings#storage'
cancel_url: '{base_url}/settings/billing'
# Subscription tiers ordered by rank (lowest to highest).
tiers:
- # Tier name. Position in list determines rank (0-based).
@@ -119,6 +124,9 @@ billing:
max_webhooks: 1
# Allow all webhook trigger types (not just first-scan).
webhook_all_triggers: false
# Enable AI Image Advisor for this tier.
ai_advisor: false
# Show supporter badge on user profiles for subscribers at this tier.
supporter_badge: false
- # Tier name. Position in list determines rank (0-based).
name: Supporter
@@ -133,7 +141,10 @@ billing:
# Maximum webhooks for this tier (-1 = unlimited).
max_webhooks: 1
# Allow all webhook trigger types (not just first-scan).
webhook_all_triggers: false
webhook_all_triggers: true
# Enable AI Image Advisor for this tier.
ai_advisor: true
# Show supporter badge on user profiles for subscribers at this tier.
supporter_badge: true
- # Tier name. Position in list determines rank (0-based).
name: bosun
@@ -149,18 +160,9 @@ billing:
max_webhooks: 10
# Allow all webhook trigger types (not just first-scan).
webhook_all_triggers: true
# Enable AI Image Advisor for this tier.
ai_advisor: true
# Show supporter badge on user profiles for subscribers at this tier.
supporter_badge: true
# - # Tier name. Position in list determines rank (0-based).
# name: quartermaster
# # Short description shown on the plan card.
# description: Maximum storage for power users
# # List of features included in this tier.
# features: []
# # Stripe price ID for monthly billing. Empty = free tier.
# stripe_price_monthly: price_xxx
# # Stripe price ID for yearly billing.
# stripe_price_yearly: price_yyy
# # Maximum webhooks for this tier (-1 = unlimited).
# max_webhooks: -1
# # Allow all webhook trigger types (not just first-scan).
# webhook_all_triggers: true
# Show supporter badge on hold owner profiles.
owner_badge: true

View File

@@ -45,8 +45,10 @@ server:
successor: ""
# Use localhost for OAuth redirects during development.
test_mode: false
# Request crawl from this relay on startup to make the embedded PDS discoverable.
relay_endpoint: ""
# Endpoints used for proactive scan discovery. MUST support com.atproto.sync.listReposByCollection. A requestCrawl is also sent to each endpoint on startup (best-effort, in addition to the built-in known relays).
relay_endpoints:
- https://relay1.us-east.bsky.network
- https://relay1.us-west.bsky.network
# DID of the appview this hold is managed by (e.g. did:web:atcr.io). Resolved via did:web for URL and public key.
appview_did: did:web:172.28.0.2%3A5000
# Read timeout for HTTP requests.
@@ -135,3 +137,9 @@ scanner:
secret: ""
# Minimum interval between re-scans of the same manifest. When set, the hold proactively scans manifests when the scanner is idle. Default: 168h (7 days). Set to 0 to disable.
rescan_interval: 168h0m0s
# Labeler subscription settings. When configured, the hold consumes takedown labels from the named labeler and purges affected records on receipt; GC consults the cache to gate blob cleanup. Empty subscribe_url disables.
labeler:
# DID or URL of the ATProto labeler (e.g., did:web:labeler.atcr.io). Empty disables labeler integration.
did: ""
# Reversibility window for takedowns. Blobs survive this long after a takedown so the action can be reversed. After this window the GC reclaims them. Default: 720h (30 days).
grace_window: 720h0m0s

View File

@@ -0,0 +1,53 @@
# ATCR Labeler Configuration
# Generated with defaults — edit as needed.
# Configuration format version.
version: "0.1"
# Log level: debug, info, warn, error.
log_level: info
# Labeler service settings.
labeler:
# Enable the labeler service.
enabled: true
# Listen address for labeler (e.g., :5002).
addr: :5002
# Externally reachable labeler URL (required, e.g. https://labeler.example.com).
public_url: https://labeler.example.com
# OAuth client display name (e.g., "ATCR Labeler").
client_name: ATCR Labeler
# Short brand label used in UI copy (e.g., "ATCR").
client_short_name: ATCR
# DID of the labeler admin. Only this DID can log into the admin panel.
owner_did: did:plc:your-did-here
# Directory for labeler state (database, signing key, did.txt).
data_dir: /var/lib/atcr-labeler
# DID method: "plc" (recommended) or "web".
did_method: plc
# Explicit did:plc identifier for adoption/recovery (optional).
did: ""
# Path to K-256 signing key (defaults to <data_dir>/signing.key).
key_path: ""
# Multibase-encoded rotation key (K-256 or P-256). Required to update the PLC document.
rotation_key: ""
# PLC directory URL (default https://plc.directory).
plc_directory_url: https://plc.directory
# Optional libSQL/Bunny remote sync URL. Empty = local-only.
libsql_sync_url: ""
# Auth token for libsql_sync_url.
libsql_auth_token: ""
# Embedded-replica pull interval (e.g. 30s). 0 = manual sync only.
libsql_sync_interval: 0s
# Remote log shipping settings.
log_shipper:
# Log shipping backend: "victoria", "opensearch", or "loki". Empty disables shipping.
backend: ""
# Remote log service endpoint, e.g. "http://victorialogs:9428".
url: ""
# Number of log entries to buffer before flushing to the remote service.
batch_size: 0
# Maximum time between flushes, even if batch is not full.
flush_interval: 0s
# Basic auth username for the log service (optional).
username: ""
# Basic auth password for the log service (optional).
password: ""

View File

@@ -28,6 +28,12 @@ var holdConfigTmpl string
//go:embed configs/scanner.yaml.tmpl
var scannerConfigTmpl string
//go:embed systemd/labeler.service.tmpl
var labelerServiceTmpl string
//go:embed configs/labeler.yaml.tmpl
var labelerConfigTmpl string
//go:embed configs/cloudinit.sh.tmpl
var cloudInitTmpl string
@@ -43,10 +49,11 @@ type ConfigValues struct {
S3SecretKey string
// Infrastructure (computed from zone + config)
Zone string // e.g. "us-chi1"
HoldDomain string // e.g. "us-chi1.cove.seamark.dev"
HoldDid string // e.g. "did:web:us-chi1.cove.seamark.dev"
BasePath string // e.g. "/var/lib/seamark"
Zone string // e.g. "us-chi1"
HoldDomain string // e.g. "us-chi1.cove.seamark.dev"
HoldDid string // e.g. "did:web:us-chi1.cove.seamark.dev"
LabelerDomain string // e.g. "labeler.seamark.dev"
BasePath string // e.g. "/var/lib/seamark"
// Scanner (auto-generated shared secret)
ScannerSecret string // hex-encoded 32-byte secret; empty disables scanning
@@ -111,9 +118,33 @@ func renderScannerServiceUnit(p scannerServiceUnitParams) (string, error) {
return buf.String(), nil
}
// labelerServiceUnitParams holds values for rendering the labeler systemd unit.
// Field names must match the placeholders in the embedded labeler.service.tmpl
// template, so they cannot be renamed without updating the template too.
type labelerServiceUnitParams struct {
DisplayName string // human-readable product name shown in unit Description, e.g. "Seamark"
User string // system user the service runs as, e.g. "seamark"
BinaryPath string // absolute path to the labeler binary, e.g. "/opt/seamark/bin/seamark-labeler"
ConfigPath string // absolute path to the labeler YAML config, e.g. "/etc/seamark/labeler.yaml"
DataDir string // base data directory for service state, e.g. "/var/lib/seamark"
ServiceName string // systemd unit name (without ".service"), e.g. "seamark-labeler"
AppviewServiceName string // appview unit the labeler orders After=, e.g. "seamark-appview"
}
// renderLabelerServiceUnit renders the embedded labeler systemd unit template
// with the supplied parameters and returns the unit file contents. It returns
// an error if the template fails to parse or execute.
func renderLabelerServiceUnit(p labelerServiceUnitParams) (string, error) {
tmpl, parseErr := template.New("labeler-service").Parse(labelerServiceTmpl)
if parseErr != nil {
return "", fmt.Errorf("parse labeler service template: %w", parseErr)
}
var rendered bytes.Buffer
if execErr := tmpl.Execute(&rendered, p); execErr != nil {
return "", fmt.Errorf("render labeler service template: %w", execErr)
}
return rendered.String(), nil
}
// generateAppviewCloudInit generates the cloud-init user-data script for the appview server.
// Sets up the OS, directories, config, and systemd unit. Binaries are deployed separately via SCP.
func generateAppviewCloudInit(cfg *InfraConfig, vals *ConfigValues) (string, error) {
// When withLabeler is true, a second phase is appended that creates labeler data
// directories and installs a labeler systemd service. Binaries are deployed separately via SCP.
func generateAppviewCloudInit(cfg *InfraConfig, vals *ConfigValues, withLabeler bool) (string, error) {
naming := cfg.Naming()
configYAML, err := renderConfig(appviewConfigTmpl, vals)
@@ -133,7 +164,7 @@ func generateAppviewCloudInit(cfg *InfraConfig, vals *ConfigValues) (string, err
return "", fmt.Errorf("appview service unit: %w", err)
}
return generateCloudInit(cloudInitParams{
script, err := generateCloudInit(cloudInitParams{
BinaryName: naming.Appview(),
ServiceUnit: serviceUnit,
ConfigYAML: configYAML,
@@ -146,6 +177,69 @@ func generateAppviewCloudInit(cfg *InfraConfig, vals *ConfigValues) (string, err
LogFile: naming.LogFile(),
DisplayName: naming.DisplayName(),
})
if err != nil {
return "", err
}
if !withLabeler {
return script, nil
}
// Render labeler config YAML
labelerConfigYAML, err := renderConfig(labelerConfigTmpl, vals)
if err != nil {
return "", fmt.Errorf("labeler config: %w", err)
}
// Append labeler setup phase
labelerUnit, err := renderLabelerServiceUnit(labelerServiceUnitParams{
DisplayName: naming.DisplayName(),
User: naming.SystemUser(),
BinaryPath: naming.InstallDir() + "/bin/" + naming.Labeler(),
ConfigPath: naming.LabelerConfigPath(),
DataDir: naming.BasePath(),
ServiceName: naming.Labeler(),
AppviewServiceName: naming.Appview(),
})
if err != nil {
return "", fmt.Errorf("labeler service unit: %w", err)
}
// Escape single quotes for heredoc embedding
labelerUnit = strings.ReplaceAll(labelerUnit, "'", "'\\''")
labelerConfigYAML = strings.ReplaceAll(labelerConfigYAML, "'", "'\\''")
labelerPhase := fmt.Sprintf(`
# === Labeler Setup ===
# Labeler data dirs
mkdir -p %s
chown -R %s:%s %s
# Labeler config
cat > %s << 'CFGEOF'
%s
CFGEOF
# Labeler systemd service
cat > /etc/systemd/system/%s.service << 'SVCEOF'
%s
SVCEOF
systemctl daemon-reload
systemctl enable %s
echo "=== Labeler setup complete ==="
`,
naming.LabelerDataDir(),
naming.SystemUser(), naming.SystemUser(), naming.LabelerDataDir(),
naming.LabelerConfigPath(),
labelerConfigYAML,
naming.Labeler(),
labelerUnit,
naming.Labeler(),
)
return script + labelerPhase, nil
}
// generateHoldCloudInit generates the cloud-init user-data script for the hold server.
@@ -280,8 +374,10 @@ func generateCloudInit(p cloudInitParams) (string, error) {
}
// syncServiceUnit compares a rendered systemd service unit against what's on
// the server. If they differ, it writes the new unit file. Returns true if the
// unit was updated (caller should daemon-reload before restart).
// the server. If they differ, it writes the new unit file. If the unit is
// missing entirely, it installs it and runs `systemctl enable` so the service
// starts on boot. Returns true if the unit was created or updated (caller
// should daemon-reload before restart).
func syncServiceUnit(name, ip, serviceName, renderedUnit string) (bool, error) {
unitPath := "/etc/systemd/system/" + serviceName + ".service"
@@ -294,8 +390,15 @@ func syncServiceUnit(name, ip, serviceName, renderedUnit string) (bool, error) {
rendered := strings.TrimSpace(renderedUnit)
if remote == "__MISSING__" {
fmt.Printf(" service unit: %s not found (cloud-init will handle it)\n", name)
return false, nil
// First-time install: write file, daemon-reload, and enable so the
// service comes up on boot. The caller's restart will start it.
script := fmt.Sprintf("cat > %s << 'SVCEOF'\n%s\nSVCEOF\nsystemctl daemon-reload\nsystemctl enable %s",
unitPath, rendered, serviceName)
if _, err := runSSH(ip, script, false); err != nil {
return false, fmt.Errorf("install service unit: %w", err)
}
fmt.Printf(" service unit: %s installed and enabled\n", name)
return true, nil
}
if remote == rendered {
@@ -323,7 +426,17 @@ func syncConfigKeys(name, ip, configPath, templateYAML string) error {
remote = strings.TrimSpace(remote)
if remote == "__MISSING__" {
fmt.Printf(" config sync: %s not yet created (cloud-init will handle it)\n", name)
// First-time install: write the rendered template as-is. Subsequent
// runs use the merge-keys path below to preserve operator edits.
dir := configPath[:strings.LastIndex(configPath, "/")]
if _, err := runSSH(ip, fmt.Sprintf("mkdir -p %s", dir), false); err != nil {
return fmt.Errorf("create config dir: %w", err)
}
script := fmt.Sprintf("cat > %s << 'CFGEOF'\n%s\nCFGEOF", configPath, strings.TrimRight(templateYAML, "\n"))
if _, err := runSSH(ip, script, false); err != nil {
return fmt.Errorf("write initial config: %w", err)
}
fmt.Printf(" config sync: %s installed\n", name)
return nil
}

View File

@@ -90,10 +90,12 @@ func extractFromAppviewTemplate() (clientName, baseDomain string, registryDomain
return clientName, baseDomain, registryDomains, nil
}
// readSSHPublicKey reads an SSH public key from a file path.
// readSSHPublicKey reads an SSH public key from a file path. An empty path
// returns an empty key without error — callers that need the key (e.g. when
// creating new servers) must check for empty before use.
func readSSHPublicKey(path string) (string, error) {
if path == "" {
return "", fmt.Errorf("--ssh-key is required (path to SSH public key file)")
return "", nil
}
data, err := os.ReadFile(path)
if err != nil {

View File

@@ -11,13 +11,13 @@ server:
addr: :5000
base_url: "https://seamark.dev"
default_hold_did: "{{.HoldDid}}"
oauth_key_path: "{{.BasePath}}/oauth/client.key"
client_name: Seamark
test_mode: false
client_short_name: Seamark
registry_domains:
- "buoy.cr"
- "bouy.cr"
- "seamark.cr"
ui:
database_path: "{{.BasePath}}/ui.db"
theme: seamark
@@ -39,10 +39,11 @@ jetstream:
- https://relay1.us-east.bsky.network
- https://relay1.us-west.bsky.network
auth:
key_path: "{{.BasePath}}/auth/private-key.pem"
cert_path: "{{.BasePath}}/auth/private-key.crt"
credential_helper:
tangled_repo: ""
legal:
company_name: Seamark
jurisdiction: State of Texas, United States
ai:
api_key: ""
labeler:
did: ""

View File

@@ -21,6 +21,17 @@ apt-get install -y git gcc make curl libsqlite3-dev nodejs npm htop systemd-time
sed -i 's/^#NTP=.*/NTP=0.debian.pool.ntp.org 1.debian.pool.ntp.org 2.debian.pool.ntp.org 3.debian.pool.ntp.org/' /etc/systemd/timesyncd.conf
timedatectl set-ntp true
# Persistent journald (capped, 7-day retention)
mkdir -p /var/log/journal
mkdir -p /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/atcr.conf << 'JOURNALEOF'
[Journal]
Storage=persistent
SystemMaxUse=500M
MaxRetentionSec=7day
JOURNALEOF
systemctl restart systemd-journald
# Swap (for small instances)
if [ ! -f /swapfile ]; then
dd if=/dev/zero of=/swapfile bs=1M count=2048

View File

@@ -20,7 +20,9 @@ server:
public: false
successor: ""
test_mode: false
relay_endpoint: ""
relay_endpoints:
- https://relay1.us-east.bsky.network
- https://relay1.us-west.bsky.network
appview_did: did:web:seamark.dev
read_timeout: 5m0s
write_timeout: 5m0s
@@ -61,4 +63,9 @@ quota:
scanner:
secret: "{{.ScannerSecret}}"
rescan_interval: 168h0m0s
labeler:
# Subscribe to the appview's labeler so takedowns purge records on this
# hold and the GC honors the reversibility window. Empty disables.
did: "did:web:seamark.dev"
grace_window: 720h0m0s

View File

@@ -0,0 +1,22 @@
version: "0.1"
log_level: info
log_shipper:
backend: ""
url: ""
batch_size: 100
flush_interval: 5s
username: ""
password: ""
labeler:
enabled: true
addr: :5002
public_url: "https://{{.LabelerDomain}}"
client_name: "Seamark Labeler"
client_short_name: Seamark
owner_did: ""
data_dir: "{{.BasePath}}/labeler"
did_method: plc
did: ""
key_path: ""
rotation_key: ""
plc_directory_url: https://plc.directory

View File

@@ -1,10 +1,10 @@
module atcr.io/deploy
go 1.25.7
go 1.26.2
require (
github.com/UpCloudLtd/upcloud-go-api/v8 v8.34.3
github.com/charmbracelet/huh v0.8.0
github.com/UpCloudLtd/upcloud-go-api/v8 v8.35.0
github.com/charmbracelet/huh v1.0.0
github.com/spf13/cobra v1.10.2
go.yaml.in/yaml/v3 v3.0.4
)
@@ -15,23 +15,23 @@ require (
github.com/catppuccin/go v0.3.0 // indirect
github.com/charmbracelet/bubbles v1.0.0 // indirect
github.com/charmbracelet/bubbletea v1.3.10 // indirect
github.com/charmbracelet/colorprofile v0.4.2 // indirect
github.com/charmbracelet/colorprofile v0.4.3 // indirect
github.com/charmbracelet/lipgloss v1.1.0 // indirect
github.com/charmbracelet/x/ansi v0.11.6 // indirect
github.com/charmbracelet/x/ansi v0.11.7 // indirect
github.com/charmbracelet/x/cellbuf v0.0.15 // indirect
github.com/charmbracelet/x/exp/strings v0.1.0 // indirect
github.com/charmbracelet/x/term v0.2.2 // indirect
github.com/clipperhouse/displaywidth v0.10.0 // indirect
github.com/clipperhouse/uax29/v2 v2.6.0 // indirect
github.com/clipperhouse/displaywidth v0.11.0 // indirect
github.com/clipperhouse/uax29/v2 v2.7.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lucasb-eyer/go-colorful v1.3.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/lucasb-eyer/go-colorful v1.4.0 // indirect
github.com/mattn/go-isatty v0.0.21 // indirect
github.com/mattn/go-localereader v0.0.2-0.20220822084749-2491eb6c1c75 // indirect
github.com/mattn/go-runewidth v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.23 // indirect
github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
github.com/muesli/cancelreader v0.2.2 // indirect
@@ -41,7 +41,7 @@ require (
github.com/rogpeppe/go-internal v1.14.1 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect
golang.org/x/sys v0.41.0 // indirect
golang.org/x/text v0.34.0 // indirect
golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f // indirect
golang.org/x/sys v0.43.0 // indirect
golang.org/x/text v0.36.0 // indirect
)

View File

@@ -1,7 +1,7 @@
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
github.com/UpCloudLtd/upcloud-go-api/v8 v8.34.3 h1:7ba03u4L5LafZPVO2k6B0/f114k5dFF3GtAN7FEKfno=
github.com/UpCloudLtd/upcloud-go-api/v8 v8.34.3/go.mod h1:NBh1d/ip1bhdAIhuPWbyPme7tbLzDTV7dhutUmU1vg8=
github.com/UpCloudLtd/upcloud-go-api/v8 v8.35.0 h1:AIt07ExXzCaC9YVszkVPT+CteoyXldw0C8DGUMxtjD4=
github.com/UpCloudLtd/upcloud-go-api/v8 v8.35.0/go.mod h1:sxG94uNhC31OQH+zK0RhZjVj+PdkhObsNAt5bvq2J8c=
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
@@ -14,14 +14,14 @@ github.com/charmbracelet/bubbles v1.0.0 h1:12J8/ak/uCZEMQ6KU7pcfwceyjLlWsDLAxB5f
github.com/charmbracelet/bubbles v1.0.0/go.mod h1:9d/Zd5GdnauMI5ivUIVisuEm3ave1XwXtD1ckyV6r3E=
github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4=
github.com/charmbracelet/colorprofile v0.4.2 h1:BdSNuMjRbotnxHSfxy+PCSa4xAmz7szw70ktAtWRYrY=
github.com/charmbracelet/colorprofile v0.4.2/go.mod h1:0rTi81QpwDElInthtrQ6Ni7cG0sDtwAd4C4le060fT8=
github.com/charmbracelet/huh v0.8.0 h1:Xz/Pm2h64cXQZn/Jvele4J3r7DDiqFCNIVteYukxDvY=
github.com/charmbracelet/huh v0.8.0/go.mod h1:5YVc+SlZ1IhQALxRPpkGwwEKftN/+OlJlnJYlDRFqN4=
github.com/charmbracelet/colorprofile v0.4.3 h1:QPa1IWkYI+AOB+fE+mg/5/4HRMZcaXex9t5KX76i20Q=
github.com/charmbracelet/colorprofile v0.4.3/go.mod h1:/zT4BhpD5aGFpqQQqw7a+VtHCzu+zrQtt1zhMt9mR4Q=
github.com/charmbracelet/huh v1.0.0 h1:wOnedH8G4qzJbmhftTqrpppyqHakl/zbbNdXIWJyIxw=
github.com/charmbracelet/huh v1.0.0/go.mod h1:5YVc+SlZ1IhQALxRPpkGwwEKftN/+OlJlnJYlDRFqN4=
github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
github.com/charmbracelet/x/ansi v0.11.6 h1:GhV21SiDz/45W9AnV2R61xZMRri5NlLnl6CVF7ihZW8=
github.com/charmbracelet/x/ansi v0.11.6/go.mod h1:2JNYLgQUsyqaiLovhU2Rv/pb8r6ydXKS3NIttu3VGZQ=
github.com/charmbracelet/x/ansi v0.11.7 h1:kzv1kJvjg2S3r9KHo8hDdHFQLEqn4RBCb39dAYC84jI=
github.com/charmbracelet/x/ansi v0.11.7/go.mod h1:9qGpnAVYz+8ACONkZBUWPtL7lulP9No6p1epAihUZwQ=
github.com/charmbracelet/x/cellbuf v0.0.15 h1:ur3pZy0o6z/R7EylET877CBxaiE1Sp1GMxoFPAIztPI=
github.com/charmbracelet/x/cellbuf v0.0.15/go.mod h1:J1YVbR7MUuEGIFPCaaZ96KDl5NoS0DAWkskup+mOY+Q=
github.com/charmbracelet/x/conpty v0.1.0 h1:4zc8KaIcbiL4mghEON8D72agYtSeIgq8FSThSPQIb+U=
@@ -38,10 +38,10 @@ github.com/charmbracelet/x/termios v0.1.1 h1:o3Q2bT8eqzGnGPOYheoYS8eEleT5ZVNYNy8
github.com/charmbracelet/x/termios v0.1.1/go.mod h1:rB7fnv1TgOPOyyKRJ9o+AsTU/vK5WHJ2ivHeut/Pcwo=
github.com/charmbracelet/x/xpty v0.1.2 h1:Pqmu4TEJ8KeA9uSkISKMU3f+C1F6OGBn8ABuGlqCbtI=
github.com/charmbracelet/x/xpty v0.1.2/go.mod h1:XK2Z0id5rtLWcpeNiMYBccNNBrP2IJnzHI0Lq13Xzq4=
github.com/clipperhouse/displaywidth v0.10.0 h1:GhBG8WuerxjFQQYeuZAeVTuyxuX+UraiZGD4HJQ3Y8g=
github.com/clipperhouse/displaywidth v0.10.0/go.mod h1:XqJajYsaiEwkxOj4bowCTMcT1SgvHo9flfF3jQasdbs=
github.com/clipperhouse/uax29/v2 v2.6.0 h1:z0cDbUV+aPASdFb2/ndFnS9ts/WNXgTNNGFoKXuhpos=
github.com/clipperhouse/uax29/v2 v2.6.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
github.com/clipperhouse/displaywidth v0.11.0 h1:lBc6kY44VFw+TDx4I8opi/EtL9m20WSEFgwIwO+UVM8=
github.com/clipperhouse/displaywidth v0.11.0/go.mod h1:bkrFNkf81G8HyVqmKGxsPufD3JhNl3dSqnGhOoSD/o0=
github.com/clipperhouse/uax29/v2 v2.7.0 h1:+gs4oBZ2gPfVrKPthwbMzWZDaAFPGYK72F0NJv2v7Vk=
github.com/clipperhouse/uax29/v2 v2.7.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
@@ -60,13 +60,14 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag=
github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/lucasb-eyer/go-colorful v1.4.0 h1:UtrWVfLdarDgc44HcS7pYloGHJUjHV/4FwW4TvVgFr4=
github.com/lucasb-eyer/go-colorful v1.4.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/mattn/go-isatty v0.0.21 h1:xYae+lCNBP7QuW4PUnNG61ffM4hVIfm+zUzDuSzYLGs=
github.com/mattn/go-isatty v0.0.21/go.mod h1:ZXfXG4SQHsB/w3ZeOYbR0PrPwLy+n6xiMrJlRFqopa4=
github.com/mattn/go-localereader v0.0.2-0.20220822084749-2491eb6c1c75 h1:P8UmIzZMYDR+NGImiFvErt6VWfIRPuGM+vyjiEdkmIw=
github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw=
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
github.com/mattn/go-localereader v0.0.2-0.20220822084749-2491eb6c1c75/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
github.com/mattn/go-runewidth v0.0.23 h1:7ykA0T0jkPpzSvMS5i9uoNn2Xy3R383f9HDx3RybWcw=
github.com/mattn/go-runewidth v0.0.23/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
@@ -93,13 +94,13 @@ github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavM
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05ST2uO1exVfZPVqRC5o=
golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f h1:W3F4c+6OLc6H2lb//N1q4WpJkhzJCK5J6kUi1NTVXfM=
golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f/go.mod h1:J1xhfL/vlindoeF/aINzNzt2Bket5bjo9sdOYzOsU80=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI=
golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg=
golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

View File

@@ -57,5 +57,14 @@ func (n Naming) ScannerConfigPath() string { return n.ConfigDir() + "/scanner.ya
// ScannerDataDir returns the scanner data directory (e.g. "/var/lib/seamark/scanner").
func (n Naming) ScannerDataDir() string { return n.BasePath() + "/scanner" }
// Labeler returns the labeler binary/service name (e.g. "seamark-labeler").
func (n Naming) Labeler() string {
	const suffix = "-labeler"
	return n.ClientName + suffix
}
// LabelerConfigPath returns the labeler config file path
// (the "labeler.yaml" file inside the config directory).
func (n Naming) LabelerConfigPath() string {
	return n.ConfigDir() + "/labeler.yaml"
}
// LabelerDataDir returns the labeler data directory
// (e.g. "/var/lib/seamark/labeler"), rooted at the base path.
func (n Naming) LabelerDataDir() string {
	return n.BasePath() + "/labeler"
}
// S3Name returns the name used for S3 storage, user, and bucket.
// It is simply the client name, unmodified.
func (n Naming) S3Name() string {
	return n.ClientName
}

View File

@@ -29,21 +29,22 @@ var provisionCmd = &cobra.Command{
sshKey, _ := cmd.Flags().GetString("ssh-key")
s3Secret, _ := cmd.Flags().GetString("s3-secret")
withScanner, _ := cmd.Flags().GetBool("with-scanner")
return cmdProvision(token, zone, plan, sshKey, s3Secret, withScanner)
withLabeler, _ := cmd.Flags().GetBool("with-labeler")
return cmdProvision(token, zone, plan, sshKey, s3Secret, withScanner, withLabeler)
},
}
func init() {
provisionCmd.Flags().String("zone", "", "UpCloud zone (interactive picker if omitted)")
provisionCmd.Flags().String("plan", "", "Server plan (interactive picker if omitted)")
provisionCmd.Flags().String("ssh-key", "", "Path to SSH public key file (required)")
provisionCmd.Flags().String("ssh-key", "", "Path to SSH public key file (required when creating new servers)")
provisionCmd.Flags().String("s3-secret", "", "S3 secret access key (for existing object storage)")
provisionCmd.Flags().Bool("with-scanner", false, "Deploy vulnerability scanner alongside hold")
_ = provisionCmd.MarkFlagRequired("ssh-key")
provisionCmd.Flags().Bool("with-labeler", false, "Deploy content moderation labeler alongside appview")
rootCmd.AddCommand(provisionCmd)
}
func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bool) error {
func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner, withLabeler bool) error {
cfg, err := loadConfig(zone, plan, sshKeyPath, s3Secret)
if err != nil {
return err
@@ -98,6 +99,12 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
_ = saveState(state)
}
// Labeler setup
if withLabeler {
state.LabelerEnabled = true
_ = saveState(state)
}
fmt.Printf("Provisioning %s infrastructure in zone %s...\n", naming.DisplayName(), cfg.Zone)
if needsServers {
fmt.Printf("Server plan: %s\n", cfg.Plan)
@@ -146,6 +153,8 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
// Hold domain is zone-based (e.g. us-chi1.cove.seamark.dev)
holdDomain := cfg.Zone + ".cove." + cfg.BaseDomain
// Labeler domain is a fixed subdomain on the base domain (e.g. labeler.seamark.dev)
labelerDomain := "labeler." + cfg.BaseDomain
// Build config template values
vals := &ConfigValues{
@@ -157,6 +166,7 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
Zone: cfg.Zone,
HoldDomain: holdDomain,
HoldDid: "did:web:" + holdDomain,
LabelerDomain: labelerDomain,
BasePath: naming.BasePath(),
ScannerSecret: state.ScannerSecret,
}
@@ -198,7 +208,7 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
appviewCreated := false
if state.Appview.UUID != "" {
fmt.Printf("Appview: %s (exists)\n", state.Appview.UUID)
appviewScript, err := generateAppviewCloudInit(cfg, vals)
appviewScript, err := generateAppviewCloudInit(cfg, vals, state.LabelerEnabled)
if err != nil {
return err
}
@@ -212,9 +222,18 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
if err := syncConfigKeys("appview", state.Appview.PublicIP, naming.AppviewConfigPath(), appviewConfigYAML); err != nil {
return fmt.Errorf("appview config sync: %w", err)
}
if state.LabelerEnabled {
labelerConfigYAML, err := renderConfig(labelerConfigTmpl, vals)
if err != nil {
return fmt.Errorf("render labeler config: %w", err)
}
if err := syncConfigKeys("labeler", state.Appview.PublicIP, naming.LabelerConfigPath(), labelerConfigYAML); err != nil {
return fmt.Errorf("labeler config sync: %w", err)
}
}
} else {
fmt.Println("Creating appview server...")
appviewUserData, err := generateAppviewCloudInit(cfg, vals)
appviewUserData, err := generateAppviewCloudInit(cfg, vals, state.LabelerEnabled)
if err != nil {
return err
}
@@ -290,7 +309,7 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
fmt.Printf("Load balancer: %s (exists)\n", state.LB.UUID)
} else {
fmt.Println("Creating load balancer (Essentials tier)...")
lb, err := createLoadBalancer(ctx, svc, cfg, naming, state.Network.UUID, state.Appview.PrivateIP, state.Hold.PrivateIP, holdDomain)
lb, err := createLoadBalancer(ctx, svc, cfg, naming, state.Network.UUID, state.Appview.PrivateIP, state.Hold.PrivateIP, holdDomain, labelerDomain, state.LabelerEnabled)
if err != nil {
return fmt.Errorf("create LB: %w", err)
}
@@ -303,10 +322,30 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
return fmt.Errorf("LB forwarded headers: %w", err)
}
// Ensure route-hold rule includes forwarded headers action
if err := ensureLBHoldForwardedHeaders(ctx, svc, state.LB.UUID, holdDomain); err != nil {
return fmt.Errorf("LB hold forwarded headers: %w", err)
}
// Ensure labeler backend + route-labeler rule when labeler is enabled
if state.LabelerEnabled {
if err := ensureLBLabelerRoute(ctx, svc, state.LB.UUID, state.Appview.PrivateIP, labelerDomain); err != nil {
return fmt.Errorf("LB labeler route: %w", err)
}
}
// Always reconcile scanner block rule
if err := ensureLBScannerBlock(ctx, svc, state.LB.UUID); err != nil {
return fmt.Errorf("LB scanner block: %w", err)
}
// Always reconcile TLS certs (handles partial failures and re-runs)
tlsDomains := []string{cfg.BaseDomain}
tlsDomains = append(tlsDomains, cfg.RegistryDomains...)
tlsDomains = append(tlsDomains, holdDomain)
if state.LabelerEnabled {
tlsDomains = append(tlsDomains, labelerDomain)
}
if err := ensureLBCertificates(ctx, svc, state.LB.UUID, tlsDomains); err != nil {
return fmt.Errorf("LB certificates: %w", err)
}
@@ -338,6 +377,12 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
if err := buildLocal(rootDir, outputPath, "./cmd/appview"); err != nil {
return fmt.Errorf("build appview: %w", err)
}
if state.LabelerEnabled {
outputPath := filepath.Join(rootDir, "bin", "atcr-labeler")
if err := buildLocal(rootDir, outputPath, "./cmd/labeler"); err != nil {
return fmt.Errorf("build labeler: %w", err)
}
}
}
if holdCreated {
outputPath := filepath.Join(rootDir, "bin", "atcr-hold")
@@ -371,6 +416,13 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
if err := scpFile(localPath, state.Appview.PublicIP, remotePath); err != nil {
return fmt.Errorf("upload appview: %w", err)
}
if state.LabelerEnabled {
labelerLocal := filepath.Join(rootDir, "bin", "atcr-labeler")
labelerRemote := naming.InstallDir() + "/bin/" + naming.Labeler()
if err := scpFile(labelerLocal, state.Appview.PublicIP, labelerRemote); err != nil {
return fmt.Errorf("upload labeler: %w", err)
}
}
}
if holdCreated {
localPath := filepath.Join(rootDir, "bin", "atcr-hold")
@@ -388,6 +440,25 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
}
}
// Labeler binary: build and upload when labeler is enabled but appview was
// not freshly created (the appviewCreated branch above already handled it).
if state.LabelerEnabled && !appviewCreated {
rootDir := projectRoot()
if err := runGenerate(rootDir); err != nil {
return fmt.Errorf("go generate: %w", err)
}
fmt.Println("\nBuilding labeler locally (GOOS=linux GOARCH=amd64)...")
labelerLocal := filepath.Join(rootDir, "bin", "atcr-labeler")
if err := buildLocal(rootDir, labelerLocal, "./cmd/labeler"); err != nil {
return fmt.Errorf("build labeler: %w", err)
}
labelerRemote := naming.InstallDir() + "/bin/" + naming.Labeler()
fmt.Println("Deploying labeler binary...")
if err := scpFile(labelerLocal, state.Appview.PublicIP, labelerRemote); err != nil {
return fmt.Errorf("upload labeler: %w", err)
}
}
fmt.Println("\n=== Provisioning Complete ===")
fmt.Println()
fmt.Println("DNS records needed:")
@@ -397,6 +468,9 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
fmt.Printf(" CNAME %-24s → %s\n", rd, lbDNS)
}
fmt.Printf(" CNAME %-24s → %s\n", holdDomain, lbDNS)
if state.LabelerEnabled {
fmt.Printf(" CNAME %-24s → %s\n", labelerDomain, lbDNS)
}
} else {
fmt.Println(" (LB DNS name not yet available — check 'status' in a few minutes)")
}
@@ -411,11 +485,14 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
} else {
fmt.Println(" 1. Start services:")
}
services := []string{naming.Appview(), naming.Hold()}
if state.ScannerEnabled {
fmt.Printf(" systemctl start %s / %s / %s\n", naming.Appview(), naming.Hold(), naming.Scanner())
} else {
fmt.Printf(" systemctl start %s / %s\n", naming.Appview(), naming.Hold())
services = append(services, naming.Scanner())
}
if state.LabelerEnabled {
services = append(services, naming.Labeler())
}
fmt.Printf(" systemctl start %s\n", strings.Join(services, " / "))
fmt.Println(" 2. Configure DNS records above")
return nil
@@ -531,6 +608,9 @@ func objectStorageRegion(zone string) string {
}
func createServer(ctx context.Context, svc *service.Service, cfg *InfraConfig, templateUUID, networkUUID, title, userData string) (*ServerState, error) {
if cfg.SSHPublicKey == "" {
return nil, fmt.Errorf("creating server %s requires --ssh-key (path to SSH public key file)", title)
}
storageTier := "maxiops"
if strings.HasPrefix(strings.ToUpper(cfg.Plan), "DEV-") {
storageTier = "standard"
@@ -666,7 +746,83 @@ func createFirewallRules(ctx context.Context, svc *service.Service, serverUUID,
})
}
func createLoadBalancer(ctx context.Context, svc *service.Service, cfg *InfraConfig, naming Naming, networkUUID, appviewIP, holdIP, holdDomain string) (*upcloud.LoadBalancer, error) {
func createLoadBalancer(ctx context.Context, svc *service.Service, cfg *InfraConfig, naming Naming, networkUUID, appviewIP, holdIP, holdDomain, labelerDomain string, withLabeler bool) (*upcloud.LoadBalancer, error) {
frontendRules := []request.LoadBalancerFrontendRule{
{
Name: "set-forwarded-headers",
Priority: 1,
Matchers: []upcloud.LoadBalancerMatcher{},
Actions: []upcloud.LoadBalancerAction{
request.NewLoadBalancerSetForwardedHeadersAction(),
},
},
{
Name: "route-hold",
Priority: 10,
Matchers: []upcloud.LoadBalancerMatcher{
{
Type: upcloud.LoadBalancerMatcherTypeHost,
Host: &upcloud.LoadBalancerMatcherHost{
Value: holdDomain,
},
},
},
Actions: []upcloud.LoadBalancerAction{
request.NewLoadBalancerSetForwardedHeadersAction(),
{
Type: upcloud.LoadBalancerActionTypeUseBackend,
UseBackend: &upcloud.LoadBalancerActionUseBackend{
Backend: "hold",
},
},
},
},
}
backends := []request.LoadBalancerBackend{
{
Name: "appview",
Members: []request.LoadBalancerBackendMember{
{
Name: "appview-1",
Type: upcloud.LoadBalancerBackendMemberTypeStatic,
IP: appviewIP,
Port: 5000,
Weight: 100,
MaxSessions: 1000,
Enabled: true,
},
},
Properties: &upcloud.LoadBalancerBackendProperties{
HealthCheckType: upcloud.LoadBalancerHealthCheckTypeHTTP,
HealthCheckURL: "/health",
},
},
{
Name: "hold",
Members: []request.LoadBalancerBackendMember{
{
Name: "hold-1",
Type: upcloud.LoadBalancerBackendMemberTypeStatic,
IP: holdIP,
Port: 8080,
Weight: 100,
MaxSessions: 1000,
Enabled: true,
},
},
Properties: &upcloud.LoadBalancerBackendProperties{
HealthCheckType: upcloud.LoadBalancerHealthCheckTypeHTTP,
HealthCheckURL: "/xrpc/_health",
},
},
}
if withLabeler {
frontendRules = append(frontendRules, labelerFrontendRule(labelerDomain))
backends = append(backends, labelerBackend(appviewIP))
}
lb, err := svc.CreateLoadBalancer(ctx, &request.CreateLoadBalancerRequest{
Name: naming.LBName(),
Plan: "essentials",
@@ -694,36 +850,7 @@ func createLoadBalancer(ctx context.Context, svc *service.Service, cfg *InfraCon
Networks: []upcloud.LoadBalancerFrontendNetwork{
{Name: "public"},
},
Rules: []request.LoadBalancerFrontendRule{
{
Name: "set-forwarded-headers",
Priority: 1,
Matchers: []upcloud.LoadBalancerMatcher{},
Actions: []upcloud.LoadBalancerAction{
request.NewLoadBalancerSetForwardedHeadersAction(),
},
},
{
Name: "route-hold",
Priority: 10,
Matchers: []upcloud.LoadBalancerMatcher{
{
Type: upcloud.LoadBalancerMatcherTypeHost,
Host: &upcloud.LoadBalancerMatcherHost{
Value: holdDomain,
},
},
},
Actions: []upcloud.LoadBalancerAction{
{
Type: upcloud.LoadBalancerActionTypeUseBackend,
UseBackend: &upcloud.LoadBalancerActionUseBackend{
Backend: "hold",
},
},
},
},
},
Rules: frontendRules,
},
{
Name: "http-redirect",
@@ -759,44 +886,7 @@ func createLoadBalancer(ctx context.Context, svc *service.Service, cfg *InfraCon
},
},
Resolvers: []request.LoadBalancerResolver{},
Backends: []request.LoadBalancerBackend{
{
Name: "appview",
Members: []request.LoadBalancerBackendMember{
{
Name: "appview-1",
Type: upcloud.LoadBalancerBackendMemberTypeStatic,
IP: appviewIP,
Port: 5000,
Weight: 100,
MaxSessions: 1000,
Enabled: true,
},
},
Properties: &upcloud.LoadBalancerBackendProperties{
HealthCheckType: upcloud.LoadBalancerHealthCheckTypeHTTP,
HealthCheckURL: "/health",
},
},
{
Name: "hold",
Members: []request.LoadBalancerBackendMember{
{
Name: "hold-1",
Type: upcloud.LoadBalancerBackendMemberTypeStatic,
IP: holdIP,
Port: 8080,
Weight: 100,
MaxSessions: 1000,
Enabled: true,
},
},
Properties: &upcloud.LoadBalancerBackendProperties{
HealthCheckType: upcloud.LoadBalancerHealthCheckTypeHTTP,
HealthCheckURL: "/xrpc/_health",
},
},
},
Backends: backends,
})
if err != nil {
return nil, err
@@ -805,6 +895,55 @@ func createLoadBalancer(ctx context.Context, svc *service.Service, cfg *InfraCon
return lb, nil
}
// labelerBackend constructs the load-balancer backend definition for the
// labeler service. Traffic is forwarded to the appview server's private IP
// on port 5002, and health is probed via the labeler's DID document endpoint.
func labelerBackend(appviewIP string) request.LoadBalancerBackend {
	member := request.LoadBalancerBackendMember{
		Name:        "labeler-1",
		Type:        upcloud.LoadBalancerBackendMemberTypeStatic,
		IP:          appviewIP,
		Port:        5002,
		Weight:      100,
		MaxSessions: 1000,
		Enabled:     true,
	}
	props := &upcloud.LoadBalancerBackendProperties{
		HealthCheckType: upcloud.LoadBalancerHealthCheckTypeHTTP,
		HealthCheckURL:  "/.well-known/did.json",
	}
	return request.LoadBalancerBackend{
		Name:       "labeler",
		Members:    []request.LoadBalancerBackendMember{member},
		Properties: props,
	}
}
// labelerFrontendRule constructs the "route-labeler" frontend rule: requests
// whose Host header equals labelerDomain get forwarded headers set and are
// then routed to the "labeler" backend. Priority 20 places it after the
// hold route (priority 10).
func labelerFrontendRule(labelerDomain string) request.LoadBalancerFrontendRule {
	hostMatch := upcloud.LoadBalancerMatcher{
		Type: upcloud.LoadBalancerMatcherTypeHost,
		Host: &upcloud.LoadBalancerMatcherHost{Value: labelerDomain},
	}
	useLabeler := upcloud.LoadBalancerAction{
		Type:       upcloud.LoadBalancerActionTypeUseBackend,
		UseBackend: &upcloud.LoadBalancerActionUseBackend{Backend: "labeler"},
	}
	return request.LoadBalancerFrontendRule{
		Name:     "route-labeler",
		Priority: 20,
		Matchers: []upcloud.LoadBalancerMatcher{hostMatch},
		Actions: []upcloud.LoadBalancerAction{
			request.NewLoadBalancerSetForwardedHeadersAction(),
			useLabeler,
		},
	}
}
// ensureLBCertificates reconciles TLS certificate bundles on the load balancer.
// It skips domains that already have a TLS config attached and creates missing ones.
func ensureLBCertificates(ctx context.Context, svc *service.Service, lbUUID string, tlsDomains []string) error {
@@ -871,8 +1010,23 @@ func ensureLBForwardedHeaders(ctx context.Context, svc *service.Service, lbUUID
for _, r := range rules {
if r.Name == "set-forwarded-headers" {
fmt.Println(" Forwarded headers rule: exists")
return nil
// Verify it has the set_forwarded_headers action
for _, a := range r.Actions {
if a.SetForwardedHeaders != nil {
fmt.Println(" Forwarded headers rule: exists and valid")
return nil
}
}
// Rule exists but is misconfigured — delete and recreate
fmt.Println(" Forwarded headers rule: exists but misconfigured, recreating")
if err := svc.DeleteLoadBalancerFrontendRule(ctx, &request.DeleteLoadBalancerFrontendRuleRequest{
ServiceUUID: lbUUID,
FrontendName: "https",
Name: r.Name,
}); err != nil {
return fmt.Errorf("delete misconfigured forwarded headers rule: %w", err)
}
break
}
}
@@ -896,6 +1050,224 @@ func ensureLBForwardedHeaders(ctx context.Context, svc *service.Service, lbUUID
return nil
}
// ensureLBHoldForwardedHeaders guarantees that the "route-hold" frontend
// rule carries a set_forwarded_headers action alongside use_backend. Older
// LBs created the rule with use_backend only, so hold-routed traffic arrived
// without X-Forwarded-For; this reconciles such rules in place by deleting
// and recreating them with both actions.
func ensureLBHoldForwardedHeaders(ctx context.Context, svc *service.Service, lbUUID, holdDomain string) error {
	existing, err := svc.GetLoadBalancerFrontendRules(ctx, &request.GetLoadBalancerFrontendRulesRequest{
		ServiceUUID:  lbUUID,
		FrontendName: "https",
	})
	if err != nil {
		return fmt.Errorf("get frontend rules: %w", err)
	}
	for _, rule := range existing {
		if rule.Name != "route-hold" {
			continue
		}
		forwarded := false
		for _, action := range rule.Actions {
			if action.SetForwardedHeaders != nil {
				forwarded = true
				break
			}
		}
		if forwarded {
			fmt.Println(" Route-hold forwarded headers: exists")
			return nil
		}
		// Rule predates the forwarded-headers action — replace it wholesale.
		fmt.Println(" Route-hold forwarded headers: missing, recreating rule")
		if err := svc.DeleteLoadBalancerFrontendRule(ctx, &request.DeleteLoadBalancerFrontendRuleRequest{
			ServiceUUID:  lbUUID,
			FrontendName: "https",
			Name:         rule.Name,
		}); err != nil {
			return fmt.Errorf("delete route-hold rule: %w", err)
		}
		break
	}
	// Either the rule never existed or the stale one was just removed:
	// create it with both actions.
	fresh := request.LoadBalancerFrontendRule{
		Name:     "route-hold",
		Priority: 10,
		Matchers: []upcloud.LoadBalancerMatcher{
			{
				Type: upcloud.LoadBalancerMatcherTypeHost,
				Host: &upcloud.LoadBalancerMatcherHost{Value: holdDomain},
			},
		},
		Actions: []upcloud.LoadBalancerAction{
			request.NewLoadBalancerSetForwardedHeadersAction(),
			{
				Type:       upcloud.LoadBalancerActionTypeUseBackend,
				UseBackend: &upcloud.LoadBalancerActionUseBackend{Backend: "hold"},
			},
		},
	}
	if _, err := svc.CreateLoadBalancerFrontendRule(ctx, &request.CreateLoadBalancerFrontendRuleRequest{
		ServiceUUID:  lbUUID,
		FrontendName: "https",
		Rule:         fresh,
	}); err != nil {
		return fmt.Errorf("create route-hold rule: %w", err)
	}
	fmt.Println(" Route-hold forwarded headers: created")
	return nil
}
// ensureLBLabelerRoute idempotently wires labeler routing into an existing
// load balancer: a "labeler" backend targeting the appview server's private
// IP, and a "route-labeler" frontend rule matching labelerDomain. Used to
// retrofit labeler routing onto a pre-existing LB during a re-provision with
// --with-labeler. A rule with a wrong host matcher or backend target is
// deleted and recreated.
func ensureLBLabelerRoute(ctx context.Context, svc *service.Service, lbUUID, appviewIP, labelerDomain string) error {
	// Backend first — the frontend rule references it by name.
	backends, err := svc.GetLoadBalancerBackends(ctx, &request.GetLoadBalancerBackendsRequest{ServiceUUID: lbUUID})
	if err != nil {
		return fmt.Errorf("get backends: %w", err)
	}
	backendExists := false
	for _, be := range backends {
		if be.Name == "labeler" {
			backendExists = true
			break
		}
	}
	if backendExists {
		fmt.Println(" Labeler backend: exists")
	} else {
		if _, err := svc.CreateLoadBalancerBackend(ctx, &request.CreateLoadBalancerBackendRequest{
			ServiceUUID: lbUUID,
			Backend:     labelerBackend(appviewIP),
		}); err != nil {
			return fmt.Errorf("create labeler backend: %w", err)
		}
		fmt.Println(" Labeler backend: created")
	}
	// Then the frontend rule: verify host matcher and use_backend action.
	rules, err := svc.GetLoadBalancerFrontendRules(ctx, &request.GetLoadBalancerFrontendRulesRequest{
		ServiceUUID:  lbUUID,
		FrontendName: "https",
	})
	if err != nil {
		return fmt.Errorf("get frontend rules: %w", err)
	}
	for _, rule := range rules {
		if rule.Name != "route-labeler" {
			continue
		}
		matchesHost := false
		for _, m := range rule.Matchers {
			if m.Host != nil && m.Host.Value == labelerDomain {
				matchesHost = true
				break
			}
		}
		targetsLabeler := false
		for _, a := range rule.Actions {
			if a.UseBackend != nil && a.UseBackend.Backend == "labeler" {
				targetsLabeler = true
				break
			}
		}
		if matchesHost && targetsLabeler {
			fmt.Println(" Route-labeler rule: exists and valid")
			return nil
		}
		fmt.Println(" Route-labeler rule: exists but misconfigured, recreating")
		if err := svc.DeleteLoadBalancerFrontendRule(ctx, &request.DeleteLoadBalancerFrontendRuleRequest{
			ServiceUUID:  lbUUID,
			FrontendName: "https",
			Name:         rule.Name,
		}); err != nil {
			return fmt.Errorf("delete route-labeler rule: %w", err)
		}
		break
	}
	// Missing (or just deleted) — create from the canonical definition.
	if _, err := svc.CreateLoadBalancerFrontendRule(ctx, &request.CreateLoadBalancerFrontendRuleRequest{
		ServiceUUID:  lbUUID,
		FrontendName: "https",
		Rule:         labelerFrontendRule(labelerDomain),
	}); err != nil {
		return fmt.Errorf("create route-labeler rule: %w", err)
	}
	fmt.Println(" Route-labeler rule: created")
	return nil
}
// ensureLBScannerBlock installs (or repairs) a "block-scanners" rule on the
// "https" frontend that returns 403 for common scanner probe paths ending in
// .php, .asp, .aspx, .jsp, .cgi, or .env. An existing rule is considered
// valid if it carries an http_return action; otherwise it is recreated.
func ensureLBScannerBlock(ctx context.Context, svc *service.Service, lbUUID string) error {
	rules, err := svc.GetLoadBalancerFrontendRules(ctx, &request.GetLoadBalancerFrontendRulesRequest{
		ServiceUUID:  lbUUID,
		FrontendName: "https",
	})
	if err != nil {
		return fmt.Errorf("get frontend rules: %w", err)
	}
	for _, rule := range rules {
		if rule.Name != "block-scanners" {
			continue
		}
		valid := false
		for _, action := range rule.Actions {
			if action.HTTPReturn != nil {
				valid = true
				break
			}
		}
		if valid {
			fmt.Println(" Scanner block rule: exists and valid")
			return nil
		}
		fmt.Println(" Scanner block rule: exists but misconfigured, recreating")
		if err := svc.DeleteLoadBalancerFrontendRule(ctx, &request.DeleteLoadBalancerFrontendRuleRequest{
			ServiceUUID:  lbUUID,
			FrontendName: "https",
			Name:         rule.Name,
		}); err != nil {
			return fmt.Errorf("delete misconfigured scanner block rule: %w", err)
		}
		break
	}
	caseInsensitive := true
	// Priority 2: runs after set-forwarded-headers (1) and before the
	// host-routing rules (10+).
	blockRule := request.LoadBalancerFrontendRule{
		Name:     "block-scanners",
		Priority: 2,
		Matchers: []upcloud.LoadBalancerMatcher{
			request.NewLoadBalancerPathMatcher(
				upcloud.LoadBalancerStringMatcherMethodRegexp,
				`\.(php|asp|aspx|jsp|cgi|env)$`,
				&caseInsensitive,
			),
		},
		Actions: []upcloud.LoadBalancerAction{
			{
				Type: upcloud.LoadBalancerActionTypeHTTPReturn,
				HTTPReturn: &upcloud.LoadBalancerActionHTTPReturn{
					Status:      403,
					ContentType: "text/plain",
					Payload:     base64.StdEncoding.EncodeToString([]byte("Forbidden")),
				},
			},
		},
	}
	if _, err := svc.CreateLoadBalancerFrontendRule(ctx, &request.CreateLoadBalancerFrontendRuleRequest{
		ServiceUUID:  lbUUID,
		FrontendName: "https",
		Rule:         blockRule,
	}); err != nil {
		return fmt.Errorf("create scanner block rule: %w", err)
	}
	fmt.Println(" Scanner block rule: created")
	return nil
}
// lookupObjectStorage discovers details of an existing Managed Object Storage.
func lookupObjectStorage(ctx context.Context, svc *service.Service, uuid string) (ObjectStorageState, error) {
storage, err := svc.GetManagedObjectStorage(ctx, &request.GetManagedObjectStorageRequest{

View File

@@ -20,6 +20,7 @@ type InfraState struct {
ObjectStorage ObjectStorageState `json:"object_storage"`
ScannerEnabled bool `json:"scanner_enabled,omitempty"`
ScannerSecret string `json:"scanner_secret,omitempty"`
LabelerEnabled bool `json:"labeler_enabled,omitempty"`
}
// Naming returns a Naming helper, defaulting to "seamark" if ClientName is empty.

View File

@@ -0,0 +1,25 @@
[Unit]
Description={{.DisplayName}} Labeler (Content Moderation)
After=network-online.target {{.AppviewServiceName}}.service
Wants=network-online.target
[Service]
Type=simple
User={{.User}}
Group={{.User}}
ExecStart={{.BinaryPath}} serve --config {{.ConfigPath}}
Restart=on-failure
RestartSec=10
ReadWritePaths={{.DataDir}}
ProtectSystem=strict
ProtectHome=yes
NoNewPrivileges=yes
PrivateTmp=yes
StandardOutput=journal
StandardError=journal
SyslogIdentifier={{.ServiceName}}
[Install]
WantedBy=multi-user.target

View File

@@ -24,7 +24,8 @@ var updateCmd = &cobra.Command{
target = args[0]
}
withScanner, _ := cmd.Flags().GetBool("with-scanner")
return cmdUpdate(target, withScanner)
withLabeler, _ := cmd.Flags().GetBool("with-labeler")
return cmdUpdate(target, withScanner, withLabeler)
},
}
@@ -40,11 +41,12 @@ var sshCmd = &cobra.Command{
func init() {
updateCmd.Flags().Bool("with-scanner", false, "Enable and deploy vulnerability scanner alongside hold")
updateCmd.Flags().Bool("with-labeler", false, "Enable and deploy content moderation labeler alongside appview")
rootCmd.AddCommand(updateCmd)
rootCmd.AddCommand(sshCmd)
}
func cmdUpdate(target string, withScanner bool) error {
func cmdUpdate(target string, withScanner, withLabeler bool) error {
state, err := loadState()
if err != nil {
return err
@@ -67,6 +69,12 @@ func cmdUpdate(target string, withScanner bool) error {
_ = saveState(state)
}
// Enable labeler retroactively via --with-labeler on update
if withLabeler && !state.LabelerEnabled {
state.LabelerEnabled = true
_ = saveState(state)
}
vals := configValsFromState(state)
targets := map[string]struct {
@@ -144,6 +152,21 @@ func cmdUpdate(target string, withScanner bool) error {
}
}
// Build labeler locally if needed
needLabeler := false
for _, name := range toUpdate {
if name == "appview" && state.LabelerEnabled {
needLabeler = true
break
}
}
if needLabeler {
outputPath := filepath.Join(rootDir, "bin", "atcr-labeler")
if err := buildLocal(rootDir, outputPath, "./cmd/labeler"); err != nil {
return fmt.Errorf("build labeler: %w", err)
}
}
// Deploy each target
for _, name := range toUpdate {
t := targets[name]
@@ -244,13 +267,65 @@ curl -sf http://localhost:9090/healthz > /dev/null && echo "SCANNER_HEALTH_OK" |
`
}
// Labeler additions for appview server
labelerRestart := ""
if name == "appview" && state.LabelerEnabled {
// Sync labeler config keys
labelerConfigYAML, err := renderConfig(labelerConfigTmpl, vals)
if err != nil {
return fmt.Errorf("render labeler config: %w", err)
}
if err := syncConfigKeys("labeler", t.ip, naming.LabelerConfigPath(), labelerConfigYAML); err != nil {
return fmt.Errorf("labeler config sync: %w", err)
}
// Sync labeler service unit
labelerUnit, err := renderLabelerServiceUnit(labelerServiceUnitParams{
DisplayName: naming.DisplayName(),
User: naming.SystemUser(),
BinaryPath: naming.InstallDir() + "/bin/" + naming.Labeler(),
ConfigPath: naming.LabelerConfigPath(),
DataDir: naming.BasePath(),
ServiceName: naming.Labeler(),
AppviewServiceName: naming.Appview(),
})
if err != nil {
return fmt.Errorf("render labeler service unit: %w", err)
}
labelerUnitChanged, err := syncServiceUnit("labeler", t.ip, naming.Labeler(), labelerUnit)
if err != nil {
return fmt.Errorf("labeler service unit sync: %w", err)
}
if labelerUnitChanged {
daemonReload = "systemctl daemon-reload"
}
// Upload labeler binary
labelerLocal := filepath.Join(rootDir, "bin", "atcr-labeler")
labelerRemote := naming.InstallDir() + "/bin/" + naming.Labeler()
if err := scpFile(labelerLocal, t.ip, labelerRemote); err != nil {
return fmt.Errorf("upload labeler: %w", err)
}
// Ensure labeler data dirs exist
labelerSetup := fmt.Sprintf(`mkdir -p %s
chown -R %s:%s %s`,
naming.LabelerDataDir(),
naming.SystemUser(), naming.SystemUser(), naming.LabelerDataDir())
if _, err := runSSH(t.ip, labelerSetup, false); err != nil {
return fmt.Errorf("labeler dir setup: %w", err)
}
labelerRestart = fmt.Sprintf("\nsystemctl restart %s", naming.Labeler())
}
// Restart services and health check
restartScript := fmt.Sprintf(`set -euo pipefail
%s
systemctl restart %s%s
systemctl restart %s%s%s
sleep 2
curl -sf %s > /dev/null && echo "HEALTH_OK" || echo "HEALTH_FAIL"
%s`, daemonReload, t.serviceName, scannerRestart, t.healthURL, scannerHealthCheck)
%s`, daemonReload, t.serviceName, scannerRestart, labelerRestart, t.healthURL, scannerHealthCheck)
output, err := runSSH(t.ip, restartScript, true)
if err != nil {
@@ -289,6 +364,7 @@ func configValsFromState(state *InfraState) *ConfigValues {
naming := state.Naming()
_, baseDomain, _, _ := extractFromAppviewTemplate()
holdDomain := state.Zone + ".cove." + baseDomain
labelerDomain := "labeler." + baseDomain
return &ConfigValues{
S3Endpoint: state.ObjectStorage.Endpoint,
@@ -299,6 +375,7 @@ func configValsFromState(state *InfraState) *ConfigValues {
Zone: state.Zone,
HoldDomain: holdDomain,
HoldDid: "did:web:" + holdDomain,
LabelerDomain: labelerDomain,
BasePath: naming.BasePath(),
ScannerSecret: state.ScannerSecret,
}

View File

@@ -19,6 +19,9 @@ services:
# ATCR_SERVER_CLIENT_SHORT_NAME: "Seamark"
ATCR_SERVER_MANAGED_HOLDS: did:web:172.28.0.3%3A8080
ATCR_SERVER_DEFAULT_HOLD_DID: did:web:172.28.0.3%3A8080
# Labeler URL (HTTP for dev — ParseLabelerURL accepts it directly so we don't
# have to round-trip through did:web → https:// resolution).
ATCR_LABELER_DID: http://172.28.0.4:5002
ATCR_SERVER_TEST_MODE: true
ATCR_LOG_LEVEL: debug
LOG_SHIPPER_BACKEND: victoria
@@ -32,7 +35,7 @@ services:
max-file: "1"
volumes:
# Mount source code for Air hot reload
- .:/app
- .:/app:z
# Cache go modules between rebuilds
- go-mod-cache:/go/pkg/mod
# UI database (includes OAuth sessions, devices, and Jetstream cache)
@@ -62,6 +65,13 @@ services:
HOLD_REGISTRATION_ALLOW_ALL_CREW: true
HOLD_SERVER_TEST_MODE: true
HOLD_LOG_LEVEL: debug
# Subscribe to the dev labeler so takedowns purge records on this hold and
# GC honors the reversibility window. Same value the appview uses for
# ATCR_LABELER_DID — accepts a did:web identifier or a raw URL.
HOLD_LABELER_DID: http://172.28.0.4:5002
# Short grace window for dev so the takedown→GC path is exercisable without
# waiting weeks. Production default is 720h (30 days).
HOLD_LABELER_GRACE_WINDOW: 1h
LOG_SHIPPER_BACKEND: victoria
LOG_SHIPPER_URL: http://172.28.0.10:9428
# S3 storage config comes from env_file (AWS_*, S3_*)
@@ -84,7 +94,7 @@ services:
- "8080:8080"
volumes:
# Mount source code for Air hot reload
- .:/app
- .:/app:z
# Cache go modules between rebuilds
- go-mod-cache:/go/pkg/mod
# PDS data (carstore SQLite + signing keys)
@@ -97,6 +107,52 @@ services:
atcr-network:
ipv4_address: 172.28.0.3
atcr-labeler:
# Base config: config-labeler.example.yaml (passed via Air entrypoint).
# Env vars below override config file values for local dev.
#
# Why did:web for dev: did:plc would submit a real PLC operation to plc.directory
# for every fresh dev environment, polluting production with throwaway DIDs that
# point at 172.28.0.x. did:web is purely self-served via /.well-known/did.json so
# nothing leaks. Switch to plc + a real public_url for production.
environment:
LABELER_LABELER_DID_METHOD: web
LABELER_LABELER_PUBLIC_URL: http://172.28.0.4:5002
LABELER_LABELER_OWNER_DID: did:plc:pddp4xt5lgnv2qsegbzzs4xg
LABELER_LABELER_DATA_DIR: /var/lib/atcr-labeler
LABELER_SERVER_TEST_MODE: true
LABELER_LOG_LEVEL: debug
LOG_SHIPPER_BACKEND: victoria
LOG_SHIPPER_URL: http://172.28.0.10:9428
logging:
driver: json-file
options:
max-size: "10m"
max-file: "1"
build:
context: .
dockerfile: Dockerfile.dev
args:
AIR_CONFIG: .air.labeler.toml
image: atcr-labeler-dev:latest
container_name: atcr-labeler
ports:
- "5002:5002"
volumes:
# Mount source code for Air hot reload
- .:/app:z
- go-mod-cache:/go/pkg/mod
# Persist signing key + did.txt + label database across container restarts so
# dev signatures stay verifiable. Wipe with `docker compose down -v` to reset.
- atcr-labeler:/var/lib/atcr-labeler
restart: unless-stopped
dns:
- 8.8.8.8
- 1.1.1.1
networks:
atcr-network:
ipv4_address: 172.28.0.4
# Victoria Logs for centralized log storage
# Uncomment to enable, then set LOG_SHIPPER_* env vars above
victorialogs:
@@ -123,6 +179,7 @@ networks:
volumes:
atcr-hold:
atcr-labeler:
atcr-auth:
atcr-ui:
go-mod-cache:

View File

@@ -206,8 +206,8 @@ server:
billing:
enabled: true
currency: usd
success_url: "{base_url}/settings#storage"
cancel_url: "{base_url}/settings#storage"
success_url: "{base_url}/settings/billing"
cancel_url: "{base_url}/settings/billing"
tiers:
- name: "Free"
# No stripe_price = free tier

View File

@@ -68,7 +68,7 @@ git push origin v1.0.0
**Usage:**
```bash
# Linux/macOS
curl -fsSL https://atcr.io/install.sh | bash
curl -fsSL https://atcr.io/static/install.sh | bash
# Windows (PowerShell)
iwr -useb https://atcr.io/install.ps1 | iex
@@ -113,7 +113,7 @@ go install atcr.io/cmd/credential-helper@latest
sudo mv $(go env GOPATH)/bin/credential-helper /usr/local/bin/docker-credential-atcr
```
**Note:** This requires Go 1.23+ and compiles locally.
**Note:** This requires Go 1.26+ and compiles locally.
## Release Process
@@ -138,7 +138,7 @@ sudo mv $(go env GOPATH)/bin/credential-helper /usr/local/bin/docker-credential-
- Visit: https://github.com/atcr-io/atcr/releases
- Test install script:
```bash
ATCR_VERSION=v1.0.0 curl -fsSL https://atcr.io/install.sh | bash
ATCR_VERSION=v1.0.0 curl -fsSL https://atcr.io/static/install.sh | bash
docker-credential-atcr version
```
@@ -266,7 +266,7 @@ Docker looks for binaries named `docker-credential-*` in PATH:
# Clean install in fresh environment
docker run --rm -it ubuntu:latest bash
apt update && apt install -y curl
curl -fsSL https://atcr.io/install.sh | bash
curl -fsSL https://atcr.io/static/install.sh | bash
```
2. **Test Docker integration:**

View File

@@ -37,6 +37,7 @@ This document lists all XRPC endpoints implemented in the Hold service (`pkg/hol
|----------|--------|-------------|
| `/xrpc/com.atproto.repo.deleteRecord` | POST | Delete a record |
| `/xrpc/com.atproto.repo.uploadBlob` | POST | Upload ATProto blob |
| `/xrpc/io.atcr.hold.purgeManifest` | POST | Purge layer/scan/image-config records for a manifest (eager delete + takedown). Idempotent. |
### Auth Required (Service Token or DPoP)
@@ -82,6 +83,7 @@ All require `blob:write` permission via service token:
| `/xrpc/io.atcr.hold.getQuota` | GET | none | Get user quota info |
| `/xrpc/io.atcr.hold.getLayersForManifest` | GET | none | Get layer records for a manifest AT-URI |
| `/xrpc/io.atcr.hold.image.getConfig` | GET | none | Get OCI image config record for a manifest digest |
| `/xrpc/io.atcr.hold.purgeManifest` | POST | owner/crew admin | Purge layer/scan/image-config records for a single manifest URI. Called by appview on UI delete; called internally on takedown receipt. Does not delete S3 blobs (GC handles those). |
| `/xrpc/io.atcr.hold.listTiers` | GET | none | List hold's available tiers with quotas and features (scanOnPush) |
| `/xrpc/io.atcr.hold.updateCrewTier` | POST | appview token | Update crew member's tier |

View File

@@ -0,0 +1,68 @@
# How container registries count image pulls
**A GET request to the manifest endpoint is the universal unit of a "pull," but every registry counts differently.** The OCI Distribution Specification deliberately says nothing about pull counting, rate limiting, or metrics — these are left entirely to each registry implementation. Docker Hub, the most consequential registry for pull counting, defines a pull as a `GET /v2/<name>/manifests/<reference>` request; HEAD requests and blob downloads do not count. Other registries diverge significantly: AWS ECR throttles per-API-endpoint, Google Artifact Registry counts raw HTTP requests against a project quota, Azure ACR tracks ReadOps per SKU tier, and Quay.io essentially imposes no pull-specific limits at all.
## The OCI spec intentionally stays silent on pull counting
The OCI Distribution Specification (v1.1.1) defines "Pull" only as a **workflow category** — the highest-priority conformance tier that all registries must support. The spec describes the process: "The process of pulling an object centers around retrieving two components: the manifest and one or more blobs. Typically, the first step in pulling an object is to retrieve the manifest." It defines four HTTP endpoints in this category: `GET` and `HEAD` on both `/v2/<name>/manifests/<reference>` and `/v2/<name>/blobs/<digest>`.
Critically, the spec contains **zero guidance** on how to count pulls, implement rate limiting, define deduplication windows, or report usage metrics. There are no rate-limit response headers, no 429 status codes, and no throttling semantics in the specification. A search of the opencontainers/distribution-spec GitHub repository reveals no open or closed issues proposing standardization of pull counting — the community treats this as firmly out of scope for a wire protocol specification. Every registry is free to define "pull" however it chooses.
At the protocol level, a single `docker pull` command generates a cascade of HTTP requests: one or more manifest GETs, plus one GET per blob (config + layers). For a multi-arch image, an additional manifest GET is required for platform resolution. The Docker daemon is somewhat intelligent here — it issues a HEAD request first to check the digest, and only proceeds to a GET if the image has changed.
## Docker Hub counts manifest GETs in a 6-hour sliding window
Docker Hub's pull counting is the most consequential and best-documented system. The official documentation states: **"Using GET emulates a real pull and counts towards the limit. Using HEAD won't."** Only `GET` requests to `/v2/*/manifests/*` are counted. Blob/layer downloads (`/v2/*/blobs/*`) do not count. Even if all layers are already cached locally and nothing is actually downloaded, the manifest GET still counts as a pull.
**Rate limits operate on a 6-hour (21,600-second) sliding window:**
- Unauthenticated users: **100 pulls per IPv4 address** (or per /64 IPv6 subnet)
- Personal accounts (authenticated): **200 pulls per user**
- Pro, Team, and Business accounts: **unlimited**
The response headers `ratelimit-limit: 100;w=21600`, `ratelimit-remaining`, and `docker-ratelimit-source` allow clients to monitor their status. When exceeded, Docker Hub returns HTTP 429.
**There is no deduplication for rate limiting.** Every manifest GET counts individually, even for the same image pulled repeatedly. As one analysis confirmed: "If you execute `docker pull alpine` twice, you come two steps closer to exhausting your rate limit. Even if on the second command execution no image was transferred, two pull requests referring to the same image tag count as two and not one." However, the Docker daemon's built-in behavior mitigates this: it sends a HEAD request first (which doesn't count), compares the digest locally, and only issues a GET if the image has changed. Tools that bypass this optimization and issue GETs directly will consume quota needlessly.
An important distinction: the **pull count statistic** displayed on Docker Hub image pages (the cumulative "10M+" counter) uses a **different tracking system** than rate limiting. The dashboard counter represents all-time manifest fetches, is not real-time, and has known quirks — for instance, GitHub issue docker/hub-feedback#2182 reports the counter incrementing by 4 per pushed tag. Docker's usage dashboard separately tracks "version_checks" (HEAD requests) and "pulls" (GET requests) in exportable CSV data.
## Multi-arch images count as one pull per architecture
When pulling a multi-arch image (OCI image index or Docker manifest list), the client first GETs the manifest list, then GETs the platform-specific manifest, then downloads blobs. **Docker Hub groups the manifest list GET and the platform-specific manifest GET together as one pull per architecture.** A Docker community moderator confirmed: "for multi-arch images 1 request contains one on the index and one on the platform specific manifest so basically 20 requests would be 10 pulls." The official docs state: "A pull for a multi-arch image will count as one pull for each different architecture."
This has significant implications for CI/CD. A `docker buildx build --platform linux/amd64,linux/arm64 --push` command pulls base images for each platform, so a `FROM` directive referencing a Docker Hub image will consume **two pulls** (one per architecture). Matrix CI strategies that build many platform combinations can exhaust rate limits quickly.
The `docker buildx imagetools create` command — used to assemble manifest lists from existing platform-specific images — also counts against rate limits. Buildx maintainer tonistiigi confirmed: "Yes, pulling down a manifest is what impacts the rate limit counter." Creating a manifest list from 4 platform digests produces at least 4 manifest GETs. Similarly, `docker manifest inspect` performs a GET and counts as a pull; there is no HEAD-only alternative.
For GitHub Actions specifically, GitHub-hosted runners pulling public Docker Hub images are **exempt from rate limits** due to an IP whitelisting agreement with Docker. Self-hosted runners are not exempt. When using buildx's `docker-container` driver, credentials must be explicitly passed to the builder container — a common misconfiguration that causes builds to hit unauthenticated rate limits even when the user has logged in.
## Each registry takes a fundamentally different approach
No two registries count pulls the same way. The differences are architectural, not just numerical.
**GitHub Container Registry (ghcr.io)** bills by **bandwidth transferred**, not request counts. Public container pulls have no apparent rate limit (or an extremely generous one — one user observed an internal threshold of ~44,000 requests/minute). GHCR displays no public pull counter. Pulls from GitHub Actions using `GITHUB_TOKEN` are completely free and uncounted. The rate limit documentation is minimal, with a notable open issue (github/docs#24504) requesting better documentation.
**AWS ECR** uses **per-API-endpoint token bucket throttling** — a fundamentally different model. Each registry API has its own rate: `BatchGetImage` (manifest retrieval) allows **2,000 requests/second**, while `GetDownloadUrlForLayer` allows **3,000/second**. These are per-account, per-region limits using a burst-capable token bucket. All quotas are adjustable via AWS Service Quotas. There is no single "pull count" — each API call counts against its respective endpoint quota.
**Google Artifact Registry** counts **every HTTP request** against a per-project, per-region quota of **60,000 requests/minute**. Google's docs explicitly state: "A Docker pull or push usually makes multiple HTTP requests, so quota is charged for each request." A pull of an image with 5 layers could consume 6-7 quota units. Remote repositories acting as pull-through caches have separate upstream limits (e.g., 600 reads/minute from Docker Hub per organization per region).
**Azure Container Registry** tracks **ReadOps per minute** on a SKU-tiered model: Basic (~1,000/min), Standard (~3,000/min), Premium (~10,000/min). Microsoft documents that "a docker pull translates to multiple read operations based on the number of layers in the image, plus the manifest retrieval." Each layer GET and manifest GET count as separate ReadOps. Bandwidth is throttled independently (30-100 Mbps depending on tier). HEAD requests count as ReadOps.
**Quay.io** is the most permissive: it "does not restrict anonymous pulls against its repositories and only rate limits in the most severe circumstances to maintain service levels (e.g., tens of requests per second from the same IP address)." There is no pull-specific rate limit — only a general abuse-prevention API rate limit of a few requests per second per IP. No public pull counter exists.
| Registry | Unit of counting | Effective limit | HEAD counts? | Deduplication | Public counter |
|---|---|---|---|---|---|
| Docker Hub | Manifest GET | 100-200/6hrs (free) | No | None | Yes (all-time) |
| GHCR | Bandwidth | ~44K req/min (observed) | Unknown | None | No |
| AWS ECR | Per-API calls | 2,000-3,000/sec | N/A (AWS API) | None | No |
| Google AR | All HTTP requests | 60,000/min/project | Likely yes | None | No |
| Azure ACR | ReadOps | 1,000-10,000/min by SKU | Yes | None | No |
| Quay.io | API requests (abuse only) | ~tens/sec/IP | Likely yes | None | No |
## Practical implications and key takeaways
The fragmentation in pull counting creates real operational complexity. Docker Hub's model — counting only manifest GETs, excluding HEAD requests and blob downloads, using a 6-hour window with no deduplication — is the most restrictive and the most precisely defined. Every other major registry offers orders-of-magnitude more headroom, but counts differently: some charge per HTTP request (including blob downloads), others per API call, others per bandwidth.
For teams optimizing pull behavior, the critical insight is that **HEAD requests are the escape hatch on Docker Hub**. Tools like Watchtower switched to HEAD-based digest checks specifically to avoid incrementing pull counts. The Docker daemon itself uses this optimization — issuing HEAD first, then GET only if needed. For multi-arch workflows in CI, each platform multiplies the pull cost for base images, making authenticated access or registry mirrors essential for high-volume pipelines. The `docker buildx imagetools create` and `docker manifest inspect` commands both consume pulls, a fact confirmed by maintainers but poorly documented.
The absence of any OCI-level standard means this landscape will likely remain fragmented. Docker Hub's definition — manifest GET as the unit of pull — has become a de facto convention that influences how the community thinks about pulls, but registries that charge per HTTP request (Google, Azure) or per bandwidth (GHCR) are measuring fundamentally different things under the same word.

View File

@@ -202,6 +202,52 @@ SBOM results are stored in two places:
- Record key: SBOM manifest digest
- Contains reference to subject image
## Scan Record Status
Every scan attempt produces an `io.atcr.hold.scan` record. The `status` field
tells the appview how to render the result:
| Status | Meaning | Stale-loop behavior |
|-------------|--------------------------------------------------------------------------------------------------|--------------------------------------------------|
| `ok` (or empty) | Scanner produced an SBOM. Vulnerability counts populated; SBOM blob populated. | Re-scanned on the rescan interval (default 7d). |
| `failed` | Scanner ran but errored (network, OOM, parse failure). No SBOM, no counts. | Re-scanned on the rescan interval — failures may be transient. |
| `skipped` | Scanner intentionally bypassed the artifact (helm chart, in-toto attestation, DSSE envelope). The `reason` field explains why. | **Never re-queued.** A skipped record won't change without a code change in the scanner. |
Records written before the `status` field existed have an empty status. The
appview treats empty + nil-blob + zero-count as failed (legacy fallback).
### Unscannable artifact types
The scanner skips artifacts whose config media type appears in
`unscannableConfigTypes` (`scanner/internal/scan/worker.go`). Currently:
- `application/vnd.cncf.helm.config.v1+json` — Helm charts. Rendered with a
helm-aware digest page (`pkg/appview/handlers/digest.go`) that shows
Chart.yaml metadata instead of layers / vulns / SBOM.
- `application/vnd.in-toto+json` — in-toto attestations.
- `application/vnd.dsse.envelope.v1+json` — DSSE envelopes (SLSA provenance).
For these types the appview's vuln/SBOM tabs render
*"Vulnerability scanning isn't applied to this artifact type."* — no retry hint.
To add a new unscannable type: append the media type to
`unscannableConfigTypes`. Existing records won't auto-rewrite — run
`atcr-hold scan-backfill` once to convert any pre-existing failure records
into skipped records (see below).
### Backfill tool
`atcr-hold scan-backfill --config <path>` walks every scan record on the
hold and rewrites legacy ones (empty status + nil blob + zero counts) using
the manifest's layer media types as a signal:
- Layer media type contains `helm.chart.content`, `in-toto`, or
  `dsse.envelope` → `status="skipped"`.
- Otherwise → `status="failed"`.
The tool is idempotent and preserves the original `scannedAt`, so it can be
re-run safely. Run once per hold after upgrading.
## Accessing SBOMs
Multiple methods for discovering and retrieving SBOM data.

View File

@@ -141,7 +141,6 @@ jetstream.backfill_enabled → ATCR_JETSTREAM_BACKFILL_ENABLED
| `health` | Hold health check interval and cache TTL | Sensible defaults (15m) |
| `log_shipper` | Remote log shipping (Victoria, OpenSearch, Loki) | Disabled by default |
| `legal` | Terms/privacy page customization | Optional |
| `credential_helper` | Credential helper download source | Optional |
### Auto-generated files

179
go.mod
View File

@@ -1,19 +1,19 @@
module atcr.io
go 1.25.7
go 1.26.2
require (
github.com/aws/aws-sdk-go v1.55.8
github.com/aws/aws-sdk-go-v2 v1.41.1
github.com/aws/aws-sdk-go-v2/config v1.32.7
github.com/aws/aws-sdk-go-v2/credentials v1.19.7
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0
github.com/bluesky-social/indigo v0.0.0-20260213003059-85cdd0d6871c
github.com/charmbracelet/huh v0.8.0
github.com/charmbracelet/huh/spinner v0.0.0-20260216111231-bffc99a26329
github.com/aws/aws-sdk-go-v2 v1.41.6
github.com/aws/aws-sdk-go-v2/config v1.32.16
github.com/aws/aws-sdk-go-v2/credentials v1.19.15
github.com/aws/aws-sdk-go-v2/service/s3 v1.99.1
github.com/bluesky-social/indigo v0.0.0-20260417172304-7da09df6081d
github.com/charmbracelet/huh v1.0.0
github.com/charmbracelet/huh/spinner v0.0.0-20260223110133-9dc45e34a40b
github.com/charmbracelet/lipgloss v1.1.0
github.com/did-method-plc/go-didplc v0.0.0-20251009212921-7b7a252b8019
github.com/distribution/distribution/v3 v3.0.0
github.com/distribution/distribution/v3 v3.1.0
github.com/distribution/reference v0.6.0
github.com/earthboundkid/versioninfo/v2 v2.24.1
github.com/go-chi/chi/v5 v5.2.5
@@ -21,16 +21,17 @@ require (
github.com/go-viper/mapstructure/v2 v2.5.0
github.com/goki/freetype v1.0.5
github.com/golang-jwt/jwt/v5 v5.3.1
github.com/google/go-querystring v1.2.0
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674
github.com/ipfs/go-block-format v0.2.3
github.com/ipfs/go-cid v0.6.0
github.com/ipfs/go-cid v0.6.1
github.com/ipfs/go-ipfs-blockstore v1.3.1
github.com/ipfs/go-ipld-cbor v0.2.1
github.com/ipfs/go-ipld-format v0.6.3
github.com/ipfs/go-libipfs v0.7.0
github.com/ipld/go-car v0.6.3
github.com/klauspost/compress v1.18.4
github.com/klauspost/compress v1.18.5
github.com/microcosm-cc/bluemonday v1.0.27
github.com/multiformats/go-multihash v0.2.3
github.com/opencontainers/go-digest v1.0.0
@@ -39,39 +40,38 @@ require (
github.com/srwiley/oksvg v0.0.0-20221011165216-be6e8873101c
github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef
github.com/stretchr/testify v1.11.1
github.com/stripe/stripe-go/v84 v84.3.0
github.com/stripe/stripe-go/v84 v84.4.1
github.com/tursodatabase/go-libsql v0.0.0-20251219133454-43644db490ff
github.com/whyrusleeping/cbor-gen v0.3.1
github.com/yuin/goldmark v1.7.16
go.opentelemetry.io/otel v1.40.0
github.com/yuin/goldmark v1.8.2
go.opentelemetry.io/otel v1.43.0
go.yaml.in/yaml/v4 v4.0.0-rc.4
golang.org/x/crypto v0.48.0
golang.org/x/image v0.36.0
golang.org/x/sys v0.41.0
golang.org/x/crypto v0.50.0
golang.org/x/image v0.39.0
golang.org/x/sys v0.43.0
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da
)
require (
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect
github.com/RussellLuo/slidingwindow v0.0.0-20200528002341-535bb99d338b // indirect
github.com/ajg/form v1.6.1 // indirect
github.com/ajg/form v1.7.1 // indirect
github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
github.com/atotto/clipboard v0.1.4 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
github.com/aws/smithy-go v1.24.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.9 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.22 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.22 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.22 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.23 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.14 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.22 // indirect
github.com/aws/aws-sdk-go-v2/service/signin v1.0.10 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.30.16 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.42.0 // indirect
github.com/aws/smithy-go v1.25.0 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/aymerick/douceur v0.2.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
@@ -79,13 +79,15 @@ require (
github.com/catppuccin/go v0.3.0 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/charmbracelet/bubbles v0.21.1-0.20250623103423-23b8fd6302d7 // indirect
github.com/charmbracelet/bubbles v1.0.0 // indirect
github.com/charmbracelet/bubbletea v1.3.10 // indirect
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
github.com/charmbracelet/x/ansi v0.10.1 // indirect
github.com/charmbracelet/x/cellbuf v0.0.13 // indirect
github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0 // indirect
github.com/charmbracelet/x/term v0.2.1 // indirect
github.com/charmbracelet/colorprofile v0.4.3 // indirect
github.com/charmbracelet/x/ansi v0.11.7 // indirect
github.com/charmbracelet/x/cellbuf v0.0.15 // indirect
github.com/charmbracelet/x/exp/strings v0.1.0 // indirect
github.com/charmbracelet/x/term v0.2.2 // indirect
github.com/clipperhouse/displaywidth v0.11.0 // indirect
github.com/clipperhouse/uax29/v2 v2.7.0 // indirect
github.com/coreos/go-systemd/v22 v22.7.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
@@ -94,75 +96,74 @@ require (
github.com/docker/go-metrics v0.0.1 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/fatih/color v1.19.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/gammazero/chanqueue v1.1.2 // indirect
github.com/gammazero/deque v1.2.1 // indirect
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
github.com/go-jose/go-jose/v4 v4.1.4 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/snappy v1.0.0 // indirect
github.com/google/go-querystring v1.2.0 // indirect
github.com/gorilla/css v1.0.1 // indirect
github.com/gorilla/handlers v1.5.2 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.8 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.29.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/hashicorp/golang-lru/arc/v2 v2.0.7 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/ipfs/bbloom v0.0.4 // indirect
github.com/ipfs/boxo v0.36.0 // indirect
github.com/ipfs/bbloom v0.1.0 // indirect
github.com/ipfs/boxo v0.38.0 // indirect
github.com/ipfs/go-cidutil v0.1.1 // indirect
github.com/ipfs/go-datastore v0.9.1 // indirect
github.com/ipfs/go-dsqueue v0.2.0 // indirect
github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect
github.com/ipfs/go-ipfs-util v0.0.3 // indirect
github.com/ipfs/go-ipld-legacy v0.2.2 // indirect
github.com/ipfs/go-ipld-legacy v0.3.0 // indirect
github.com/ipfs/go-log v1.0.5 // indirect
github.com/ipfs/go-log/v2 v2.9.1 // indirect
github.com/ipfs/go-metrics-interface v0.3.0 // indirect
github.com/ipld/go-codec-dagpb v1.7.0 // indirect
github.com/ipld/go-ipld-prime v0.21.0 // indirect
github.com/ipld/go-ipld-prime v0.22.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/libsql/sqlite-antlr4-parser v0.0.0-20240721121621-c0bdc870f11c // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/lucasb-eyer/go-colorful v1.4.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-localereader v0.0.1 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mattn/go-isatty v0.0.21 // indirect
github.com/mattn/go-localereader v0.0.2-0.20220822084749-2491eb6c1c75 // indirect
github.com/mattn/go-runewidth v0.0.23 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/mr-tron/base58 v1.3.0 // indirect
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
github.com/muesli/cancelreader v0.2.2 // indirect
github.com/muesli/termenv v0.16.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multibase v0.3.0 // indirect
github.com/multiformats/go-multicodec v0.10.0 // indirect
github.com/multiformats/go-varint v0.1.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/pelletier/go-toml/v2 v2.3.0 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f // indirect
github.com/polydawn/refmt v0.89.1-0.20231129105047-37766d95467a // indirect
github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.67.5 // indirect
github.com/prometheus/otlptranslator v1.0.0 // indirect
github.com/prometheus/procfs v0.19.2 // indirect
github.com/redis/go-redis/extra/rediscmd/v9 v9.17.3 // indirect
github.com/redis/go-redis/extra/redisotel/v9 v9.17.3 // indirect
github.com/redis/go-redis/v9 v9.17.3 // indirect
github.com/prometheus/procfs v0.20.1 // indirect
github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 // indirect
github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 // indirect
github.com/redis/go-redis/v9 v9.18.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/sagikazarmark/locafero v0.12.0 // indirect
github.com/sirupsen/logrus v1.9.4 // indirect
@@ -175,40 +176,40 @@ require (
gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b // indirect
gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/bridges/prometheus v0.65.0 // indirect
go.opentelemetry.io/contrib/exporters/autoexport v0.65.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.16.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.16.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.40.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 // indirect
go.opentelemetry.io/otel/exporters/prometheus v0.62.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.16.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0 // indirect
go.opentelemetry.io/otel/log v0.16.0 // indirect
go.opentelemetry.io/otel/metric v1.40.0 // indirect
go.opentelemetry.io/otel/sdk v1.40.0 // indirect
go.opentelemetry.io/otel/sdk/log v0.16.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect
go.opentelemetry.io/otel/trace v1.40.0 // indirect
go.opentelemetry.io/proto/otlp v1.9.0 // indirect
go.opentelemetry.io/contrib/bridges/prometheus v0.68.0 // indirect
go.opentelemetry.io/contrib/exporters/autoexport v0.68.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.19.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.19.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.43.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.43.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 // indirect
go.opentelemetry.io/otel/exporters/prometheus v0.65.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.19.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.43.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0 // indirect
go.opentelemetry.io/otel/log v0.19.0 // indirect
go.opentelemetry.io/otel/metric v1.43.0 // indirect
go.opentelemetry.io/otel/sdk v1.43.0 // indirect
go.opentelemetry.io/otel/sdk/log v0.19.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.43.0 // indirect
go.opentelemetry.io/otel/trace v1.43.0 // indirect
go.opentelemetry.io/proto/otlp v1.10.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.1 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v2 v2.4.4 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect
golang.org/x/net v0.50.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/text v0.34.0 // indirect
golang.org/x/time v0.14.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect
google.golang.org/grpc v1.79.0 // indirect
golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f // indirect
golang.org/x/net v0.53.0 // indirect
golang.org/x/sync v0.20.0 // indirect
golang.org/x/text v0.36.0 // indirect
golang.org/x/time v0.15.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20260414002931-afd174a4e478 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260414002931-afd174a4e478 // indirect
google.golang.org/grpc v1.80.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

912
go.sum

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,4 @@
go 1.25.7
go 1.26.2
use (
.

File diff suppressed because it is too large Load Diff

View File

@@ -18,7 +18,7 @@
"configJson": {
"type": "string",
"description": "Raw OCI image config JSON blob",
"maxLength": 65536
"maxLength": 1000000
},
"createdAt": {
"type": "string",

View File

@@ -0,0 +1,54 @@
{
"lexicon": 1,
"id": "io.atcr.hold.purgeManifest",
"defs": {
"main": {
"type": "procedure",
"description": "Purge layer, scan, and image-config records associated with a manifest. Used by the appview when a user deletes a manifest, and by the hold's own labeler subscriber on takedown receipt. Idempotent: missing records are not errors. Does not delete S3 blobs (GC handles that based on remaining references).",
"input": {
"encoding": "application/json",
"schema": {
"type": "object",
"required": ["manifestUri"],
"properties": {
"manifestUri": {
"type": "string",
"format": "at-uri",
"description": "AT-URI of the manifest record, e.g. at://did:plc:xyz/io.atcr.manifest/<digest>"
}
}
}
},
"output": {
"encoding": "application/json",
"schema": {
"type": "object",
"required": ["success", "layersDeleted", "scanDeleted", "imageConfigDeleted"],
"properties": {
"success": {
"type": "boolean",
"description": "Whether the purge completed successfully"
},
"layersDeleted": {
"type": "integer",
"description": "Number of layer records deleted"
},
"scanDeleted": {
"type": "boolean",
"description": "Whether a scan record was deleted"
},
"imageConfigDeleted": {
"type": "boolean",
"description": "Whether an image config record was deleted"
}
}
}
},
"errors": [
{ "name": "AuthRequired" },
{ "name": "InvalidRequest" },
{ "name": "PurgeFailed" }
]
}
}
}

View File

@@ -69,6 +69,17 @@
"type": "string",
"format": "datetime",
"description": "RFC3339 timestamp of when the scan completed"
},
"status": {
"type": "string",
"knownValues": ["ok", "failed", "skipped"],
"description": "Outcome of the scan attempt. 'ok' (or omitted, for back-compat) means the scanner produced an SBOM. 'failed' means the scanner ran but errored. 'skipped' means the scanner intentionally bypassed this artifact type (e.g. helm charts).",
"maxLength": 32
},
"reason": {
"type": "string",
"description": "Optional human-readable explanation for non-ok status (e.g. 'unscannable artifact type application/vnd.cncf.helm.config.v1+json').",
"maxLength": 256
}
}
}

View File

@@ -0,0 +1,47 @@
{
"lexicon": 1,
"id": "io.atcr.hold.stats.daily",
"defs": {
"main": {
"type": "record",
"key": "any",
"description": "Daily repository statistics stored in the hold's embedded PDS. Tracks pull/push counts per owner+repository+date combination. Record key is deterministic: base32(sha256(ownerDID + \"/\" + repository + \"/\" + date)[:16]). Complements cumulative io.atcr.hold.stats records by providing daily granularity for trend charts.",
"record": {
"type": "object",
"required": ["ownerDid", "repository", "date", "pullCount", "pushCount", "updatedAt"],
"properties": {
"ownerDid": {
"type": "string",
"format": "did",
"description": "DID of the image owner (e.g., did:plc:xyz123)"
},
"repository": {
"type": "string",
"description": "Repository name (e.g., myapp)",
"maxLength": 256
},
"date": {
"type": "string",
"description": "Date in YYYY-MM-DD format (UTC)",
"maxLength": 10
},
"pullCount": {
"type": "integer",
"minimum": 0,
"description": "Number of manifest downloads on this date"
},
"pushCount": {
"type": "integer",
"minimum": 0,
"description": "Number of manifest uploads on this date"
},
"updatedAt": {
"type": "string",
"format": "datetime",
"description": "RFC3339 timestamp of when this record was last updated"
}
}
}
}
}
}

View File

@@ -26,6 +26,10 @@
"accept": ["image/png", "image/jpeg", "image/webp"],
"maxSize": 3000000
},
"userEdited": {
"type": "boolean",
"description": "Whether the description was manually edited by the user. When true, auto-population from manifest annotations is skipped on push."
},
"createdAt": {
"type": "string",
"format": "datetime",

View File

@@ -25,6 +25,11 @@
"format": "at-uri",
"description": "AT-URI of the manifest this tag points to (e.g., 'at://did:plc:xyz/io.atcr.manifest/abc123'). Preferred over manifestDigest for new records."
},
"mediaType": {
"type": "string",
"description": "OCI media type of the manifest (e.g., 'application/vnd.oci.image.manifest.v1+json' or 'application/vnd.oci.image.index.v1+json')",
"maxLength": 255
},
"manifestDigest": {
"type": "string",
"description": "DEPRECATED: Digest of the manifest (e.g., 'sha256:...'). Kept for backward compatibility with old records. New records should use 'manifest' field instead.",

471
package-lock.json generated
View File

@@ -8,24 +8,23 @@
"name": "atcr-styles",
"version": "1.0.0",
"dependencies": {
"actor-typeahead": "^0.1.2",
"htmx-ext-json-enc": "^2.0.3",
"htmx.org": "^2.0.8",
"lucide": "^0.577.0"
"lucide": "^1.8.0"
},
"devDependencies": {
"@tailwindcss/cli": "^4.2.1",
"@tailwindcss/cli": "^4.2.2",
"@tailwindcss/typography": "^0.5.19",
"daisyui": "^5.5.19",
"esbuild": "^0.27.4",
"esbuild": "^0.28.0",
"glob": "^13.0.6",
"tailwindcss": "^4.2"
"tailwindcss": "^4.2.2"
}
},
"node_modules/@esbuild/aix-ppc64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.4.tgz",
"integrity": "sha512-cQPwL2mp2nSmHHJlCyoXgHGhbEPMrEEU5xhkcy3Hs/O7nGZqEpZ2sUtLaL9MORLtDfRvVl2/3PAuEkYZH0Ty8Q==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.28.0.tgz",
"integrity": "sha512-lhRUCeuOyJQURhTxl4WkpFTjIsbDayJHih5kZC1giwE+MhIzAb7mEsQMqMf18rHLsrb5qI1tafG20mLxEWcWlA==",
"cpu": [
"ppc64"
],
@@ -40,9 +39,9 @@
}
},
"node_modules/@esbuild/android-arm": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.4.tgz",
"integrity": "sha512-X9bUgvxiC8CHAGKYufLIHGXPJWnr0OCdR0anD2e21vdvgCI8lIfqFbnoeOz7lBjdrAGUhqLZLcQo6MLhTO2DKQ==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.28.0.tgz",
"integrity": "sha512-wqh0ByljabXLKHeWXYLqoJ5jKC4XBaw6Hk08OfMrCRd2nP2ZQ5eleDZC41XHyCNgktBGYMbqnrJKq/K/lzPMSQ==",
"cpu": [
"arm"
],
@@ -57,9 +56,9 @@
}
},
"node_modules/@esbuild/android-arm64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.4.tgz",
"integrity": "sha512-gdLscB7v75wRfu7QSm/zg6Rx29VLdy9eTr2t44sfTW7CxwAtQghZ4ZnqHk3/ogz7xao0QAgrkradbBzcqFPasw==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.28.0.tgz",
"integrity": "sha512-+WzIXQOSaGs33tLEgYPYe/yQHf0WTU0X42Jca3y8NWMbUVhp7rUnw+vAsRC/QiDrdD31IszMrZy+qwPOPjd+rw==",
"cpu": [
"arm64"
],
@@ -74,9 +73,9 @@
}
},
"node_modules/@esbuild/android-x64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.4.tgz",
"integrity": "sha512-PzPFnBNVF292sfpfhiyiXCGSn9HZg5BcAz+ivBuSsl6Rk4ga1oEXAamhOXRFyMcjwr2DVtm40G65N3GLeH1Lvw==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.28.0.tgz",
"integrity": "sha512-+VJggoaKhk2VNNqVL7f6S189UzShHC/mR9EE8rDdSkdpN0KflSwWY/gWjDrNxxisg8Fp1ZCD9jLMo4m0OUfeUA==",
"cpu": [
"x64"
],
@@ -91,9 +90,9 @@
}
},
"node_modules/@esbuild/darwin-arm64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.4.tgz",
"integrity": "sha512-b7xaGIwdJlht8ZFCvMkpDN6uiSmnxxK56N2GDTMYPr2/gzvfdQN8rTfBsvVKmIVY/X7EM+/hJKEIbbHs9oA4tQ==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.28.0.tgz",
"integrity": "sha512-0T+A9WZm+bZ84nZBtk1ckYsOvyA3x7e2Acj1KdVfV4/2tdG4fzUp91YHx+GArWLtwqp77pBXVCPn2We7Letr0Q==",
"cpu": [
"arm64"
],
@@ -108,9 +107,9 @@
}
},
"node_modules/@esbuild/darwin-x64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.4.tgz",
"integrity": "sha512-sR+OiKLwd15nmCdqpXMnuJ9W2kpy0KigzqScqHI3Hqwr7IXxBp3Yva+yJwoqh7rE8V77tdoheRYataNKL4QrPw==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.28.0.tgz",
"integrity": "sha512-fyzLm/DLDl/84OCfp2f/XQ4flmORsjU7VKt8HLjvIXChJoFFOIL6pLJPH4Yhd1n1gGFF9mPwtlN5Wf82DZs+LQ==",
"cpu": [
"x64"
],
@@ -125,9 +124,9 @@
}
},
"node_modules/@esbuild/freebsd-arm64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.4.tgz",
"integrity": "sha512-jnfpKe+p79tCnm4GVav68A7tUFeKQwQyLgESwEAUzyxk/TJr4QdGog9sqWNcUbr/bZt/O/HXouspuQDd9JxFSw==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.28.0.tgz",
"integrity": "sha512-l9GeW5UZBT9k9brBYI+0WDffcRxgHQD8ShN2Ur4xWq/NFzUKm3k5lsH4PdaRgb2w7mI9u61nr2gI2mLI27Nh3Q==",
"cpu": [
"arm64"
],
@@ -142,9 +141,9 @@
}
},
"node_modules/@esbuild/freebsd-x64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.4.tgz",
"integrity": "sha512-2kb4ceA/CpfUrIcTUl1wrP/9ad9Atrp5J94Lq69w7UwOMolPIGrfLSvAKJp0RTvkPPyn6CIWrNy13kyLikZRZQ==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.28.0.tgz",
"integrity": "sha512-BXoQai/A0wPO6Es3yFJ7APCiKGc1tdAEOgeTNy3SsB491S3aHn4S4r3e976eUnPdU+NbdtmBuLncYir2tMU9Nw==",
"cpu": [
"x64"
],
@@ -159,9 +158,9 @@
}
},
"node_modules/@esbuild/linux-arm": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.4.tgz",
"integrity": "sha512-aBYgcIxX/wd5n2ys0yESGeYMGF+pv6g0DhZr3G1ZG4jMfruU9Tl1i2Z+Wnj9/KjGz1lTLCcorqE2viePZqj4Eg==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.28.0.tgz",
"integrity": "sha512-CjaaREJagqJp7iTaNQjjidaNbCKYcd4IDkzbwwxtSvjI7NZm79qiHc8HqciMddQ6CKvJT6aBd8lO9kN/ZudLlw==",
"cpu": [
"arm"
],
@@ -176,9 +175,9 @@
}
},
"node_modules/@esbuild/linux-arm64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.4.tgz",
"integrity": "sha512-7nQOttdzVGth1iz57kxg9uCz57dxQLHWxopL6mYuYthohPKEK0vU0C3O21CcBK6KDlkYVcnDXY099HcCDXd9dA==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.28.0.tgz",
"integrity": "sha512-RVyzfb3FWsGA55n6WY0MEIEPURL1FcbhFE6BffZEMEekfCzCIMtB5yyDcFnVbTnwk+CLAgTujmV/Lgvih56W+A==",
"cpu": [
"arm64"
],
@@ -193,9 +192,9 @@
}
},
"node_modules/@esbuild/linux-ia32": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.4.tgz",
"integrity": "sha512-oPtixtAIzgvzYcKBQM/qZ3R+9TEUd1aNJQu0HhGyqtx6oS7qTpvjheIWBbes4+qu1bNlo2V4cbkISr8q6gRBFA==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.28.0.tgz",
"integrity": "sha512-KBnSTt1kxl9x70q+ydterVdl+Cn0H18ngRMRCEQfrbqdUuntQQ0LoMZv47uB97NljZFzY6HcfqEZ2SAyIUTQBQ==",
"cpu": [
"ia32"
],
@@ -210,9 +209,9 @@
}
},
"node_modules/@esbuild/linux-loong64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.4.tgz",
"integrity": "sha512-8mL/vh8qeCoRcFH2nM8wm5uJP+ZcVYGGayMavi8GmRJjuI3g1v6Z7Ni0JJKAJW+m0EtUuARb6Lmp4hMjzCBWzA==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.28.0.tgz",
"integrity": "sha512-zpSlUce1mnxzgBADvxKXX5sl8aYQHo2ezvMNI8I0lbblJtp8V4odlm3Yzlj7gPyt3T8ReksE6bK+pT3WD+aJRg==",
"cpu": [
"loong64"
],
@@ -227,9 +226,9 @@
}
},
"node_modules/@esbuild/linux-mips64el": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.4.tgz",
"integrity": "sha512-1RdrWFFiiLIW7LQq9Q2NES+HiD4NyT8Itj9AUeCl0IVCA459WnPhREKgwrpaIfTOe+/2rdntisegiPWn/r/aAw==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.28.0.tgz",
"integrity": "sha512-2jIfP6mmjkdmeTlsX/9vmdmhBmKADrWqN7zcdtHIeNSCH1SqIoNI63cYsjQR8J+wGa4Y5izRcSHSm8K3QWmk3w==",
"cpu": [
"mips64el"
],
@@ -244,9 +243,9 @@
}
},
"node_modules/@esbuild/linux-ppc64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.4.tgz",
"integrity": "sha512-tLCwNG47l3sd9lpfyx9LAGEGItCUeRCWeAx6x2Jmbav65nAwoPXfewtAdtbtit/pJFLUWOhpv0FpS6GQAmPrHA==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.28.0.tgz",
"integrity": "sha512-bc0FE9wWeC0WBm49IQMPSPILRocGTQt3j5KPCA8os6VprfuJ7KD+5PzESSrJ6GmPIPJK965ZJHTUlSA6GNYEhg==",
"cpu": [
"ppc64"
],
@@ -261,9 +260,9 @@
}
},
"node_modules/@esbuild/linux-riscv64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.4.tgz",
"integrity": "sha512-BnASypppbUWyqjd1KIpU4AUBiIhVr6YlHx/cnPgqEkNoVOhHg+YiSVxM1RLfiy4t9cAulbRGTNCKOcqHrEQLIw==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.28.0.tgz",
"integrity": "sha512-SQPZOwoTTT/HXFXQJG/vBX8sOFagGqvZyXcgLA3NhIqcBv1BJU1d46c0rGcrij2B56Z2rNiSLaZOYW5cUk7yLQ==",
"cpu": [
"riscv64"
],
@@ -278,9 +277,9 @@
}
},
"node_modules/@esbuild/linux-s390x": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.4.tgz",
"integrity": "sha512-+eUqgb/Z7vxVLezG8bVB9SfBie89gMueS+I0xYh2tJdw3vqA/0ImZJ2ROeWwVJN59ihBeZ7Tu92dF/5dy5FttA==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.28.0.tgz",
"integrity": "sha512-SCfR0HN8CEEjnYnySJTd2cw0k9OHB/YFzt5zgJEwa+wL/T/raGWYMBqwDNAC6dqFKmJYZoQBRfHjgwLHGSrn3Q==",
"cpu": [
"s390x"
],
@@ -295,9 +294,9 @@
}
},
"node_modules/@esbuild/linux-x64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.4.tgz",
"integrity": "sha512-S5qOXrKV8BQEzJPVxAwnryi2+Iq5pB40gTEIT69BQONqR7JH1EPIcQ/Uiv9mCnn05jff9umq/5nqzxlqTOg9NA==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.28.0.tgz",
"integrity": "sha512-us0dSb9iFxIi8srnpl931Nvs65it/Jd2a2K3qs7fz2WfGPHqzfzZTfec7oxZJRNPXPnNYZtanmRc4AL/JwVzHQ==",
"cpu": [
"x64"
],
@@ -312,9 +311,9 @@
}
},
"node_modules/@esbuild/netbsd-arm64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.4.tgz",
"integrity": "sha512-xHT8X4sb0GS8qTqiwzHqpY00C95DPAq7nAwX35Ie/s+LO9830hrMd3oX0ZMKLvy7vsonee73x0lmcdOVXFzd6Q==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.28.0.tgz",
"integrity": "sha512-CR/RYotgtCKwtftMwJlUU7xCVNg3lMYZ0RzTmAHSfLCXw3NtZtNpswLEj/Kkf6kEL3Gw+BpOekRX0BYCtklhUw==",
"cpu": [
"arm64"
],
@@ -329,9 +328,9 @@
}
},
"node_modules/@esbuild/netbsd-x64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.4.tgz",
"integrity": "sha512-RugOvOdXfdyi5Tyv40kgQnI0byv66BFgAqjdgtAKqHoZTbTF2QqfQrFwa7cHEORJf6X2ht+l9ABLMP0dnKYsgg==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.28.0.tgz",
"integrity": "sha512-nU1yhmYutL+fQ71Kxnhg8uEOdC0pwEW9entHykTgEbna2pw2dkbFSMeqjjyHZoCmt8SBkOSvV+yNmm94aUrrqw==",
"cpu": [
"x64"
],
@@ -346,9 +345,9 @@
}
},
"node_modules/@esbuild/openbsd-arm64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.4.tgz",
"integrity": "sha512-2MyL3IAaTX+1/qP0O1SwskwcwCoOI4kV2IBX1xYnDDqthmq5ArrW94qSIKCAuRraMgPOmG0RDTA74mzYNQA9ow==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.28.0.tgz",
"integrity": "sha512-cXb5vApOsRsxsEl4mcZ1XY3D4DzcoMxR/nnc4IyqYs0rTI8ZKmW6kyyg+11Z8yvgMfAEldKzP7AdP64HnSC/6g==",
"cpu": [
"arm64"
],
@@ -363,9 +362,9 @@
}
},
"node_modules/@esbuild/openbsd-x64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.4.tgz",
"integrity": "sha512-u8fg/jQ5aQDfsnIV6+KwLOf1CmJnfu1ShpwqdwC0uA7ZPwFws55Ngc12vBdeUdnuWoQYx/SOQLGDcdlfXhYmXQ==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.28.0.tgz",
"integrity": "sha512-8wZM2qqtv9UP3mzy7HiGYNH/zjTA355mpeuA+859TyR+e+Tc08IHYpLJuMsfpDJwoLo1ikIJI8jC3GFjnRClzA==",
"cpu": [
"x64"
],
@@ -380,9 +379,9 @@
}
},
"node_modules/@esbuild/openharmony-arm64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.4.tgz",
"integrity": "sha512-JkTZrl6VbyO8lDQO3yv26nNr2RM2yZzNrNHEsj9bm6dOwwu9OYN28CjzZkH57bh4w0I2F7IodpQvUAEd1mbWXg==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.28.0.tgz",
"integrity": "sha512-FLGfyizszcef5C3YtoyQDACyg95+dndv79i2EekILBofh5wpCa1KuBqOWKrEHZg3zrL3t5ouE5jgr94vA+Wb2w==",
"cpu": [
"arm64"
],
@@ -397,9 +396,9 @@
}
},
"node_modules/@esbuild/sunos-x64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.4.tgz",
"integrity": "sha512-/gOzgaewZJfeJTlsWhvUEmUG4tWEY2Spp5M20INYRg2ZKl9QPO3QEEgPeRtLjEWSW8FilRNacPOg8R1uaYkA6g==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.28.0.tgz",
"integrity": "sha512-1ZgjUoEdHZZl/YlV76TSCz9Hqj9h9YmMGAgAPYd+q4SicWNX3G5GCyx9uhQWSLcbvPW8Ni7lj4gDa1T40akdlw==",
"cpu": [
"x64"
],
@@ -414,9 +413,9 @@
}
},
"node_modules/@esbuild/win32-arm64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.4.tgz",
"integrity": "sha512-Z9SExBg2y32smoDQdf1HRwHRt6vAHLXcxD2uGgO/v2jK7Y718Ix4ndsbNMU/+1Qiem9OiOdaqitioZwxivhXYg==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.28.0.tgz",
"integrity": "sha512-Q9StnDmQ/enxnpxCCLSg0oo4+34B9TdXpuyPeTedN/6+iXBJ4J+zwfQI28u/Jl40nOYAxGoNi7mFP40RUtkmUA==",
"cpu": [
"arm64"
],
@@ -431,9 +430,9 @@
}
},
"node_modules/@esbuild/win32-ia32": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.4.tgz",
"integrity": "sha512-DAyGLS0Jz5G5iixEbMHi5KdiApqHBWMGzTtMiJ72ZOLhbu/bzxgAe8Ue8CTS3n3HbIUHQz/L51yMdGMeoxXNJw==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.28.0.tgz",
"integrity": "sha512-zF3ag/gfiCe6U2iczcRzSYJKH1DCI+ByzSENHlM2FcDbEeo5Zd2C86Aq0tKUYAJJ1obRP84ymxIAksZUcdztHA==",
"cpu": [
"ia32"
],
@@ -448,9 +447,9 @@
}
},
"node_modules/@esbuild/win32-x64": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.4.tgz",
"integrity": "sha512-+knoa0BDoeXgkNvvV1vvbZX4+hizelrkwmGJBdT17t8FNPwG2lKemmuMZlmaNQ3ws3DKKCxpb4zRZEIp3UxFCg==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.28.0.tgz",
"integrity": "sha512-pEl1bO9mfAmIC+tW5btTmrKaujg3zGtUmWNdCw/xs70FBjwAL3o9OEKNHvNmnyylD6ubxUERiEhdsL0xBQ9efw==",
"cpu": [
"x64"
],
@@ -824,68 +823,68 @@
}
},
"node_modules/@tailwindcss/cli": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/@tailwindcss/cli/-/cli-4.2.1.tgz",
"integrity": "sha512-b7MGn51IA80oSG+7fuAgzfQ+7pZBgjzbqwmiv6NO7/+a1sev32cGqnwhscT7h0EcAvMa9r7gjRylqOH8Xhc4DA==",
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/@tailwindcss/cli/-/cli-4.2.2.tgz",
"integrity": "sha512-iJS+8kAFZ8HPqnh0O5DHCLjo4L6dD97DBQEkrhfSO4V96xeefUus2jqsBs1dUMt3OU9Ks4qIkiY0mpL5UW+4LQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@parcel/watcher": "^2.5.1",
"@tailwindcss/node": "4.2.1",
"@tailwindcss/oxide": "4.2.1",
"@tailwindcss/node": "4.2.2",
"@tailwindcss/oxide": "4.2.2",
"enhanced-resolve": "^5.19.0",
"mri": "^1.2.0",
"picocolors": "^1.1.1",
"tailwindcss": "4.2.1"
"tailwindcss": "4.2.2"
},
"bin": {
"tailwindcss": "dist/index.mjs"
}
},
"node_modules/@tailwindcss/node": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.2.1.tgz",
"integrity": "sha512-jlx6sLk4EOwO6hHe1oCGm1Q4AN/s0rSrTTPBGPM0/RQ6Uylwq17FuU8IeJJKEjtc6K6O07zsvP+gDO6MMWo7pg==",
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.2.2.tgz",
"integrity": "sha512-pXS+wJ2gZpVXqFaUEjojq7jzMpTGf8rU6ipJz5ovJV6PUGmlJ+jvIwGrzdHdQ80Sg+wmQxUFuoW1UAAwHNEdFA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jridgewell/remapping": "^2.3.5",
"enhanced-resolve": "^5.19.0",
"jiti": "^2.6.1",
"lightningcss": "1.31.1",
"lightningcss": "1.32.0",
"magic-string": "^0.30.21",
"source-map-js": "^1.2.1",
"tailwindcss": "4.2.1"
"tailwindcss": "4.2.2"
}
},
"node_modules/@tailwindcss/oxide": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.2.1.tgz",
"integrity": "sha512-yv9jeEFWnjKCI6/T3Oq50yQEOqmpmpfzG1hcZsAOaXFQPfzWprWrlHSdGPEF3WQTi8zu8ohC9Mh9J470nT5pUw==",
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.2.2.tgz",
"integrity": "sha512-qEUA07+E5kehxYp9BVMpq9E8vnJuBHfJEC0vPC5e7iL/hw7HR61aDKoVoKzrG+QKp56vhNZe4qwkRmMC0zDLvg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 20"
},
"optionalDependencies": {
"@tailwindcss/oxide-android-arm64": "4.2.1",
"@tailwindcss/oxide-darwin-arm64": "4.2.1",
"@tailwindcss/oxide-darwin-x64": "4.2.1",
"@tailwindcss/oxide-freebsd-x64": "4.2.1",
"@tailwindcss/oxide-linux-arm-gnueabihf": "4.2.1",
"@tailwindcss/oxide-linux-arm64-gnu": "4.2.1",
"@tailwindcss/oxide-linux-arm64-musl": "4.2.1",
"@tailwindcss/oxide-linux-x64-gnu": "4.2.1",
"@tailwindcss/oxide-linux-x64-musl": "4.2.1",
"@tailwindcss/oxide-wasm32-wasi": "4.2.1",
"@tailwindcss/oxide-win32-arm64-msvc": "4.2.1",
"@tailwindcss/oxide-win32-x64-msvc": "4.2.1"
"@tailwindcss/oxide-android-arm64": "4.2.2",
"@tailwindcss/oxide-darwin-arm64": "4.2.2",
"@tailwindcss/oxide-darwin-x64": "4.2.2",
"@tailwindcss/oxide-freebsd-x64": "4.2.2",
"@tailwindcss/oxide-linux-arm-gnueabihf": "4.2.2",
"@tailwindcss/oxide-linux-arm64-gnu": "4.2.2",
"@tailwindcss/oxide-linux-arm64-musl": "4.2.2",
"@tailwindcss/oxide-linux-x64-gnu": "4.2.2",
"@tailwindcss/oxide-linux-x64-musl": "4.2.2",
"@tailwindcss/oxide-wasm32-wasi": "4.2.2",
"@tailwindcss/oxide-win32-arm64-msvc": "4.2.2",
"@tailwindcss/oxide-win32-x64-msvc": "4.2.2"
}
},
"node_modules/@tailwindcss/oxide-android-arm64": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.2.1.tgz",
"integrity": "sha512-eZ7G1Zm5EC8OOKaesIKuw77jw++QJ2lL9N+dDpdQiAB/c/B2wDh0QPFHbkBVrXnwNugvrbJFk1gK2SsVjwWReg==",
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.2.2.tgz",
"integrity": "sha512-dXGR1n+P3B6748jZO/SvHZq7qBOqqzQ+yFrXpoOWWALWndF9MoSKAT3Q0fYgAzYzGhxNYOoysRvYlpixRBBoDg==",
"cpu": [
"arm64"
],
@@ -900,9 +899,9 @@
}
},
"node_modules/@tailwindcss/oxide-darwin-arm64": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.2.1.tgz",
"integrity": "sha512-q/LHkOstoJ7pI1J0q6djesLzRvQSIfEto148ppAd+BVQK0JYjQIFSK3JgYZJa+Yzi0DDa52ZsQx2rqytBnf8Hw==",
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.2.2.tgz",
"integrity": "sha512-iq9Qjr6knfMpZHj55/37ouZeykwbDqF21gPFtfnhCCKGDcPI/21FKC9XdMO/XyBM7qKORx6UIhGgg6jLl7BZlg==",
"cpu": [
"arm64"
],
@@ -917,9 +916,9 @@
}
},
"node_modules/@tailwindcss/oxide-darwin-x64": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.2.1.tgz",
"integrity": "sha512-/f/ozlaXGY6QLbpvd/kFTro2l18f7dHKpB+ieXz+Cijl4Mt9AI2rTrpq7V+t04nK+j9XBQHnSMdeQRhbGyt6fw==",
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.2.2.tgz",
"integrity": "sha512-BlR+2c3nzc8f2G639LpL89YY4bdcIdUmiOOkv2GQv4/4M0vJlpXEa0JXNHhCHU7VWOKWT/CjqHdTP8aUuDJkuw==",
"cpu": [
"x64"
],
@@ -934,9 +933,9 @@
}
},
"node_modules/@tailwindcss/oxide-freebsd-x64": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.2.1.tgz",
"integrity": "sha512-5e/AkgYJT/cpbkys/OU2Ei2jdETCLlifwm7ogMC7/hksI2fC3iiq6OcXwjibcIjPung0kRtR3TxEITkqgn0TcA==",
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.2.2.tgz",
"integrity": "sha512-YUqUgrGMSu2CDO82hzlQ5qSb5xmx3RUrke/QgnoEx7KvmRJHQuZHZmZTLSuuHwFf0DJPybFMXMYf+WJdxHy/nQ==",
"cpu": [
"x64"
],
@@ -951,9 +950,9 @@
}
},
"node_modules/@tailwindcss/oxide-linux-arm-gnueabihf": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.2.1.tgz",
"integrity": "sha512-Uny1EcVTTmerCKt/1ZuKTkb0x8ZaiuYucg2/kImO5A5Y/kBz41/+j0gxUZl+hTF3xkWpDmHX+TaWhOtba2Fyuw==",
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.2.2.tgz",
"integrity": "sha512-FPdhvsW6g06T9BWT0qTwiVZYE2WIFo2dY5aCSpjG/S/u1tby+wXoslXS0kl3/KXnULlLr1E3NPRRw0g7t2kgaQ==",
"cpu": [
"arm"
],
@@ -968,9 +967,9 @@
}
},
"node_modules/@tailwindcss/oxide-linux-arm64-gnu": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.2.1.tgz",
"integrity": "sha512-CTrwomI+c7n6aSSQlsPL0roRiNMDQ/YzMD9EjcR+H4f0I1SQ8QqIuPnsVp7QgMkC1Qi8rtkekLkOFjo7OlEFRQ==",
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.2.2.tgz",
"integrity": "sha512-4og1V+ftEPXGttOO7eCmW7VICmzzJWgMx+QXAJRAhjrSjumCwWqMfkDrNu1LXEQzNAwz28NCUpucgQPrR4S2yw==",
"cpu": [
"arm64"
],
@@ -985,9 +984,9 @@
}
},
"node_modules/@tailwindcss/oxide-linux-arm64-musl": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.2.1.tgz",
"integrity": "sha512-WZA0CHRL/SP1TRbA5mp9htsppSEkWuQ4KsSUumYQnyl8ZdT39ntwqmz4IUHGN6p4XdSlYfJwM4rRzZLShHsGAQ==",
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.2.2.tgz",
"integrity": "sha512-oCfG/mS+/+XRlwNjnsNLVwnMWYH7tn/kYPsNPh+JSOMlnt93mYNCKHYzylRhI51X+TbR+ufNhhKKzm6QkqX8ag==",
"cpu": [
"arm64"
],
@@ -1002,9 +1001,9 @@
}
},
"node_modules/@tailwindcss/oxide-linux-x64-gnu": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.2.1.tgz",
"integrity": "sha512-qMFzxI2YlBOLW5PhblzuSWlWfwLHaneBE0xHzLrBgNtqN6mWfs+qYbhryGSXQjFYB1Dzf5w+LN5qbUTPhW7Y5g==",
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.2.2.tgz",
"integrity": "sha512-rTAGAkDgqbXHNp/xW0iugLVmX62wOp2PoE39BTCGKjv3Iocf6AFbRP/wZT/kuCxC9QBh9Pu8XPkv/zCZB2mcMg==",
"cpu": [
"x64"
],
@@ -1019,9 +1018,9 @@
}
},
"node_modules/@tailwindcss/oxide-linux-x64-musl": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.2.1.tgz",
"integrity": "sha512-5r1X2FKnCMUPlXTWRYpHdPYUY6a1Ar/t7P24OuiEdEOmms5lyqjDRvVY1yy9Rmioh+AunQ0rWiOTPE8F9A3v5g==",
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.2.2.tgz",
"integrity": "sha512-XW3t3qwbIwiSyRCggeO2zxe3KWaEbM0/kW9e8+0XpBgyKU4ATYzcVSMKteZJ1iukJ3HgHBjbg9P5YPRCVUxlnQ==",
"cpu": [
"x64"
],
@@ -1036,9 +1035,9 @@
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.2.1.tgz",
"integrity": "sha512-MGFB5cVPvshR85MTJkEvqDUnuNoysrsRxd6vnk1Lf2tbiqNlXpHYZqkqOQalydienEWOHHFyyuTSYRsLfxFJ2Q==",
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.2.2.tgz",
"integrity": "sha512-eKSztKsmEsn1O5lJ4ZAfyn41NfG7vzCg496YiGtMDV86jz1q/irhms5O0VrY6ZwTUkFy/EKG3RfWgxSI3VbZ8Q==",
"bundleDependencies": [
"@napi-rs/wasm-runtime",
"@emnapi/core",
@@ -1066,9 +1065,9 @@
}
},
"node_modules/@tailwindcss/oxide-win32-arm64-msvc": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.2.1.tgz",
"integrity": "sha512-YlUEHRHBGnCMh4Nj4GnqQyBtsshUPdiNroZj8VPkvTZSoHsilRCwXcVKnG9kyi0ZFAS/3u+qKHBdDc81SADTRA==",
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.2.2.tgz",
"integrity": "sha512-qPmaQM4iKu5mxpsrWZMOZRgZv1tOZpUm+zdhhQP0VhJfyGGO3aUKdbh3gDZc/dPLQwW4eSqWGrrcWNBZWUWaXQ==",
"cpu": [
"arm64"
],
@@ -1083,9 +1082,9 @@
}
},
"node_modules/@tailwindcss/oxide-win32-x64-msvc": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.2.1.tgz",
"integrity": "sha512-rbO34G5sMWWyrN/idLeVxAZgAKWrn5LiR3/I90Q9MkA67s6T1oB0xtTe+0heoBvHSpbU9Mk7i6uwJnpo4u21XQ==",
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.2.2.tgz",
"integrity": "sha512-1T/37VvI7WyH66b+vqHj/cLwnCxt7Qt3WFu5Q8hk65aOvlwAhs7rAp1VkulBJw/N4tMirXjVnylTR72uI0HGcA==",
"cpu": [
"x64"
],
@@ -1112,12 +1111,6 @@
"tailwindcss": ">=3.0.0 || insiders || >=4.0.0-alpha.20 || >=4.0.0-beta.1"
}
},
"node_modules/actor-typeahead": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/actor-typeahead/-/actor-typeahead-0.1.2.tgz",
"integrity": "sha512-I97YqqNl7Kar0J/bIJvgY/KmHpssHcDElhfwVTLP7wRFlkxso2ZLBqiS2zol5A8UVUJbQK2JXYaqNpZXz8Uk2A==",
"license": "MPL-2.0"
},
"node_modules/balanced-match": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz",
@@ -1175,9 +1168,9 @@
}
},
"node_modules/enhanced-resolve": {
"version": "5.20.0",
"resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.20.0.tgz",
"integrity": "sha512-/ce7+jQ1PQ6rVXwe+jKEg5hW5ciicHwIQUagZkp6IufBoY3YDgdTTY1azVs0qoRgVmvsNB+rbjLJxDAeHHtwsQ==",
"version": "5.20.1",
"resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.20.1.tgz",
"integrity": "sha512-Qohcme7V1inbAfvjItgw0EaxVX5q2rdVEZHRBrEQdRZTssLDGsL8Lwrznl8oQ/6kuTJONLaDcGjkNP247XEhcA==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -1189,9 +1182,9 @@
}
},
"node_modules/esbuild": {
"version": "0.27.4",
"resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.4.tgz",
"integrity": "sha512-Rq4vbHnYkK5fws5NF7MYTU68FPRE1ajX7heQ/8QXXWqNgqqJ/GkmmyxIzUnf2Sr/bakf8l54716CcMGHYhMrrQ==",
"version": "0.28.0",
"resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.28.0.tgz",
"integrity": "sha512-sNR9MHpXSUV/XB4zmsFKN+QgVG82Cc7+/aaxJ8Adi8hyOac+EXptIp45QBPaVyX3N70664wRbTcLTOemCAnyqw==",
"dev": true,
"hasInstallScript": true,
"license": "MIT",
@@ -1202,32 +1195,32 @@
"node": ">=18"
},
"optionalDependencies": {
"@esbuild/aix-ppc64": "0.27.4",
"@esbuild/android-arm": "0.27.4",
"@esbuild/android-arm64": "0.27.4",
"@esbuild/android-x64": "0.27.4",
"@esbuild/darwin-arm64": "0.27.4",
"@esbuild/darwin-x64": "0.27.4",
"@esbuild/freebsd-arm64": "0.27.4",
"@esbuild/freebsd-x64": "0.27.4",
"@esbuild/linux-arm": "0.27.4",
"@esbuild/linux-arm64": "0.27.4",
"@esbuild/linux-ia32": "0.27.4",
"@esbuild/linux-loong64": "0.27.4",
"@esbuild/linux-mips64el": "0.27.4",
"@esbuild/linux-ppc64": "0.27.4",
"@esbuild/linux-riscv64": "0.27.4",
"@esbuild/linux-s390x": "0.27.4",
"@esbuild/linux-x64": "0.27.4",
"@esbuild/netbsd-arm64": "0.27.4",
"@esbuild/netbsd-x64": "0.27.4",
"@esbuild/openbsd-arm64": "0.27.4",
"@esbuild/openbsd-x64": "0.27.4",
"@esbuild/openharmony-arm64": "0.27.4",
"@esbuild/sunos-x64": "0.27.4",
"@esbuild/win32-arm64": "0.27.4",
"@esbuild/win32-ia32": "0.27.4",
"@esbuild/win32-x64": "0.27.4"
"@esbuild/aix-ppc64": "0.28.0",
"@esbuild/android-arm": "0.28.0",
"@esbuild/android-arm64": "0.28.0",
"@esbuild/android-x64": "0.28.0",
"@esbuild/darwin-arm64": "0.28.0",
"@esbuild/darwin-x64": "0.28.0",
"@esbuild/freebsd-arm64": "0.28.0",
"@esbuild/freebsd-x64": "0.28.0",
"@esbuild/linux-arm": "0.28.0",
"@esbuild/linux-arm64": "0.28.0",
"@esbuild/linux-ia32": "0.28.0",
"@esbuild/linux-loong64": "0.28.0",
"@esbuild/linux-mips64el": "0.28.0",
"@esbuild/linux-ppc64": "0.28.0",
"@esbuild/linux-riscv64": "0.28.0",
"@esbuild/linux-s390x": "0.28.0",
"@esbuild/linux-x64": "0.28.0",
"@esbuild/netbsd-arm64": "0.28.0",
"@esbuild/netbsd-x64": "0.28.0",
"@esbuild/openbsd-arm64": "0.28.0",
"@esbuild/openbsd-x64": "0.28.0",
"@esbuild/openharmony-arm64": "0.28.0",
"@esbuild/sunos-x64": "0.28.0",
"@esbuild/win32-arm64": "0.28.0",
"@esbuild/win32-ia32": "0.28.0",
"@esbuild/win32-x64": "0.28.0"
}
},
"node_modules/glob": {
@@ -1303,9 +1296,9 @@
}
},
"node_modules/lightningcss": {
"version": "1.31.1",
"resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.31.1.tgz",
"integrity": "sha512-l51N2r93WmGUye3WuFoN5k10zyvrVs0qfKBhyC5ogUQ6Ew6JUSswh78mbSO+IU3nTWsyOArqPCcShdQSadghBQ==",
"version": "1.32.0",
"resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.32.0.tgz",
"integrity": "sha512-NXYBzinNrblfraPGyrbPoD19C1h9lfI/1mzgWYvXUTe414Gz/X1FD2XBZSZM7rRTrMA8JL3OtAaGifrIKhQ5yQ==",
"dev": true,
"license": "MPL-2.0",
"dependencies": {
@@ -1319,23 +1312,23 @@
"url": "https://opencollective.com/parcel"
},
"optionalDependencies": {
"lightningcss-android-arm64": "1.31.1",
"lightningcss-darwin-arm64": "1.31.1",
"lightningcss-darwin-x64": "1.31.1",
"lightningcss-freebsd-x64": "1.31.1",
"lightningcss-linux-arm-gnueabihf": "1.31.1",
"lightningcss-linux-arm64-gnu": "1.31.1",
"lightningcss-linux-arm64-musl": "1.31.1",
"lightningcss-linux-x64-gnu": "1.31.1",
"lightningcss-linux-x64-musl": "1.31.1",
"lightningcss-win32-arm64-msvc": "1.31.1",
"lightningcss-win32-x64-msvc": "1.31.1"
"lightningcss-android-arm64": "1.32.0",
"lightningcss-darwin-arm64": "1.32.0",
"lightningcss-darwin-x64": "1.32.0",
"lightningcss-freebsd-x64": "1.32.0",
"lightningcss-linux-arm-gnueabihf": "1.32.0",
"lightningcss-linux-arm64-gnu": "1.32.0",
"lightningcss-linux-arm64-musl": "1.32.0",
"lightningcss-linux-x64-gnu": "1.32.0",
"lightningcss-linux-x64-musl": "1.32.0",
"lightningcss-win32-arm64-msvc": "1.32.0",
"lightningcss-win32-x64-msvc": "1.32.0"
}
},
"node_modules/lightningcss-android-arm64": {
"version": "1.31.1",
"resolved": "https://registry.npmjs.org/lightningcss-android-arm64/-/lightningcss-android-arm64-1.31.1.tgz",
"integrity": "sha512-HXJF3x8w9nQ4jbXRiNppBCqeZPIAfUo8zE/kOEGbW5NZvGc/K7nMxbhIr+YlFlHW5mpbg/YFPdbnCh1wAXCKFg==",
"version": "1.32.0",
"resolved": "https://registry.npmjs.org/lightningcss-android-arm64/-/lightningcss-android-arm64-1.32.0.tgz",
"integrity": "sha512-YK7/ClTt4kAK0vo6w3X+Pnm0D2cf2vPHbhOXdoNti1Ga0al1P4TBZhwjATvjNwLEBCnKvjJc2jQgHXH0NEwlAg==",
"cpu": [
"arm64"
],
@@ -1354,9 +1347,9 @@
}
},
"node_modules/lightningcss-darwin-arm64": {
"version": "1.31.1",
"resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.31.1.tgz",
"integrity": "sha512-02uTEqf3vIfNMq3h/z2cJfcOXnQ0GRwQrkmPafhueLb2h7mqEidiCzkE4gBMEH65abHRiQvhdcQ+aP0D0g67sg==",
"version": "1.32.0",
"resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.32.0.tgz",
"integrity": "sha512-RzeG9Ju5bag2Bv1/lwlVJvBE3q6TtXskdZLLCyfg5pt+HLz9BqlICO7LZM7VHNTTn/5PRhHFBSjk5lc4cmscPQ==",
"cpu": [
"arm64"
],
@@ -1375,9 +1368,9 @@
}
},
"node_modules/lightningcss-darwin-x64": {
"version": "1.31.1",
"resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.31.1.tgz",
"integrity": "sha512-1ObhyoCY+tGxtsz1lSx5NXCj3nirk0Y0kB/g8B8DT+sSx4G9djitg9ejFnjb3gJNWo7qXH4DIy2SUHvpoFwfTA==",
"version": "1.32.0",
"resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.32.0.tgz",
"integrity": "sha512-U+QsBp2m/s2wqpUYT/6wnlagdZbtZdndSmut/NJqlCcMLTWp5muCrID+K5UJ6jqD2BFshejCYXniPDbNh73V8w==",
"cpu": [
"x64"
],
@@ -1396,9 +1389,9 @@
}
},
"node_modules/lightningcss-freebsd-x64": {
"version": "1.31.1",
"resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.31.1.tgz",
"integrity": "sha512-1RINmQKAItO6ISxYgPwszQE1BrsVU5aB45ho6O42mu96UiZBxEXsuQ7cJW4zs4CEodPUioj/QrXW1r9pLUM74A==",
"version": "1.32.0",
"resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.32.0.tgz",
"integrity": "sha512-JCTigedEksZk3tHTTthnMdVfGf61Fky8Ji2E4YjUTEQX14xiy/lTzXnu1vwiZe3bYe0q+SpsSH/CTeDXK6WHig==",
"cpu": [
"x64"
],
@@ -1417,9 +1410,9 @@
}
},
"node_modules/lightningcss-linux-arm-gnueabihf": {
"version": "1.31.1",
"resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.31.1.tgz",
"integrity": "sha512-OOCm2//MZJ87CdDK62rZIu+aw9gBv4azMJuA8/KB74wmfS3lnC4yoPHm0uXZ/dvNNHmnZnB8XLAZzObeG0nS1g==",
"version": "1.32.0",
"resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.32.0.tgz",
"integrity": "sha512-x6rnnpRa2GL0zQOkt6rts3YDPzduLpWvwAF6EMhXFVZXD4tPrBkEFqzGowzCsIWsPjqSK+tyNEODUBXeeVHSkw==",
"cpu": [
"arm"
],
@@ -1438,9 +1431,9 @@
}
},
"node_modules/lightningcss-linux-arm64-gnu": {
"version": "1.31.1",
"resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.31.1.tgz",
"integrity": "sha512-WKyLWztD71rTnou4xAD5kQT+982wvca7E6QoLpoawZ1gP9JM0GJj4Tp5jMUh9B3AitHbRZ2/H3W5xQmdEOUlLg==",
"version": "1.32.0",
"resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.32.0.tgz",
"integrity": "sha512-0nnMyoyOLRJXfbMOilaSRcLH3Jw5z9HDNGfT/gwCPgaDjnx0i8w7vBzFLFR1f6CMLKF8gVbebmkUN3fa/kQJpQ==",
"cpu": [
"arm64"
],
@@ -1459,9 +1452,9 @@
}
},
"node_modules/lightningcss-linux-arm64-musl": {
"version": "1.31.1",
"resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.31.1.tgz",
"integrity": "sha512-mVZ7Pg2zIbe3XlNbZJdjs86YViQFoJSpc41CbVmKBPiGmC4YrfeOyz65ms2qpAobVd7WQsbW4PdsSJEMymyIMg==",
"version": "1.32.0",
"resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.32.0.tgz",
"integrity": "sha512-UpQkoenr4UJEzgVIYpI80lDFvRmPVg6oqboNHfoH4CQIfNA+HOrZ7Mo7KZP02dC6LjghPQJeBsvXhJod/wnIBg==",
"cpu": [
"arm64"
],
@@ -1480,9 +1473,9 @@
}
},
"node_modules/lightningcss-linux-x64-gnu": {
"version": "1.31.1",
"resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.31.1.tgz",
"integrity": "sha512-xGlFWRMl+0KvUhgySdIaReQdB4FNudfUTARn7q0hh/V67PVGCs3ADFjw+6++kG1RNd0zdGRlEKa+T13/tQjPMA==",
"version": "1.32.0",
"resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.32.0.tgz",
"integrity": "sha512-V7Qr52IhZmdKPVr+Vtw8o+WLsQJYCTd8loIfpDaMRWGUZfBOYEJeyJIkqGIDMZPwPx24pUMfwSxxI8phr/MbOA==",
"cpu": [
"x64"
],
@@ -1501,9 +1494,9 @@
}
},
"node_modules/lightningcss-linux-x64-musl": {
"version": "1.31.1",
"resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.31.1.tgz",
"integrity": "sha512-eowF8PrKHw9LpoZii5tdZwnBcYDxRw2rRCyvAXLi34iyeYfqCQNA9rmUM0ce62NlPhCvof1+9ivRaTY6pSKDaA==",
"version": "1.32.0",
"resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.32.0.tgz",
"integrity": "sha512-bYcLp+Vb0awsiXg/80uCRezCYHNg1/l3mt0gzHnWV9XP1W5sKa5/TCdGWaR/zBM2PeF/HbsQv/j2URNOiVuxWg==",
"cpu": [
"x64"
],
@@ -1522,9 +1515,9 @@
}
},
"node_modules/lightningcss-win32-arm64-msvc": {
"version": "1.31.1",
"resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.31.1.tgz",
"integrity": "sha512-aJReEbSEQzx1uBlQizAOBSjcmr9dCdL3XuC/6HLXAxmtErsj2ICo5yYggg1qOODQMtnjNQv2UHb9NpOuFtYe4w==",
"version": "1.32.0",
"resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.32.0.tgz",
"integrity": "sha512-8SbC8BR40pS6baCM8sbtYDSwEVQd4JlFTOlaD3gWGHfThTcABnNDBda6eTZeqbofalIJhFx0qKzgHJmcPTnGdw==",
"cpu": [
"arm64"
],
@@ -1543,9 +1536,9 @@
}
},
"node_modules/lightningcss-win32-x64-msvc": {
"version": "1.31.1",
"resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.31.1.tgz",
"integrity": "sha512-I9aiFrbd7oYHwlnQDqr1Roz+fTz61oDDJX7n9tYF9FJymH1cIN1DtKw3iYt6b8WZgEjoNwVSncwF4wx/ZedMhw==",
"version": "1.32.0",
"resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.32.0.tgz",
"integrity": "sha512-Amq9B/SoZYdDi1kFrojnoqPLxYhQ4Wo5XiL8EVJrVsB8ARoC1PWW6VGtT0WKCemjy8aC+louJnjS7U18x3b06Q==",
"cpu": [
"x64"
],
@@ -1574,9 +1567,9 @@
}
},
"node_modules/lucide": {
"version": "0.577.0",
"resolved": "https://registry.npmjs.org/lucide/-/lucide-0.577.0.tgz",
"integrity": "sha512-PpC/m5eOItp/WU/GlQPFBXDOhq6HibL73KzYP37OX3LM7VmzWQF8voEj8QRWUFvy9FIKfeDQkWYoyS1D/MdWFA==",
"version": "1.8.0",
"resolved": "https://registry.npmjs.org/lucide/-/lucide-1.8.0.tgz",
"integrity": "sha512-JjV/QnadgFLj1Pyu9IKl0lknrolFEzo04B64QcYLLeRzZl/iEHpdbSrRRKbyXcv45SZNv+WGjIUCT33e7xHO6Q==",
"license": "ISC"
},
"node_modules/magic-string": {
@@ -1694,16 +1687,16 @@
}
},
"node_modules/tailwindcss": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.2.1.tgz",
"integrity": "sha512-/tBrSQ36vCleJkAOsy9kbNTgaxvGbyOamC30PRePTQe/o1MFwEKHQk4Cn7BNGaPtjp+PuUrByJehM1hgxfq4sw==",
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.2.2.tgz",
"integrity": "sha512-KWBIxs1Xb6NoLdMVqhbhgwZf2PGBpPEiwOqgI4pFIYbNTfBXiKYyWoTsXgBQ9WFg/OlhnvHaY+AEpW7wSmFo2Q==",
"dev": true,
"license": "MIT"
},
"node_modules/tapable": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/tapable/-/tapable-2.3.0.tgz",
"integrity": "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==",
"version": "2.3.2",
"resolved": "https://registry.npmjs.org/tapable/-/tapable-2.3.2.tgz",
"integrity": "sha512-1MOpMXuhGzGL5TTCZFItxCc0AARf1EZFQkGqMm7ERKj8+Hgr5oLvJOVFcC+lRmR8hCe2S3jC4T5D7Vg/d7/fhA==",
"dev": true,
"license": "MIT",
"engines": {

View File

@@ -4,9 +4,9 @@
"private": true,
"scripts": {
"icons:build": "node scripts/generate-icons.js",
"css:build": "BROWSERSLIST_IGNORE_OLD_DATA=1 npx tailwindcss -i ./pkg/appview/src/css/main.css -o ./pkg/appview/public/css/style.css --minify",
"css:build:hold": "BROWSERSLIST_IGNORE_OLD_DATA=1 npx tailwindcss -i ./pkg/hold/admin/src/css/main.css -o ./pkg/hold/admin/public/css/style.css --minify",
"css:watch": "BROWSERSLIST_IGNORE_OLD_DATA=1 npx tailwindcss -i ./pkg/appview/src/css/main.css -o ./pkg/appview/public/css/style.css --watch",
"css:build": "npx tailwindcss -i ./pkg/appview/src/css/main.css -o ./pkg/appview/public/css/style.css --minify",
"css:build:hold": "npx tailwindcss -i ./pkg/hold/admin/src/css/main.css -o ./pkg/hold/admin/public/css/style.css --minify",
"css:watch": "npx tailwindcss -i ./pkg/appview/src/css/main.css -o ./pkg/appview/public/css/style.css --watch",
"js:build": "esbuild pkg/appview/src/js/main.js --bundle --minify --format=esm --outfile=pkg/appview/public/js/bundle.min.js",
"js:build:hold": "esbuild pkg/hold/admin/src/js/main.js --bundle --minify --format=esm --outfile=pkg/hold/admin/public/js/bundle.min.js",
"js:watch": "esbuild pkg/appview/src/js/main.js --bundle --watch --format=esm --outfile=pkg/appview/public/js/bundle.min.js",
@@ -16,17 +16,16 @@
"watch": "npm run css:watch & npm run js:watch"
},
"devDependencies": {
"@tailwindcss/cli": "^4.2.1",
"@tailwindcss/cli": "^4.2.2",
"@tailwindcss/typography": "^0.5.19",
"daisyui": "^5.5.19",
"esbuild": "^0.27.4",
"esbuild": "^0.28.0",
"glob": "^13.0.6",
"tailwindcss": "^4.2"
"lucide": "^1.8.0",
"tailwindcss": "^4.2.2"
},
"dependencies": {
"actor-typeahead": "^0.1.2",
"htmx-ext-json-enc": "^2.0.3",
"htmx.org": "^2.0.8",
"lucide": "^0.577.0"
"htmx.org": "^2.0.8"
}
}

View File

@@ -32,6 +32,8 @@ type Config struct {
Auth AuthConfig `yaml:"auth" comment:"JWT authentication settings."`
CredentialHelper CredentialHelperConfig `yaml:"credential_helper" comment:"Credential helper download settings."`
Legal LegalConfig `yaml:"legal" comment:"Legal page customization for self-hosted instances."`
AI AIConfig `yaml:"ai" comment:"AI-powered image advisor settings."`
Labeler LabelerRefConfig `yaml:"labeler" comment:"ATProto labeler for content moderation (DMCA takedowns)."`
Billing billing.Config `yaml:"billing" comment:"Stripe billing integration (requires -tags billing build)."`
Distribution *configuration.Configuration `yaml:"-"` // Wrapped distribution config for compatibility
}
@@ -50,9 +52,6 @@ type ServerConfig struct {
// Allows HTTP (not HTTPS) for DID resolution.
TestMode bool `yaml:"test_mode" comment:"Allows HTTP (not HTTPS) for DID resolution and uses transition:generic OAuth scope."`
// Path to P-256 private key for OAuth client authentication.
OAuthKeyPath string `yaml:"oauth_key_path" comment:"Path to P-256 private key for OAuth client authentication. Auto-generated on first run."`
// Display name shown on OAuth authorization screens.
ClientName string `yaml:"client_name" comment:"Display name shown on OAuth authorization screens."`
@@ -83,6 +82,9 @@ type UIConfig struct {
// How often to sync with the remote libSQL server.
LibsqlSyncInterval time.Duration `yaml:"libsql_sync_interval" comment:"How often to sync with remote libSQL server. Default: 60s."`
// Source code URL displayed in the footer "Source" link.
SourceURL string `yaml:"source_url" comment:"Source code URL displayed in the footer \"Source\" link. Defaults to the upstream ATCR project."`
}
// HealthConfig defines health check and cache settings
@@ -105,17 +107,14 @@ type JetstreamConfig struct {
// How often to re-run backfill to catch missed events. Set to 0 to only backfill on startup.
BackfillInterval time.Duration `yaml:"backfill_interval" comment:"How often to re-run backfill to catch missed events. Set to 0 to only backfill on startup."`
// Relay endpoints for backfill, tried in order on failure.
RelayEndpoints []string `yaml:"relay_endpoints" comment:"Relay endpoints for backfill, tried in order on failure."`
// Relay endpoints for backfill — MUST support com.atproto.sync.listReposByCollection. Tried in order on failure.
RelayEndpoints []string `yaml:"relay_endpoints" comment:"Endpoints used for backfill. MUST support com.atproto.sync.listReposByCollection. Tried in order on failure."`
}
// AuthConfig defines authentication settings
type AuthConfig struct {
// RSA private key for signing registry JWTs.
KeyPath string `yaml:"key_path" comment:"RSA private key for signing registry JWTs issued to Docker clients."`
// X.509 certificate matching the JWT signing key.
CertPath string `yaml:"cert_path" comment:"X.509 certificate matching the JWT signing key."`
CertPath string `yaml:"cert_path" comment:"X.509 certificate matching the JWT signing key (auto-generated on each boot from the JWT key in the database)."`
// TokenExpiration is the JWT expiration duration (5 minutes, not configurable)
TokenExpiration time.Duration `yaml:"-"`
@@ -140,6 +139,18 @@ type LegalConfig struct {
Jurisdiction string `yaml:"jurisdiction" comment:"Governing law jurisdiction for legal terms."`
}
// AIConfig defines AI-powered image advisor settings
type AIConfig struct {
// Anthropic API key for the AI Image Advisor feature.
APIKey string `yaml:"api_key" comment:"Anthropic API key for AI Image Advisor. Also reads CLAUDE_API_KEY env var as fallback."`
}
// LabelerRefConfig defines the connection to an ATProto labeler service.
type LabelerRefConfig struct {
// DID or URL of the labeler service for content moderation.
DID string `yaml:"did" comment:"DID or URL of the ATProto labeler (e.g., did:web:labeler.atcr.io). Empty disables label filtering."`
}
// setDefaults registers all default values on the given Viper instance.
func setDefaults(v *viper.Viper) {
v.SetDefault("version", "0.1")
@@ -152,7 +163,6 @@ func setDefaults(v *viper.Viper) {
v.SetDefault("server.test_mode", false)
v.SetDefault("server.client_name", "AT Container Registry")
v.SetDefault("server.client_short_name", "ATCR")
v.SetDefault("server.oauth_key_path", "/var/lib/atcr/oauth/client.key")
v.SetDefault("server.registry_domains", []string{})
v.SetDefault("server.managed_holds", []string{})
@@ -162,6 +172,7 @@ func setDefaults(v *viper.Viper) {
v.SetDefault("ui.libsql_sync_url", "")
v.SetDefault("ui.libsql_auth_token", "")
v.SetDefault("ui.libsql_sync_interval", "60s")
v.SetDefault("ui.source_url", "https://tangled.org/evan.jarrett.net/at-container-registry")
// Health defaults
v.SetDefault("health.cache_ttl", "15m")
@@ -182,17 +193,22 @@ func setDefaults(v *viper.Viper) {
})
// Auth defaults
v.SetDefault("auth.key_path", "/var/lib/atcr/auth/private-key.pem")
v.SetDefault("auth.cert_path", "/var/lib/atcr/auth/private-key.crt")
// Log shipper defaults
v.SetDefault("log_shipper.batch_size", 100)
v.SetDefault("log_shipper.flush_interval", "5s")
// AI defaults
v.SetDefault("ai.api_key", "")
// Legal defaults
v.SetDefault("legal.company_name", "")
v.SetDefault("legal.jurisdiction", "")
// Labeler defaults
v.SetDefault("labeler.did", "")
// Log formatter (used by distribution config, not in Config struct)
v.SetDefault("log_formatter", "text")
}
@@ -213,8 +229,8 @@ func ExampleYAML() ([]byte, error) {
// Populate example billing tiers so operators see the structure
cfg.Billing.Currency = "usd"
cfg.Billing.SuccessURL = "{base_url}/settings#storage"
cfg.Billing.CancelURL = "{base_url}/settings#storage"
cfg.Billing.SuccessURL = "{base_url}/settings/billing"
cfg.Billing.CancelURL = "{base_url}/settings/billing"
cfg.Billing.OwnerBadge = true
cfg.Billing.Tiers = []billing.BillingTierConfig{
{Name: "deckhand", Description: "Get started with basic storage", MaxWebhooks: 1},
@@ -255,6 +271,11 @@ func LoadConfig(yamlPath string) (*Config, error) {
cfg.Legal.CompanyName = cfg.Server.ClientName
}
// Post-load: AI API key fallback to CLAUDE_API_KEY env
if cfg.AI.APIKey == "" {
cfg.AI.APIKey = os.Getenv("CLAUDE_API_KEY")
}
// Validation
if cfg.Server.DefaultHoldDID == "" {
return nil, fmt.Errorf("server.default_hold_did is required (env: ATCR_SERVER_DEFAULT_HOLD_DID)")

View File

@@ -18,10 +18,8 @@ import (
"github.com/bluesky-social/indigo/atproto/atcrypto"
)
// loadOAuthKey loads the OAuth P-256 key with priority: DB → file → generate.
// Keys loaded from file or newly generated are stored in the DB.
func loadOAuthKey(database *sql.DB, keyPath string) (*atcrypto.PrivateKeyP256, error) {
// Try database first
// loadOAuthKey loads the OAuth P-256 key from the DB, generating one if absent.
func loadOAuthKey(database *sql.DB) (*atcrypto.PrivateKeyP256, error) {
data, err := db.GetCryptoKey(database, "oauth_p256")
if err != nil {
return nil, fmt.Errorf("failed to query crypto_keys: %w", err)
@@ -35,23 +33,6 @@ func loadOAuthKey(database *sql.DB, keyPath string) (*atcrypto.PrivateKeyP256, e
return key, nil
}
// Try file fallback
if keyPath != "" {
if fileData, err := os.ReadFile(keyPath); err == nil {
key, err := atcrypto.ParsePrivateBytesP256(fileData)
if err != nil {
return nil, fmt.Errorf("failed to parse OAuth key from file %s: %w", keyPath, err)
}
// Migrate to database
if err := db.PutCryptoKey(database, "oauth_p256", fileData); err != nil {
return nil, fmt.Errorf("failed to store OAuth key in database: %w", err)
}
slog.Info("Migrated OAuth P-256 key from file to database", "path", keyPath)
return key, nil
}
}
// Generate new key
p256Key, err := atcrypto.GeneratePrivateKeyP256()
if err != nil {
return nil, fmt.Errorf("failed to generate OAuth P-256 key: %w", err)
@@ -66,16 +47,15 @@ func loadOAuthKey(database *sql.DB, keyPath string) (*atcrypto.PrivateKeyP256, e
return p256Key, nil
}
// loadJWTKeyAndCert loads the JWT RSA key from DB (with file fallback) and generates
// a self-signed certificate. The cert is always regenerated and written to certPath
// on disk because the distribution library reads it via os.Open().
func loadJWTKeyAndCert(database *sql.DB, keyPath, certPath string) (*rsa.PrivateKey, []byte, error) {
rsaKey, err := loadRSAKey(database, keyPath)
// loadJWTKeyAndCert loads the JWT RSA key from the DB and generates a self-signed
// certificate. The cert is always regenerated and written to certPath on disk
// because the distribution library reads it via os.Open().
func loadJWTKeyAndCert(database *sql.DB, certPath string) (*rsa.PrivateKey, []byte, error) {
rsaKey, err := loadRSAKey(database)
if err != nil {
return nil, nil, err
}
// Generate cert and write to disk for distribution library
certDER, err := generateAndWriteCert(rsaKey, certPath)
if err != nil {
return nil, nil, err
@@ -84,9 +64,8 @@ func loadJWTKeyAndCert(database *sql.DB, keyPath, certPath string) (*rsa.Private
return rsaKey, certDER, nil
}
// loadRSAKey loads the RSA private key with priority: DB → file → generate.
func loadRSAKey(database *sql.DB, keyPath string) (*rsa.PrivateKey, error) {
// Try database first
// loadRSAKey loads the RSA private key from the DB, generating one if absent.
func loadRSAKey(database *sql.DB) (*rsa.PrivateKey, error) {
data, err := db.GetCryptoKey(database, "jwt_rsa")
if err != nil {
return nil, fmt.Errorf("failed to query crypto_keys: %w", err)
@@ -100,23 +79,6 @@ func loadRSAKey(database *sql.DB, keyPath string) (*rsa.PrivateKey, error) {
return key, nil
}
// Try file fallback
if keyPath != "" {
if fileData, err := os.ReadFile(keyPath); err == nil {
key, err := parseRSAKeyPEM(fileData)
if err != nil {
return nil, fmt.Errorf("failed to parse RSA key from file %s: %w", keyPath, err)
}
// Migrate to database
if err := db.PutCryptoKey(database, "jwt_rsa", fileData); err != nil {
return nil, fmt.Errorf("failed to store RSA key in database: %w", err)
}
slog.Info("Migrated JWT RSA key from file to database", "path", keyPath)
return key, nil
}
}
// Generate new key
rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return nil, fmt.Errorf("failed to generate RSA key: %w", err)

View File

@@ -29,6 +29,36 @@ func GetRepositoryAnnotations(db DBTX, did, repository string) (map[string]strin
return annotations, rows.Err()
}
// GetRepositoryAnnotationsByDID retrieves all annotations for every
// repository owned by a DID, grouped as map[repository]map[key]value.
// Used by bulk-fetch paths to avoid issuing one query per repository.
func GetRepositoryAnnotationsByDID(db DBTX, did string) (map[string]map[string]string, error) {
	rows, err := db.Query(`
		SELECT repository, key, value
		FROM repository_annotations
		WHERE did = ?
	`, did)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	byRepo := make(map[string]map[string]string)
	for rows.Next() {
		var repo, k, v string
		if err := rows.Scan(&repo, &k, &v); err != nil {
			return nil, err
		}
		// Lazily create the inner map the first time a repo appears.
		if byRepo[repo] == nil {
			byRepo[repo] = make(map[string]string)
		}
		byRepo[repo][k] = v
	}
	return byRepo, rows.Err()
}
// UpsertRepositoryAnnotations upserts annotations for a repository.
// Stale keys not present in the new map are deleted.
// Unchanged values are skipped to avoid unnecessary writes.

580
pkg/appview/db/batch.go Normal file
View File

@@ -0,0 +1,580 @@
package db
import (
"encoding/json"
"fmt"
"strings"
"time"
)
// BatchSize is the maximum number of rows included in a single multi-row INSERT.
// Kept well under SQLite's default SQLITE_MAX_VARIABLE_NUMBER (32766) and any
// remote libsql parameter ceiling — at 11 columns this is 1100 placeholders.
const BatchSize = 100

// buildPlaceholders returns a comma-separated list of `rows` groups of the form
// `(?,?,?)`, each group containing `cols` placeholders. Used to construct the
// VALUES clause of multi-row INSERT statements.
func buildPlaceholders(rows, cols int) string {
	if rows <= 0 || cols <= 0 {
		return ""
	}
	// One group, e.g. "(?,?,?)" for cols == 3.
	group := "(" + strings.TrimSuffix(strings.Repeat("?,", cols), ",") + ")"
	groups := make([]string, rows)
	for idx := range groups {
		groups[idx] = group
	}
	return strings.Join(groups, ",")
}

// chunk returns the half-open range [start, end) for the i-th chunk of size
// BatchSize within a slice of length n.
func chunk(n, i int) (start, end int) {
	start = i * BatchSize
	if end = start + BatchSize; end > n {
		end = n
	}
	return start, end
}
// BatchInsertManifests upserts a batch of manifests and returns a map of
// digest → manifest id for the inserted rows (both new and existing). Rows
// are keyed by (did, repository, digest); callers that need the id must
// group their input so that digest is unique per (did, repository) in one
// batch call. Use ManifestKey to build lookup keys into the result.
//
// Implementation: one multi-row INSERT per sub-batch, followed by one SELECT
// to fetch ids back (libsql's RETURNING support across replica modes is
// uneven; a second SELECT is reliable and still a single round-trip per
// sub-batch).
func BatchInsertManifests(db DBTX, manifests []Manifest) (map[string]int64, error) {
	out := make(map[string]int64, len(manifests))
	if len(manifests) == 0 {
		return out, nil
	}
	for i := 0; i*BatchSize < len(manifests); i++ {
		start, end := chunk(len(manifests), i)
		batch := manifests[start:end]
		const cols = 11
		args := make([]any, 0, len(batch)*cols)
		for _, m := range batch {
			args = append(args,
				m.DID, m.Repository, m.Digest, m.HoldEndpoint,
				m.SchemaVersion, m.MediaType, m.ConfigDigest,
				m.ConfigSize, m.ArtifactType,
				nullString(m.SubjectDigest),
				m.CreatedAt,
			)
		}
		// The WHERE clause on DO UPDATE skips no-op writes; IS NOT is used for
		// nullable columns so NULL↔value transitions are detected.
		query := `
		INSERT INTO manifests
			(did, repository, digest, hold_endpoint, schema_version, media_type,
			 config_digest, config_size, artifact_type, subject_digest, created_at)
		VALUES ` + buildPlaceholders(len(batch), cols) + `
		ON CONFLICT(did, repository, digest) DO UPDATE SET
			hold_endpoint = excluded.hold_endpoint,
			schema_version = excluded.schema_version,
			media_type = excluded.media_type,
			config_digest = excluded.config_digest,
			config_size = excluded.config_size,
			artifact_type = excluded.artifact_type,
			subject_digest = excluded.subject_digest
		WHERE excluded.hold_endpoint != manifests.hold_endpoint
			OR excluded.schema_version != manifests.schema_version
			OR excluded.media_type != manifests.media_type
			OR excluded.config_digest IS NOT manifests.config_digest
			OR excluded.config_size IS NOT manifests.config_size
			OR excluded.artifact_type != manifests.artifact_type
			OR excluded.subject_digest IS NOT manifests.subject_digest
		`
		if _, err := db.Exec(query, args...); err != nil {
			return nil, fmt.Errorf("batch insert manifests: %w", err)
		}
		// Fetch ids for this sub-batch by (did, digest) — digests are unique enough
		// that matching on (did, digest) avoids needing a three-column IN list.
		// repository is included in the row to disambiguate if a user genuinely has
		// the same digest across repos.
		selectArgs := make([]any, 0, 1+2*len(batch))
		// Group by did (caller usually supplies one did per call, but be safe).
		didSet := make(map[string]struct{})
		for _, m := range batch {
			didSet[m.DID] = struct{}{}
		}
		// Build a per-did IN (?) query; usually exactly one iteration.
		for did := range didSet {
			digests := make([]string, 0, len(batch))
			for _, m := range batch {
				if m.DID == did {
					digests = append(digests, m.Digest)
				}
			}
			selectArgs = append(selectArgs[:0], did)
			for _, d := range digests {
				selectArgs = append(selectArgs, d)
			}
			selectQuery := `
			SELECT repository, digest, id FROM manifests
			WHERE did = ? AND digest IN (` +
				strings.TrimSuffix(strings.Repeat("?,", len(digests)), ",") + `)
			`
			rows, err := db.Query(selectQuery, selectArgs...)
			if err != nil {
				return nil, fmt.Errorf("batch select manifest ids: %w", err)
			}
			for rows.Next() {
				var repo, digest string
				var id int64
				if err := rows.Scan(&repo, &digest, &id); err != nil {
					rows.Close()
					return nil, fmt.Errorf("scan manifest id: %w", err)
				}
				// Key format matches what callers use: "did|repo|digest".
				out[manifestKey(did, repo, digest)] = id
			}
			// Fix: the original never checked rows.Err(), so a mid-iteration
			// failure silently produced a partial id map. Surface it instead.
			if err := rows.Err(); err != nil {
				rows.Close()
				return nil, fmt.Errorf("iterate manifest ids: %w", err)
			}
			rows.Close()
		}
	}
	return out, nil
}
// ManifestKey builds the lookup key used by BatchInsertManifests' result map.
// Callers construct the same key from their in-memory Manifest structs to
// find the assigned id.
func ManifestKey(did, repository, digest string) string {
	return manifestKey(did, repository, digest)
}

// manifestKey joins the three identifying fields with '|' separators.
func manifestKey(did, repository, digest string) string {
	return strings.Join([]string{did, repository, digest}, "|")
}
// BatchInsertLayers inserts a batch of layers, skipping any that already exist.
// Layers are immutable, so ON CONFLICT DO NOTHING matches the single-row
// InsertLayer semantics.
func BatchInsertLayers(db DBTX, layers []Layer) error {
	if len(layers) == 0 {
		return nil
	}
	const cols = 6
	for start := 0; start < len(layers); start += BatchSize {
		end := start + BatchSize
		if end > len(layers) {
			end = len(layers)
		}
		sub := layers[start:end]
		vals := make([]any, 0, len(sub)*cols)
		for _, layer := range sub {
			// Annotations are stored as a JSON string, or NULL when absent.
			var ann any
			if len(layer.Annotations) > 0 {
				encoded, err := json.Marshal(layer.Annotations)
				if err != nil {
					return fmt.Errorf("marshal layer annotations: %w", err)
				}
				text := string(encoded)
				ann = &text
			}
			vals = append(vals, layer.ManifestID, layer.Digest, layer.Size, layer.MediaType, layer.LayerIndex, ann)
		}
		query := `
		INSERT INTO layers (manifest_id, digest, size, media_type, layer_index, annotations)
		VALUES ` + buildPlaceholders(len(sub), cols) + `
		ON CONFLICT(manifest_id, layer_index) DO NOTHING
		`
		if _, err := db.Exec(query, vals...); err != nil {
			return fmt.Errorf("batch insert layers: %w", err)
		}
	}
	return nil
}
// BatchInsertManifestReferences inserts a batch of manifest references.
// The table has PRIMARY KEY(manifest_id, reference_index); duplicates skip.
func BatchInsertManifestReferences(db DBTX, refs []ManifestReference) error {
	if len(refs) == 0 {
		return nil
	}
	const cols = 10
	for start := 0; start < len(refs); start += BatchSize {
		end := start + BatchSize
		if end > len(refs) {
			end = len(refs)
		}
		sub := refs[start:end]
		vals := make([]any, 0, len(sub)*cols)
		for _, ref := range sub {
			vals = append(vals,
				ref.ManifestID, ref.Digest, ref.Size, ref.MediaType,
				ref.PlatformArchitecture, ref.PlatformOS,
				ref.PlatformVariant, ref.PlatformOSVersion,
				ref.IsAttestation, ref.ReferenceIndex,
			)
		}
		query := `
		INSERT INTO manifest_references (manifest_id, digest, size, media_type,
		                                 platform_architecture, platform_os,
		                                 platform_variant, platform_os_version,
		                                 is_attestation, reference_index)
		VALUES ` + buildPlaceholders(len(sub), cols) + `
		ON CONFLICT(manifest_id, reference_index) DO NOTHING
		`
		if _, err := db.Exec(query, vals...); err != nil {
			return fmt.Errorf("batch insert manifest references: %w", err)
		}
	}
	return nil
}
// BatchUpsertTags upserts a batch of tag records, matching UpsertTag semantics.
func BatchUpsertTags(db DBTX, tags []Tag) error {
	if len(tags) == 0 {
		return nil
	}
	const cols = 5
	for start := 0; start < len(tags); start += BatchSize {
		end := start + BatchSize
		if end > len(tags) {
			end = len(tags)
		}
		sub := tags[start:end]
		vals := make([]any, 0, len(sub)*cols)
		for _, tag := range sub {
			vals = append(vals, tag.DID, tag.Repository, tag.Tag, tag.Digest, tag.CreatedAt)
		}
		// The WHERE guard skips writes when neither digest nor timestamp moved.
		query := `
		INSERT INTO tags (did, repository, tag, digest, created_at)
		VALUES ` + buildPlaceholders(len(sub), cols) + `
		ON CONFLICT(did, repository, tag) DO UPDATE SET
			digest = excluded.digest,
			created_at = excluded.created_at
		WHERE excluded.digest != tags.digest
			OR excluded.created_at != tags.created_at
		`
		if _, err := db.Exec(query, vals...); err != nil {
			return fmt.Errorf("batch upsert tags: %w", err)
		}
	}
	return nil
}
// StarInput is a struct projection of the UpsertStar argument list for use with BatchUpsertStars.
type StarInput struct {
	StarrerDID string
	OwnerDID   string
	Repository string
	CreatedAt  time.Time
}

// BatchUpsertStars upserts a batch of stars. Stars are immutable.
func BatchUpsertStars(db DBTX, stars []StarInput) error {
	if len(stars) == 0 {
		return nil
	}
	const cols = 4
	for start := 0; start < len(stars); start += BatchSize {
		end := start + BatchSize
		if end > len(stars) {
			end = len(stars)
		}
		sub := stars[start:end]
		vals := make([]any, 0, len(sub)*cols)
		for _, star := range sub {
			vals = append(vals, star.StarrerDID, star.OwnerDID, star.Repository, star.CreatedAt)
		}
		// Immutable rows: a conflict means the star already exists, so skip it.
		query := `
		INSERT INTO stars (starrer_did, owner_did, repository, created_at)
		VALUES ` + buildPlaceholders(len(sub), cols) + `
		ON CONFLICT(starrer_did, owner_did, repository) DO NOTHING
		`
		if _, err := db.Exec(query, vals...); err != nil {
			return fmt.Errorf("batch upsert stars: %w", err)
		}
	}
	return nil
}
// BatchUpsertRepoPages upserts a batch of repo page records.
func BatchUpsertRepoPages(db DBTX, pages []RepoPage) error {
	if len(pages) == 0 {
		return nil
	}
	const cols = 7
	for start := 0; start < len(pages); start += BatchSize {
		end := start + BatchSize
		if end > len(pages) {
			end = len(pages)
		}
		sub := pages[start:end]
		vals := make([]any, 0, len(sub)*cols)
		for _, page := range sub {
			vals = append(vals,
				page.DID, page.Repository, page.Description, page.AvatarCID,
				page.UserEdited, page.CreatedAt, page.UpdatedAt,
			)
		}
		// IS NOT comparisons tolerate NULLs in description/avatar/user_edited.
		query := `
		INSERT INTO repo_pages (did, repository, description, avatar_cid, user_edited, created_at, updated_at)
		VALUES ` + buildPlaceholders(len(sub), cols) + `
		ON CONFLICT(did, repository) DO UPDATE SET
			description = excluded.description,
			avatar_cid = excluded.avatar_cid,
			user_edited = excluded.user_edited,
			updated_at = excluded.updated_at
		WHERE excluded.description IS NOT repo_pages.description
			OR excluded.avatar_cid IS NOT repo_pages.avatar_cid
			OR excluded.user_edited IS NOT repo_pages.user_edited
		`
		if _, err := db.Exec(query, vals...); err != nil {
			return fmt.Errorf("batch upsert repo pages: %w", err)
		}
	}
	return nil
}
// BatchUpsertDailyStats upserts a batch of daily stats rows.
func BatchUpsertDailyStats(db DBTX, stats []DailyStats) error {
	if len(stats) == 0 {
		return nil
	}
	const cols = 5
	for start := 0; start < len(stats); start += BatchSize {
		end := start + BatchSize
		if end > len(stats) {
			end = len(stats)
		}
		sub := stats[start:end]
		vals := make([]any, 0, len(sub)*cols)
		for _, stat := range sub {
			vals = append(vals, stat.DID, stat.Repository, stat.Date, stat.PullCount, stat.PushCount)
		}
		query := `
		INSERT INTO repository_stats_daily (did, repository, date, pull_count, push_count)
		VALUES ` + buildPlaceholders(len(sub), cols) + `
		ON CONFLICT(did, repository, date) DO UPDATE SET
			pull_count = excluded.pull_count,
			push_count = excluded.push_count
		WHERE excluded.pull_count != repository_stats_daily.pull_count
			OR excluded.push_count != repository_stats_daily.push_count
		`
		if _, err := db.Exec(query, vals...); err != nil {
			return fmt.Errorf("batch upsert daily stats: %w", err)
		}
	}
	return nil
}
// BatchUpsertRepositoryStats upserts aggregated repository stats.
func BatchUpsertRepositoryStats(db DBTX, stats []RepositoryStats) error {
	if len(stats) == 0 {
		return nil
	}
	const cols = 6
	for start := 0; start < len(stats); start += BatchSize {
		end := start + BatchSize
		if end > len(stats) {
			end = len(stats)
		}
		sub := stats[start:end]
		vals := make([]any, 0, len(sub)*cols)
		for _, stat := range sub {
			vals = append(vals,
				stat.DID, stat.Repository, stat.PullCount, stat.LastPull, stat.PushCount, stat.LastPush,
			)
		}
		// last_pull/last_push are nullable, hence the IS NOT comparisons.
		query := `
		INSERT INTO repository_stats (did, repository, pull_count, last_pull, push_count, last_push)
		VALUES ` + buildPlaceholders(len(sub), cols) + `
		ON CONFLICT(did, repository) DO UPDATE SET
			pull_count = excluded.pull_count,
			last_pull = excluded.last_pull,
			push_count = excluded.push_count,
			last_push = excluded.last_push
		WHERE excluded.pull_count != repository_stats.pull_count
			OR excluded.last_pull IS NOT repository_stats.last_pull
			OR excluded.push_count != repository_stats.push_count
			OR excluded.last_push IS NOT repository_stats.last_push
		`
		if _, err := db.Exec(query, vals...); err != nil {
			return fmt.Errorf("batch upsert repository stats: %w", err)
		}
	}
	return nil
}
// BatchUpsertCaptainRecords upserts a batch of captain records.
func BatchUpsertCaptainRecords(db DBTX, records []HoldCaptainRecord) error {
	if len(records) == 0 {
		return nil
	}
	const cols = 8
	for start := 0; start < len(records); start += BatchSize {
		end := start + BatchSize
		if end > len(records) {
			end = len(records)
		}
		sub := records[start:end]
		vals := make([]any, 0, len(sub)*cols)
		for _, rec := range sub {
			vals = append(vals,
				rec.HoldDID, rec.OwnerDID, rec.Public, rec.AllowAllCrew,
				nullString(rec.DeployedAt),
				nullString(rec.Region),
				nullString(rec.Successor),
				rec.UpdatedAt,
			)
		}
		// Nullable columns (deployed_at/region/successor) use IS NOT so that
		// NULL↔value transitions still trigger the update.
		query := `
		INSERT INTO hold_captain_records (
			hold_did, owner_did, public, allow_all_crew,
			deployed_at, region, successor, updated_at
		) VALUES ` + buildPlaceholders(len(sub), cols) + `
		ON CONFLICT(hold_did) DO UPDATE SET
			owner_did = excluded.owner_did,
			public = excluded.public,
			allow_all_crew = excluded.allow_all_crew,
			deployed_at = excluded.deployed_at,
			region = excluded.region,
			successor = excluded.successor,
			updated_at = excluded.updated_at
		WHERE excluded.owner_did != hold_captain_records.owner_did
			OR excluded.public != hold_captain_records.public
			OR excluded.allow_all_crew != hold_captain_records.allow_all_crew
			OR excluded.deployed_at IS NOT hold_captain_records.deployed_at
			OR excluded.region IS NOT hold_captain_records.region
			OR excluded.successor IS NOT hold_captain_records.successor
		`
		if _, err := db.Exec(query, vals...); err != nil {
			return fmt.Errorf("batch upsert captain records: %w", err)
		}
	}
	return nil
}
// BatchUpsertCrewMembers upserts a batch of crew members.
//
// updated_at is written with a SQL CURRENT_TIMESTAMP literal rather than a
// bound parameter, so the VALUES clause is built by hand instead of with
// buildPlaceholders (which only emits uniform `?` groups).
func BatchUpsertCrewMembers(db DBTX, members []CrewMember) error {
	if len(members) == 0 {
		return nil
	}
	for i := 0; i*BatchSize < len(members); i++ {
		start, end := chunk(len(members), i)
		batch := members[start:end]
		// updated_at uses CURRENT_TIMESTAMP literal, so it's not a placeholder.
		const cols = 7
		args := make([]any, 0, len(batch)*cols)
		for _, m := range batch {
			args = append(args,
				m.HoldDID, m.MemberDID, m.Rkey,
				nullString(m.Role),
				nullString(m.Permissions),
				nullString(m.Tier),
				nullString(m.AddedAt),
			)
		}
		// Each group is `(?,?,?,?,?,?,?,CURRENT_TIMESTAMP)`.
		group := "(" + strings.Repeat("?,", cols) + "CURRENT_TIMESTAMP)"
		var sb strings.Builder
		sb.Grow((len(group) + 1) * len(batch))
		// Fix: the inner counter is named j; the original re-declared i here,
		// shadowing the sub-batch counter above — harmless today but a classic
		// trap for future edits (and flagged by shadow-detection linters).
		for j := 0; j < len(batch); j++ {
			if j > 0 {
				sb.WriteByte(',')
			}
			sb.WriteString(group)
		}
		query := `
		INSERT INTO hold_crew_members (
			hold_did, member_did, rkey, role, permissions, tier, added_at, updated_at
		) VALUES ` + sb.String() + `
		ON CONFLICT(hold_did, member_did) DO UPDATE SET
			rkey = excluded.rkey,
			role = excluded.role,
			permissions = excluded.permissions,
			tier = excluded.tier,
			added_at = excluded.added_at,
			updated_at = CURRENT_TIMESTAMP
		WHERE excluded.rkey != hold_crew_members.rkey
			OR excluded.role IS NOT hold_crew_members.role
			OR excluded.permissions IS NOT hold_crew_members.permissions
			OR excluded.tier IS NOT hold_crew_members.tier
			OR excluded.added_at IS NOT hold_crew_members.added_at
		`
		if _, err := db.Exec(query, args...); err != nil {
			return fmt.Errorf("batch upsert crew members: %w", err)
		}
	}
	return nil
}
// AnnotationRow represents a single key/value annotation for a repository,
// used by BatchUpsertRepositoryAnnotations.
type AnnotationRow struct {
	DID        string
	Repository string
	Key        string
	Value      string
}

// BatchUpsertRepositoryAnnotations upserts annotation rows and deletes any
// stale keys for each (did, repository) represented in the input. The caller
// is responsible for pre-filtering: rows should represent only repositories
// whose newest manifest has at least one non-empty annotation, matching the
// single-row UpsertRepositoryAnnotations semantics.
func BatchUpsertRepositoryAnnotations(db DBTX, rows []AnnotationRow) error {
	if len(rows) == 0 {
		return nil
	}
	// Group keys by (did, repository) so stale entries can be pruned per repo.
	type repoKey struct{ did, repo string }
	liveKeys := make(map[repoKey][]string)
	for _, row := range rows {
		rk := repoKey{row.DID, row.Repository}
		liveKeys[rk] = append(liveKeys[rk], row.Key)
	}
	// One DELETE per repository; DELETE is cheap and each repo has few keys.
	for rk, keys := range liveKeys {
		args := make([]any, 0, 2+len(keys))
		args = append(args, rk.did, rk.repo)
		for _, key := range keys {
			args = append(args, key)
		}
		in := strings.TrimSuffix(strings.Repeat("?,", len(keys)), ",")
		if _, err := db.Exec(`
		DELETE FROM repository_annotations
		WHERE did = ? AND repository = ? AND key NOT IN (`+in+`)
		`, args...); err != nil {
			return fmt.Errorf("batch delete stale annotations: %w", err)
		}
	}
	// Upsert all annotation rows in sub-batches.
	now := time.Now()
	const cols = 5
	for start := 0; start < len(rows); start += BatchSize {
		end := start + BatchSize
		if end > len(rows) {
			end = len(rows)
		}
		sub := rows[start:end]
		vals := make([]any, 0, len(sub)*cols)
		for _, row := range sub {
			vals = append(vals, row.DID, row.Repository, row.Key, row.Value, now)
		}
		query := `
		INSERT INTO repository_annotations (did, repository, key, value, updated_at)
		VALUES ` + buildPlaceholders(len(sub), cols) + `
		ON CONFLICT(did, repository, key) DO UPDATE SET
			value = excluded.value,
			updated_at = excluded.updated_at
		WHERE excluded.value != repository_annotations.value
		`
		if _, err := db.Exec(query, vals...); err != nil {
			return fmt.Errorf("batch upsert annotations: %w", err)
		}
	}
	return nil
}

View File

@@ -0,0 +1,383 @@
package db
import (
"database/sql"
"fmt"
"strings"
"testing"
"time"
)
// setupBatchTestDB spins up a fresh in-memory libsql database with the full
// schema applied, so every batch test can write realistic data without
// stubbing individual tables.
func setupBatchTestDB(t *testing.T) *sql.DB {
	t.Helper()
	// Subtest names contain '/', which is not valid in the DSN.
	name := strings.ReplaceAll(t.Name(), "/", "_")
	conn, err := InitDB(fmt.Sprintf("file:%s?mode=memory&cache=shared", name), LibsqlConfig{})
	if err != nil {
		t.Fatalf("init db: %v", err)
	}
	// Single conn to avoid cross-test contention in the shared in-memory cache.
	conn.SetMaxOpenConns(1)
	t.Cleanup(func() { conn.Close() })
	return conn
}
// createBatchTestUser seeds a minimal users row so FK-constrained inserts
// against the given DID succeed. Repeat calls are no-ops (INSERT OR IGNORE).
func createBatchTestUser(t *testing.T, d *sql.DB, did string) {
	t.Helper()
	if _, err := d.Exec(`
	INSERT OR IGNORE INTO users (did, handle, pds_endpoint, last_seen)
	VALUES (?, ?, ?, datetime('now'))
	`, did, did+".bsky.social", "https://pds.example.com"); err != nil {
		t.Fatalf("seed user: %v", err)
	}
}
// countRows runs a COUNT(*)-style query and returns the single integer result,
// failing the test on any scan error.
func countRows(t *testing.T, d *sql.DB, query string, args ...any) int {
	t.Helper()
	var total int
	err := d.QueryRow(query, args...).Scan(&total)
	if err != nil {
		t.Fatalf("count: %v", err)
	}
	return total
}
// TestBuildPlaceholders covers normal shapes plus the zero-row/zero-column
// degenerate cases, which must produce an empty string.
func TestBuildPlaceholders(t *testing.T) {
	cases := []struct {
		rows int
		cols int
		want string
	}{
		{rows: 1, cols: 1, want: "(?)"},
		{rows: 2, cols: 1, want: "(?),(?)"},
		{rows: 1, cols: 3, want: "(?,?,?)"},
		{rows: 3, cols: 2, want: "(?,?),(?,?),(?,?)"},
		{rows: 0, cols: 5, want: ""},
		{rows: 5, cols: 0, want: ""},
	}
	for _, tc := range cases {
		if got := buildPlaceholders(tc.rows, tc.cols); got != tc.want {
			t.Errorf("buildPlaceholders(%d,%d) = %q, want %q", tc.rows, tc.cols, got, tc.want)
		}
	}
}
// TestBatchInsertManifests_InsertsAndReturnsIDs verifies a two-row batch
// returns one nonzero id per manifest and persists both rows.
func TestBatchInsertManifests_InsertsAndReturnsIDs(t *testing.T) {
	conn := setupBatchTestDB(t)
	createBatchTestUser(t, conn, "did:plc:alice")

	ts := time.Now()
	input := []Manifest{
		{DID: "did:plc:alice", Repository: "app1", Digest: "sha256:aaa", HoldEndpoint: "did:web:hold", SchemaVersion: 2, MediaType: "application/vnd.oci.image.manifest.v1+json", ArtifactType: "container-image", CreatedAt: ts},
		{DID: "did:plc:alice", Repository: "app2", Digest: "sha256:bbb", HoldEndpoint: "did:web:hold", SchemaVersion: 2, MediaType: "application/vnd.oci.image.manifest.v1+json", ArtifactType: "container-image", CreatedAt: ts},
	}
	ids, err := BatchInsertManifests(conn, input)
	if err != nil {
		t.Fatalf("batch insert: %v", err)
	}
	if len(ids) != 2 {
		t.Fatalf("expected 2 ids, got %d", len(ids))
	}
	for _, m := range input {
		if ids[ManifestKey(m.DID, m.Repository, m.Digest)] == 0 {
			t.Errorf("missing id for %s", m.Repository)
		}
	}
	if got := countRows(t, conn, `SELECT COUNT(*) FROM manifests`); got != 2 {
		t.Errorf("row count = %d, want 2", got)
	}
}
// TestBatchInsertManifests_Idempotent checks that re-inserting the same
// manifest leaves exactly one row (upsert, not duplicate).
func TestBatchInsertManifests_Idempotent(t *testing.T) {
	conn := setupBatchTestDB(t)
	createBatchTestUser(t, conn, "did:plc:alice")

	single := []Manifest{{
		DID: "did:plc:alice", Repository: "app", Digest: "sha256:aaa",
		HoldEndpoint: "did:web:hold", SchemaVersion: 2,
		MediaType:    "application/vnd.oci.image.manifest.v1+json",
		ArtifactType: "container-image", CreatedAt: time.Now(),
	}}
	if _, err := BatchInsertManifests(conn, single); err != nil {
		t.Fatalf("first insert: %v", err)
	}
	if _, err := BatchInsertManifests(conn, single); err != nil {
		t.Fatalf("second insert: %v", err)
	}
	if got := countRows(t, conn, `SELECT COUNT(*) FROM manifests`); got != 1 {
		t.Errorf("expected idempotent; row count = %d", got)
	}
}
// TestBatchInsertManifests_Chunking inserts more rows than one sub-batch
// holds, exercising the chunk loop end-to-end.
func TestBatchInsertManifests_Chunking(t *testing.T) {
	// Exceed one sub-batch to exercise the chunk loop.
	conn := setupBatchTestDB(t)
	createBatchTestUser(t, conn, "did:plc:alice")

	const n = BatchSize + 17
	ts := time.Now()
	input := make([]Manifest, 0, n)
	for i := 0; i < n; i++ {
		input = append(input, Manifest{
			DID: "did:plc:alice", Repository: "app", Digest: fmt.Sprintf("sha256:%04d", i),
			HoldEndpoint: "did:web:hold", SchemaVersion: 2,
			MediaType:    "application/vnd.oci.image.manifest.v1+json",
			ArtifactType: "container-image", CreatedAt: ts,
		})
	}
	ids, err := BatchInsertManifests(conn, input)
	if err != nil {
		t.Fatalf("batch insert: %v", err)
	}
	if len(ids) != n {
		t.Errorf("ids len = %d, want %d", len(ids), n)
	}
	if got := countRows(t, conn, `SELECT COUNT(*) FROM manifests`); got != n {
		t.Errorf("row count = %d, want %d", got, n)
	}
}
// TestBatchInsertLayers_RespectsFK inserts layers under a real manifest id
// (satisfying the FK) and confirms re-insertion is a conflict-skipping no-op.
func TestBatchInsertLayers_RespectsFK(t *testing.T) {
	conn := setupBatchTestDB(t)
	createBatchTestUser(t, conn, "did:plc:alice")

	ids, err := BatchInsertManifests(conn, []Manifest{{
		DID: "did:plc:alice", Repository: "app", Digest: "sha256:aaa",
		HoldEndpoint: "did:web:hold", SchemaVersion: 2,
		MediaType:    "application/vnd.oci.image.manifest.v1+json",
		ArtifactType: "container-image", CreatedAt: time.Now(),
	}})
	if err != nil {
		t.Fatalf("insert manifest: %v", err)
	}
	manifestID := ids[ManifestKey("did:plc:alice", "app", "sha256:aaa")]

	input := []Layer{
		{ManifestID: manifestID, Digest: "sha256:L0", Size: 100, MediaType: "application/vnd.oci.image.layer.v1.tar+gzip", LayerIndex: 0},
		{ManifestID: manifestID, Digest: "sha256:L1", Size: 200, MediaType: "application/vnd.oci.image.layer.v1.tar+gzip", LayerIndex: 1},
	}
	if err := BatchInsertLayers(conn, input); err != nil {
		t.Fatalf("batch insert layers: %v", err)
	}
	if got := countRows(t, conn, `SELECT COUNT(*) FROM layers`); got != 2 {
		t.Errorf("layers count = %d, want 2", got)
	}
	// Re-run to confirm ON CONFLICT DO NOTHING doesn't error.
	if err := BatchInsertLayers(conn, input); err != nil {
		t.Fatalf("idempotent layers: %v", err)
	}
	if got := countRows(t, conn, `SELECT COUNT(*) FROM layers`); got != 2 {
		t.Errorf("layers after re-insert = %d, want 2", got)
	}
}
// TestBatchUpsertTags_Idempotent upserts the same tag set twice and verifies
// no duplicate rows appear and the rerun does not error.
func TestBatchUpsertTags_Idempotent(t *testing.T) {
	database := setupBatchTestDB(t)
	createBatchTestUser(t, database, "did:plc:alice")

	ts := time.Now()
	tagSet := []Tag{
		{DID: "did:plc:alice", Repository: "app", Tag: "v1", Digest: "sha256:aaa", CreatedAt: ts},
		{DID: "did:plc:alice", Repository: "app", Tag: "v2", Digest: "sha256:bbb", CreatedAt: ts},
	}
	if err := BatchUpsertTags(database, tagSet); err != nil {
		t.Fatalf("batch upsert: %v", err)
	}
	// Second pass must be a pure upsert: same rows, no error.
	if err := BatchUpsertTags(database, tagSet); err != nil {
		t.Fatalf("rerun: %v", err)
	}
	if got := countRows(t, database, `SELECT COUNT(*) FROM tags`); got != 2 {
		t.Errorf("tags count = %d, want 2", got)
	}
}
// TestBatchUpsertStars stars a repo once, replays the upsert, and confirms a
// single row survives (ON CONFLICT DO NOTHING).
func TestBatchUpsertStars(t *testing.T) {
	database := setupBatchTestDB(t)
	createBatchTestUser(t, database, "did:plc:alice")
	createBatchTestUser(t, database, "did:plc:bob")

	input := []StarInput{
		{StarrerDID: "did:plc:bob", OwnerDID: "did:plc:alice", Repository: "app", CreatedAt: time.Now()},
	}
	if err := BatchUpsertStars(database, input); err != nil {
		t.Fatalf("batch upsert stars: %v", err)
	}
	// Replaying the same star must be swallowed by the conflict clause.
	if err := BatchUpsertStars(database, input); err != nil {
		t.Fatalf("rerun: %v", err)
	}
	if got := countRows(t, database, `SELECT COUNT(*) FROM stars`); got != 1 {
		t.Errorf("stars count = %d, want 1", got)
	}
}
// TestBatchUpsertRepoPages inserts a repo page, then upserts it again with a
// changed description and verifies the conflict path updates the stored row.
func TestBatchUpsertRepoPages(t *testing.T) {
	database := setupBatchTestDB(t)
	createBatchTestUser(t, database, "did:plc:alice")

	ts := time.Now()
	pages := []RepoPage{
		{DID: "did:plc:alice", Repository: "app", Description: "desc", CreatedAt: ts, UpdatedAt: ts},
	}
	if err := BatchUpsertRepoPages(database, pages); err != nil {
		t.Fatalf("batch upsert: %v", err)
	}

	// Re-upsert with a new description; the existing row must be updated.
	pages[0].Description = "new desc"
	if err := BatchUpsertRepoPages(database, pages); err != nil {
		t.Fatalf("update: %v", err)
	}

	var stored string
	if err := database.QueryRow(`SELECT description FROM repo_pages WHERE did=? AND repository=?`,
		"did:plc:alice", "app").Scan(&stored); err != nil {
		t.Fatalf("select: %v", err)
	}
	if stored != "new desc" {
		t.Errorf("description = %q, want %q", stored, "new desc")
	}
}
// TestBatchUpsertDailyStats verifies daily stat rows can be inserted and then
// overwritten in place via the upsert conflict path.
func TestBatchUpsertDailyStats(t *testing.T) {
	database := setupBatchTestDB(t)
	createBatchTestUser(t, database, "did:plc:alice")

	statRows := []DailyStats{
		{DID: "did:plc:alice", Repository: "app", Date: "2026-04-19", PullCount: 5, PushCount: 2},
	}
	if err := BatchUpsertDailyStats(database, statRows); err != nil {
		t.Fatalf("upsert: %v", err)
	}

	// Bump the pull count and re-upsert: same (did, repo, date) key.
	statRows[0].PullCount = 10
	if err := BatchUpsertDailyStats(database, statRows); err != nil {
		t.Fatalf("update: %v", err)
	}

	var pullCount int
	if err := database.QueryRow(`SELECT pull_count FROM repository_stats_daily WHERE did=? AND repository=? AND date=?`,
		"did:plc:alice", "app", "2026-04-19").Scan(&pullCount); err != nil {
		t.Fatalf("select: %v", err)
	}
	if pullCount != 10 {
		t.Errorf("pull = %d, want 10", pullCount)
	}
}
// TestBatchUpsertRepositoryAnnotations_DropsStaleKeys verifies the batch
// upsert replaces the full annotation set: keys absent from a later call are
// deleted and surviving keys pick up their new values.
func TestBatchUpsertRepositoryAnnotations_DropsStaleKeys(t *testing.T) {
	database := setupBatchTestDB(t)
	createBatchTestUser(t, database, "did:plc:alice")

	if err := BatchUpsertRepositoryAnnotations(database, []AnnotationRow{
		{DID: "did:plc:alice", Repository: "app", Key: "a", Value: "1"},
		{DID: "did:plc:alice", Repository: "app", Key: "b", Value: "2"},
	}); err != nil {
		t.Fatalf("initial: %v", err)
	}
	if got := countRows(t, database, `SELECT COUNT(*) FROM repository_annotations WHERE did=? AND repository=?`,
		"did:plc:alice", "app"); got != 2 {
		t.Errorf("initial count = %d, want 2", got)
	}

	// Re-upsert with only "a": key "b" is now stale and must be dropped.
	if err := BatchUpsertRepositoryAnnotations(database, []AnnotationRow{
		{DID: "did:plc:alice", Repository: "app", Key: "a", Value: "1-updated"},
	}); err != nil {
		t.Fatalf("update: %v", err)
	}
	if got := countRows(t, database, `SELECT COUNT(*) FROM repository_annotations WHERE did=? AND repository=?`,
		"did:plc:alice", "app"); got != 1 {
		t.Errorf("after update = %d, want 1", got)
	}

	var stored string
	if err := database.QueryRow(`SELECT value FROM repository_annotations WHERE key=? AND did=? AND repository=?`,
		"a", "did:plc:alice", "app").Scan(&stored); err != nil {
		t.Fatalf("select: %v", err)
	}
	if stored != "1-updated" {
		t.Errorf("value = %q, want 1-updated", stored)
	}
}
// TestBatchUpsertCaptainRecords inserts a single captain record and checks it
// lands in hold_captain_records.
func TestBatchUpsertCaptainRecords(t *testing.T) {
	database := setupBatchTestDB(t)

	if err := BatchUpsertCaptainRecords(database, []HoldCaptainRecord{
		{HoldDID: "did:web:hold1", OwnerDID: "did:plc:alice", Public: true, AllowAllCrew: false, UpdatedAt: time.Now()},
	}); err != nil {
		t.Fatalf("upsert: %v", err)
	}
	if got := countRows(t, database, `SELECT COUNT(*) FROM hold_captain_records`); got != 1 {
		t.Errorf("count = %d, want 1", got)
	}
}
// TestBatchUpsertCrewMembers inserts a crew member, then re-upserts with a
// different rkey to exercise the ON CONFLICT update path.
func TestBatchUpsertCrewMembers(t *testing.T) {
	database := setupBatchTestDB(t)

	crew := []CrewMember{
		{HoldDID: "did:web:hold1", MemberDID: "did:plc:alice", Rkey: "rkey1", Role: "owner"},
	}
	if err := BatchUpsertCrewMembers(database, crew); err != nil {
		t.Fatalf("upsert: %v", err)
	}

	// Same (hold, member) key with a new rkey: must update, not duplicate.
	crew[0].Rkey = "rkey2"
	if err := BatchUpsertCrewMembers(database, crew); err != nil {
		t.Fatalf("update: %v", err)
	}

	var stored string
	if err := database.QueryRow(`SELECT rkey FROM hold_crew_members WHERE hold_did=? AND member_did=?`,
		"did:web:hold1", "did:plc:alice").Scan(&stored); err != nil {
		t.Fatalf("select: %v", err)
	}
	if stored != "rkey2" {
		t.Errorf("rkey = %q, want rkey2", stored)
	}
}
// TestBatchEmptySlices confirms every batch helper tolerates a nil input
// slice without erroring.
func TestBatchEmptySlices(t *testing.T) {
	database := setupBatchTestDB(t)

	// BatchInsertManifests is the only helper with a second return value.
	if _, err := BatchInsertManifests(database, nil); err != nil {
		t.Errorf("manifests: %v", err)
	}

	checks := []struct {
		label string
		run   func() error
	}{
		{"layers", func() error { return BatchInsertLayers(database, nil) }},
		{"refs", func() error { return BatchInsertManifestReferences(database, nil) }},
		{"tags", func() error { return BatchUpsertTags(database, nil) }},
		{"stars", func() error { return BatchUpsertStars(database, nil) }},
		{"repo pages", func() error { return BatchUpsertRepoPages(database, nil) }},
		{"daily", func() error { return BatchUpsertDailyStats(database, nil) }},
		{"repo stats", func() error { return BatchUpsertRepositoryStats(database, nil) }},
		{"captain", func() error { return BatchUpsertCaptainRecords(database, nil) }},
		{"crew", func() error { return BatchUpsertCrewMembers(database, nil) }},
		{"annotations", func() error { return BatchUpsertRepositoryAnnotations(database, nil) }},
	}
	for _, c := range checks {
		if err := c.run(); err != nil {
			t.Errorf("%s: %v", c.label, err)
		}
	}
}

69
pkg/appview/db/conn.go Normal file
View File

@@ -0,0 +1,69 @@
package db
import (
"context"
"database/sql"
"database/sql/driver"
"errors"
"strings"
)
// poisonedTxSubstrings enumerates error-message fragments that identify a
// go-libsql / remote libsql connection left in a state that cannot safely be
// reused. The usual trigger is Bunny Database killing a transaction that
// outlived its server-side timeout; the follow-on COMMIT then observes the
// connection in a poisoned state.
var poisonedTxSubstrings = []string{
	"Transaction timed-out",
	"no transaction is active",
	"connection has reached an invalid state",
	"invalid state, started with",
}

// IsPoisonedTxErr reports whether err means the underlying connection must
// not serve any further statements. When it returns true the caller should
// evict the connection from the pool.
func IsPoisonedTxErr(err error) bool {
	if err == nil {
		return false
	}
	text := err.Error()
	for _, marker := range poisonedTxSubstrings {
		if strings.Contains(text, marker) {
			return true
		}
	}
	return false
}
// ExecResilient borrows a dedicated connection from db, runs fn against it, and
// evicts the connection from the pool when fn returns a poisoned-transaction
// error. The connection is always released via Close.
//
// Poison eviction works by returning driver.ErrBadConn from within conn.Raw:
// database/sql treats that as a signal to discard the underlying driver conn
// rather than returning it to the idle pool.
//
// ExecResilient does NOT retry. Callers wrap the call in their own retry policy
// when that is desired (for example, a single retry on the live Jetstream path).
func ExecResilient(ctx context.Context, db *sql.DB, fn func(*sql.Conn) error) error {
	conn, err := db.Conn(ctx)
	if err != nil {
		// Tag pool-acquisition failures with ErrNoPoolConn so callers can
		// distinguish them (via errors.Is) from statement-level errors, as
		// ErrNoPoolConn's contract promises. Previously the sentinel was
		// declared but never returned.
		return errors.Join(ErrNoPoolConn, err)
	}
	defer conn.Close()

	execErr := fn(conn)
	if IsPoisonedTxErr(execErr) {
		// Discard the underlying driver conn so it never serves another caller.
		// The Raw callback's return value is what triggers eviction; we ignore
		// any error from Raw itself.
		_ = conn.Raw(func(any) error { return driver.ErrBadConn })
	}
	return execErr
}

// ErrNoPoolConn is returned (wrapped, together with the underlying pool
// error) by ExecResilient when a connection cannot be obtained from the pool
// (e.g. context cancelled). Match it with errors.Is to distinguish
// pool-exhaustion from statement-level errors.
var ErrNoPoolConn = errors.New("db: failed to acquire pool connection")

View File

@@ -0,0 +1,28 @@
package db
import (
"errors"
"testing"
)
// TestIsPoisonedTxErr table-tests the poisoned-connection classifier against
// nil, an unrelated error, and each known poisoned-message shape.
func TestIsPoisonedTxErr(t *testing.T) {
	cases := []struct {
		name string
		err  error
		want bool
	}{
		{"nil", nil, false},
		{"unrelated", errors.New("disk full"), false},
		{"bunny timeout", errors.New("Remote SQlite failure: `2:0:Transaction timed-out`"), true},
		{"no active tx", errors.New("Remote SQlite failure: `3:1:cannot commit - no transaction is active`"), true},
		{"init state", errors.New("error code = 2: Error executing statement: connection has reached an invalid state, started with Init"), true},
		{"just invalid state", errors.New("generic failure: invalid state, started with Query"), true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := IsPoisonedTxErr(tc.err)
			if got != tc.want {
				t.Errorf("IsPoisonedTxErr(%v) = %v, want %v", tc.err, got, tc.want)
			}
		})
	}
}

View File

@@ -333,9 +333,10 @@ func GetAvailableHolds(db DBTX, userDID string) ([]AvailableHold, error) {
c.permissions
FROM hold_captain_records h
LEFT JOIN hold_crew_members c ON h.hold_did = c.hold_did AND c.member_did = ?1
WHERE h.allow_all_crew = 1
WHERE (h.successor IS NULL OR h.successor = '')
AND (h.allow_all_crew = 1
OR h.owner_did = ?1
OR c.member_did IS NOT NULL
OR c.member_did IS NOT NULL)
ORDER BY
CASE
WHEN h.owner_did = ?1 THEN 0
@@ -387,6 +388,54 @@ func GetAvailableHolds(db DBTX, userDID string) ([]AvailableHold, error) {
return holds, nil
}
// GetAccessibleHoldDIDs computes which hold DIDs the given viewer may see in
// listings. An empty viewerDID (anonymous) yields holds with public=1 OR
// allow_all_crew=1; a signed-in viewer additionally gets holds they own or
// are crew of.
//
// The result is intended for IN (...) filters against
// manifests.hold_endpoint / tags.hold_endpoint (both store the hold DID).
func GetAccessibleHoldDIDs(db DBTX, viewerDID string) ([]string, error) {
	var (
		rows *sql.Rows
		err  error
	)
	if viewerDID == "" {
		rows, err = db.Query(`
		SELECT hold_did
		FROM hold_captain_records
		WHERE public = 1 OR allow_all_crew = 1
	`)
	} else {
		rows, err = db.Query(`
		SELECT DISTINCT h.hold_did
		FROM hold_captain_records h
		LEFT JOIN hold_crew_members c
			ON h.hold_did = c.hold_did AND c.member_did = ?1
		WHERE h.public = 1
			OR h.allow_all_crew = 1
			OR h.owner_did = ?1
			OR c.member_did IS NOT NULL
	`, viewerDID)
	}
	if err != nil {
		return nil, fmt.Errorf("failed to query accessible holds: %w", err)
	}
	defer rows.Close()

	var accessible []string
	for rows.Next() {
		var holdDID string
		if scanErr := rows.Scan(&holdDID); scanErr != nil {
			return nil, fmt.Errorf("failed to scan accessible hold: %w", scanErr)
		}
		accessible = append(accessible, holdDID)
	}
	if iterErr := rows.Err(); iterErr != nil {
		return nil, fmt.Errorf("error iterating accessible holds: %w", iterErr)
	}
	return accessible, nil
}
// GetCrewMemberships returns all holds where a user is a crew member
func GetCrewMemberships(db DBTX, memberDID string) ([]CrewMember, error) {
query := `

View File

@@ -464,3 +464,94 @@ func TestListHoldDIDs_OrderByUpdatedAt(t *testing.T) {
}
}
}
// TestGetAccessibleHoldDIDs exercises the viewer→hold visibility computation
// used to filter listings to what the viewer is allowed to see.
func TestGetAccessibleHoldDIDs(t *testing.T) {
	db := setupHoldTestDB(t)

	// Seed one captain record per visibility combination: public,
	// self-service (allow_all_crew), and two private invite-only holds.
	for _, rec := range []*HoldCaptainRecord{
		{HoldDID: "did:web:public.example", OwnerDID: "did:plc:alice", Public: true, AllowAllCrew: false, UpdatedAt: time.Now()},
		{HoldDID: "did:web:selfserv.example", OwnerDID: "did:plc:bob", Public: false, AllowAllCrew: true, UpdatedAt: time.Now()},
		{HoldDID: "did:web:invite.example", OwnerDID: "did:plc:carol", Public: false, AllowAllCrew: false, UpdatedAt: time.Now()},
		{HoldDID: "did:web:carol-hold.example", OwnerDID: "did:plc:carol", Public: false, AllowAllCrew: false, UpdatedAt: time.Now()},
	} {
		if err := UpsertCaptainRecord(db, rec); err != nil {
			t.Fatalf("seed captain %s: %v", rec.HoldDID, err)
		}
	}
	// dave is crew of did:web:invite.example
	if err := UpsertCrewMember(db, &CrewMember{
		HoldDID: "did:web:invite.example", MemberDID: "did:plc:dave", Rkey: "rk1",
	}); err != nil {
		t.Fatalf("seed crew: %v", err)
	}

	has := func(list []string, want string) bool {
		for _, got := range list {
			if got == want {
				return true
			}
		}
		return false
	}

	t.Run("anonymous viewer sees public + self-service only", func(t *testing.T) {
		dids, err := GetAccessibleHoldDIDs(db, "")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if len(dids) != 2 {
			t.Fatalf("expected 2 DIDs (public+self-service), got %d: %v", len(dids), dids)
		}
		if !has(dids, "did:web:public.example") {
			t.Errorf("missing public hold: %v", dids)
		}
		if !has(dids, "did:web:selfserv.example") {
			t.Errorf("missing self-service hold: %v", dids)
		}
		if has(dids, "did:web:invite.example") {
			t.Errorf("anon should not see invite-only hold: %v", dids)
		}
	})

	t.Run("crew member also sees invite-only hold", func(t *testing.T) {
		dids, err := GetAccessibleHoldDIDs(db, "did:plc:dave")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if !has(dids, "did:web:invite.example") {
			t.Errorf("crew member should see invite-only hold they belong to: %v", dids)
		}
		if has(dids, "did:web:carol-hold.example") {
			t.Errorf("dave is not crew of carol's private hold: %v", dids)
		}
	})

	t.Run("owner sees their own private hold", func(t *testing.T) {
		dids, err := GetAccessibleHoldDIDs(db, "did:plc:carol")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		// carol owns invite.example and carol-hold.example, both private
		if !has(dids, "did:web:invite.example") {
			t.Errorf("owner should see their invite-only hold: %v", dids)
		}
		if !has(dids, "did:web:carol-hold.example") {
			t.Errorf("owner should see their second private hold: %v", dids)
		}
	})

	t.Run("random authenticated viewer gets same set as anonymous", func(t *testing.T) {
		dids, err := GetAccessibleHoldDIDs(db, "did:plc:nobody")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if len(dids) != 2 {
			t.Fatalf("expected 2 DIDs, got %d: %v", len(dids), dids)
		}
	})
}

View File

@@ -0,0 +1,33 @@
package db
import (
"database/sql"
"errors"
)
// GetJetstreamCursor returns the last persisted Jetstream cursor (time_us).
// A fresh database with no saved cursor yields 0 rather than an error.
func GetJetstreamCursor(db DBTX) (int64, error) {
	var cur int64
	err := db.QueryRow(`SELECT cursor FROM jetstream_cursor WHERE id = 1`).Scan(&cur)
	switch {
	case errors.Is(err, sql.ErrNoRows):
		return 0, nil
	case err != nil:
		return 0, err
	default:
		return cur, nil
	}
}
// SaveJetstreamCursor persists cursor into the singleton jetstream_cursor
// row, creating it on first use. Idempotent — safe to call on every tick.
func SaveJetstreamCursor(db DBTX, cursor int64) error {
	if _, err := db.Exec(`
		INSERT INTO jetstream_cursor (id, cursor, updated_at)
		VALUES (1, ?, CURRENT_TIMESTAMP)
		ON CONFLICT(id) DO UPDATE SET
			cursor = excluded.cursor,
			updated_at = excluded.updated_at
	`, cursor); err != nil {
		return err
	}
	return nil
}

View File

@@ -0,0 +1,50 @@
package db
import (
"fmt"
"strings"
"testing"
)
// TestJetstreamCursor_RoundTrip checks the fresh-DB default, a save/load
// round trip, and an overwrite with a newer cursor value.
func TestJetstreamCursor_RoundTrip(t *testing.T) {
	dbName := strings.ReplaceAll(t.Name(), "/", "_")
	conn, err := InitDB(fmt.Sprintf("file:%s?mode=memory&cache=shared", dbName), LibsqlConfig{})
	if err != nil {
		t.Fatalf("init db: %v", err)
	}
	conn.SetMaxOpenConns(1)
	defer conn.Close()

	// A fresh database has no persisted cursor: expect the zero sentinel.
	cur, err := GetJetstreamCursor(conn)
	if err != nil {
		t.Fatalf("get empty: %v", err)
	}
	if cur != 0 {
		t.Errorf("initial cursor = %d, want 0", cur)
	}

	// Save a value and read it back.
	if err := SaveJetstreamCursor(conn, 1234567890); err != nil {
		t.Fatalf("save: %v", err)
	}
	cur, err = GetJetstreamCursor(conn)
	if err != nil {
		t.Fatalf("get after save: %v", err)
	}
	if cur != 1234567890 {
		t.Errorf("cursor = %d, want 1234567890", cur)
	}

	// Overwriting with a newer value must win.
	if err := SaveJetstreamCursor(conn, 9999999999); err != nil {
		t.Fatalf("save 2: %v", err)
	}
	cur, err = GetJetstreamCursor(conn)
	if err != nil {
		t.Fatalf("get 2: %v", err)
	}
	if cur != 9999999999 {
		t.Errorf("cursor after overwrite = %d, want 9999999999", cur)
	}
}

79
pkg/appview/db/labels.go Normal file
View File

@@ -0,0 +1,79 @@
package db
import (
	"database/sql"
	"errors"
	"time"
)
// LabelChecker adapts a *sql.DB to the takedown-label lookup needed by the
// HTTP layer; it satisfies the middleware.LabelChecker interface.
type LabelChecker struct {
	db *sql.DB
}

// NewLabelChecker returns a LabelChecker backed by the given database handle.
func NewLabelChecker(database *sql.DB) *LabelChecker {
	return &LabelChecker{db: database}
}

// IsTakenDown reports whether the (did, repository) pair currently has an
// active takedown; it delegates to the package-level IsTakenDown.
func (lc *LabelChecker) IsTakenDown(did, repository string) (bool, error) {
	return IsTakenDown(lc.db, did, repository)
}
// IsTakenDown reports whether the given (did, repo) pair is currently taken
// down, either via an exact-repo row or via a user-level row (repo='').
func IsTakenDown(db DBTX, did, repository string) (bool, error) {
	var takenDown bool
	scanErr := db.QueryRow(
		`SELECT EXISTS(
			SELECT 1 FROM taken_down_subjects
			WHERE did = ? AND (repo = ? OR repo = '')
		)`,
		did, repository,
	).Scan(&takenDown)
	return takenDown, scanErr
}
// SetTakedown records a positive takedown for (src, did, repo). Idempotent:
// re-applying the same subject simply refreshes the stored cts (upsert).
func SetTakedown(db DBTX, src, did, repo string, cts time.Time) error {
	stamp := cts.UTC().Format(time.RFC3339)
	_, err := db.Exec(
		`INSERT INTO taken_down_subjects (src, did, repo, cts) VALUES (?, ?, ?, ?)
		ON CONFLICT(src, did, repo) DO UPDATE SET cts = excluded.cts`,
		src, did, repo, stamp,
	)
	return err
}
// RemoveTakedown deletes the takedown row for (src, did, repo). Deleting a
// row that does not exist is a no-op, so the call is idempotent.
func RemoveTakedown(db DBTX, src, did, repo string) error {
	if _, err := db.Exec(
		`DELETE FROM taken_down_subjects WHERE src = ? AND did = ? AND repo = ?`,
		src, did, repo,
	); err != nil {
		return err
	}
	return nil
}
// GetCursor returns the last persisted cursor for a labeler src (0 if none).
//
// The no-rows case is detected with errors.Is so wrapped driver errors are
// still recognized, matching how GetJetstreamCursor handles the same case
// (the previous `err == sql.ErrNoRows` comparison misses wrapped errors).
func GetCursor(db DBTX, src string) (int64, error) {
	var cursor int64
	err := db.QueryRow(
		`SELECT cursor FROM labeler_cursor WHERE src = ?`,
		src,
	).Scan(&cursor)
	if errors.Is(err, sql.ErrNoRows) {
		// Unknown src: report the zero cursor rather than an error.
		return 0, nil
	}
	return cursor, err
}
// SetCursor persists the cursor for a labeler src, inserting the row on first
// use and overwriting it afterwards.
func SetCursor(db DBTX, src string, cursor int64) error {
	if _, err := db.Exec(
		`INSERT INTO labeler_cursor (src, cursor) VALUES (?, ?)
		ON CONFLICT(src) DO UPDATE SET cursor = excluded.cursor`,
		src, cursor,
	); err != nil {
		return err
	}
	return nil
}

View File

@@ -0,0 +1,214 @@
package db
import (
"testing"
"time"
)
const testLabelerSrc = "did:plc:labeler"
// TestSetTakedown_RepoLevel verifies that an exact-repo row blocks only that
// repo: sibling repos and the same repo under another DID stay visible.
func TestSetTakedown_RepoLevel(t *testing.T) {
	conn, err := InitDB("file:TestSetTakedown_RepoLevel?mode=memory&cache=shared", LibsqlConfig{})
	if err != nil {
		t.Fatalf("init db: %v", err)
	}
	defer conn.Close()

	if err := SetTakedown(conn, testLabelerSrc, "did:plc:user", "myrepo", time.Now().UTC()); err != nil {
		t.Fatalf("set takedown: %v", err)
	}

	down, err := IsTakenDown(conn, "did:plc:user", "myrepo")
	if err != nil {
		t.Fatalf("is taken down (target): %v", err)
	}
	if !down {
		t.Fatal("expected target repo to be taken down")
	}

	down, err = IsTakenDown(conn, "did:plc:user", "otherrepo")
	if err != nil {
		t.Fatalf("is taken down (sibling): %v", err)
	}
	if down {
		t.Fatal("sibling repo should not be taken down")
	}

	down, err = IsTakenDown(conn, "did:plc:other", "myrepo")
	if err != nil {
		t.Fatalf("is taken down (other did): %v", err)
	}
	if down {
		t.Fatal("repo on different did should not be taken down")
	}
}
// TestSetTakedown_UserLevel verifies that a user-level row (repo='') matches
// every repo lookup for that DID while leaving other DIDs untouched.
func TestSetTakedown_UserLevel(t *testing.T) {
	conn, err := InitDB("file:TestSetTakedown_UserLevel?mode=memory&cache=shared", LibsqlConfig{})
	if err != nil {
		t.Fatalf("init db: %v", err)
	}
	defer conn.Close()

	if err := SetTakedown(conn, testLabelerSrc, "did:plc:user", "", time.Now().UTC()); err != nil {
		t.Fatalf("set user-level takedown: %v", err)
	}

	for _, name := range []string{"alpha", "beta", "anything"} {
		down, err := IsTakenDown(conn, "did:plc:user", name)
		if err != nil {
			t.Fatalf("is taken down %q: %v", name, err)
		}
		if !down {
			t.Fatalf("user-level takedown should cover repo %q", name)
		}
	}

	down, err := IsTakenDown(conn, "did:plc:bystander", "alpha")
	if err != nil {
		t.Fatalf("is taken down (bystander): %v", err)
	}
	if down {
		t.Fatal("user-level takedown should not affect a different did")
	}
}
// TestRemoveTakedown_Idempotent checks that removing an absent row is a no-op
// and that removing a real row makes IsTakenDown report false again.
func TestRemoveTakedown_Idempotent(t *testing.T) {
	conn, err := InitDB("file:TestRemoveTakedown_Idempotent?mode=memory&cache=shared", LibsqlConfig{})
	if err != nil {
		t.Fatalf("init db: %v", err)
	}
	defer conn.Close()

	// Removing a row that never existed must not error.
	if err := RemoveTakedown(conn, testLabelerSrc, "did:plc:ghost", "ghost"); err != nil {
		t.Fatalf("remove missing row: %v", err)
	}

	if err := SetTakedown(conn, testLabelerSrc, "did:plc:user", "myrepo", time.Now().UTC()); err != nil {
		t.Fatalf("set takedown: %v", err)
	}
	if err := RemoveTakedown(conn, testLabelerSrc, "did:plc:user", "myrepo"); err != nil {
		t.Fatalf("remove existing row: %v", err)
	}

	down, err := IsTakenDown(conn, "did:plc:user", "myrepo")
	if err != nil {
		t.Fatalf("is taken down after remove: %v", err)
	}
	if down {
		t.Fatal("expected takedown to be cleared after remove")
	}
}
// TestSetTakedown_OnConflictUpdatesCts re-applies a takedown with a later
// timestamp and asserts the existing row's cts is overwritten rather than a
// duplicate being inserted.
func TestSetTakedown_OnConflictUpdatesCts(t *testing.T) {
	conn, err := InitDB("file:TestSetTakedown_OnConflictUpdatesCts?mode=memory&cache=shared", LibsqlConfig{})
	if err != nil {
		t.Fatalf("init db: %v", err)
	}
	defer conn.Close()

	first := time.Now().Add(-2 * time.Hour).UTC().Truncate(time.Second)
	second := time.Now().UTC().Truncate(time.Second)
	if err := SetTakedown(conn, testLabelerSrc, "did:plc:user", "repo", first); err != nil {
		t.Fatalf("set initial: %v", err)
	}
	if err := SetTakedown(conn, testLabelerSrc, "did:plc:user", "repo", second); err != nil {
		t.Fatalf("set update: %v", err)
	}

	var raw string
	err = conn.QueryRow(
		`SELECT cts FROM taken_down_subjects WHERE src=? AND did=? AND repo=?`,
		testLabelerSrc, "did:plc:user", "repo",
	).Scan(&raw)
	if err != nil {
		t.Fatalf("query cts: %v", err)
	}
	parsed, err := time.Parse(time.RFC3339, raw)
	if err != nil {
		t.Fatalf("parse stored cts %q: %v", raw, err)
	}
	if !parsed.Equal(second) {
		t.Fatalf("expected cts to be updated to %s, got %s", second, parsed)
	}
}
// TestCursor_GetSetRoundtrip verifies the zero default for an unknown src and
// that SetCursor/GetCursor round-trip and overwrite correctly.
func TestCursor_GetSetRoundtrip(t *testing.T) {
	conn, err := InitDB("file:TestCursor_GetSetRoundtrip?mode=memory&cache=shared", LibsqlConfig{})
	if err != nil {
		t.Fatalf("init db: %v", err)
	}
	defer conn.Close()

	// An src that was never saved reports cursor 0, not an error.
	cur, err := GetCursor(conn, "did:plc:nobody")
	if err != nil {
		t.Fatalf("get cursor unknown: %v", err)
	}
	if cur != 0 {
		t.Fatalf("expected 0 for unknown src, got %d", cur)
	}

	if err := SetCursor(conn, testLabelerSrc, 42); err != nil {
		t.Fatalf("set cursor: %v", err)
	}
	cur, err = GetCursor(conn, testLabelerSrc)
	if err != nil {
		t.Fatalf("get cursor after set: %v", err)
	}
	if cur != 42 {
		t.Fatalf("expected 42, got %d", cur)
	}

	// Overwrite with a smaller value: last write wins.
	if err := SetCursor(conn, testLabelerSrc, 7); err != nil {
		t.Fatalf("set cursor (overwrite): %v", err)
	}
	cur, err = GetCursor(conn, testLabelerSrc)
	if err != nil {
		t.Fatalf("get cursor after overwrite: %v", err)
	}
	if cur != 7 {
		t.Fatalf("expected 7 after overwrite, got %d", cur)
	}
}
// TestSetTakedown_PerSrcIsolation confirms two labeler srcs independently own
// rows for the same (did, repo): deleting one src's row leaves the other's
// takedown in force.
func TestSetTakedown_PerSrcIsolation(t *testing.T) {
	conn, err := InitDB("file:TestSetTakedown_PerSrcIsolation?mode=memory&cache=shared", LibsqlConfig{})
	if err != nil {
		t.Fatalf("init db: %v", err)
	}
	defer conn.Close()

	ts := time.Now().UTC()
	if err := SetTakedown(conn, "did:plc:labeler-a", "did:plc:user", "repo", ts); err != nil {
		t.Fatalf("set src a: %v", err)
	}
	if err := SetTakedown(conn, "did:plc:labeler-b", "did:plc:user", "repo", ts); err != nil {
		t.Fatalf("set src b: %v", err)
	}
	if err := RemoveTakedown(conn, "did:plc:labeler-a", "did:plc:user", "repo"); err != nil {
		t.Fatalf("remove src a: %v", err)
	}

	down, err := IsTakenDown(conn, "did:plc:user", "repo")
	if err != nil {
		t.Fatalf("is taken down: %v", err)
	}
	if !down {
		t.Fatal("repo should still be taken down by src b after src a row removed")
	}
}

View File

@@ -0,0 +1,3 @@
description: Add user_edited flag to repo_pages to prevent auto-overwrite of manually edited descriptions
query: |
ALTER TABLE repo_pages ADD COLUMN user_edited BOOLEAN NOT NULL DEFAULT 0;

View File

@@ -0,0 +1,3 @@
description: Add oci_client column to users table for OCI client preference
query: |
ALTER TABLE users ADD COLUMN oci_client TEXT DEFAULT '';

View File

@@ -0,0 +1,7 @@
description: Cache AI image advisor suggestions per manifest digest
query: |
CREATE TABLE IF NOT EXISTS advisor_suggestions (
manifest_digest TEXT PRIMARY KEY,
suggestions_json TEXT NOT NULL,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);

View File

@@ -0,0 +1,13 @@
description: Add subject_digest column to manifests for tracking OCI referrers (attestations, signatures)
query: |
ALTER TABLE manifests ADD COLUMN subject_digest TEXT;
CREATE INDEX IF NOT EXISTS idx_manifests_subject_digest ON manifests(subject_digest);
UPDATE manifests SET subject_digest = 'backfill'
WHERE artifact_type = 'unknown'
AND media_type NOT LIKE '%index%'
AND media_type NOT LIKE '%manifest.list%'
AND id IN (
SELECT m.id FROM manifests m
JOIN manifest_references mr ON mr.digest = m.digest
WHERE mr.is_attestation = 1
);

View File

@@ -0,0 +1,12 @@
description: Add daily repository stats table for pull/push trend tracking
query: |
CREATE TABLE IF NOT EXISTS repository_stats_daily (
did TEXT NOT NULL,
repository TEXT NOT NULL,
date TEXT NOT NULL,
pull_count INTEGER NOT NULL DEFAULT 0,
push_count INTEGER NOT NULL DEFAULT 0,
PRIMARY KEY(did, repository, date),
FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_repo_stats_daily_date ON repository_stats_daily(date DESC);

View File

@@ -0,0 +1,7 @@
description: Persist Jetstream cursor so reconnects resume from last processed event
query: |
CREATE TABLE IF NOT EXISTS jetstream_cursor (
id INTEGER PRIMARY KEY CHECK (id = 1),
cursor INTEGER NOT NULL,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);

View File

@@ -0,0 +1,16 @@
description: Create labels table for ATProto content moderation (takedowns)
query: |
CREATE TABLE IF NOT EXISTS labels (
id INTEGER PRIMARY KEY AUTOINCREMENT,
src TEXT NOT NULL,
uri TEXT NOT NULL,
val TEXT NOT NULL,
neg BOOLEAN NOT NULL DEFAULT 0,
cts TIMESTAMP NOT NULL,
subject_did TEXT NOT NULL,
subject_repo TEXT NOT NULL DEFAULT '',
seq INTEGER NOT NULL DEFAULT 0,
UNIQUE(src, uri, val, neg)
);
CREATE INDEX IF NOT EXISTS idx_labels_subject ON labels(subject_did, subject_repo);
CREATE INDEX IF NOT EXISTS idx_labels_val ON labels(val);

View File

@@ -0,0 +1,3 @@
description: Add optional exp (expiration) column to labels for ATProto label spec
query: |
ALTER TABLE labels ADD COLUMN exp TIMESTAMP;

View File

@@ -0,0 +1,16 @@
description: Replace labels (append-only mirror) with taken_down_subjects + labeler_cursor (current-state model)
query: |
DROP TABLE IF EXISTS labels;
CREATE TABLE IF NOT EXISTS taken_down_subjects (
src TEXT NOT NULL,
did TEXT NOT NULL,
repo TEXT NOT NULL DEFAULT '',
cts TIMESTAMP NOT NULL,
PRIMARY KEY (src, did, repo)
);
CREATE INDEX IF NOT EXISTS idx_taken_down_subjects_did ON taken_down_subjects(did);
CREATE INDEX IF NOT EXISTS idx_taken_down_subjects_did_repo ON taken_down_subjects(did, repo);
CREATE TABLE IF NOT EXISTS labeler_cursor (
src TEXT PRIMARY KEY,
cursor INTEGER NOT NULL
);

View File

@@ -9,6 +9,7 @@ type User struct {
PDSEndpoint string
Avatar string
DefaultHoldDID string
OciClient string
LastSeen time.Time
}
@@ -24,6 +25,7 @@ type Manifest struct {
ConfigDigest string
ConfigSize int64
ArtifactType string // container-image, helm-chart, unknown
SubjectDigest string // digest of the parent manifest (for attestations/referrers)
CreatedAt time.Time
// Annotations removed - now stored in repository_annotations table
}
@@ -91,6 +93,15 @@ type RepositoryStats struct {
LastPush *time.Time `json:"last_push,omitempty"`
}
// DailyStats represents daily pull/push statistics for a repository
// (one row of the repository_stats_daily table).
type DailyStats struct {
	DID        string `json:"did"`        // owner DID of the repository
	Repository string `json:"repository"` // repository name within the DID
	Date       string `json:"date"`       // stats day as a string, e.g. "2026-04-19" — presumably YYYY-MM-DD; confirm against writers
	PullCount  int    `json:"pull_count"` // pulls recorded on that day
	PushCount  int    `json:"push_count"` // pushes recorded on that day
}
// RepositoryWithStats combines repository data with statistics
type RepositoryWithStats struct {
Repository
@@ -113,6 +124,7 @@ type RepoCardData struct {
Digest string // Latest manifest digest (sha256:...)
LastUpdated time.Time // When the repository was last pushed to
RegistryURL string // Registry URL for docker commands (e.g., "atcr.io" or "127.0.0.1:5000")
OciClient string // Preferred OCI client for pull commands (e.g., "docker", "podman")
}
// SetRegistryURL sets the RegistryURL field on all cards in the slice
@@ -122,6 +134,13 @@ func SetRegistryURL(cards []RepoCardData, registryURL string) {
}
}
// SetOciClient stamps the given OCI client preference onto the OciClient
// field of every card in cards (mutates the slice elements in place).
func SetOciClient(cards []RepoCardData, ociClient string) {
	for idx := range cards {
		cards[idx].OciClient = ociClient
	}
}
// PlatformInfo represents platform information (OS/Architecture)
type PlatformInfo struct {
OS string

File diff suppressed because it is too large Load Diff

View File

@@ -855,6 +855,15 @@ func TestGetTagsWithPlatforms(t *testing.T) {
t.Fatalf("Failed to create test user: %v", err)
}
// Register the test hold as public so the hold-access filter allows it
if err := UpsertCaptainRecord(db, &HoldCaptainRecord{
HoldDID: "did:web:hold.example.com",
OwnerDID: "did:plc:holdowner",
Public: true,
}); err != nil {
t.Fatalf("Failed to insert captain record: %v", err)
}
// Test 1: Single-arch manifest (no platform info)
singleArchManifest := &Manifest{
DID: testUser.DID,
@@ -882,7 +891,7 @@ func TestGetTagsWithPlatforms(t *testing.T) {
t.Fatalf("Failed to insert single-arch tag: %v", err)
}
tagsWithPlatforms, err := GetTagsWithPlatforms(db, testUser.DID, "myapp", 100, 0)
tagsWithPlatforms, err := GetTagsWithPlatforms(db, testUser.DID, "myapp", 100, 0, "")
if err != nil {
t.Fatalf("Failed to get tags with platforms: %v", err)
}
@@ -951,7 +960,7 @@ func TestGetTagsWithPlatforms(t *testing.T) {
t.Fatalf("Failed to insert multi-arch tag: %v", err)
}
multiTagsWithPlatforms, err := GetTagsWithPlatforms(db, testUser.DID, "multiapp", 100, 0)
multiTagsWithPlatforms, err := GetTagsWithPlatforms(db, testUser.DID, "multiapp", 100, 0, "")
if err != nil {
t.Fatalf("Failed to get multi-arch tags with platforms: %v", err)
}
@@ -1376,3 +1385,469 @@ func TestIsManifestReferenced(t *testing.T) {
t.Error("Expected sha256:childdef to NOT be referenced for different user")
}
}
// TestGetAllUntaggedManifestDigests verifies the untagged-manifest query:
// only manifests unreachable from any tag are returned (untagged indexes,
// their exclusively-owned children, and orphan single-arch manifests), while
// anything reachable via a tag — directly or as a child of a tagged index —
// is excluded, and children are ordered before their parent index.
func TestGetAllUntaggedManifestDigests(t *testing.T) {
	db, err := InitDB(":memory:", LibsqlConfig{})
	if err != nil {
		t.Fatalf("Failed to init database: %v", err)
	}
	defer db.Close()
	did := "did:plc:test123"
	repo := "myapp"
	now := time.Now()
	// The owning user must exist first (manifests reference users(did)).
	if err := UpsertUser(db, &User{
		DID:         did,
		Handle:      "test.bsky.social",
		PDSEndpoint: "https://test.pds.example.com",
		LastSeen:    now,
	}); err != nil {
		t.Fatalf("Failed to insert user: %v", err)
	}
	indexType := "application/vnd.oci.image.index.v1+json"
	manifestType := "application/vnd.oci.image.manifest.v1+json"
	hold := "did:web:hold.example.com"
	// insertManifest seeds one manifest row for the fixture and returns its
	// DB id (needed later to attach child references to an index).
	insertManifest := func(t *testing.T, digest, mediaType string) int64 {
		t.Helper()
		id, err := InsertManifest(db, &Manifest{
			DID: did, Repository: repo, Digest: digest,
			HoldEndpoint: hold, SchemaVersion: 2, MediaType: mediaType,
			CreatedAt: now,
		})
		if err != nil {
			t.Fatalf("Failed to insert manifest %s: %v", digest, err)
		}
		return id
	}
	// insertRef records a parent-index -> child-manifest reference.
	insertRef := func(t *testing.T, parentID int64, childDigest string, idx int) {
		t.Helper()
		err := InsertManifestReference(db, &ManifestReference{
			ManifestID:           parentID,
			Digest:               childDigest,
			Size:                 1000,
			MediaType:            manifestType,
			PlatformArchitecture: "amd64",
			PlatformOS:           "linux",
			ReferenceIndex:       idx,
		})
		if err != nil {
			t.Fatalf("Failed to insert reference: %v", err)
		}
	}
	// insertTag points a tag at a digest, making that manifest reachable.
	insertTag := func(t *testing.T, digest, tag string) {
		t.Helper()
		if err := UpsertTag(db, &Tag{
			DID: did, Repository: repo, Tag: tag,
			Digest: digest, CreatedAt: now,
		}); err != nil {
			t.Fatalf("Failed to insert tag: %v", err)
		}
	}
	// Setup scenario:
	//
	// TAGGED index "sha256:tagged-index" -> tag "v1"
	//   children: sha256:tagged-child-amd64, sha256:shared-child-arm64
	//
	// UNTAGGED index "sha256:untagged-index" (no tag)
	//   children: sha256:untagged-child-amd64, sha256:shared-child-arm64
	//
	// UNTAGGED orphan single-arch "sha256:orphan-single" (no tag, no parent)
	//
	// TAGGED single-arch "sha256:tagged-single" -> tag "latest"
	// Tagged index + its children
	taggedIndexID := insertManifest(t, "sha256:tagged-index", indexType)
	insertManifest(t, "sha256:tagged-child-amd64", manifestType)
	insertManifest(t, "sha256:shared-child-arm64", manifestType)
	insertRef(t, taggedIndexID, "sha256:tagged-child-amd64", 0)
	insertRef(t, taggedIndexID, "sha256:shared-child-arm64", 1)
	insertTag(t, "sha256:tagged-index", "v1")
	// Untagged index + its children
	untaggedIndexID := insertManifest(t, "sha256:untagged-index", indexType)
	insertManifest(t, "sha256:untagged-child-amd64", manifestType)
	// sha256:shared-child-arm64 already inserted, just add the reference
	insertRef(t, untaggedIndexID, "sha256:untagged-child-amd64", 0)
	insertRef(t, untaggedIndexID, "sha256:shared-child-arm64", 1)
	// Orphan single-arch (no parent, no tag)
	insertManifest(t, "sha256:orphan-single", manifestType)
	// Tagged single-arch
	insertManifest(t, "sha256:tagged-single", manifestType)
	insertTag(t, "sha256:tagged-single", "latest")
	// Run the query
	digests, err := GetAllUntaggedManifestDigests(db, did, repo)
	if err != nil {
		t.Fatalf("GetAllUntaggedManifestDigests error: %v", err)
	}
	// Build sets for easy checking
	digestSet := map[string]bool{}
	for _, d := range digests {
		digestSet[d] = true
	}
	// Should include: untagged index, its exclusive child, and the orphan single
	if !digestSet["sha256:untagged-index"] {
		t.Error("Expected untagged-index to be included")
	}
	if !digestSet["sha256:untagged-child-amd64"] {
		t.Error("Expected untagged-child-amd64 to be included")
	}
	if !digestSet["sha256:orphan-single"] {
		t.Error("Expected orphan-single to be included")
	}
	// Should NOT include: tagged index, tagged children, shared child (still referenced by tagged index), tagged single
	if digestSet["sha256:tagged-index"] {
		t.Error("Expected tagged-index to NOT be included")
	}
	if digestSet["sha256:tagged-child-amd64"] {
		t.Error("Expected tagged-child-amd64 to NOT be included")
	}
	if digestSet["sha256:shared-child-arm64"] {
		t.Error("Expected shared-child-arm64 to NOT be included (still referenced by tagged index)")
	}
	if digestSet["sha256:tagged-single"] {
		t.Error("Expected tagged-single to NOT be included")
	}
	// Verify ordering: children should come before their parent index
	// (presumably so callers can delete leaf manifests before the index that
	// references them — confirm against the GC caller).
	childIdx := -1
	parentIdx := -1
	for i, d := range digests {
		if d == "sha256:untagged-child-amd64" {
			childIdx = i
		}
		if d == "sha256:untagged-index" {
			parentIdx = i
		}
	}
	if childIdx >= 0 && parentIdx >= 0 && childIdx > parentIdx {
		t.Errorf("Expected children before parents: child at index %d, parent at index %d", childIdx, parentIdx)
	}
	// Verify total count: untagged-child-amd64, orphan-single, untagged-index = 3
	if len(digests) != 3 {
		t.Errorf("Expected 3 digests, got %d: %v", len(digests), digests)
	}
}
// TestGetUserRepositories_HoldAccessFilter verifies that repositories whose
// manifests live on inaccessible holds are hidden from viewers without access:
// an anonymous viewer sees only repos on public holds, while a crew member of
// a private hold also sees the repos hosted there.
func TestGetUserRepositories_HoldAccessFilter(t *testing.T) {
	// Named shared-cache in-memory database so every pooled connection sees
	// the same data for the duration of the test.
	db, err := InitDB("file:TestGetUserRepositories_HoldAccessFilter?mode=memory&cache=shared", LibsqlConfig{})
	if err != nil {
		t.Fatalf("init db: %v", err)
	}
	defer db.Close()
	testUser := &User{DID: "did:plc:alice", Handle: "alice.test", PDSEndpoint: "https://pds.example", LastSeen: time.Now()}
	if err := UpsertUser(db, testUser); err != nil {
		t.Fatalf("upsert user: %v", err)
	}
	// Public hold and a private invite-only hold
	if err := UpsertCaptainRecord(db, &HoldCaptainRecord{
		HoldDID: "did:web:public.example", OwnerDID: "did:plc:holdowner", Public: true,
	}); err != nil {
		t.Fatalf("seed public captain: %v", err)
	}
	if err := UpsertCaptainRecord(db, &HoldCaptainRecord{
		HoldDID: "did:web:private.example", OwnerDID: "did:plc:holdowner", Public: false, AllowAllCrew: false,
	}); err != nil {
		t.Fatalf("seed private captain: %v", err)
	}
	// Two repos: one on the public hold, one on the private hold
	if _, err := InsertManifest(db, &Manifest{
		DID: testUser.DID, Repository: "publicrepo", Digest: "sha256:pub",
		HoldEndpoint: "did:web:public.example", SchemaVersion: 2,
		MediaType: "application/vnd.oci.image.manifest.v1+json", CreatedAt: time.Now(),
	}); err != nil {
		t.Fatalf("insert public manifest: %v", err)
	}
	if _, err := InsertManifest(db, &Manifest{
		DID: testUser.DID, Repository: "privaterepo", Digest: "sha256:priv",
		HoldEndpoint: "did:web:private.example", SchemaVersion: 2,
		MediaType: "application/vnd.oci.image.manifest.v1+json", CreatedAt: time.Now(),
	}); err != nil {
		t.Fatalf("insert private manifest: %v", err)
	}
	// Anonymous viewer should see only the publicrepo
	repos, err := GetUserRepositories(db, testUser.DID, "")
	if err != nil {
		t.Fatalf("GetUserRepositories anon: %v", err)
	}
	if len(repos) != 1 || repos[0].Name != "publicrepo" {
		t.Errorf("anon viewer: expected [publicrepo], got %v", repos)
	}
	// Add a crew member to the private hold and re-query as them; crew
	// membership should make the private-hold repo visible.
	if err := UpsertCrewMember(db, &CrewMember{
		HoldDID: "did:web:private.example", MemberDID: "did:plc:crewdave", Rkey: "rk1",
	}); err != nil {
		t.Fatalf("upsert crew: %v", err)
	}
	repos, err = GetUserRepositories(db, testUser.DID, "did:plc:crewdave")
	if err != nil {
		t.Fatalf("GetUserRepositories crew: %v", err)
	}
	if len(repos) != 2 {
		t.Errorf("crew viewer: expected both repos, got %d: %v", len(repos), repos)
	}
}
// TestGetUserRepositories_BulkGrouping verifies that the bulk-fetch
// implementation correctly groups tags, manifests, annotations, and repo-page
// avatars per repository — and that ordering (last_push DESC for repos,
// created_at DESC for tags/manifests within a repo) is preserved.
//
// Regression guard for the previous N+1 implementation, which issued one
// query per repo and per relation.
func TestGetUserRepositories_BulkGrouping(t *testing.T) {
	// Named shared-cache in-memory database so every pooled connection sees
	// the same data for the duration of the test.
	db, err := InitDB("file:TestGetUserRepositories_BulkGrouping?mode=memory&cache=shared", LibsqlConfig{})
	if err != nil {
		t.Fatalf("init db: %v", err)
	}
	defer db.Close()
	user := &User{DID: "did:plc:owner", Handle: "owner.test", PDSEndpoint: "https://pds.example", LastSeen: time.Now()}
	if err := UpsertUser(db, user); err != nil {
		t.Fatalf("upsert user: %v", err)
	}
	// Public hold so the hold-access filter doesn't hide either repo.
	if err := UpsertCaptainRecord(db, &HoldCaptainRecord{
		HoldDID: "did:web:hold.example", OwnerDID: "did:plc:holdowner", Public: true,
	}); err != nil {
		t.Fatalf("seed captain: %v", err)
	}
	// Timestamps truncated to whole seconds; offsets below (+1s, +5s, +8s,
	// +10s) drive the expected ordering assertions.
	now := time.Now().UTC().Truncate(time.Second)
	mediaType := "application/vnd.oci.image.manifest.v1+json"
	// repoA: two manifests (oldest then newer) and two tags. last_push = now+10s.
	manifestA1, err := InsertManifest(db, &Manifest{
		DID: user.DID, Repository: "repoA", Digest: "sha256:a1",
		HoldEndpoint: "did:web:hold.example", SchemaVersion: 2, MediaType: mediaType,
		CreatedAt: now,
	})
	if err != nil {
		t.Fatalf("insert manifest a1: %v", err)
	}
	manifestA2, err := InsertManifest(db, &Manifest{
		DID: user.DID, Repository: "repoA", Digest: "sha256:a2",
		HoldEndpoint: "did:web:hold.example", SchemaVersion: 2, MediaType: mediaType,
		CreatedAt: now.Add(5 * time.Second),
	})
	if err != nil {
		t.Fatalf("insert manifest a2: %v", err)
	}
	if err := UpsertTag(db, &Tag{DID: user.DID, Repository: "repoA", Tag: "v1", Digest: "sha256:a1", CreatedAt: now.Add(8 * time.Second)}); err != nil {
		t.Fatalf("upsert tag v1: %v", err)
	}
	if err := UpsertTag(db, &Tag{DID: user.DID, Repository: "repoA", Tag: "v2", Digest: "sha256:a2", CreatedAt: now.Add(10 * time.Second)}); err != nil {
		t.Fatalf("upsert tag v2: %v", err)
	}
	// repoB: one manifest, one tag. last_push = now+1s (older than repoA → repoA sorts first).
	if _, err := InsertManifest(db, &Manifest{
		DID: user.DID, Repository: "repoB", Digest: "sha256:b1",
		HoldEndpoint: "did:web:hold.example", SchemaVersion: 2, MediaType: mediaType,
		CreatedAt: now.Add(1 * time.Second),
	}); err != nil {
		t.Fatalf("insert manifest b1: %v", err)
	}
	if err := UpsertTag(db, &Tag{DID: user.DID, Repository: "repoB", Tag: "latest", Digest: "sha256:b1", CreatedAt: now.Add(1 * time.Second)}); err != nil {
		t.Fatalf("upsert tag b latest: %v", err)
	}
	// Annotations only on repoA, plus a repo-page avatar on repoB to exercise the icon override.
	if err := UpsertRepositoryAnnotations(db, user.DID, "repoA", map[string]string{
		"org.opencontainers.image.title":       "Repo A Title",
		"org.opencontainers.image.description": "alpha",
		"io.atcr.icon":                         "https://example.com/a.png",
	}); err != nil {
		t.Fatalf("upsert annotations: %v", err)
	}
	if err := UpsertRepoPage(db, user.DID, "repoB", "", "bafyrepob", false, now, now); err != nil {
		t.Fatalf("upsert repo page: %v", err)
	}
	repos, err := GetUserRepositories(db, user.DID, "")
	if err != nil {
		t.Fatalf("GetUserRepositories: %v", err)
	}
	// Order: repoA first (newer last_push), then repoB.
	if len(repos) != 2 {
		t.Fatalf("expected 2 repos, got %d: %#v", len(repos), repos)
	}
	if repos[0].Name != "repoA" || repos[1].Name != "repoB" {
		t.Fatalf("expected order [repoA, repoB] (last_push DESC), got [%s, %s]", repos[0].Name, repos[1].Name)
	}
	// repoA grouping
	a := repos[0]
	if len(a.Tags) != 2 {
		t.Errorf("repoA: expected 2 tags, got %d", len(a.Tags))
	}
	// tags ordered created_at DESC → v2 first
	if len(a.Tags) >= 2 && (a.Tags[0].Tag != "v2" || a.Tags[1].Tag != "v1") {
		t.Errorf("repoA tags out of order, want [v2, v1] got [%s, %s]", a.Tags[0].Tag, a.Tags[1].Tag)
	}
	if len(a.Manifests) != 2 {
		t.Errorf("repoA: expected 2 manifests, got %d", len(a.Manifests))
	}
	// manifests ordered created_at DESC → a2 first
	if len(a.Manifests) >= 2 && (a.Manifests[0].ID != manifestA2 || a.Manifests[1].ID != manifestA1) {
		t.Errorf("repoA manifests out of order, want [a2, a1] got [%d, %d]", a.Manifests[0].ID, a.Manifests[1].ID)
	}
	if a.Title != "Repo A Title" || a.Description != "alpha" {
		t.Errorf("repoA annotations not applied: title=%q desc=%q", a.Title, a.Description)
	}
	if a.IconURL != "https://example.com/a.png" {
		t.Errorf("repoA icon: expected annotation URL, got %q", a.IconURL)
	}
	// repoB grouping + page-avatar override
	b := repos[1]
	if len(b.Tags) != 1 || b.Tags[0].Tag != "latest" {
		t.Errorf("repoB tags: %#v", b.Tags)
	}
	if len(b.Manifests) != 1 || b.Manifests[0].Digest != "sha256:b1" {
		t.Errorf("repoB manifests: %#v", b.Manifests)
	}
	if b.IconURL == "" {
		t.Errorf("repoB icon should be derived from repo-page avatar CID, got empty")
	}
	// Cross-repo isolation: tags/manifests for repoB must not leak into repoA and vice versa.
	for _, tag := range a.Tags {
		if tag.Repository != "repoA" {
			t.Errorf("repoA tag has wrong repository: %#v", tag)
		}
	}
	for _, m := range b.Manifests {
		if m.Repository != "repoB" {
			t.Errorf("repoB manifest has wrong repository: %#v", m)
		}
	}
}
// TestGetStarredRepoCards verifies the listing of repos starred by a user:
// stars whose target repo no longer has a manifest are silently dropped (the
// "still exists" filter the feature relies on), and results are ordered by
// star creation time DESC.
func TestGetStarredRepoCards(t *testing.T) {
	// Named shared-cache in-memory database so every pooled connection sees
	// the same data for the duration of the test.
	db, err := InitDB("file:TestGetStarredRepoCards?mode=memory&cache=shared", LibsqlConfig{})
	if err != nil {
		t.Fatalf("init db: %v", err)
	}
	defer db.Close()
	now := time.Now().UTC().Truncate(time.Second)
	mediaType := "application/vnd.oci.image.manifest.v1+json"
	starrer := &User{DID: "did:plc:starrer", Handle: "starrer.test", PDSEndpoint: "https://pds.example", LastSeen: now}
	if err := UpsertUser(db, starrer); err != nil {
		t.Fatalf("upsert starrer: %v", err)
	}
	owner := &User{DID: "did:plc:owner", Handle: "owner.test", PDSEndpoint: "https://pds.example", LastSeen: now}
	if err := UpsertUser(db, owner); err != nil {
		t.Fatalf("upsert owner: %v", err)
	}
	// Owner of a deleted repo (still has a users row, just no manifests).
	ghost := &User{DID: "did:plc:ghost", Handle: "ghost.test", PDSEndpoint: "https://pds.example", LastSeen: now}
	if err := UpsertUser(db, ghost); err != nil {
		t.Fatalf("upsert ghost: %v", err)
	}
	// Public hold so the hold-access filter doesn't hide the repos.
	if err := UpsertCaptainRecord(db, &HoldCaptainRecord{
		HoldDID: "did:web:hold.example", OwnerDID: "did:plc:holdowner", Public: true,
	}); err != nil {
		t.Fatalf("seed captain: %v", err)
	}
	// Two existing repos owned by `owner`.
	if _, err := InsertManifest(db, &Manifest{
		DID: owner.DID, Repository: "repo-old", Digest: "sha256:old",
		HoldEndpoint: "did:web:hold.example", SchemaVersion: 2, MediaType: mediaType,
		CreatedAt: now,
	}); err != nil {
		t.Fatalf("insert old manifest: %v", err)
	}
	if _, err := InsertManifest(db, &Manifest{
		DID: owner.DID, Repository: "repo-new", Digest: "sha256:new",
		HoldEndpoint: "did:web:hold.example", SchemaVersion: 2, MediaType: mediaType,
		CreatedAt: now.Add(5 * time.Second),
	}); err != nil {
		t.Fatalf("insert new manifest: %v", err)
	}
	// Three stars: two for existing repos, one for a deleted repo. Star
	// timestamps (+0s, +10s, +20s) drive the DESC ordering assertion below.
	if err := UpsertStar(db, starrer.DID, owner.DID, "repo-old", now); err != nil {
		t.Fatalf("upsert star repo-old: %v", err)
	}
	if err := UpsertStar(db, starrer.DID, owner.DID, "repo-new", now.Add(10*time.Second)); err != nil {
		t.Fatalf("upsert star repo-new: %v", err)
	}
	if err := UpsertStar(db, starrer.DID, ghost.DID, "deleted-repo", now.Add(20*time.Second)); err != nil {
		t.Fatalf("upsert star deleted-repo: %v", err)
	}
	// Query with the starrer as both subject and viewer.
	cards, err := GetStarredRepoCards(db, starrer.DID, starrer.DID)
	if err != nil {
		t.Fatalf("GetStarredRepoCards: %v", err)
	}
	if len(cards) != 2 {
		t.Fatalf("expected 2 cards (deleted repo dropped), got %d: %+v", len(cards), cards)
	}
	// Newest star first.
	if cards[0].Repository != "repo-new" || cards[1].Repository != "repo-old" {
		t.Errorf("expected order [repo-new, repo-old] (newest star first), got [%s, %s]", cards[0].Repository, cards[1].Repository)
	}
	// IsStarred should reflect the viewer's perspective. Viewer == starrer here
	// so every returned row is starred-by-viewer.
	for _, c := range cards {
		if !c.IsStarred {
			t.Errorf("card %s/%s expected IsStarred=true for self-viewer", c.OwnerHandle, c.Repository)
		}
	}
}
// TestGetUserRepositories_Empty verifies the bulk-fetch path short-circuits
// cleanly when the summary query returns no rows (no extra queries issued,
// nil slice returned).
func TestGetUserRepositories_Empty(t *testing.T) {
	database, err := InitDB("file:TestGetUserRepositories_Empty?mode=memory&cache=shared", LibsqlConfig{})
	if err != nil {
		t.Fatalf("init db: %v", err)
	}
	defer database.Close()

	// A user with zero manifests: the summary query should return no rows.
	owner := &User{
		DID:         "did:plc:nobody",
		Handle:      "nobody.test",
		PDSEndpoint: "https://pds.example",
		LastSeen:    time.Now(),
	}
	if err := UpsertUser(database, owner); err != nil {
		t.Fatalf("upsert user: %v", err)
	}

	got, err := GetUserRepositories(database, owner.DID, "")
	if err != nil {
		t.Fatalf("GetUserRepositories empty: %v", err)
	}
	if got != nil {
		t.Errorf("expected nil slice for user with no repos, got %#v", got)
	}
}

View File

@@ -39,18 +39,17 @@ func InitializeDatabase(dbPath string, cfg LibsqlConfig) (*sql.DB, *sql.DB, *Ses
} else {
roDSN += "?mode=ro"
}
readOnlyDB, err := sql.Open("libsql", roDSN)
// Wrap with busyTimeoutConnector so every pooled read-only connection
// gets PRAGMA busy_timeout. Without this, reads return SQLITE_BUSY
// immediately when a write is in progress on the read-write connection
// (busy_timeout is per-connection, so a one-shot PRAGMA only configures
// whichever conn served it).
roBase, err := openLibsqlLocalConnector(roDSN)
if err != nil {
slog.Warn("Failed to open read-only database connection", "error", err)
slog.Warn("Failed to open read-only database connector", "error", err)
return nil, nil, nil
}
// busy_timeout is per-connection — without this, reads return SQLITE_BUSY
// immediately when a write is in progress on the read-write connection.
var busyTimeout int
if err := readOnlyDB.QueryRow("PRAGMA busy_timeout = 5000").Scan(&busyTimeout); err != nil {
slog.Warn("Failed to set busy_timeout on read-only connection", "error", err)
}
readOnlyDB := sql.OpenDB(&busyTimeoutConnector{base: roBase, timeoutMs: 5000})
slog.Info("UI database initialized", "mode", "readonly", "path", dbPath)

View File

@@ -5,7 +5,9 @@
package db
import (
"context"
"database/sql"
"database/sql/driver"
"embed"
"fmt"
"io/fs"
@@ -55,36 +57,34 @@ func InitDB(path string, cfg LibsqlConfig) (*sql.DB, error) {
db = sql.OpenDB(connector)
slog.Info("Database opened in embedded replica mode", "path", path, "sync_url", cfg.SyncURL)
} else {
// Local-only mode: plain file via libsql driver
// Paths starting with "file:" or ":memory:" are already valid libsql URIs
// Local-only mode: plain file via libsql driver, wrapped so every new
// connection gets PRAGMA busy_timeout. SQLite's busy_timeout is
// per-connection, so a one-shot db.Exec only configures whichever
// pooled conn served the call — leaving the rest to fail SQLITE_BUSY
// instantly on any write contention with the jetstream/backfill workers.
// Paths starting with "file:" or ":memory:" are already valid libsql URIs.
dsn := path
if !strings.HasPrefix(path, "file:") && !strings.HasPrefix(path, ":memory:") {
dsn = "file:" + path
}
var err error
db, err = sql.Open("libsql", dsn)
baseConnector, err := openLibsqlLocalConnector(dsn)
if err != nil {
return nil, err
}
db = sql.OpenDB(&busyTimeoutConnector{base: baseConnector, timeoutMs: 5000})
slog.Info("Database opened in local-only mode", "path", path)
}
// In local-only mode, configure WAL and busy_timeout locally.
// In embedded replica mode, the remote server manages these settings
// and PRAGMA assignments are rejected as "unsupported statement"
// (observed with Bunny Database; Turso may behave similarly).
// In local-only mode, set WAL mode (database-wide setting, persists
// across connections — single call is sufficient unlike busy_timeout).
// In embedded replica mode, the remote server manages this and the
// PRAGMA is rejected as "unsupported statement" (observed with Bunny;
// Turso may behave similarly).
if cfg.SyncURL == "" {
// Enable WAL mode for concurrent read/write access
var journalMode string
if err := db.QueryRow("PRAGMA journal_mode = WAL").Scan(&journalMode); err != nil {
return nil, err
}
// Retry on lock instead of failing immediately (5s timeout)
var busyTimeout int
if err := db.QueryRow("PRAGMA busy_timeout = 5000").Scan(&busyTimeout); err != nil {
return nil, err
}
}
// Enable foreign keys
@@ -92,6 +92,16 @@ func InitDB(path string, cfg LibsqlConfig) (*sql.DB, error) {
return nil, err
}
// Bound the connection pool. With a remote target (Bunny Database), each
// idle conn is a stable libsql stream — keeping a handful warm avoids
// reconnect cost, capping the total prevents runaway contention. Short
// lifetimes ensure we recycle past any idle-side disconnects and drop any
// poisoned conn that survived IsPoisonedTxErr eviction.
db.SetMaxOpenConns(8)
db.SetMaxIdleConns(4)
db.SetConnMaxLifetime(5 * time.Minute)
db.SetConnMaxIdleTime(2 * time.Minute)
// Check if this is an existing database with migrations applied
isExisting, err := hasAppliedMigrations(db)
if err != nil {
@@ -202,20 +212,21 @@ func runMigrations(db *sql.DB, freshDB bool) error {
if err != nil {
return fmt.Errorf("failed to begin transaction for migration %d: %w", m.Version, err)
}
// Deferred rollback is a no-op once Commit succeeds; it guards against
// panics and any early return that forgets an explicit rollback.
defer func() { _ = tx.Rollback() }()
// Split query into individual statements and execute each
// go-sqlite3's Exec() doesn't reliably execute all statements in multi-statement queries
statements := splitSQLStatements(m.Query)
for i, stmt := range statements {
if _, err := tx.Exec(stmt); err != nil {
tx.Rollback()
return fmt.Errorf("failed to apply migration %d (%s) statement %d: %w", m.Version, m.Name, i+1, err)
}
}
// Record migration
if _, err := tx.Exec("INSERT INTO schema_migrations (version) VALUES (?)", m.Version); err != nil {
tx.Rollback()
return fmt.Errorf("failed to record migration %d: %w", m.Version, err)
}
@@ -366,3 +377,59 @@ func parseMigrationFilename(filename string) (int, string, error) {
return version, name, nil
}
// openLibsqlLocalConnector returns a driver.Connector for a local libsql DSN.
// go-libsql exports NewEmbeddedReplicaConnector for replica mode but offers
// no public constructor for plain local files, so we grab the registered
// driver through a throwaway sql.Open (lazy — it opens no real connection)
// and request a Connector from it via driver.DriverContext.
func openLibsqlLocalConnector(dsn string) (driver.Connector, error) {
	sniffer, err := sql.Open("libsql", dsn)
	if err != nil {
		return nil, fmt.Errorf("probe libsql driver: %w", err)
	}
	d := sniffer.Driver()
	_ = sniffer.Close() // probe handle is no longer needed
	if dc, ok := d.(driver.DriverContext); ok {
		return dc.OpenConnector(dsn)
	}
	return nil, fmt.Errorf("libsql driver does not implement driver.DriverContext")
}
// busyTimeoutConnector decorates a driver.Connector so that PRAGMA
// busy_timeout is applied to every connection the pool opens. SQLite scopes
// busy_timeout to a single connection, so decorating the connector is the
// only reliable way to make the whole pool wait on lock contention instead
// of returning SQLITE_BUSY immediately.
type busyTimeoutConnector struct {
	base      driver.Connector // the wrapped libsql connector
	timeoutMs int              // busy_timeout value in milliseconds
}

// Connect opens a connection through the wrapped connector and immediately
// applies the busy_timeout PRAGMA. On any failure the connection is closed
// so a half-configured conn never reaches the pool.
func (c *busyTimeoutConnector) Connect(ctx context.Context) (driver.Conn, error) {
	conn, err := c.base.Connect(ctx)
	if err != nil {
		return nil, err
	}
	// libsql treats PRAGMA assignments as queries that return a row, so
	// QueryerContext — not ExecerContext — is required here.
	q, ok := conn.(driver.QueryerContext)
	if !ok {
		_ = conn.Close()
		return nil, fmt.Errorf("libsql conn does not support QueryerContext")
	}
	pragma := fmt.Sprintf("PRAGMA busy_timeout = %d", c.timeoutMs)
	rows, err := q.QueryContext(ctx, pragma, nil)
	if err != nil {
		_ = conn.Close()
		return nil, fmt.Errorf("set busy_timeout on new conn: %w", err)
	}
	_ = rows.Close()
	return conn, nil
}

// Driver reports the underlying driver of the wrapped connector.
func (c *busyTimeoutConnector) Driver() driver.Driver {
	return c.base.Driver()
}

View File

@@ -13,6 +13,7 @@ CREATE TABLE IF NOT EXISTS users (
pds_endpoint TEXT NOT NULL,
avatar TEXT,
default_hold_did TEXT,
oci_client TEXT DEFAULT '',
last_seen TIMESTAMP NOT NULL,
UNIQUE(handle)
);
@@ -29,6 +30,7 @@ CREATE TABLE IF NOT EXISTS manifests (
config_digest TEXT,
config_size INTEGER,
artifact_type TEXT NOT NULL DEFAULT 'container-image', -- container-image, helm-chart, unknown
subject_digest TEXT, -- digest of the parent manifest (for attestations/referrers)
created_at TIMESTAMP NOT NULL,
UNIQUE(did, repository, digest),
FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
@@ -37,6 +39,7 @@ CREATE INDEX IF NOT EXISTS idx_manifests_did_repo ON manifests(did, repository);
CREATE INDEX IF NOT EXISTS idx_manifests_created_at ON manifests(created_at DESC);
CREATE INDEX IF NOT EXISTS idx_manifests_digest ON manifests(digest);
CREATE INDEX IF NOT EXISTS idx_manifests_artifact_type ON manifests(artifact_type);
CREATE INDEX IF NOT EXISTS idx_manifests_subject_digest ON manifests(subject_digest);
CREATE TABLE IF NOT EXISTS repository_annotations (
did TEXT NOT NULL,
@@ -166,6 +169,23 @@ CREATE TABLE IF NOT EXISTS repository_stats (
CREATE INDEX IF NOT EXISTS idx_repository_stats_did ON repository_stats(did);
CREATE INDEX IF NOT EXISTS idx_repository_stats_pull_count ON repository_stats(pull_count DESC);
CREATE TABLE IF NOT EXISTS repository_stats_daily (
did TEXT NOT NULL,
repository TEXT NOT NULL,
date TEXT NOT NULL,
pull_count INTEGER NOT NULL DEFAULT 0,
push_count INTEGER NOT NULL DEFAULT 0,
PRIMARY KEY(did, repository, date),
FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_repo_stats_daily_date ON repository_stats_daily(date DESC);
CREATE TABLE IF NOT EXISTS jetstream_cursor (
id INTEGER PRIMARY KEY CHECK (id = 1),
cursor INTEGER NOT NULL,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS stars (
starrer_did TEXT NOT NULL,
owner_did TEXT NOT NULL,
@@ -232,6 +252,7 @@ CREATE TABLE IF NOT EXISTS repo_pages (
repository TEXT NOT NULL,
description TEXT,
avatar_cid TEXT,
user_edited BOOLEAN NOT NULL DEFAULT 0,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL,
PRIMARY KEY(did, repository),
@@ -271,3 +292,24 @@ CREATE TABLE IF NOT EXISTS scans (
PRIMARY KEY(hold_did, manifest_digest)
);
CREATE INDEX IF NOT EXISTS idx_scans_user ON scans(user_did);
CREATE TABLE IF NOT EXISTS advisor_suggestions (
manifest_digest TEXT PRIMARY KEY,
suggestions_json TEXT NOT NULL,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS taken_down_subjects (
src TEXT NOT NULL,
did TEXT NOT NULL,
repo TEXT NOT NULL DEFAULT '',
cts TIMESTAMP NOT NULL,
PRIMARY KEY (src, did, repo)
);
CREATE INDEX IF NOT EXISTS idx_taken_down_subjects_did ON taken_down_subjects(did);
CREATE INDEX IF NOT EXISTS idx_taken_down_subjects_did_repo ON taken_down_subjects(did, repo);
CREATE TABLE IF NOT EXISTS labeler_cursor (
src TEXT PRIMARY KEY,
cursor INTEGER NOT NULL
);

View File

@@ -164,43 +164,15 @@ func (h *UnstarRepositoryHandler) ServeHTTP(w http.ResponseWriter, r *http.Reque
render.JSON(w, r, map[string]bool{"starred": false})
}
// CredentialHelperVersionResponse is the response for the credential helper version API
type CredentialHelperVersionResponse struct {
Latest string `json:"latest"`
DownloadURLs map[string]string `json:"download_urls"`
Checksums map[string]string `json:"checksums"`
ReleaseNotes string `json:"release_notes,omitempty"`
}
// CredentialHelperVersionHandler returns the latest credential helper version info
// Note: Version info is fetched dynamically from TangledRepo's releases
type CredentialHelperVersionHandler struct {
TangledRepo string
}
func (h *CredentialHelperVersionHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// This endpoint directs users to the Tangled repository for downloads
// Version info should be fetched from the repository's releases page
response := CredentialHelperVersionResponse{
Latest: "",
DownloadURLs: map[string]string{"tangled_repo": h.TangledRepo},
Checksums: nil,
ReleaseNotes: "Visit the Tangled repository for the latest releases: " + h.TangledRepo,
}
render.SetContentType(render.ContentTypeJSON)
w.Header().Set("Cache-Control", "public, max-age=300") // Cache for 5 minutes
render.JSON(w, r, response)
}
// renderStarComponent renders the star component HTML for HTMX responses
func renderStarComponent(w http.ResponseWriter, tmpl *template.Template, handle, repository string, isStarred bool, starCount int) {
data := map[string]any{
"Interactive": true,
"Handle": handle,
"Repository": repository,
"IsStarred": isStarred,
"StarCount": starCount,
"Interactive": true,
"IsAuthenticated": true,
"Handle": handle,
"Repository": repository,
"IsStarred": isStarred,
"StarCount": starCount,
}
var buf bytes.Buffer

View File

@@ -10,6 +10,7 @@ import (
"atcr.io/pkg/appview/webhooks"
"atcr.io/pkg/auth/oauth"
"atcr.io/pkg/billing"
indigooauth "github.com/bluesky-social/indigo/atproto/auth/oauth"
"github.com/bluesky-social/indigo/atproto/identity"
)
@@ -33,6 +34,7 @@ type BaseUIHandler struct {
Directory identity.Directory
BillingManager *billing.Manager
WebhookDispatcher *webhooks.Dispatcher
OAuthClientApp *indigooauth.ClientApp
// Stores
SessionStore *db.SessionStore
@@ -40,9 +42,11 @@ type BaseUIHandler struct {
OAuthStore *db.OAuthStore
// Config
DefaultHoldDID string
CompanyName string
Jurisdiction string
ClientName string // Full name: "AT Container Registry"
ClientShortName string // Short name: "ATCR"
DefaultHoldDID string
CompanyName string
Jurisdiction string
ClientName string // Full name: "AT Container Registry"
ClientShortName string // Short name: "ATCR"
AIAdvisorEnabled bool // True when Claude API key is configured
SourceURL string // Source code URL for the footer "Source" link
}

View File

@@ -10,23 +10,36 @@ import (
// PageData contains common fields shared across all page templates
type PageData struct {
User *db.User // Logged-in user (nil if not logged in)
Query string // Search query from URL parameter
RegistryURL string // Docker registry domain (e.g., "buoy.cr")
SiteURL string // Website domain (e.g., "seamark.dev")
ClientName string // Brand name for templates (e.g., "AT Container Registry")
ClientShortName string // Brand name for templates (e.g., "ATCR")
User *db.User // Logged-in user (nil if not logged in)
Query string // Search query from URL parameter
RegistryURL string // Docker registry domain (e.g., "buoy.cr")
SiteURL string // Website domain (e.g., "seamark.dev")
ClientName string // Brand name for templates (e.g., "AT Container Registry")
ClientShortName string // Brand name for templates (e.g., "ATCR")
OciClient string // Preferred OCI client for pull commands (e.g., "docker", "podman")
AIAdvisorEnabled bool // True when AI Image Advisor is available
SourceURL string // Source code URL for the footer "Source" link
CurrentPath string // Request path (used for OAuth return_to)
}
// NewPageData creates a PageData struct with common fields populated from the request
func NewPageData(r *http.Request, h *BaseUIHandler) PageData {
user := middleware.GetUser(r)
var ociClient string
if user != nil {
ociClient = user.OciClient
}
return PageData{
User: middleware.GetUser(r),
Query: r.URL.Query().Get("q"),
RegistryURL: h.RegistryURL,
SiteURL: h.SiteURL,
ClientName: h.ClientName,
ClientShortName: h.ClientShortName,
User: user,
Query: r.URL.Query().Get("q"),
RegistryURL: h.RegistryURL,
SiteURL: h.SiteURL,
ClientName: h.ClientName,
ClientShortName: h.ClientShortName,
OciClient: ociClient,
AIAdvisorEnabled: h.AIAdvisorEnabled,
SourceURL: h.SourceURL,
CurrentPath: r.URL.RequestURI(),
}
}

View File

@@ -1,14 +1,16 @@
package handlers
import (
"context"
"fmt"
"html/template"
"log/slog"
"net/http"
"net/url"
"strings"
"time"
"atcr.io/pkg/appview/db"
"atcr.io/pkg/atproto"
"github.com/go-chi/chi/v5"
"github.com/go-chi/render"
)
@@ -205,18 +207,18 @@ func (h *DeviceApprovalPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Req
// Get pending authorization
pending, ok := h.DeviceStore.GetPendingByUserCode(userCode)
if !ok {
h.renderError(w, "Invalid or expired authorization code")
h.renderError(w, r, "That authorization code has expired or doesn't exist. Start a fresh `docker login` from your terminal to get a new one.")
return
}
// Check if already approved
if pending.ApprovedDID != nil && *pending.ApprovedDID != "" {
h.renderSuccess(w, pending.DeviceName)
h.renderSuccess(w, r, pending.DeviceName)
return
}
// Render approval page
h.renderApprovalPage(w, sess.Handle, pending)
h.renderApprovalPage(w, r, sess, pending)
}
// DeviceApproveRequest is the request to approve a device
@@ -359,60 +361,157 @@ func (h *RevokeDeviceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
w.WriteHeader(http.StatusOK)
}
// Helper functions
// renderApprovalPage renders the device authorization confirmation page.
// The browser-side identity (avatar + handle + display name) and the
// terminal-side device facts are paired side-by-side so a wrong-account
// approval is visually obvious before the Approve button is clicked.
func (h *DeviceApprovalPageHandler) renderApprovalPage(w http.ResponseWriter, r *http.Request, sess *db.Session, pending *db.PendingAuthorization) {
// Hydrate the signed-in sailor: cached avatar/handle from our local
// users table; live displayName from their PDS (best-effort with a tight
// timeout — the page must still render quickly if Bluesky is slow).
user := &db.User{
DID: sess.DID,
Handle: sess.Handle,
PDSEndpoint: sess.PDSEndpoint,
}
if h.ReadOnlyDB != nil {
if u, err := db.GetUserByDID(h.ReadOnlyDB, sess.DID); err == nil && u != nil {
user = u
}
}
displayName := fetchDisplayName(r.Context(), sess)
meta := NewPageMeta(
"Authorize device - "+h.ClientShortName,
"Confirm device authorization for "+h.ClientShortName,
).WithRobots("noindex").
WithSiteName(h.ClientShortName)
pd := NewPageData(r, &h.BaseUIHandler)
pd.User = user
func (h *DeviceApprovalPageHandler) renderApprovalPage(w http.ResponseWriter, handle string, pending *db.PendingAuthorization) {
tmpl := template.Must(template.New("approval").Parse(deviceApprovalTemplate))
data := struct {
Handle string
DeviceName string
UserCode string
IPAddress string
PageData
Meta *PageMeta
Pending *db.PendingAuthorization
ProfileDisplayName string
UserDIDShort string
UserAgentShort string
}{
Handle: handle,
DeviceName: pending.DeviceName,
UserCode: pending.UserCode,
IPAddress: pending.IPAddress,
PageData: pd,
Meta: meta,
Pending: pending,
ProfileDisplayName: displayName,
UserDIDShort: shortenDID(sess.DID),
UserAgentShort: shortenUserAgent(pending.UserAgent),
}
w.Header().Set("Content-Type", "text/html; charset=utf-8")
if err := tmpl.Execute(w, data); err != nil {
if err := h.Templates.ExecuteTemplate(w, "device-approve", data); err != nil {
slog.Error("Failed to render device approval page", "component", "device/approve", "error", err)
http.Error(w, "failed to render template", http.StatusInternalServerError)
return
}
}
func (h *DeviceApprovalPageHandler) renderSuccess(w http.ResponseWriter, deviceName string) {
tmpl := template.Must(template.New("success").Parse(deviceSuccessTemplate))
func (h *DeviceApprovalPageHandler) renderSuccess(w http.ResponseWriter, r *http.Request, deviceName string) {
meta := NewPageMeta(
"Device authorized - "+h.ClientShortName,
"Device authorization complete",
).WithRobots("noindex").
WithSiteName(h.ClientShortName)
data := struct {
PageData
Meta *PageMeta
DeviceName string
}{
PageData: NewPageData(r, &h.BaseUIHandler),
Meta: meta,
DeviceName: deviceName,
}
w.Header().Set("Content-Type", "text/html; charset=utf-8")
if err := tmpl.Execute(w, data); err != nil {
if err := h.Templates.ExecuteTemplate(w, "device-approved", data); err != nil {
slog.Error("Failed to render device success page", "component", "device/approve", "error", err)
http.Error(w, "failed to render template", http.StatusInternalServerError)
return
}
}
func (h *DeviceApprovalPageHandler) renderError(w http.ResponseWriter, message string) {
tmpl := template.Must(template.New("error").Parse(deviceErrorTemplate))
func (h *DeviceApprovalPageHandler) renderError(w http.ResponseWriter, r *http.Request, message string) {
meta := NewPageMeta(
"Authorization error - "+h.ClientShortName,
"Device authorization could not be completed",
).WithRobots("noindex").
WithSiteName(h.ClientShortName)
data := struct {
PageData
Meta *PageMeta
Message string
}{
Message: message,
PageData: NewPageData(r, &h.BaseUIHandler),
Meta: meta,
Message: message,
}
w.Header().Set("Content-Type", "text/html; charset=utf-8")
w.WriteHeader(http.StatusBadRequest)
if err := tmpl.Execute(w, data); err != nil {
http.Error(w, "failed to render template", http.StatusInternalServerError)
return
if err := h.Templates.ExecuteTemplate(w, "device-error", data); err != nil {
slog.Error("Failed to render device error page", "component", "device/approve", "error", err)
}
}
// fetchDisplayName best-effort fetches the sailor's display name from
// their PDS. Returns "" on any failure — the template falls back to the
// handle so the page never blocks on a slow upstream.
func fetchDisplayName(ctx context.Context, sess *db.Session) string {
	if sess == nil || sess.PDSEndpoint == "" {
		return ""
	}
	// Cap the upstream call tightly; this is purely cosmetic data.
	boundedCtx, cancel := context.WithTimeout(ctx, 1500*time.Millisecond)
	defer cancel()
	pds := atproto.NewClient(sess.PDSEndpoint, sess.DID, "")
	profile, err := pds.GetActorProfile(boundedCtx, sess.DID)
	if err != nil || profile == nil {
		return ""
	}
	return strings.TrimSpace(profile.DisplayName)
}
// shortenDID returns a compact DID for display (e.g.
// "did:plc:abc…xyz") without obscuring its kind.
func shortenDID(did string) string {
	const maxShort = 24
	if len(did) <= maxShort {
		return did
	}
	// Locate the colon that terminates the method segment ("did:plc:",
	// "did:web:"); searching from offset 4 skips the "did:" prefix itself.
	sep := strings.Index(did[4:], ":")
	if sep < 0 {
		// Malformed / unusual DID: keep a fixed-width head and tail.
		return did[:14] + "…" + did[len(did)-6:]
	}
	methodEnd := sep + 5 // include "did:" and the trailing ":"
	if len(did)-methodEnd <= 14 {
		// Short enough after the method prefix — nothing to elide.
		return did
	}
	return did[:methodEnd+6] + "…" + did[len(did)-6:]
}
// shortenUserAgent picks a readable summary of the device's UA string —
// almost always something like "docker-credential-atcr/0.x" — and caps
// the length so the device card doesn't blow up on long UA strings.
func shortenUserAgent(ua string) string {
	trimmed := strings.TrimSpace(ua)
	switch {
	case trimmed == "":
		return ""
	case len(trimmed) > 80:
		// Truncate with an ellipsis; UA strings here are ASCII in practice.
		return trimmed[:80] + "…"
	default:
		return trimmed
	}
}
func getClientIP(r *http.Request) string {
// Check X-Forwarded-For header
xff := r.Header.Get("X-Forwarded-For")
@@ -435,122 +534,3 @@ func getClientIP(r *http.Request) string {
return r.RemoteAddr
}
// HTML templates
const deviceApprovalTemplate = `
<!DOCTYPE html>
<html>
<head>
<title>Authorize Device - ATCR</title>
<style>
body { font-family: sans-serif; max-width: 600px; margin: 50px auto; padding: 20px; }
.approval-box { background: #e3f2fd; border: 1px solid #90caf9; padding: 30px; border-radius: 8px; }
.user-code { font-size: 32px; font-weight: bold; letter-spacing: 4px; text-align: center; margin: 20px 0; color: #1976d2; }
.device-info { background: #fff; padding: 15px; border-radius: 4px; margin: 15px 0; }
.device-info dt { font-weight: bold; margin-top: 10px; }
.device-info dd { margin-left: 0; color: #666; }
.actions { text-align: center; margin-top: 30px; }
button { font-size: 16px; padding: 12px 30px; margin: 0 10px; border: none; border-radius: 4px; cursor: pointer; }
.approve { background: #4caf50; color: white; }
.approve:hover { background: #45a049; }
.deny { background: #f44336; color: white; }
.deny:hover { background: #da190b; }
</style>
</head>
<body>
<div class="approval-box">
<h1>Authorize Device</h1>
<p>User: <strong>{{.Handle}}</strong></p>
<div class="user-code">{{.UserCode}}</div>
<div class="device-info">
<dl>
<dt>Device Name:</dt>
<dd>{{.DeviceName}}</dd>
<dt>IP Address:</dt>
<dd>{{.IPAddress}}</dd>
</dl>
</div>
<p><strong>Do you want to authorize this device?</strong></p>
<p>This device will be able to push and pull container images to your registry.</p>
<div class="actions">
<button class="approve" onclick="approve(true)">Approve</button>
<button class="deny" onclick="approve(false)">Deny</button>
</div>
</div>
<script>
async function approve(approved) {
const resp = await fetch('/device/approve', {
method: 'POST',
headers: {'Content-Type': 'application/json'},
body: JSON.stringify({
user_code: '{{.UserCode}}',
approve: approved
})
});
if (resp.ok) {
if (approved) {
window.location.href = '/device?user_code={{.UserCode}}';
} else {
alert('Device authorization denied');
window.location.href = '/';
}
} else {
alert('Failed to process authorization');
}
}
</script>
</body>
</html>
`
const deviceSuccessTemplate = `
<!DOCTYPE html>
<html>
<head>
<title>Device Authorized - ATCR</title>
<style>
body { font-family: sans-serif; max-width: 600px; margin: 50px auto; padding: 20px; }
.success { background: #d4edda; border: 1px solid #c3e6cb; padding: 30px; border-radius: 8px; }
h1 { color: #155724; }
a { color: #007bff; }
</style>
</head>
<body>
<div class="success">
<h1>✓ Device Authorized!</h1>
<p>Device <strong>{{.DeviceName}}</strong> has been successfully authorized.</p>
<p>You can now close this window and return to your terminal.</p>
<p><a href="/settings#devices">View your authorized devices</a></p>
</div>
</body>
</html>
`
const deviceErrorTemplate = `
<!DOCTYPE html>
<html>
<head>
<title>Authorization Error - ATCR</title>
<style>
body { font-family: sans-serif; max-width: 600px; margin: 50px auto; padding: 20px; }
.error { background: #f8d7da; border: 1px solid #f5c6cb; padding: 30px; border-radius: 8px; }
h1 { color: #721c24; }
a { color: #007bff; }
</style>
</head>
<body>
<div class="error">
<h1>✗ Authorization Error</h1>
<p>{{.Message}}</p>
<p><a href="/">Return to home</a></p>
</div>
</body>
</html>
`

View File

@@ -0,0 +1,558 @@
package handlers
import (
"fmt"
"log/slog"
"net/http"
"strings"
"sync"
"atcr.io/pkg/appview/db"
"atcr.io/pkg/appview/holdclient"
"atcr.io/pkg/atproto"
"github.com/go-chi/chi/v5"
)
// LayerDiffEntry represents one row in the layer diff table.
type LayerDiffEntry struct {
	Status    string       // "shared", "rebuilt", "added", "removed"
	Layer     LayerDetail  // the "to" layer (or from-layer for "removed")
	PrevLayer *LayerDetail // set for "rebuilt" — the old layer
}

// VulnDiffEntry represents a vulnerability categorized by diff status.
type VulnDiffEntry struct {
	Status string // "fixed", "new", "unchanged"
	Vuln   vulnMatch
}

// DiffSummary is the top-line summary for the banner and diff page.
type DiffSummary struct {
	SizeDelta      int64 // bytes, positive = "to" is larger
	LayerCountFrom int   // number of layers on the "from" side
	LayerCountTo   int   // number of layers on the "to" side
	VulnFixedCount int   // vulns present only in "from" (fixed by the update)
	VulnNewCount   int   // vulns present only in "to" (introduced)
	VulnFixedBySev vulnSummary // fixed vulns bucketed by severity
	VulnNewBySev   vulnSummary // new vulns bucketed by severity
	HasVulnData    bool // true only when both sides had usable scan results
}
// layerKey returns the matching key for a layer — digest for real layers, command for empty layers.
func layerKey(l LayerDetail) string {
	if l.Command == "" {
		return l.Digest
	}
	return l.Command
}
// computeLayerDiff compares two ordered LayerDetail slices using LCS on commands (git diff style).
// Handles insertions and deletions in the middle, not just prefix divergence.
func computeLayerDiff(fromLayers, toLayers []LayerDetail) []LayerDiffEntry {
	nFrom, nTo := len(fromLayers), len(toLayers)
	// LCS length table over layer keys (command for empty layers, digest otherwise).
	lcs := make([][]int, nFrom+1)
	for row := range lcs {
		lcs[row] = make([]int, nTo+1)
	}
	for a := 1; a <= nFrom; a++ {
		for b := 1; b <= nTo; b++ {
			switch {
			case layerKey(fromLayers[a-1]) == layerKey(toLayers[b-1]):
				lcs[a][b] = lcs[a-1][b-1] + 1
			case lcs[a-1][b] >= lcs[a][b-1]:
				lcs[a][b] = lcs[a-1][b]
			default:
				lcs[a][b] = lcs[a][b-1]
			}
		}
	}
	// Backtrack through the table, collecting entries in reverse order.
	entries := make([]LayerDiffEntry, 0, nFrom+nTo)
	a, b := nFrom, nTo
	for a > 0 || b > 0 {
		switch {
		case a > 0 && b > 0 && layerKey(fromLayers[a-1]) == layerKey(toLayers[b-1]):
			oldL, newL := fromLayers[a-1], toLayers[b-1]
			// Keys match — decide shared vs rebuilt by digest. Empty layers
			// carry no digest and are considered shared when commands match;
			// an empty/non-empty mismatch counts as rebuilt.
			identical := (oldL.EmptyLayer && newL.EmptyLayer) ||
				(!oldL.EmptyLayer && !newL.EmptyLayer && oldL.Digest == newL.Digest)
			if identical {
				entries = append(entries, LayerDiffEntry{Status: "shared", Layer: newL})
			} else {
				prev := oldL
				entries = append(entries, LayerDiffEntry{Status: "rebuilt", Layer: newL, PrevLayer: &prev})
			}
			a--
			b--
		case b > 0 && (a == 0 || lcs[a][b-1] >= lcs[a-1][b]):
			entries = append(entries, LayerDiffEntry{Status: "added", Layer: toLayers[b-1]})
			b--
		default:
			entries = append(entries, LayerDiffEntry{Status: "removed", Layer: fromLayers[a-1]})
			a--
		}
	}
	// Flip in place so the result runs base-first like the inputs.
	for i, j := 0, len(entries)-1; i < j; i, j = i+1, j-1 {
		entries[i], entries[j] = entries[j], entries[i]
	}
	return entries
}
// computeVulnDiff compares two vulnerability match slices by CVE ID.
//
// Entries are emitted grouped as fixed (only in from), then new (only in
// to), then unchanged (in both). Fix: the original iterated the lookup
// maps, so the order within each group was randomized on every call (Go
// randomizes map iteration), making the rendered diff page shuffle across
// reloads. Iteration is now driven by the input slices, preserving scan
// order deterministically; an emitted set keeps the original map-based
// dedup behavior for duplicate CVE IDs. It also folds the two passes over
// the "to" map into single passes over the slices.
func computeVulnDiff(fromMatches, toMatches []vulnMatch) []VulnDiffEntry {
	fromSet := make(map[string]vulnMatch, len(fromMatches))
	for _, m := range fromMatches {
		fromSet[m.CVEID] = m
	}
	toSet := make(map[string]vulnMatch, len(toMatches))
	for _, m := range toMatches {
		toSet[m.CVEID] = m
	}
	var result []VulnDiffEntry
	// A CVE ID can only land in one category, so one shared set suffices
	// to suppress duplicates across all three passes.
	emitted := make(map[string]bool, len(fromMatches)+len(toMatches))
	// Fixed: in from but not to
	for _, m := range fromMatches {
		if _, inTo := toSet[m.CVEID]; !inTo && !emitted[m.CVEID] {
			emitted[m.CVEID] = true
			result = append(result, VulnDiffEntry{Status: "fixed", Vuln: m})
		}
	}
	// New: in to but not from
	for _, m := range toMatches {
		if _, inFrom := fromSet[m.CVEID]; !inFrom && !emitted[m.CVEID] {
			emitted[m.CVEID] = true
			result = append(result, VulnDiffEntry{Status: "new", Vuln: m})
		}
	}
	// Unchanged: in both
	for _, m := range toMatches {
		if _, inFrom := fromSet[m.CVEID]; inFrom && !emitted[m.CVEID] {
			emitted[m.CVEID] = true
			result = append(result, VulnDiffEntry{Status: "unchanged", Vuln: m})
		}
	}
	return result
}
// computeDiffSummary derives the top-line summary from layer and vuln diffs.
func computeDiffSummary(fromLayers, toLayers []LayerDetail, vulnDiff []VulnDiffEntry, hasVulnData bool) DiffSummary {
	// totalSize sums layer sizes for one side of the diff.
	totalSize := func(layers []LayerDetail) int64 {
		var sum int64
		for _, l := range layers {
			sum += l.Size
		}
		return sum
	}
	out := DiffSummary{
		SizeDelta:      totalSize(toLayers) - totalSize(fromLayers),
		LayerCountFrom: len(fromLayers),
		LayerCountTo:   len(toLayers),
		HasVulnData:    hasVulnData,
	}
	// Only fixed/new vulns feed the banner counters; unchanged ones don't.
	for _, entry := range vulnDiff {
		if entry.Status == "fixed" {
			out.VulnFixedCount++
			addToSevCount(&out.VulnFixedBySev, entry.Vuln.Severity)
		} else if entry.Status == "new" {
			out.VulnNewCount++
			addToSevCount(&out.VulnNewBySev, entry.Vuln.Severity)
		}
	}
	return out
}
// addToSevCount buckets one vulnerability's severity into s and bumps Total.
func addToSevCount(s *vulnSummary, severity string) {
	// Normalize to canonical casing so "CRITICAL", "critical", "Crit" all
	// land in the same bucket. Unknown severities count toward the total but
	// don't bump any bucket — the template renders them as "Unknown" via the
	// severityLabel helper.
	normalized := strings.ToLower(strings.TrimSpace(severity))
	switch normalized {
	case "critical", "crit", "c":
		s.Critical++
	case "high", "h":
		s.High++
	case "medium", "med", "m":
		s.Medium++
	case "low", "l":
		s.Low++
	}
	s.Total++
}
// ManifestDiffHandler renders the full diff page comparing two manifests.
// It embeds BaseUIHandler for its DB handles, templates, and site config.
type ManifestDiffHandler struct {
	BaseUIHandler
}
// ServeHTTP handles GET /diff/{handle}/*: it resolves the owner identity,
// maps the "from"/"to" query params (tag names or sha256 digests) to
// manifest digests, fetches layer and vulnerability data for both sides
// in parallel, computes layer/vuln diffs, and renders the "diff" template.
func (h *ManifestDiffHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	identifier := chi.URLParam(r, "handle")
	// Route: /diff/{handle}/* — wildcard captures the repo name
	repo := strings.TrimPrefix(chi.URLParam(r, "*"), "/")
	if repo == "" {
		RenderNotFound(w, r, &h.BaseUIHandler)
		return
	}
	fromParam := r.URL.Query().Get("from")
	toParam := r.URL.Query().Get("to")
	if fromParam == "" || toParam == "" {
		RenderNotFound(w, r, &h.BaseUIHandler)
		return
	}
	// Resolve identity
	did, resolvedHandle, _, err := atproto.ResolveIdentity(r.Context(), identifier)
	if err != nil {
		RenderNotFound(w, r, &h.BaseUIHandler)
		return
	}
	owner, err := db.GetUserByDID(h.ReadOnlyDB, did)
	if err != nil || owner == nil {
		RenderNotFound(w, r, &h.BaseUIHandler)
		return
	}
	// Keep our cached handle in sync with the freshly resolved one.
	// The write error is deliberately ignored: best-effort cache refresh.
	if owner.Handle != resolvedHandle {
		_ = db.UpdateUserHandle(h.DB, did, resolvedHandle)
		owner.Handle = resolvedHandle
	}
	// Resolve from/to params — accept either digests (sha256:...) or tag names
	fromDigest := fromParam
	toDigest := toParam
	if !strings.HasPrefix(fromDigest, "sha256:") {
		tag, err := db.GetTagByName(h.ReadOnlyDB, owner.DID, repo, fromParam)
		if err != nil || tag == nil {
			RenderNotFound(w, r, &h.BaseUIHandler)
			return
		}
		fromDigest = tag.Digest
	}
	if !strings.HasPrefix(toDigest, "sha256:") {
		tag, err := db.GetTagByName(h.ReadOnlyDB, owner.DID, repo, toParam)
		if err != nil || tag == nil {
			RenderNotFound(w, r, &h.BaseUIHandler)
			return
		}
		toDigest = tag.Digest
	}
	// Fetch both manifests
	type manifestData struct {
		manifest *db.ManifestWithMetadata
		layers   []LayerDetail
		vulnData *vulnDetailsData
		err      error
	}
	// fetchManifest fetches layers and vulns for a digest.
	// For manifest lists, it uses the provided platform child digest instead.
	fetchManifest := func(digest, platformDigest string) manifestData {
		m, err := db.GetManifestDetail(h.ReadOnlyDB, owner.DID, repo, digest)
		if err != nil {
			return manifestData{err: err}
		}
		// For multi-arch, resolve to the platform child
		layerManifest := m
		layerDigest := digest
		holdEndpoint := m.HoldEndpoint
		if m.IsManifestList && platformDigest != "" {
			child, err := db.GetManifestDetail(h.ReadOnlyDB, owner.DID, repo, platformDigest)
			if err == nil {
				layerManifest = child
				layerDigest = platformDigest
				if child.HoldEndpoint != "" {
					holdEndpoint = child.HoldEndpoint
				}
			}
		}
		// Layer lookup error is ignored: buildLayerDetails handles an
		// empty dbLayers slice and the page can render without sizes.
		dbLayers, _ := db.GetLayersForManifest(h.ReadOnlyDB, layerManifest.ID)
		var layers []LayerDetail
		var vulnData *vulnDetailsData
		hold, holdErr := ResolveHold(r.Context(), h.ReadOnlyDB, holdEndpoint)
		if holdErr == nil {
			config, err := holdclient.FetchImageConfig(r.Context(), hold.URL, layerDigest)
			if err == nil {
				layers = buildLayerDetails(config.History, dbLayers)
			} else {
				layers = buildLayerDetails(nil, dbLayers)
			}
			vd := FetchVulnDetails(r.Context(), hold.DID, layerDigest)
			vulnData = &vd
		} else {
			// Hold unreachable: fall back to DB layer rows only; vulnData
			// stays nil, which the caller maps to "hold-unreachable".
			layers = buildLayerDetails(nil, dbLayers)
		}
		return manifestData{manifest: m, layers: layers, vulnData: vulnData}
	}
	// First fetch both top-level manifests to check for multi-arch
	fromManifest, err := db.GetManifestDetail(h.ReadOnlyDB, owner.DID, repo, fromDigest)
	if err != nil {
		RenderNotFound(w, r, &h.BaseUIHandler)
		return
	}
	toManifest, err := db.GetManifestDetail(h.ReadOnlyDB, owner.DID, repo, toDigest)
	if err != nil {
		RenderNotFound(w, r, &h.BaseUIHandler)
		return
	}
	// Find common platforms for multi-arch
	var commonPlatforms []db.PlatformInfo
	var selectedPlatform string
	isMultiArch := fromManifest.IsManifestList && toManifest.IsManifestList
	fromPlatformDigest := ""
	toPlatformDigest := ""
	// platKey returns "os/arch[/variant]" for a platform.
	platKey := func(os, arch, variant string) string {
		k := os + "/" + arch
		if variant != "" {
			k += "/" + variant
		}
		return k
	}
	// pickPlatformChild returns the child digest from a manifest list whose
	// platform matches the given key. Returns "" if no match.
	pickPlatformChild := func(m *db.ManifestWithMetadata, key string) string {
		for _, p := range m.Platforms {
			if platKey(p.OS, p.Architecture, p.Variant) == key {
				return p.Digest
			}
		}
		return ""
	}
	if isMultiArch {
		// Build intersection of platforms
		for _, fp := range fromManifest.Platforms {
			for _, tp := range toManifest.Platforms {
				if fp.OS == tp.OS && fp.Architecture == tp.Architecture && fp.Variant == tp.Variant {
					commonPlatforms = append(commonPlatforms, tp)
					break
				}
			}
		}
		// Use query param or default to first common platform
		selectedPlatform = r.URL.Query().Get("platform")
		if len(commonPlatforms) > 0 {
			if selectedPlatform == "" {
				selectedPlatform = platKey(commonPlatforms[0].OS, commonPlatforms[0].Architecture, commonPlatforms[0].Variant)
			}
			fromPlatformDigest = pickPlatformChild(fromManifest, selectedPlatform)
			toPlatformDigest = pickPlatformChild(toManifest, selectedPlatform)
		}
	} else if fromManifest.IsManifestList != toManifest.IsManifestList {
		// Mixed: one side is a manifest list, the other is a platform child.
		// Match them by looking up the single-arch side's platform via its
		// parent manifest_references row and picking the matching child from
		// the manifest list side.
		var listSide *db.ManifestWithMetadata
		var childDigest string
		if fromManifest.IsManifestList {
			listSide = fromManifest
			childDigest = toDigest
		} else {
			listSide = toManifest
			childDigest = fromDigest
		}
		plat, _ := db.GetChildManifestPlatform(h.ReadOnlyDB, owner.DID, repo, childDigest)
		var listChildDigest string
		if plat != nil {
			listChildDigest = pickPlatformChild(listSide, platKey(plat.OS, plat.Architecture, plat.Variant))
		}
		// Fallback: if we couldn't determine the platform (or no match),
		// default to the first non-attestation child of the manifest list so
		// the diff at least shows real layers instead of an empty index.
		if listChildDigest == "" {
			for _, p := range listSide.Platforms {
				if p.Digest != "" {
					listChildDigest = p.Digest
					break
				}
			}
		}
		if fromManifest.IsManifestList {
			fromPlatformDigest = listChildDigest
		} else {
			toPlatformDigest = listChildDigest
		}
	}
	// Fetch layer/vuln data in parallel
	var fromData, toData manifestData
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		fromData = fetchManifest(fromDigest, fromPlatformDigest)
	}()
	go func() {
		defer wg.Done()
		toData = fetchManifest(toDigest, toPlatformDigest)
	}()
	wg.Wait()
	// Track per-side fetch failures so we render the page with an inline
	// alert naming which tag failed, instead of a generic 404 that makes
	// users guess whether they typoed a tag or hit a transient outage.
	// fromData.manifest / toData.manifest is nil only when the re-fetch at
	// the top of fetchManifest hit a DB error (the tag resolution earlier
	// already ruled out typos).
	fromFailed := fromData.err != nil || fromData.manifest == nil
	toFailed := toData.err != nil || toData.manifest == nil
	// Fall back to the top-level manifest we already fetched so the page
	// still has something to render for tag labels and metadata.
	if fromFailed {
		fromData.manifest = fromManifest
	}
	if toFailed {
		toData.manifest = toManifest
	}
	// Compute diffs
	layerDiff := computeLayerDiff(fromData.layers, toData.layers)
	// ScanStatus distinguishes why vuln data may be missing: "ok" when both
	// sides returned clean scan results; "no-data" when a scan was never
	// recorded; "hold-unreachable" when we couldn't reach the hold to ask.
	// The template branches on these so users can tell "not scanned yet"
	// from "hold offline" at a glance.
	fromScanStatus := "ok"
	toScanStatus := "ok"
	if fromData.vulnData == nil {
		fromScanStatus = "hold-unreachable"
	} else if fromData.vulnData.Error != "" {
		fromScanStatus = "no-data"
	}
	if toData.vulnData == nil {
		toScanStatus = "hold-unreachable"
	} else if toData.vulnData.Error != "" {
		toScanStatus = "no-data"
	}
	var vulnDiff []VulnDiffEntry
	hasVulnData := fromScanStatus == "ok" && toScanStatus == "ok"
	if hasVulnData {
		vulnDiff = computeVulnDiff(fromData.vulnData.Matches, toData.vulnData.Matches)
	}
	summary := computeDiffSummary(fromData.layers, toData.layers, vulnDiff, hasVulnData)
	// Determine tag labels: prefer the first tag, fall back to the digest.
	fromTag := fromDigest
	if len(fromData.manifest.Tags) > 0 {
		fromTag = fromData.manifest.Tags[0]
	}
	toTag := toDigest
	if len(toData.manifest.Tags) > 0 {
		toTag = toData.manifest.Tags[0]
	}
	// Count vulns by status for template
	var fixedVulns, newVulns, unchangedVulns []vulnMatch
	for _, entry := range vulnDiff {
		switch entry.Status {
		case "fixed":
			fixedVulns = append(fixedVulns, entry.Vuln)
		case "new":
			newVulns = append(newVulns, entry.Vuln)
		case "unchanged":
			unchangedVulns = append(unchangedVulns, entry.Vuln)
		}
	}
	title := fmt.Sprintf("Diff: %s → %s - %s/%s - %s", fromTag, toTag, owner.Handle, repo, h.ClientShortName)
	description := fmt.Sprintf("Comparing %s to %s in %s/%s", fromTag, toTag, owner.Handle, repo)
	meta := NewPageMeta(title, description).
		WithCanonical(fmt.Sprintf("https://%s/diff/%s/%s?from=%s&to=%s", h.SiteURL, owner.Handle, repo, fromDigest, toDigest)).
		WithSiteName(h.ClientShortName)
	data := struct {
		PageData
		Meta             *PageMeta
		Owner            *db.User
		Repository       string
		FromManifest     *db.ManifestWithMetadata
		ToManifest       *db.ManifestWithMetadata
		FromTag          string
		ToTag            string
		Summary          DiffSummary
		LayerDiff        []LayerDiffEntry
		FixedVulns       []vulnMatch
		NewVulns         []vulnMatch
		UnchangedVulns   []vulnMatch
		HasVulnData      bool
		FromScanStatus   string
		ToScanStatus     string
		FromFailed       bool
		ToFailed         bool
		IsMultiArch      bool
		CommonPlatforms  []db.PlatformInfo
		SelectedPlatform string
		FromDigest       string
		ToDigest         string
	}{
		PageData:         NewPageData(r, &h.BaseUIHandler),
		Meta:             meta,
		Owner:            owner,
		Repository:       repo,
		FromManifest:     fromData.manifest,
		ToManifest:       toData.manifest,
		FromTag:          fromTag,
		ToTag:            toTag,
		Summary:          summary,
		LayerDiff:        layerDiff,
		FixedVulns:       fixedVulns,
		NewVulns:         newVulns,
		UnchangedVulns:   unchangedVulns,
		HasVulnData:      hasVulnData,
		FromScanStatus:   fromScanStatus,
		ToScanStatus:     toScanStatus,
		FromFailed:       fromFailed,
		ToFailed:         toFailed,
		IsMultiArch:      isMultiArch,
		CommonPlatforms:  commonPlatforms,
		SelectedPlatform: selectedPlatform,
		FromDigest:       fromDigest,
		ToDigest:         toDigest,
	}
	if err := h.Templates.ExecuteTemplate(w, "diff", data); err != nil {
		slog.Warn("Failed to render diff page", "error", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}

View File

@@ -0,0 +1,402 @@
package handlers
import (
"testing"
)
// TestComputeLayerDiff_IdenticalLayers verifies a self-diff marks every layer shared.
func TestComputeLayerDiff_IdenticalLayers(t *testing.T) {
	layers := []LayerDetail{
		{Index: 1, Digest: "sha256:aaa", Size: 100, Command: "ADD file in /"},
		{Index: 2, Digest: "sha256:bbb", Size: 200, Command: "RUN apt-get update"},
	}
	got := computeLayerDiff(layers, layers)
	if len(got) != 2 {
		t.Fatalf("expected 2 entries, got %d", len(got))
	}
	for _, entry := range got {
		if entry.Status != "shared" {
			t.Errorf("expected shared, got %s", entry.Status)
		}
	}
}
// TestComputeLayerDiff_SharedPrefixThenDivergence verifies a shared base layer
// followed by command-less divergent layers yields shared, -/+ pair, then added.
func TestComputeLayerDiff_SharedPrefixThenDivergence(t *testing.T) {
	before := []LayerDetail{
		{Index: 1, Digest: "sha256:base", Size: 100},
		{Index: 2, Digest: "sha256:old", Size: 200},
	}
	after := []LayerDetail{
		{Index: 1, Digest: "sha256:base", Size: 100},
		{Index: 2, Digest: "sha256:new1", Size: 300},
		{Index: 3, Digest: "sha256:new2", Size: 150},
	}
	got := computeLayerDiff(before, after)
	// Lockstep: shared, then -/+ pair (no command match), then +1 added
	if len(got) != 4 {
		t.Fatalf("expected 4 entries, got %d", len(got))
	}
	want := []struct {
		status string
		digest string
	}{
		{"shared", "sha256:base"},
		{"removed", "sha256:old"}, // no command, different digest → -/+
		{"added", "sha256:new1"},
		{"added", "sha256:new2"}, // extra layer in to
	}
	for i, w := range want {
		if got[i].Status != w.status {
			t.Errorf("[%d] expected status %s, got %s", i, w.status, got[i].Status)
		}
		if got[i].Layer.Digest != w.digest {
			t.Errorf("[%d] expected digest %s, got %s", i, w.digest, got[i].Layer.Digest)
		}
	}
}
// TestComputeLayerDiff_SameCommandDifferentDigest verifies layers with
// matching commands but new digests are reported as rebuilt with PrevLayer set.
func TestComputeLayerDiff_SameCommandDifferentDigest(t *testing.T) {
	before := []LayerDetail{
		{Index: 1, Digest: "sha256:base", Size: 100, Command: "ADD file in /"},
		{Index: 2, Digest: "sha256:old", Size: 200, Command: "RUN apt-get update"},
		{Index: 3, Digest: "sha256:old2", Size: 300, Command: "RUN pip install flask"},
	}
	after := []LayerDetail{
		{Index: 1, Digest: "sha256:base", Size: 100, Command: "ADD file in /"},
		{Index: 2, Digest: "sha256:new", Size: 250, Command: "RUN apt-get update"},
		{Index: 3, Digest: "sha256:new2", Size: 350, Command: "RUN pip install flask"},
	}
	got := computeLayerDiff(before, after)
	if len(got) != 3 {
		t.Fatalf("expected 3 entries, got %d", len(got))
	}
	if got[0].Status != "shared" {
		t.Errorf("[0] expected shared, got %s", got[0].Status)
	}
	if got[1].Status != "rebuilt" {
		t.Errorf("[1] expected rebuilt, got %s", got[1].Status)
	}
	if got[1].PrevLayer == nil || got[1].PrevLayer.Size != 200 {
		t.Error("[1] expected PrevLayer with size 200")
	}
	if got[2].Status != "rebuilt" {
		t.Errorf("[2] expected rebuilt, got %s", got[2].Status)
	}
}
// TestComputeLayerDiff_DifferentCommandDifferentDigest verifies that a layer
// whose command AND digest both changed is reported as a removed/added pair.
func TestComputeLayerDiff_DifferentCommandDifferentDigest(t *testing.T) {
	before := []LayerDetail{
		{Index: 1, Digest: "sha256:base", Size: 100, Command: "ADD file in /"},
		{Index: 2, Digest: "sha256:old", Size: 200, Command: "RUN pip install requests==2.28"},
	}
	after := []LayerDetail{
		{Index: 1, Digest: "sha256:base", Size: 100, Command: "ADD file in /"},
		{Index: 2, Digest: "sha256:new", Size: 250, Command: "RUN pip install requests==2.31"},
	}
	got := computeLayerDiff(before, after)
	if len(got) != 3 {
		t.Fatalf("expected 3 entries, got %d", len(got))
	}
	if got[0].Status != "shared" {
		t.Errorf("[0] expected shared, got %s", got[0].Status)
	}
	// Different command → -/+ pair
	if got[1].Status != "removed" {
		t.Errorf("[1] expected removed, got %s", got[1].Status)
	}
	if got[2].Status != "added" {
		t.Errorf("[2] expected added, got %s", got[2].Status)
	}
}
// TestComputeLayerDiff_InsertedLayer verifies a layer inserted mid-image shows
// as added while the surrounding same-command layers show as rebuilt.
func TestComputeLayerDiff_InsertedLayer(t *testing.T) {
	before := []LayerDetail{
		{Index: 1, Digest: "sha256:aaa", Size: 100, Command: "ADD file in /"},
		{Index: 2, Digest: "sha256:bbb", Size: 200, Command: "RUN apt-get update"},
		{Index: 3, Digest: "sha256:ccc", Size: 300, Command: "RUN pip install flask"},
	}
	after := []LayerDetail{
		{Index: 1, Digest: "sha256:aaa", Size: 100, Command: "ADD file in /"},
		{Index: 2, Digest: "sha256:ddd", Size: 210, Command: "RUN apt-get update"},
		{Index: 3, Digest: "sha256:eee", Size: 150, Command: "RUN apt-get install curl"},
		{Index: 4, Digest: "sha256:fff", Size: 310, Command: "RUN pip install flask"},
	}
	got := computeLayerDiff(before, after)
	// Expected: shared, rebuilt, +added, rebuilt
	want := []string{"shared", "rebuilt", "added", "rebuilt"}
	if len(got) != len(want) {
		t.Fatalf("expected %d entries, got %d: %v", len(want), len(got), diffStatuses(got))
	}
	for i, status := range want {
		if got[i].Status != status {
			t.Errorf("[%d] expected %s, got %s", i, status, got[i].Status)
		}
	}
}
// TestComputeLayerDiff_RemovedLayer verifies a layer dropped mid-image shows
// as removed while the surrounding same-command layers show as rebuilt.
func TestComputeLayerDiff_RemovedLayer(t *testing.T) {
	before := []LayerDetail{
		{Index: 1, Digest: "sha256:aaa", Size: 100, Command: "ADD file in /"},
		{Index: 2, Digest: "sha256:bbb", Size: 200, Command: "RUN apt-get update"},
		{Index: 3, Digest: "sha256:ccc", Size: 150, Command: "RUN apt-get install curl"},
		{Index: 4, Digest: "sha256:ddd", Size: 300, Command: "RUN pip install flask"},
	}
	after := []LayerDetail{
		{Index: 1, Digest: "sha256:aaa", Size: 100, Command: "ADD file in /"},
		{Index: 2, Digest: "sha256:eee", Size: 210, Command: "RUN apt-get update"},
		{Index: 3, Digest: "sha256:fff", Size: 310, Command: "RUN pip install flask"},
	}
	got := computeLayerDiff(before, after)
	// Expected: shared, rebuilt, -removed, rebuilt
	want := []string{"shared", "rebuilt", "removed", "rebuilt"}
	if len(got) != len(want) {
		t.Fatalf("expected %d entries, got %d: %v", len(want), len(got), diffStatuses(got))
	}
	for i, status := range want {
		if got[i].Status != status {
			t.Errorf("[%d] expected %s, got %s", i, status, got[i].Status)
		}
	}
}
// diffStatuses flattens a diff into its status strings for test failure messages.
func diffStatuses(diff []LayerDiffEntry) []string {
	statuses := make([]string, 0, len(diff))
	for _, entry := range diff {
		statuses = append(statuses, entry.Status)
	}
	return statuses
}
// TestComputeLayerDiff_EmptyLayersMatchByCommand verifies digest-less empty
// layers (e.g. ENV) match purely on their command and stay shared.
func TestComputeLayerDiff_EmptyLayersMatchByCommand(t *testing.T) {
	before := []LayerDetail{
		{Index: 1, Digest: "sha256:base", Size: 100},
		{Index: 0, EmptyLayer: true, Command: "ENV FOO=bar"},
		{Index: 2, Digest: "sha256:old", Size: 200},
	}
	after := []LayerDetail{
		{Index: 1, Digest: "sha256:base", Size: 100},
		{Index: 0, EmptyLayer: true, Command: "ENV FOO=bar"},
		{Index: 2, Digest: "sha256:new", Size: 300},
	}
	got := computeLayerDiff(before, after)
	if len(got) != 4 {
		t.Fatalf("expected 4 entries, got %d", len(got))
	}
	if got[0].Status != "shared" || got[1].Status != "shared" {
		t.Error("first two entries should be shared (base layer + empty layer)")
	}
	// Different digests, no command → -/+ pair
	if got[2].Status != "removed" {
		t.Errorf("[2] expected removed, got %s", got[2].Status)
	}
	if got[3].Status != "added" {
		t.Errorf("[3] expected added, got %s", got[3].Status)
	}
}
// TestComputeLayerDiff_CompletelyDifferent verifies disjoint layer sets
// produce only removed/added entries.
func TestComputeLayerDiff_CompletelyDifferent(t *testing.T) {
	before := []LayerDetail{
		{Index: 1, Digest: "sha256:old1", Size: 100},
	}
	after := []LayerDetail{
		{Index: 1, Digest: "sha256:new1", Size: 200},
		{Index: 2, Digest: "sha256:new2", Size: 300},
	}
	got := computeLayerDiff(before, after)
	// Lockstep: -/+ pair for position 1, then +1 added
	if len(got) != 3 {
		t.Fatalf("expected 3 entries, got %d", len(got))
	}
	for i, status := range []string{"removed", "added", "added"} {
		if got[i].Status != status {
			t.Errorf("[%d] expected %s, got %s", i, status, got[i].Status)
		}
	}
}
// TestComputeLayerDiff_EmptyInputs verifies nil inputs on either side.
func TestComputeLayerDiff_EmptyInputs(t *testing.T) {
	if got := computeLayerDiff(nil, nil); len(got) != 0 {
		t.Fatalf("expected 0 entries, got %d", len(got))
	}
	single := []LayerDetail{{Index: 1, Digest: "sha256:a"}}
	if got := computeLayerDiff(nil, single); len(got) != 1 || got[0].Status != "added" {
		t.Error("expected 1 added entry")
	}
	if got := computeLayerDiff(single, nil); len(got) != 1 || got[0].Status != "removed" {
		t.Error("expected 1 removed entry")
	}
}
// TestComputeVulnDiff_FixedAndNew verifies CVEs are bucketed into
// fixed/new/unchanged by set membership across the two sides.
func TestComputeVulnDiff_FixedAndNew(t *testing.T) {
	before := []vulnMatch{
		{CVEID: "CVE-2024-001", Severity: "Critical", Package: "openssl", Version: "1.1.0"},
		{CVEID: "CVE-2024-002", Severity: "High", Package: "curl", Version: "7.85"},
		{CVEID: "CVE-2024-003", Severity: "Medium", Package: "zlib", Version: "1.2.11"},
	}
	after := []vulnMatch{
		{CVEID: "CVE-2024-002", Severity: "High", Package: "curl", Version: "7.85"},
		{CVEID: "CVE-2025-001", Severity: "High", Package: "requests", Version: "2.31"},
	}
	byStatus := map[string]int{}
	for _, entry := range computeVulnDiff(before, after) {
		byStatus[entry.Status]++
	}
	if byStatus["fixed"] != 2 {
		t.Errorf("expected 2 fixed, got %d", byStatus["fixed"])
	}
	if byStatus["new"] != 1 {
		t.Errorf("expected 1 new, got %d", byStatus["new"])
	}
	if byStatus["unchanged"] != 1 {
		t.Errorf("expected 1 unchanged, got %d", byStatus["unchanged"])
	}
}
// TestComputeVulnDiff_AllFixed verifies that diffing against a nil "to" scan
// marks every previous vulnerability as fixed.
func TestComputeVulnDiff_AllFixed(t *testing.T) {
	from := []vulnMatch{
		{CVEID: "CVE-2024-001", Severity: "Critical"},
		{CVEID: "CVE-2024-002", Severity: "High"},
	}
	diff := computeVulnDiff(from, nil)
	// Validate the length first with Fatalf (matching the sibling tests) so a
	// short result fails fast instead of silently iterating fewer entries.
	if len(diff) != 2 {
		t.Fatalf("expected 2, got %d", len(diff))
	}
	for i, e := range diff {
		if e.Status != "fixed" {
			t.Errorf("[%d] expected fixed, got %s", i, e.Status)
		}
	}
}
// TestComputeVulnDiff_AllNew verifies that diffing from a nil "from" scan
// reports every current vulnerability as new.
func TestComputeVulnDiff_AllNew(t *testing.T) {
	current := []vulnMatch{{CVEID: "CVE-2025-001", Severity: "Critical"}}
	got := computeVulnDiff(nil, current)
	if len(got) != 1 || got[0].Status != "new" {
		t.Error("expected 1 new entry")
	}
}
// TestComputeVulnDiff_Empty verifies that two nil scans yield an empty diff.
func TestComputeVulnDiff_Empty(t *testing.T) {
	if got := computeVulnDiff(nil, nil); len(got) != 0 {
		t.Errorf("expected 0, got %d", len(got))
	}
}
// TestComputeDiffSummary checks the aggregate summary: total size delta,
// per-side layer counts, fixed/new vulnerability totals, the per-severity
// breakdowns, and the HasVulnData flag.
func TestComputeDiffSummary(t *testing.T) {
	base := []LayerDetail{
		{Index: 1, Size: 1000},
		{Index: 2, Size: 2000},
	}
	next := []LayerDetail{
		{Index: 1, Size: 1000},
		{Index: 2, Size: 2500},
		{Index: 3, Size: 500},
	}
	entries := []VulnDiffEntry{
		{Status: "fixed", Vuln: vulnMatch{Severity: "Critical"}},
		{Status: "fixed", Vuln: vulnMatch{Severity: "High"}},
		{Status: "fixed", Vuln: vulnMatch{Severity: "High"}},
		{Status: "new", Vuln: vulnMatch{Severity: "Medium"}},
		{Status: "unchanged", Vuln: vulnMatch{Severity: "Low"}},
	}

	got := computeDiffSummary(base, next, entries, true)

	// Size/layer bookkeeping: 4000 total vs 3000 total → +1000.
	if got.SizeDelta != 1000 {
		t.Errorf("expected size delta 1000, got %d", got.SizeDelta)
	}
	if got.LayerCountFrom != 2 {
		t.Errorf("expected from count 2, got %d", got.LayerCountFrom)
	}
	if got.LayerCountTo != 3 {
		t.Errorf("expected to count 3, got %d", got.LayerCountTo)
	}
	// Vulnerability totals: "unchanged" counts toward neither bucket.
	if got.VulnFixedCount != 3 {
		t.Errorf("expected 3 fixed, got %d", got.VulnFixedCount)
	}
	if got.VulnNewCount != 1 {
		t.Errorf("expected 1 new, got %d", got.VulnNewCount)
	}
	// Per-severity breakdowns.
	if got.VulnFixedBySev.Critical != 1 {
		t.Errorf("expected 1 fixed critical, got %d", got.VulnFixedBySev.Critical)
	}
	if got.VulnFixedBySev.High != 2 {
		t.Errorf("expected 2 fixed high, got %d", got.VulnFixedBySev.High)
	}
	if got.VulnNewBySev.Medium != 1 {
		t.Errorf("expected 1 new medium, got %d", got.VulnNewBySev.Medium)
	}
	if !got.HasVulnData {
		t.Error("expected HasVulnData to be true")
	}
}
func TestComputeDiffSummary_NoVulnData(t *testing.T) {
summary := computeDiffSummary(
[]LayerDetail{{Size: 100}},
[]LayerDetail{{Size: 200}},
nil,
false,
)
if summary.HasVulnData {
t.Error("expected HasVulnData to be false")
}
if summary.SizeDelta != 100 {
t.Errorf("expected size delta 100, got %d", summary.SizeDelta)
}
}
func TestComputeDiffSummary_SmallerImage(t *testing.T) {
summary := computeDiffSummary(
[]LayerDetail{{Size: 5000}, {Size: 3000}},
[]LayerDetail{{Size: 2000}},
nil,
false,
)
if summary.SizeDelta != -6000 {
t.Errorf("expected size delta -6000, got %d", summary.SizeDelta)
}
if summary.LayerCountFrom != 2 || summary.LayerCountTo != 1 {
t.Error("unexpected layer counts")
}
}
