Mirror of https://tangled.org/evan.jarrett.net/at-container-registry (synced 2026-05-14 03:51:28 +00:00)
Compare commits
50 Commits
| SHA1 |
|---|
| 98a2cfea59 |
| 3533f07ecb |
| 56f3b2fc2f |
| 2d0872790d |
| 419d05057e |
| e1483f7aa5 |
| b2d6842bb7 |
| 1ac8af74d5 |
| c2250c8787 |
| ba05b20364 |
| 18b000d0c1 |
| 4328eda814 |
| 7b4a2e22a2 |
| ab66028151 |
| 410183f13b |
| 35aa19e2b2 |
| 13a793ca90 |
| 9af6eccc9d |
| f9ba8ff62b |
| e81681d61b |
| dfee21e3d3 |
| 1bff09ecbe |
| a602bf08d1 |
| 8f4fca815b |
| 9e09401cb3 |
| c7783bf87c |
| 267012b41e |
| 6b6ce093d3 |
| f057f169f0 |
| 23484645c0 |
| 37cf9d0b86 |
| 872b637547 |
| 185b0d05cb |
| bbd7882a9d |
| 7a6775d4fa |
| 7c6b8945ed |
| 38c693acc9 |
| 9809c26281 |
| e843b7233c |
| 2f30c22f0a |
| 25628dad2c |
| 564019d1c3 |
| 9033d74a19 |
| 03759713e9 |
| a68477033a |
| 21b6f6301a |
| 1865377b52 |
| 20202aa0df |
| 90b0b42766 |
| b54d33ccf8 |
@@ -6,7 +6,7 @@ pre_cmd = ["go generate ./pkg/hold/..."]
cmd = "go build -buildvcs=false -o ./tmp/atcr-hold ./cmd/hold"
entrypoint = ["./tmp/atcr-hold", "serve", "--config", "config-hold.example.yaml"]
include_ext = ["go", "html", "css", "js"]
exclude_dir = ["bin", "tmp", "vendor", "deploy", "docs", ".git", "dist", "pkg/appview", "node_modules"]
exclude_dir = ["bin", "tmp", "vendor", "deploy", "docs", ".git", "dist", "node_modules", "scanner", "pkg/appview", "pkg/labeler"]
exclude_regex = ["_test\\.go$", "cbor_gen\\.go$", "\\.min\\.js$", "public/css/style\\.css$", "public/icons\\.svg$"]
delay = 3000
stop_on_error = true
25 .air.labeler.toml (new file)
@@ -0,0 +1,25 @@
root = "."
tmp_dir = "tmp"

[build]
cmd = "go build -buildvcs=false -o ./tmp/atcr-labeler ./cmd/labeler"
entrypoint = ["./tmp/atcr-labeler", "serve", "--config", "config-labeler.example.yaml"]
include_ext = ["go", "html", "css", "js"]
exclude_dir = ["bin", "tmp", "vendor", "deploy", "docs", ".git", "dist", "node_modules", "scanner", "pkg/appview", "pkg/hold"]
exclude_regex = ["_test\\.go$", "cbor_gen\\.go$", "\\.min\\.js$", "public/css/style\\.css$", "public/icons\\.svg$"]
delay = 3000
stop_on_error = true
send_interrupt = true
kill_delay = 500

[log]
time = false

[color]
main = "cyan"
watcher = "magenta"
build = "yellow"
runner = "green"

[misc]
clean_on_exit = true
@@ -10,7 +10,7 @@ pre_cmd = ["go generate ./pkg/appview/..."]
cmd = "go build -tags billing -buildvcs=false -o ./tmp/atcr-appview ./cmd/appview"
entrypoint = ["./tmp/atcr-appview", "serve", "--config", "config-appview.example.yaml"]
include_ext = ["go", "html", "css", "js"]
exclude_dir = ["bin", "tmp", "vendor", "deploy", "docs", ".git", "dist", "node_modules", "pkg/hold"]
exclude_dir = ["bin", "tmp", "vendor", "deploy", "docs", ".git", "dist", "node_modules", "scanner", "pkg/hold", "pkg/labeler"]
exclude_regex = ["_test\\.go$", "cbor_gen\\.go$", "\\.min\\.js$", "public/css/style\\.css$", "public/icons\\.svg$"]
delay = 3000
stop_on_error = true
1 .gitignore (vendored)
@@ -36,3 +36,4 @@ pkg/hold/admin/public/css/style.css
.DS_Store
Thumbs.db
node_modules
.impeccable.md
@@ -45,6 +45,49 @@ archives:
checksum:
  name_template: 'checksums.txt'

brews:
  - name: docker-credential-atcr
    ids:
      - credential-helper
    repository:
      # Tap lives inside the main repo under Formula/. DID-based URL keeps
      # this stable across any future Tangled handle/domain rename.
      owner: 'did:plc:pddp4xt5lgnv2qsegbzzs4xg'
      name: at-container-registry
      branch: main
      git:
        url: 'git@knot1.tangled.sh:did:plc:pddp4xt5lgnv2qsegbzzs4xg/at-container-registry'
        private_key: '{{ .Env.TAP_SSH_KEY }}'
    directory: Formula
    url_template: "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v{{ .Version }}/download/{{ .ArtifactName }}"
    homepage: "https://atcr.io"
    description: "Docker credential helper for ATCR (ATProto Container Registry)"
    license: "MIT"
    commit_author:
      name: atcr-bot
      email: releases@atcr.io
    commit_msg_template: "Brew formula update for {{ .ProjectName }} version {{ .Tag }}"
    install: |
      bin.install "docker-credential-atcr"
    test: |
      assert_match version.to_s, shell_output("#{bin}/docker-credential-atcr version 2>&1")
    caveats: |
      To finish setup, run:
        docker-credential-atcr configure-docker

      This writes the credential helper entry to ~/.docker/config.json. Or
      add manually:
        {
          "credHelpers": {
            "atcr.io": "atcr"
          }
        }

      Then push an image to trigger device authorization:
        docker push atcr.io/<your-handle>/<image>:latest

      Credentials live in ~/.atcr/device.json (0600).

snapshot:
  version_template: "{{ incpatch .Version }}-next"
@@ -60,14 +103,7 @@ changelog:

publishers:
  - name: atproto-pds
    cmd: ./scripts/publish-artifact.sh
    env:
      - APP_PASSWORD={{ .Env.APP_PASSWORD }}
      - TANGLED_REPO_DID={{ .Env.TANGLED_REPO_DID }}
      - TANGLED_REF_NAME={{ .Env.TANGLED_REF_NAME }}
      - REPO_URL={{ .Env.REPO_URL }}
      - ARTIFACT_PATH={{ abs .ArtifactPath }}
      - ARTIFACT_NAME={{ .ArtifactName }}
    cmd: ./scripts/publish-artifact.sh "{{ abs .ArtifactPath }}" "{{ .ArtifactName }}"

# Disable since not using GitHub
release:
@@ -5,7 +5,7 @@ when:
branch: ["main"]

engine: kubernetes
image: golang:1.25-trixie
image: golang:1.26-trixie
architecture: amd64

steps:
@@ -12,7 +12,7 @@ when:
tag: ["v*"]

engine: kubernetes
image: golang:1.25-trixie
image: golang:1.26-trixie
architecture: amd64

environment:

@@ -37,4 +37,8 @@ steps:
export REPO_URL="at://${TANGLED_REPO_DID}/sh.tangled.repo/${REPO_RKEY}"
export APP_PASSWORD="${PUBLISH_APP_PASSWORD}"

# Log in once here so parallel publish-artifact.sh invocations share the
# same session file instead of racing on goat's auth-session.json.
goat account login -u "${TANGLED_REPO_DID}" -p "${APP_PASSWORD}"

goreleaser release --clean
@@ -5,7 +5,7 @@ when:
branch: ["main"]

engine: kubernetes
image: golang:1.25-trixie
image: golang:1.26-trixie
architecture: amd64

steps:
@@ -1,6 +1,6 @@
# Production build for ATCR AppView
# Result: ~30MB scratch image with static binary
FROM docker.io/golang:1.25.7-trixie AS builder
FROM docker.io/golang:1.26.2-trixie AS builder

ENV DEBIAN_FRONTEND=noninteractive

@@ -18,8 +18,13 @@ COPY . .
RUN npm ci
RUN go generate ./...

# Legal "Last updated" dates — pass from host (see Makefile docker-appview
# target). Empty falls back to the hardcoded default in legal.go.
ARG PRIVACY_DATE=""
ARG TERMS_DATE=""

RUN CGO_ENABLED=1 go build \
    -ldflags="-s -w -linkmode external -extldflags '-static'" \
    -ldflags="-s -w -linkmode external -extldflags '-static' -X 'atcr.io/pkg/appview/handlers.privacyLastUpdated=${PRIVACY_DATE}' -X 'atcr.io/pkg/appview/handlers.termsLastUpdated=${TERMS_DATE}'" \
    -tags sqlite_omit_load_extension \
    -trimpath \
    -o atcr-appview ./cmd/appview
@@ -1,7 +1,7 @@
# Development image with Air hot reload
# Build: docker build -f Dockerfile.dev -t atcr-dev .
# Run: docker run -v $(pwd):/app -p 5000:5000 atcr-dev
FROM docker.io/golang:1.25.7-trixie
FROM docker.io/golang:1.26.2-trixie

ARG AIR_CONFIG=.air.toml
@@ -1,4 +1,4 @@
FROM docker.io/golang:1.25.7-trixie AS builder
FROM docker.io/golang:1.26.2-trixie AS builder

# Build argument to enable Stripe billing integration
# Usage: docker build --build-arg BILLING_ENABLED=true -f Dockerfile.hold .
@@ -1,4 +1,4 @@
FROM docker.io/golang:1.25.7-trixie AS builder
FROM docker.io/golang:1.26.2-trixie AS builder

ENV DEBIAN_FRONTEND=noninteractive
@@ -4,28 +4,28 @@
class DockerCredentialAtcr < Formula
  desc "Docker credential helper for ATCR (ATProto Container Registry)"
  homepage "https://atcr.io"
  version "0.0.1"
  version "0.1.2"
  license "MIT"

  on_macos do
    on_arm do
      url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v0.0.1/download/docker-credential-atcr_0.0.1_Darwin_arm64.tar.gz"
      sha256 "REPLACE_WITH_SHA256"
      url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v#{version}/download/docker-credential-atcr_#{version}_Darwin_arm64.tar.gz"
      sha256 "938f7de369c1a93f8275b91f238ae6ffca2f304f5bd236208b3398574c853b52"
    end
    on_intel do
      url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v0.0.1/download/docker-credential-atcr_0.0.1_Darwin_x86_64.tar.gz"
      sha256 "REPLACE_WITH_SHA256"
      url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v#{version}/download/docker-credential-atcr_#{version}_Darwin_x86_64.tar.gz"
      sha256 "3ce955317f52b049ee6c6348c0f758d36a0ed4d52f60f96b20d68798b5830c88"
    end
  end

  on_linux do
    on_arm do
      url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v0.0.1/download/docker-credential-atcr_0.0.1_Linux_arm64.tar.gz"
      sha256 "REPLACE_WITH_SHA256"
      url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v#{version}/download/docker-credential-atcr_#{version}_Linux_arm64.tar.gz"
      sha256 "be951a136a79be39935ff57f561cfc8f714fa6abfa1b0f81f9ef383e37e6278f"
    end
    on_intel do
      url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v0.0.1/download/docker-credential-atcr_0.0.1_Linux_x86_64.tar.gz"
      sha256 "REPLACE_WITH_SHA256"
      url "https://tangled.org/evan.jarrett.net/at-container-registry/tags/v#{version}/download/docker-credential-atcr_#{version}_Linux_x86_64.tar.gz"
      sha256 "08965d9177ab993c229e20c1c3a9675caa20bae795c57b37eb9c72127360d4c7"
    end
  end

@@ -39,21 +39,21 @@ class DockerCredentialAtcr < Formula

  def caveats
    <<~EOS
      To configure Docker to use ATCR credential helper, add the following
      to your ~/.docker/config.json:
      To finish setup, run:
        docker-credential-atcr configure-docker

      This writes the credential helper entry to ~/.docker/config.json. Or
      add manually:
        {
          "credHelpers": {
            "atcr.io": "atcr"
          }
        }

      Or run: docker-credential-atcr configure-docker

      To authenticate with ATCR:
      Then push an image to trigger device authorization:
        docker push atcr.io/<your-handle>/<image>:latest

      Configuration is stored in: ~/.atcr/config.json
      Credentials live in ~/.atcr/device.json (0600).
    EOS
  end
end
@@ -8,13 +8,13 @@ The ATCR credential helper enables Docker to authenticate with ATCR registries u

**Linux/macOS:**
```bash
curl -fsSL https://atcr.io/install.sh | bash
curl -fsSL https://atcr.io/static/install.sh | bash
```

Or download and run manually:

```bash
curl -fsSLO https://atcr.io/install.sh
curl -fsSLO https://atcr.io/static/install.sh
chmod +x install.sh
./install.sh
```

@@ -22,7 +22,7 @@ chmod +x install.sh
Custom installation directory:

```bash
INSTALL_DIR=$HOME/.local/bin curl -fsSL https://atcr.io/install.sh | bash
INSTALL_DIR=$HOME/.local/bin curl -fsSL https://atcr.io/static/install.sh | bash
```

**Windows (PowerShell as Administrator):**

@@ -40,8 +40,9 @@ Invoke-WebRequest -Uri https://atcr.io/install.ps1 -OutFile install.ps1
### Using Homebrew (macOS and Linux)

```bash
# Add the ATCR tap
brew tap atcr-io/tap
# Add the ATCR tap (the main repo itself serves as the tap).
# DID-based URL is stable across any future handle rename.
brew tap atcr/tap https://tangled.org/did:plc:pddp4xt5lgnv2qsegbzzs4xg/at-container-registry

# Install the credential helper
brew install docker-credential-atcr

@@ -55,7 +56,7 @@ Homebrew will automatically download the correct binary for your platform.

### Manual Installation

1. **Download the binary** for your platform from [GitHub Releases](https://github.com/atcr-io/atcr/releases)
1. **Download the binary** for your platform from [Tangled tags](https://tangled.org/evan.jarrett.net/at-container-registry/tags)

- Linux amd64: `docker-credential-atcr_VERSION_Linux_x86_64.tar.gz`
- Linux arm64: `docker-credential-atcr_VERSION_Linux_arm64.tar.gz`

@@ -84,13 +85,6 @@ Homebrew will automatically download the correct binary for your platform.
docker-credential-atcr version
```

### From Source (requires Go 1.23+)

```bash
go install atcr.io/cmd/credential-helper@latest
sudo mv $(go env GOPATH)/bin/credential-helper /usr/local/bin/docker-credential-atcr
```

## Configuration

### 1. Configure Docker
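For context on what the helper does once Docker finds it on PATH: Docker drives credential helpers over a small stdin/stdout protocol. The sketch below shows only the shape of the `get` exchange; the real helper resolves the username and secret from `~/.atcr/device.json`, and the placeholder values here are not taken from this repository.

```go
package main

import (
	"bufio"
	"encoding/json"
	"os"
	"strings"
)

// Sketch of the standard credential-helper "get" call: Docker writes the
// registry URL on stdin and reads a JSON credential from stdout.
func main() {
	serverURL, _ := bufio.NewReader(os.Stdin).ReadString('\n')
	serverURL = strings.TrimSpace(serverURL)

	_ = json.NewEncoder(os.Stdout).Encode(map[string]string{
		"ServerURL": serverURL,
		"Username":  "<your-handle>",   // placeholder
		"Secret":    "<device-secret>", // placeholder
	})
}
```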
29 Makefile
@@ -31,10 +31,18 @@ $(GENERATED_ASSETS):

build: build-appview build-hold build-credential-helper ## Build all binaries

# Legal page "Last updated" dates come from the git commit date of the page
# templates. Empty values (e.g., Docker builds without .git) fall back to the
# hardcoded default in legal.go.
LEGAL_PKG := atcr.io/pkg/appview/handlers
PRIVACY_DATE := $(shell git log -1 --format=%cs -- pkg/appview/templates/pages/privacy.html 2>/dev/null)
TERMS_DATE := $(shell git log -1 --format=%cs -- pkg/appview/templates/pages/terms.html 2>/dev/null)
APPVIEW_LDFLAGS := -X '$(LEGAL_PKG).privacyLastUpdated=$(PRIVACY_DATE)' -X '$(LEGAL_PKG).termsLastUpdated=$(TERMS_DATE)'

build-appview: $(GENERATED_ASSETS) ## Build appview binary only
	@echo "→ Building appview..."
	@mkdir -p bin
	go build -o bin/atcr-appview ./cmd/appview
	go build -ldflags="$(APPVIEW_LDFLAGS)" -o bin/atcr-appview ./cmd/appview

build-hold: $(GENERATED_ASSETS) ## Build hold binary only
	@echo "→ Building hold..."

@@ -69,7 +77,19 @@ test-verbose: ## Run tests with verbose output

.PHONY: check-golangci-lint
check-golangci-lint:
	@which golangci-lint > /dev/null || (echo "→ Installing golangci-lint..." && go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest)
	@LINT_PKG=github.com/golangci/golangci-lint/v2/cmd/golangci-lint@latest; \
	CUR_GO=$$(go version | grep -oE 'go[0-9]+\.[0-9]+' | head -1 | sed 's/^go//'); \
	if ! command -v golangci-lint > /dev/null 2>&1; then \
		echo "→ Installing golangci-lint..."; \
		go install $$LINT_PKG; \
	else \
		LINT_GO=$$(golangci-lint --version 2>&1 | grep -oE 'built with go[0-9]+\.[0-9]+' | head -1 | sed 's/^built with go//'); \
		if [ -n "$$LINT_GO" ] && [ "$$LINT_GO" != "$$CUR_GO" ] && \
			[ "$$(printf '%s\n%s\n' $$LINT_GO $$CUR_GO | sort -V | head -1)" = "$$LINT_GO" ]; then \
			echo "→ golangci-lint built with go$$LINT_GO but project targets go$$CUR_GO — reinstalling..."; \
			go install $$LINT_PKG; \
		fi; \
	fi

lint: check-golangci-lint ## Run golangci-lint
	@echo "→ Running golangci-lint..."

@@ -97,7 +117,10 @@ docker: docker-appview docker-hold docker-scanner ## Build all Docker images

docker-appview: ## Build appview Docker image
	@echo "→ Building appview Docker image..."
	docker build -f Dockerfile.appview -t atcr.io/atcr.io/appview:latest .
	docker build -f Dockerfile.appview \
		--build-arg PRIVACY_DATE=$(PRIVACY_DATE) \
		--build-arg TERMS_DATE=$(TERMS_DATE) \
		-t atcr.io/atcr.io/appview:latest .

docker-hold: ## Build hold Docker image
	@echo "→ Building hold Docker image..."
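The `-X` flags above only take effect if the target package exposes plain package-level string variables. The real `legal.go` is not part of this diff; a minimal sketch of the receiving side, with a placeholder fallback date, might look like:

```go
package handlers

// Injected at build time via
//   -ldflags "-X 'atcr.io/pkg/appview/handlers.privacyLastUpdated=2026-01-02'"
// Builds without .git pass an empty value and fall back to the default.
var (
	privacyLastUpdated string
	termsLastUpdated   string
)

const defaultLegalDate = "2026-01-01" // placeholder, not the project's real date

func privacyDate() string {
	if privacyLastUpdated != "" {
		return privacyLastUpdated
	}
	return defaultLegalDate
}
```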
@@ -54,7 +54,7 @@ atcr.io/did:plc:xyz123/myapp:latest

**1. Install credential helper:**
```bash
curl -fsSL https://atcr.io/install.sh | bash
curl -fsSL https://atcr.io/static/install.sh | bash
```

**2. Configure Docker** (add to `~/.docker/config.json`):
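The `configure-docker` step referenced throughout (and spelled out in the Formula caveats above) boils down to merging a `credHelpers` entry into `~/.docker/config.json`. The helper's actual implementation is not shown in this diff; a minimal sketch of that merge could look like:

```go
package main

import (
	"encoding/json"
	"log"
	"os"
	"path/filepath"
)

// Merge {"credHelpers": {"atcr.io": "atcr"}} into ~/.docker/config.json,
// preserving whatever else is already there. Sketch only.
func writeCredHelperEntry() error {
	home, err := os.UserHomeDir()
	if err != nil {
		return err
	}
	path := filepath.Join(home, ".docker", "config.json")

	cfg := map[string]any{}
	if raw, err := os.ReadFile(path); err == nil {
		_ = json.Unmarshal(raw, &cfg) // best effort; start fresh if unparsable
	}

	helpers, _ := cfg["credHelpers"].(map[string]any)
	if helpers == nil {
		helpers = map[string]any{}
	}
	helpers["atcr.io"] = "atcr" // Docker expands this to docker-credential-atcr
	cfg["credHelpers"] = helpers

	out, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		return err
	}
	return os.WriteFile(path, out, 0o600)
}

func main() {
	if err := writeCredHelperEntry(); err != nil {
		log.Fatal(err)
	}
}
```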
@@ -1,12 +1,13 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
@@ -16,13 +17,10 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// VersionAPIResponse is the response from /api/credential-helper/version
|
||||
type VersionAPIResponse struct {
|
||||
Latest string `json:"latest"`
|
||||
DownloadURLs map[string]string `json:"download_urls"`
|
||||
Checksums map[string]string `json:"checksums"`
|
||||
ReleaseNotes string `json:"release_notes,omitempty"`
|
||||
}
|
||||
// tangledReleasesBase is the tangled.org path for the credential-helper's
|
||||
// release repository. /tags/latest issues a 302 redirect to the latest tag,
|
||||
// and /tags/{version}/download/{filename} serves goreleaser artifacts directly.
|
||||
const tangledReleasesBase = "https://tangled.org/did:plc:e3kzdezk5gsirzh7eoqplc64"
|
||||
|
||||
func newUpdateCmd() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
@@ -37,35 +35,23 @@ func newUpdateCmd() *cobra.Command {
|
||||
func runUpdate(cmd *cobra.Command, args []string) error {
|
||||
checkOnly, _ := cmd.Flags().GetBool("check")
|
||||
|
||||
// Default API URL
|
||||
apiURL := "https://atcr.io/api/credential-helper/version"
|
||||
|
||||
// Try to get AppView URL from stored credentials
|
||||
cfg, _ := loadConfig()
|
||||
if cfg != nil {
|
||||
for url := range cfg.Registries {
|
||||
apiURL = url + "/api/credential-helper/version"
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
versionInfo, err := fetchVersionInfo(apiURL)
|
||||
latest, err := fetchLatestVersion()
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking for updates: %w", err)
|
||||
}
|
||||
|
||||
if !isNewerVersion(versionInfo.Latest, version) {
|
||||
if !isNewerVersion(latest, version) {
|
||||
fmt.Printf("You're already running the latest version (%s)\n", version)
|
||||
return nil
|
||||
}
|
||||
|
||||
fmt.Printf("New version available: %s (current: %s)\n", versionInfo.Latest, version)
|
||||
fmt.Printf("New version available: %s (current: %s)\n", latest, version)
|
||||
|
||||
if checkOnly {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := performUpdate(versionInfo); err != nil {
|
||||
if err := performUpdate(latest); err != nil {
|
||||
return fmt.Errorf("update failed: %w", err)
|
||||
}
|
||||
|
||||
@@ -73,28 +59,42 @@ func runUpdate(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// fetchVersionInfo fetches version info from the AppView API
|
||||
func fetchVersionInfo(apiURL string) (*VersionAPIResponse, error) {
|
||||
client := &http.Client{
|
||||
Timeout: 10 * time.Second,
|
||||
}
|
||||
// fetchLatestVersion resolves the latest released version by reading the
|
||||
// redirect Location header of {tangledReleasesBase}/tags/latest.
|
||||
func fetchLatestVersion() (string, error) {
|
||||
client := httpClientWithTimeout(10*time.Second, func(req *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
})
|
||||
|
||||
resp, err := client.Get(apiURL)
|
||||
resp, err := client.Get(tangledReleasesBase + "/tags/latest")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("fetching version info: %w", err)
|
||||
return "", fmt.Errorf("fetching latest tag: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("version API returned status %d", resp.StatusCode)
|
||||
switch resp.StatusCode {
|
||||
case http.StatusMovedPermanently, http.StatusFound, http.StatusSeeOther,
|
||||
http.StatusTemporaryRedirect, http.StatusPermanentRedirect:
|
||||
default:
|
||||
return "", fmt.Errorf("expected redirect from tags/latest, got status %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var versionInfo VersionAPIResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&versionInfo); err != nil {
|
||||
return nil, fmt.Errorf("parsing version info: %w", err)
|
||||
location := resp.Header.Get("Location")
|
||||
if location == "" {
|
||||
return "", fmt.Errorf("tags/latest returned redirect with no Location header")
|
||||
}
|
||||
|
||||
return &versionInfo, nil
|
||||
u, err := url.Parse(location)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("parsing redirect location %q: %w", location, err)
|
||||
}
|
||||
|
||||
tag := path.Base(u.Path)
|
||||
if !strings.HasPrefix(tag, "v") {
|
||||
return "", fmt.Errorf("unexpected tag in redirect location %q", location)
|
||||
}
|
||||
|
||||
return tag, nil
|
||||
}
|
||||
|
||||
// isNewerVersion compares two version strings (simple semver comparison)
|
||||
@@ -130,21 +130,30 @@ func isNewerVersion(newVersion, currentVersion string) bool {
|
||||
return len(newParts) > len(curParts)
|
||||
}
|
||||
|
||||
// getPlatformKey returns the platform key for the current OS/arch
|
||||
func getPlatformKey() string {
|
||||
return fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)
|
||||
// goreleaserArchiveName returns the archive filename goreleaser publishes for
|
||||
// the given version and the current platform. The naming template lives in
|
||||
// .goreleaser.yaml: docker-credential-atcr_{Version}_{Title(OS)}_{Arch} with
|
||||
// amd64→x86_64 and 386→i386.
|
||||
func goreleaserArchiveName(version string) string {
|
||||
versionNoV := strings.TrimPrefix(version, "v")
|
||||
|
||||
os := strings.ToUpper(runtime.GOOS[:1]) + runtime.GOOS[1:]
|
||||
|
||||
arch := runtime.GOARCH
|
||||
switch arch {
|
||||
case "amd64":
|
||||
arch = "x86_64"
|
||||
case "386":
|
||||
arch = "i386"
|
||||
}
|
||||
|
||||
return fmt.Sprintf("docker-credential-atcr_%s_%s_%s.tar.gz", versionNoV, os, arch)
|
||||
}
|
||||
|
||||
// performUpdate downloads and installs the new version
|
||||
func performUpdate(versionInfo *VersionAPIResponse) error {
|
||||
platformKey := getPlatformKey()
|
||||
|
||||
downloadURL, ok := versionInfo.DownloadURLs[platformKey]
|
||||
if !ok {
|
||||
return fmt.Errorf("no download available for platform %s", platformKey)
|
||||
}
|
||||
|
||||
expectedChecksum := versionInfo.Checksums[platformKey]
|
||||
func performUpdate(latest string) error {
|
||||
filename := goreleaserArchiveName(latest)
|
||||
downloadURL := fmt.Sprintf("%s/tags/%s/download/%s", tangledReleasesBase, latest, filename)
|
||||
|
||||
fmt.Printf("Downloading update from %s...\n", downloadURL)
|
||||
|
||||
@@ -155,34 +164,17 @@ func performUpdate(versionInfo *VersionAPIResponse) error {
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
archivePath := filepath.Join(tmpDir, "archive.tar.gz")
|
||||
if strings.HasSuffix(downloadURL, ".zip") {
|
||||
archivePath = filepath.Join(tmpDir, "archive.zip")
|
||||
}
|
||||
|
||||
if err := downloadFile(downloadURL, archivePath); err != nil {
|
||||
return fmt.Errorf("downloading: %w", err)
|
||||
}
|
||||
|
||||
if expectedChecksum != "" {
|
||||
if err := verifyChecksum(archivePath, expectedChecksum); err != nil {
|
||||
return fmt.Errorf("checksum verification failed: %w", err)
|
||||
}
|
||||
fmt.Println("Checksum verified.")
|
||||
}
|
||||
|
||||
binaryPath := filepath.Join(tmpDir, "docker-credential-atcr")
|
||||
if runtime.GOOS == "windows" {
|
||||
binaryPath += ".exe"
|
||||
}
|
||||
|
||||
if strings.HasSuffix(archivePath, ".zip") {
|
||||
if err := extractZip(archivePath, tmpDir); err != nil {
|
||||
return fmt.Errorf("extracting archive: %w", err)
|
||||
}
|
||||
} else {
|
||||
if err := extractTarGz(archivePath, tmpDir); err != nil {
|
||||
return fmt.Errorf("extracting archive: %w", err)
|
||||
}
|
||||
if err := extractTarGz(archivePath, tmpDir); err != nil {
|
||||
return fmt.Errorf("extracting archive: %w", err)
|
||||
}
|
||||
|
||||
currentPath, err := os.Executable()
|
||||
@@ -224,7 +216,7 @@ func performUpdate(versionInfo *VersionAPIResponse) error {
|
||||
|
||||
// downloadFile downloads a file from a URL to a local path
|
||||
func downloadFile(url, destPath string) error {
|
||||
resp, err := http.Get(url) //nolint:gosec
|
||||
resp, err := httpClient().Get(url) //nolint:gosec
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -244,15 +236,6 @@ func downloadFile(url, destPath string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// verifyChecksum verifies the SHA256 checksum of a file
|
||||
func verifyChecksum(filePath, expected string) error {
|
||||
if expected == "" {
|
||||
return nil
|
||||
}
|
||||
// Checksums are optional until configured
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractTarGz extracts a .tar.gz archive
|
||||
func extractTarGz(archivePath, destDir string) error {
|
||||
cmd := exec.Command("tar", "-xzf", archivePath, "-C", destDir)
|
||||
@@ -262,15 +245,6 @@ func extractTarGz(archivePath, destDir string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractZip extracts a .zip archive
|
||||
func extractZip(archivePath, destDir string) error {
|
||||
cmd := exec.Command("unzip", "-o", archivePath, "-d", destDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("unzip failed: %s: %w", string(output), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// copyFile copies a file from src to dst
|
||||
func copyFile(src, dst string) error {
|
||||
input, err := os.ReadFile(src)
|
||||
|
||||
@@ -57,7 +57,7 @@ func requestDeviceCode(serverURL string) (*DeviceCodeResponse, string, error) {
|
||||
deviceName := hostname()
|
||||
|
||||
reqBody, _ := json.Marshal(DeviceCodeRequest{DeviceName: deviceName})
|
||||
resp, err := http.Post(appViewURL+"/auth/device/code", "application/json", bytes.NewReader(reqBody))
|
||||
resp, err := httpClient().Post(appViewURL+"/auth/device/code", "application/json", bytes.NewReader(reqBody))
|
||||
if err != nil {
|
||||
return nil, appViewURL, fmt.Errorf("failed to request device code: %w", err)
|
||||
}
|
||||
@@ -88,7 +88,7 @@ func pollDeviceToken(appViewURL string, codeResp *DeviceCodeResponse) (*Account,
|
||||
time.Sleep(pollInterval)
|
||||
|
||||
tokenReqBody, _ := json.Marshal(DeviceTokenRequest{DeviceCode: codeResp.DeviceCode})
|
||||
tokenResp, err := http.Post(appViewURL+"/auth/device/token", "application/json", bytes.NewReader(tokenReqBody))
|
||||
tokenResp, err := httpClient().Post(appViewURL+"/auth/device/token", "application/json", bytes.NewReader(tokenReqBody))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
@@ -120,9 +120,7 @@ func pollDeviceToken(appViewURL string, codeResp *DeviceCodeResponse) (*Account,
|
||||
|
||||
// validateCredentials checks if the credentials are still valid by making a test request
|
||||
func validateCredentials(appViewURL, handle, deviceSecret string) ValidationResult {
|
||||
client := &http.Client{
|
||||
Timeout: 5 * time.Second,
|
||||
}
|
||||
client := httpClientWithTimeout(5*time.Second, nil)
|
||||
|
||||
tokenURL := appViewURL + "/auth/token?service=" + appViewURL
|
||||
|
||||
|
||||
75 cmd/credential-helper/http.go (new file)
@@ -0,0 +1,75 @@
package main

import (
	"fmt"
	"net/http"
	"runtime"
	"sync"
	"time"
)

// userAgent returns the User-Agent string for outgoing HTTP requests.
//
// Format: docker-credential-atcr/<version> (<os>/<arch>; commit <short>)
//
// Format follows the convention Docker's own clients use, so it parses
// cleanly with the same regexes server-side log analyzers already
// understand. The commit suffix lets users on the device-approval page
// distinguish two devices on the same version line if they ever need to.
func userAgent() string {
	short := commit
	if len(short) > 7 {
		short = short[:7]
	}
	return fmt.Sprintf("docker-credential-atcr/%s (%s/%s; commit %s)",
		version, runtime.GOOS, runtime.GOARCH, short)
}

// uaTransport wraps another RoundTripper and sets the User-Agent header
// on every request that doesn't already carry one. Used as the default
// transport for the helper's shared http.Client so we can't forget to
// set the UA on a future call site.
type uaTransport struct {
	base http.RoundTripper
}

func (t *uaTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	if req.Header.Get("User-Agent") == "" {
		// Clone before mutating: net/http may retry a request and the
		// caller could be using the same *Request elsewhere.
		clone := req.Clone(req.Context())
		clone.Header.Set("User-Agent", userAgent())
		req = clone
	}
	base := t.base
	if base == nil {
		base = http.DefaultTransport
	}
	return base.RoundTrip(req)
}

var sharedHTTPClient = sync.OnceValue(func() *http.Client {
	return &http.Client{
		Transport: &uaTransport{base: http.DefaultTransport},
	}
})

// httpClient returns the shared UA-tagged http.Client used for all of
// the helper's outgoing HTTP requests. It carries no per-request
// timeout — call sites that want one should use httpClientWithTimeout.
func httpClient() *http.Client {
	return sharedHTTPClient()
}

// httpClientWithTimeout returns a fresh client that shares the shared
// transport (so connection pooling and the UA header are preserved) but
// scopes a per-client timeout. CheckRedirect can be supplied for cases
// like fetchLatestVersion that need to inspect a redirect rather than
// follow it.
func httpClientWithTimeout(timeout time.Duration, checkRedirect func(*http.Request, []*http.Request) error) *http.Client {
	return &http.Client{
		Transport:     sharedHTTPClient().Transport,
		Timeout:       timeout,
		CheckRedirect: checkRedirect,
	}
}
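A short usage sketch of the helpers above, mirroring how the update command inspects (rather than follows) the redirect from `/tags/latest`. It assumes the same package and the `net/http` and `time` imports already present in this file:

```go
// latestTagLocation returns the Location header of the redirect that
// /tags/latest issues, without following it.
func latestTagLocation(base string) (string, error) {
	client := httpClientWithTimeout(10*time.Second, func(req *http.Request, via []*http.Request) error {
		return http.ErrUseLastResponse // surface the redirect to the caller
	})
	resp, err := client.Get(base + "/tags/latest")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	return resp.Header.Get("Location"), nil
}
```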
70 cmd/credential-helper/http_test.go (new file)
@@ -0,0 +1,70 @@
package main

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
)

func TestUserAgent_Format(t *testing.T) {
	old := commit
	commit = "abc1234deadbeef"
	t.Cleanup(func() { commit = old })

	ua := userAgent()
	if !strings.HasPrefix(ua, "docker-credential-atcr/") {
		t.Errorf("UA missing product prefix: %q", ua)
	}
	if !strings.Contains(ua, "commit abc1234)") {
		t.Errorf("UA should truncate commit to 7 chars, got %q", ua)
	}
	if strings.Contains(ua, "Go-http-client") {
		t.Errorf("UA leaked default Go client string: %q", ua)
	}
}

func TestHTTPClient_SetsUserAgent(t *testing.T) {
	var got string
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		got = r.Header.Get("User-Agent")
		w.WriteHeader(http.StatusNoContent)
	}))
	t.Cleanup(srv.Close)

	resp, err := httpClient().Get(srv.URL)
	if err != nil {
		t.Fatalf("get: %v", err)
	}
	resp.Body.Close()

	want := userAgent()
	if got != want {
		t.Errorf("server saw User-Agent %q, want %q", got, want)
	}
}

func TestHTTPClient_RespectsExplicitUserAgent(t *testing.T) {
	var got string
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		got = r.Header.Get("User-Agent")
		w.WriteHeader(http.StatusNoContent)
	}))
	t.Cleanup(srv.Close)

	req, err := http.NewRequest("GET", srv.URL, nil)
	if err != nil {
		t.Fatalf("new request: %v", err)
	}
	req.Header.Set("User-Agent", "explicit-test/1.0")

	resp, err := httpClient().Do(req)
	if err != nil {
		t.Fatalf("do: %v", err)
	}
	resp.Body.Close()

	if got != "explicit-test/1.0" {
		t.Errorf("explicit UA was overwritten: got %q", got)
	}
}
@@ -105,7 +105,7 @@ func runGet(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// Check for updates (cached, non-blocking)
|
||||
checkAndNotifyUpdate(appViewURL)
|
||||
checkAndNotifyUpdate()
|
||||
|
||||
// Return credentials for Docker
|
||||
creds := Credentials{
|
||||
@@ -200,7 +200,7 @@ func runList(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// checkAndNotifyUpdate checks for updates in the background and notifies the user
|
||||
func checkAndNotifyUpdate(appViewURL string) {
|
||||
func checkAndNotifyUpdate() {
|
||||
cache := loadUpdateCheckCache()
|
||||
if cache != nil && cache.Current == version {
|
||||
// Cache is fresh and for current version
|
||||
@@ -214,21 +214,19 @@ func checkAndNotifyUpdate(appViewURL string) {
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch version info
|
||||
apiURL := appViewURL + "/api/credential-helper/version"
|
||||
versionInfo, err := fetchVersionInfo(apiURL)
|
||||
latest, err := fetchLatestVersion()
|
||||
if err != nil {
|
||||
return // Silently fail
|
||||
}
|
||||
|
||||
saveUpdateCheckCache(&UpdateCheckCache{
|
||||
CheckedAt: timeNow(),
|
||||
Latest: versionInfo.Latest,
|
||||
Latest: latest,
|
||||
Current: version,
|
||||
})
|
||||
|
||||
if isNewerVersion(versionInfo.Latest, version) {
|
||||
fmt.Fprintf(os.Stderr, "\nUpdate available: %s (current: %s)\n", versionInfo.Latest, version)
|
||||
if isNewerVersion(latest, version) {
|
||||
fmt.Fprintf(os.Stderr, "\nUpdate available: %s (current: %s)\n", latest, version)
|
||||
fmt.Fprintf(os.Stderr, "Run: docker-credential-atcr update\n\n")
|
||||
}
|
||||
}
|
||||
|
||||
271 cmd/hold/plc.go
@@ -5,12 +5,11 @@ import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
|
||||
"atcr.io/pkg/atproto/did"
|
||||
"atcr.io/pkg/auth/oauth"
|
||||
"atcr.io/pkg/hold"
|
||||
"atcr.io/pkg/hold/pds"
|
||||
|
||||
"github.com/bluesky-social/indigo/atproto/atcrypto"
|
||||
didplc "github.com/did-method-plc/go-didplc"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@@ -21,144 +20,210 @@ var plcCmd = &cobra.Command{
|
||||
|
||||
var plcConfigFile string
|
||||
|
||||
var (
|
||||
plcAddRotationKeyFirst bool
|
||||
plcAddRotationKeyLast bool
|
||||
)
|
||||
|
||||
var plcAddRotationKeyCmd = &cobra.Command{
|
||||
Use: "add-rotation-key <multibase-key>",
|
||||
Use: "add-rotation-key [multibase-key]",
|
||||
Short: "Add a rotation key to this hold's PLC identity",
|
||||
Long: `Add an additional rotation key to the hold's did:plc document.
|
||||
The key must be a multibase-encoded private key (K-256 or P-256, starting with 'z').
|
||||
|
||||
If a multibase-encoded private key (K-256 or P-256, starting with 'z') is supplied as
|
||||
the positional argument, that key is added. If no argument is given, a fresh K-256
|
||||
keypair is generated and the private half is printed to stdout. Save it offline as
|
||||
your recovery key, since it will not be shown again.
|
||||
|
||||
By default the new key is inserted at the highest priority position (--first), which
|
||||
allows it to override ops signed by lower-priority keys within PLC's 72-hour recovery
|
||||
window. Pass --last to append at the lowest priority instead.
|
||||
|
||||
The hold's configured rotation key is used to sign the PLC update.
|
||||
|
||||
atcr-hold plc add-rotation-key --config config.yaml z...`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
atcr-hold plc add-rotation-key --config config.yaml # generate + print
|
||||
atcr-hold plc add-rotation-key --config config.yaml --last # append, low priority
|
||||
atcr-hold plc add-rotation-key --config config.yaml z... # use supplied key`,
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
firstSet := cmd.Flags().Changed("first")
|
||||
lastSet := cmd.Flags().Changed("last")
|
||||
if firstSet && lastSet {
|
||||
return fmt.Errorf("--first and --last are mutually exclusive")
|
||||
}
|
||||
prepend := !plcAddRotationKeyLast
|
||||
|
||||
cfg, err := hold.LoadConfig(plcConfigFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load config: %w", err)
|
||||
}
|
||||
|
||||
if cfg.Database.DIDMethod != "plc" {
|
||||
return fmt.Errorf("this command only works with did:plc (database.did_method is %q)", cfg.Database.DIDMethod)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Resolve the hold's DID
|
||||
holdDID, err := pds.LoadOrCreateDID(ctx, pds.DIDConfig{
|
||||
DID: cfg.Database.DID,
|
||||
DIDMethod: cfg.Database.DIDMethod,
|
||||
PublicURL: cfg.Server.PublicURL,
|
||||
DBPath: cfg.Database.Path,
|
||||
SigningKeyPath: cfg.Database.KeyPath,
|
||||
RotationKey: cfg.Database.RotationKey,
|
||||
PLCDirectoryURL: cfg.Database.PLCDirectoryURL,
|
||||
})
|
||||
holdDID, rotationKey, signingKey, err := loadHoldPLCIdentity(ctx, cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to resolve hold DID: %w", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse the rotation key from config (required for signing PLC updates)
|
||||
if cfg.Database.RotationKey == "" {
|
||||
return fmt.Errorf("database.rotation_key must be set to sign PLC updates")
|
||||
}
|
||||
rotationKey, err := atcrypto.ParsePrivateMultibase(cfg.Database.RotationKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse rotation_key from config: %w", err)
|
||||
}
|
||||
|
||||
// Parse the new key to add (K-256 or P-256)
|
||||
newKey, err := atcrypto.ParsePrivateMultibase(args[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse key argument: %w", err)
|
||||
}
|
||||
newKeyPub, err := newKey.PublicKey()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get public key from argument: %w", err)
|
||||
}
|
||||
newKeyDIDKey := newKeyPub.DIDKey()
|
||||
|
||||
// Load signing key for verification methods
|
||||
keyPath := cfg.Database.KeyPath
|
||||
if keyPath == "" {
|
||||
keyPath = cfg.Database.Path + "/signing.key"
|
||||
}
|
||||
signingKey, err := oauth.GenerateOrLoadPDSKey(keyPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load signing key: %w", err)
|
||||
}
|
||||
|
||||
// Fetch current PLC state
|
||||
plcDirectoryURL := cfg.Database.PLCDirectoryURL
|
||||
if plcDirectoryURL == "" {
|
||||
plcDirectoryURL = "https://plc.directory"
|
||||
}
|
||||
client := &didplc.Client{DirectoryURL: plcDirectoryURL}
|
||||
|
||||
opLog, err := client.OpLog(ctx, holdDID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch PLC op log: %w", err)
|
||||
}
|
||||
if len(opLog) == 0 {
|
||||
return fmt.Errorf("empty op log for %s", holdDID)
|
||||
}
|
||||
|
||||
lastEntry := opLog[len(opLog)-1]
|
||||
lastOp := lastEntry.Regular
|
||||
if lastOp == nil {
|
||||
return fmt.Errorf("last PLC operation is not a regular op")
|
||||
}
|
||||
|
||||
// Check if key already present
|
||||
for _, k := range lastOp.RotationKeys {
|
||||
if k == newKeyDIDKey {
|
||||
fmt.Printf("Key %s is already a rotation key for %s\n", newKeyDIDKey, holdDID)
|
||||
return nil
|
||||
var newKey atcrypto.PrivateKeyExportable
|
||||
if len(args) == 1 {
|
||||
newKey, err = atcrypto.ParsePrivateMultibase(args[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse key argument: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Build updated rotation keys: keep existing, append new
|
||||
rotationKeys := make([]string, len(lastOp.RotationKeys))
|
||||
copy(rotationKeys, lastOp.RotationKeys)
|
||||
rotationKeys = append(rotationKeys, newKeyDIDKey)
|
||||
|
||||
// Build update: preserve everything else from current state
|
||||
sigPub, err := signingKey.PublicKey()
|
||||
res, err := did.AddRotationKey(ctx, did.AddRotationKeyOptions{
|
||||
DID: holdDID,
|
||||
PLCDirectoryURL: cfg.Database.PLCDirectoryURL,
|
||||
RotationKey: rotationKey,
|
||||
SigningKey: signingKey,
|
||||
VerificationKeyName: "atproto",
|
||||
NewKey: newKey,
|
||||
Prepend: prepend,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get signing public key: %w", err)
|
||||
return err
|
||||
}
|
||||
|
||||
prevCID := lastEntry.AsOperation().CID().String()
|
||||
|
||||
op := &didplc.RegularOp{
|
||||
Type: "plc_operation",
|
||||
RotationKeys: rotationKeys,
|
||||
VerificationMethods: map[string]string{
|
||||
"atproto": sigPub.DIDKey(),
|
||||
},
|
||||
AlsoKnownAs: lastOp.AlsoKnownAs,
|
||||
Services: lastOp.Services,
|
||||
Prev: &prevCID,
|
||||
if res.AlreadyPresent {
|
||||
fmt.Printf("Key %s is already a rotation key for %s (priority %d of %d)\n",
|
||||
res.NewKeyDIDKey, holdDID, res.ExistingAt, res.TotalKeys)
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := op.Sign(rotationKey); err != nil {
|
||||
return fmt.Errorf("failed to sign PLC update: %w", err)
|
||||
}
|
||||
|
||||
if err := client.Submit(ctx, holdDID, op); err != nil {
|
||||
return fmt.Errorf("failed to submit PLC update: %w", err)
|
||||
if res.Generated {
|
||||
fmt.Println("=========================================================================")
|
||||
fmt.Println("GENERATED NEW ROTATION KEY. SAVE THIS NOW. IT WILL NOT BE SHOWN AGAIN.")
|
||||
fmt.Println("Store it offline (password manager, paper, hardware token).")
|
||||
fmt.Println()
|
||||
fmt.Printf("Private key (multibase): %s\n", res.NewKey.Multibase())
|
||||
fmt.Printf("Public key (did:key): %s\n", res.NewKeyDIDKey)
|
||||
fmt.Println("=========================================================================")
|
||||
}
|
||||
|
||||
slog.Info("Added rotation key to PLC identity",
|
||||
"did", holdDID,
|
||||
"new_key", newKeyDIDKey,
|
||||
"total_rotation_keys", len(rotationKeys),
|
||||
"new_key", res.NewKeyDIDKey,
|
||||
"priority", res.InsertedAt,
|
||||
"total_rotation_keys", res.TotalKeys,
|
||||
"generated", res.Generated,
|
||||
)
|
||||
fmt.Printf("Added rotation key %s to %s\n", newKeyDIDKey, holdDID)
|
||||
fmt.Printf("Added rotation key %s to %s (priority %d of %d)\n",
|
||||
res.NewKeyDIDKey, holdDID, res.InsertedAt, res.TotalKeys)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var plcListRotationKeysCmd = &cobra.Command{
|
||||
Use: "list-rotation-keys",
|
||||
Short: "List rotation keys in this hold's PLC document",
|
||||
Long: `Fetch the hold's did:plc document from the PLC directory and print its
|
||||
rotation keys in priority order (index 0 is highest priority and can override
|
||||
ops signed by lower-priority keys within PLC's 72-hour recovery window).
|
||||
|
||||
The key matching the local database.rotation_key is marked as LOCAL.`,
|
||||
Args: cobra.NoArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
cfg, err := hold.LoadConfig(plcConfigFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load config: %w", err)
|
||||
}
|
||||
if cfg.Database.DIDMethod != "plc" {
|
||||
return fmt.Errorf("this command only works with did:plc (database.did_method is %q)", cfg.Database.DIDMethod)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
holdDID, err := did.LoadOrCreate(ctx, cfg.DIDConfig())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to resolve hold DID: %w", err)
|
||||
}
|
||||
|
||||
var localRotationKey atcrypto.PrivateKey
|
||||
if cfg.Database.RotationKey != "" {
|
||||
localRotationKey, err = atcrypto.ParsePrivateMultibase(cfg.Database.RotationKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse rotation_key from config: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
res, err := did.ListRotationKeys(ctx, did.ListRotationKeysOptions{
|
||||
DID: holdDID,
|
||||
PLCDirectoryURL: cfg.Database.PLCDirectoryURL,
|
||||
LocalRotationKey: localRotationKey,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
printRotationKeys(res)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// loadHoldPLCIdentity is the shared "load DID + rotation key + signing key" helper used
|
||||
// by every PLC command. It enforces that database.rotation_key is set since every PLC
|
||||
// command needs a rotation key to either sign updates or verify the LOCAL marker.
|
||||
func loadHoldPLCIdentity(ctx context.Context, cfg *hold.Config) (string, atcrypto.PrivateKey, *atcrypto.PrivateKeyK256, error) {
|
||||
holdDID, err := did.LoadOrCreate(ctx, cfg.DIDConfig())
|
||||
if err != nil {
|
||||
return "", nil, nil, fmt.Errorf("failed to resolve hold DID: %w", err)
|
||||
}
|
||||
|
||||
if cfg.Database.RotationKey == "" {
|
||||
return "", nil, nil, fmt.Errorf("database.rotation_key must be set to sign PLC updates")
|
||||
}
|
||||
rotationKey, err := atcrypto.ParsePrivateMultibase(cfg.Database.RotationKey)
|
||||
if err != nil {
|
||||
return "", nil, nil, fmt.Errorf("failed to parse rotation_key from config: %w", err)
|
||||
}
|
||||
|
||||
keyPath := cfg.Database.KeyPath
|
||||
if keyPath == "" {
|
||||
keyPath = cfg.Database.Path + "/signing.key"
|
||||
}
|
||||
signingKey, err := oauth.GenerateOrLoadPDSKey(keyPath)
|
||||
if err != nil {
|
||||
return "", nil, nil, fmt.Errorf("failed to load signing key: %w", err)
|
||||
}
|
||||
return holdDID, rotationKey, signingKey, nil
|
||||
}
|
||||
|
||||
// printRotationKeys is the shared CLI output for `list-rotation-keys`.
|
||||
func printRotationKeys(res *did.ListRotationKeysResult) {
|
||||
fmt.Printf("DID: %s\n", res.DID)
|
||||
fmt.Printf("PLC directory: %s\n", res.Directory)
|
||||
fmt.Printf("Rotation keys (%d):\n", len(res.Keys))
|
||||
for i, k := range res.Keys {
|
||||
marker := ""
|
||||
switch {
|
||||
case len(res.Keys) == 1:
|
||||
marker = "(only key)"
|
||||
case i == 0:
|
||||
marker = "(highest priority)"
|
||||
case i == len(res.Keys)-1:
|
||||
marker = "(lowest priority)"
|
||||
}
|
||||
localTag := ""
|
||||
if res.LocalDIDKey != "" && k == res.LocalDIDKey {
|
||||
localTag = " [LOCAL — database.rotation_key]"
|
||||
}
|
||||
fmt.Printf(" [%d] %s %s%s\n", i, k, marker, localTag)
|
||||
}
|
||||
|
||||
if res.LocalDIDKey != "" && !res.LocalPresent {
|
||||
fmt.Printf("\nWARNING: local rotation_key (%s) is NOT present in the PLC document.\n", res.LocalDIDKey)
|
||||
fmt.Println("This service cannot sign PLC updates. Possible compromise or out-of-band rotation.")
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
plcCmd.PersistentFlags().StringVarP(&plcConfigFile, "config", "c", "", "path to YAML configuration file")
|
||||
|
||||
plcAddRotationKeyCmd.Flags().BoolVar(&plcAddRotationKeyFirst, "first", true, "insert at highest priority (default)")
|
||||
plcAddRotationKeyCmd.Flags().BoolVar(&plcAddRotationKeyLast, "last", false, "insert at lowest priority")
|
||||
|
||||
plcCmd.AddCommand(plcAddRotationKeyCmd)
|
||||
plcCmd.AddCommand(plcListRotationKeysCmd)
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"log/slog"
|
||||
"os"
|
||||
|
||||
"atcr.io/pkg/atproto/did"
|
||||
"atcr.io/pkg/hold"
|
||||
holddb "atcr.io/pkg/hold/db"
|
||||
"atcr.io/pkg/hold/pds"
|
||||
@@ -39,7 +40,7 @@ The CAR is written to stdout, so redirect to a file:
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
if err := holdPDS.ExportToCAR(ctx, os.Stdout); err != nil {
|
||||
if err := holdPDS.RepomgrRef().ReadRepo(ctx, holdPDS.UID(), "", os.Stdout); err != nil {
|
||||
return fmt.Errorf("failed to export: %w", err)
|
||||
}
|
||||
|
||||
@@ -105,15 +106,7 @@ func init() {
|
||||
// openHoldPDS creates a HoldPDS from config for offline CLI operations.
|
||||
// Returns the PDS and a cleanup function that must be deferred.
|
||||
func openHoldPDS(ctx context.Context, cfg *hold.Config) (*pds.HoldPDS, func(), error) {
|
||||
holdDID, err := pds.LoadOrCreateDID(ctx, pds.DIDConfig{
|
||||
DID: cfg.Database.DID,
|
||||
DIDMethod: cfg.Database.DIDMethod,
|
||||
PublicURL: cfg.Server.PublicURL,
|
||||
DBPath: cfg.Database.Path,
|
||||
SigningKeyPath: cfg.Database.KeyPath,
|
||||
RotationKey: cfg.Database.RotationKey,
|
||||
PLCDirectoryURL: cfg.Database.PLCDirectoryURL,
|
||||
})
|
||||
holdDID, err := did.LoadOrCreate(ctx, cfg.DIDConfig())
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to resolve hold DID: %w", err)
|
||||
}
|
||||
|
||||
68 cmd/hold/scan_backfill.go (new file)
@@ -0,0 +1,68 @@
package main

import (
	"context"
	"fmt"

	"atcr.io/pkg/hold"

	"github.com/spf13/cobra"
)

var scanBackfillConfigFile string

var scanBackfillCmd = &cobra.Command{
	Use:   "scan-backfill",
	Short: "Rewrite legacy scan records to use the status field (offline)",
	Long: `Walks every io.atcr.hold.scan record on this hold and assigns a status
("skipped" or "failed") to records that pre-date the status field.

A legacy record is one with an empty status, no SBOM blob, and zero
vulnerability counts. Layer media types decide the rewrite:

  - helm.chart.content / in-toto / dsse.envelope → status="skipped"
  - everything else → status="failed"

The tool is idempotent and preserves each record's original scannedAt.

This subcommand opens the hold's CAR store directly, so the running hold
service must be stopped first (otherwise the embedded PDS holds an exclusive
lock). For zero-downtime backfill on a production hold, hit the admin
endpoint POST /admin/api/scan-backfill instead.`,
	Args: cobra.NoArgs,
	RunE: func(cmd *cobra.Command, args []string) error {
		cfg, err := hold.LoadConfig(scanBackfillConfigFile)
		if err != nil {
			return fmt.Errorf("load config: %w", err)
		}

		ctx := context.Background()
		holdPDS, cleanup, err := openHoldPDS(ctx, cfg)
		if err != nil {
			return err
		}
		defer cleanup()

		logf := func(format string, args ...any) {
			fmt.Fprintf(cmd.ErrOrStderr(), " "+format+"\n", args...)
		}
		res, err := holdPDS.BackfillScanStatus(ctx, logf, nil)
		if err != nil {
			return fmt.Errorf("backfill: %w", err)
		}

		out := cmd.OutOrStdout()
		fmt.Fprintf(out, "Backfill complete:\n")
		fmt.Fprintf(out, " scanned: %d\n", res.Scanned)
		fmt.Fprintf(out, " already-tagged: %d\n", res.AlreadyTagged)
		fmt.Fprintf(out, " → skipped: %d\n", res.MarkedSkipped)
		fmt.Fprintf(out, " → failed: %d\n", res.MarkedFailed)
		fmt.Fprintf(out, " rewritten: %d\n", res.Rewritten)
		return nil
	},
}

func init() {
	scanBackfillCmd.Flags().StringVarP(&scanBackfillConfigFile, "config", "c", "", "path to YAML configuration file")
	rootCmd.AddCommand(scanBackfillCmd)
}
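The skipped/failed rule from the help text above, as a standalone illustration. The real classification lives in `holdPDS.BackfillScanStatus`, which is not part of this diff, so the package, function, and parameter names below are hypothetical:

```go
package backfill

import "strings"

// legacyScanStatus applies the backfill rule described above: layers that
// are Helm charts, in-toto attestations, or DSSE envelopes were never
// scannable, so the record becomes "skipped"; everything else becomes
// "failed".
func legacyScanStatus(layerMediaTypes []string) string {
	for _, mt := range layerMediaTypes {
		if strings.Contains(mt, "helm.chart.content") ||
			strings.Contains(mt, "in-toto") ||
			strings.Contains(mt, "dsse.envelope") {
			return "skipped"
		}
	}
	return "failed"
}
```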
83 cmd/labeler/main.go (new file)
@@ -0,0 +1,83 @@
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"

	"atcr.io/pkg/labeler"
)

var configFile string

var rootCmd = &cobra.Command{
	Use:   "atcr-labeler",
	Short: "ATCR Labeler Service - ATProto content moderation",
}

var serveCmd = &cobra.Command{
	Use:   "serve",
	Short: "Start the labeler service",
	Long: `Start the ATCR labeler service with admin UI and subscribeLabels endpoint.

Configuration is loaded from the appview config YAML (labeler section).
Use --config to specify the config file path.`,
	Args: cobra.NoArgs,
	RunE: func(cmd *cobra.Command, args []string) error {
		cfg, err := labeler.LoadConfig(configFile)
		if err != nil {
			return fmt.Errorf("failed to load config: %w", err)
		}

		server, err := labeler.NewServer(cfg)
		if err != nil {
			return fmt.Errorf("failed to initialize labeler: %w", err)
		}

		return server.Serve()
	},
}

var configCmd = &cobra.Command{
	Use:   "config",
	Short: "Configuration management commands",
}

var configInitCmd = &cobra.Command{
	Use:   "init [path]",
	Short: "Generate an example configuration file",
	Long:  `Generate an example YAML configuration file with all available options.`,
	Args:  cobra.MaximumNArgs(1),
	RunE: func(cmd *cobra.Command, args []string) error {
		yamlBytes, err := labeler.ExampleYAML()
		if err != nil {
			return fmt.Errorf("failed to generate example config: %w", err)
		}
		if len(args) == 1 {
			if err := os.WriteFile(args[0], yamlBytes, 0644); err != nil {
				return fmt.Errorf("failed to write config file: %w", err)
			}
			fmt.Fprintf(os.Stderr, "Wrote example config to %s\n", args[0])
			return nil
		}
		fmt.Print(string(yamlBytes))
		return nil
	},
}

func init() {
	serveCmd.Flags().StringVarP(&configFile, "config", "c", "", "path to YAML configuration file")

	configCmd.AddCommand(configInitCmd)

	rootCmd.AddCommand(serveCmd)
	rootCmd.AddCommand(configCmd)
	rootCmd.AddCommand(plcCmd)
}

func main() {
	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}
225 cmd/labeler/plc.go (new file)
@@ -0,0 +1,225 @@
|
||||
package main

import (
	"context"
	"fmt"
	"log/slog"

	"atcr.io/pkg/atproto/did"
	"atcr.io/pkg/auth/oauth"
	"atcr.io/pkg/labeler"

	"github.com/bluesky-social/indigo/atproto/atcrypto"
	"github.com/spf13/cobra"
)

var plcCmd = &cobra.Command{
	Use: "plc",
	Short: "PLC directory management commands",
}

var plcConfigFile string

var (
	plcAddRotationKeyFirst bool
	plcAddRotationKeyLast bool
)

var plcAddRotationKeyCmd = &cobra.Command{
	Use: "add-rotation-key [multibase-key]",
	Short: "Add a rotation key to this labeler's PLC identity",
	Long: `Add an additional rotation key to the labeler's did:plc document.

If a multibase-encoded private key (K-256 or P-256, starting with 'z') is supplied as
the positional argument, that key is added. If no argument is given, a fresh K-256
keypair is generated and the private half is printed to stdout. Save it offline as
your recovery key, since it will not be shown again.

By default the new key is inserted at the highest priority position (--first), which
allows it to override ops signed by lower-priority keys within PLC's 72-hour recovery
window. Pass --last to append at the lowest priority instead.

The labeler's configured rotation key is used to sign the PLC update.

atcr-labeler plc add-rotation-key --config config.yaml # generate + print
atcr-labeler plc add-rotation-key --config config.yaml --last # append, low priority
atcr-labeler plc add-rotation-key --config config.yaml z... # use supplied key`,
	Args: cobra.MaximumNArgs(1),
	RunE: func(cmd *cobra.Command, args []string) error {
		firstSet := cmd.Flags().Changed("first")
		lastSet := cmd.Flags().Changed("last")
		if firstSet && lastSet {
			return fmt.Errorf("--first and --last are mutually exclusive")
		}
		prepend := !plcAddRotationKeyLast

		cfg, err := labeler.LoadConfig(plcConfigFile)
		if err != nil {
			return fmt.Errorf("failed to load config: %w", err)
		}
		if cfg.Labeler.DIDMethod != "plc" {
			return fmt.Errorf("this command only works with did:plc (labeler.did_method is %q)", cfg.Labeler.DIDMethod)
		}

		ctx := context.Background()
		labelerDID, rotationKey, signingKey, err := loadLabelerPLCIdentity(ctx, cfg)
		if err != nil {
			return err
		}

		var newKey atcrypto.PrivateKeyExportable
		if len(args) == 1 {
			newKey, err = atcrypto.ParsePrivateMultibase(args[0])
			if err != nil {
				return fmt.Errorf("failed to parse key argument: %w", err)
			}
		}

		res, err := did.AddRotationKey(ctx, did.AddRotationKeyOptions{
			DID: labelerDID,
			PLCDirectoryURL: cfg.PLCDirectoryURL(),
			RotationKey: rotationKey,
			SigningKey: signingKey,
			VerificationKeyName: "atproto_label",
			NewKey: newKey,
			Prepend: prepend,
		})
		if err != nil {
			return err
		}

		if res.AlreadyPresent {
			fmt.Printf("Key %s is already a rotation key for %s (priority %d of %d)\n",
				res.NewKeyDIDKey, labelerDID, res.ExistingAt, res.TotalKeys)
			return nil
		}

		if res.Generated {
			fmt.Println("=========================================================================")
			fmt.Println("GENERATED NEW ROTATION KEY. SAVE THIS NOW. IT WILL NOT BE SHOWN AGAIN.")
			fmt.Println("Store it offline (password manager, paper, hardware token).")
			fmt.Println()
			fmt.Printf("Private key (multibase): %s\n", res.NewKey.Multibase())
			fmt.Printf("Public key (did:key): %s\n", res.NewKeyDIDKey)
			fmt.Println("=========================================================================")
		}

		slog.Info("Added rotation key to PLC identity",
			"did", labelerDID,
			"new_key", res.NewKeyDIDKey,
			"priority", res.InsertedAt,
			"total_rotation_keys", res.TotalKeys,
			"generated", res.Generated,
		)
		fmt.Printf("Added rotation key %s to %s (priority %d of %d)\n",
			res.NewKeyDIDKey, labelerDID, res.InsertedAt, res.TotalKeys)
		return nil
	},
}

var plcListRotationKeysCmd = &cobra.Command{
	Use: "list-rotation-keys",
	Short: "List rotation keys in this labeler's PLC document",
	Long: `Fetch the labeler's did:plc document from the PLC directory and print its
rotation keys in priority order (index 0 is highest priority and can override
ops signed by lower-priority keys within PLC's 72-hour recovery window).

The key matching the local labeler.rotation_key is marked as LOCAL.`,
	Args: cobra.NoArgs,
	RunE: func(cmd *cobra.Command, args []string) error {
		cfg, err := labeler.LoadConfig(plcConfigFile)
		if err != nil {
			return fmt.Errorf("failed to load config: %w", err)
		}
		if cfg.Labeler.DIDMethod != "plc" {
			return fmt.Errorf("this command only works with did:plc (labeler.did_method is %q)", cfg.Labeler.DIDMethod)
		}

		ctx := context.Background()
		labelerDID, _, _, err := loadLabelerPLCIdentity(ctx, cfg)
		if err != nil {
			return err
		}

		var localRotationKey atcrypto.PrivateKey
		if cfg.Labeler.RotationKey != "" {
			localRotationKey, err = atcrypto.ParsePrivateMultibase(cfg.Labeler.RotationKey)
			if err != nil {
				return fmt.Errorf("failed to parse rotation_key from config: %w", err)
			}
		}

		res, err := did.ListRotationKeys(ctx, did.ListRotationKeysOptions{
			DID: labelerDID,
			PLCDirectoryURL: cfg.PLCDirectoryURL(),
			LocalRotationKey: localRotationKey,
		})
		if err != nil {
			return err
		}
		printRotationKeys(res)
		return nil
	},
}

// loadLabelerPLCIdentity is the shared "load DID + rotation key + signing key" helper
// used by every PLC command. Mirrors loadHoldPLCIdentity over in cmd/hold/plc.go.
func loadLabelerPLCIdentity(ctx context.Context, cfg *labeler.Config) (string, atcrypto.PrivateKey, *atcrypto.PrivateKeyK256, error) {
	labelerDID, _, err := labeler.LoadIdentity(ctx, cfg)
	if err != nil {
		return "", nil, nil, err
	}

	if cfg.Labeler.RotationKey == "" {
		return "", nil, nil, fmt.Errorf("labeler.rotation_key must be set to sign PLC updates")
	}
	rotationKey, err := atcrypto.ParsePrivateMultibase(cfg.Labeler.RotationKey)
	if err != nil {
		return "", nil, nil, fmt.Errorf("failed to parse rotation_key from config: %w", err)
	}

	signingKey, err := oauth.GenerateOrLoadPDSKey(cfg.SigningKeyPath())
	if err != nil {
		return "", nil, nil, fmt.Errorf("failed to load signing key: %w", err)
	}
	return labelerDID, rotationKey, signingKey, nil
}

// printRotationKeys is the shared CLI output for list-rotation-keys, kept identical to
// the hold version since the formatting is service-agnostic.
func printRotationKeys(res *did.ListRotationKeysResult) {
	fmt.Printf("DID: %s\n", res.DID)
	fmt.Printf("PLC directory: %s\n", res.Directory)
	fmt.Printf("Rotation keys (%d):\n", len(res.Keys))
	for i, k := range res.Keys {
		marker := ""
		switch {
		case len(res.Keys) == 1:
			marker = "(only key)"
		case i == 0:
			marker = "(highest priority)"
		case i == len(res.Keys)-1:
			marker = "(lowest priority)"
		}
		localTag := ""
		if res.LocalDIDKey != "" && k == res.LocalDIDKey {
			localTag = " [LOCAL — labeler.rotation_key]"
		}
		fmt.Printf(" [%d] %s %s%s\n", i, k, marker, localTag)
	}

	if res.LocalDIDKey != "" && !res.LocalPresent {
		fmt.Printf("\nWARNING: local rotation_key (%s) is NOT present in the PLC document.\n", res.LocalDIDKey)
		fmt.Println("This service cannot sign PLC updates. Possible compromise or out-of-band rotation.")
	}
}

func init() {
	plcCmd.PersistentFlags().StringVarP(&plcConfigFile, "config", "c", "", "path to YAML configuration file")

	plcAddRotationKeyCmd.Flags().BoolVar(&plcAddRotationKeyFirst, "first", true, "insert at highest priority (default)")
	plcAddRotationKeyCmd.Flags().BoolVar(&plcAddRotationKeyLast, "last", false, "insert at lowest priority")

	plcCmd.AddCommand(plcAddRotationKeyCmd)
	plcCmd.AddCommand(plcListRotationKeysCmd)
}
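The PLC subcommands above are meant for day-2 key management against an already registered did:plc. A possible session (a sketch; the config path is hypothetical and "z..." stands in for a real multibase private key):

	atcr-labeler plc list-rotation-keys --config /etc/atcr/labeler.yaml
	atcr-labeler plc add-rotation-key --config /etc/atcr/labeler.yaml
	atcr-labeler plc add-rotation-key --config /etc/atcr/labeler.yaml --last z...

The first add-rotation-key call generates and prints a recovery key at the highest priority; the second shows appending a pre-existing key at the lowest priority.
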
@@ -29,8 +29,6 @@ server:
  default_hold_did: ""
  # Allows HTTP (not HTTPS) for DID resolution and uses transition:generic OAuth scope.
  test_mode: false
  # Path to P-256 private key for OAuth client authentication. Auto-generated on first run.
  oauth_key_path: /var/lib/atcr/oauth/client.key
  # Display name shown on OAuth authorization screens.
  client_name: AT Container Registry
  # Short name used in page titles and browser tabs.
@@ -52,6 +50,8 @@ ui:
  libsql_auth_token: ""
  # How often to sync with remote libSQL server. Default: 60s.
  libsql_sync_interval: 1m0s
  # Source code URL displayed in the footer "Source" link. Defaults to the upstream ATCR project.
  source_url: https://tangled.org/evan.jarrett.net/at-container-registry
# Health check and cache settings.
health:
  # How long to cache hold health check results.
@@ -70,16 +70,13 @@ jetstream:
  backfill_enabled: true
  # How often to re-run backfill to catch missed events. Set to 0 to only backfill on startup.
  backfill_interval: 24h0m0s
  # Relay endpoints for backfill, tried in order on failure.
  # Endpoints used for backfill. MUST support com.atproto.sync.listReposByCollection. Tried in order on failure.
  relay_endpoints:
    - https://relay1.us-east.bsky.network
    - https://relay1.us-west.bsky.network
    - https://zlay.waow.tech
# JWT authentication settings.
auth:
  # RSA private key for signing registry JWTs issued to Docker clients.
  key_path: /var/lib/atcr/auth/private-key.pem
  # X.509 certificate matching the JWT signing key.
  # X.509 certificate matching the JWT signing key (auto-generated on each boot from the JWT key in the database).
  cert_path: /var/lib/atcr/auth/private-key.crt
# Credential helper download settings.
credential_helper:
@@ -95,6 +92,10 @@ legal:
ai:
  # Anthropic API key for AI Image Advisor. Also reads CLAUDE_API_KEY env var as fallback.
  api_key: ""
# ATProto labeler for content moderation (DMCA takedowns).
labeler:
  # DID or URL of the ATProto labeler (e.g., did:web:labeler.atcr.io). Empty disables label filtering.
  did: ""
# Stripe billing integration (requires -tags billing build).
billing:
  # Stripe secret key. Can also be set via STRIPE_SECRET_KEY env var (takes precedence). Billing is enabled automatically when set.
@@ -104,9 +105,9 @@ billing:
  # ISO 4217 currency code (e.g. "usd").
  currency: usd
  # Redirect URL after successful checkout. Use {base_url} placeholder.
  success_url: '{base_url}/settings#billing'
  success_url: '{base_url}/settings/billing'
  # Redirect URL after cancelled checkout. Use {base_url} placeholder.
  cancel_url: '{base_url}/settings#billing'
  cancel_url: '{base_url}/settings/billing'
  # Subscription tiers ordered by rank (lowest to highest).
  tiers:
    - # Tier name. Position in list determines rank (0-based).

@@ -45,8 +45,10 @@ server:
  successor: ""
  # Use localhost for OAuth redirects during development.
  test_mode: false
  # Request crawl from this relay on startup to make the embedded PDS discoverable.
  relay_endpoint: ""
  # Endpoints used for proactive scan discovery. MUST support com.atproto.sync.listReposByCollection. requestCrawl is also sent to these on startup (best-effort, in addition to built-in known relays).
  relay_endpoints:
    - https://relay1.us-east.bsky.network
    - https://relay1.us-west.bsky.network
  # DID of the appview this hold is managed by (e.g. did:web:atcr.io). Resolved via did:web for URL and public key.
  appview_did: did:web:172.28.0.2%3A5000
  # Read timeout for HTTP requests.
@@ -135,3 +137,9 @@ scanner:
  secret: ""
  # Minimum interval between re-scans of the same manifest. When set, the hold proactively scans manifests when the scanner is idle. Default: 168h (7 days). Set to 0 to disable.
  rescan_interval: 168h0m0s
# Labeler subscription settings. When configured, the hold consumes takedown labels from the named labeler and purges affected records on receipt; GC consults the cache to gate blob cleanup. An empty did disables the subscription.
labeler:
  # DID or URL of the ATProto labeler (e.g., did:web:labeler.atcr.io). Empty disables labeler integration.
  did: ""
  # Reversibility window for takedowns. Blobs survive this long after a takedown so the action can be reversed. After this window the GC reclaims them. Default: 720h (30 days).
  grace_window: 720h0m0s

53
config-labeler.example.yaml
Normal file
@@ -0,0 +1,53 @@
# ATCR Labeler Configuration
# Generated with defaults — edit as needed.

# Configuration format version.
version: "0.1"
# Log level: debug, info, warn, error.
log_level: info
# Labeler service settings.
labeler:
  # Enable the labeler service.
  enabled: true
  # Listen address for labeler (e.g., :5002).
  addr: :5002
  # Externally reachable labeler URL (required, e.g. https://labeler.example.com).
  public_url: https://labeler.example.com
  # OAuth client display name (e.g., "ATCR Labeler").
  client_name: ATCR Labeler
  # Short brand label used in UI copy (e.g., "ATCR").
  client_short_name: ATCR
  # DID of the labeler admin. Only this DID can log into the admin panel.
  owner_did: did:plc:your-did-here
  # Directory for labeler state (database, signing key, did.txt).
  data_dir: /var/lib/atcr-labeler
  # DID method: "plc" (recommended) or "web".
  did_method: plc
  # Explicit did:plc identifier for adoption/recovery (optional).
  did: ""
  # Path to K-256 signing key (defaults to <data_dir>/signing.key).
  key_path: ""
  # Multibase-encoded rotation key (K-256 or P-256). Required to update the PLC document.
  rotation_key: ""
  # PLC directory URL (default https://plc.directory).
  plc_directory_url: https://plc.directory
  # Optional libSQL/Bunny remote sync URL. Empty = local-only.
  libsql_sync_url: ""
  # Auth token for libsql_sync_url.
  libsql_auth_token: ""
  # Embedded-replica pull interval (e.g. 30s). 0 = manual sync only.
  libsql_sync_interval: 0s
# Remote log shipping settings.
log_shipper:
  # Log shipping backend: "victoria", "opensearch", or "loki". Empty disables shipping.
  backend: ""
  # Remote log service endpoint, e.g. "http://victorialogs:9428".
  url: ""
  # Number of log entries to buffer before flushing to the remote service.
  batch_size: 0
  # Maximum time between flushes, even if batch is not full.
  flush_interval: 0s
  # Basic auth username for the log service (optional).
  username: ""
  # Basic auth password for the log service (optional).
  password: ""

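For a quick sanity check of an edited copy of this file, a minimal Go sketch can load it through the same package the CLI uses (assuming the atcr.io/pkg/labeler import path seen in cmd/labeler; LoadConfig, Labeler.DIDMethod, and PLCDirectoryURL appear in cmd/labeler/plc.go, while the exact printed values are assumptions based on this example file):

	package main

	import (
		"fmt"
		"log"

		"atcr.io/pkg/labeler"
	)

	func main() {
		// Load the example config and report the identity settings the labeler would start with.
		cfg, err := labeler.LoadConfig("config-labeler.example.yaml")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(cfg.Labeler.DIDMethod) // expected: "plc"
		fmt.Println(cfg.PLCDirectoryURL()) // expected: "https://plc.directory"
	}
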
@@ -28,6 +28,12 @@ var holdConfigTmpl string
//go:embed configs/scanner.yaml.tmpl
var scannerConfigTmpl string

//go:embed systemd/labeler.service.tmpl
var labelerServiceTmpl string

//go:embed configs/labeler.yaml.tmpl
var labelerConfigTmpl string

//go:embed configs/cloudinit.sh.tmpl
var cloudInitTmpl string

@@ -43,10 +49,11 @@ type ConfigValues struct {
	S3SecretKey string

	// Infrastructure (computed from zone + config)
	Zone string // e.g. "us-chi1"
	HoldDomain string // e.g. "us-chi1.cove.seamark.dev"
	HoldDid string // e.g. "did:web:us-chi1.cove.seamark.dev"
	BasePath string // e.g. "/var/lib/seamark"
	Zone string // e.g. "us-chi1"
	HoldDomain string // e.g. "us-chi1.cove.seamark.dev"
	HoldDid string // e.g. "did:web:us-chi1.cove.seamark.dev"
	LabelerDomain string // e.g. "labeler.seamark.dev"
	BasePath string // e.g. "/var/lib/seamark"

	// Scanner (auto-generated shared secret)
	ScannerSecret string // hex-encoded 32-byte secret; empty disables scanning
@@ -111,9 +118,33 @@ func renderScannerServiceUnit(p scannerServiceUnitParams) (string, error) {
	return buf.String(), nil
}

// labelerServiceUnitParams holds values for rendering the labeler systemd unit.
type labelerServiceUnitParams struct {
	DisplayName string // e.g. "Seamark"
	User string // e.g. "seamark"
	BinaryPath string // e.g. "/opt/seamark/bin/seamark-labeler"
	ConfigPath string // e.g. "/etc/seamark/labeler.yaml"
	DataDir string // e.g. "/var/lib/seamark"
	ServiceName string // e.g. "seamark-labeler"
	AppviewServiceName string // e.g. "seamark-appview" (After= dependency)
}

func renderLabelerServiceUnit(p labelerServiceUnitParams) (string, error) {
	t, err := template.New("labeler-service").Parse(labelerServiceTmpl)
	if err != nil {
		return "", fmt.Errorf("parse labeler service template: %w", err)
	}
	var buf bytes.Buffer
	if err := t.Execute(&buf, p); err != nil {
		return "", fmt.Errorf("render labeler service template: %w", err)
	}
	return buf.String(), nil
}

// generateAppviewCloudInit generates the cloud-init user-data script for the appview server.
// Sets up the OS, directories, config, and systemd unit. Binaries are deployed separately via SCP.
func generateAppviewCloudInit(cfg *InfraConfig, vals *ConfigValues) (string, error) {
// When withLabeler is true, a second phase is appended that creates labeler data
// directories and installs a labeler systemd service. Binaries are deployed separately via SCP.
func generateAppviewCloudInit(cfg *InfraConfig, vals *ConfigValues, withLabeler bool) (string, error) {
	naming := cfg.Naming()

	configYAML, err := renderConfig(appviewConfigTmpl, vals)
@@ -133,7 +164,7 @@ func generateAppviewCloudInit(cfg *InfraConfig, vals *ConfigValues) (string, err
		return "", fmt.Errorf("appview service unit: %w", err)
	}

	return generateCloudInit(cloudInitParams{
	script, err := generateCloudInit(cloudInitParams{
		BinaryName: naming.Appview(),
		ServiceUnit: serviceUnit,
		ConfigYAML: configYAML,
@@ -146,6 +177,69 @@ func generateAppviewCloudInit(cfg *InfraConfig, vals *ConfigValues) (string, err
		LogFile: naming.LogFile(),
		DisplayName: naming.DisplayName(),
	})
	if err != nil {
		return "", err
	}

	if !withLabeler {
		return script, nil
	}

	// Render labeler config YAML
	labelerConfigYAML, err := renderConfig(labelerConfigTmpl, vals)
	if err != nil {
		return "", fmt.Errorf("labeler config: %w", err)
	}

	// Append labeler setup phase
	labelerUnit, err := renderLabelerServiceUnit(labelerServiceUnitParams{
		DisplayName: naming.DisplayName(),
		User: naming.SystemUser(),
		BinaryPath: naming.InstallDir() + "/bin/" + naming.Labeler(),
		ConfigPath: naming.LabelerConfigPath(),
		DataDir: naming.BasePath(),
		ServiceName: naming.Labeler(),
		AppviewServiceName: naming.Appview(),
	})
	if err != nil {
		return "", fmt.Errorf("labeler service unit: %w", err)
	}

	// Escape single quotes for heredoc embedding
	labelerUnit = strings.ReplaceAll(labelerUnit, "'", "'\\''")
	labelerConfigYAML = strings.ReplaceAll(labelerConfigYAML, "'", "'\\''")

	labelerPhase := fmt.Sprintf(`
# === Labeler Setup ===

# Labeler data dirs
mkdir -p %s
chown -R %s:%s %s

# Labeler config
cat > %s << 'CFGEOF'
%s
CFGEOF

# Labeler systemd service
cat > /etc/systemd/system/%s.service << 'SVCEOF'
%s
SVCEOF
systemctl daemon-reload
systemctl enable %s

echo "=== Labeler setup complete ==="
`,
		naming.LabelerDataDir(),
		naming.SystemUser(), naming.SystemUser(), naming.LabelerDataDir(),
		naming.LabelerConfigPath(),
		labelerConfigYAML,
		naming.Labeler(),
		labelerUnit,
		naming.Labeler(),
	)

	return script + labelerPhase, nil
}

// generateHoldCloudInit generates the cloud-init user-data script for the hold server.
@@ -280,8 +374,10 @@ func generateCloudInit(p cloudInitParams) (string, error) {
}

// syncServiceUnit compares a rendered systemd service unit against what's on
// the server. If they differ, it writes the new unit file. Returns true if the
// unit was updated (caller should daemon-reload before restart).
// the server. If they differ, it writes the new unit file. If the unit is
// missing entirely, it installs it and runs `systemctl enable` so the service
// starts on boot. Returns true if the unit was created or updated (caller
// should daemon-reload before restart).
func syncServiceUnit(name, ip, serviceName, renderedUnit string) (bool, error) {
	unitPath := "/etc/systemd/system/" + serviceName + ".service"

@@ -294,8 +390,15 @@ func syncServiceUnit(name, ip, serviceName, renderedUnit string) (bool, error) {
	rendered := strings.TrimSpace(renderedUnit)

	if remote == "__MISSING__" {
		fmt.Printf(" service unit: %s not found (cloud-init will handle it)\n", name)
		return false, nil
		// First-time install: write file, daemon-reload, and enable so the
		// service comes up on boot. The caller's restart will start it.
		script := fmt.Sprintf("cat > %s << 'SVCEOF'\n%s\nSVCEOF\nsystemctl daemon-reload\nsystemctl enable %s",
			unitPath, rendered, serviceName)
		if _, err := runSSH(ip, script, false); err != nil {
			return false, fmt.Errorf("install service unit: %w", err)
		}
		fmt.Printf(" service unit: %s installed and enabled\n", name)
		return true, nil
	}

	if remote == rendered {
@@ -323,7 +426,17 @@ func syncConfigKeys(name, ip, configPath, templateYAML string) error {
	remote = strings.TrimSpace(remote)

	if remote == "__MISSING__" {
		fmt.Printf(" config sync: %s not yet created (cloud-init will handle it)\n", name)
		// First-time install: write the rendered template as-is. Subsequent
		// runs use the merge-keys path below to preserve operator edits.
		dir := configPath[:strings.LastIndex(configPath, "/")]
		if _, err := runSSH(ip, fmt.Sprintf("mkdir -p %s", dir), false); err != nil {
			return fmt.Errorf("create config dir: %w", err)
		}
		script := fmt.Sprintf("cat > %s << 'CFGEOF'\n%s\nCFGEOF", configPath, strings.TrimRight(templateYAML, "\n"))
		if _, err := runSSH(ip, script, false); err != nil {
			return fmt.Errorf("write initial config: %w", err)
		}
		fmt.Printf(" config sync: %s installed\n", name)
		return nil
	}

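As a concrete illustration of the new rendering path, here is a minimal sketch of calling renderLabelerServiceUnit from inside the deploy/upcloud package (the parameter values simply echo the examples in the struct comments above; they are placeholders, not required values, and the helper function name is hypothetical):

	func exampleLabelerUnit() (string, error) {
		// Render the labeler systemd unit the same way generateAppviewCloudInit does
		// before embedding it in the cloud-init heredoc.
		return renderLabelerServiceUnit(labelerServiceUnitParams{
			DisplayName:        "Seamark",
			User:               "seamark",
			BinaryPath:         "/opt/seamark/bin/seamark-labeler",
			ConfigPath:         "/etc/seamark/labeler.yaml",
			DataDir:            "/var/lib/seamark",
			ServiceName:        "seamark-labeler",
			AppviewServiceName: "seamark-appview",
		})
	}
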
@@ -90,10 +90,12 @@ func extractFromAppviewTemplate() (clientName, baseDomain string, registryDomain
	return clientName, baseDomain, registryDomains, nil
}

// readSSHPublicKey reads an SSH public key from a file path.
// readSSHPublicKey reads an SSH public key from a file path. An empty path
// returns an empty key without error — callers that need the key (e.g. when
// creating new servers) must check for empty before use.
func readSSHPublicKey(path string) (string, error) {
	if path == "" {
		return "", fmt.Errorf("--ssh-key is required (path to SSH public key file)")
		return "", nil
	}
	data, err := os.ReadFile(path)
	if err != nil {

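The relaxed signature matters because provisioning can now run against existing servers without an SSH key on hand. A minimal sketch of the two outcomes, in the same package (the wrapper function name and the key path are hypothetical):

	func sshKeyOrEmpty() {
		// Empty path: no error and an empty key; createServer later rejects the
		// empty key only when a new server actually has to be created.
		key, err := readSSHPublicKey("")
		fmt.Println(key == "", err == nil) // true true

		// Non-empty path still reads the file and returns its contents or an error.
		key, err = readSSHPublicKey("/home/op/.ssh/id_ed25519.pub")
		_, _ = key, err
	}
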
@@ -11,13 +11,13 @@ server:
  addr: :5000
  base_url: "https://seamark.dev"
  default_hold_did: "{{.HoldDid}}"
  oauth_key_path: "{{.BasePath}}/oauth/client.key"
  client_name: Seamark
  test_mode: false
  client_short_name: Seamark
  registry_domains:
    - "buoy.cr"
    - "bouy.cr"
    - "seamark.cr"
ui:
  database_path: "{{.BasePath}}/ui.db"
  theme: seamark
@@ -39,10 +39,11 @@ jetstream:
    - https://relay1.us-east.bsky.network
    - https://relay1.us-west.bsky.network
auth:
  key_path: "{{.BasePath}}/auth/private-key.pem"
  cert_path: "{{.BasePath}}/auth/private-key.crt"
credential_helper:
  tangled_repo: ""
legal:
  company_name: Seamark
  jurisdiction: State of Texas, United States
ai:
  api_key: ""
labeler:
  did: ""

@@ -21,6 +21,17 @@ apt-get install -y git gcc make curl libsqlite3-dev nodejs npm htop systemd-time
sed -i 's/^#NTP=.*/NTP=0.debian.pool.ntp.org 1.debian.pool.ntp.org 2.debian.pool.ntp.org 3.debian.pool.ntp.org/' /etc/systemd/timesyncd.conf
timedatectl set-ntp true

# Persistent journald (capped, 7-day retention)
mkdir -p /var/log/journal
mkdir -p /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/atcr.conf << 'JOURNALEOF'
[Journal]
Storage=persistent
SystemMaxUse=500M
MaxRetentionSec=7day
JOURNALEOF
systemctl restart systemd-journald

# Swap (for small instances)
if [ ! -f /swapfile ]; then
dd if=/dev/zero of=/swapfile bs=1M count=2048

@@ -20,7 +20,9 @@ server:
  public: false
  successor: ""
  test_mode: false
  relay_endpoint: ""
  relay_endpoints:
    - https://relay1.us-east.bsky.network
    - https://relay1.us-west.bsky.network
  appview_did: did:web:seamark.dev
  read_timeout: 5m0s
  write_timeout: 5m0s
@@ -61,4 +63,9 @@ quota:
scanner:
  secret: "{{.ScannerSecret}}"
  rescan_interval: 168h0m0s
labeler:
  # Subscribe to the appview's labeler so takedowns purge records on this
  # hold and the GC honors the reversibility window. Empty disables.
  did: "did:web:seamark.dev"
  grace_window: 720h0m0s

22
deploy/upcloud/configs/labeler.yaml.tmpl
Normal file
@@ -0,0 +1,22 @@
version: "0.1"
log_level: info
log_shipper:
  backend: ""
  url: ""
  batch_size: 100
  flush_interval: 5s
  username: ""
  password: ""
labeler:
  enabled: true
  addr: :5002
  public_url: "https://{{.LabelerDomain}}"
  client_name: "Seamark Labeler"
  client_short_name: Seamark
  owner_did: ""
  data_dir: "{{.BasePath}}/labeler"
  did_method: plc
  did: ""
  key_path: ""
  rotation_key: ""
  plc_directory_url: https://plc.directory

@@ -1,10 +1,10 @@
module atcr.io/deploy

go 1.25.7
go 1.26.2

require (
	github.com/UpCloudLtd/upcloud-go-api/v8 v8.34.3
	github.com/charmbracelet/huh v0.8.0
	github.com/UpCloudLtd/upcloud-go-api/v8 v8.35.0
	github.com/charmbracelet/huh v1.0.0
	github.com/spf13/cobra v1.10.2
	go.yaml.in/yaml/v3 v3.0.4
)
@@ -15,23 +15,23 @@ require (
	github.com/catppuccin/go v0.3.0 // indirect
	github.com/charmbracelet/bubbles v1.0.0 // indirect
	github.com/charmbracelet/bubbletea v1.3.10 // indirect
	github.com/charmbracelet/colorprofile v0.4.2 // indirect
	github.com/charmbracelet/colorprofile v0.4.3 // indirect
	github.com/charmbracelet/lipgloss v1.1.0 // indirect
	github.com/charmbracelet/x/ansi v0.11.6 // indirect
	github.com/charmbracelet/x/ansi v0.11.7 // indirect
	github.com/charmbracelet/x/cellbuf v0.0.15 // indirect
	github.com/charmbracelet/x/exp/strings v0.1.0 // indirect
	github.com/charmbracelet/x/term v0.2.2 // indirect
	github.com/clipperhouse/displaywidth v0.10.0 // indirect
	github.com/clipperhouse/uax29/v2 v2.6.0 // indirect
	github.com/clipperhouse/displaywidth v0.11.0 // indirect
	github.com/clipperhouse/uax29/v2 v2.7.0 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/kr/text v0.2.0 // indirect
	github.com/lucasb-eyer/go-colorful v1.3.0 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/lucasb-eyer/go-colorful v1.4.0 // indirect
	github.com/mattn/go-isatty v0.0.21 // indirect
	github.com/mattn/go-localereader v0.0.2-0.20220822084749-2491eb6c1c75 // indirect
	github.com/mattn/go-runewidth v0.0.19 // indirect
	github.com/mattn/go-runewidth v0.0.23 // indirect
	github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
	github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
	github.com/muesli/cancelreader v0.2.2 // indirect
@@ -41,7 +41,7 @@ require (
	github.com/rogpeppe/go-internal v1.14.1 // indirect
	github.com/spf13/pflag v1.0.10 // indirect
	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
	golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect
	golang.org/x/sys v0.41.0 // indirect
	golang.org/x/text v0.34.0 // indirect
	golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f // indirect
	golang.org/x/sys v0.43.0 // indirect
	golang.org/x/text v0.36.0 // indirect
)

@@ -1,7 +1,7 @@
|
||||
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
|
||||
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
|
||||
github.com/UpCloudLtd/upcloud-go-api/v8 v8.34.3 h1:7ba03u4L5LafZPVO2k6B0/f114k5dFF3GtAN7FEKfno=
|
||||
github.com/UpCloudLtd/upcloud-go-api/v8 v8.34.3/go.mod h1:NBh1d/ip1bhdAIhuPWbyPme7tbLzDTV7dhutUmU1vg8=
|
||||
github.com/UpCloudLtd/upcloud-go-api/v8 v8.35.0 h1:AIt07ExXzCaC9YVszkVPT+CteoyXldw0C8DGUMxtjD4=
|
||||
github.com/UpCloudLtd/upcloud-go-api/v8 v8.35.0/go.mod h1:sxG94uNhC31OQH+zK0RhZjVj+PdkhObsNAt5bvq2J8c=
|
||||
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
|
||||
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
|
||||
@@ -14,14 +14,14 @@ github.com/charmbracelet/bubbles v1.0.0 h1:12J8/ak/uCZEMQ6KU7pcfwceyjLlWsDLAxB5f
|
||||
github.com/charmbracelet/bubbles v1.0.0/go.mod h1:9d/Zd5GdnauMI5ivUIVisuEm3ave1XwXtD1ckyV6r3E=
|
||||
github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
|
||||
github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4=
|
||||
github.com/charmbracelet/colorprofile v0.4.2 h1:BdSNuMjRbotnxHSfxy+PCSa4xAmz7szw70ktAtWRYrY=
|
||||
github.com/charmbracelet/colorprofile v0.4.2/go.mod h1:0rTi81QpwDElInthtrQ6Ni7cG0sDtwAd4C4le060fT8=
|
||||
github.com/charmbracelet/huh v0.8.0 h1:Xz/Pm2h64cXQZn/Jvele4J3r7DDiqFCNIVteYukxDvY=
|
||||
github.com/charmbracelet/huh v0.8.0/go.mod h1:5YVc+SlZ1IhQALxRPpkGwwEKftN/+OlJlnJYlDRFqN4=
|
||||
github.com/charmbracelet/colorprofile v0.4.3 h1:QPa1IWkYI+AOB+fE+mg/5/4HRMZcaXex9t5KX76i20Q=
|
||||
github.com/charmbracelet/colorprofile v0.4.3/go.mod h1:/zT4BhpD5aGFpqQQqw7a+VtHCzu+zrQtt1zhMt9mR4Q=
|
||||
github.com/charmbracelet/huh v1.0.0 h1:wOnedH8G4qzJbmhftTqrpppyqHakl/zbbNdXIWJyIxw=
|
||||
github.com/charmbracelet/huh v1.0.0/go.mod h1:5YVc+SlZ1IhQALxRPpkGwwEKftN/+OlJlnJYlDRFqN4=
|
||||
github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
|
||||
github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
|
||||
github.com/charmbracelet/x/ansi v0.11.6 h1:GhV21SiDz/45W9AnV2R61xZMRri5NlLnl6CVF7ihZW8=
|
||||
github.com/charmbracelet/x/ansi v0.11.6/go.mod h1:2JNYLgQUsyqaiLovhU2Rv/pb8r6ydXKS3NIttu3VGZQ=
|
||||
github.com/charmbracelet/x/ansi v0.11.7 h1:kzv1kJvjg2S3r9KHo8hDdHFQLEqn4RBCb39dAYC84jI=
|
||||
github.com/charmbracelet/x/ansi v0.11.7/go.mod h1:9qGpnAVYz+8ACONkZBUWPtL7lulP9No6p1epAihUZwQ=
|
||||
github.com/charmbracelet/x/cellbuf v0.0.15 h1:ur3pZy0o6z/R7EylET877CBxaiE1Sp1GMxoFPAIztPI=
|
||||
github.com/charmbracelet/x/cellbuf v0.0.15/go.mod h1:J1YVbR7MUuEGIFPCaaZ96KDl5NoS0DAWkskup+mOY+Q=
|
||||
github.com/charmbracelet/x/conpty v0.1.0 h1:4zc8KaIcbiL4mghEON8D72agYtSeIgq8FSThSPQIb+U=
|
||||
@@ -38,10 +38,10 @@ github.com/charmbracelet/x/termios v0.1.1 h1:o3Q2bT8eqzGnGPOYheoYS8eEleT5ZVNYNy8
|
||||
github.com/charmbracelet/x/termios v0.1.1/go.mod h1:rB7fnv1TgOPOyyKRJ9o+AsTU/vK5WHJ2ivHeut/Pcwo=
|
||||
github.com/charmbracelet/x/xpty v0.1.2 h1:Pqmu4TEJ8KeA9uSkISKMU3f+C1F6OGBn8ABuGlqCbtI=
|
||||
github.com/charmbracelet/x/xpty v0.1.2/go.mod h1:XK2Z0id5rtLWcpeNiMYBccNNBrP2IJnzHI0Lq13Xzq4=
|
||||
github.com/clipperhouse/displaywidth v0.10.0 h1:GhBG8WuerxjFQQYeuZAeVTuyxuX+UraiZGD4HJQ3Y8g=
|
||||
github.com/clipperhouse/displaywidth v0.10.0/go.mod h1:XqJajYsaiEwkxOj4bowCTMcT1SgvHo9flfF3jQasdbs=
|
||||
github.com/clipperhouse/uax29/v2 v2.6.0 h1:z0cDbUV+aPASdFb2/ndFnS9ts/WNXgTNNGFoKXuhpos=
|
||||
github.com/clipperhouse/uax29/v2 v2.6.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
|
||||
github.com/clipperhouse/displaywidth v0.11.0 h1:lBc6kY44VFw+TDx4I8opi/EtL9m20WSEFgwIwO+UVM8=
|
||||
github.com/clipperhouse/displaywidth v0.11.0/go.mod h1:bkrFNkf81G8HyVqmKGxsPufD3JhNl3dSqnGhOoSD/o0=
|
||||
github.com/clipperhouse/uax29/v2 v2.7.0 h1:+gs4oBZ2gPfVrKPthwbMzWZDaAFPGYK72F0NJv2v7Vk=
|
||||
github.com/clipperhouse/uax29/v2 v2.7.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
|
||||
@@ -60,13 +60,14 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag=
|
||||
github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/lucasb-eyer/go-colorful v1.4.0 h1:UtrWVfLdarDgc44HcS7pYloGHJUjHV/4FwW4TvVgFr4=
|
||||
github.com/lucasb-eyer/go-colorful v1.4.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
|
||||
github.com/mattn/go-isatty v0.0.21 h1:xYae+lCNBP7QuW4PUnNG61ffM4hVIfm+zUzDuSzYLGs=
|
||||
github.com/mattn/go-isatty v0.0.21/go.mod h1:ZXfXG4SQHsB/w3ZeOYbR0PrPwLy+n6xiMrJlRFqopa4=
|
||||
github.com/mattn/go-localereader v0.0.2-0.20220822084749-2491eb6c1c75 h1:P8UmIzZMYDR+NGImiFvErt6VWfIRPuGM+vyjiEdkmIw=
|
||||
github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw=
|
||||
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
|
||||
github.com/mattn/go-localereader v0.0.2-0.20220822084749-2491eb6c1c75/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
|
||||
github.com/mattn/go-runewidth v0.0.23 h1:7ykA0T0jkPpzSvMS5i9uoNn2Xy3R383f9HDx3RybWcw=
|
||||
github.com/mattn/go-runewidth v0.0.23/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
|
||||
@@ -93,13 +94,13 @@ github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavM
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05ST2uO1exVfZPVqRC5o=
|
||||
golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f h1:W3F4c+6OLc6H2lb//N1q4WpJkhzJCK5J6kUi1NTVXfM=
|
||||
golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f/go.mod h1:J1xhfL/vlindoeF/aINzNzt2Bket5bjo9sdOYzOsU80=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
||||
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
|
||||
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
|
||||
golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI=
|
||||
golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
|
||||
golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg=
|
||||
golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
|
||||
@@ -57,5 +57,14 @@ func (n Naming) ScannerConfigPath() string { return n.ConfigDir() + "/scanner.ya
// ScannerDataDir returns the scanner data directory (e.g. "/var/lib/seamark/scanner").
func (n Naming) ScannerDataDir() string { return n.BasePath() + "/scanner" }

// Labeler returns the labeler binary/service name (e.g. "seamark-labeler").
func (n Naming) Labeler() string { return n.ClientName + "-labeler" }

// LabelerConfigPath returns the labeler config file path.
func (n Naming) LabelerConfigPath() string { return n.ConfigDir() + "/labeler.yaml" }

// LabelerDataDir returns the labeler data directory (e.g. "/var/lib/seamark/labeler").
func (n Naming) LabelerDataDir() string { return n.BasePath() + "/labeler" }

// S3Name returns the name used for S3 storage, user, and bucket.
func (n Naming) S3Name() string { return n.ClientName }

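A minimal sketch of what the new helpers yield for the default client name, in the same package (the derived paths assume the "/etc/seamark" ConfigDir and "/var/lib/seamark" BasePath defaults that the surrounding comments use; other Naming fields, if any, are left at zero values):

	func exampleLabelerNaming() {
		n := Naming{ClientName: "seamark"}
		fmt.Println(n.Labeler())           // "seamark-labeler"
		fmt.Println(n.LabelerConfigPath()) // likely "/etc/seamark/labeler.yaml"
		fmt.Println(n.LabelerDataDir())    // likely "/var/lib/seamark/labeler"
	}
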
@@ -29,21 +29,22 @@ var provisionCmd = &cobra.Command{
|
||||
sshKey, _ := cmd.Flags().GetString("ssh-key")
|
||||
s3Secret, _ := cmd.Flags().GetString("s3-secret")
|
||||
withScanner, _ := cmd.Flags().GetBool("with-scanner")
|
||||
return cmdProvision(token, zone, plan, sshKey, s3Secret, withScanner)
|
||||
withLabeler, _ := cmd.Flags().GetBool("with-labeler")
|
||||
return cmdProvision(token, zone, plan, sshKey, s3Secret, withScanner, withLabeler)
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
provisionCmd.Flags().String("zone", "", "UpCloud zone (interactive picker if omitted)")
|
||||
provisionCmd.Flags().String("plan", "", "Server plan (interactive picker if omitted)")
|
||||
provisionCmd.Flags().String("ssh-key", "", "Path to SSH public key file (required)")
|
||||
provisionCmd.Flags().String("ssh-key", "", "Path to SSH public key file (required when creating new servers)")
|
||||
provisionCmd.Flags().String("s3-secret", "", "S3 secret access key (for existing object storage)")
|
||||
provisionCmd.Flags().Bool("with-scanner", false, "Deploy vulnerability scanner alongside hold")
|
||||
_ = provisionCmd.MarkFlagRequired("ssh-key")
|
||||
provisionCmd.Flags().Bool("with-labeler", false, "Deploy content moderation labeler alongside appview")
|
||||
rootCmd.AddCommand(provisionCmd)
|
||||
}
|
||||
|
||||
func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bool) error {
|
||||
func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner, withLabeler bool) error {
|
||||
cfg, err := loadConfig(zone, plan, sshKeyPath, s3Secret)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -98,6 +99,12 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
|
||||
_ = saveState(state)
|
||||
}
|
||||
|
||||
// Labeler setup
|
||||
if withLabeler {
|
||||
state.LabelerEnabled = true
|
||||
_ = saveState(state)
|
||||
}
|
||||
|
||||
fmt.Printf("Provisioning %s infrastructure in zone %s...\n", naming.DisplayName(), cfg.Zone)
|
||||
if needsServers {
|
||||
fmt.Printf("Server plan: %s\n", cfg.Plan)
|
||||
@@ -146,6 +153,8 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
|
||||
|
||||
// Hold domain is zone-based (e.g. us-chi1.cove.seamark.dev)
|
||||
holdDomain := cfg.Zone + ".cove." + cfg.BaseDomain
|
||||
// Labeler domain is a fixed subdomain on the base domain (e.g. labeler.seamark.dev)
|
||||
labelerDomain := "labeler." + cfg.BaseDomain
|
||||
|
||||
// Build config template values
|
||||
vals := &ConfigValues{
|
||||
@@ -157,6 +166,7 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
|
||||
Zone: cfg.Zone,
|
||||
HoldDomain: holdDomain,
|
||||
HoldDid: "did:web:" + holdDomain,
|
||||
LabelerDomain: labelerDomain,
|
||||
BasePath: naming.BasePath(),
|
||||
ScannerSecret: state.ScannerSecret,
|
||||
}
|
||||
@@ -198,7 +208,7 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
|
||||
appviewCreated := false
|
||||
if state.Appview.UUID != "" {
|
||||
fmt.Printf("Appview: %s (exists)\n", state.Appview.UUID)
|
||||
appviewScript, err := generateAppviewCloudInit(cfg, vals)
|
||||
appviewScript, err := generateAppviewCloudInit(cfg, vals, state.LabelerEnabled)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -212,9 +222,18 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
|
||||
if err := syncConfigKeys("appview", state.Appview.PublicIP, naming.AppviewConfigPath(), appviewConfigYAML); err != nil {
|
||||
return fmt.Errorf("appview config sync: %w", err)
|
||||
}
|
||||
if state.LabelerEnabled {
|
||||
labelerConfigYAML, err := renderConfig(labelerConfigTmpl, vals)
|
||||
if err != nil {
|
||||
return fmt.Errorf("render labeler config: %w", err)
|
||||
}
|
||||
if err := syncConfigKeys("labeler", state.Appview.PublicIP, naming.LabelerConfigPath(), labelerConfigYAML); err != nil {
|
||||
return fmt.Errorf("labeler config sync: %w", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
fmt.Println("Creating appview server...")
|
||||
appviewUserData, err := generateAppviewCloudInit(cfg, vals)
|
||||
appviewUserData, err := generateAppviewCloudInit(cfg, vals, state.LabelerEnabled)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -290,7 +309,7 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
|
||||
fmt.Printf("Load balancer: %s (exists)\n", state.LB.UUID)
|
||||
} else {
|
||||
fmt.Println("Creating load balancer (Essentials tier)...")
|
||||
lb, err := createLoadBalancer(ctx, svc, cfg, naming, state.Network.UUID, state.Appview.PrivateIP, state.Hold.PrivateIP, holdDomain)
|
||||
lb, err := createLoadBalancer(ctx, svc, cfg, naming, state.Network.UUID, state.Appview.PrivateIP, state.Hold.PrivateIP, holdDomain, labelerDomain, state.LabelerEnabled)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create LB: %w", err)
|
||||
}
|
||||
@@ -308,6 +327,13 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
|
||||
return fmt.Errorf("LB hold forwarded headers: %w", err)
|
||||
}
|
||||
|
||||
// Ensure labeler backend + route-labeler rule when labeler is enabled
|
||||
if state.LabelerEnabled {
|
||||
if err := ensureLBLabelerRoute(ctx, svc, state.LB.UUID, state.Appview.PrivateIP, labelerDomain); err != nil {
|
||||
return fmt.Errorf("LB labeler route: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Always reconcile scanner block rule
|
||||
if err := ensureLBScannerBlock(ctx, svc, state.LB.UUID); err != nil {
|
||||
return fmt.Errorf("LB scanner block: %w", err)
|
||||
@@ -317,6 +343,9 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
|
||||
tlsDomains := []string{cfg.BaseDomain}
|
||||
tlsDomains = append(tlsDomains, cfg.RegistryDomains...)
|
||||
tlsDomains = append(tlsDomains, holdDomain)
|
||||
if state.LabelerEnabled {
|
||||
tlsDomains = append(tlsDomains, labelerDomain)
|
||||
}
|
||||
if err := ensureLBCertificates(ctx, svc, state.LB.UUID, tlsDomains); err != nil {
|
||||
return fmt.Errorf("LB certificates: %w", err)
|
||||
}
|
||||
@@ -348,6 +377,12 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
|
||||
if err := buildLocal(rootDir, outputPath, "./cmd/appview"); err != nil {
|
||||
return fmt.Errorf("build appview: %w", err)
|
||||
}
|
||||
if state.LabelerEnabled {
|
||||
outputPath := filepath.Join(rootDir, "bin", "atcr-labeler")
|
||||
if err := buildLocal(rootDir, outputPath, "./cmd/labeler"); err != nil {
|
||||
return fmt.Errorf("build labeler: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
if holdCreated {
|
||||
outputPath := filepath.Join(rootDir, "bin", "atcr-hold")
|
||||
@@ -381,6 +416,13 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
|
||||
if err := scpFile(localPath, state.Appview.PublicIP, remotePath); err != nil {
|
||||
return fmt.Errorf("upload appview: %w", err)
|
||||
}
|
||||
if state.LabelerEnabled {
|
||||
labelerLocal := filepath.Join(rootDir, "bin", "atcr-labeler")
|
||||
labelerRemote := naming.InstallDir() + "/bin/" + naming.Labeler()
|
||||
if err := scpFile(labelerLocal, state.Appview.PublicIP, labelerRemote); err != nil {
|
||||
return fmt.Errorf("upload labeler: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
if holdCreated {
|
||||
localPath := filepath.Join(rootDir, "bin", "atcr-hold")
|
||||
@@ -398,6 +440,25 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
|
||||
}
|
||||
}
|
||||
|
||||
// Labeler binary: build and upload when labeler is enabled but appview was
|
||||
// not freshly created (the appviewCreated branch above already handled it).
|
||||
if state.LabelerEnabled && !appviewCreated {
|
||||
rootDir := projectRoot()
|
||||
if err := runGenerate(rootDir); err != nil {
|
||||
return fmt.Errorf("go generate: %w", err)
|
||||
}
|
||||
fmt.Println("\nBuilding labeler locally (GOOS=linux GOARCH=amd64)...")
|
||||
labelerLocal := filepath.Join(rootDir, "bin", "atcr-labeler")
|
||||
if err := buildLocal(rootDir, labelerLocal, "./cmd/labeler"); err != nil {
|
||||
return fmt.Errorf("build labeler: %w", err)
|
||||
}
|
||||
labelerRemote := naming.InstallDir() + "/bin/" + naming.Labeler()
|
||||
fmt.Println("Deploying labeler binary...")
|
||||
if err := scpFile(labelerLocal, state.Appview.PublicIP, labelerRemote); err != nil {
|
||||
return fmt.Errorf("upload labeler: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("\n=== Provisioning Complete ===")
|
||||
fmt.Println()
|
||||
fmt.Println("DNS records needed:")
|
||||
@@ -407,6 +468,9 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
|
||||
fmt.Printf(" CNAME %-24s → %s\n", rd, lbDNS)
|
||||
}
|
||||
fmt.Printf(" CNAME %-24s → %s\n", holdDomain, lbDNS)
|
||||
if state.LabelerEnabled {
|
||||
fmt.Printf(" CNAME %-24s → %s\n", labelerDomain, lbDNS)
|
||||
}
|
||||
} else {
|
||||
fmt.Println(" (LB DNS name not yet available — check 'status' in a few minutes)")
|
||||
}
|
||||
@@ -421,11 +485,14 @@ func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bo
|
||||
} else {
|
||||
fmt.Println(" 1. Start services:")
|
||||
}
|
||||
services := []string{naming.Appview(), naming.Hold()}
|
||||
if state.ScannerEnabled {
|
||||
fmt.Printf(" systemctl start %s / %s / %s\n", naming.Appview(), naming.Hold(), naming.Scanner())
|
||||
} else {
|
||||
fmt.Printf(" systemctl start %s / %s\n", naming.Appview(), naming.Hold())
|
||||
services = append(services, naming.Scanner())
|
||||
}
|
||||
if state.LabelerEnabled {
|
||||
services = append(services, naming.Labeler())
|
||||
}
|
||||
fmt.Printf(" systemctl start %s\n", strings.Join(services, " / "))
|
||||
fmt.Println(" 2. Configure DNS records above")
|
||||
|
||||
return nil
|
||||
@@ -541,6 +608,9 @@ func objectStorageRegion(zone string) string {
|
||||
}
|
||||
|
||||
func createServer(ctx context.Context, svc *service.Service, cfg *InfraConfig, templateUUID, networkUUID, title, userData string) (*ServerState, error) {
|
||||
if cfg.SSHPublicKey == "" {
|
||||
return nil, fmt.Errorf("creating server %s requires --ssh-key (path to SSH public key file)", title)
|
||||
}
|
||||
storageTier := "maxiops"
|
||||
if strings.HasPrefix(strings.ToUpper(cfg.Plan), "DEV-") {
|
||||
storageTier = "standard"
|
||||
@@ -676,7 +746,83 @@ func createFirewallRules(ctx context.Context, svc *service.Service, serverUUID,
|
||||
})
|
||||
}
|
||||
|
||||
func createLoadBalancer(ctx context.Context, svc *service.Service, cfg *InfraConfig, naming Naming, networkUUID, appviewIP, holdIP, holdDomain string) (*upcloud.LoadBalancer, error) {
|
||||
func createLoadBalancer(ctx context.Context, svc *service.Service, cfg *InfraConfig, naming Naming, networkUUID, appviewIP, holdIP, holdDomain, labelerDomain string, withLabeler bool) (*upcloud.LoadBalancer, error) {
|
||||
frontendRules := []request.LoadBalancerFrontendRule{
|
||||
{
|
||||
Name: "set-forwarded-headers",
|
||||
Priority: 1,
|
||||
Matchers: []upcloud.LoadBalancerMatcher{},
|
||||
Actions: []upcloud.LoadBalancerAction{
|
||||
request.NewLoadBalancerSetForwardedHeadersAction(),
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "route-hold",
|
||||
Priority: 10,
|
||||
Matchers: []upcloud.LoadBalancerMatcher{
|
||||
{
|
||||
Type: upcloud.LoadBalancerMatcherTypeHost,
|
||||
Host: &upcloud.LoadBalancerMatcherHost{
|
||||
Value: holdDomain,
|
||||
},
|
||||
},
|
||||
},
|
||||
Actions: []upcloud.LoadBalancerAction{
|
||||
request.NewLoadBalancerSetForwardedHeadersAction(),
|
||||
{
|
||||
Type: upcloud.LoadBalancerActionTypeUseBackend,
|
||||
UseBackend: &upcloud.LoadBalancerActionUseBackend{
|
||||
Backend: "hold",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
backends := []request.LoadBalancerBackend{
|
||||
{
|
||||
Name: "appview",
|
||||
Members: []request.LoadBalancerBackendMember{
|
||||
{
|
||||
Name: "appview-1",
|
||||
Type: upcloud.LoadBalancerBackendMemberTypeStatic,
|
||||
IP: appviewIP,
|
||||
Port: 5000,
|
||||
Weight: 100,
|
||||
MaxSessions: 1000,
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
Properties: &upcloud.LoadBalancerBackendProperties{
|
||||
HealthCheckType: upcloud.LoadBalancerHealthCheckTypeHTTP,
|
||||
HealthCheckURL: "/health",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "hold",
|
||||
Members: []request.LoadBalancerBackendMember{
|
||||
{
|
||||
Name: "hold-1",
|
||||
Type: upcloud.LoadBalancerBackendMemberTypeStatic,
|
||||
IP: holdIP,
|
||||
Port: 8080,
|
||||
Weight: 100,
|
||||
MaxSessions: 1000,
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
Properties: &upcloud.LoadBalancerBackendProperties{
|
||||
HealthCheckType: upcloud.LoadBalancerHealthCheckTypeHTTP,
|
||||
HealthCheckURL: "/xrpc/_health",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if withLabeler {
|
||||
frontendRules = append(frontendRules, labelerFrontendRule(labelerDomain))
|
||||
backends = append(backends, labelerBackend(appviewIP))
|
||||
}
|
||||
|
||||
lb, err := svc.CreateLoadBalancer(ctx, &request.CreateLoadBalancerRequest{
|
||||
Name: naming.LBName(),
|
||||
Plan: "essentials",
|
||||
@@ -704,37 +850,7 @@ func createLoadBalancer(ctx context.Context, svc *service.Service, cfg *InfraCon
|
||||
Networks: []upcloud.LoadBalancerFrontendNetwork{
|
||||
{Name: "public"},
|
||||
},
|
||||
Rules: []request.LoadBalancerFrontendRule{
|
||||
{
|
||||
Name: "set-forwarded-headers",
|
||||
Priority: 1,
|
||||
Matchers: []upcloud.LoadBalancerMatcher{},
|
||||
Actions: []upcloud.LoadBalancerAction{
|
||||
request.NewLoadBalancerSetForwardedHeadersAction(),
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "route-hold",
|
||||
Priority: 10,
|
||||
Matchers: []upcloud.LoadBalancerMatcher{
|
||||
{
|
||||
Type: upcloud.LoadBalancerMatcherTypeHost,
|
||||
Host: &upcloud.LoadBalancerMatcherHost{
|
||||
Value: holdDomain,
|
||||
},
|
||||
},
|
||||
},
|
||||
Actions: []upcloud.LoadBalancerAction{
|
||||
request.NewLoadBalancerSetForwardedHeadersAction(),
|
||||
{
|
||||
Type: upcloud.LoadBalancerActionTypeUseBackend,
|
||||
UseBackend: &upcloud.LoadBalancerActionUseBackend{
|
||||
Backend: "hold",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Rules: frontendRules,
|
||||
},
|
||||
{
|
||||
Name: "http-redirect",
|
||||
@@ -770,44 +886,7 @@ func createLoadBalancer(ctx context.Context, svc *service.Service, cfg *InfraCon
|
||||
},
|
||||
},
|
||||
Resolvers: []request.LoadBalancerResolver{},
|
||||
Backends: []request.LoadBalancerBackend{
|
||||
{
|
||||
Name: "appview",
|
||||
Members: []request.LoadBalancerBackendMember{
|
||||
{
|
||||
Name: "appview-1",
|
||||
Type: upcloud.LoadBalancerBackendMemberTypeStatic,
|
||||
IP: appviewIP,
|
||||
Port: 5000,
|
||||
Weight: 100,
|
||||
MaxSessions: 1000,
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
Properties: &upcloud.LoadBalancerBackendProperties{
|
||||
HealthCheckType: upcloud.LoadBalancerHealthCheckTypeHTTP,
|
||||
HealthCheckURL: "/health",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "hold",
|
||||
Members: []request.LoadBalancerBackendMember{
|
||||
{
|
||||
Name: "hold-1",
|
||||
Type: upcloud.LoadBalancerBackendMemberTypeStatic,
|
||||
IP: holdIP,
|
||||
Port: 8080,
|
||||
Weight: 100,
|
||||
MaxSessions: 1000,
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
Properties: &upcloud.LoadBalancerBackendProperties{
|
||||
HealthCheckType: upcloud.LoadBalancerHealthCheckTypeHTTP,
|
||||
HealthCheckURL: "/xrpc/_health",
|
||||
},
|
||||
},
|
||||
},
|
||||
Backends: backends,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -816,6 +895,55 @@ func createLoadBalancer(ctx context.Context, svc *service.Service, cfg *InfraCon
|
||||
return lb, nil
|
||||
}
|
||||
|
||||
// labelerBackend builds the labeler LB backend pointing at the appview server's
// private IP on the labeler listen port.
func labelerBackend(appviewIP string) request.LoadBalancerBackend {
	return request.LoadBalancerBackend{
		Name: "labeler",
		Members: []request.LoadBalancerBackendMember{
			{
				Name:        "labeler-1",
				Type:        upcloud.LoadBalancerBackendMemberTypeStatic,
				IP:          appviewIP,
				Port:        5002,
				Weight:      100,
				MaxSessions: 1000,
				Enabled:     true,
			},
		},
		Properties: &upcloud.LoadBalancerBackendProperties{
			HealthCheckType: upcloud.LoadBalancerHealthCheckTypeHTTP,
			HealthCheckURL:  "/.well-known/did.json",
		},
	}
}

// labelerFrontendRule returns a host-match rule routing labelerDomain to the
// labeler backend with forwarded headers.
func labelerFrontendRule(labelerDomain string) request.LoadBalancerFrontendRule {
	return request.LoadBalancerFrontendRule{
		Name:     "route-labeler",
		Priority: 20,
		Matchers: []upcloud.LoadBalancerMatcher{
			{
				Type: upcloud.LoadBalancerMatcherTypeHost,
				Host: &upcloud.LoadBalancerMatcherHost{
					Value: labelerDomain,
				},
			},
		},
		Actions: []upcloud.LoadBalancerAction{
			request.NewLoadBalancerSetForwardedHeadersAction(),
			{
				Type: upcloud.LoadBalancerActionTypeUseBackend,
				UseBackend: &upcloud.LoadBalancerActionUseBackend{
					Backend: "labeler",
				},
			},
		},
	}
}

// ensureLBCertificates reconciles TLS certificate bundles on the load balancer.
// It skips domains that already have a TLS config attached and creates missing ones.
func ensureLBCertificates(ctx context.Context, svc *service.Service, lbUUID string, tlsDomains []string) error {

@@ -993,6 +1121,88 @@ func ensureLBHoldForwardedHeaders(ctx context.Context, svc *service.Service, lbU
	return nil
}

// ensureLBLabelerRoute idempotently ensures the LB has a "labeler" backend
// pointing at the appview server's private IP and a "route-labeler" frontend
// rule matching labelerDomain. Used to add labeler routing to a pre-existing LB
// during a re-provision with --with-labeler.
func ensureLBLabelerRoute(ctx context.Context, svc *service.Service, lbUUID, appviewIP, labelerDomain string) error {
	// 1. Ensure backend exists
	backends, err := svc.GetLoadBalancerBackends(ctx, &request.GetLoadBalancerBackendsRequest{ServiceUUID: lbUUID})
	if err != nil {
		return fmt.Errorf("get backends: %w", err)
	}
	hasBackend := false
	for _, b := range backends {
		if b.Name == "labeler" {
			hasBackend = true
			break
		}
	}
	if !hasBackend {
		_, err := svc.CreateLoadBalancerBackend(ctx, &request.CreateLoadBalancerBackendRequest{
			ServiceUUID: lbUUID,
			Backend:     labelerBackend(appviewIP),
		})
		if err != nil {
			return fmt.Errorf("create labeler backend: %w", err)
		}
		fmt.Println("  Labeler backend: created")
	} else {
		fmt.Println("  Labeler backend: exists")
	}

	// 2. Ensure frontend rule exists with correct host matcher
	rules, err := svc.GetLoadBalancerFrontendRules(ctx, &request.GetLoadBalancerFrontendRulesRequest{
		ServiceUUID:  lbUUID,
		FrontendName: "https",
	})
	if err != nil {
		return fmt.Errorf("get frontend rules: %w", err)
	}
	for _, r := range rules {
		if r.Name == "route-labeler" {
			// Verify the host matcher and use_backend action are correct
			hostOK := false
			for _, m := range r.Matchers {
				if m.Host != nil && m.Host.Value == labelerDomain {
					hostOK = true
					break
				}
			}
			backendOK := false
			for _, a := range r.Actions {
				if a.UseBackend != nil && a.UseBackend.Backend == "labeler" {
					backendOK = true
					break
				}
			}
			if hostOK && backendOK {
				fmt.Println("  Route-labeler rule: exists and valid")
				return nil
			}
			fmt.Println("  Route-labeler rule: exists but misconfigured, recreating")
			if err := svc.DeleteLoadBalancerFrontendRule(ctx, &request.DeleteLoadBalancerFrontendRuleRequest{
				ServiceUUID:  lbUUID,
				FrontendName: "https",
				Name:         r.Name,
			}); err != nil {
				return fmt.Errorf("delete route-labeler rule: %w", err)
			}
			break
		}
	}

	if _, err := svc.CreateLoadBalancerFrontendRule(ctx, &request.CreateLoadBalancerFrontendRuleRequest{
		ServiceUUID:  lbUUID,
		FrontendName: "https",
		Rule:         labelerFrontendRule(labelerDomain),
	}); err != nil {
		return fmt.Errorf("create route-labeler rule: %w", err)
	}
	fmt.Println("  Route-labeler rule: created")
	return nil
}

// ensureLBScannerBlock ensures the "https" frontend has a rule that returns 403
// for common scanner paths (.php, .asp, .aspx, .jsp, .cgi, .env).
func ensureLBScannerBlock(ctx context.Context, svc *service.Service, lbUUID string) error {

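ensureLBLabelerRoute is written to be safe on every run. A hypothetical call site during a re-provision with --with-labeler; the surrounding flow, state fields, and variable names are assumptions, only the function itself comes from this diff:

```go
// Illustrative wiring only: not the repo's actual provision flow.
if state.LabelerEnabled {
	if err := ensureLBLabelerRoute(ctx, svc, lb.UUID, appviewIP, labelerDomain); err != nil {
		return fmt.Errorf("ensure labeler route: %w", err)
	}
}
```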
@@ -20,6 +20,7 @@ type InfraState struct {
	ObjectStorage  ObjectStorageState `json:"object_storage"`
	ScannerEnabled bool               `json:"scanner_enabled,omitempty"`
	ScannerSecret  string             `json:"scanner_secret,omitempty"`
+	LabelerEnabled bool               `json:"labeler_enabled,omitempty"`
}

// Naming returns a Naming helper, defaulting to "seamark" if ClientName is empty.

25 deploy/upcloud/systemd/labeler.service.tmpl  Normal file
@@ -0,0 +1,25 @@
[Unit]
Description={{.DisplayName}} Labeler (Content Moderation)
After=network-online.target {{.AppviewServiceName}}.service
Wants=network-online.target

[Service]
Type=simple
User={{.User}}
Group={{.User}}
ExecStart={{.BinaryPath}} serve --config {{.ConfigPath}}
Restart=on-failure
RestartSec=10

ReadWritePaths={{.DataDir}}
ProtectSystem=strict
ProtectHome=yes
NoNewPrivileges=yes
PrivateTmp=yes

StandardOutput=journal
StandardError=journal
SyslogIdentifier={{.ServiceName}}

[Install]
WantedBy=multi-user.target

@@ -24,7 +24,8 @@ var updateCmd = &cobra.Command{
			target = args[0]
		}
		withScanner, _ := cmd.Flags().GetBool("with-scanner")
-		return cmdUpdate(target, withScanner)
+		withLabeler, _ := cmd.Flags().GetBool("with-labeler")
+		return cmdUpdate(target, withScanner, withLabeler)
	},
}

@@ -40,11 +41,12 @@ var sshCmd = &cobra.Command{

func init() {
	updateCmd.Flags().Bool("with-scanner", false, "Enable and deploy vulnerability scanner alongside hold")
+	updateCmd.Flags().Bool("with-labeler", false, "Enable and deploy content moderation labeler alongside appview")
	rootCmd.AddCommand(updateCmd)
	rootCmd.AddCommand(sshCmd)
}

-func cmdUpdate(target string, withScanner bool) error {
+func cmdUpdate(target string, withScanner, withLabeler bool) error {
	state, err := loadState()
	if err != nil {
		return err

@@ -67,6 +69,12 @@ func cmdUpdate(target string, withScanner bool) error {
		_ = saveState(state)
	}

+	// Enable labeler retroactively via --with-labeler on update
+	if withLabeler && !state.LabelerEnabled {
+		state.LabelerEnabled = true
+		_ = saveState(state)
+	}
+
	vals := configValsFromState(state)

	targets := map[string]struct {

@@ -144,6 +152,21 @@ func cmdUpdate(target string, withScanner bool) error {
		}
	}

+	// Build labeler locally if needed
+	needLabeler := false
+	for _, name := range toUpdate {
+		if name == "appview" && state.LabelerEnabled {
+			needLabeler = true
+			break
+		}
+	}
+	if needLabeler {
+		outputPath := filepath.Join(rootDir, "bin", "atcr-labeler")
+		if err := buildLocal(rootDir, outputPath, "./cmd/labeler"); err != nil {
+			return fmt.Errorf("build labeler: %w", err)
+		}
+	}
+
	// Deploy each target
	for _, name := range toUpdate {
		t := targets[name]

@@ -244,13 +267,65 @@ curl -sf http://localhost:9090/healthz > /dev/null && echo "SCANNER_HEALTH_OK" |
`
		}

+		// Labeler additions for appview server
+		labelerRestart := ""
+		if name == "appview" && state.LabelerEnabled {
+			// Sync labeler config keys
+			labelerConfigYAML, err := renderConfig(labelerConfigTmpl, vals)
+			if err != nil {
+				return fmt.Errorf("render labeler config: %w", err)
+			}
+			if err := syncConfigKeys("labeler", t.ip, naming.LabelerConfigPath(), labelerConfigYAML); err != nil {
+				return fmt.Errorf("labeler config sync: %w", err)
+			}
+
+			// Sync labeler service unit
+			labelerUnit, err := renderLabelerServiceUnit(labelerServiceUnitParams{
+				DisplayName:        naming.DisplayName(),
+				User:               naming.SystemUser(),
+				BinaryPath:         naming.InstallDir() + "/bin/" + naming.Labeler(),
+				ConfigPath:         naming.LabelerConfigPath(),
+				DataDir:            naming.BasePath(),
+				ServiceName:        naming.Labeler(),
+				AppviewServiceName: naming.Appview(),
+			})
+			if err != nil {
+				return fmt.Errorf("render labeler service unit: %w", err)
+			}
+			labelerUnitChanged, err := syncServiceUnit("labeler", t.ip, naming.Labeler(), labelerUnit)
+			if err != nil {
+				return fmt.Errorf("labeler service unit sync: %w", err)
+			}
+			if labelerUnitChanged {
+				daemonReload = "systemctl daemon-reload"
+			}
+
+			// Upload labeler binary
+			labelerLocal := filepath.Join(rootDir, "bin", "atcr-labeler")
+			labelerRemote := naming.InstallDir() + "/bin/" + naming.Labeler()
+			if err := scpFile(labelerLocal, t.ip, labelerRemote); err != nil {
+				return fmt.Errorf("upload labeler: %w", err)
+			}
+
+			// Ensure labeler data dirs exist
+			labelerSetup := fmt.Sprintf(`mkdir -p %s
+chown -R %s:%s %s`,
+				naming.LabelerDataDir(),
+				naming.SystemUser(), naming.SystemUser(), naming.LabelerDataDir())
+			if _, err := runSSH(t.ip, labelerSetup, false); err != nil {
+				return fmt.Errorf("labeler dir setup: %w", err)
+			}
+
+			labelerRestart = fmt.Sprintf("\nsystemctl restart %s", naming.Labeler())
+		}
+
		// Restart services and health check
		restartScript := fmt.Sprintf(`set -euo pipefail
%s
-systemctl restart %s%s
+systemctl restart %s%s%s
sleep 2
curl -sf %s > /dev/null && echo "HEALTH_OK" || echo "HEALTH_FAIL"
-%s`, daemonReload, t.serviceName, scannerRestart, t.healthURL, scannerHealthCheck)
+%s`, daemonReload, t.serviceName, scannerRestart, labelerRestart, t.healthURL, scannerHealthCheck)

		output, err := runSSH(t.ip, restartScript, true)
		if err != nil {

@@ -289,6 +364,7 @@ func configValsFromState(state *InfraState) *ConfigValues {
	naming := state.Naming()
	_, baseDomain, _, _ := extractFromAppviewTemplate()
	holdDomain := state.Zone + ".cove." + baseDomain
+	labelerDomain := "labeler." + baseDomain

	return &ConfigValues{
		S3Endpoint: state.ObjectStorage.Endpoint,

@@ -299,6 +375,7 @@ func configValsFromState(state *InfraState) *ConfigValues {
		Zone:          state.Zone,
		HoldDomain:    holdDomain,
		HoldDid:       "did:web:" + holdDomain,
+		LabelerDomain: labelerDomain,
		BasePath:      naming.BasePath(),
		ScannerSecret: state.ScannerSecret,
	}

@@ -19,6 +19,9 @@ services:
      # ATCR_SERVER_CLIENT_SHORT_NAME: "Seamark"
      ATCR_SERVER_MANAGED_HOLDS: did:web:172.28.0.3%3A8080
      ATCR_SERVER_DEFAULT_HOLD_DID: did:web:172.28.0.3%3A8080
+     # Labeler URL (HTTP for dev — ParseLabelerURL accepts it directly so we don't
+     # have to round-trip through did:web → https:// resolution).
+     ATCR_LABELER_DID: http://172.28.0.4:5002
      ATCR_SERVER_TEST_MODE: true
      ATCR_LOG_LEVEL: debug
      LOG_SHIPPER_BACKEND: victoria

@@ -62,6 +65,13 @@ services:
      HOLD_REGISTRATION_ALLOW_ALL_CREW: true
      HOLD_SERVER_TEST_MODE: true
      HOLD_LOG_LEVEL: debug
+     # Subscribe to the dev labeler so takedowns purge records on this hold and
+     # GC honors the reversibility window. Same value the appview uses for
+     # ATCR_LABELER_DID — accepts a did:web identifier or a raw URL.
+     HOLD_LABELER_DID: http://172.28.0.4:5002
+     # Short grace window for dev so the takedown→GC path is exercisable without
+     # waiting weeks. Production default is 720h (30 days).
+     HOLD_LABELER_GRACE_WINDOW: 1h
      LOG_SHIPPER_BACKEND: victoria
      LOG_SHIPPER_URL: http://172.28.0.10:9428
      # S3 storage config comes from env_file (AWS_*, S3_*)

@@ -97,6 +107,52 @@ services:
      atcr-network:
        ipv4_address: 172.28.0.3

+  atcr-labeler:
+    # Base config: config-labeler.example.yaml (passed via Air entrypoint).
+    # Env vars below override config file values for local dev.
+    #
+    # Why did:web for dev: did:plc would submit a real PLC operation to plc.directory
+    # for every fresh dev environment, polluting production with throwaway DIDs that
+    # point at 172.28.0.x. did:web is purely self-served via /.well-known/did.json so
+    # nothing leaks. Switch to plc + a real public_url for production.
+    environment:
+      LABELER_LABELER_DID_METHOD: web
+      LABELER_LABELER_PUBLIC_URL: http://172.28.0.4:5002
+      LABELER_LABELER_OWNER_DID: did:plc:pddp4xt5lgnv2qsegbzzs4xg
+      LABELER_LABELER_DATA_DIR: /var/lib/atcr-labeler
+      LABELER_SERVER_TEST_MODE: true
+      LABELER_LOG_LEVEL: debug
+      LOG_SHIPPER_BACKEND: victoria
+      LOG_SHIPPER_URL: http://172.28.0.10:9428
+    logging:
+      driver: json-file
+      options:
+        max-size: "10m"
+        max-file: "1"
+    build:
+      context: .
+      dockerfile: Dockerfile.dev
+      args:
+        AIR_CONFIG: .air.labeler.toml
+    image: atcr-labeler-dev:latest
+    container_name: atcr-labeler
+    ports:
+      - "5002:5002"
+    volumes:
+      # Mount source code for Air hot reload
+      - .:/app:z
+      - go-mod-cache:/go/pkg/mod
+      # Persist signing key + did.txt + label database across container restarts so
+      # dev signatures stay verifiable. Wipe with `docker compose down -v` to reset.
+      - atcr-labeler:/var/lib/atcr-labeler
+    restart: unless-stopped
+    dns:
+      - 8.8.8.8
+      - 1.1.1.1
+    networks:
+      atcr-network:
+        ipv4_address: 172.28.0.4
+
  # Victoria Logs for centralized log storage
  # Uncomment to enable, then set LOG_SHIPPER_* env vars above
  victorialogs:

@@ -123,6 +179,7 @@ networks:

volumes:
  atcr-hold:
+  atcr-labeler:
  atcr-auth:
  atcr-ui:
  go-mod-cache:

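The compose comments above lean on did:web being self-served. A minimal sketch of that property, assuming only net/http; the handler and the DID document's shape are illustrative, not the labeler's actual code:

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

func main() {
	// did:web resolution is just an HTTPS (or, in dev, HTTP) GET of this path,
	// so serving it locally means no external registry ever sees the dev DID.
	http.HandleFunc("/.well-known/did.json", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_ = json.NewEncoder(w).Encode(map[string]any{
			"@context": []string{"https://www.w3.org/ns/did/v1"},
			// Identity derived from the host serving this document; this value
			// mirrors the dev address used above and is an assumption here.
			"id": "did:web:172.28.0.4%3A5002",
		})
	})
	log.Fatal(http.ListenAndServe(":5002", nil))
}
```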
@@ -206,8 +206,8 @@ server:
billing:
  enabled: true
  currency: usd
-  success_url: "{base_url}/settings#storage"
-  cancel_url: "{base_url}/settings#storage"
+  success_url: "{base_url}/settings/billing"
+  cancel_url: "{base_url}/settings/billing"
  tiers:
    - name: "Free"
      # No stripe_price = free tier

@@ -68,7 +68,7 @@ git push origin v1.0.0
**Usage:**
```bash
# Linux/macOS
-curl -fsSL https://atcr.io/install.sh | bash
+curl -fsSL https://atcr.io/static/install.sh | bash

# Windows (PowerShell)
iwr -useb https://atcr.io/install.ps1 | iex

@@ -113,7 +113,7 @@ go install atcr.io/cmd/credential-helper@latest
sudo mv $(go env GOPATH)/bin/credential-helper /usr/local/bin/docker-credential-atcr
```

-**Note:** This requires Go 1.23+ and compiles locally.
+**Note:** This requires Go 1.26+ and compiles locally.

## Release Process

@@ -138,7 +138,7 @@ sudo mv $(go env GOPATH)/bin/credential-helper /usr/local/bin/docker-credential-
- Visit: https://github.com/atcr-io/atcr/releases
- Test install script:
  ```bash
- ATCR_VERSION=v1.0.0 curl -fsSL https://atcr.io/install.sh | bash
+ ATCR_VERSION=v1.0.0 curl -fsSL https://atcr.io/static/install.sh | bash
  docker-credential-atcr version
  ```

@@ -266,7 +266,7 @@ Docker looks for binaries named `docker-credential-*` in PATH:
# Clean install in fresh environment
docker run --rm -it ubuntu:latest bash
apt update && apt install -y curl
-curl -fsSL https://atcr.io/install.sh | bash
+curl -fsSL https://atcr.io/static/install.sh | bash
```

2. **Test Docker integration:**

@@ -37,6 +37,7 @@ This document lists all XRPC endpoints implemented in the Hold service (`pkg/hol
|----------|--------|-------------|
| `/xrpc/com.atproto.repo.deleteRecord` | POST | Delete a record |
| `/xrpc/com.atproto.repo.uploadBlob` | POST | Upload ATProto blob |
+| `/xrpc/io.atcr.hold.purgeManifest` | POST | Purge layer/scan/image-config records for a manifest (eager delete + takedown). Idempotent. |

### Auth Required (Service Token or DPoP)

@@ -82,6 +83,7 @@ All require `blob:write` permission via service token:
| `/xrpc/io.atcr.hold.getQuota` | GET | none | Get user quota info |
| `/xrpc/io.atcr.hold.getLayersForManifest` | GET | none | Get layer records for a manifest AT-URI |
| `/xrpc/io.atcr.hold.image.getConfig` | GET | none | Get OCI image config record for a manifest digest |
+| `/xrpc/io.atcr.hold.purgeManifest` | POST | owner/crew admin | Purge layer/scan/image-config records for a single manifest URI. Called by appview on UI delete; called internally on takedown receipt. Does not delete S3 blobs (GC handles those). |
| `/xrpc/io.atcr.hold.listTiers` | GET | none | List hold's available tiers with quotas and features (scanOnPush) |
| `/xrpc/io.atcr.hold.updateCrewTier` | POST | appview token | Update crew member's tier |

@@ -202,6 +202,52 @@ SBOM results are stored in two places:
- Record key: SBOM manifest digest
- Contains reference to subject image

## Scan Record Status

Every scan attempt produces an `io.atcr.hold.scan` record. The `status` field
tells the appview how to render the result:

| Status | Meaning | Stale-loop behavior |
|--------|---------|---------------------|
| `ok` (or empty) | Scanner produced an SBOM. Vulnerability counts populated; SBOM blob populated. | Re-scanned on the rescan interval (default 7d). |
| `failed` | Scanner ran but errored (network, OOM, parse failure). No SBOM, no counts. | Re-scanned on the rescan interval — failures may be transient. |
| `skipped` | Scanner intentionally bypassed the artifact (helm chart, in-toto attestation, DSSE envelope). The `reason` field explains why. | **Never re-queued.** A skipped record won't change without a code change in the scanner. |

Records written before the `status` field existed have an empty status. The
appview treats empty + nil-blob + zero-count as failed (legacy fallback).

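A minimal sketch of that fallback as the appview might apply it; the field names are assumptions, only the rule itself comes from the text above:

```go
// Sketch of the legacy fallback rule. Field names are illustrative; the real
// record type lives in the hold's generated lexicon bindings.
func effectiveStatus(status string, sbomBlob []byte, vulnCount int) string {
	if status != "" {
		return status // "ok", "failed", or "skipped"
	}
	if sbomBlob == nil && vulnCount == 0 {
		return "failed" // pre-status record with nothing useful: treat as failed
	}
	return "ok"
}
```
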
### Unscannable artifact types

The scanner skips artifacts whose config media type appears in
`unscannableConfigTypes` (`scanner/internal/scan/worker.go`). Currently:

- `application/vnd.cncf.helm.config.v1+json` — Helm charts. Rendered with a
  helm-aware digest page (`pkg/appview/handlers/digest.go`) that shows
  Chart.yaml metadata instead of layers / vulns / SBOM.
- `application/vnd.in-toto+json` — in-toto attestations.
- `application/vnd.dsse.envelope.v1+json` — DSSE envelopes (SLSA provenance).

For these types the appview's vuln/SBOM tabs render
*"Vulnerability scanning isn't applied to this artifact type."* — no retry hint.

To add a new unscannable type: append the media type to
`unscannableConfigTypes`. Existing records won't auto-rewrite — run
`atcr-hold scan-backfill` once to convert any pre-existing failure records
into skipped records (see below).

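For orientation, a minimal sketch of what that check amounts to; the set literal mirrors the list above, while the helper name and signature are assumptions:

```go
// Illustrative skip check. The unscannableConfigTypes name matches the docs;
// shouldSkip and its signature are assumptions for this sketch.
var unscannableConfigTypes = map[string]bool{
	"application/vnd.cncf.helm.config.v1+json": true, // Helm charts
	"application/vnd.in-toto+json":             true, // in-toto attestations
	"application/vnd.dsse.envelope.v1+json":    true, // DSSE envelopes (SLSA)
}

func shouldSkip(configMediaType string) (bool, string) {
	if unscannableConfigTypes[configMediaType] {
		return true, "unscannable artifact type " + configMediaType
	}
	return false, ""
}
```
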
### Backfill tool

`atcr-hold scan-backfill --config <path>` walks every scan record on the
hold and rewrites legacy ones (empty status + nil blob + zero counts) using
the manifest's layer media types as a signal:

- Layer media type contains `helm.chart.content`, `in-toto`, or
  `dsse.envelope` → `status="skipped"`.
- Otherwise → `status="failed"`.

The tool is idempotent and preserves the original `scannedAt`, so it can be
re-run safely. Run once per hold after upgrading.

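The classification rule is simple enough to sketch. Assume `layerMediaTypes` came from the manifest; the function name is illustrative:

```go
import "strings"

// Sketch of scan-backfill's legacy-record classification, per the rules above.
func backfillStatus(layerMediaTypes []string) string {
	for _, mt := range layerMediaTypes {
		if strings.Contains(mt, "helm.chart.content") ||
			strings.Contains(mt, "in-toto") ||
			strings.Contains(mt, "dsse.envelope") {
			return "skipped"
		}
	}
	return "failed"
}
```
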
## Accessing SBOMs

Multiple methods for discovering and retrieving SBOM data.

@@ -141,7 +141,6 @@ jetstream.backfill_enabled → ATCR_JETSTREAM_BACKFILL_ENABLED
| `health` | Hold health check interval and cache TTL | Sensible defaults (15m) |
| `log_shipper` | Remote log shipping (Victoria, OpenSearch, Loki) | Disabled by default |
| `legal` | Terms/privacy page customization | Optional |
| `credential_helper` | Credential helper download source | Optional |

### Auto-generated files

179 go.mod
@@ -1,19 +1,19 @@
module atcr.io

-go 1.25.7
+go 1.26.2

require (
	github.com/aws/aws-sdk-go v1.55.8
-	github.com/aws/aws-sdk-go-v2 v1.41.1
-	github.com/aws/aws-sdk-go-v2/config v1.32.7
-	github.com/aws/aws-sdk-go-v2/credentials v1.19.7
-	github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0
-	github.com/bluesky-social/indigo v0.0.0-20260213003059-85cdd0d6871c
-	github.com/charmbracelet/huh v0.8.0
-	github.com/charmbracelet/huh/spinner v0.0.0-20260216111231-bffc99a26329
+	github.com/aws/aws-sdk-go-v2 v1.41.6
+	github.com/aws/aws-sdk-go-v2/config v1.32.16
+	github.com/aws/aws-sdk-go-v2/credentials v1.19.15
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.99.1
+	github.com/bluesky-social/indigo v0.0.0-20260417172304-7da09df6081d
+	github.com/charmbracelet/huh v1.0.0
+	github.com/charmbracelet/huh/spinner v0.0.0-20260223110133-9dc45e34a40b
	github.com/charmbracelet/lipgloss v1.1.0
	github.com/did-method-plc/go-didplc v0.0.0-20251009212921-7b7a252b8019
-	github.com/distribution/distribution/v3 v3.0.0
+	github.com/distribution/distribution/v3 v3.1.0
	github.com/distribution/reference v0.6.0
	github.com/earthboundkid/versioninfo/v2 v2.24.1
	github.com/go-chi/chi/v5 v5.2.5

@@ -21,16 +21,17 @@ require (
	github.com/go-viper/mapstructure/v2 v2.5.0
	github.com/goki/freetype v1.0.5
	github.com/golang-jwt/jwt/v5 v5.3.1
+	github.com/google/go-querystring v1.2.0
	github.com/google/uuid v1.6.0
	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674
	github.com/ipfs/go-block-format v0.2.3
-	github.com/ipfs/go-cid v0.6.0
+	github.com/ipfs/go-cid v0.6.1
	github.com/ipfs/go-ipfs-blockstore v1.3.1
	github.com/ipfs/go-ipld-cbor v0.2.1
	github.com/ipfs/go-ipld-format v0.6.3
	github.com/ipfs/go-libipfs v0.7.0
	github.com/ipld/go-car v0.6.3
-	github.com/klauspost/compress v1.18.4
+	github.com/klauspost/compress v1.18.5
	github.com/microcosm-cc/bluemonday v1.0.27
	github.com/multiformats/go-multihash v0.2.3
	github.com/opencontainers/go-digest v1.0.0

@@ -39,39 +40,38 @@ require (
	github.com/srwiley/oksvg v0.0.0-20221011165216-be6e8873101c
	github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef
	github.com/stretchr/testify v1.11.1
-	github.com/stripe/stripe-go/v84 v84.3.0
+	github.com/stripe/stripe-go/v84 v84.4.1
	github.com/tursodatabase/go-libsql v0.0.0-20251219133454-43644db490ff
	github.com/whyrusleeping/cbor-gen v0.3.1
-	github.com/yuin/goldmark v1.7.16
-	go.opentelemetry.io/otel v1.40.0
+	github.com/yuin/goldmark v1.8.2
+	go.opentelemetry.io/otel v1.43.0
	go.yaml.in/yaml/v4 v4.0.0-rc.4
-	golang.org/x/crypto v0.48.0
-	golang.org/x/image v0.36.0
-	golang.org/x/sys v0.41.0
+	golang.org/x/crypto v0.50.0
+	golang.org/x/image v0.39.0
+	golang.org/x/sys v0.43.0
	golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da
)

require (
	github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect
	github.com/RussellLuo/slidingwindow v0.0.0-20200528002341-535bb99d338b // indirect
-	github.com/ajg/form v1.6.1 // indirect
+	github.com/ajg/form v1.7.1 // indirect
	github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
	github.com/atotto/clipboard v0.1.4 // indirect
-	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect
-	github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
-	github.com/aws/smithy-go v1.24.0 // indirect
+	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.9 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.22 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.22 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.22 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.23 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.14 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.22 // indirect
+	github.com/aws/aws-sdk-go-v2/service/signin v1.0.10 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.30.16 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.42.0 // indirect
+	github.com/aws/smithy-go v1.25.0 // indirect
	github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
	github.com/aymerick/douceur v0.2.0 // indirect
	github.com/beorn7/perks v1.0.1 // indirect

@@ -79,13 +79,15 @@ require (
	github.com/catppuccin/go v0.3.0 // indirect
	github.com/cenkalti/backoff/v5 v5.0.3 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
-	github.com/charmbracelet/bubbles v0.21.1-0.20250623103423-23b8fd6302d7 // indirect
+	github.com/charmbracelet/bubbles v1.0.0 // indirect
	github.com/charmbracelet/bubbletea v1.3.10 // indirect
-	github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
-	github.com/charmbracelet/x/ansi v0.10.1 // indirect
-	github.com/charmbracelet/x/cellbuf v0.0.13 // indirect
-	github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0 // indirect
-	github.com/charmbracelet/x/term v0.2.1 // indirect
+	github.com/charmbracelet/colorprofile v0.4.3 // indirect
+	github.com/charmbracelet/x/ansi v0.11.7 // indirect
+	github.com/charmbracelet/x/cellbuf v0.0.15 // indirect
+	github.com/charmbracelet/x/exp/strings v0.1.0 // indirect
+	github.com/charmbracelet/x/term v0.2.2 // indirect
+	github.com/clipperhouse/displaywidth v0.11.0 // indirect
+	github.com/clipperhouse/uax29/v2 v2.7.0 // indirect
	github.com/coreos/go-systemd/v22 v22.7.0 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect

@@ -94,75 +96,74 @@ require (
	github.com/docker/go-metrics v0.0.1 // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
-	github.com/fatih/color v1.18.0 // indirect
+	github.com/fatih/color v1.19.0 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/fsnotify/fsnotify v1.9.0 // indirect
	github.com/gammazero/chanqueue v1.1.2 // indirect
	github.com/gammazero/deque v1.2.1 // indirect
-	github.com/go-jose/go-jose/v4 v4.1.3 // indirect
+	github.com/go-jose/go-jose/v4 v4.1.4 // indirect
	github.com/go-logr/logr v1.4.3 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/snappy v1.0.0 // indirect
-	github.com/google/go-querystring v1.2.0 // indirect
	github.com/gorilla/css v1.0.1 // indirect
	github.com/gorilla/handlers v1.5.2 // indirect
	github.com/gorilla/mux v1.8.1 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.8 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.29.0 // indirect
	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
	github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
	github.com/hashicorp/golang-lru v1.0.2 // indirect
	github.com/hashicorp/golang-lru/arc/v2 v2.0.7 // indirect
	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
-	github.com/ipfs/bbloom v0.0.4 // indirect
-	github.com/ipfs/boxo v0.36.0 // indirect
+	github.com/ipfs/bbloom v0.1.0 // indirect
+	github.com/ipfs/boxo v0.38.0 // indirect
	github.com/ipfs/go-cidutil v0.1.1 // indirect
	github.com/ipfs/go-datastore v0.9.1 // indirect
	github.com/ipfs/go-dsqueue v0.2.0 // indirect
	github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect
	github.com/ipfs/go-ipfs-util v0.0.3 // indirect
-	github.com/ipfs/go-ipld-legacy v0.2.2 // indirect
+	github.com/ipfs/go-ipld-legacy v0.3.0 // indirect
	github.com/ipfs/go-log v1.0.5 // indirect
	github.com/ipfs/go-log/v2 v2.9.1 // indirect
	github.com/ipfs/go-metrics-interface v0.3.0 // indirect
	github.com/ipld/go-codec-dagpb v1.7.0 // indirect
-	github.com/ipld/go-ipld-prime v0.21.0 // indirect
+	github.com/ipld/go-ipld-prime v0.22.0 // indirect
	github.com/jinzhu/inflection v1.0.0 // indirect
	github.com/jinzhu/now v1.1.5 // indirect
	github.com/jmespath/go-jmespath v0.4.0 // indirect
	github.com/klauspost/cpuid/v2 v2.3.0 // indirect
	github.com/libsql/sqlite-antlr4-parser v0.0.0-20240721121621-c0bdc870f11c // indirect
-	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
+	github.com/lucasb-eyer/go-colorful v1.4.0 // indirect
	github.com/mattn/go-colorable v0.1.14 // indirect
-	github.com/mattn/go-isatty v0.0.20 // indirect
-	github.com/mattn/go-localereader v0.0.1 // indirect
-	github.com/mattn/go-runewidth v0.0.16 // indirect
+	github.com/mattn/go-isatty v0.0.21 // indirect
+	github.com/mattn/go-localereader v0.0.2-0.20220822084749-2491eb6c1c75 // indirect
+	github.com/mattn/go-runewidth v0.0.23 // indirect
	github.com/minio/sha256-simd v1.0.1 // indirect
	github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
-	github.com/mr-tron/base58 v1.2.0 // indirect
+	github.com/mr-tron/base58 v1.3.0 // indirect
	github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
	github.com/muesli/cancelreader v0.2.2 // indirect
	github.com/muesli/termenv v0.16.0 // indirect
	github.com/multiformats/go-base32 v0.1.0 // indirect
	github.com/multiformats/go-base36 v0.2.0 // indirect
-	github.com/multiformats/go-multibase v0.2.0 // indirect
+	github.com/multiformats/go-multibase v0.3.0 // indirect
	github.com/multiformats/go-multicodec v0.10.0 // indirect
	github.com/multiformats/go-varint v0.1.0 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/opencontainers/image-spec v1.1.1 // indirect
	github.com/opentracing/opentracing-go v1.2.0 // indirect
-	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
+	github.com/pelletier/go-toml/v2 v2.3.0 // indirect
	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f // indirect
+	github.com/polydawn/refmt v0.89.1-0.20231129105047-37766d95467a // indirect
	github.com/prometheus/client_golang v1.23.2 // indirect
	github.com/prometheus/client_model v0.6.2 // indirect
	github.com/prometheus/common v0.67.5 // indirect
	github.com/prometheus/otlptranslator v1.0.0 // indirect
-	github.com/prometheus/procfs v0.19.2 // indirect
-	github.com/redis/go-redis/extra/rediscmd/v9 v9.17.3 // indirect
-	github.com/redis/go-redis/extra/redisotel/v9 v9.17.3 // indirect
-	github.com/redis/go-redis/v9 v9.17.3 // indirect
+	github.com/prometheus/procfs v0.20.1 // indirect
+	github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 // indirect
+	github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 // indirect
+	github.com/redis/go-redis/v9 v9.18.0 // indirect
	github.com/rivo/uniseg v0.4.7 // indirect
	github.com/sagikazarmark/locafero v0.12.0 // indirect
	github.com/sirupsen/logrus v1.9.4 // indirect

@@ -175,40 +176,40 @@ require (
	gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b // indirect
	gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 // indirect
	go.opentelemetry.io/auto/sdk v1.2.1 // indirect
-	go.opentelemetry.io/contrib/bridges/prometheus v0.65.0 // indirect
-	go.opentelemetry.io/contrib/exporters/autoexport v0.65.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.16.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.16.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.40.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 // indirect
-	go.opentelemetry.io/otel/exporters/prometheus v0.62.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.16.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0 // indirect
-	go.opentelemetry.io/otel/log v0.16.0 // indirect
-	go.opentelemetry.io/otel/metric v1.40.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.40.0 // indirect
-	go.opentelemetry.io/otel/sdk/log v0.16.0 // indirect
-	go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect
-	go.opentelemetry.io/otel/trace v1.40.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.9.0 // indirect
+	go.opentelemetry.io/contrib/bridges/prometheus v0.68.0 // indirect
+	go.opentelemetry.io/contrib/exporters/autoexport v0.68.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.19.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.19.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.43.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.43.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 // indirect
+	go.opentelemetry.io/otel/exporters/prometheus v0.65.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.19.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.43.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0 // indirect
+	go.opentelemetry.io/otel/log v0.19.0 // indirect
+	go.opentelemetry.io/otel/metric v1.43.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.43.0 // indirect
+	go.opentelemetry.io/otel/sdk/log v0.19.0 // indirect
+	go.opentelemetry.io/otel/sdk/metric v1.43.0 // indirect
+	go.opentelemetry.io/otel/trace v1.43.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.10.0 // indirect
	go.uber.org/atomic v1.11.0 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	go.uber.org/zap v1.27.1 // indirect
-	go.yaml.in/yaml/v2 v2.4.3 // indirect
+	go.yaml.in/yaml/v2 v2.4.4 // indirect
	go.yaml.in/yaml/v3 v3.0.4 // indirect
-	golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect
-	golang.org/x/net v0.50.0 // indirect
-	golang.org/x/sync v0.19.0 // indirect
-	golang.org/x/text v0.34.0 // indirect
-	golang.org/x/time v0.14.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect
-	google.golang.org/grpc v1.79.0 // indirect
+	golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f // indirect
+	golang.org/x/net v0.53.0 // indirect
+	golang.org/x/sync v0.20.0 // indirect
+	golang.org/x/text v0.36.0 // indirect
+	golang.org/x/time v0.15.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20260414002931-afd174a4e478 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20260414002931-afd174a4e478 // indirect
+	google.golang.org/grpc v1.80.0 // indirect
	google.golang.org/protobuf v1.36.11 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect

423 go.work.sum
File diff suppressed because it is too large
@@ -18,7 +18,7 @@
    "configJson": {
      "type": "string",
      "description": "Raw OCI image config JSON blob",
-     "maxLength": 65536
+     "maxLength": 1000000
    },
    "createdAt": {
      "type": "string",

54 lexicons/io/atcr/hold/purgeManifest.json  Normal file
@@ -0,0 +1,54 @@
{
  "lexicon": 1,
  "id": "io.atcr.hold.purgeManifest",
  "defs": {
    "main": {
      "type": "procedure",
      "description": "Purge layer, scan, and image-config records associated with a manifest. Used by the appview when a user deletes a manifest, and by the hold's own labeler subscriber on takedown receipt. Idempotent: missing records are not errors. Does not delete S3 blobs (GC handles that based on remaining references).",
      "input": {
        "encoding": "application/json",
        "schema": {
          "type": "object",
          "required": ["manifestUri"],
          "properties": {
            "manifestUri": {
              "type": "string",
              "format": "at-uri",
              "description": "AT-URI of the manifest record, e.g. at://did:plc:xyz/io.atcr.manifest/<digest>"
            }
          }
        }
      },
      "output": {
        "encoding": "application/json",
        "schema": {
          "type": "object",
          "required": ["success", "layersDeleted", "scanDeleted", "imageConfigDeleted"],
          "properties": {
            "success": {
              "type": "boolean",
              "description": "Whether the purge completed successfully"
            },
            "layersDeleted": {
              "type": "integer",
              "description": "Number of layer records deleted"
            },
            "scanDeleted": {
              "type": "boolean",
              "description": "Whether a scan record was deleted"
            },
            "imageConfigDeleted": {
              "type": "boolean",
              "description": "Whether an image config record was deleted"
            }
          }
        }
      },
      "errors": [
        { "name": "AuthRequired" },
        { "name": "InvalidRequest" },
        { "name": "PurgeFailed" }
      ]
    }
  }
}
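For reference, a hedged sketch of invoking this procedure over plain HTTP. The endpoint path and payload shapes follow the lexicon above; the hold URL, token plumbing, and manifest rkey are assumptions for illustration:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// purgeOutput mirrors the output schema of io.atcr.hold.purgeManifest.
type purgeOutput struct {
	Success            bool `json:"success"`
	LayersDeleted      int  `json:"layersDeleted"`
	ScanDeleted        bool `json:"scanDeleted"`
	ImageConfigDeleted bool `json:"imageConfigDeleted"`
}

func main() {
	// Hypothetical hold base URL, DID, and digest; replace with real values.
	body, _ := json.Marshal(map[string]string{
		"manifestUri": "at://did:plc:xyz/io.atcr.manifest/sha256-deadbeef",
	})
	req, _ := http.NewRequest(http.MethodPost,
		"https://hold.example.com/xrpc/io.atcr.hold.purgeManifest",
		bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <service-token>") // needs owner/crew admin

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out purgeOutput
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("purged: %+v\n", out) // idempotent: re-running reports zero deletions
}
```
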
@@ -69,6 +69,17 @@
      "type": "string",
      "format": "datetime",
      "description": "RFC3339 timestamp of when the scan completed"
    },
+   "status": {
+     "type": "string",
+     "knownValues": ["ok", "failed", "skipped"],
+     "description": "Outcome of the scan attempt. 'ok' (or omitted, for back-compat) means the scanner produced an SBOM. 'failed' means the scanner ran but errored. 'skipped' means the scanner intentionally bypassed this artifact type (e.g. helm charts).",
+     "maxLength": 32
+   },
+   "reason": {
+     "type": "string",
+     "description": "Optional human-readable explanation for non-ok status (e.g. 'unscannable artifact type application/vnd.cncf.helm.config.v1+json').",
+     "maxLength": 256
+   }
    }
  }

471 package-lock.json  generated
@@ -8,24 +8,23 @@
      "name": "atcr-styles",
      "version": "1.0.0",
      "dependencies": {
-       "actor-typeahead": "^0.1.2",
        "htmx-ext-json-enc": "^2.0.3",
        "htmx.org": "^2.0.8",
-       "lucide": "^0.577.0"
+       "lucide": "^1.8.0"
      },
      "devDependencies": {
-       "@tailwindcss/cli": "^4.2.1",
+       "@tailwindcss/cli": "^4.2.2",
        "@tailwindcss/typography": "^0.5.19",
        "daisyui": "^5.5.19",
-       "esbuild": "^0.27.4",
+       "esbuild": "^0.28.0",
        "glob": "^13.0.6",
-       "tailwindcss": "^4.2"
+       "tailwindcss": "^4.2.2"
      }
    },
    "node_modules/@esbuild/aix-ppc64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.4.tgz",
-     "integrity": "sha512-cQPwL2mp2nSmHHJlCyoXgHGhbEPMrEEU5xhkcy3Hs/O7nGZqEpZ2sUtLaL9MORLtDfRvVl2/3PAuEkYZH0Ty8Q==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.28.0.tgz",
+     "integrity": "sha512-lhRUCeuOyJQURhTxl4WkpFTjIsbDayJHih5kZC1giwE+MhIzAb7mEsQMqMf18rHLsrb5qI1tafG20mLxEWcWlA==",
      "cpu": [
        "ppc64"
      ],

@@ -40,9 +39,9 @@
      }
    },
    "node_modules/@esbuild/android-arm": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.4.tgz",
-     "integrity": "sha512-X9bUgvxiC8CHAGKYufLIHGXPJWnr0OCdR0anD2e21vdvgCI8lIfqFbnoeOz7lBjdrAGUhqLZLcQo6MLhTO2DKQ==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.28.0.tgz",
+     "integrity": "sha512-wqh0ByljabXLKHeWXYLqoJ5jKC4XBaw6Hk08OfMrCRd2nP2ZQ5eleDZC41XHyCNgktBGYMbqnrJKq/K/lzPMSQ==",
      "cpu": [
        "arm"
      ],

@@ -57,9 +56,9 @@
      }
    },
    "node_modules/@esbuild/android-arm64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.4.tgz",
-     "integrity": "sha512-gdLscB7v75wRfu7QSm/zg6Rx29VLdy9eTr2t44sfTW7CxwAtQghZ4ZnqHk3/ogz7xao0QAgrkradbBzcqFPasw==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.28.0.tgz",
+     "integrity": "sha512-+WzIXQOSaGs33tLEgYPYe/yQHf0WTU0X42Jca3y8NWMbUVhp7rUnw+vAsRC/QiDrdD31IszMrZy+qwPOPjd+rw==",
      "cpu": [
        "arm64"
      ],

@@ -74,9 +73,9 @@
      }
    },
    "node_modules/@esbuild/android-x64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.4.tgz",
-     "integrity": "sha512-PzPFnBNVF292sfpfhiyiXCGSn9HZg5BcAz+ivBuSsl6Rk4ga1oEXAamhOXRFyMcjwr2DVtm40G65N3GLeH1Lvw==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.28.0.tgz",
+     "integrity": "sha512-+VJggoaKhk2VNNqVL7f6S189UzShHC/mR9EE8rDdSkdpN0KflSwWY/gWjDrNxxisg8Fp1ZCD9jLMo4m0OUfeUA==",
      "cpu": [
        "x64"
      ],

@@ -91,9 +90,9 @@
      }
    },
    "node_modules/@esbuild/darwin-arm64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.4.tgz",
-     "integrity": "sha512-b7xaGIwdJlht8ZFCvMkpDN6uiSmnxxK56N2GDTMYPr2/gzvfdQN8rTfBsvVKmIVY/X7EM+/hJKEIbbHs9oA4tQ==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.28.0.tgz",
+     "integrity": "sha512-0T+A9WZm+bZ84nZBtk1ckYsOvyA3x7e2Acj1KdVfV4/2tdG4fzUp91YHx+GArWLtwqp77pBXVCPn2We7Letr0Q==",
      "cpu": [
        "arm64"
      ],

@@ -108,9 +107,9 @@
      }
    },
    "node_modules/@esbuild/darwin-x64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.4.tgz",
-     "integrity": "sha512-sR+OiKLwd15nmCdqpXMnuJ9W2kpy0KigzqScqHI3Hqwr7IXxBp3Yva+yJwoqh7rE8V77tdoheRYataNKL4QrPw==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.28.0.tgz",
+     "integrity": "sha512-fyzLm/DLDl/84OCfp2f/XQ4flmORsjU7VKt8HLjvIXChJoFFOIL6pLJPH4Yhd1n1gGFF9mPwtlN5Wf82DZs+LQ==",
      "cpu": [
        "x64"
      ],

@@ -125,9 +124,9 @@
      }
    },
    "node_modules/@esbuild/freebsd-arm64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.4.tgz",
-     "integrity": "sha512-jnfpKe+p79tCnm4GVav68A7tUFeKQwQyLgESwEAUzyxk/TJr4QdGog9sqWNcUbr/bZt/O/HXouspuQDd9JxFSw==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.28.0.tgz",
+     "integrity": "sha512-l9GeW5UZBT9k9brBYI+0WDffcRxgHQD8ShN2Ur4xWq/NFzUKm3k5lsH4PdaRgb2w7mI9u61nr2gI2mLI27Nh3Q==",
      "cpu": [
        "arm64"
      ],

@@ -142,9 +141,9 @@
      }
    },
    "node_modules/@esbuild/freebsd-x64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.4.tgz",
-     "integrity": "sha512-2kb4ceA/CpfUrIcTUl1wrP/9ad9Atrp5J94Lq69w7UwOMolPIGrfLSvAKJp0RTvkPPyn6CIWrNy13kyLikZRZQ==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.28.0.tgz",
+     "integrity": "sha512-BXoQai/A0wPO6Es3yFJ7APCiKGc1tdAEOgeTNy3SsB491S3aHn4S4r3e976eUnPdU+NbdtmBuLncYir2tMU9Nw==",
      "cpu": [
        "x64"
      ],

@@ -159,9 +158,9 @@
      }
    },
    "node_modules/@esbuild/linux-arm": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.4.tgz",
-     "integrity": "sha512-aBYgcIxX/wd5n2ys0yESGeYMGF+pv6g0DhZr3G1ZG4jMfruU9Tl1i2Z+Wnj9/KjGz1lTLCcorqE2viePZqj4Eg==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.28.0.tgz",
+     "integrity": "sha512-CjaaREJagqJp7iTaNQjjidaNbCKYcd4IDkzbwwxtSvjI7NZm79qiHc8HqciMddQ6CKvJT6aBd8lO9kN/ZudLlw==",
      "cpu": [
        "arm"
      ],

@@ -176,9 +175,9 @@
      }
    },
    "node_modules/@esbuild/linux-arm64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.4.tgz",
-     "integrity": "sha512-7nQOttdzVGth1iz57kxg9uCz57dxQLHWxopL6mYuYthohPKEK0vU0C3O21CcBK6KDlkYVcnDXY099HcCDXd9dA==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.28.0.tgz",
+     "integrity": "sha512-RVyzfb3FWsGA55n6WY0MEIEPURL1FcbhFE6BffZEMEekfCzCIMtB5yyDcFnVbTnwk+CLAgTujmV/Lgvih56W+A==",
      "cpu": [
        "arm64"
      ],

@@ -193,9 +192,9 @@
      }
    },
    "node_modules/@esbuild/linux-ia32": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.4.tgz",
-     "integrity": "sha512-oPtixtAIzgvzYcKBQM/qZ3R+9TEUd1aNJQu0HhGyqtx6oS7qTpvjheIWBbes4+qu1bNlo2V4cbkISr8q6gRBFA==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.28.0.tgz",
+     "integrity": "sha512-KBnSTt1kxl9x70q+ydterVdl+Cn0H18ngRMRCEQfrbqdUuntQQ0LoMZv47uB97NljZFzY6HcfqEZ2SAyIUTQBQ==",
      "cpu": [
        "ia32"
      ],

@@ -210,9 +209,9 @@
      }
    },
    "node_modules/@esbuild/linux-loong64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.4.tgz",
-     "integrity": "sha512-8mL/vh8qeCoRcFH2nM8wm5uJP+ZcVYGGayMavi8GmRJjuI3g1v6Z7Ni0JJKAJW+m0EtUuARb6Lmp4hMjzCBWzA==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.28.0.tgz",
+     "integrity": "sha512-zpSlUce1mnxzgBADvxKXX5sl8aYQHo2ezvMNI8I0lbblJtp8V4odlm3Yzlj7gPyt3T8ReksE6bK+pT3WD+aJRg==",
      "cpu": [
        "loong64"
      ],

@@ -227,9 +226,9 @@
      }
    },
    "node_modules/@esbuild/linux-mips64el": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.4.tgz",
-     "integrity": "sha512-1RdrWFFiiLIW7LQq9Q2NES+HiD4NyT8Itj9AUeCl0IVCA459WnPhREKgwrpaIfTOe+/2rdntisegiPWn/r/aAw==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.28.0.tgz",
+     "integrity": "sha512-2jIfP6mmjkdmeTlsX/9vmdmhBmKADrWqN7zcdtHIeNSCH1SqIoNI63cYsjQR8J+wGa4Y5izRcSHSm8K3QWmk3w==",
      "cpu": [
        "mips64el"
      ],

@@ -244,9 +243,9 @@
      }
    },
    "node_modules/@esbuild/linux-ppc64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.4.tgz",
-     "integrity": "sha512-tLCwNG47l3sd9lpfyx9LAGEGItCUeRCWeAx6x2Jmbav65nAwoPXfewtAdtbtit/pJFLUWOhpv0FpS6GQAmPrHA==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.28.0.tgz",
+     "integrity": "sha512-bc0FE9wWeC0WBm49IQMPSPILRocGTQt3j5KPCA8os6VprfuJ7KD+5PzESSrJ6GmPIPJK965ZJHTUlSA6GNYEhg==",
      "cpu": [
        "ppc64"
      ],

@@ -261,9 +260,9 @@
      }
    },
    "node_modules/@esbuild/linux-riscv64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.4.tgz",
-     "integrity": "sha512-BnASypppbUWyqjd1KIpU4AUBiIhVr6YlHx/cnPgqEkNoVOhHg+YiSVxM1RLfiy4t9cAulbRGTNCKOcqHrEQLIw==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.28.0.tgz",
+     "integrity": "sha512-SQPZOwoTTT/HXFXQJG/vBX8sOFagGqvZyXcgLA3NhIqcBv1BJU1d46c0rGcrij2B56Z2rNiSLaZOYW5cUk7yLQ==",
      "cpu": [
        "riscv64"
      ],

@@ -278,9 +277,9 @@
      }
    },
    "node_modules/@esbuild/linux-s390x": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.4.tgz",
-     "integrity": "sha512-+eUqgb/Z7vxVLezG8bVB9SfBie89gMueS+I0xYh2tJdw3vqA/0ImZJ2ROeWwVJN59ihBeZ7Tu92dF/5dy5FttA==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.28.0.tgz",
+     "integrity": "sha512-SCfR0HN8CEEjnYnySJTd2cw0k9OHB/YFzt5zgJEwa+wL/T/raGWYMBqwDNAC6dqFKmJYZoQBRfHjgwLHGSrn3Q==",
      "cpu": [
        "s390x"
      ],

@@ -295,9 +294,9 @@
      }
    },
    "node_modules/@esbuild/linux-x64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.4.tgz",
-     "integrity": "sha512-S5qOXrKV8BQEzJPVxAwnryi2+Iq5pB40gTEIT69BQONqR7JH1EPIcQ/Uiv9mCnn05jff9umq/5nqzxlqTOg9NA==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.28.0.tgz",
+     "integrity": "sha512-us0dSb9iFxIi8srnpl931Nvs65it/Jd2a2K3qs7fz2WfGPHqzfzZTfec7oxZJRNPXPnNYZtanmRc4AL/JwVzHQ==",
      "cpu": [
        "x64"
      ],

@@ -312,9 +311,9 @@
      }
    },
    "node_modules/@esbuild/netbsd-arm64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.4.tgz",
-     "integrity": "sha512-xHT8X4sb0GS8qTqiwzHqpY00C95DPAq7nAwX35Ie/s+LO9830hrMd3oX0ZMKLvy7vsonee73x0lmcdOVXFzd6Q==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.28.0.tgz",
+     "integrity": "sha512-CR/RYotgtCKwtftMwJlUU7xCVNg3lMYZ0RzTmAHSfLCXw3NtZtNpswLEj/Kkf6kEL3Gw+BpOekRX0BYCtklhUw==",
      "cpu": [
        "arm64"
      ],

@@ -329,9 +328,9 @@
      }
    },
    "node_modules/@esbuild/netbsd-x64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.4.tgz",
-     "integrity": "sha512-RugOvOdXfdyi5Tyv40kgQnI0byv66BFgAqjdgtAKqHoZTbTF2QqfQrFwa7cHEORJf6X2ht+l9ABLMP0dnKYsgg==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.28.0.tgz",
+     "integrity": "sha512-nU1yhmYutL+fQ71Kxnhg8uEOdC0pwEW9entHykTgEbna2pw2dkbFSMeqjjyHZoCmt8SBkOSvV+yNmm94aUrrqw==",
      "cpu": [
        "x64"
      ],

@@ -346,9 +345,9 @@
      }
    },
    "node_modules/@esbuild/openbsd-arm64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.4.tgz",
-     "integrity": "sha512-2MyL3IAaTX+1/qP0O1SwskwcwCoOI4kV2IBX1xYnDDqthmq5ArrW94qSIKCAuRraMgPOmG0RDTA74mzYNQA9ow==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.28.0.tgz",
+     "integrity": "sha512-cXb5vApOsRsxsEl4mcZ1XY3D4DzcoMxR/nnc4IyqYs0rTI8ZKmW6kyyg+11Z8yvgMfAEldKzP7AdP64HnSC/6g==",
      "cpu": [
        "arm64"
      ],

@@ -363,9 +362,9 @@
      }
    },
    "node_modules/@esbuild/openbsd-x64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.4.tgz",
-     "integrity": "sha512-u8fg/jQ5aQDfsnIV6+KwLOf1CmJnfu1ShpwqdwC0uA7ZPwFws55Ngc12vBdeUdnuWoQYx/SOQLGDcdlfXhYmXQ==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.28.0.tgz",
+     "integrity": "sha512-8wZM2qqtv9UP3mzy7HiGYNH/zjTA355mpeuA+859TyR+e+Tc08IHYpLJuMsfpDJwoLo1ikIJI8jC3GFjnRClzA==",
      "cpu": [
        "x64"
      ],

@@ -380,9 +379,9 @@
      }
    },
    "node_modules/@esbuild/openharmony-arm64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.4.tgz",
-     "integrity": "sha512-JkTZrl6VbyO8lDQO3yv26nNr2RM2yZzNrNHEsj9bm6dOwwu9OYN28CjzZkH57bh4w0I2F7IodpQvUAEd1mbWXg==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.28.0.tgz",
+     "integrity": "sha512-FLGfyizszcef5C3YtoyQDACyg95+dndv79i2EekILBofh5wpCa1KuBqOWKrEHZg3zrL3t5ouE5jgr94vA+Wb2w==",
      "cpu": [
        "arm64"
      ],

@@ -397,9 +396,9 @@
      }
    },
    "node_modules/@esbuild/sunos-x64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.4.tgz",
-     "integrity": "sha512-/gOzgaewZJfeJTlsWhvUEmUG4tWEY2Spp5M20INYRg2ZKl9QPO3QEEgPeRtLjEWSW8FilRNacPOg8R1uaYkA6g==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.28.0.tgz",
+     "integrity": "sha512-1ZgjUoEdHZZl/YlV76TSCz9Hqj9h9YmMGAgAPYd+q4SicWNX3G5GCyx9uhQWSLcbvPW8Ni7lj4gDa1T40akdlw==",
      "cpu": [
        "x64"
      ],

@@ -414,9 +413,9 @@
      }
    },
    "node_modules/@esbuild/win32-arm64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.4.tgz",
-     "integrity": "sha512-Z9SExBg2y32smoDQdf1HRwHRt6vAHLXcxD2uGgO/v2jK7Y718Ix4ndsbNMU/+1Qiem9OiOdaqitioZwxivhXYg==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.28.0.tgz",
+     "integrity": "sha512-Q9StnDmQ/enxnpxCCLSg0oo4+34B9TdXpuyPeTedN/6+iXBJ4J+zwfQI28u/Jl40nOYAxGoNi7mFP40RUtkmUA==",
      "cpu": [
        "arm64"
      ],

@@ -431,9 +430,9 @@
      }
    },
    "node_modules/@esbuild/win32-ia32": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.4.tgz",
-     "integrity": "sha512-DAyGLS0Jz5G5iixEbMHi5KdiApqHBWMGzTtMiJ72ZOLhbu/bzxgAe8Ue8CTS3n3HbIUHQz/L51yMdGMeoxXNJw==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.28.0.tgz",
+     "integrity": "sha512-zF3ag/gfiCe6U2iczcRzSYJKH1DCI+ByzSENHlM2FcDbEeo5Zd2C86Aq0tKUYAJJ1obRP84ymxIAksZUcdztHA==",
      "cpu": [
        "ia32"
      ],

@@ -448,9 +447,9 @@
      }
    },
    "node_modules/@esbuild/win32-x64": {
-     "version": "0.27.4",
-     "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.4.tgz",
-     "integrity": "sha512-+knoa0BDoeXgkNvvV1vvbZX4+hizelrkwmGJBdT17t8FNPwG2lKemmuMZlmaNQ3ws3DKKCxpb4zRZEIp3UxFCg==",
+     "version": "0.28.0",
+     "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.28.0.tgz",
+     "integrity": "sha512-pEl1bO9mfAmIC+tW5btTmrKaujg3zGtUmWNdCw/xs70FBjwAL3o9OEKNHvNmnyylD6ubxUERiEhdsL0xBQ9efw==",
      "cpu": [
        "x64"
      ],

@@ -824,68 +823,68 @@
      }
    },
    "node_modules/@tailwindcss/cli": {
-     "version": "4.2.1",
-     "resolved": "https://registry.npmjs.org/@tailwindcss/cli/-/cli-4.2.1.tgz",
-     "integrity": "sha512-b7MGn51IA80oSG+7fuAgzfQ+7pZBgjzbqwmiv6NO7/+a1sev32cGqnwhscT7h0EcAvMa9r7gjRylqOH8Xhc4DA==",
+     "version": "4.2.2",
+     "resolved": "https://registry.npmjs.org/@tailwindcss/cli/-/cli-4.2.2.tgz",
+     "integrity": "sha512-iJS+8kAFZ8HPqnh0O5DHCLjo4L6dD97DBQEkrhfSO4V96xeefUus2jqsBs1dUMt3OU9Ks4qIkiY0mpL5UW+4LQ==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "@parcel/watcher": "^2.5.1",
-       "@tailwindcss/node": "4.2.1",
-       "@tailwindcss/oxide": "4.2.1",
+       "@tailwindcss/node": "4.2.2",
+       "@tailwindcss/oxide": "4.2.2",
        "enhanced-resolve": "^5.19.0",
        "mri": "^1.2.0",
        "picocolors": "^1.1.1",
-       "tailwindcss": "4.2.1"
+       "tailwindcss": "4.2.2"
      },
      "bin": {
        "tailwindcss": "dist/index.mjs"
      }
    },
    "node_modules/@tailwindcss/node": {
-     "version": "4.2.1",
-     "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.2.1.tgz",
-     "integrity": "sha512-jlx6sLk4EOwO6hHe1oCGm1Q4AN/s0rSrTTPBGPM0/RQ6Uylwq17FuU8IeJJKEjtc6K6O07zsvP+gDO6MMWo7pg==",
+     "version": "4.2.2",
+     "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.2.2.tgz",
+     "integrity": "sha512-pXS+wJ2gZpVXqFaUEjojq7jzMpTGf8rU6ipJz5ovJV6PUGmlJ+jvIwGrzdHdQ80Sg+wmQxUFuoW1UAAwHNEdFA==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "@jridgewell/remapping": "^2.3.5",
        "enhanced-resolve": "^5.19.0",
        "jiti": "^2.6.1",
-       "lightningcss": "1.31.1",
+       "lightningcss": "1.32.0",
        "magic-string": "^0.30.21",
        "source-map-js": "^1.2.1",
-       "tailwindcss": "4.2.1"
+       "tailwindcss": "4.2.2"
      }
    },
    "node_modules/@tailwindcss/oxide": {
-     "version": "4.2.1",
-     "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.2.1.tgz",
-     "integrity": "sha512-yv9jeEFWnjKCI6/T3Oq50yQEOqmpmpfzG1hcZsAOaXFQPfzWprWrlHSdGPEF3WQTi8zu8ohC9Mh9J470nT5pUw==",
+     "version": "4.2.2",
+     "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.2.2.tgz",
+     "integrity": "sha512-qEUA07+E5kehxYp9BVMpq9E8vnJuBHfJEC0vPC5e7iL/hw7HR61aDKoVoKzrG+QKp56vhNZe4qwkRmMC0zDLvg==",
      "dev": true,
      "license": "MIT",
      "engines": {
        "node": ">= 20"
      },
      "optionalDependencies": {
-       "@tailwindcss/oxide-android-arm64": "4.2.1",
-       "@tailwindcss/oxide-darwin-arm64": "4.2.1",
-       "@tailwindcss/oxide-darwin-x64": "4.2.1",
-       "@tailwindcss/oxide-freebsd-x64": "4.2.1",
-       "@tailwindcss/oxide-linux-arm-gnueabihf": "4.2.1",
-       "@tailwindcss/oxide-linux-arm64-gnu": "4.2.1",
-       "@tailwindcss/oxide-linux-arm64-musl": "4.2.1",
-       "@tailwindcss/oxide-linux-x64-gnu": "4.2.1",
-       "@tailwindcss/oxide-linux-x64-musl": "4.2.1",
-       "@tailwindcss/oxide-wasm32-wasi": "4.2.1",
-       "@tailwindcss/oxide-win32-arm64-msvc": "4.2.1",
-       "@tailwindcss/oxide-win32-x64-msvc": "4.2.1"
+       "@tailwindcss/oxide-android-arm64": "4.2.2",
+       "@tailwindcss/oxide-darwin-arm64": "4.2.2",
+       "@tailwindcss/oxide-darwin-x64": "4.2.2",
+       "@tailwindcss/oxide-freebsd-x64": "4.2.2",
+       "@tailwindcss/oxide-linux-arm-gnueabihf": "4.2.2",
+       "@tailwindcss/oxide-linux-arm64-gnu": "4.2.2",
||||
"@tailwindcss/oxide-linux-arm64-musl": "4.2.2",
|
||||
"@tailwindcss/oxide-linux-x64-gnu": "4.2.2",
|
||||
"@tailwindcss/oxide-linux-x64-musl": "4.2.2",
|
||||
"@tailwindcss/oxide-wasm32-wasi": "4.2.2",
|
||||
"@tailwindcss/oxide-win32-arm64-msvc": "4.2.2",
|
||||
"@tailwindcss/oxide-win32-x64-msvc": "4.2.2"
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-android-arm64": {
|
||||
"version": "4.2.1",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.2.1.tgz",
|
||||
"integrity": "sha512-eZ7G1Zm5EC8OOKaesIKuw77jw++QJ2lL9N+dDpdQiAB/c/B2wDh0QPFHbkBVrXnwNugvrbJFk1gK2SsVjwWReg==",
|
||||
"version": "4.2.2",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.2.2.tgz",
|
||||
"integrity": "sha512-dXGR1n+P3B6748jZO/SvHZq7qBOqqzQ+yFrXpoOWWALWndF9MoSKAT3Q0fYgAzYzGhxNYOoysRvYlpixRBBoDg==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -900,9 +899,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-darwin-arm64": {
|
||||
"version": "4.2.1",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.2.1.tgz",
|
||||
"integrity": "sha512-q/LHkOstoJ7pI1J0q6djesLzRvQSIfEto148ppAd+BVQK0JYjQIFSK3JgYZJa+Yzi0DDa52ZsQx2rqytBnf8Hw==",
|
||||
"version": "4.2.2",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.2.2.tgz",
|
||||
"integrity": "sha512-iq9Qjr6knfMpZHj55/37ouZeykwbDqF21gPFtfnhCCKGDcPI/21FKC9XdMO/XyBM7qKORx6UIhGgg6jLl7BZlg==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -917,9 +916,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-darwin-x64": {
|
||||
"version": "4.2.1",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.2.1.tgz",
|
||||
"integrity": "sha512-/f/ozlaXGY6QLbpvd/kFTro2l18f7dHKpB+ieXz+Cijl4Mt9AI2rTrpq7V+t04nK+j9XBQHnSMdeQRhbGyt6fw==",
|
||||
"version": "4.2.2",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.2.2.tgz",
|
||||
"integrity": "sha512-BlR+2c3nzc8f2G639LpL89YY4bdcIdUmiOOkv2GQv4/4M0vJlpXEa0JXNHhCHU7VWOKWT/CjqHdTP8aUuDJkuw==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -934,9 +933,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-freebsd-x64": {
|
||||
"version": "4.2.1",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.2.1.tgz",
|
||||
"integrity": "sha512-5e/AkgYJT/cpbkys/OU2Ei2jdETCLlifwm7ogMC7/hksI2fC3iiq6OcXwjibcIjPung0kRtR3TxEITkqgn0TcA==",
|
||||
"version": "4.2.2",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.2.2.tgz",
|
||||
"integrity": "sha512-YUqUgrGMSu2CDO82hzlQ5qSb5xmx3RUrke/QgnoEx7KvmRJHQuZHZmZTLSuuHwFf0DJPybFMXMYf+WJdxHy/nQ==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -951,9 +950,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-linux-arm-gnueabihf": {
|
||||
"version": "4.2.1",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.2.1.tgz",
|
||||
"integrity": "sha512-Uny1EcVTTmerCKt/1ZuKTkb0x8ZaiuYucg2/kImO5A5Y/kBz41/+j0gxUZl+hTF3xkWpDmHX+TaWhOtba2Fyuw==",
|
||||
"version": "4.2.2",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.2.2.tgz",
|
||||
"integrity": "sha512-FPdhvsW6g06T9BWT0qTwiVZYE2WIFo2dY5aCSpjG/S/u1tby+wXoslXS0kl3/KXnULlLr1E3NPRRw0g7t2kgaQ==",
|
||||
"cpu": [
|
||||
"arm"
|
||||
],
|
||||
@@ -968,9 +967,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-linux-arm64-gnu": {
|
||||
"version": "4.2.1",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.2.1.tgz",
|
||||
"integrity": "sha512-CTrwomI+c7n6aSSQlsPL0roRiNMDQ/YzMD9EjcR+H4f0I1SQ8QqIuPnsVp7QgMkC1Qi8rtkekLkOFjo7OlEFRQ==",
|
||||
"version": "4.2.2",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.2.2.tgz",
|
||||
"integrity": "sha512-4og1V+ftEPXGttOO7eCmW7VICmzzJWgMx+QXAJRAhjrSjumCwWqMfkDrNu1LXEQzNAwz28NCUpucgQPrR4S2yw==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -985,9 +984,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-linux-arm64-musl": {
|
||||
"version": "4.2.1",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.2.1.tgz",
|
||||
"integrity": "sha512-WZA0CHRL/SP1TRbA5mp9htsppSEkWuQ4KsSUumYQnyl8ZdT39ntwqmz4IUHGN6p4XdSlYfJwM4rRzZLShHsGAQ==",
|
||||
"version": "4.2.2",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.2.2.tgz",
|
||||
"integrity": "sha512-oCfG/mS+/+XRlwNjnsNLVwnMWYH7tn/kYPsNPh+JSOMlnt93mYNCKHYzylRhI51X+TbR+ufNhhKKzm6QkqX8ag==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -1002,9 +1001,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-linux-x64-gnu": {
|
||||
"version": "4.2.1",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.2.1.tgz",
|
||||
"integrity": "sha512-qMFzxI2YlBOLW5PhblzuSWlWfwLHaneBE0xHzLrBgNtqN6mWfs+qYbhryGSXQjFYB1Dzf5w+LN5qbUTPhW7Y5g==",
|
||||
"version": "4.2.2",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.2.2.tgz",
|
||||
"integrity": "sha512-rTAGAkDgqbXHNp/xW0iugLVmX62wOp2PoE39BTCGKjv3Iocf6AFbRP/wZT/kuCxC9QBh9Pu8XPkv/zCZB2mcMg==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -1019,9 +1018,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-linux-x64-musl": {
|
||||
"version": "4.2.1",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.2.1.tgz",
|
||||
"integrity": "sha512-5r1X2FKnCMUPlXTWRYpHdPYUY6a1Ar/t7P24OuiEdEOmms5lyqjDRvVY1yy9Rmioh+AunQ0rWiOTPE8F9A3v5g==",
|
||||
"version": "4.2.2",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.2.2.tgz",
|
||||
"integrity": "sha512-XW3t3qwbIwiSyRCggeO2zxe3KWaEbM0/kW9e8+0XpBgyKU4ATYzcVSMKteZJ1iukJ3HgHBjbg9P5YPRCVUxlnQ==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -1036,9 +1035,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-wasm32-wasi": {
|
||||
"version": "4.2.1",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.2.1.tgz",
|
||||
"integrity": "sha512-MGFB5cVPvshR85MTJkEvqDUnuNoysrsRxd6vnk1Lf2tbiqNlXpHYZqkqOQalydienEWOHHFyyuTSYRsLfxFJ2Q==",
|
||||
"version": "4.2.2",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.2.2.tgz",
|
||||
"integrity": "sha512-eKSztKsmEsn1O5lJ4ZAfyn41NfG7vzCg496YiGtMDV86jz1q/irhms5O0VrY6ZwTUkFy/EKG3RfWgxSI3VbZ8Q==",
|
||||
"bundleDependencies": [
|
||||
"@napi-rs/wasm-runtime",
|
||||
"@emnapi/core",
|
||||
@@ -1066,9 +1065,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-win32-arm64-msvc": {
|
||||
"version": "4.2.1",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.2.1.tgz",
|
||||
"integrity": "sha512-YlUEHRHBGnCMh4Nj4GnqQyBtsshUPdiNroZj8VPkvTZSoHsilRCwXcVKnG9kyi0ZFAS/3u+qKHBdDc81SADTRA==",
|
||||
"version": "4.2.2",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.2.2.tgz",
|
||||
"integrity": "sha512-qPmaQM4iKu5mxpsrWZMOZRgZv1tOZpUm+zdhhQP0VhJfyGGO3aUKdbh3gDZc/dPLQwW4eSqWGrrcWNBZWUWaXQ==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -1083,9 +1082,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-win32-x64-msvc": {
|
||||
"version": "4.2.1",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.2.1.tgz",
|
||||
"integrity": "sha512-rbO34G5sMWWyrN/idLeVxAZgAKWrn5LiR3/I90Q9MkA67s6T1oB0xtTe+0heoBvHSpbU9Mk7i6uwJnpo4u21XQ==",
|
||||
"version": "4.2.2",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.2.2.tgz",
|
||||
"integrity": "sha512-1T/37VvI7WyH66b+vqHj/cLwnCxt7Qt3WFu5Q8hk65aOvlwAhs7rAp1VkulBJw/N4tMirXjVnylTR72uI0HGcA==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -1112,12 +1111,6 @@
|
||||
"tailwindcss": ">=3.0.0 || insiders || >=4.0.0-alpha.20 || >=4.0.0-beta.1"
|
||||
}
|
||||
},
|
||||
"node_modules/actor-typeahead": {
|
||||
"version": "0.1.2",
|
||||
"resolved": "https://registry.npmjs.org/actor-typeahead/-/actor-typeahead-0.1.2.tgz",
|
||||
"integrity": "sha512-I97YqqNl7Kar0J/bIJvgY/KmHpssHcDElhfwVTLP7wRFlkxso2ZLBqiS2zol5A8UVUJbQK2JXYaqNpZXz8Uk2A==",
|
||||
"license": "MPL-2.0"
|
||||
},
|
||||
"node_modules/balanced-match": {
|
||||
"version": "4.0.4",
|
||||
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz",
|
||||
@@ -1175,9 +1168,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/enhanced-resolve": {
|
||||
"version": "5.20.0",
|
||||
"resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.20.0.tgz",
|
||||
"integrity": "sha512-/ce7+jQ1PQ6rVXwe+jKEg5hW5ciicHwIQUagZkp6IufBoY3YDgdTTY1azVs0qoRgVmvsNB+rbjLJxDAeHHtwsQ==",
|
||||
"version": "5.20.1",
|
||||
"resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.20.1.tgz",
|
||||
"integrity": "sha512-Qohcme7V1inbAfvjItgw0EaxVX5q2rdVEZHRBrEQdRZTssLDGsL8Lwrznl8oQ/6kuTJONLaDcGjkNP247XEhcA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
@@ -1189,9 +1182,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/esbuild": {
|
||||
"version": "0.27.4",
|
||||
"resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.4.tgz",
|
||||
"integrity": "sha512-Rq4vbHnYkK5fws5NF7MYTU68FPRE1ajX7heQ/8QXXWqNgqqJ/GkmmyxIzUnf2Sr/bakf8l54716CcMGHYhMrrQ==",
|
||||
"version": "0.28.0",
|
||||
"resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.28.0.tgz",
|
||||
"integrity": "sha512-sNR9MHpXSUV/XB4zmsFKN+QgVG82Cc7+/aaxJ8Adi8hyOac+EXptIp45QBPaVyX3N70664wRbTcLTOemCAnyqw==",
|
||||
"dev": true,
|
||||
"hasInstallScript": true,
|
||||
"license": "MIT",
|
||||
@@ -1202,32 +1195,32 @@
|
||||
"node": ">=18"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@esbuild/aix-ppc64": "0.27.4",
|
||||
"@esbuild/android-arm": "0.27.4",
|
||||
"@esbuild/android-arm64": "0.27.4",
|
||||
"@esbuild/android-x64": "0.27.4",
|
||||
"@esbuild/darwin-arm64": "0.27.4",
|
||||
"@esbuild/darwin-x64": "0.27.4",
|
||||
"@esbuild/freebsd-arm64": "0.27.4",
|
||||
"@esbuild/freebsd-x64": "0.27.4",
|
||||
"@esbuild/linux-arm": "0.27.4",
|
||||
"@esbuild/linux-arm64": "0.27.4",
|
||||
"@esbuild/linux-ia32": "0.27.4",
|
||||
"@esbuild/linux-loong64": "0.27.4",
|
||||
"@esbuild/linux-mips64el": "0.27.4",
|
||||
"@esbuild/linux-ppc64": "0.27.4",
|
||||
"@esbuild/linux-riscv64": "0.27.4",
|
||||
"@esbuild/linux-s390x": "0.27.4",
|
||||
"@esbuild/linux-x64": "0.27.4",
|
||||
"@esbuild/netbsd-arm64": "0.27.4",
|
||||
"@esbuild/netbsd-x64": "0.27.4",
|
||||
"@esbuild/openbsd-arm64": "0.27.4",
|
||||
"@esbuild/openbsd-x64": "0.27.4",
|
||||
"@esbuild/openharmony-arm64": "0.27.4",
|
||||
"@esbuild/sunos-x64": "0.27.4",
|
||||
"@esbuild/win32-arm64": "0.27.4",
|
||||
"@esbuild/win32-ia32": "0.27.4",
|
||||
"@esbuild/win32-x64": "0.27.4"
|
||||
"@esbuild/aix-ppc64": "0.28.0",
|
||||
"@esbuild/android-arm": "0.28.0",
|
||||
"@esbuild/android-arm64": "0.28.0",
|
||||
"@esbuild/android-x64": "0.28.0",
|
||||
"@esbuild/darwin-arm64": "0.28.0",
|
||||
"@esbuild/darwin-x64": "0.28.0",
|
||||
"@esbuild/freebsd-arm64": "0.28.0",
|
||||
"@esbuild/freebsd-x64": "0.28.0",
|
||||
"@esbuild/linux-arm": "0.28.0",
|
||||
"@esbuild/linux-arm64": "0.28.0",
|
||||
"@esbuild/linux-ia32": "0.28.0",
|
||||
"@esbuild/linux-loong64": "0.28.0",
|
||||
"@esbuild/linux-mips64el": "0.28.0",
|
||||
"@esbuild/linux-ppc64": "0.28.0",
|
||||
"@esbuild/linux-riscv64": "0.28.0",
|
||||
"@esbuild/linux-s390x": "0.28.0",
|
||||
"@esbuild/linux-x64": "0.28.0",
|
||||
"@esbuild/netbsd-arm64": "0.28.0",
|
||||
"@esbuild/netbsd-x64": "0.28.0",
|
||||
"@esbuild/openbsd-arm64": "0.28.0",
|
||||
"@esbuild/openbsd-x64": "0.28.0",
|
||||
"@esbuild/openharmony-arm64": "0.28.0",
|
||||
"@esbuild/sunos-x64": "0.28.0",
|
||||
"@esbuild/win32-arm64": "0.28.0",
|
||||
"@esbuild/win32-ia32": "0.28.0",
|
||||
"@esbuild/win32-x64": "0.28.0"
|
||||
}
|
||||
},
|
||||
"node_modules/glob": {
|
||||
@@ -1303,9 +1296,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/lightningcss": {
|
||||
"version": "1.31.1",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.31.1.tgz",
|
||||
"integrity": "sha512-l51N2r93WmGUye3WuFoN5k10zyvrVs0qfKBhyC5ogUQ6Ew6JUSswh78mbSO+IU3nTWsyOArqPCcShdQSadghBQ==",
|
||||
"version": "1.32.0",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.32.0.tgz",
|
||||
"integrity": "sha512-NXYBzinNrblfraPGyrbPoD19C1h9lfI/1mzgWYvXUTe414Gz/X1FD2XBZSZM7rRTrMA8JL3OtAaGifrIKhQ5yQ==",
|
||||
"dev": true,
|
||||
"license": "MPL-2.0",
|
||||
"dependencies": {
|
||||
@@ -1319,23 +1312,23 @@
|
||||
"url": "https://opencollective.com/parcel"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"lightningcss-android-arm64": "1.31.1",
|
||||
"lightningcss-darwin-arm64": "1.31.1",
|
||||
"lightningcss-darwin-x64": "1.31.1",
|
||||
"lightningcss-freebsd-x64": "1.31.1",
|
||||
"lightningcss-linux-arm-gnueabihf": "1.31.1",
|
||||
"lightningcss-linux-arm64-gnu": "1.31.1",
|
||||
"lightningcss-linux-arm64-musl": "1.31.1",
|
||||
"lightningcss-linux-x64-gnu": "1.31.1",
|
||||
"lightningcss-linux-x64-musl": "1.31.1",
|
||||
"lightningcss-win32-arm64-msvc": "1.31.1",
|
||||
"lightningcss-win32-x64-msvc": "1.31.1"
|
||||
"lightningcss-android-arm64": "1.32.0",
|
||||
"lightningcss-darwin-arm64": "1.32.0",
|
||||
"lightningcss-darwin-x64": "1.32.0",
|
||||
"lightningcss-freebsd-x64": "1.32.0",
|
||||
"lightningcss-linux-arm-gnueabihf": "1.32.0",
|
||||
"lightningcss-linux-arm64-gnu": "1.32.0",
|
||||
"lightningcss-linux-arm64-musl": "1.32.0",
|
||||
"lightningcss-linux-x64-gnu": "1.32.0",
|
||||
"lightningcss-linux-x64-musl": "1.32.0",
|
||||
"lightningcss-win32-arm64-msvc": "1.32.0",
|
||||
"lightningcss-win32-x64-msvc": "1.32.0"
|
||||
}
|
||||
},
|
||||
"node_modules/lightningcss-android-arm64": {
|
||||
"version": "1.31.1",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-android-arm64/-/lightningcss-android-arm64-1.31.1.tgz",
|
||||
"integrity": "sha512-HXJF3x8w9nQ4jbXRiNppBCqeZPIAfUo8zE/kOEGbW5NZvGc/K7nMxbhIr+YlFlHW5mpbg/YFPdbnCh1wAXCKFg==",
|
||||
"version": "1.32.0",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-android-arm64/-/lightningcss-android-arm64-1.32.0.tgz",
|
||||
"integrity": "sha512-YK7/ClTt4kAK0vo6w3X+Pnm0D2cf2vPHbhOXdoNti1Ga0al1P4TBZhwjATvjNwLEBCnKvjJc2jQgHXH0NEwlAg==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -1354,9 +1347,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/lightningcss-darwin-arm64": {
|
||||
"version": "1.31.1",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.31.1.tgz",
|
||||
"integrity": "sha512-02uTEqf3vIfNMq3h/z2cJfcOXnQ0GRwQrkmPafhueLb2h7mqEidiCzkE4gBMEH65abHRiQvhdcQ+aP0D0g67sg==",
|
||||
"version": "1.32.0",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.32.0.tgz",
|
||||
"integrity": "sha512-RzeG9Ju5bag2Bv1/lwlVJvBE3q6TtXskdZLLCyfg5pt+HLz9BqlICO7LZM7VHNTTn/5PRhHFBSjk5lc4cmscPQ==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -1375,9 +1368,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/lightningcss-darwin-x64": {
|
||||
"version": "1.31.1",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.31.1.tgz",
|
||||
"integrity": "sha512-1ObhyoCY+tGxtsz1lSx5NXCj3nirk0Y0kB/g8B8DT+sSx4G9djitg9ejFnjb3gJNWo7qXH4DIy2SUHvpoFwfTA==",
|
||||
"version": "1.32.0",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.32.0.tgz",
|
||||
"integrity": "sha512-U+QsBp2m/s2wqpUYT/6wnlagdZbtZdndSmut/NJqlCcMLTWp5muCrID+K5UJ6jqD2BFshejCYXniPDbNh73V8w==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -1396,9 +1389,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/lightningcss-freebsd-x64": {
|
||||
"version": "1.31.1",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.31.1.tgz",
|
||||
"integrity": "sha512-1RINmQKAItO6ISxYgPwszQE1BrsVU5aB45ho6O42mu96UiZBxEXsuQ7cJW4zs4CEodPUioj/QrXW1r9pLUM74A==",
|
||||
"version": "1.32.0",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.32.0.tgz",
|
||||
"integrity": "sha512-JCTigedEksZk3tHTTthnMdVfGf61Fky8Ji2E4YjUTEQX14xiy/lTzXnu1vwiZe3bYe0q+SpsSH/CTeDXK6WHig==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -1417,9 +1410,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/lightningcss-linux-arm-gnueabihf": {
|
||||
"version": "1.31.1",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.31.1.tgz",
|
||||
"integrity": "sha512-OOCm2//MZJ87CdDK62rZIu+aw9gBv4azMJuA8/KB74wmfS3lnC4yoPHm0uXZ/dvNNHmnZnB8XLAZzObeG0nS1g==",
|
||||
"version": "1.32.0",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.32.0.tgz",
|
||||
"integrity": "sha512-x6rnnpRa2GL0zQOkt6rts3YDPzduLpWvwAF6EMhXFVZXD4tPrBkEFqzGowzCsIWsPjqSK+tyNEODUBXeeVHSkw==",
|
||||
"cpu": [
|
||||
"arm"
|
||||
],
|
||||
@@ -1438,9 +1431,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/lightningcss-linux-arm64-gnu": {
|
||||
"version": "1.31.1",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.31.1.tgz",
|
||||
"integrity": "sha512-WKyLWztD71rTnou4xAD5kQT+982wvca7E6QoLpoawZ1gP9JM0GJj4Tp5jMUh9B3AitHbRZ2/H3W5xQmdEOUlLg==",
|
||||
"version": "1.32.0",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.32.0.tgz",
|
||||
"integrity": "sha512-0nnMyoyOLRJXfbMOilaSRcLH3Jw5z9HDNGfT/gwCPgaDjnx0i8w7vBzFLFR1f6CMLKF8gVbebmkUN3fa/kQJpQ==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -1459,9 +1452,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/lightningcss-linux-arm64-musl": {
|
||||
"version": "1.31.1",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.31.1.tgz",
|
||||
"integrity": "sha512-mVZ7Pg2zIbe3XlNbZJdjs86YViQFoJSpc41CbVmKBPiGmC4YrfeOyz65ms2qpAobVd7WQsbW4PdsSJEMymyIMg==",
|
||||
"version": "1.32.0",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.32.0.tgz",
|
||||
"integrity": "sha512-UpQkoenr4UJEzgVIYpI80lDFvRmPVg6oqboNHfoH4CQIfNA+HOrZ7Mo7KZP02dC6LjghPQJeBsvXhJod/wnIBg==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -1480,9 +1473,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/lightningcss-linux-x64-gnu": {
|
||||
"version": "1.31.1",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.31.1.tgz",
|
||||
"integrity": "sha512-xGlFWRMl+0KvUhgySdIaReQdB4FNudfUTARn7q0hh/V67PVGCs3ADFjw+6++kG1RNd0zdGRlEKa+T13/tQjPMA==",
|
||||
"version": "1.32.0",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.32.0.tgz",
|
||||
"integrity": "sha512-V7Qr52IhZmdKPVr+Vtw8o+WLsQJYCTd8loIfpDaMRWGUZfBOYEJeyJIkqGIDMZPwPx24pUMfwSxxI8phr/MbOA==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -1501,9 +1494,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/lightningcss-linux-x64-musl": {
|
||||
"version": "1.31.1",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.31.1.tgz",
|
||||
"integrity": "sha512-eowF8PrKHw9LpoZii5tdZwnBcYDxRw2rRCyvAXLi34iyeYfqCQNA9rmUM0ce62NlPhCvof1+9ivRaTY6pSKDaA==",
|
||||
"version": "1.32.0",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.32.0.tgz",
|
||||
"integrity": "sha512-bYcLp+Vb0awsiXg/80uCRezCYHNg1/l3mt0gzHnWV9XP1W5sKa5/TCdGWaR/zBM2PeF/HbsQv/j2URNOiVuxWg==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -1522,9 +1515,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/lightningcss-win32-arm64-msvc": {
|
||||
"version": "1.31.1",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.31.1.tgz",
|
||||
"integrity": "sha512-aJReEbSEQzx1uBlQizAOBSjcmr9dCdL3XuC/6HLXAxmtErsj2ICo5yYggg1qOODQMtnjNQv2UHb9NpOuFtYe4w==",
|
||||
"version": "1.32.0",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.32.0.tgz",
|
||||
"integrity": "sha512-8SbC8BR40pS6baCM8sbtYDSwEVQd4JlFTOlaD3gWGHfThTcABnNDBda6eTZeqbofalIJhFx0qKzgHJmcPTnGdw==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -1543,9 +1536,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/lightningcss-win32-x64-msvc": {
|
||||
"version": "1.31.1",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.31.1.tgz",
|
||||
"integrity": "sha512-I9aiFrbd7oYHwlnQDqr1Roz+fTz61oDDJX7n9tYF9FJymH1cIN1DtKw3iYt6b8WZgEjoNwVSncwF4wx/ZedMhw==",
|
||||
"version": "1.32.0",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.32.0.tgz",
|
||||
"integrity": "sha512-Amq9B/SoZYdDi1kFrojnoqPLxYhQ4Wo5XiL8EVJrVsB8ARoC1PWW6VGtT0WKCemjy8aC+louJnjS7U18x3b06Q==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -1574,9 +1567,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/lucide": {
|
||||
"version": "0.577.0",
|
||||
"resolved": "https://registry.npmjs.org/lucide/-/lucide-0.577.0.tgz",
|
||||
"integrity": "sha512-PpC/m5eOItp/WU/GlQPFBXDOhq6HibL73KzYP37OX3LM7VmzWQF8voEj8QRWUFvy9FIKfeDQkWYoyS1D/MdWFA==",
|
||||
"version": "1.8.0",
|
||||
"resolved": "https://registry.npmjs.org/lucide/-/lucide-1.8.0.tgz",
|
||||
"integrity": "sha512-JjV/QnadgFLj1Pyu9IKl0lknrolFEzo04B64QcYLLeRzZl/iEHpdbSrRRKbyXcv45SZNv+WGjIUCT33e7xHO6Q==",
|
||||
"license": "ISC"
|
||||
},
|
||||
"node_modules/magic-string": {
|
||||
@@ -1694,16 +1687,16 @@
|
||||
}
|
||||
},
|
||||
"node_modules/tailwindcss": {
|
||||
"version": "4.2.1",
|
||||
"resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.2.1.tgz",
|
||||
"integrity": "sha512-/tBrSQ36vCleJkAOsy9kbNTgaxvGbyOamC30PRePTQe/o1MFwEKHQk4Cn7BNGaPtjp+PuUrByJehM1hgxfq4sw==",
|
||||
"version": "4.2.2",
|
||||
"resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.2.2.tgz",
|
||||
"integrity": "sha512-KWBIxs1Xb6NoLdMVqhbhgwZf2PGBpPEiwOqgI4pFIYbNTfBXiKYyWoTsXgBQ9WFg/OlhnvHaY+AEpW7wSmFo2Q==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/tapable": {
|
||||
"version": "2.3.0",
|
||||
"resolved": "https://registry.npmjs.org/tapable/-/tapable-2.3.0.tgz",
|
||||
"integrity": "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==",
|
||||
"version": "2.3.2",
|
||||
"resolved": "https://registry.npmjs.org/tapable/-/tapable-2.3.2.tgz",
|
||||
"integrity": "sha512-1MOpMXuhGzGL5TTCZFItxCc0AARf1EZFQkGqMm7ERKj8+Hgr5oLvJOVFcC+lRmR8hCe2S3jC4T5D7Vg/d7/fhA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
|
||||
package.json
@@ -4,9 +4,9 @@
"private": true,
"scripts": {
"icons:build": "node scripts/generate-icons.js",
"css:build": "BROWSERSLIST_IGNORE_OLD_DATA=1 npx tailwindcss -i ./pkg/appview/src/css/main.css -o ./pkg/appview/public/css/style.css --minify",
"css:build:hold": "BROWSERSLIST_IGNORE_OLD_DATA=1 npx tailwindcss -i ./pkg/hold/admin/src/css/main.css -o ./pkg/hold/admin/public/css/style.css --minify",
"css:watch": "BROWSERSLIST_IGNORE_OLD_DATA=1 npx tailwindcss -i ./pkg/appview/src/css/main.css -o ./pkg/appview/public/css/style.css --watch",
"css:build": "npx tailwindcss -i ./pkg/appview/src/css/main.css -o ./pkg/appview/public/css/style.css --minify",
"css:build:hold": "npx tailwindcss -i ./pkg/hold/admin/src/css/main.css -o ./pkg/hold/admin/public/css/style.css --minify",
"css:watch": "npx tailwindcss -i ./pkg/appview/src/css/main.css -o ./pkg/appview/public/css/style.css --watch",
"js:build": "esbuild pkg/appview/src/js/main.js --bundle --minify --format=esm --outfile=pkg/appview/public/js/bundle.min.js",
"js:build:hold": "esbuild pkg/hold/admin/src/js/main.js --bundle --minify --format=esm --outfile=pkg/hold/admin/public/js/bundle.min.js",
"js:watch": "esbuild pkg/appview/src/js/main.js --bundle --watch --format=esm --outfile=pkg/appview/public/js/bundle.min.js",
@@ -16,17 +16,16 @@
"watch": "npm run css:watch & npm run js:watch"
},
"devDependencies": {
"@tailwindcss/cli": "^4.2.1",
"@tailwindcss/cli": "^4.2.2",
"@tailwindcss/typography": "^0.5.19",
"daisyui": "^5.5.19",
"esbuild": "^0.27.4",
"esbuild": "^0.28.0",
"glob": "^13.0.6",
"tailwindcss": "^4.2"
"lucide": "^1.8.0",
"tailwindcss": "^4.2.2"
},
"dependencies": {
"actor-typeahead": "^0.1.2",
"htmx-ext-json-enc": "^2.0.3",
"htmx.org": "^2.0.8",
"lucide": "^0.577.0"
"htmx.org": "^2.0.8"
}
}
@@ -33,6 +33,7 @@ type Config struct {
CredentialHelper CredentialHelperConfig `yaml:"credential_helper" comment:"Credential helper download settings."`
Legal LegalConfig `yaml:"legal" comment:"Legal page customization for self-hosted instances."`
AI AIConfig `yaml:"ai" comment:"AI-powered image advisor settings."`
Labeler LabelerRefConfig `yaml:"labeler" comment:"ATProto labeler for content moderation (DMCA takedowns)."`
Billing billing.Config `yaml:"billing" comment:"Stripe billing integration (requires -tags billing build)."`
Distribution *configuration.Configuration `yaml:"-"` // Wrapped distribution config for compatibility
}
@@ -51,9 +52,6 @@ type ServerConfig struct {
// Allows HTTP (not HTTPS) for DID resolution.
TestMode bool `yaml:"test_mode" comment:"Allows HTTP (not HTTPS) for DID resolution and uses transition:generic OAuth scope."`

// Path to P-256 private key for OAuth client authentication.
OAuthKeyPath string `yaml:"oauth_key_path" comment:"Path to P-256 private key for OAuth client authentication. Auto-generated on first run."`

// Display name shown on OAuth authorization screens.
ClientName string `yaml:"client_name" comment:"Display name shown on OAuth authorization screens."`

@@ -84,6 +82,9 @@ type UIConfig struct {

// How often to sync with the remote libSQL server.
LibsqlSyncInterval time.Duration `yaml:"libsql_sync_interval" comment:"How often to sync with remote libSQL server. Default: 60s."`

// Source code URL displayed in the footer "Source" link.
SourceURL string `yaml:"source_url" comment:"Source code URL displayed in the footer \"Source\" link. Defaults to the upstream ATCR project."`
}

// HealthConfig defines health check and cache settings
@@ -106,17 +107,14 @@ type JetstreamConfig struct {
// How often to re-run backfill to catch missed events. Set to 0 to only backfill on startup.
BackfillInterval time.Duration `yaml:"backfill_interval" comment:"How often to re-run backfill to catch missed events. Set to 0 to only backfill on startup."`

// Relay endpoints for backfill, tried in order on failure.
RelayEndpoints []string `yaml:"relay_endpoints" comment:"Relay endpoints for backfill, tried in order on failure."`
// Relay endpoints for backfill — MUST support com.atproto.sync.listReposByCollection. Tried in order on failure.
RelayEndpoints []string `yaml:"relay_endpoints" comment:"Endpoints used for backfill. MUST support com.atproto.sync.listReposByCollection. Tried in order on failure."`
}

// AuthConfig defines authentication settings
type AuthConfig struct {
// RSA private key for signing registry JWTs.
KeyPath string `yaml:"key_path" comment:"RSA private key for signing registry JWTs issued to Docker clients."`

// X.509 certificate matching the JWT signing key.
CertPath string `yaml:"cert_path" comment:"X.509 certificate matching the JWT signing key."`
CertPath string `yaml:"cert_path" comment:"X.509 certificate matching the JWT signing key (auto-generated on each boot from the JWT key in the database)."`

// TokenExpiration is the JWT expiration duration (5 minutes, not configurable)
TokenExpiration time.Duration `yaml:"-"`
@@ -147,6 +145,12 @@ type AIConfig struct {
APIKey string `yaml:"api_key" comment:"Anthropic API key for AI Image Advisor. Also reads CLAUDE_API_KEY env var as fallback."`
}

// LabelerRefConfig defines the connection to an ATProto labeler service.
type LabelerRefConfig struct {
// DID or URL of the labeler service for content moderation.
DID string `yaml:"did" comment:"DID or URL of the ATProto labeler (e.g., did:web:labeler.atcr.io). Empty disables label filtering."`
}

// setDefaults registers all default values on the given Viper instance.
func setDefaults(v *viper.Viper) {
v.SetDefault("version", "0.1")
@@ -159,7 +163,6 @@ func setDefaults(v *viper.Viper) {
v.SetDefault("server.test_mode", false)
v.SetDefault("server.client_name", "AT Container Registry")
v.SetDefault("server.client_short_name", "ATCR")
v.SetDefault("server.oauth_key_path", "/var/lib/atcr/oauth/client.key")
v.SetDefault("server.registry_domains", []string{})
v.SetDefault("server.managed_holds", []string{})

@@ -169,6 +172,7 @@ func setDefaults(v *viper.Viper) {
v.SetDefault("ui.libsql_sync_url", "")
v.SetDefault("ui.libsql_auth_token", "")
v.SetDefault("ui.libsql_sync_interval", "60s")
v.SetDefault("ui.source_url", "https://tangled.org/evan.jarrett.net/at-container-registry")

// Health defaults
v.SetDefault("health.cache_ttl", "15m")
@@ -189,7 +193,6 @@ func setDefaults(v *viper.Viper) {
})

// Auth defaults
v.SetDefault("auth.key_path", "/var/lib/atcr/auth/private-key.pem")
v.SetDefault("auth.cert_path", "/var/lib/atcr/auth/private-key.crt")

// Log shipper defaults
@@ -203,6 +206,9 @@ func setDefaults(v *viper.Viper) {
v.SetDefault("legal.company_name", "")
v.SetDefault("legal.jurisdiction", "")

// Labeler defaults
v.SetDefault("labeler.did", "")

// Log formatter (used by distribution config, not in Config struct)
v.SetDefault("log_formatter", "text")
}
@@ -223,8 +229,8 @@ func ExampleYAML() ([]byte, error) {

// Populate example billing tiers so operators see the structure
cfg.Billing.Currency = "usd"
cfg.Billing.SuccessURL = "{base_url}/settings#billing"
cfg.Billing.CancelURL = "{base_url}/settings#billing"
cfg.Billing.SuccessURL = "{base_url}/settings/billing"
cfg.Billing.CancelURL = "{base_url}/settings/billing"
cfg.Billing.OwnerBadge = true
cfg.Billing.Tiers = []billing.BillingTierConfig{
{Name: "deckhand", Description: "Get started with basic storage", MaxWebhooks: 1},
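
The new labeler knob is deliberately minimal: an empty DID disables label filtering, matching the `labeler.did` default of `""` above. A self-contained sketch of that gate (the struct copy and sample values here are illustrative, not taken from this commit):

package main

import "fmt"

// LabelerRefConfig mirrors the struct added in this diff; copied here only
// so the sketch runs standalone.
type LabelerRefConfig struct {
    DID string
}

func main() {
    for _, cfg := range []LabelerRefConfig{{}, {DID: "did:web:labeler.atcr.io"}} {
        if cfg.DID == "" {
            // Empty string means no labeler is consulted at all.
            fmt.Println("label filtering disabled")
            continue
        }
        fmt.Println("label filtering enabled via", cfg.DID)
    }
}
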
@@ -18,10 +18,8 @@ import (
"github.com/bluesky-social/indigo/atproto/atcrypto"
)

// loadOAuthKey loads the OAuth P-256 key with priority: DB → file → generate.
// Keys loaded from file or newly generated are stored in the DB.
func loadOAuthKey(database *sql.DB, keyPath string) (*atcrypto.PrivateKeyP256, error) {
// Try database first
// loadOAuthKey loads the OAuth P-256 key from the DB, generating one if absent.
func loadOAuthKey(database *sql.DB) (*atcrypto.PrivateKeyP256, error) {
data, err := db.GetCryptoKey(database, "oauth_p256")
if err != nil {
return nil, fmt.Errorf("failed to query crypto_keys: %w", err)
@@ -35,23 +33,6 @@ func loadOAuthKey(database *sql.DB, keyPath string) (*atcrypto.PrivateKeyP256, e
return key, nil
}

// Try file fallback
if keyPath != "" {
if fileData, err := os.ReadFile(keyPath); err == nil {
key, err := atcrypto.ParsePrivateBytesP256(fileData)
if err != nil {
return nil, fmt.Errorf("failed to parse OAuth key from file %s: %w", keyPath, err)
}
// Migrate to database
if err := db.PutCryptoKey(database, "oauth_p256", fileData); err != nil {
return nil, fmt.Errorf("failed to store OAuth key in database: %w", err)
}
slog.Info("Migrated OAuth P-256 key from file to database", "path", keyPath)
return key, nil
}
}

// Generate new key
p256Key, err := atcrypto.GeneratePrivateKeyP256()
if err != nil {
return nil, fmt.Errorf("failed to generate OAuth P-256 key: %w", err)
@@ -66,16 +47,15 @@ func loadOAuthKey(database *sql.DB, keyPath string) (*atcrypto.PrivateKeyP256, e
return p256Key, nil
}

// loadJWTKeyAndCert loads the JWT RSA key from DB (with file fallback) and generates
// a self-signed certificate. The cert is always regenerated and written to certPath
// on disk because the distribution library reads it via os.Open().
func loadJWTKeyAndCert(database *sql.DB, keyPath, certPath string) (*rsa.PrivateKey, []byte, error) {
rsaKey, err := loadRSAKey(database, keyPath)
// loadJWTKeyAndCert loads the JWT RSA key from the DB and generates a self-signed
// certificate. The cert is always regenerated and written to certPath on disk
// because the distribution library reads it via os.Open().
func loadJWTKeyAndCert(database *sql.DB, certPath string) (*rsa.PrivateKey, []byte, error) {
rsaKey, err := loadRSAKey(database)
if err != nil {
return nil, nil, err
}

// Generate cert and write to disk for distribution library
certDER, err := generateAndWriteCert(rsaKey, certPath)
if err != nil {
return nil, nil, err
@@ -84,9 +64,8 @@ func loadJWTKeyAndCert(database *sql.DB, keyPath, certPath string) (*rsa.Private
return rsaKey, certDER, nil
}

// loadRSAKey loads the RSA private key with priority: DB → file → generate.
func loadRSAKey(database *sql.DB, keyPath string) (*rsa.PrivateKey, error) {
// Try database first
// loadRSAKey loads the RSA private key from the DB, generating one if absent.
func loadRSAKey(database *sql.DB) (*rsa.PrivateKey, error) {
data, err := db.GetCryptoKey(database, "jwt_rsa")
if err != nil {
return nil, fmt.Errorf("failed to query crypto_keys: %w", err)
@@ -100,23 +79,6 @@ func loadRSAKey(database *sql.DB, keyPath string) (*rsa.PrivateKey, error) {
return key, nil
}

// Try file fallback
if keyPath != "" {
if fileData, err := os.ReadFile(keyPath); err == nil {
key, err := parseRSAKeyPEM(fileData)
if err != nil {
return nil, fmt.Errorf("failed to parse RSA key from file %s: %w", keyPath, err)
}
// Migrate to database
if err := db.PutCryptoKey(database, "jwt_rsa", fileData); err != nil {
return nil, fmt.Errorf("failed to store RSA key in database: %w", err)
}
slog.Info("Migrated JWT RSA key from file to database", "path", keyPath)
return key, nil
}
}

// Generate new key
rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return nil, fmt.Errorf("failed to generate RSA key: %w", err)
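
With the file-migration branches removed, both loaders reduce to the same two-step flow: read the key from the crypto_keys table, generate and persist it if absent. A runnable sketch of that flow, with a map standing in for the database (names and the placeholder serialization are illustrative, not the project's code):

package main

import (
    "crypto/rand"
    "crypto/rsa"
    "fmt"
)

// store stands in for the crypto_keys table behind GetCryptoKey/PutCryptoKey.
var store = map[string][]byte{}

// loadOrGenerate sketches the simplified flow: database first, generate and
// persist if absent. The real code parses stored bytes into a typed key.
func loadOrGenerate(name string) (fromDB bool, err error) {
    if _, ok := store[name]; ok {
        return true, nil
    }
    key, err := rsa.GenerateKey(rand.Reader, 2048)
    if err != nil {
        return false, err
    }
    store[name] = key.D.Bytes() // placeholder serialization for the sketch
    return false, nil
}

func main() {
    first, _ := loadOrGenerate("jwt_rsa")
    second, _ := loadOrGenerate("jwt_rsa")
    fmt.Println(first, second) // false true: generated on first boot, reused afterwards
}
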
@@ -29,6 +29,36 @@ func GetRepositoryAnnotations(db DBTX, did, repository string) (map[string]strin
return annotations, rows.Err()
}

// GetRepositoryAnnotationsByDID retrieves all annotations for every
// repository owned by a DID, grouped as map[repository]map[key]value.
// Used by bulk-fetch paths to avoid issuing one query per repository.
func GetRepositoryAnnotationsByDID(db DBTX, did string) (map[string]map[string]string, error) {
rows, err := db.Query(`
SELECT repository, key, value
FROM repository_annotations
WHERE did = ?
`, did)
if err != nil {
return nil, err
}
defer rows.Close()

out := make(map[string]map[string]string)
for rows.Next() {
var repo, key, value string
if err := rows.Scan(&repo, &key, &value); err != nil {
return nil, err
}
m, ok := out[repo]
if !ok {
m = make(map[string]string)
out[repo] = m
}
m[key] = value
}
return out, rows.Err()
}

// UpsertRepositoryAnnotations upserts annotations for a repository.
// Stale keys not present in the new map are deleted.
// Unchanged values are skipped to avoid unnecessary writes.
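
The core of the new bulk fetch is the grouping step: flat (repository, key, value) rows come back from a single query and are folded into map[repository]map[key]value. A standalone reduction of just that step (the fake rows and values are illustrative):

package main

import "fmt"

type row struct{ repo, key, value string }

func main() {
    // One query's worth of rows for a DID, instead of one query per repo.
    rows := []row{
        {"app", "org.opencontainers.image.source", "https://example.test/app"},
        {"app", "org.opencontainers.image.licenses", "MIT"},
        {"worker", "org.opencontainers.image.source", "https://example.test/worker"},
    }
    out := make(map[string]map[string]string)
    for _, r := range rows {
        m, ok := out[r.repo]
        if !ok {
            m = make(map[string]string)
            out[r.repo] = m
        }
        m[r.key] = r.value
    }
    fmt.Println(len(out["app"]), len(out["worker"])) // 2 1
}
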
pkg/appview/db/batch.go (new file)
@@ -0,0 +1,580 @@
package db

import (
"encoding/json"
"fmt"
"strings"
"time"
)

// BatchSize is the maximum number of rows included in a single multi-row INSERT.
// Kept well under SQLite's default SQLITE_MAX_VARIABLE_NUMBER (32766) and any
// remote libsql parameter ceiling — at 11 columns this is 1100 placeholders.
const BatchSize = 100

// buildPlaceholders returns a comma-separated list of `rows` groups of the form
// `(?,?,?)`, each group containing `cols` placeholders. Used to construct the
// VALUES clause of multi-row INSERT statements.
func buildPlaceholders(rows, cols int) string {
if rows <= 0 || cols <= 0 {
return ""
}
group := "(" + strings.Repeat("?,", cols-1) + "?)"
var sb strings.Builder
sb.Grow((len(group) + 1) * rows)
for i := 0; i < rows; i++ {
if i > 0 {
sb.WriteByte(',')
}
sb.WriteString(group)
}
return sb.String()
}

// chunk returns the half-open range [start, end) for the i-th chunk of size
// BatchSize within a slice of length n.
func chunk(n, i int) (start, end int) {
start = i * BatchSize
end = start + BatchSize
if end > n {
end = n
}
return start, end
}
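
Together these two helpers bound every statement's placeholder count. A standalone demo that exercises copies of them (the helper bodies are copied verbatim from above so the sketch runs on its own):

package main

import (
    "fmt"
    "strings"
)

const BatchSize = 100

func buildPlaceholders(rows, cols int) string {
    if rows <= 0 || cols <= 0 {
        return ""
    }
    group := "(" + strings.Repeat("?,", cols-1) + "?)"
    var sb strings.Builder
    sb.Grow((len(group) + 1) * rows)
    for i := 0; i < rows; i++ {
        if i > 0 {
            sb.WriteByte(',')
        }
        sb.WriteString(group)
    }
    return sb.String()
}

func chunk(n, i int) (start, end int) {
    start = i * BatchSize
    end = start + BatchSize
    if end > n {
        end = n
    }
    return start, end
}

func main() {
    fmt.Println(buildPlaceholders(2, 3)) // (?,?,?),(?,?,?)
    for i := 0; i*BatchSize < 250; i++ {
        fmt.Println(chunk(250, i)) // 0 100, then 100 200, then 200 250
    }
    // Worst case per sub-batch: 11 columns x 100 rows = 1100 placeholders,
    // comfortably under SQLite's default 32766 variable limit.
}
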
// BatchInsertManifests upserts a batch of manifests and returns a map of
// digest → manifest id for the inserted rows (both new and existing). Rows
// are keyed by (did, repository, digest); callers that need the id must
// group their input so that digest is unique per (did, repository) in one
// batch call.
//
// Implementation: one multi-row INSERT per sub-batch, followed by one SELECT
// to fetch ids back (libsql's RETURNING support across replica modes is
// uneven; a second SELECT is reliable and still a single round-trip per
// sub-batch).
func BatchInsertManifests(db DBTX, manifests []Manifest) (map[string]int64, error) {
out := make(map[string]int64, len(manifests))
if len(manifests) == 0 {
return out, nil
}

for i := 0; i*BatchSize < len(manifests); i++ {
start, end := chunk(len(manifests), i)
batch := manifests[start:end]

const cols = 11
args := make([]any, 0, len(batch)*cols)
for _, m := range batch {
args = append(args,
m.DID, m.Repository, m.Digest, m.HoldEndpoint,
m.SchemaVersion, m.MediaType, m.ConfigDigest,
m.ConfigSize, m.ArtifactType,
nullString(m.SubjectDigest),
m.CreatedAt,
)
}

query := `
INSERT INTO manifests
(did, repository, digest, hold_endpoint, schema_version, media_type,
config_digest, config_size, artifact_type, subject_digest, created_at)
VALUES ` + buildPlaceholders(len(batch), cols) + `
ON CONFLICT(did, repository, digest) DO UPDATE SET
hold_endpoint = excluded.hold_endpoint,
schema_version = excluded.schema_version,
media_type = excluded.media_type,
config_digest = excluded.config_digest,
config_size = excluded.config_size,
artifact_type = excluded.artifact_type,
subject_digest = excluded.subject_digest
WHERE excluded.hold_endpoint != manifests.hold_endpoint
OR excluded.schema_version != manifests.schema_version
OR excluded.media_type != manifests.media_type
OR excluded.config_digest IS NOT manifests.config_digest
OR excluded.config_size IS NOT manifests.config_size
OR excluded.artifact_type != manifests.artifact_type
OR excluded.subject_digest IS NOT manifests.subject_digest
`
if _, err := db.Exec(query, args...); err != nil {
return nil, fmt.Errorf("batch insert manifests: %w", err)
}

// Fetch ids for this sub-batch by (did, digest) — digests are unique enough
// that matching on (did, digest) avoids needing a three-column IN list.
// repository is included in the row to disambiguate if a user genuinely has
// the same digest across repos.
selectArgs := make([]any, 0, 1+2*len(batch))
// Group by did (caller usually supplies one did per call, but be safe).
didSet := make(map[string]struct{})
for _, m := range batch {
didSet[m.DID] = struct{}{}
}
// Build a per-did IN (?) query; usually exactly one iteration.
for did := range didSet {
digests := make([]string, 0, len(batch))
for _, m := range batch {
if m.DID == did {
digests = append(digests, m.Digest)
}
}
selectArgs = append(selectArgs[:0], did)
for _, d := range digests {
selectArgs = append(selectArgs, d)
}
selectQuery := `
SELECT repository, digest, id FROM manifests
WHERE did = ? AND digest IN (` +
strings.TrimSuffix(strings.Repeat("?,", len(digests)), ",") + `)
`
rows, err := db.Query(selectQuery, selectArgs...)
if err != nil {
return nil, fmt.Errorf("batch select manifest ids: %w", err)
}
for rows.Next() {
var repo, digest string
var id int64
if err := rows.Scan(&repo, &digest, &id); err != nil {
rows.Close()
return nil, fmt.Errorf("scan manifest id: %w", err)
}
// Key format matches what callers use: "did|repo|digest".
out[manifestKey(did, repo, digest)] = id
}
rows.Close()
}
}
return out, nil
}

// ManifestKey builds the lookup key used by BatchInsertManifests' result map.
// Callers construct the same key from their in-memory Manifest structs to
// find the assigned id.
func ManifestKey(did, repository, digest string) string {
return manifestKey(did, repository, digest)
}

func manifestKey(did, repository, digest string) string {
return did + "|" + repository + "|" + digest
}
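
Callers correlate their in-memory structs with the returned ids through that shared key format. A standalone illustration (the DID, repository, and digest are made up):

package main

import "fmt"

// manifestKey matches the format used by BatchInsertManifests' result map.
func manifestKey(did, repository, digest string) string {
    return did + "|" + repository + "|" + digest
}

func main() {
    // Pretend result of BatchInsertManifests for one sub-batch.
    ids := map[string]int64{
        manifestKey("did:plc:abc123", "app", "sha256:feedface"): 42,
    }
    // A caller re-derives the key from its own struct fields to find the id.
    fmt.Println(ids[manifestKey("did:plc:abc123", "app", "sha256:feedface")]) // 42
}
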
// BatchInsertLayers inserts a batch of layers, skipping any that already exist.
// Layers are immutable, so ON CONFLICT DO NOTHING matches the single-row
// InsertLayer semantics.
func BatchInsertLayers(db DBTX, layers []Layer) error {
if len(layers) == 0 {
return nil
}
for i := 0; i*BatchSize < len(layers); i++ {
start, end := chunk(len(layers), i)
batch := layers[start:end]

const cols = 6
args := make([]any, 0, len(batch)*cols)
for _, l := range batch {
var annotationsJSON any
if len(l.Annotations) > 0 {
b, err := json.Marshal(l.Annotations)
if err != nil {
return fmt.Errorf("marshal layer annotations: %w", err)
}
s := string(b)
annotationsJSON = &s
}
args = append(args, l.ManifestID, l.Digest, l.Size, l.MediaType, l.LayerIndex, annotationsJSON)
}

query := `
INSERT INTO layers (manifest_id, digest, size, media_type, layer_index, annotations)
VALUES ` + buildPlaceholders(len(batch), cols) + `
ON CONFLICT(manifest_id, layer_index) DO NOTHING
`
if _, err := db.Exec(query, args...); err != nil {
return fmt.Errorf("batch insert layers: %w", err)
}
}
return nil
}

// BatchInsertManifestReferences inserts a batch of manifest references.
// The table has PRIMARY KEY(manifest_id, reference_index); duplicates skip.
func BatchInsertManifestReferences(db DBTX, refs []ManifestReference) error {
if len(refs) == 0 {
return nil
}
for i := 0; i*BatchSize < len(refs); i++ {
start, end := chunk(len(refs), i)
batch := refs[start:end]

const cols = 10
args := make([]any, 0, len(batch)*cols)
for _, r := range batch {
args = append(args,
r.ManifestID, r.Digest, r.Size, r.MediaType,
r.PlatformArchitecture, r.PlatformOS,
r.PlatformVariant, r.PlatformOSVersion,
r.IsAttestation, r.ReferenceIndex,
)
}

query := `
INSERT INTO manifest_references (manifest_id, digest, size, media_type,
platform_architecture, platform_os,
platform_variant, platform_os_version,
is_attestation, reference_index)
VALUES ` + buildPlaceholders(len(batch), cols) + `
ON CONFLICT(manifest_id, reference_index) DO NOTHING
`
if _, err := db.Exec(query, args...); err != nil {
return fmt.Errorf("batch insert manifest references: %w", err)
}
}
return nil
}

// BatchUpsertTags upserts a batch of tag records, matching UpsertTag semantics.
func BatchUpsertTags(db DBTX, tags []Tag) error {
if len(tags) == 0 {
return nil
}
for i := 0; i*BatchSize < len(tags); i++ {
start, end := chunk(len(tags), i)
batch := tags[start:end]

const cols = 5
args := make([]any, 0, len(batch)*cols)
for _, t := range batch {
args = append(args, t.DID, t.Repository, t.Tag, t.Digest, t.CreatedAt)
}

query := `
INSERT INTO tags (did, repository, tag, digest, created_at)
VALUES ` + buildPlaceholders(len(batch), cols) + `
ON CONFLICT(did, repository, tag) DO UPDATE SET
digest = excluded.digest,
created_at = excluded.created_at
WHERE excluded.digest != tags.digest
OR excluded.created_at != tags.created_at
`
if _, err := db.Exec(query, args...); err != nil {
return fmt.Errorf("batch upsert tags: %w", err)
}
}
return nil
}
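
One subtlety shared by all the guarded upserts in this file: the DO UPDATE ... WHERE clauses use != for NOT NULL columns but IS NOT for nullable ones. In SQLite, != against a NULL operand yields NULL (falsy), so a changed-vs-NULL row would never update; IS NOT is the NULL-safe inequality. A standalone Go analogue of that distinction (names are illustrative):

package main

import "fmt"

// isNot mirrors SQLite's IS NOT: NULL-safe inequality, as used in the
// WHERE guards for nullable columns where != would evaluate to NULL.
func isNot(a, b *string) bool {
    if a == nil || b == nil {
        return (a == nil) != (b == nil)
    }
    return *a != *b
}

func main() {
    v := "sha256:abc"
    fmt.Println(isNot(nil, nil)) // false: two NULLs compare equal, update skipped
    fmt.Println(isNot(&v, nil))  // true: value vs NULL triggers the update
    fmt.Println(isNot(&v, &v))   // false: identical values, replay is a no-op
}
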
// StarInput is a struct projection of the UpsertStar argument list for use with BatchUpsertStars.
|
||||
type StarInput struct {
|
||||
StarrerDID string
|
||||
OwnerDID string
|
||||
Repository string
|
||||
CreatedAt time.Time
|
||||
}
|
||||
|
||||
// BatchUpsertStars upserts a batch of stars. Stars are immutable.
|
||||
func BatchUpsertStars(db DBTX, stars []StarInput) error {
|
||||
if len(stars) == 0 {
|
||||
return nil
|
||||
}
|
||||
for i := 0; i*BatchSize < len(stars); i++ {
|
||||
start, end := chunk(len(stars), i)
|
||||
batch := stars[start:end]
|
||||
|
||||
const cols = 4
|
||||
args := make([]any, 0, len(batch)*cols)
|
||||
for _, s := range batch {
|
||||
args = append(args, s.StarrerDID, s.OwnerDID, s.Repository, s.CreatedAt)
|
||||
}
|
||||
|
||||
query := `
|
||||
INSERT INTO stars (starrer_did, owner_did, repository, created_at)
|
||||
VALUES ` + buildPlaceholders(len(batch), cols) + `
|
||||
ON CONFLICT(starrer_did, owner_did, repository) DO NOTHING
|
||||
`
|
||||
if _, err := db.Exec(query, args...); err != nil {
|
||||
return fmt.Errorf("batch upsert stars: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BatchUpsertRepoPages upserts a batch of repo page records.
|
||||
func BatchUpsertRepoPages(db DBTX, pages []RepoPage) error {
|
||||
if len(pages) == 0 {
|
||||
return nil
|
||||
}
|
||||
for i := 0; i*BatchSize < len(pages); i++ {
|
||||
start, end := chunk(len(pages), i)
|
||||
batch := pages[start:end]
|
||||
|
||||
const cols = 7
|
||||
args := make([]any, 0, len(batch)*cols)
|
||||
for _, p := range batch {
|
||||
args = append(args,
|
||||
p.DID, p.Repository, p.Description, p.AvatarCID,
|
||||
p.UserEdited, p.CreatedAt, p.UpdatedAt,
|
||||
)
|
||||
}
|
||||
|
||||
query := `
|
||||
INSERT INTO repo_pages (did, repository, description, avatar_cid, user_edited, created_at, updated_at)
|
||||
VALUES ` + buildPlaceholders(len(batch), cols) + `
|
||||
ON CONFLICT(did, repository) DO UPDATE SET
|
||||
description = excluded.description,
|
||||
avatar_cid = excluded.avatar_cid,
|
||||
user_edited = excluded.user_edited,
|
||||
updated_at = excluded.updated_at
|
||||
WHERE excluded.description IS NOT repo_pages.description
|
||||
OR excluded.avatar_cid IS NOT repo_pages.avatar_cid
|
||||
OR excluded.user_edited IS NOT repo_pages.user_edited
|
||||
`
|
||||
if _, err := db.Exec(query, args...); err != nil {
|
||||
return fmt.Errorf("batch upsert repo pages: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BatchUpsertDailyStats upserts a batch of daily stats rows.
|
||||
func BatchUpsertDailyStats(db DBTX, stats []DailyStats) error {
|
||||
if len(stats) == 0 {
|
||||
return nil
|
||||
}
|
||||
for i := 0; i*BatchSize < len(stats); i++ {
|
||||
start, end := chunk(len(stats), i)
|
||||
batch := stats[start:end]
|
||||
|
||||
const cols = 5
|
||||
args := make([]any, 0, len(batch)*cols)
|
||||
for _, s := range batch {
|
||||
args = append(args, s.DID, s.Repository, s.Date, s.PullCount, s.PushCount)
|
||||
}
|
||||
|
||||
query := `
|
||||
INSERT INTO repository_stats_daily (did, repository, date, pull_count, push_count)
|
||||
VALUES ` + buildPlaceholders(len(batch), cols) + `
|
||||
ON CONFLICT(did, repository, date) DO UPDATE SET
|
||||
pull_count = excluded.pull_count,
|
||||
push_count = excluded.push_count
|
||||
WHERE excluded.pull_count != repository_stats_daily.pull_count
|
||||
OR excluded.push_count != repository_stats_daily.push_count
|
||||
`
|
||||
if _, err := db.Exec(query, args...); err != nil {
|
||||
return fmt.Errorf("batch upsert daily stats: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BatchUpsertRepositoryStats upserts aggregated repository stats.
|
||||
func BatchUpsertRepositoryStats(db DBTX, stats []RepositoryStats) error {
|
||||
if len(stats) == 0 {
|
||||
return nil
|
||||
}
|
||||
for i := 0; i*BatchSize < len(stats); i++ {
|
||||
start, end := chunk(len(stats), i)
|
||||
batch := stats[start:end]
|
||||
|
||||
const cols = 6
|
||||
args := make([]any, 0, len(batch)*cols)
|
||||
for _, s := range batch {
|
||||
args = append(args,
|
||||
s.DID, s.Repository, s.PullCount, s.LastPull, s.PushCount, s.LastPush,
|
||||
)
|
||||
}
|
||||
|
||||
query := `
|
||||
INSERT INTO repository_stats (did, repository, pull_count, last_pull, push_count, last_push)
|
||||
VALUES ` + buildPlaceholders(len(batch), cols) + `
|
||||
ON CONFLICT(did, repository) DO UPDATE SET
|
||||
pull_count = excluded.pull_count,
|
||||
last_pull = excluded.last_pull,
|
||||
push_count = excluded.push_count,
|
||||
last_push = excluded.last_push
|
||||
WHERE excluded.pull_count != repository_stats.pull_count
|
||||
OR excluded.last_pull IS NOT repository_stats.last_pull
|
||||
OR excluded.push_count != repository_stats.push_count
|
||||
OR excluded.last_push IS NOT repository_stats.last_push
|
||||
`
|
||||
if _, err := db.Exec(query, args...); err != nil {
|
||||
return fmt.Errorf("batch upsert repository stats: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BatchUpsertCaptainRecords upserts a batch of captain records.
|
||||
func BatchUpsertCaptainRecords(db DBTX, records []HoldCaptainRecord) error {
|
||||
if len(records) == 0 {
|
||||
return nil
|
||||
}
|
||||
for i := 0; i*BatchSize < len(records); i++ {
|
||||
start, end := chunk(len(records), i)
|
||||
batch := records[start:end]
|
||||
|
||||
const cols = 8
|
||||
args := make([]any, 0, len(batch)*cols)
|
||||
for _, r := range batch {
|
||||
args = append(args,
|
||||
r.HoldDID, r.OwnerDID, r.Public, r.AllowAllCrew,
|
||||
nullString(r.DeployedAt),
|
||||
nullString(r.Region),
|
||||
nullString(r.Successor),
|
||||
r.UpdatedAt,
|
||||
)
|
||||
}
|
||||
|
||||
query := `
|
||||
INSERT INTO hold_captain_records (
|
||||
hold_did, owner_did, public, allow_all_crew,
|
||||
deployed_at, region, successor, updated_at
|
||||
) VALUES ` + buildPlaceholders(len(batch), cols) + `
|
||||
ON CONFLICT(hold_did) DO UPDATE SET
|
||||
owner_did = excluded.owner_did,
|
||||
public = excluded.public,
|
||||
allow_all_crew = excluded.allow_all_crew,
|
||||
deployed_at = excluded.deployed_at,
|
||||
region = excluded.region,
|
||||
successor = excluded.successor,
|
||||
updated_at = excluded.updated_at
|
||||
WHERE excluded.owner_did != hold_captain_records.owner_did
|
||||
OR excluded.public != hold_captain_records.public
|
||||
OR excluded.allow_all_crew != hold_captain_records.allow_all_crew
|
||||
OR excluded.deployed_at IS NOT hold_captain_records.deployed_at
|
||||
OR excluded.region IS NOT hold_captain_records.region
|
||||
OR excluded.successor IS NOT hold_captain_records.successor
|
||||
`
|
||||
if _, err := db.Exec(query, args...); err != nil {
|
||||
return fmt.Errorf("batch upsert captain records: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BatchUpsertCrewMembers upserts a batch of crew members.
|
||||
func BatchUpsertCrewMembers(db DBTX, members []CrewMember) error {
|
||||
if len(members) == 0 {
|
||||
return nil
|
||||
}
|
||||
for i := 0; i*BatchSize < len(members); i++ {
|
||||
start, end := chunk(len(members), i)
|
||||
batch := members[start:end]
|
||||
|
||||
// updated_at uses CURRENT_TIMESTAMP literal, so it's not a placeholder.
|
||||
const cols = 7
|
||||
args := make([]any, 0, len(batch)*cols)
|
||||
for _, m := range batch {
|
||||
args = append(args,
|
||||
m.HoldDID, m.MemberDID, m.Rkey,
|
||||
nullString(m.Role),
|
||||
nullString(m.Permissions),
|
||||
nullString(m.Tier),
|
||||
nullString(m.AddedAt),
|
||||
)
|
||||
}
|
||||
|
||||
// Replace each group with `(?,?,?,?,?,?,?,CURRENT_TIMESTAMP)` — we build it
|
||||
// manually because buildPlaceholders only handles uniform placeholders.
|
||||
group := "(" + strings.Repeat("?,", cols) + "CURRENT_TIMESTAMP)"
|
||||
var sb strings.Builder
|
||||
sb.Grow((len(group) + 1) * len(batch))
|
||||
for i := 0; i < len(batch); i++ {
|
||||
if i > 0 {
|
||||
sb.WriteByte(',')
|
||||
}
|
||||
sb.WriteString(group)
|
||||
}
|
||||
|
||||
query := `
|
||||
INSERT INTO hold_crew_members (
|
||||
hold_did, member_did, rkey, role, permissions, tier, added_at, updated_at
|
||||
) VALUES ` + sb.String() + `
|
||||
ON CONFLICT(hold_did, member_did) DO UPDATE SET
|
||||
rkey = excluded.rkey,
|
||||
role = excluded.role,
|
||||
permissions = excluded.permissions,
|
||||
tier = excluded.tier,
|
||||
added_at = excluded.added_at,
|
||||
updated_at = CURRENT_TIMESTAMP
|
||||
WHERE excluded.rkey != hold_crew_members.rkey
|
||||
OR excluded.role IS NOT hold_crew_members.role
|
||||
OR excluded.permissions IS NOT hold_crew_members.permissions
|
||||
OR excluded.tier IS NOT hold_crew_members.tier
|
||||
OR excluded.added_at IS NOT hold_crew_members.added_at
|
||||
`
|
||||
if _, err := db.Exec(query, args...); err != nil {
|
||||
return fmt.Errorf("batch upsert crew members: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
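For illustration only (not part of the source): with two crew members and cols = 7, the manual group construction in BatchUpsertCrewMembers produces the VALUES clause (?,?,?,?,?,?,?,CURRENT_TIMESTAMP),(?,?,?,?,?,?,?,CURRENT_TIMESTAMP) — seven bound parameters plus a literal timestamp per row, which is exactly why buildPlaceholders cannot be reused here.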
|
||||
|
||||
// AnnotationRow represents a single key/value annotation for a repository,
|
||||
// used by BatchUpsertRepositoryAnnotations.
|
||||
type AnnotationRow struct {
|
||||
DID string
|
||||
Repository string
|
||||
Key string
|
||||
Value string
|
||||
}
|
||||
|
||||
// BatchUpsertRepositoryAnnotations upserts annotation rows and deletes any
|
||||
// stale keys for each (did, repository) represented in the input. The caller
|
||||
// is responsible for pre-filtering: rows should represent only repositories
|
||||
// whose newest manifest has at least one non-empty annotation, matching the
|
||||
// single-row UpsertRepositoryAnnotations semantics.
|
||||
func BatchUpsertRepositoryAnnotations(db DBTX, rows []AnnotationRow) error {
|
||||
if len(rows) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Group rows by (did, repository) so we can delete stale keys per repo.
|
||||
type repoKey struct{ did, repo string }
|
||||
keysByRepo := make(map[repoKey][]string)
|
||||
for _, r := range rows {
|
||||
k := repoKey{r.DID, r.Repository}
|
||||
keysByRepo[k] = append(keysByRepo[k], r.Key)
|
||||
}
|
||||
|
||||
// Delete stale keys per repository in one statement each. We could batch
|
||||
// further with OR chains, but DELETE is cheap and each repo has few keys.
|
||||
for k, keys := range keysByRepo {
|
||||
placeholders := strings.TrimSuffix(strings.Repeat("?,", len(keys)), ",")
|
||||
args := make([]any, 0, 2+len(keys))
|
||||
args = append(args, k.did, k.repo)
|
||||
for _, key := range keys {
|
||||
args = append(args, key)
|
||||
}
|
||||
if _, err := db.Exec(`
|
||||
DELETE FROM repository_annotations
|
||||
WHERE did = ? AND repository = ? AND key NOT IN (`+placeholders+`)
|
||||
`, args...); err != nil {
|
||||
return fmt.Errorf("batch delete stale annotations: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Upsert all annotation rows in sub-batches.
|
||||
now := time.Now()
|
||||
for i := 0; i*BatchSize < len(rows); i++ {
|
||||
start, end := chunk(len(rows), i)
|
||||
batch := rows[start:end]
|
||||
|
||||
const cols = 5
|
||||
args := make([]any, 0, len(batch)*cols)
|
||||
for _, r := range batch {
|
||||
args = append(args, r.DID, r.Repository, r.Key, r.Value, now)
|
||||
}
|
||||
|
||||
query := `
|
||||
INSERT INTO repository_annotations (did, repository, key, value, updated_at)
|
||||
VALUES ` + buildPlaceholders(len(batch), cols) + `
|
||||
ON CONFLICT(did, repository, key) DO UPDATE SET
|
||||
value = excluded.value,
|
||||
updated_at = excluded.updated_at
|
||||
WHERE excluded.value != repository_annotations.value
|
||||
`
|
||||
if _, err := db.Exec(query, args...); err != nil {
|
||||
return fmt.Errorf("batch upsert annotations: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
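A minimal caller-side sketch (not part of the package) of feeding BatchUpsertRepositoryAnnotations; the per-repo annotation maps are assumed to come from each repository's newest manifest and to be pre-filtered to non-empty values, as the doc comment requires.
// buildAnnotationRows is a hypothetical helper: it flattens per-repo
// annotation maps (repo -> key -> value) into the AnnotationRow slice the
// batch upsert expects. Upstream filtering to non-empty values is assumed.
func buildAnnotationRows(did string, byRepo map[string]map[string]string) []AnnotationRow {
	var rows []AnnotationRow
	for repo, annotations := range byRepo {
		for key, value := range annotations {
			rows = append(rows, AnnotationRow{DID: did, Repository: repo, Key: key, Value: value})
		}
	}
	return rows
}
A caller would then run BatchUpsertRepositoryAnnotations(db, buildAnnotationRows(did, byRepo)), typically in the same transaction as the manifest batch.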
|
||||
383
pkg/appview/db/batch_test.go
Normal file
@@ -0,0 +1,383 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// setupBatchTestDB spins up a fresh in-memory libsql database with the full
|
||||
// schema applied, so every batch test can write realistic data without
|
||||
// stubbing individual tables.
|
||||
func setupBatchTestDB(t *testing.T) *sql.DB {
|
||||
t.Helper()
|
||||
safeName := strings.ReplaceAll(t.Name(), "/", "_")
|
||||
d, err := InitDB(fmt.Sprintf("file:%s?mode=memory&cache=shared", safeName), LibsqlConfig{})
|
||||
if err != nil {
|
||||
t.Fatalf("init db: %v", err)
|
||||
}
|
||||
// Single conn to avoid cross-test contention in the shared in-memory cache.
|
||||
d.SetMaxOpenConns(1)
|
||||
t.Cleanup(func() { d.Close() })
|
||||
return d
|
||||
}
|
||||
|
||||
func createBatchTestUser(t *testing.T, d *sql.DB, did string) {
|
||||
t.Helper()
|
||||
_, err := d.Exec(`
|
||||
INSERT OR IGNORE INTO users (did, handle, pds_endpoint, last_seen)
|
||||
VALUES (?, ?, ?, datetime('now'))
|
||||
`, did, did+".bsky.social", "https://pds.example.com")
|
||||
if err != nil {
|
||||
t.Fatalf("seed user: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func countRows(t *testing.T, d *sql.DB, query string, args ...any) int {
|
||||
t.Helper()
|
||||
var n int
|
||||
if err := d.QueryRow(query, args...).Scan(&n); err != nil {
|
||||
t.Fatalf("count: %v", err)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func TestBuildPlaceholders(t *testing.T) {
|
||||
cases := []struct {
|
||||
rows, cols int
|
||||
want string
|
||||
}{
|
||||
{1, 1, "(?)"},
|
||||
{2, 1, "(?),(?)"},
|
||||
{1, 3, "(?,?,?)"},
|
||||
{3, 2, "(?,?),(?,?),(?,?)"},
|
||||
{0, 5, ""},
|
||||
{5, 0, ""},
|
||||
}
|
||||
for _, c := range cases {
|
||||
got := buildPlaceholders(c.rows, c.cols)
|
||||
if got != c.want {
|
||||
t.Errorf("buildPlaceholders(%d,%d) = %q, want %q", c.rows, c.cols, got, c.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchInsertManifests_InsertsAndReturnsIDs(t *testing.T) {
|
||||
d := setupBatchTestDB(t)
|
||||
createBatchTestUser(t, d, "did:plc:alice")
|
||||
|
||||
now := time.Now()
|
||||
manifests := []Manifest{
|
||||
{DID: "did:plc:alice", Repository: "app1", Digest: "sha256:aaa", HoldEndpoint: "did:web:hold", SchemaVersion: 2, MediaType: "application/vnd.oci.image.manifest.v1+json", ArtifactType: "container-image", CreatedAt: now},
|
||||
{DID: "did:plc:alice", Repository: "app2", Digest: "sha256:bbb", HoldEndpoint: "did:web:hold", SchemaVersion: 2, MediaType: "application/vnd.oci.image.manifest.v1+json", ArtifactType: "container-image", CreatedAt: now},
|
||||
}
|
||||
|
||||
ids, err := BatchInsertManifests(d, manifests)
|
||||
if err != nil {
|
||||
t.Fatalf("batch insert: %v", err)
|
||||
}
|
||||
if len(ids) != 2 {
|
||||
t.Fatalf("expected 2 ids, got %d", len(ids))
|
||||
}
|
||||
if ids[ManifestKey("did:plc:alice", "app1", "sha256:aaa")] == 0 {
|
||||
t.Errorf("missing id for app1")
|
||||
}
|
||||
if ids[ManifestKey("did:plc:alice", "app2", "sha256:bbb")] == 0 {
|
||||
t.Errorf("missing id for app2")
|
||||
}
|
||||
if got := countRows(t, d, `SELECT COUNT(*) FROM manifests`); got != 2 {
|
||||
t.Errorf("row count = %d, want 2", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchInsertManifests_Idempotent(t *testing.T) {
|
||||
d := setupBatchTestDB(t)
|
||||
createBatchTestUser(t, d, "did:plc:alice")
|
||||
|
||||
now := time.Now()
|
||||
m := []Manifest{{
|
||||
DID: "did:plc:alice", Repository: "app", Digest: "sha256:aaa",
|
||||
HoldEndpoint: "did:web:hold", SchemaVersion: 2,
|
||||
MediaType: "application/vnd.oci.image.manifest.v1+json",
|
||||
ArtifactType: "container-image", CreatedAt: now,
|
||||
}}
|
||||
if _, err := BatchInsertManifests(d, m); err != nil {
|
||||
t.Fatalf("first insert: %v", err)
|
||||
}
|
||||
if _, err := BatchInsertManifests(d, m); err != nil {
|
||||
t.Fatalf("second insert: %v", err)
|
||||
}
|
||||
if got := countRows(t, d, `SELECT COUNT(*) FROM manifests`); got != 1 {
|
||||
t.Errorf("expected idempotent; row count = %d", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchInsertManifests_Chunking(t *testing.T) {
|
||||
// Exceed one sub-batch to exercise the chunk loop.
|
||||
d := setupBatchTestDB(t)
|
||||
createBatchTestUser(t, d, "did:plc:alice")
|
||||
|
||||
const n = BatchSize + 17
|
||||
now := time.Now()
|
||||
manifests := make([]Manifest, n)
|
||||
for i := 0; i < n; i++ {
|
||||
manifests[i] = Manifest{
|
||||
DID: "did:plc:alice", Repository: "app", Digest: fmt.Sprintf("sha256:%04d", i),
|
||||
HoldEndpoint: "did:web:hold", SchemaVersion: 2,
|
||||
MediaType: "application/vnd.oci.image.manifest.v1+json",
|
||||
ArtifactType: "container-image", CreatedAt: now,
|
||||
}
|
||||
}
|
||||
ids, err := BatchInsertManifests(d, manifests)
|
||||
if err != nil {
|
||||
t.Fatalf("batch insert: %v", err)
|
||||
}
|
||||
if len(ids) != n {
|
||||
t.Errorf("ids len = %d, want %d", len(ids), n)
|
||||
}
|
||||
if got := countRows(t, d, `SELECT COUNT(*) FROM manifests`); got != n {
|
||||
t.Errorf("row count = %d, want %d", got, n)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchInsertLayers_RespectsFK(t *testing.T) {
|
||||
d := setupBatchTestDB(t)
|
||||
createBatchTestUser(t, d, "did:plc:alice")
|
||||
|
||||
now := time.Now()
|
||||
ids, err := BatchInsertManifests(d, []Manifest{{
|
||||
DID: "did:plc:alice", Repository: "app", Digest: "sha256:aaa",
|
||||
HoldEndpoint: "did:web:hold", SchemaVersion: 2,
|
||||
MediaType: "application/vnd.oci.image.manifest.v1+json",
|
||||
ArtifactType: "container-image", CreatedAt: now,
|
||||
}})
|
||||
if err != nil {
|
||||
t.Fatalf("insert manifest: %v", err)
|
||||
}
|
||||
mid := ids[ManifestKey("did:plc:alice", "app", "sha256:aaa")]
|
||||
|
||||
layers := []Layer{
|
||||
{ManifestID: mid, Digest: "sha256:L0", Size: 100, MediaType: "application/vnd.oci.image.layer.v1.tar+gzip", LayerIndex: 0},
|
||||
{ManifestID: mid, Digest: "sha256:L1", Size: 200, MediaType: "application/vnd.oci.image.layer.v1.tar+gzip", LayerIndex: 1},
|
||||
}
|
||||
if err := BatchInsertLayers(d, layers); err != nil {
|
||||
t.Fatalf("batch insert layers: %v", err)
|
||||
}
|
||||
if got := countRows(t, d, `SELECT COUNT(*) FROM layers`); got != 2 {
|
||||
t.Errorf("layers count = %d, want 2", got)
|
||||
}
|
||||
// Re-run to confirm ON CONFLICT DO NOTHING doesn't error.
|
||||
if err := BatchInsertLayers(d, layers); err != nil {
|
||||
t.Fatalf("idempotent layers: %v", err)
|
||||
}
|
||||
if got := countRows(t, d, `SELECT COUNT(*) FROM layers`); got != 2 {
|
||||
t.Errorf("layers after re-insert = %d, want 2", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchUpsertTags_Idempotent(t *testing.T) {
|
||||
d := setupBatchTestDB(t)
|
||||
createBatchTestUser(t, d, "did:plc:alice")
|
||||
|
||||
now := time.Now()
|
||||
tags := []Tag{
|
||||
{DID: "did:plc:alice", Repository: "app", Tag: "v1", Digest: "sha256:aaa", CreatedAt: now},
|
||||
{DID: "did:plc:alice", Repository: "app", Tag: "v2", Digest: "sha256:bbb", CreatedAt: now},
|
||||
}
|
||||
if err := BatchUpsertTags(d, tags); err != nil {
|
||||
t.Fatalf("batch upsert: %v", err)
|
||||
}
|
||||
if err := BatchUpsertTags(d, tags); err != nil {
|
||||
t.Fatalf("rerun: %v", err)
|
||||
}
|
||||
if got := countRows(t, d, `SELECT COUNT(*) FROM tags`); got != 2 {
|
||||
t.Errorf("tags count = %d, want 2", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchUpsertStars(t *testing.T) {
|
||||
d := setupBatchTestDB(t)
|
||||
createBatchTestUser(t, d, "did:plc:alice")
|
||||
createBatchTestUser(t, d, "did:plc:bob")
|
||||
|
||||
now := time.Now()
|
||||
stars := []StarInput{
|
||||
{StarrerDID: "did:plc:bob", OwnerDID: "did:plc:alice", Repository: "app", CreatedAt: now},
|
||||
}
|
||||
if err := BatchUpsertStars(d, stars); err != nil {
|
||||
t.Fatalf("batch upsert stars: %v", err)
|
||||
}
|
||||
// Re-insert to confirm ON CONFLICT DO NOTHING.
|
||||
if err := BatchUpsertStars(d, stars); err != nil {
|
||||
t.Fatalf("rerun: %v", err)
|
||||
}
|
||||
if got := countRows(t, d, `SELECT COUNT(*) FROM stars`); got != 1 {
|
||||
t.Errorf("stars count = %d, want 1", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchUpsertRepoPages(t *testing.T) {
|
||||
d := setupBatchTestDB(t)
|
||||
createBatchTestUser(t, d, "did:plc:alice")
|
||||
|
||||
now := time.Now()
|
||||
pages := []RepoPage{
|
||||
{DID: "did:plc:alice", Repository: "app", Description: "desc", CreatedAt: now, UpdatedAt: now},
|
||||
}
|
||||
if err := BatchUpsertRepoPages(d, pages); err != nil {
|
||||
t.Fatalf("batch upsert: %v", err)
|
||||
}
|
||||
// Update with new description.
|
||||
pages[0].Description = "new desc"
|
||||
if err := BatchUpsertRepoPages(d, pages); err != nil {
|
||||
t.Fatalf("update: %v", err)
|
||||
}
|
||||
var desc string
|
||||
if err := d.QueryRow(`SELECT description FROM repo_pages WHERE did=? AND repository=?`,
|
||||
"did:plc:alice", "app").Scan(&desc); err != nil {
|
||||
t.Fatalf("select: %v", err)
|
||||
}
|
||||
if desc != "new desc" {
|
||||
t.Errorf("description = %q, want %q", desc, "new desc")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchUpsertDailyStats(t *testing.T) {
|
||||
d := setupBatchTestDB(t)
|
||||
createBatchTestUser(t, d, "did:plc:alice")
|
||||
|
||||
stats := []DailyStats{
|
||||
{DID: "did:plc:alice", Repository: "app", Date: "2026-04-19", PullCount: 5, PushCount: 2},
|
||||
}
|
||||
if err := BatchUpsertDailyStats(d, stats); err != nil {
|
||||
t.Fatalf("upsert: %v", err)
|
||||
}
|
||||
stats[0].PullCount = 10
|
||||
if err := BatchUpsertDailyStats(d, stats); err != nil {
|
||||
t.Fatalf("update: %v", err)
|
||||
}
|
||||
var pull int
|
||||
if err := d.QueryRow(`SELECT pull_count FROM repository_stats_daily WHERE did=? AND repository=? AND date=?`,
|
||||
"did:plc:alice", "app", "2026-04-19").Scan(&pull); err != nil {
|
||||
t.Fatalf("select: %v", err)
|
||||
}
|
||||
if pull != 10 {
|
||||
t.Errorf("pull = %d, want 10", pull)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchUpsertRepositoryAnnotations_DropsStaleKeys(t *testing.T) {
|
||||
d := setupBatchTestDB(t)
|
||||
createBatchTestUser(t, d, "did:plc:alice")
|
||||
|
||||
rows := []AnnotationRow{
|
||||
{DID: "did:plc:alice", Repository: "app", Key: "a", Value: "1"},
|
||||
{DID: "did:plc:alice", Repository: "app", Key: "b", Value: "2"},
|
||||
}
|
||||
if err := BatchUpsertRepositoryAnnotations(d, rows); err != nil {
|
||||
t.Fatalf("initial: %v", err)
|
||||
}
|
||||
if got := countRows(t, d, `SELECT COUNT(*) FROM repository_annotations WHERE did=? AND repository=?`,
|
||||
"did:plc:alice", "app"); got != 2 {
|
||||
t.Errorf("initial count = %d, want 2", got)
|
||||
}
|
||||
|
||||
// Second call drops stale key "b".
|
||||
rows = []AnnotationRow{
|
||||
{DID: "did:plc:alice", Repository: "app", Key: "a", Value: "1-updated"},
|
||||
}
|
||||
if err := BatchUpsertRepositoryAnnotations(d, rows); err != nil {
|
||||
t.Fatalf("update: %v", err)
|
||||
}
|
||||
if got := countRows(t, d, `SELECT COUNT(*) FROM repository_annotations WHERE did=? AND repository=?`,
|
||||
"did:plc:alice", "app"); got != 1 {
|
||||
t.Errorf("after update = %d, want 1", got)
|
||||
}
|
||||
var val string
|
||||
if err := d.QueryRow(`SELECT value FROM repository_annotations WHERE key=? AND did=? AND repository=?`,
|
||||
"a", "did:plc:alice", "app").Scan(&val); err != nil {
|
||||
t.Fatalf("select: %v", err)
|
||||
}
|
||||
if val != "1-updated" {
|
||||
t.Errorf("value = %q, want 1-updated", val)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchUpsertCaptainRecords(t *testing.T) {
|
||||
d := setupBatchTestDB(t)
|
||||
|
||||
now := time.Now()
|
||||
records := []HoldCaptainRecord{
|
||||
{HoldDID: "did:web:hold1", OwnerDID: "did:plc:alice", Public: true, AllowAllCrew: false, UpdatedAt: now},
|
||||
}
|
||||
if err := BatchUpsertCaptainRecords(d, records); err != nil {
|
||||
t.Fatalf("upsert: %v", err)
|
||||
}
|
||||
if got := countRows(t, d, `SELECT COUNT(*) FROM hold_captain_records`); got != 1 {
|
||||
t.Errorf("count = %d, want 1", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchUpsertCrewMembers(t *testing.T) {
|
||||
d := setupBatchTestDB(t)
|
||||
|
||||
members := []CrewMember{
|
||||
{HoldDID: "did:web:hold1", MemberDID: "did:plc:alice", Rkey: "rkey1", Role: "owner"},
|
||||
}
|
||||
if err := BatchUpsertCrewMembers(d, members); err != nil {
|
||||
t.Fatalf("upsert: %v", err)
|
||||
}
|
||||
// Update the rkey: triggers the ON CONFLICT path.
|
||||
members[0].Rkey = "rkey2"
|
||||
if err := BatchUpsertCrewMembers(d, members); err != nil {
|
||||
t.Fatalf("update: %v", err)
|
||||
}
|
||||
var rkey string
|
||||
if err := d.QueryRow(`SELECT rkey FROM hold_crew_members WHERE hold_did=? AND member_did=?`,
|
||||
"did:web:hold1", "did:plc:alice").Scan(&rkey); err != nil {
|
||||
t.Fatalf("select: %v", err)
|
||||
}
|
||||
if rkey != "rkey2" {
|
||||
t.Errorf("rkey = %q, want rkey2", rkey)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchEmptySlices(t *testing.T) {
|
||||
d := setupBatchTestDB(t)
|
||||
// Every batch function must tolerate an empty input slice without erroring.
|
||||
if _, err := BatchInsertManifests(d, nil); err != nil {
|
||||
t.Errorf("manifests: %v", err)
|
||||
}
|
||||
if err := BatchInsertLayers(d, nil); err != nil {
|
||||
t.Errorf("layers: %v", err)
|
||||
}
|
||||
if err := BatchInsertManifestReferences(d, nil); err != nil {
|
||||
t.Errorf("refs: %v", err)
|
||||
}
|
||||
if err := BatchUpsertTags(d, nil); err != nil {
|
||||
t.Errorf("tags: %v", err)
|
||||
}
|
||||
if err := BatchUpsertStars(d, nil); err != nil {
|
||||
t.Errorf("stars: %v", err)
|
||||
}
|
||||
if err := BatchUpsertRepoPages(d, nil); err != nil {
|
||||
t.Errorf("repo pages: %v", err)
|
||||
}
|
||||
if err := BatchUpsertDailyStats(d, nil); err != nil {
|
||||
t.Errorf("daily: %v", err)
|
||||
}
|
||||
if err := BatchUpsertRepositoryStats(d, nil); err != nil {
|
||||
t.Errorf("repo stats: %v", err)
|
||||
}
|
||||
if err := BatchUpsertCaptainRecords(d, nil); err != nil {
|
||||
t.Errorf("captain: %v", err)
|
||||
}
|
||||
if err := BatchUpsertCrewMembers(d, nil); err != nil {
|
||||
t.Errorf("crew: %v", err)
|
||||
}
|
||||
if err := BatchUpsertRepositoryAnnotations(d, nil); err != nil {
|
||||
t.Errorf("annotations: %v", err)
|
||||
}
|
||||
}
|
||||
69
pkg/appview/db/conn.go
Normal file
@@ -0,0 +1,69 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// poisonedTxSubstrings are error-message substrings emitted when go-libsql or the
|
||||
// remote libsql server leaves a connection in a state that cannot safely be reused.
|
||||
// Most come from Bunny Database killing a transaction that exceeded its server-side
|
||||
// timeout; the follow-on COMMIT then sees the connection in a poisoned state.
|
||||
var poisonedTxSubstrings = []string{
|
||||
"Transaction timed-out",
|
||||
"no transaction is active",
|
||||
"connection has reached an invalid state",
|
||||
"invalid state, started with",
|
||||
}
|
||||
|
||||
// IsPoisonedTxErr reports whether err indicates the underlying connection is no
|
||||
// longer usable for further statements. Callers should evict the connection from
|
||||
// the pool when this returns true.
|
||||
func IsPoisonedTxErr(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
msg := err.Error()
|
||||
for _, s := range poisonedTxSubstrings {
|
||||
if strings.Contains(msg, s) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ExecResilient borrows a dedicated connection from db, runs fn against it, and
|
||||
// evicts the connection from the pool when fn returns a poisoned-transaction
|
||||
// error. The connection is always released via Close.
|
||||
//
|
||||
// Poison eviction works by returning driver.ErrBadConn from within conn.Raw:
|
||||
// database/sql treats that as a signal to discard the underlying driver conn
|
||||
// rather than returning it to the idle pool.
|
||||
//
|
||||
// ExecResilient does NOT retry. Callers wrap the call in their own retry policy
|
||||
// when that is desired (for example, a single retry on the live Jetstream path).
|
||||
func ExecResilient(ctx context.Context, db *sql.DB, fn func(*sql.Conn) error) error {
|
||||
conn, err := db.Conn(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
execErr := fn(conn)
|
||||
if IsPoisonedTxErr(execErr) {
|
||||
// Discard the underlying driver conn so it never serves another caller.
|
||||
// The Raw callback's return value is what triggers eviction; we ignore
|
||||
// any error from Raw itself.
|
||||
_ = conn.Raw(func(any) error { return driver.ErrBadConn })
|
||||
}
|
||||
return execErr
|
||||
}
|
||||
|
||||
// ErrNoPoolConn is returned by ExecResilient when a connection cannot be
|
||||
// obtained from the pool (e.g. context cancelled). It wraps the underlying
|
||||
// pool error for callers that want to distinguish pool-exhaustion from
|
||||
// statement-level errors.
|
||||
var ErrNoPoolConn = errors.New("db: failed to acquire pool connection")
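A sketch of the caller-side single-retry policy that the ExecResilient comment alludes to; the wrapper name is illustrative and not part of this file.
// execWithOneRetry runs fn through ExecResilient and retries exactly once
// when the first attempt died on a poisoned connection. Because ExecResilient
// already evicted that connection, the retry is served by a fresh one.
func execWithOneRetry(ctx context.Context, d *sql.DB, fn func(*sql.Conn) error) error {
	err := ExecResilient(ctx, d, fn)
	if err != nil && IsPoisonedTxErr(err) {
		err = ExecResilient(ctx, d, fn)
	}
	return err
}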
|
||||
28
pkg/appview/db/conn_test.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIsPoisonedTxErr(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
err error
|
||||
want bool
|
||||
}{
|
||||
{"nil", nil, false},
|
||||
{"unrelated", errors.New("disk full"), false},
|
||||
{"bunny timeout", errors.New("Remote SQlite failure: `2:0:Transaction timed-out`"), true},
|
||||
{"no active tx", errors.New("Remote SQlite failure: `3:1:cannot commit - no transaction is active`"), true},
|
||||
{"init state", errors.New("error code = 2: Error executing statement: connection has reached an invalid state, started with Init"), true},
|
||||
{"just invalid state", errors.New("generic failure: invalid state, started with Query"), true},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
if got := IsPoisonedTxErr(c.err); got != c.want {
|
||||
t.Errorf("IsPoisonedTxErr(%v) = %v, want %v", c.err, got, c.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -333,9 +333,10 @@ func GetAvailableHolds(db DBTX, userDID string) ([]AvailableHold, error) {
|
||||
c.permissions
|
||||
FROM hold_captain_records h
|
||||
LEFT JOIN hold_crew_members c ON h.hold_did = c.hold_did AND c.member_did = ?1
|
||||
WHERE h.allow_all_crew = 1
|
||||
WHERE (h.successor IS NULL OR h.successor = '')
|
||||
AND (h.allow_all_crew = 1
|
||||
OR h.owner_did = ?1
|
||||
OR c.member_did IS NOT NULL
|
||||
OR c.member_did IS NOT NULL)
|
||||
ORDER BY
|
||||
CASE
|
||||
WHEN h.owner_did = ?1 THEN 0
|
||||
@@ -387,6 +388,54 @@ func GetAvailableHolds(db DBTX, userDID string) ([]AvailableHold, error) {
|
||||
return holds, nil
|
||||
}
|
||||
|
||||
// GetAccessibleHoldDIDs returns the set of hold DIDs whose content the viewer
|
||||
// is allowed to see in listings. If viewerDID is empty (anonymous), this
|
||||
// returns holds with public=1 OR allow_all_crew=1. For signed-in viewers it
|
||||
// additionally includes holds where the viewer is owner or crew.
|
||||
//
|
||||
// The returned slice is suitable for use in an IN (...) clause against
|
||||
// manifests.hold_endpoint / tags.hold_endpoint (which store the hold DID).
|
||||
func GetAccessibleHoldDIDs(db DBTX, viewerDID string) ([]string, error) {
|
||||
var rows *sql.Rows
|
||||
var err error
|
||||
|
||||
if viewerDID == "" {
|
||||
rows, err = db.Query(`
|
||||
SELECT hold_did
|
||||
FROM hold_captain_records
|
||||
WHERE public = 1 OR allow_all_crew = 1
|
||||
`)
|
||||
} else {
|
||||
rows, err = db.Query(`
|
||||
SELECT DISTINCT h.hold_did
|
||||
FROM hold_captain_records h
|
||||
LEFT JOIN hold_crew_members c
|
||||
ON h.hold_did = c.hold_did AND c.member_did = ?1
|
||||
WHERE h.public = 1
|
||||
OR h.allow_all_crew = 1
|
||||
OR h.owner_did = ?1
|
||||
OR c.member_did IS NOT NULL
|
||||
`, viewerDID)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query accessible holds: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var dids []string
|
||||
for rows.Next() {
|
||||
var did string
|
||||
if err := rows.Scan(&did); err != nil {
|
||||
return nil, fmt.Errorf("failed to scan accessible hold: %w", err)
|
||||
}
|
||||
dids = append(dids, did)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("error iterating accessible holds: %w", err)
|
||||
}
|
||||
return dids, nil
|
||||
}
|
||||
|
||||
// GetCrewMemberships returns all holds where a user is a crew member
|
||||
func GetCrewMemberships(db DBTX, memberDID string) ([]CrewMember, error) {
|
||||
query := `
|
||||
|
||||
@@ -464,3 +464,94 @@ func TestListHoldDIDs_OrderByUpdatedAt(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetAccessibleHoldDIDs tests the viewer→hold visibility computation
|
||||
// used to filter listings to what the viewer is allowed to see.
|
||||
func TestGetAccessibleHoldDIDs(t *testing.T) {
|
||||
db := setupHoldTestDB(t)
|
||||
|
||||
// Seed 4 captain records covering each visibility combo
|
||||
records := []*HoldCaptainRecord{
|
||||
{HoldDID: "did:web:public.example", OwnerDID: "did:plc:alice", Public: true, AllowAllCrew: false, UpdatedAt: time.Now()},
|
||||
{HoldDID: "did:web:selfserv.example", OwnerDID: "did:plc:bob", Public: false, AllowAllCrew: true, UpdatedAt: time.Now()},
|
||||
{HoldDID: "did:web:invite.example", OwnerDID: "did:plc:carol", Public: false, AllowAllCrew: false, UpdatedAt: time.Now()},
|
||||
{HoldDID: "did:web:carol-hold.example", OwnerDID: "did:plc:carol", Public: false, AllowAllCrew: false, UpdatedAt: time.Now()},
|
||||
}
|
||||
for _, r := range records {
|
||||
if err := UpsertCaptainRecord(db, r); err != nil {
|
||||
t.Fatalf("seed captain %s: %v", r.HoldDID, err)
|
||||
}
|
||||
}
|
||||
|
||||
// dave is crew of did:web:invite.example
|
||||
if err := UpsertCrewMember(db, &CrewMember{
|
||||
HoldDID: "did:web:invite.example", MemberDID: "did:plc:dave", Rkey: "rk1",
|
||||
}); err != nil {
|
||||
t.Fatalf("seed crew: %v", err)
|
||||
}
|
||||
|
||||
contains := func(haystack []string, needle string) bool {
|
||||
for _, s := range haystack {
|
||||
if s == needle {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
t.Run("anonymous viewer sees public + self-service only", func(t *testing.T) {
|
||||
dids, err := GetAccessibleHoldDIDs(db, "")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if len(dids) != 2 {
|
||||
t.Fatalf("expected 2 DIDs (public+self-service), got %d: %v", len(dids), dids)
|
||||
}
|
||||
if !contains(dids, "did:web:public.example") {
|
||||
t.Errorf("missing public hold: %v", dids)
|
||||
}
|
||||
if !contains(dids, "did:web:selfserv.example") {
|
||||
t.Errorf("missing self-service hold: %v", dids)
|
||||
}
|
||||
if contains(dids, "did:web:invite.example") {
|
||||
t.Errorf("anon should not see invite-only hold: %v", dids)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("crew member also sees invite-only hold", func(t *testing.T) {
|
||||
dids, err := GetAccessibleHoldDIDs(db, "did:plc:dave")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if !contains(dids, "did:web:invite.example") {
|
||||
t.Errorf("crew member should see invite-only hold they belong to: %v", dids)
|
||||
}
|
||||
if contains(dids, "did:web:carol-hold.example") {
|
||||
t.Errorf("dave is not crew of carol's private hold: %v", dids)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("owner sees their own private hold", func(t *testing.T) {
|
||||
dids, err := GetAccessibleHoldDIDs(db, "did:plc:carol")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
// carol owns invite.example and carol-hold.example, both private
|
||||
if !contains(dids, "did:web:invite.example") {
|
||||
t.Errorf("owner should see their invite-only hold: %v", dids)
|
||||
}
|
||||
if !contains(dids, "did:web:carol-hold.example") {
|
||||
t.Errorf("owner should see their second private hold: %v", dids)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("random authenticated viewer gets same set as anonymous", func(t *testing.T) {
|
||||
dids, err := GetAccessibleHoldDIDs(db, "did:plc:nobody")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if len(dids) != 2 {
|
||||
t.Fatalf("expected 2 DIDs, got %d: %v", len(dids), dids)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
33
pkg/appview/db/jetstream_cursor.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// GetJetstreamCursor returns the last persisted Jetstream cursor (time_us).
|
||||
// Returns 0 when no cursor has been saved yet (e.g. fresh database).
|
||||
func GetJetstreamCursor(db DBTX) (int64, error) {
|
||||
var cursor int64
|
||||
err := db.QueryRow(`SELECT cursor FROM jetstream_cursor WHERE id = 1`).Scan(&cursor)
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return 0, nil
|
||||
}
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return cursor, nil
|
||||
}
|
||||
|
||||
// SaveJetstreamCursor writes the given cursor to the singleton jetstream_cursor row.
|
||||
// Idempotent — safe to call on every tick.
|
||||
func SaveJetstreamCursor(db DBTX, cursor int64) error {
|
||||
_, err := db.Exec(`
|
||||
INSERT INTO jetstream_cursor (id, cursor, updated_at)
|
||||
VALUES (1, ?, CURRENT_TIMESTAMP)
|
||||
ON CONFLICT(id) DO UPDATE SET
|
||||
cursor = excluded.cursor,
|
||||
updated_at = excluded.updated_at
|
||||
`, cursor)
|
||||
return err
|
||||
}
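A hedged sketch of a consumer loop around these two helpers; the events channel, the final flush, and the five-second cadence are assumptions and not part of this file.
// persistCursorLoop resumes from the stored cursor, tracks the newest event
// time_us it has seen, and flushes it on a fixed cadence so a reconnect loses
// at most a few seconds of progress.
func persistCursorLoop(d *sql.DB, events <-chan int64) error {
	last, err := GetJetstreamCursor(d)
	if err != nil {
		return err
	}
	_ = last // a real consumer passes this as the Jetstream cursor parameter
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	var newest int64
	for {
		select {
		case timeUS, ok := <-events:
			if !ok {
				if newest != 0 {
					return SaveJetstreamCursor(d, newest) // final flush
				}
				return nil
			}
			newest = timeUS
		case <-ticker.C:
			if newest != 0 {
				if err := SaveJetstreamCursor(d, newest); err != nil {
					return err
				}
			}
		}
	}
}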
|
||||
50
pkg/appview/db/jetstream_cursor_test.go
Normal file
@@ -0,0 +1,50 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestJetstreamCursor_RoundTrip(t *testing.T) {
|
||||
safeName := strings.ReplaceAll(t.Name(), "/", "_")
|
||||
d, err := InitDB(fmt.Sprintf("file:%s?mode=memory&cache=shared", safeName), LibsqlConfig{})
|
||||
if err != nil {
|
||||
t.Fatalf("init db: %v", err)
|
||||
}
|
||||
d.SetMaxOpenConns(1)
|
||||
defer d.Close()
|
||||
|
||||
// Fresh DB: no persisted cursor.
|
||||
got, err := GetJetstreamCursor(d)
|
||||
if err != nil {
|
||||
t.Fatalf("get empty: %v", err)
|
||||
}
|
||||
if got != 0 {
|
||||
t.Errorf("initial cursor = %d, want 0", got)
|
||||
}
|
||||
|
||||
// Save → read.
|
||||
if err := SaveJetstreamCursor(d, 1234567890); err != nil {
|
||||
t.Fatalf("save: %v", err)
|
||||
}
|
||||
got, err = GetJetstreamCursor(d)
|
||||
if err != nil {
|
||||
t.Fatalf("get after save: %v", err)
|
||||
}
|
||||
if got != 1234567890 {
|
||||
t.Errorf("cursor = %d, want 1234567890", got)
|
||||
}
|
||||
|
||||
// Overwrite with newer value.
|
||||
if err := SaveJetstreamCursor(d, 9999999999); err != nil {
|
||||
t.Fatalf("save 2: %v", err)
|
||||
}
|
||||
got, err = GetJetstreamCursor(d)
|
||||
if err != nil {
|
||||
t.Fatalf("get 2: %v", err)
|
||||
}
|
||||
if got != 9999999999 {
|
||||
t.Errorf("cursor after overwrite = %d, want 9999999999", got)
|
||||
}
|
||||
}
|
||||
79
pkg/appview/db/labels.go
Normal file
@@ -0,0 +1,79 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"time"
|
||||
)
|
||||
|
||||
// LabelChecker wraps a database connection to check takedown labels.
|
||||
// Implements middleware.LabelChecker interface.
|
||||
type LabelChecker struct {
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
// NewLabelChecker creates a new LabelChecker.
|
||||
func NewLabelChecker(database *sql.DB) *LabelChecker {
|
||||
return &LabelChecker{db: database}
|
||||
}
|
||||
|
||||
// IsTakenDown checks if a (DID, repository) pair has an active takedown.
|
||||
func (lc *LabelChecker) IsTakenDown(did, repository string) (bool, error) {
|
||||
return IsTakenDown(lc.db, did, repository)
|
||||
}
|
||||
|
||||
// IsTakenDown reports whether the given (did, repo) pair is currently taken
|
||||
// down, either by an exact-repo row or by a user-level row (repo='').
|
||||
func IsTakenDown(db DBTX, did, repository string) (bool, error) {
|
||||
var exists bool
|
||||
err := db.QueryRow(
|
||||
`SELECT EXISTS(
|
||||
SELECT 1 FROM taken_down_subjects
|
||||
WHERE did = ? AND (repo = ? OR repo = '')
|
||||
)`,
|
||||
did, repository,
|
||||
).Scan(&exists)
|
||||
return exists, err
|
||||
}
|
||||
|
||||
// SetTakedown records a positive takedown for (src, did, repo). Idempotent:
|
||||
// re-applying updates the timestamp.
|
||||
func SetTakedown(db DBTX, src, did, repo string, cts time.Time) error {
|
||||
_, err := db.Exec(
|
||||
`INSERT INTO taken_down_subjects (src, did, repo, cts) VALUES (?, ?, ?, ?)
|
||||
ON CONFLICT(src, did, repo) DO UPDATE SET cts = excluded.cts`,
|
||||
src, did, repo, cts.UTC().Format(time.RFC3339),
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
// RemoveTakedown drops the takedown row for (src, did, repo). Idempotent.
|
||||
func RemoveTakedown(db DBTX, src, did, repo string) error {
|
||||
_, err := db.Exec(
|
||||
`DELETE FROM taken_down_subjects WHERE src = ? AND did = ? AND repo = ?`,
|
||||
src, did, repo,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetCursor returns the last persisted cursor for a labeler src (0 if none).
|
||||
func GetCursor(db DBTX, src string) (int64, error) {
|
||||
var cursor int64
|
||||
err := db.QueryRow(
|
||||
`SELECT cursor FROM labeler_cursor WHERE src = ?`,
|
||||
src,
|
||||
).Scan(&cursor)
|
||||
if err == sql.ErrNoRows {
|
||||
return 0, nil
|
||||
}
|
||||
return cursor, err
|
||||
}
|
||||
|
||||
// SetCursor persists the cursor for a labeler src.
|
||||
func SetCursor(db DBTX, src string, cursor int64) error {
|
||||
_, err := db.Exec(
|
||||
`INSERT INTO labeler_cursor (src, cursor) VALUES (?, ?)
|
||||
ON CONFLICT(src) DO UPDATE SET cursor = excluded.cursor`,
|
||||
src, cursor,
|
||||
)
|
||||
return err
|
||||
}
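A hypothetical per-event handler showing how these helpers compose on the labeler subscription path; the event fields (neg, cts, seq) mirror the ATProto label shape but are assumptions here, not an API this file defines.
// applyTakedownLabel applies one takedown label from labeler src: a positive
// label records the takedown, its negation clears it, and the subscription
// cursor is advanced only after the write succeeds.
func applyTakedownLabel(d *sql.DB, src, did, repo string, neg bool, cts time.Time, seq int64) error {
	var err error
	if neg {
		err = RemoveTakedown(d, src, did, repo)
	} else {
		err = SetTakedown(d, src, did, repo, cts)
	}
	if err != nil {
		return err
	}
	return SetCursor(d, src, seq)
}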
|
||||
214
pkg/appview/db/labels_test.go
Normal file
@@ -0,0 +1,214 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
const testLabelerSrc = "did:plc:labeler"
|
||||
|
||||
// TestSetTakedown_RepoLevel verifies that an exact-repo row blocks only that
|
||||
// repo and leaves siblings untouched.
|
||||
func TestSetTakedown_RepoLevel(t *testing.T) {
|
||||
db, err := InitDB("file:TestSetTakedown_RepoLevel?mode=memory&cache=shared", LibsqlConfig{})
|
||||
if err != nil {
|
||||
t.Fatalf("init db: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
if err := SetTakedown(db, testLabelerSrc, "did:plc:user", "myrepo", time.Now().UTC()); err != nil {
|
||||
t.Fatalf("set takedown: %v", err)
|
||||
}
|
||||
|
||||
hit, err := IsTakenDown(db, "did:plc:user", "myrepo")
|
||||
if err != nil {
|
||||
t.Fatalf("is taken down (target): %v", err)
|
||||
}
|
||||
if !hit {
|
||||
t.Fatal("expected target repo to be taken down")
|
||||
}
|
||||
|
||||
hit, err = IsTakenDown(db, "did:plc:user", "otherrepo")
|
||||
if err != nil {
|
||||
t.Fatalf("is taken down (sibling): %v", err)
|
||||
}
|
||||
if hit {
|
||||
t.Fatal("sibling repo should not be taken down")
|
||||
}
|
||||
|
||||
hit, err = IsTakenDown(db, "did:plc:other", "myrepo")
|
||||
if err != nil {
|
||||
t.Fatalf("is taken down (other did): %v", err)
|
||||
}
|
||||
if hit {
|
||||
t.Fatal("repo on different did should not be taken down")
|
||||
}
|
||||
}
|
||||
|
||||
// TestSetTakedown_UserLevel verifies that a user-level row (repo=”) matches
|
||||
// any repo lookup for that DID.
|
||||
func TestSetTakedown_UserLevel(t *testing.T) {
|
||||
db, err := InitDB("file:TestSetTakedown_UserLevel?mode=memory&cache=shared", LibsqlConfig{})
|
||||
if err != nil {
|
||||
t.Fatalf("init db: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
if err := SetTakedown(db, testLabelerSrc, "did:plc:user", "", time.Now().UTC()); err != nil {
|
||||
t.Fatalf("set user-level takedown: %v", err)
|
||||
}
|
||||
|
||||
for _, repo := range []string{"alpha", "beta", "anything"} {
|
||||
hit, err := IsTakenDown(db, "did:plc:user", repo)
|
||||
if err != nil {
|
||||
t.Fatalf("is taken down %q: %v", repo, err)
|
||||
}
|
||||
if !hit {
|
||||
t.Fatalf("user-level takedown should cover repo %q", repo)
|
||||
}
|
||||
}
|
||||
|
||||
hit, err := IsTakenDown(db, "did:plc:bystander", "alpha")
|
||||
if err != nil {
|
||||
t.Fatalf("is taken down (bystander): %v", err)
|
||||
}
|
||||
if hit {
|
||||
t.Fatal("user-level takedown should not affect a different did")
|
||||
}
|
||||
}
|
||||
|
||||
// TestRemoveTakedown_Idempotent verifies that removing a missing row is a
|
||||
// no-op and removing an existing row flips IsTakenDown back to false.
|
||||
func TestRemoveTakedown_Idempotent(t *testing.T) {
|
||||
db, err := InitDB("file:TestRemoveTakedown_Idempotent?mode=memory&cache=shared", LibsqlConfig{})
|
||||
if err != nil {
|
||||
t.Fatalf("init db: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
if err := RemoveTakedown(db, testLabelerSrc, "did:plc:ghost", "ghost"); err != nil {
|
||||
t.Fatalf("remove missing row: %v", err)
|
||||
}
|
||||
|
||||
if err := SetTakedown(db, testLabelerSrc, "did:plc:user", "myrepo", time.Now().UTC()); err != nil {
|
||||
t.Fatalf("set takedown: %v", err)
|
||||
}
|
||||
if err := RemoveTakedown(db, testLabelerSrc, "did:plc:user", "myrepo"); err != nil {
|
||||
t.Fatalf("remove existing row: %v", err)
|
||||
}
|
||||
|
||||
hit, err := IsTakenDown(db, "did:plc:user", "myrepo")
|
||||
if err != nil {
|
||||
t.Fatalf("is taken down after remove: %v", err)
|
||||
}
|
||||
if hit {
|
||||
t.Fatal("expected takedown to be cleared after remove")
|
||||
}
|
||||
}
|
||||
|
||||
// TestSetTakedown_OnConflictUpdatesCts verifies that re-applying with a later
|
||||
// cts updates the row's timestamp instead of inserting a duplicate.
|
||||
func TestSetTakedown_OnConflictUpdatesCts(t *testing.T) {
|
||||
db, err := InitDB("file:TestSetTakedown_OnConflictUpdatesCts?mode=memory&cache=shared", LibsqlConfig{})
|
||||
if err != nil {
|
||||
t.Fatalf("init db: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
earlier := time.Now().Add(-2 * time.Hour).UTC().Truncate(time.Second)
|
||||
later := time.Now().UTC().Truncate(time.Second)
|
||||
|
||||
if err := SetTakedown(db, testLabelerSrc, "did:plc:user", "repo", earlier); err != nil {
|
||||
t.Fatalf("set initial: %v", err)
|
||||
}
|
||||
if err := SetTakedown(db, testLabelerSrc, "did:plc:user", "repo", later); err != nil {
|
||||
t.Fatalf("set update: %v", err)
|
||||
}
|
||||
|
||||
var stored string
|
||||
err = db.QueryRow(
|
||||
`SELECT cts FROM taken_down_subjects WHERE src=? AND did=? AND repo=?`,
|
||||
testLabelerSrc, "did:plc:user", "repo",
|
||||
).Scan(&stored)
|
||||
if err != nil {
|
||||
t.Fatalf("query cts: %v", err)
|
||||
}
|
||||
got, err := time.Parse(time.RFC3339, stored)
|
||||
if err != nil {
|
||||
t.Fatalf("parse stored cts %q: %v", stored, err)
|
||||
}
|
||||
if !got.Equal(later) {
|
||||
t.Fatalf("expected cts to be updated to %s, got %s", later, got)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCursor_GetSetRoundtrip verifies that an unknown src returns 0 and
|
||||
// SetCursor/GetCursor round-trip and overwrite.
|
||||
func TestCursor_GetSetRoundtrip(t *testing.T) {
|
||||
db, err := InitDB("file:TestCursor_GetSetRoundtrip?mode=memory&cache=shared", LibsqlConfig{})
|
||||
if err != nil {
|
||||
t.Fatalf("init db: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
got, err := GetCursor(db, "did:plc:nobody")
|
||||
if err != nil {
|
||||
t.Fatalf("get cursor unknown: %v", err)
|
||||
}
|
||||
if got != 0 {
|
||||
t.Fatalf("expected 0 for unknown src, got %d", got)
|
||||
}
|
||||
|
||||
if err := SetCursor(db, testLabelerSrc, 42); err != nil {
|
||||
t.Fatalf("set cursor: %v", err)
|
||||
}
|
||||
got, err = GetCursor(db, testLabelerSrc)
|
||||
if err != nil {
|
||||
t.Fatalf("get cursor after set: %v", err)
|
||||
}
|
||||
if got != 42 {
|
||||
t.Fatalf("expected 42, got %d", got)
|
||||
}
|
||||
|
||||
if err := SetCursor(db, testLabelerSrc, 7); err != nil {
|
||||
t.Fatalf("set cursor (overwrite): %v", err)
|
||||
}
|
||||
got, err = GetCursor(db, testLabelerSrc)
|
||||
if err != nil {
|
||||
t.Fatalf("get cursor after overwrite: %v", err)
|
||||
}
|
||||
if got != 7 {
|
||||
t.Fatalf("expected 7 after overwrite, got %d", got)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSetTakedown_PerSrcIsolation verifies that two labeler srcs can each
|
||||
// own a row for the same (did, repo) and removing one src's row leaves the
|
||||
// other intact.
|
||||
func TestSetTakedown_PerSrcIsolation(t *testing.T) {
|
||||
db, err := InitDB("file:TestSetTakedown_PerSrcIsolation?mode=memory&cache=shared", LibsqlConfig{})
|
||||
if err != nil {
|
||||
t.Fatalf("init db: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
now := time.Now().UTC()
|
||||
if err := SetTakedown(db, "did:plc:labeler-a", "did:plc:user", "repo", now); err != nil {
|
||||
t.Fatalf("set src a: %v", err)
|
||||
}
|
||||
if err := SetTakedown(db, "did:plc:labeler-b", "did:plc:user", "repo", now); err != nil {
|
||||
t.Fatalf("set src b: %v", err)
|
||||
}
|
||||
|
||||
if err := RemoveTakedown(db, "did:plc:labeler-a", "did:plc:user", "repo"); err != nil {
|
||||
t.Fatalf("remove src a: %v", err)
|
||||
}
|
||||
|
||||
hit, err := IsTakenDown(db, "did:plc:user", "repo")
|
||||
if err != nil {
|
||||
t.Fatalf("is taken down: %v", err)
|
||||
}
|
||||
if !hit {
|
||||
t.Fatal("repo should still be taken down by src b after src a row removed")
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,7 @@
|
||||
description: Persist Jetstream cursor so reconnects resume from last processed event
|
||||
query: |
|
||||
CREATE TABLE IF NOT EXISTS jetstream_cursor (
|
||||
id INTEGER PRIMARY KEY CHECK (id = 1),
|
||||
cursor INTEGER NOT NULL,
|
||||
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
16
pkg/appview/db/migrations/0023_create_labels.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
description: Create labels table for ATProto content moderation (takedowns)
|
||||
query: |
|
||||
CREATE TABLE IF NOT EXISTS labels (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
src TEXT NOT NULL,
|
||||
uri TEXT NOT NULL,
|
||||
val TEXT NOT NULL,
|
||||
neg BOOLEAN NOT NULL DEFAULT 0,
|
||||
cts TIMESTAMP NOT NULL,
|
||||
subject_did TEXT NOT NULL,
|
||||
subject_repo TEXT NOT NULL DEFAULT '',
|
||||
seq INTEGER NOT NULL DEFAULT 0,
|
||||
UNIQUE(src, uri, val, neg)
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_labels_subject ON labels(subject_did, subject_repo);
|
||||
CREATE INDEX IF NOT EXISTS idx_labels_val ON labels(val);
|
||||
3
pkg/appview/db/migrations/0024_add_exp_to_labels.yaml
Normal file
@@ -0,0 +1,3 @@
|
||||
description: Add optional exp (expiration) column to labels for ATProto label spec
|
||||
query: |
|
||||
ALTER TABLE labels ADD COLUMN exp TIMESTAMP;
|
||||
16
pkg/appview/db/migrations/0025_simplify_labels.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
description: Replace labels (append-only mirror) with taken_down_subjects + labeler_cursor (current-state model)
|
||||
query: |
|
||||
DROP TABLE IF EXISTS labels;
|
||||
CREATE TABLE IF NOT EXISTS taken_down_subjects (
|
||||
src TEXT NOT NULL,
|
||||
did TEXT NOT NULL,
|
||||
repo TEXT NOT NULL DEFAULT '',
|
||||
cts TIMESTAMP NOT NULL,
|
||||
PRIMARY KEY (src, did, repo)
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_taken_down_subjects_did ON taken_down_subjects(did);
|
||||
CREATE INDEX IF NOT EXISTS idx_taken_down_subjects_did_repo ON taken_down_subjects(did, repo);
|
||||
CREATE TABLE IF NOT EXISTS labeler_cursor (
|
||||
src TEXT PRIMARY KEY,
|
||||
cursor INTEGER NOT NULL
|
||||
);
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
@@ -15,20 +16,56 @@ func BlobCDNURL(did, cid string) string {
|
||||
return fmt.Sprintf("https://imgs.blue/%s/%s", did, cid)
|
||||
}
|
||||
|
||||
// activeTakedownClause returns a SQL fragment ready to drop into a `WHERE NOT
|
||||
// EXISTS (...)` filter for excluding rows whose `(did, repository)` pair is currently
|
||||
// taken down. The `alias` argument is the outer table alias (e.g. "m" for manifests,
|
||||
// "lm" for latest_manifests) and must already be in scope at the use site. Mirrors
|
||||
// `IsTakenDown` so listings stay consistent with the per-repo page check.
|
||||
func activeTakedownClause(alias string) string {
|
||||
return `NOT EXISTS (
|
||||
SELECT 1 FROM taken_down_subjects t
|
||||
WHERE t.did = ` + alias + `.did
|
||||
AND (t.repo = ` + alias + `.repository OR t.repo = '')
|
||||
)`
|
||||
}
|
||||
|
||||
// accessibleHoldsSubquery returns SQL that evaluates to the set of hold DIDs
|
||||
// the viewer is allowed to see in listings. Requires the viewerDID to be
|
||||
// passed twice as query arguments (once for the owner_did check and once
|
||||
// for the crew membership check). Empty viewerDID (anonymous) naturally
|
||||
// matches no owner or crew rows, so only public + self-service holds
|
||||
// (allow_all_crew=1) are returned.
|
||||
const accessibleHoldsSubquery = `(
|
||||
SELECT hold_did FROM hold_captain_records
|
||||
WHERE public = 1
|
||||
OR allow_all_crew = 1
|
||||
OR owner_did = ?
|
||||
OR hold_did IN (SELECT hold_did FROM hold_crew_members WHERE member_did = ?)
|
||||
)`
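A small illustrative use of the constant (not from this file), showing the viewer DID bound twice, in the order the subquery's placeholders expect.
// listVisibleRepos is a hypothetical query that splices in the subquery; the
// two viewerDID arguments satisfy its owner_did and member_did checks.
func listVisibleRepos(d *sql.DB, viewerDID string) (*sql.Rows, error) {
	return d.Query(`
		SELECT DISTINCT did, repository
		FROM manifests
		WHERE hold_endpoint IN `+accessibleHoldsSubquery+`
	`, viewerDID, viewerDID)
}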
|
||||
|
||||
// Artifact type values stored in manifests.artifact_type and returned by
|
||||
// GetArtifactType. Container-image is the default for OCI/Docker images and
|
||||
// manifest lists; helm-chart is for OCI helm chart artifacts.
|
||||
const (
|
||||
ArtifactTypeContainerImage = "container-image"
|
||||
ArtifactTypeHelmChart = "helm-chart"
|
||||
ArtifactTypeUnknown = "unknown"
|
||||
)
|
||||
|
||||
// GetArtifactType determines the artifact type based on config media type
|
||||
// Returns: "helm-chart", "container-image", or "unknown"
|
||||
func GetArtifactType(configMediaType string) string {
|
||||
switch {
|
||||
case strings.Contains(configMediaType, "helm.config"):
|
||||
return "helm-chart"
|
||||
return ArtifactTypeHelmChart
|
||||
case strings.Contains(configMediaType, "oci.image.config") ||
|
||||
strings.Contains(configMediaType, "docker.container.image"):
|
||||
return "container-image"
|
||||
return ArtifactTypeContainerImage
|
||||
case configMediaType == "":
|
||||
// Manifest lists don't have a config - treat as container-image
|
||||
return "container-image"
|
||||
return ArtifactTypeContainerImage
|
||||
default:
|
||||
return "unknown"
|
||||
return ArtifactTypeUnknown
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,20 +105,22 @@ func SearchRepositories(db DBTX, query string, limit, offset int, currentUserDID
|
||||
WITH latest_manifests AS (
|
||||
SELECT did, repository, MAX(id) as latest_id
|
||||
FROM manifests
|
||||
WHERE hold_endpoint IN ` + accessibleHoldsSubquery + `
|
||||
GROUP BY did, repository
|
||||
),
|
||||
matching_repos AS (
|
||||
SELECT DISTINCT lm.did, lm.repository, lm.latest_id
|
||||
FROM latest_manifests lm
|
||||
JOIN users u ON lm.did = u.did
|
||||
WHERE u.handle LIKE ? ESCAPE '\'
|
||||
WHERE (u.handle LIKE ? ESCAPE '\'
|
||||
OR u.did = ?
|
||||
OR lm.repository LIKE ? ESCAPE '\'
|
||||
OR EXISTS (
|
||||
SELECT 1 FROM repository_annotations ra
|
||||
WHERE ra.did = lm.did AND ra.repository = lm.repository
|
||||
AND ra.value LIKE ? ESCAPE '\'
|
||||
)
|
||||
))
|
||||
AND ` + activeTakedownClause("lm") + `
|
||||
),
|
||||
repo_stats AS (
|
||||
SELECT
|
||||
@@ -118,7 +157,7 @@ func SearchRepositories(db DBTX, query string, limit, offset int, currentUserDID
|
||||
LIMIT ? OFFSET ?
|
||||
`
|
||||
|
||||
rows, err := db.Query(sqlQuery, searchPattern, query, searchPattern, searchPattern, currentUserDID, limit, offset)
|
||||
rows, err := db.Query(sqlQuery, currentUserDID, currentUserDID, searchPattern, query, searchPattern, searchPattern, currentUserDID, limit, offset)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
@@ -159,6 +198,7 @@ func SearchRepositories(db DBTX, query string, limit, offset int, currentUserDID
|
||||
WITH latest_manifests AS (
|
||||
SELECT did, repository, MAX(id) as latest_id
|
||||
FROM manifests
|
||||
WHERE hold_endpoint IN ` + accessibleHoldsSubquery + `
|
||||
GROUP BY did, repository
|
||||
)
|
||||
SELECT COUNT(DISTINCT lm.did || '/' || lm.repository)
|
||||
@@ -175,16 +215,29 @@ func SearchRepositories(db DBTX, query string, limit, offset int, currentUserDID
|
||||
`
|
||||
|
||||
var total int
|
||||
if err := db.QueryRow(countQuery, searchPattern, query, searchPattern, searchPattern).Scan(&total); err != nil {
|
||||
if err := db.QueryRow(countQuery, currentUserDID, currentUserDID, searchPattern, query, searchPattern, searchPattern).Scan(&total); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
if err := PopulateRepoCardTags(db, cards); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
return cards, total, nil
|
||||
}
|
||||
|
||||
// GetUserRepositories fetches all repositories for a user
|
||||
func GetUserRepositories(db DBTX, did string) ([]Repository, error) {
|
||||
// Get repository summary
|
||||
// GetUserRepositories fetches all repositories for a user.
|
||||
// viewerDID scopes results to repositories whose manifests live on holds the
|
||||
// viewer can access (empty viewerDID = anonymous → public + self-service only).
|
||||
//
|
||||
// Implementation: one summary query for the accessible repository set, then
|
||||
// four bulk queries (tags, manifests, annotations, repo_pages) all keyed by
|
||||
// did. Results are grouped in Go and assembled per repo. Total: 5 queries
|
||||
// regardless of how many repos the user owns.
|
||||
func GetUserRepositories(db DBTX, did string, viewerDID string) ([]Repository, error) {
|
||||
// Step 1: summary query. Both tags and manifests are filtered via join
|
||||
// onto manifests.hold_endpoint so repositories where every row lives on
|
||||
// an inaccessible hold drop out.
|
||||
rows, err := db.Query(`
|
||||
SELECT
|
||||
repository,
|
||||
@@ -192,104 +245,92 @@ func GetUserRepositories(db DBTX, did string) ([]Repository, error) {
|
||||
COUNT(DISTINCT digest) as manifest_count,
|
||||
MAX(created_at) as last_push
|
||||
FROM (
|
||||
SELECT repository, tag, digest, created_at FROM tags WHERE did = ?
|
||||
SELECT t.repository, t.tag, t.digest, t.created_at
|
||||
FROM tags t
|
||||
JOIN manifests tm ON t.did = tm.did AND t.repository = tm.repository AND t.digest = tm.digest
|
||||
WHERE t.did = ? AND tm.hold_endpoint IN `+accessibleHoldsSubquery+`
|
||||
UNION
|
||||
SELECT repository, NULL, digest, created_at FROM manifests WHERE did = ?
|
||||
SELECT m.repository, NULL, m.digest, m.created_at
|
||||
FROM manifests m
|
		WHERE m.did = ? AND m.hold_endpoint IN `+accessibleHoldsSubquery+`
		)
		GROUP BY repository
		ORDER BY last_push DESC
	`, did, did)
	`, did, viewerDID, viewerDID, did, viewerDID, viewerDID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var repos []Repository
	type repoSummary struct {
		Name          string
		TagCount      int
		ManifestCount int
		LastPushStr   string
	}
	var summaries []repoSummary
	for rows.Next() {
		var r Repository
		var lastPushStr string
		if err := rows.Scan(&r.Name, &r.TagCount, &r.ManifestCount, &lastPushStr); err != nil {
		var s repoSummary
		if err := rows.Scan(&s.Name, &s.TagCount, &s.ManifestCount, &s.LastPushStr); err != nil {
			rows.Close()
			return nil, err
		}
		summaries = append(summaries, s)
	}
	rows.Close()

		// Parse the timestamp string into time.Time
		if lastPushStr != "" {
			// Try multiple timestamp formats
			formats := []string{
				time.RFC3339Nano,                      // 2006-01-02T15:04:05.999999999Z07:00
				"2006-01-02 15:04:05.999999999-07:00", // SQLite with microseconds and timezone
				"2006-01-02 15:04:05.999999999",       // SQLite with microseconds
				time.RFC3339,                          // 2006-01-02T15:04:05Z07:00
				"2006-01-02 15:04:05",                 // SQLite default
			}
	if len(summaries) == 0 {
		return nil, nil
	}

			for _, format := range formats {
				if t, err := time.Parse(format, lastPushStr); err == nil {
					r.LastPush = t
					break
				}
			}
		}

		// Get tags for this repo
		tagRows, err := db.Query(`
			SELECT id, tag, digest, created_at
			FROM tags
			WHERE did = ? AND repository = ?
			ORDER BY created_at DESC
		`, did, r.Name)

		if err != nil {
			return nil, err
		}

		for tagRows.Next() {
			var t Tag
			t.DID = did
			t.Repository = r.Name
			if err := tagRows.Scan(&t.ID, &t.Tag, &t.Digest, &t.CreatedAt); err != nil {
				tagRows.Close()
				return nil, err
			}
			r.Tags = append(r.Tags, t)
		}
		tagRows.Close()

		// Get manifests for this repo
		manifestRows, err := db.Query(`
			SELECT id, digest, hold_endpoint, schema_version, media_type,
			       config_digest, config_size, artifact_type, created_at
			FROM manifests
			WHERE did = ? AND repository = ?
			ORDER BY created_at DESC
		`, did, r.Name)

		if err != nil {
			return nil, err
		}

		for manifestRows.Next() {
			var m Manifest
			m.DID = did
			m.Repository = r.Name

			if err := manifestRows.Scan(&m.ID, &m.Digest, &m.HoldEndpoint, &m.SchemaVersion,
				&m.MediaType, &m.ConfigDigest, &m.ConfigSize, &m.ArtifactType, &m.CreatedAt); err != nil {
				manifestRows.Close()
				return nil, err
			}

			r.Manifests = append(r.Manifests, m)
		}
		manifestRows.Close()

		// Fetch repository-level annotations from annotations table
		annotations, err := GetRepositoryAnnotations(db, did, r.Name)
		if err != nil {
			return nil, err
	// Build the set of accessible repo names for filtering bulk-fetched rows
	// against repos that the viewer can't see (rows for repos owned by `did`
	// but stored on inaccessible holds).
	accessible := make(map[string]bool, len(summaries))
	for _, s := range summaries {
		accessible[s.Name] = true
	}

	// Step 2: bulk-fetch tags for all repos owned by did, grouped by repo.
	tagsByRepo, err := bulkTagsByRepo(db, did, accessible)
	if err != nil {
		return nil, err
	}

	// Step 3: bulk-fetch manifests, grouped by repo.
	manifestsByRepo, err := bulkManifestsByRepo(db, did, accessible)
	if err != nil {
		return nil, err
	}

	// Step 4: bulk-fetch annotations, grouped by repo.
	annotationsByRepo, err := GetRepositoryAnnotationsByDID(db, did)
	if err != nil {
		return nil, err
	}

	// Step 5: bulk-fetch repo pages (existing helper), keyed by repo.
	pages, err := GetRepoPagesByDID(db, did)
	if err != nil {
		return nil, err
	}
	pagesByRepo := make(map[string]*RepoPage, len(pages))
	for i := range pages {
		pagesByRepo[pages[i].Repository] = &pages[i]
	}

	// Assemble results in summary order (preserves last_push DESC).
	repos := make([]Repository, 0, len(summaries))
	for _, s := range summaries {
		r := Repository{
			Name:          s.Name,
			TagCount:      s.TagCount,
			ManifestCount: s.ManifestCount,
			LastPush:      parseRepoTimestamp(s.LastPushStr),
			Tags:          tagsByRepo[s.Name],
			Manifests:     manifestsByRepo[s.Name],
		}

		annotations := annotationsByRepo[s.Name]
		r.Title = annotations["org.opencontainers.image.title"]
		r.Description = annotations["org.opencontainers.image.description"]
		r.SourceURL = annotations["org.opencontainers.image.source"]
@@ -298,10 +339,9 @@ func GetUserRepositories(db DBTX, did string) ([]Repository, error) {
		r.IconURL = annotations["io.atcr.icon"]
		r.ReadmeURL = annotations["io.atcr.readme"]

		// Check for repo page avatar (overrides annotation icon)
		repoPage, err := GetRepoPage(db, did, r.Name)
		if err == nil && repoPage != nil && repoPage.AvatarCID != "" {
			r.IconURL = BlobCDNURL(did, repoPage.AvatarCID)
		// Repo page avatar overrides annotation icon when present.
		if page, ok := pagesByRepo[s.Name]; ok && page.AvatarCID != "" {
			r.IconURL = BlobCDNURL(did, page.AvatarCID)
		}

		repos = append(repos, r)
@@ -310,6 +350,89 @@ func GetUserRepositories(db DBTX, did string) ([]Repository, error) {
	return repos, nil
}

// bulkTagsByRepo fetches every tag owned by did and groups by repository,
// dropping repos not in the accessible set. Result preserves created_at DESC
// ordering within each repo.
func bulkTagsByRepo(db DBTX, did string, accessible map[string]bool) (map[string][]Tag, error) {
	rows, err := db.Query(`
		SELECT id, repository, tag, digest, created_at
		FROM tags
		WHERE did = ?
		ORDER BY repository, created_at DESC
	`, did)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	out := make(map[string][]Tag)
	for rows.Next() {
		var t Tag
		t.DID = did
		if err := rows.Scan(&t.ID, &t.Repository, &t.Tag, &t.Digest, &t.CreatedAt); err != nil {
			return nil, err
		}
		if !accessible[t.Repository] {
			continue
		}
		out[t.Repository] = append(out[t.Repository], t)
	}
	return out, rows.Err()
}

// bulkManifestsByRepo fetches every manifest owned by did and groups by
// repository, dropping repos not in the accessible set. Result preserves
// created_at DESC ordering within each repo.
func bulkManifestsByRepo(db DBTX, did string, accessible map[string]bool) (map[string][]Manifest, error) {
	rows, err := db.Query(`
		SELECT id, repository, digest, hold_endpoint, schema_version, media_type,
		       config_digest, config_size, artifact_type, created_at
		FROM manifests
		WHERE did = ?
		ORDER BY repository, created_at DESC
	`, did)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	out := make(map[string][]Manifest)
	for rows.Next() {
		var m Manifest
		m.DID = did
		if err := rows.Scan(&m.ID, &m.Repository, &m.Digest, &m.HoldEndpoint, &m.SchemaVersion,
			&m.MediaType, &m.ConfigDigest, &m.ConfigSize, &m.ArtifactType, &m.CreatedAt); err != nil {
			return nil, err
		}
		if !accessible[m.Repository] {
			continue
		}
		out[m.Repository] = append(out[m.Repository], m)
	}
	return out, rows.Err()
}
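
// Illustrative sketch (not in this diff): bulkTagsByRepo and bulkManifestsByRepo
// share one shape: a single ORDER BY repository, created_at DESC query whose
// rows are grouped into a map in Go. A generic helper could factor that out;
// groupByRepo, scanRow, and repoOf are hypothetical names. Assumes
// database/sql is imported.
func groupByRepo[T any](rows *sql.Rows, accessible map[string]bool,
	scanRow func(*sql.Rows) (T, error), repoOf func(T) string) (map[string][]T, error) {
	defer rows.Close()
	out := make(map[string][]T)
	for rows.Next() {
		v, err := scanRow(rows)
		if err != nil {
			return nil, err
		}
		if repo := repoOf(v); accessible[repo] {
			// Input order (created_at DESC) is preserved within each repo.
			out[repo] = append(out[repo], v)
		}
	}
	return out, rows.Err()
}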

// parseRepoTimestamp tolerates the several timestamp formats SQLite/libsql
// can return for MAX(created_at) depending on driver and schema history.
func parseRepoTimestamp(s string) time.Time {
	if s == "" {
		return time.Time{}
	}
	formats := []string{
		time.RFC3339Nano,                      // 2006-01-02T15:04:05.999999999Z07:00
		"2006-01-02 15:04:05.999999999-07:00", // SQLite with microseconds and timezone
		"2006-01-02 15:04:05.999999999",       // SQLite with microseconds
		time.RFC3339,                          // 2006-01-02T15:04:05Z07:00
		"2006-01-02 15:04:05",                 // SQLite default
	}
	for _, format := range formats {
		if t, err := time.Parse(format, s); err == nil {
			return t
		}
	}
	return time.Time{}
}
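
// Illustrative example (hypothetical test, not in this diff; assumes fmt and
// time are imported): both RFC3339 and SQLite's space-separated layout
// normalize to the same instant.
func ExampleParseRepoTimestamp() {
	fmt.Println(parseRepoTimestamp("2026-01-02T15:04:05Z").UTC().Format(time.RFC3339))
	fmt.Println(parseRepoTimestamp("2026-01-02 15:04:05").UTC().Format(time.RFC3339))
	// Output:
	// 2026-01-02T15:04:05Z
	// 2026-01-02T15:04:05Z
}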

// GetRepositoryMetadata retrieves metadata for a repository from annotations table
// Returns a map of annotation key -> value for easy access in templates and handlers
func GetRepositoryMetadata(db DBTX, did string, repository string) (map[string]string, error) {
@@ -779,29 +902,37 @@ func CountTags(db DBTX, did, repository string) (int, error) {
// Only multi-arch tags (manifest lists) have platform info in manifest_references
// Single-arch tags will have empty Platforms slice (platform is obvious for single-arch)
// Attestation references (unknown/unknown platforms) are filtered out but tracked via HasAttestations
func GetTagsWithPlatforms(db DBTX, did, repository string, limit, offset int) ([]TagWithPlatforms, error) {
	return getTagsWithPlatformsFiltered(db, did, repository, "", limit, offset)
func GetTagsWithPlatforms(db DBTX, did, repository string, limit, offset int, viewerDID string) ([]TagWithPlatforms, error) {
	return getTagsWithPlatformsFiltered(db, did, repository, "", limit, offset, viewerDID, true)
}

// getTagsWithPlatformsFiltered is the shared implementation for GetTagsWithPlatforms and GetTagByName.
// If tagName is non-empty, only that specific tag is returned.
func getTagsWithPlatformsFiltered(db DBTX, did, repository, tagName string, limit, offset int) ([]TagWithPlatforms, error) {
// When applyHoldFilter is true, rows are filtered by hold access for viewerDID.
func getTagsWithPlatformsFiltered(db DBTX, did, repository, tagName string, limit, offset int, viewerDID string, applyHoldFilter bool) ([]TagWithPlatforms, error) {
	var tagFilter string
	var holdFilter string
	var args []any
	args = append(args, did, repository)
	if tagName != "" {
		tagFilter = "AND tag = ?"
		args = append(args, did, repository, tagName, limit, offset)
	} else {
		args = append(args, did, repository, limit, offset)
		tagFilter = "AND t.tag = ?"
		args = append(args, tagName)
	}
	if applyHoldFilter {
		holdFilter = "AND m.hold_endpoint IN " + accessibleHoldsSubquery
		args = append(args, viewerDID, viewerDID)
	}
	args = append(args, limit, offset)

	query := `
		WITH paged_tags AS (
			SELECT id, did, repository, tag, digest, created_at
			FROM tags
			WHERE did = ? AND repository = ?
			SELECT t.id, t.did, t.repository, t.tag, t.digest, t.created_at
			FROM tags t
			JOIN manifests m ON t.did = m.did AND t.repository = m.repository AND t.digest = m.digest
			WHERE t.did = ? AND t.repository = ?
			` + tagFilter + `
			ORDER BY created_at DESC
			` + holdFilter + `
			ORDER BY t.created_at DESC
			LIMIT ? OFFSET ?
		)
		SELECT
@@ -1117,7 +1248,7 @@ func GetManifestReferencesForManifest(db DBTX, manifestID int64) ([]ManifestRefe
// GetTopLevelManifests returns only manifest lists and orphaned single-arch manifests
// Filters out platform-specific manifests that are referenced by manifest lists
// Note: Annotations are stored separately in repository_annotations table - use GetRepositoryMetadata to fetch them
func GetTopLevelManifests(db DBTX, did, repository string, limit, offset int) ([]ManifestWithMetadata, error) {
func GetTopLevelManifests(db DBTX, did, repository string, limit, offset int, viewerDID string) ([]ManifestWithMetadata, error) {
	rows, err := db.Query(`
		WITH manifest_list_children AS (
			-- Get all digests that are children of manifest lists
@@ -1138,6 +1269,7 @@ func GetTopLevelManifests(db DBTX, did, repository string, limit, offset int) ([
		WHERE m.did = ? AND m.repository = ?
		AND m.subject_digest IS NULL
		AND m.artifact_type != 'unknown'
		AND m.hold_endpoint IN `+accessibleHoldsSubquery+`
		AND (
			-- Include manifest lists
			m.media_type LIKE '%index%' OR m.media_type LIKE '%manifest.list%'
@@ -1148,7 +1280,7 @@ func GetTopLevelManifests(db DBTX, did, repository string, limit, offset int) ([
		GROUP BY m.id
		ORDER BY m.created_at DESC
		LIMIT ? OFFSET ?
	`, did, repository, did, repository, limit, offset)
	`, did, repository, did, repository, viewerDID, viewerDID, limit, offset)

	if err != nil {
		return nil, err
@@ -1357,6 +1489,39 @@ func UpdateFirehoseCursor(db DBTX, cursor int64) error {
	return err
}

// GetChildManifestPlatform returns the platform info for a manifest that is
// referenced as a child of a manifest list. Returns nil if the digest is not
// a child of any manifest list (i.e., it's a top-level single-arch manifest).
// Used by the diff handler to match architectures when comparing a platform
// child digest against a manifest list (or another child).
func GetChildManifestPlatform(db DBTX, did, repository, digest string) (*PlatformInfo, error) {
	var p PlatformInfo
	var arch, os, variant, osVersion sql.NullString
	err := db.QueryRow(`
		SELECT
			COALESCE(mr.platform_os, ''),
			COALESCE(mr.platform_architecture, ''),
			COALESCE(mr.platform_variant, ''),
			COALESCE(mr.platform_os_version, '')
		FROM manifest_references mr
		JOIN manifests m ON mr.manifest_id = m.id
		WHERE m.did = ? AND m.repository = ? AND mr.digest = ?
		LIMIT 1
	`, did, repository, digest).Scan(&os, &arch, &variant, &osVersion)
	if err != nil {
		if err == sql.ErrNoRows {
			return nil, nil
		}
		return nil, err
	}
	p.OS = os.String
	p.Architecture = arch.String
	p.Variant = variant.String
	p.OSVersion = osVersion.String
	p.Digest = digest
	return &p, nil
}
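
// Illustrative sketch (hypothetical helper, not in this diff): showing the
// nil-means-top-level contract of GetChildManifestPlatform at a call site.
func resolvePlatform(db DBTX, did, repo, digest string) (*PlatformInfo, error) {
	p, err := GetChildManifestPlatform(db, did, repo, digest)
	if err != nil {
		return nil, err
	}
	if p == nil {
		// Top-level single-arch manifest: platform must come from the image
		// config blob rather than manifest_references.
		return &PlatformInfo{Digest: digest}, nil
	}
	return p, nil
}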

// IsManifestReferenced checks if a manifest digest is referenced as a child of
// any manifest list for the given user. Used to protect manifest list children
// from auto-removal (they are untagged but still needed by their parent list).
@@ -1988,6 +2153,32 @@ func (h *HoldDIDDB) UpdateManifestHoldDID(did, oldHoldDID, newHoldDID string) (i
	return UpdateManifestHoldDID(h.db, did, oldHoldDID, newHoldDID)
}

// GetDistinctManifestHoldDIDs returns all distinct hold DIDs referenced by a user's manifests.
func GetDistinctManifestHoldDIDs(db DBTX, did string) ([]string, error) {
	rows, err := db.Query(`
		SELECT DISTINCT hold_endpoint FROM manifests
		WHERE did = ? AND hold_endpoint != ''
	`, did)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var holds []string
	for rows.Next() {
		var h string
		if err := rows.Scan(&h); err != nil {
			return nil, err
		}
		holds = append(holds, h)
	}
	return holds, rows.Err()
}

// GetDistinctManifestHoldDIDs wraps the package-level function.
func (h *HoldDIDDB) GetDistinctManifestHoldDIDs(did string) ([]string, error) {
	return GetDistinctManifestHoldDIDs(h.db, did)
}

// IsManifestReferenced checks if a digest is a child of any manifest list for the user.
// Implements storage.ManifestReferenceChecker.
func (h *HoldDIDDB) IsManifestReferenced(did, digest string) (bool, error) {
@@ -2019,6 +2210,7 @@ func GetRepoCards(db DBTX, limit int, currentUserDID string, sortOrder RepoCardS
		WITH latest_manifests AS (
			SELECT did, repository, MAX(id) as latest_id
			FROM manifests
			WHERE hold_endpoint IN ` + accessibleHoldsSubquery + `
			GROUP BY did, repository
		)
		SELECT
@@ -2042,11 +2234,12 @@ func GetRepoCards(db DBTX, limit int, currentUserDID string, sortOrder RepoCardS
		JOIN users u ON m.did = u.did
		LEFT JOIN repository_stats rs ON m.did = rs.did AND m.repository = rs.repository
		LEFT JOIN repo_pages rp ON m.did = rp.did AND m.repository = rp.repository
		WHERE ` + activeTakedownClause("m") + `
		ORDER BY ` + orderBy + `
		LIMIT ?
	`

	rows, err := db.Query(query, currentUserDID, limit)
	rows, err := db.Query(query, currentUserDID, currentUserDID, currentUserDID, limit)
	if err != nil {
		return nil, err
	}
@@ -2082,6 +2275,10 @@ func GetRepoCards(db DBTX, limit int, currentUserDID string, sortOrder RepoCardS
		return nil, err
	}

	if err := PopulateRepoCardTags(db, cards); err != nil {
		return nil, err
	}

	return cards, nil
}

@@ -2092,6 +2289,7 @@ func GetUserRepoCards(db DBTX, userDID string, currentUserDID string) ([]RepoCar
			SELECT did, repository, MAX(id) as latest_id
			FROM manifests
			WHERE did = ?
			AND hold_endpoint IN ` + accessibleHoldsSubquery + `
			GROUP BY did, repository
		)
		SELECT
@@ -2115,10 +2313,11 @@ func GetUserRepoCards(db DBTX, userDID string, currentUserDID string) ([]RepoCar
		JOIN users u ON m.did = u.did
		LEFT JOIN repository_stats rs ON m.did = rs.did AND m.repository = rs.repository
		LEFT JOIN repo_pages rp ON m.did = rp.did AND m.repository = rp.repository
		WHERE ` + activeTakedownClause("m") + `
		ORDER BY MAX(rs.last_push, m.created_at) DESC
	`

	rows, err := db.Query(query, userDID, currentUserDID)
	rows, err := db.Query(query, userDID, currentUserDID, currentUserDID, currentUserDID)
	if err != nil {
		return nil, err
	}
@@ -2154,6 +2353,98 @@ func GetUserRepoCards(db DBTX, userDID string, currentUserDID string) ([]RepoCar
		return nil, err
	}

	if err := PopulateRepoCardTags(db, cards); err != nil {
		return nil, err
	}

	return cards, nil
}

// GetStarredRepoCards fetches repository cards for repositories starred by
// starrerDID. Stars whose target repo no longer has a manifest, lives on a
// hold the viewer can't access, or is currently taken down are silently
// dropped via the joins and filters. Ordered by star creation time DESC.
func GetStarredRepoCards(db DBTX, starrerDID string, currentUserDID string) ([]RepoCardData, error) {
	query := `
		WITH starred AS (
			SELECT owner_did AS did, repository, created_at AS starred_at
			FROM stars
			WHERE starrer_did = ?
		),
		latest_manifests AS (
			SELECT m.did, m.repository, MAX(m.id) as latest_id
			FROM manifests m
			JOIN starred st ON m.did = st.did AND m.repository = st.repository
			WHERE m.hold_endpoint IN ` + accessibleHoldsSubquery + `
			GROUP BY m.did, m.repository
		)
		SELECT
			m.did,
			u.handle,
			COALESCE(u.avatar, ''),
			m.repository,
			COALESCE((SELECT value FROM repository_annotations WHERE did = m.did AND repository = m.repository AND key = 'org.opencontainers.image.title'), ''),
			COALESCE((SELECT value FROM repository_annotations WHERE did = m.did AND repository = m.repository AND key = 'org.opencontainers.image.description'), ''),
			COALESCE((SELECT value FROM repository_annotations WHERE did = m.did AND repository = m.repository AND key = 'io.atcr.icon'), ''),
			COALESCE((SELECT COUNT(*) FROM stars WHERE owner_did = m.did AND repository = m.repository), 0),
			COALESCE(rs.pull_count, 0),
			COALESCE((SELECT COUNT(*) FROM stars WHERE starrer_did = ? AND owner_did = m.did AND repository = m.repository), 0),
			COALESCE(m.artifact_type, 'container-image'),
			COALESCE((SELECT tag FROM tags WHERE did = m.did AND repository = m.repository ORDER BY created_at DESC LIMIT 1), ''),
			COALESCE(m.digest, ''),
			MAX(rs.last_push, m.created_at),
			COALESCE(rp.avatar_cid, ''),
			st.starred_at
		FROM latest_manifests lm
		JOIN manifests m ON lm.latest_id = m.id
		JOIN users u ON m.did = u.did
		JOIN starred st ON st.did = m.did AND st.repository = m.repository
		LEFT JOIN repository_stats rs ON m.did = rs.did AND m.repository = rs.repository
		LEFT JOIN repo_pages rp ON m.did = rp.did AND m.repository = rp.repository
		WHERE ` + activeTakedownClause("m") + `
		ORDER BY st.starred_at DESC
	`

	rows, err := db.Query(query, starrerDID, currentUserDID, currentUserDID, currentUserDID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var cards []RepoCardData
	for rows.Next() {
		var c RepoCardData
		var ownerDID string
		var isStarredInt int
		var avatarCID string
		var lastUpdatedStr sql.NullString
		var starredAtStr sql.NullString

		if err := rows.Scan(&ownerDID, &c.OwnerHandle, &c.OwnerAvatarURL, &c.Repository, &c.Title, &c.Description, &c.IconURL,
			&c.StarCount, &c.PullCount, &isStarredInt, &c.ArtifactType, &c.Tag, &c.Digest, &lastUpdatedStr, &avatarCID, &starredAtStr); err != nil {
			return nil, err
		}
		c.IsStarred = isStarredInt > 0
		if lastUpdatedStr.Valid {
			if t, err := parseTimestamp(lastUpdatedStr.String); err == nil {
				c.LastUpdated = t
			}
		}
		if avatarCID != "" {
			c.IconURL = BlobCDNURL(ownerDID, avatarCID)
		}

		cards = append(cards, c)
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}

	if err := PopulateRepoCardTags(db, cards); err != nil {
		return nil, err
	}

	return cards, nil
}
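
// Illustrative call site (hypothetical handler fragment, not in this diff):
// the profile "starred" tab passes the signed-in viewer as both starrer and
// current user, so every surviving card comes back with IsStarred == true.
//
//	cards, err := db.GetStarredRepoCards(h.ReadOnlyDB, sess.DID, sess.DID)
//	if err != nil {
//		http.Error(w, "failed to load starred repositories", http.StatusInternalServerError)
//		return
//	}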

@@ -2464,7 +2755,7 @@ func IsHoldCaptain(db DBTX, userDID string, managedHolds []string) (bool, error)
// GetTagByName returns a single tag with platform information by tag name.
// Returns nil, nil if the tag doesn't exist.
func GetTagByName(db DBTX, did, repository, tagName string) (*TagWithPlatforms, error) {
	tags, err := getTagsWithPlatformsFiltered(db, did, repository, tagName, 1, 0)
	tags, err := getTagsWithPlatformsFiltered(db, did, repository, tagName, 1, 0, "", false)
	if err != nil {
		return nil, err
	}
@@ -2474,27 +2765,164 @@ func GetTagByName(db DBTX, did, repository, tagName string) (*TagWithPlatforms,
	return &tags[0], nil
}

// GetAllTagNames returns all tag names for a repository, ordered by most recent first.
func GetAllTagNames(db DBTX, did, repository string) ([]string, error) {
// GetRepoHoldDIDs returns the distinct hold DIDs that host manifests for a
// given repository, restricted to holds the viewer can access.
func GetRepoHoldDIDs(db DBTX, did, repository string, viewerDID string) ([]string, error) {
	rows, err := db.Query(`
		SELECT tag FROM tags
		WHERE did = ? AND repository = ?
		ORDER BY created_at DESC
	`, did, repository)
		SELECT DISTINCT m.hold_endpoint
		FROM manifests m
		WHERE m.did = ? AND m.repository = ?
		AND m.hold_endpoint != ''
		AND m.hold_endpoint IN `+accessibleHoldsSubquery+`
	`, did, repository, viewerDID, viewerDID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var holds []string
	for rows.Next() {
		var h string
		if err := rows.Scan(&h); err != nil {
			return nil, err
		}
		holds = append(holds, h)
	}
	return holds, rows.Err()
}
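
// Illustrative note (hypothetical helper, not in this diff):
// accessibleHoldsSubquery binds the viewer's DID twice, so every caller
// appends viewerDID twice after its own positional parameters, the pattern
// visible throughout this diff. A tiny helper makes that harder to get wrong:
func withViewer(args []any, viewerDID string) []any {
	// One viewerDID per placeholder inside the subquery.
	return append(args, viewerDID, viewerDID)
}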

// TagNameDigest is a lightweight (tag, digest) pair used for dropdown
// population and default-selection heuristics.
type TagNameDigest struct {
	Name   string
	Digest string
}

// shaTagPattern matches CI-style git-sha tags like "sha-937fa4c".
var shaTagPattern = regexp.MustCompile(`^sha-[0-9a-f]{6,40}$`)

// PickDefaultTag chooses the best display tag from a list of (name, digest)
// pairs ordered most-recent first.
//
//  1. Start with the newest tag.
//  2. If that newest tag looks like a git-sha tag, look for a sibling with
//     the same digest that doesn't — happyview-style repos push both
//     "sha-937fa4c" and "2.0.0-dev.45" pointing at the same image; we'd
//     rather show the semver name.
//  3. If "latest" exists AND points to the same digest as the chosen tag,
//     prefer "latest" as the friendliest label. A stale "latest" pointing
//     at an old digest is bypassed.
func PickDefaultTag(tags []TagNameDigest) string {
	if len(tags) == 0 {
		return ""
	}
	chosen := tags[0]
	if shaTagPattern.MatchString(chosen.Name) {
		for _, t := range tags[1:] {
			if t.Digest == chosen.Digest && !shaTagPattern.MatchString(t.Name) {
				chosen = t
				break
			}
		}
	}
	if chosen.Name != "latest" {
		for _, t := range tags {
			if t.Name == "latest" && t.Digest == chosen.Digest {
				return "latest"
			}
		}
	}
	return chosen.Name
}
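
// Worked example (hypothetical values, not in this diff; assumes fmt is
// imported): the newest tag is a git-sha tag, a semver sibling shares its
// digest, and "latest" is stale. PickDefaultTag promotes the semver name and
// bypasses the stale "latest".
func ExamplePickDefaultTag() {
	fmt.Println(PickDefaultTag([]TagNameDigest{
		{Name: "sha-937fa4c", Digest: "sha256:aaa"},  // newest, CI sha tag
		{Name: "2.0.0-dev.45", Digest: "sha256:aaa"}, // same image, friendlier name
		{Name: "latest", Digest: "sha256:bbb"},       // stale: different digest
	}))
	// Output: 2.0.0-dev.45
}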

// PopulateRepoCardTags overrides each card's Tag field with the best display
// tag chosen by PickDefaultTag. Issues one batch query for all (handle, repository)
// pairs in the slice. No-op for an empty slice.
//
// RepoCardData doesn't carry the owner DID, so we join through users.handle.
// This is fine because (handle, repository) is unique within the appview.
func PopulateRepoCardTags(db DBTX, cards []RepoCardData) error {
	if len(cards) == 0 {
		return nil
	}
	type key struct{ handle, repo string }
	placeholders := make([]string, 0, len(cards))
	args := make([]any, 0, len(cards)*2)
	for _, c := range cards {
		placeholders = append(placeholders, "(?, ?)")
		args = append(args, c.OwnerHandle, c.Repository)
	}
	q := `
		SELECT u.handle, t.repository, t.tag, t.digest
		FROM tags t
		JOIN users u ON t.did = u.did
		WHERE (u.handle, t.repository) IN (VALUES ` + strings.Join(placeholders, ",") + `)
		ORDER BY t.repository, t.created_at DESC
	`
	rows, err := db.Query(q, args...)
	if err != nil {
		return err
	}
	defer rows.Close()
	groups := make(map[key][]TagNameDigest)
	for rows.Next() {
		var handle, repo, tag, digest string
		if err := rows.Scan(&handle, &repo, &tag, &digest); err != nil {
			return err
		}
		k := key{handle, repo}
		groups[k] = append(groups[k], TagNameDigest{Name: tag, Digest: digest})
	}
	if err := rows.Err(); err != nil {
		return err
	}
	for i := range cards {
		k := key{cards[i].OwnerHandle, cards[i].Repository}
		if g, ok := groups[k]; ok && len(g) > 0 {
			cards[i].Tag = PickDefaultTag(g)
		}
	}
	return nil
}
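
// Illustrative note (not in this diff): for three cards the batch query's
// row-value filter expands to SQL of this shape, with args flattened in the
// same order the placeholders were appended:
//
//	WHERE (u.handle, t.repository) IN (VALUES (?, ?), (?, ?), (?, ?))
//
// SQLite supports row values (since 3.15), which is what makes the single
// round trip per page of cards possible.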

// GetAllTagNames returns all tag names for a repository, ordered by most recent first.
// Filters out tags whose manifests live on holds the viewer can't access.
func GetAllTagNames(db DBTX, did, repository string, viewerDID string) ([]string, error) {
	pairs, err := GetAllTagsWithDigests(db, did, repository, viewerDID)
	if err != nil {
		return nil, err
	}
	names := make([]string, len(pairs))
	for i, p := range pairs {
		names[i] = p.Name
	}
	return names, nil
}

// GetAllTagsWithDigests returns all tags for a repository with their manifest
// digests, ordered by most recent first. Filters out tags whose manifests live
// on holds the viewer can't access.
func GetAllTagsWithDigests(db DBTX, did, repository string, viewerDID string) ([]TagNameDigest, error) {
	rows, err := db.Query(`
		SELECT t.tag, t.digest FROM tags t
		JOIN manifests m ON t.did = m.did AND t.repository = m.repository AND t.digest = m.digest
		WHERE t.did = ? AND t.repository = ?
		AND m.hold_endpoint IN `+accessibleHoldsSubquery+`
		ORDER BY t.created_at DESC
	`, did, repository, viewerDID, viewerDID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var names []string
	var out []TagNameDigest
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
		var p TagNameDigest
		if err := rows.Scan(&p.Name, &p.Digest); err != nil {
			return nil, err
		}
		names = append(names, name)
		out = append(out, p)
	}
	return names, rows.Err()
	return out, rows.Err()
}

// GetLayerCountForManifest returns the number of layers for a manifest identified by digest.

@@ -855,6 +855,15 @@ func TestGetTagsWithPlatforms(t *testing.T) {
		t.Fatalf("Failed to create test user: %v", err)
	}

	// Register the test hold as public so the hold-access filter allows it
	if err := UpsertCaptainRecord(db, &HoldCaptainRecord{
		HoldDID:  "did:web:hold.example.com",
		OwnerDID: "did:plc:holdowner",
		Public:   true,
	}); err != nil {
		t.Fatalf("Failed to insert captain record: %v", err)
	}

	// Test 1: Single-arch manifest (no platform info)
	singleArchManifest := &Manifest{
		DID: testUser.DID,
@@ -882,7 +891,7 @@ func TestGetTagsWithPlatforms(t *testing.T) {
		t.Fatalf("Failed to insert single-arch tag: %v", err)
	}

	tagsWithPlatforms, err := GetTagsWithPlatforms(db, testUser.DID, "myapp", 100, 0)
	tagsWithPlatforms, err := GetTagsWithPlatforms(db, testUser.DID, "myapp", 100, 0, "")
	if err != nil {
		t.Fatalf("Failed to get tags with platforms: %v", err)
	}
@@ -951,7 +960,7 @@ func TestGetTagsWithPlatforms(t *testing.T) {
		t.Fatalf("Failed to insert multi-arch tag: %v", err)
	}

	multiTagsWithPlatforms, err := GetTagsWithPlatforms(db, testUser.DID, "multiapp", 100, 0)
	multiTagsWithPlatforms, err := GetTagsWithPlatforms(db, testUser.DID, "multiapp", 100, 0, "")
	if err != nil {
		t.Fatalf("Failed to get multi-arch tags with platforms: %v", err)
	}
@@ -1531,3 +1540,314 @@ func TestGetAllUntaggedManifestDigests(t *testing.T) {
		t.Errorf("Expected 3 digests, got %d: %v", len(digests), digests)
	}
}

// TestGetUserRepositories_HoldAccessFilter verifies that repositories whose
// manifests live on inaccessible holds are hidden from viewers without access.
func TestGetUserRepositories_HoldAccessFilter(t *testing.T) {
	db, err := InitDB("file:TestGetUserRepositories_HoldAccessFilter?mode=memory&cache=shared", LibsqlConfig{})
	if err != nil {
		t.Fatalf("init db: %v", err)
	}
	defer db.Close()

	testUser := &User{DID: "did:plc:alice", Handle: "alice.test", PDSEndpoint: "https://pds.example", LastSeen: time.Now()}
	if err := UpsertUser(db, testUser); err != nil {
		t.Fatalf("upsert user: %v", err)
	}

	// Public hold and a private invite-only hold
	if err := UpsertCaptainRecord(db, &HoldCaptainRecord{
		HoldDID: "did:web:public.example", OwnerDID: "did:plc:holdowner", Public: true,
	}); err != nil {
		t.Fatalf("seed public captain: %v", err)
	}
	if err := UpsertCaptainRecord(db, &HoldCaptainRecord{
		HoldDID: "did:web:private.example", OwnerDID: "did:plc:holdowner", Public: false, AllowAllCrew: false,
	}); err != nil {
		t.Fatalf("seed private captain: %v", err)
	}

	// Two repos: one on the public hold, one on the private hold
	if _, err := InsertManifest(db, &Manifest{
		DID: testUser.DID, Repository: "publicrepo", Digest: "sha256:pub",
		HoldEndpoint: "did:web:public.example", SchemaVersion: 2,
		MediaType: "application/vnd.oci.image.manifest.v1+json", CreatedAt: time.Now(),
	}); err != nil {
		t.Fatalf("insert public manifest: %v", err)
	}
	if _, err := InsertManifest(db, &Manifest{
		DID: testUser.DID, Repository: "privaterepo", Digest: "sha256:priv",
		HoldEndpoint: "did:web:private.example", SchemaVersion: 2,
		MediaType: "application/vnd.oci.image.manifest.v1+json", CreatedAt: time.Now(),
	}); err != nil {
		t.Fatalf("insert private manifest: %v", err)
	}

	// Anonymous viewer should see only the publicrepo
	repos, err := GetUserRepositories(db, testUser.DID, "")
	if err != nil {
		t.Fatalf("GetUserRepositories anon: %v", err)
	}
	if len(repos) != 1 || repos[0].Name != "publicrepo" {
		t.Errorf("anon viewer: expected [publicrepo], got %v", repos)
	}

	// Add a crew member to the private hold and re-query as them
	if err := UpsertCrewMember(db, &CrewMember{
		HoldDID: "did:web:private.example", MemberDID: "did:plc:crewdave", Rkey: "rk1",
	}); err != nil {
		t.Fatalf("upsert crew: %v", err)
	}

	repos, err = GetUserRepositories(db, testUser.DID, "did:plc:crewdave")
	if err != nil {
		t.Fatalf("GetUserRepositories crew: %v", err)
	}
	if len(repos) != 2 {
		t.Errorf("crew viewer: expected both repos, got %d: %v", len(repos), repos)
	}
}

// TestGetUserRepositories_BulkGrouping verifies that the bulk-fetch
// implementation correctly groups tags, manifests, annotations, and repo-page
// avatars per repository — and that ordering (last_push DESC for repos,
// created_at DESC for tags/manifests within a repo) is preserved.
//
// Regression guard for the previous N+1 implementation, which issued one
// query per repo and per relation.
func TestGetUserRepositories_BulkGrouping(t *testing.T) {
	db, err := InitDB("file:TestGetUserRepositories_BulkGrouping?mode=memory&cache=shared", LibsqlConfig{})
	if err != nil {
		t.Fatalf("init db: %v", err)
	}
	defer db.Close()

	user := &User{DID: "did:plc:owner", Handle: "owner.test", PDSEndpoint: "https://pds.example", LastSeen: time.Now()}
	if err := UpsertUser(db, user); err != nil {
		t.Fatalf("upsert user: %v", err)
	}
	if err := UpsertCaptainRecord(db, &HoldCaptainRecord{
		HoldDID: "did:web:hold.example", OwnerDID: "did:plc:holdowner", Public: true,
	}); err != nil {
		t.Fatalf("seed captain: %v", err)
	}

	now := time.Now().UTC().Truncate(time.Second)
	mediaType := "application/vnd.oci.image.manifest.v1+json"

	// repoA: two manifests (oldest then newer) and two tags. last_push = now+10s.
	manifestA1, err := InsertManifest(db, &Manifest{
		DID: user.DID, Repository: "repoA", Digest: "sha256:a1",
		HoldEndpoint: "did:web:hold.example", SchemaVersion: 2, MediaType: mediaType,
		CreatedAt: now,
	})
	if err != nil {
		t.Fatalf("insert manifest a1: %v", err)
	}
	manifestA2, err := InsertManifest(db, &Manifest{
		DID: user.DID, Repository: "repoA", Digest: "sha256:a2",
		HoldEndpoint: "did:web:hold.example", SchemaVersion: 2, MediaType: mediaType,
		CreatedAt: now.Add(5 * time.Second),
	})
	if err != nil {
		t.Fatalf("insert manifest a2: %v", err)
	}
	if err := UpsertTag(db, &Tag{DID: user.DID, Repository: "repoA", Tag: "v1", Digest: "sha256:a1", CreatedAt: now.Add(8 * time.Second)}); err != nil {
		t.Fatalf("upsert tag v1: %v", err)
	}
	if err := UpsertTag(db, &Tag{DID: user.DID, Repository: "repoA", Tag: "v2", Digest: "sha256:a2", CreatedAt: now.Add(10 * time.Second)}); err != nil {
		t.Fatalf("upsert tag v2: %v", err)
	}

	// repoB: one manifest, one tag. last_push = now+1s (older than repoA → repoA sorts first).
	if _, err := InsertManifest(db, &Manifest{
		DID: user.DID, Repository: "repoB", Digest: "sha256:b1",
		HoldEndpoint: "did:web:hold.example", SchemaVersion: 2, MediaType: mediaType,
		CreatedAt: now.Add(1 * time.Second),
	}); err != nil {
		t.Fatalf("insert manifest b1: %v", err)
	}
	if err := UpsertTag(db, &Tag{DID: user.DID, Repository: "repoB", Tag: "latest", Digest: "sha256:b1", CreatedAt: now.Add(1 * time.Second)}); err != nil {
		t.Fatalf("upsert tag b latest: %v", err)
	}

	// Annotations only on repoA, plus a repo-page avatar on repoB to exercise the icon override.
	if err := UpsertRepositoryAnnotations(db, user.DID, "repoA", map[string]string{
		"org.opencontainers.image.title":       "Repo A Title",
		"org.opencontainers.image.description": "alpha",
		"io.atcr.icon":                         "https://example.com/a.png",
	}); err != nil {
		t.Fatalf("upsert annotations: %v", err)
	}
	if err := UpsertRepoPage(db, user.DID, "repoB", "", "bafyrepob", false, now, now); err != nil {
		t.Fatalf("upsert repo page: %v", err)
	}

	repos, err := GetUserRepositories(db, user.DID, "")
	if err != nil {
		t.Fatalf("GetUserRepositories: %v", err)
	}

	// Order: repoA first (newer last_push), then repoB.
	if len(repos) != 2 {
		t.Fatalf("expected 2 repos, got %d: %#v", len(repos), repos)
	}
	if repos[0].Name != "repoA" || repos[1].Name != "repoB" {
		t.Fatalf("expected order [repoA, repoB] (last_push DESC), got [%s, %s]", repos[0].Name, repos[1].Name)
	}

	// repoA grouping
	a := repos[0]
	if len(a.Tags) != 2 {
		t.Errorf("repoA: expected 2 tags, got %d", len(a.Tags))
	}
	// tags ordered created_at DESC → v2 first
	if len(a.Tags) >= 2 && (a.Tags[0].Tag != "v2" || a.Tags[1].Tag != "v1") {
		t.Errorf("repoA tags out of order, want [v2, v1] got [%s, %s]", a.Tags[0].Tag, a.Tags[1].Tag)
	}
	if len(a.Manifests) != 2 {
		t.Errorf("repoA: expected 2 manifests, got %d", len(a.Manifests))
	}
	// manifests ordered created_at DESC → a2 first
	if len(a.Manifests) >= 2 && (a.Manifests[0].ID != manifestA2 || a.Manifests[1].ID != manifestA1) {
		t.Errorf("repoA manifests out of order, want [a2, a1] got [%d, %d]", a.Manifests[0].ID, a.Manifests[1].ID)
	}
	if a.Title != "Repo A Title" || a.Description != "alpha" {
		t.Errorf("repoA annotations not applied: title=%q desc=%q", a.Title, a.Description)
	}
	if a.IconURL != "https://example.com/a.png" {
		t.Errorf("repoA icon: expected annotation URL, got %q", a.IconURL)
	}

	// repoB grouping + page-avatar override
	b := repos[1]
	if len(b.Tags) != 1 || b.Tags[0].Tag != "latest" {
		t.Errorf("repoB tags: %#v", b.Tags)
	}
	if len(b.Manifests) != 1 || b.Manifests[0].Digest != "sha256:b1" {
		t.Errorf("repoB manifests: %#v", b.Manifests)
	}
	if b.IconURL == "" {
		t.Errorf("repoB icon should be derived from repo-page avatar CID, got empty")
	}

	// Cross-repo isolation: tags/manifests for repoB must not leak into repoA and vice versa.
	for _, tag := range a.Tags {
		if tag.Repository != "repoA" {
			t.Errorf("repoA tag has wrong repository: %#v", tag)
		}
	}
	for _, m := range b.Manifests {
		if m.Repository != "repoB" {
			t.Errorf("repoB manifest has wrong repository: %#v", m)
		}
	}
}

// TestGetStarredRepoCards verifies the listing of repos starred by a user:
// stars whose target repo no longer has a manifest are silently dropped (the
// "still exists" filter the feature relies on), and results are ordered by
// star creation time DESC.
func TestGetStarredRepoCards(t *testing.T) {
	db, err := InitDB("file:TestGetStarredRepoCards?mode=memory&cache=shared", LibsqlConfig{})
	if err != nil {
		t.Fatalf("init db: %v", err)
	}
	defer db.Close()

	now := time.Now().UTC().Truncate(time.Second)
	mediaType := "application/vnd.oci.image.manifest.v1+json"

	starrer := &User{DID: "did:plc:starrer", Handle: "starrer.test", PDSEndpoint: "https://pds.example", LastSeen: now}
	if err := UpsertUser(db, starrer); err != nil {
		t.Fatalf("upsert starrer: %v", err)
	}
	owner := &User{DID: "did:plc:owner", Handle: "owner.test", PDSEndpoint: "https://pds.example", LastSeen: now}
	if err := UpsertUser(db, owner); err != nil {
		t.Fatalf("upsert owner: %v", err)
	}
	// Owner of a deleted repo (still has a users row, just no manifests).
	ghost := &User{DID: "did:plc:ghost", Handle: "ghost.test", PDSEndpoint: "https://pds.example", LastSeen: now}
	if err := UpsertUser(db, ghost); err != nil {
		t.Fatalf("upsert ghost: %v", err)
	}

	if err := UpsertCaptainRecord(db, &HoldCaptainRecord{
		HoldDID: "did:web:hold.example", OwnerDID: "did:plc:holdowner", Public: true,
	}); err != nil {
		t.Fatalf("seed captain: %v", err)
	}

	// Two existing repos owned by `owner`.
	if _, err := InsertManifest(db, &Manifest{
		DID: owner.DID, Repository: "repo-old", Digest: "sha256:old",
		HoldEndpoint: "did:web:hold.example", SchemaVersion: 2, MediaType: mediaType,
		CreatedAt: now,
	}); err != nil {
		t.Fatalf("insert old manifest: %v", err)
	}
	if _, err := InsertManifest(db, &Manifest{
		DID: owner.DID, Repository: "repo-new", Digest: "sha256:new",
		HoldEndpoint: "did:web:hold.example", SchemaVersion: 2, MediaType: mediaType,
		CreatedAt: now.Add(5 * time.Second),
	}); err != nil {
		t.Fatalf("insert new manifest: %v", err)
	}

	// Three stars: two for existing repos, one for a deleted repo.
	if err := UpsertStar(db, starrer.DID, owner.DID, "repo-old", now); err != nil {
		t.Fatalf("upsert star repo-old: %v", err)
	}
	if err := UpsertStar(db, starrer.DID, owner.DID, "repo-new", now.Add(10*time.Second)); err != nil {
		t.Fatalf("upsert star repo-new: %v", err)
	}
	if err := UpsertStar(db, starrer.DID, ghost.DID, "deleted-repo", now.Add(20*time.Second)); err != nil {
		t.Fatalf("upsert star deleted-repo: %v", err)
	}

	cards, err := GetStarredRepoCards(db, starrer.DID, starrer.DID)
	if err != nil {
		t.Fatalf("GetStarredRepoCards: %v", err)
	}

	if len(cards) != 2 {
		t.Fatalf("expected 2 cards (deleted repo dropped), got %d: %+v", len(cards), cards)
	}

	// Newest star first.
	if cards[0].Repository != "repo-new" || cards[1].Repository != "repo-old" {
		t.Errorf("expected order [repo-new, repo-old] (newest star first), got [%s, %s]", cards[0].Repository, cards[1].Repository)
	}

	// IsStarred should reflect the viewer's perspective. Viewer == starrer here
	// so every returned row is starred-by-viewer.
	for _, c := range cards {
		if !c.IsStarred {
			t.Errorf("card %s/%s expected IsStarred=true for self-viewer", c.OwnerHandle, c.Repository)
		}
	}
}

// TestGetUserRepositories_Empty verifies the bulk-fetch path short-circuits
// cleanly when the summary query returns no rows (no extra queries issued,
// nil slice returned).
func TestGetUserRepositories_Empty(t *testing.T) {
	db, err := InitDB("file:TestGetUserRepositories_Empty?mode=memory&cache=shared", LibsqlConfig{})
	if err != nil {
		t.Fatalf("init db: %v", err)
	}
	defer db.Close()

	user := &User{DID: "did:plc:nobody", Handle: "nobody.test", PDSEndpoint: "https://pds.example", LastSeen: time.Now()}
	if err := UpsertUser(db, user); err != nil {
		t.Fatalf("upsert user: %v", err)
	}

	repos, err := GetUserRepositories(db, user.DID, "")
	if err != nil {
		t.Fatalf("GetUserRepositories empty: %v", err)
	}
	if repos != nil {
		t.Errorf("expected nil slice for user with no repos, got %#v", repos)
	}
}

@@ -39,18 +39,17 @@ func InitializeDatabase(dbPath string, cfg LibsqlConfig) (*sql.DB, *sql.DB, *Ses
	} else {
		roDSN += "?mode=ro"
	}
	readOnlyDB, err := sql.Open("libsql", roDSN)
	// Wrap with busyTimeoutConnector so every pooled read-only connection
	// gets PRAGMA busy_timeout. Without this, reads return SQLITE_BUSY
	// immediately when a write is in progress on the read-write connection
	// (busy_timeout is per-connection, so a one-shot PRAGMA only configures
	// whichever conn served it).
	roBase, err := openLibsqlLocalConnector(roDSN)
	if err != nil {
		slog.Warn("Failed to open read-only database connection", "error", err)
		slog.Warn("Failed to open read-only database connector", "error", err)
		return nil, nil, nil
	}

	// busy_timeout is per-connection — without this, reads return SQLITE_BUSY
	// immediately when a write is in progress on the read-write connection.
	var busyTimeout int
	if err := readOnlyDB.QueryRow("PRAGMA busy_timeout = 5000").Scan(&busyTimeout); err != nil {
		slog.Warn("Failed to set busy_timeout on read-only connection", "error", err)
	}
	readOnlyDB := sql.OpenDB(&busyTimeoutConnector{base: roBase, timeoutMs: 5000})

	slog.Info("UI database initialized", "mode", "readonly", "path", dbPath)


@@ -5,7 +5,9 @@
package db

import (
	"context"
	"database/sql"
	"database/sql/driver"
	"embed"
	"fmt"
	"io/fs"
@@ -55,36 +57,34 @@ func InitDB(path string, cfg LibsqlConfig) (*sql.DB, error) {
		db = sql.OpenDB(connector)
		slog.Info("Database opened in embedded replica mode", "path", path, "sync_url", cfg.SyncURL)
	} else {
		// Local-only mode: plain file via libsql driver
		// Paths starting with "file:" or ":memory:" are already valid libsql URIs
		// Local-only mode: plain file via libsql driver, wrapped so every new
		// connection gets PRAGMA busy_timeout. SQLite's busy_timeout is
		// per-connection, so a one-shot db.Exec only configures whichever
		// pooled conn served the call — leaving the rest to fail SQLITE_BUSY
		// instantly on any write contention with the jetstream/backfill workers.
		// Paths starting with "file:" or ":memory:" are already valid libsql URIs.
		dsn := path
		if !strings.HasPrefix(path, "file:") && !strings.HasPrefix(path, ":memory:") {
			dsn = "file:" + path
		}
		var err error
		db, err = sql.Open("libsql", dsn)
		baseConnector, err := openLibsqlLocalConnector(dsn)
		if err != nil {
			return nil, err
		}
		db = sql.OpenDB(&busyTimeoutConnector{base: baseConnector, timeoutMs: 5000})
		slog.Info("Database opened in local-only mode", "path", path)
	}

	// In local-only mode, configure WAL and busy_timeout locally.
	// In embedded replica mode, the remote server manages these settings
	// and PRAGMA assignments are rejected as "unsupported statement"
	// (observed with Bunny Database; Turso may behave similarly).
	// In local-only mode, set WAL mode (database-wide setting, persists
	// across connections — single call is sufficient unlike busy_timeout).
	// In embedded replica mode, the remote server manages this and the
	// PRAGMA is rejected as "unsupported statement" (observed with Bunny;
	// Turso may behave similarly).
	if cfg.SyncURL == "" {
		// Enable WAL mode for concurrent read/write access
		var journalMode string
		if err := db.QueryRow("PRAGMA journal_mode = WAL").Scan(&journalMode); err != nil {
			return nil, err
		}

		// Retry on lock instead of failing immediately (5s timeout)
		var busyTimeout int
		if err := db.QueryRow("PRAGMA busy_timeout = 5000").Scan(&busyTimeout); err != nil {
			return nil, err
		}
	}

	// Enable foreign keys
@@ -92,6 +92,16 @@ func InitDB(path string, cfg LibsqlConfig) (*sql.DB, error) {
		return nil, err
	}

	// Bound the connection pool. With a remote target (Bunny Database), each
	// idle conn is a stable libsql stream — keeping a handful warm avoids
	// reconnect cost, capping the total prevents runaway contention. Short
	// lifetimes ensure we recycle past any idle-side disconnects and drop any
	// poisoned conn that survived IsPoisonedTxErr eviction.
	db.SetMaxOpenConns(8)
	db.SetMaxIdleConns(4)
	db.SetConnMaxLifetime(5 * time.Minute)
	db.SetConnMaxIdleTime(2 * time.Minute)

	// Check if this is an existing database with migrations applied
	isExisting, err := hasAppliedMigrations(db)
	if err != nil {
@@ -202,20 +212,21 @@ func runMigrations(db *sql.DB, freshDB bool) error {
		if err != nil {
			return fmt.Errorf("failed to begin transaction for migration %d: %w", m.Version, err)
		}
		// Deferred rollback is a no-op once Commit succeeds; it guards against
		// panics and any early return that forgets an explicit rollback.
		defer func() { _ = tx.Rollback() }()

		// Split query into individual statements and execute each
		// go-sqlite3's Exec() doesn't reliably execute all statements in multi-statement queries
		statements := splitSQLStatements(m.Query)
		for i, stmt := range statements {
			if _, err := tx.Exec(stmt); err != nil {
				tx.Rollback()
				return fmt.Errorf("failed to apply migration %d (%s) statement %d: %w", m.Version, m.Name, i+1, err)
			}
		}

		// Record migration
		if _, err := tx.Exec("INSERT INTO schema_migrations (version) VALUES (?)", m.Version); err != nil {
			tx.Rollback()
			return fmt.Errorf("failed to record migration %d: %w", m.Version, err)
		}

@@ -366,3 +377,59 @@ func parseMigrationFilename(filename string) (int, string, error) {

	return version, name, nil
}

// openLibsqlLocalConnector returns a driver.Connector for a local libsql DSN.
// go-libsql exports NewEmbeddedReplicaConnector for replica mode but no public
// constructor for local files, so we obtain the driver via a probe sql.Open
// (which is lazy and opens no connection) and ask it for a Connector.
func openLibsqlLocalConnector(dsn string) (driver.Connector, error) {
	probe, err := sql.Open("libsql", dsn)
	if err != nil {
		return nil, fmt.Errorf("probe libsql driver: %w", err)
	}
	drv := probe.Driver()
	_ = probe.Close()

	dctx, ok := drv.(driver.DriverContext)
	if !ok {
		return nil, fmt.Errorf("libsql driver does not implement driver.DriverContext")
	}
	return dctx.OpenConnector(dsn)
}

// busyTimeoutConnector wraps a driver.Connector and runs PRAGMA busy_timeout
// on every newly opened connection. SQLite's busy_timeout is per-connection,
// so this is the only way to ensure every conn in the pool waits on lock
// contention instead of returning SQLITE_BUSY immediately.
type busyTimeoutConnector struct {
	base      driver.Connector
	timeoutMs int
}

func (c *busyTimeoutConnector) Connect(ctx context.Context) (driver.Conn, error) {
	conn, err := c.base.Connect(ctx)
	if err != nil {
		return nil, err
	}

	// libsql treats PRAGMA assignments as queries that return a row, so we
	// must use QueryerContext rather than ExecerContext.
	queryer, ok := conn.(driver.QueryerContext)
	if !ok {
		_ = conn.Close()
		return nil, fmt.Errorf("libsql conn does not support QueryerContext")
	}

	rows, err := queryer.QueryContext(ctx, fmt.Sprintf("PRAGMA busy_timeout = %d", c.timeoutMs), nil)
	if err != nil {
		_ = conn.Close()
		return nil, fmt.Errorf("set busy_timeout on new conn: %w", err)
	}
	_ = rows.Close()

	return conn, nil
}

func (c *busyTimeoutConnector) Driver() driver.Driver {
	return c.base.Driver()
}
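
// Illustrative contrast (not in this diff): the naive one-shot PRAGMA only
// configures whichever pooled connection happens to serve the Exec, which is
// exactly the bug the connector wrapper above fixes.
//
//	db, _ := sql.Open("libsql", "file:app.db")
//	db.Exec("PRAGMA busy_timeout = 5000") // sets it on ONE conn; the rest
//	                                      // still fail SQLITE_BUSY instantly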

@@ -180,6 +180,12 @@ CREATE TABLE IF NOT EXISTS repository_stats_daily (
);
CREATE INDEX IF NOT EXISTS idx_repo_stats_daily_date ON repository_stats_daily(date DESC);

CREATE TABLE IF NOT EXISTS jetstream_cursor (
    id INTEGER PRIMARY KEY CHECK (id = 1),
    cursor INTEGER NOT NULL,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);

CREATE TABLE IF NOT EXISTS stars (
    starrer_did TEXT NOT NULL,
    owner_did TEXT NOT NULL,
@@ -292,3 +298,18 @@ CREATE TABLE IF NOT EXISTS advisor_suggestions (
    suggestions_json TEXT NOT NULL,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);

CREATE TABLE IF NOT EXISTS taken_down_subjects (
    src TEXT NOT NULL,
    did TEXT NOT NULL,
    repo TEXT NOT NULL DEFAULT '',
    cts TIMESTAMP NOT NULL,
    PRIMARY KEY (src, did, repo)
);
CREATE INDEX IF NOT EXISTS idx_taken_down_subjects_did ON taken_down_subjects(did);
CREATE INDEX IF NOT EXISTS idx_taken_down_subjects_did_repo ON taken_down_subjects(did, repo);

CREATE TABLE IF NOT EXISTS labeler_cursor (
    src TEXT PRIMARY KEY,
    cursor INTEGER NOT NULL
);
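
-- Illustrative usage (not in this diff; 'did:plc:labeler' and 12345 are
-- placeholder values): a labeler consumer would persist its position with
-- standard SQLite upsert semantics:
--
--   INSERT INTO labeler_cursor (src, cursor) VALUES ('did:plc:labeler', 12345)
--       ON CONFLICT(src) DO UPDATE SET cursor = excluded.cursor;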
|
||||
|
||||
@@ -164,43 +164,15 @@ func (h *UnstarRepositoryHandler) ServeHTTP(w http.ResponseWriter, r *http.Reque
|
||||
render.JSON(w, r, map[string]bool{"starred": false})
|
||||
}
|
||||
|
||||
// CredentialHelperVersionResponse is the response for the credential helper version API
|
||||
type CredentialHelperVersionResponse struct {
|
||||
Latest string `json:"latest"`
|
||||
DownloadURLs map[string]string `json:"download_urls"`
|
||||
Checksums map[string]string `json:"checksums"`
|
||||
ReleaseNotes string `json:"release_notes,omitempty"`
|
||||
}
|
||||
|
||||
// CredentialHelperVersionHandler returns the latest credential helper version info
|
||||
// Note: Version info is fetched dynamically from TangledRepo's releases
|
||||
type CredentialHelperVersionHandler struct {
|
||||
TangledRepo string
|
||||
}
|
||||
|
||||
func (h *CredentialHelperVersionHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
// This endpoint directs users to the Tangled repository for downloads
|
||||
// Version info should be fetched from the repository's releases page
|
||||
response := CredentialHelperVersionResponse{
|
||||
Latest: "",
|
||||
DownloadURLs: map[string]string{"tangled_repo": h.TangledRepo},
|
||||
Checksums: nil,
|
||||
ReleaseNotes: "Visit the Tangled repository for the latest releases: " + h.TangledRepo,
|
||||
}
|
||||
|
||||
render.SetContentType(render.ContentTypeJSON)
|
||||
w.Header().Set("Cache-Control", "public, max-age=300") // Cache for 5 minutes
|
||||
render.JSON(w, r, response)
|
||||
}
|
||||
|
||||
// renderStarComponent renders the star component HTML for HTMX responses
|
||||
func renderStarComponent(w http.ResponseWriter, tmpl *template.Template, handle, repository string, isStarred bool, starCount int) {
|
||||
data := map[string]any{
|
||||
"Interactive": true,
|
||||
"Handle": handle,
|
||||
"Repository": repository,
|
||||
"IsStarred": isStarred,
|
||||
"StarCount": starCount,
|
||||
"Interactive": true,
|
||||
"IsAuthenticated": true,
|
||||
"Handle": handle,
|
||||
"Repository": repository,
|
||||
"IsStarred": isStarred,
|
||||
"StarCount": starCount,
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"atcr.io/pkg/appview/webhooks"
|
||||
"atcr.io/pkg/auth/oauth"
|
||||
"atcr.io/pkg/billing"
|
||||
indigooauth "github.com/bluesky-social/indigo/atproto/auth/oauth"
|
||||
"github.com/bluesky-social/indigo/atproto/identity"
|
||||
)
|
||||
|
||||
@@ -33,6 +34,7 @@ type BaseUIHandler struct {
|
||||
Directory identity.Directory
|
||||
BillingManager *billing.Manager
|
||||
WebhookDispatcher *webhooks.Dispatcher
|
||||
OAuthClientApp *indigooauth.ClientApp
|
||||
|
||||
// Stores
|
||||
SessionStore *db.SessionStore
|
||||
@@ -46,4 +48,5 @@ type BaseUIHandler struct {
|
||||
ClientName string // Full name: "AT Container Registry"
|
||||
ClientShortName string // Short name: "ATCR"
|
||||
AIAdvisorEnabled bool // True when Claude API key is configured
|
||||
SourceURL string // Source code URL for the footer "Source" link
|
||||
}
|
||||
|
||||
@@ -18,6 +18,8 @@ type PageData struct {
|
||||
ClientShortName string // Brand name for templates (e.g., "ATCR")
|
||||
OciClient string // Preferred OCI client for pull commands (e.g., "docker", "podman")
|
||||
AIAdvisorEnabled bool // True when AI Image Advisor is available
|
||||
SourceURL string // Source code URL for the footer "Source" link
|
||||
CurrentPath string // Request path (used for OAuth return_to)
|
||||
}
|
||||
|
||||
// NewPageData creates a PageData struct with common fields populated from the request
|
||||
@@ -36,6 +38,8 @@ func NewPageData(r *http.Request, h *BaseUIHandler) PageData {
|
||||
ClientShortName: h.ClientShortName,
|
||||
OciClient: ociClient,
|
||||
AIAdvisorEnabled: h.AIAdvisorEnabled,
|
||||
SourceURL: h.SourceURL,
|
||||
CurrentPath: r.URL.RequestURI(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,14 +1,16 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"atcr.io/pkg/appview/db"
|
||||
"atcr.io/pkg/atproto"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/render"
|
||||
)
|
||||
@@ -205,18 +207,18 @@ func (h *DeviceApprovalPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Req
|
||||
// Get pending authorization
|
||||
pending, ok := h.DeviceStore.GetPendingByUserCode(userCode)
|
||||
if !ok {
|
||||
h.renderError(w, "Invalid or expired authorization code")
|
||||
h.renderError(w, r, "That authorization code has expired or doesn't exist. Start a fresh `docker login` from your terminal to get a new one.")
|
||||
return
|
||||
}
|
||||
|
||||
// Check if already approved
|
||||
if pending.ApprovedDID != nil && *pending.ApprovedDID != "" {
|
||||
h.renderSuccess(w, pending.DeviceName)
|
||||
h.renderSuccess(w, r, pending.DeviceName)
|
||||
return
|
||||
}
|
||||
|
||||
// Render approval page
|
||||
h.renderApprovalPage(w, sess.Handle, pending)
|
||||
h.renderApprovalPage(w, r, sess, pending)
|
||||
}
|
||||
|
||||
// DeviceApproveRequest is the request to approve a device
|
||||
@@ -359,60 +361,157 @@ func (h *RevokeDeviceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
    w.WriteHeader(http.StatusOK)
}

// Helper functions
// renderApprovalPage renders the device authorization confirmation page.
// The browser-side identity (avatar + handle + display name) and the
// terminal-side device facts are paired side-by-side so a wrong-account
// approval is visually obvious before the Approve button is clicked.
func (h *DeviceApprovalPageHandler) renderApprovalPage(w http.ResponseWriter, r *http.Request, sess *db.Session, pending *db.PendingAuthorization) {
    // Hydrate the signed-in sailor: cached avatar/handle from our local
    // users table; live displayName from their PDS (best-effort with a tight
    // timeout — the page must still render quickly if Bluesky is slow).
    user := &db.User{
        DID:         sess.DID,
        Handle:      sess.Handle,
        PDSEndpoint: sess.PDSEndpoint,
    }
    if h.ReadOnlyDB != nil {
        if u, err := db.GetUserByDID(h.ReadOnlyDB, sess.DID); err == nil && u != nil {
            user = u
        }
    }

    displayName := fetchDisplayName(r.Context(), sess)

    meta := NewPageMeta(
        "Authorize device - "+h.ClientShortName,
        "Confirm device authorization for "+h.ClientShortName,
    ).WithRobots("noindex").
        WithSiteName(h.ClientShortName)

    pd := NewPageData(r, &h.BaseUIHandler)
    pd.User = user

func (h *DeviceApprovalPageHandler) renderApprovalPage(w http.ResponseWriter, handle string, pending *db.PendingAuthorization) {
    tmpl := template.Must(template.New("approval").Parse(deviceApprovalTemplate))
    data := struct {
        Handle     string
        DeviceName string
        UserCode   string
        IPAddress  string
        PageData
        Meta               *PageMeta
        Pending            *db.PendingAuthorization
        ProfileDisplayName string
        UserDIDShort       string
        UserAgentShort     string
    }{
        Handle:     handle,
        DeviceName: pending.DeviceName,
        UserCode:   pending.UserCode,
        IPAddress:  pending.IPAddress,
        PageData:           pd,
        Meta:               meta,
        Pending:            pending,
        ProfileDisplayName: displayName,
        UserDIDShort:       shortenDID(sess.DID),
        UserAgentShort:     shortenUserAgent(pending.UserAgent),
    }

    w.Header().Set("Content-Type", "text/html; charset=utf-8")
    if err := tmpl.Execute(w, data); err != nil {
    if err := h.Templates.ExecuteTemplate(w, "device-approve", data); err != nil {
        slog.Error("Failed to render device approval page", "component", "device/approve", "error", err)
        http.Error(w, "failed to render template", http.StatusInternalServerError)
        return
    }
}

func (h *DeviceApprovalPageHandler) renderSuccess(w http.ResponseWriter, deviceName string) {
    tmpl := template.Must(template.New("success").Parse(deviceSuccessTemplate))
func (h *DeviceApprovalPageHandler) renderSuccess(w http.ResponseWriter, r *http.Request, deviceName string) {
    meta := NewPageMeta(
        "Device authorized - "+h.ClientShortName,
        "Device authorization complete",
    ).WithRobots("noindex").
        WithSiteName(h.ClientShortName)

    data := struct {
        PageData
        Meta       *PageMeta
        DeviceName string
    }{
        PageData:   NewPageData(r, &h.BaseUIHandler),
        Meta:       meta,
        DeviceName: deviceName,
    }

    w.Header().Set("Content-Type", "text/html; charset=utf-8")
    if err := tmpl.Execute(w, data); err != nil {
    if err := h.Templates.ExecuteTemplate(w, "device-approved", data); err != nil {
        slog.Error("Failed to render device success page", "component", "device/approve", "error", err)
        http.Error(w, "failed to render template", http.StatusInternalServerError)
        return
    }
}

func (h *DeviceApprovalPageHandler) renderError(w http.ResponseWriter, message string) {
    tmpl := template.Must(template.New("error").Parse(deviceErrorTemplate))
func (h *DeviceApprovalPageHandler) renderError(w http.ResponseWriter, r *http.Request, message string) {
    meta := NewPageMeta(
        "Authorization error - "+h.ClientShortName,
        "Device authorization could not be completed",
    ).WithRobots("noindex").
        WithSiteName(h.ClientShortName)

    data := struct {
        PageData
        Meta    *PageMeta
        Message string
    }{
        Message: message,
        PageData: NewPageData(r, &h.BaseUIHandler),
        Meta:     meta,
        Message:  message,
    }

    w.Header().Set("Content-Type", "text/html; charset=utf-8")
    w.WriteHeader(http.StatusBadRequest)
    if err := tmpl.Execute(w, data); err != nil {
        http.Error(w, "failed to render template", http.StatusInternalServerError)
        return
    if err := h.Templates.ExecuteTemplate(w, "device-error", data); err != nil {
        slog.Error("Failed to render device error page", "component", "device/approve", "error", err)
    }
}

// fetchDisplayName best-effort fetches the sailor's display name from
// their PDS. Returns "" on any failure — the template falls back to the
// handle so the page never blocks on a slow upstream.
func fetchDisplayName(ctx context.Context, sess *db.Session) string {
    if sess == nil || sess.PDSEndpoint == "" {
        return ""
    }
    timeoutCtx, cancel := context.WithTimeout(ctx, 1500*time.Millisecond)
    defer cancel()

    client := atproto.NewClient(sess.PDSEndpoint, sess.DID, "")
    profile, err := client.GetActorProfile(timeoutCtx, sess.DID)
    if err != nil || profile == nil {
        return ""
    }
    return strings.TrimSpace(profile.DisplayName)
}

// shortenDID returns a compact DID for display (e.g.
// "did:plc:abc…xyz") without obscuring its kind.
func shortenDID(did string) string {
    if len(did) <= 24 {
        return did
    }
    // Keep the prefix (did:plc: / did:web:) and the last 6 chars.
    prefixEnd := strings.Index(did[4:], ":")
    if prefixEnd < 0 {
        return did[:14] + "…" + did[len(did)-6:]
    }
    prefixEnd += 5 // include "did:" and the trailing ":"
    if len(did)-prefixEnd <= 14 {
        return did
    }
    return did[:prefixEnd+6] + "…" + did[len(did)-6:]
}

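// Example behavior (a sketch; the sample DIDs are invented for illustration):
//
//	shortenDID("did:web:example.com")
//	        == "did:web:example.com"   // 24 chars or fewer: unchanged
//	shortenDID("did:plc:abcdefghijklmnopqrstuvwx")
//	        == "did:plc:abcdef…stuvwx" // method prefix + 6 chars … last 6
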
// shortenUserAgent picks a readable summary of the device's UA string —
// almost always something like "docker-credential-atcr/0.x" — and caps
// the length so the device card doesn't blow up on long UA strings.
func shortenUserAgent(ua string) string {
    ua = strings.TrimSpace(ua)
    if ua == "" {
        return ""
    }
    if len(ua) > 80 {
        return ua[:80] + "…"
    }
    return ua
}

func getClientIP(r *http.Request) string {
    // Check X-Forwarded-For header
    xff := r.Header.Get("X-Forwarded-For")
@@ -435,122 +534,3 @@ func getClientIP(r *http.Request) string {

    return r.RemoteAddr
}

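// The middle of getClientIP is elided in this hunk. For orientation only:
// X-Forwarded-For handling conventionally takes the first comma-separated
// hop, along these lines (a sketch, not the function's actual body):
//
//	if xff != "" {
//		if idx := strings.Index(xff, ","); idx >= 0 {
//			return strings.TrimSpace(xff[:idx])
//		}
//		return strings.TrimSpace(xff)
//	}
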
// HTML templates

const deviceApprovalTemplate = `
<!DOCTYPE html>
<html>
<head>
    <title>Authorize Device - ATCR</title>
    <style>
        body { font-family: sans-serif; max-width: 600px; margin: 50px auto; padding: 20px; }
        .approval-box { background: #e3f2fd; border: 1px solid #90caf9; padding: 30px; border-radius: 8px; }
        .user-code { font-size: 32px; font-weight: bold; letter-spacing: 4px; text-align: center; margin: 20px 0; color: #1976d2; }
        .device-info { background: #fff; padding: 15px; border-radius: 4px; margin: 15px 0; }
        .device-info dt { font-weight: bold; margin-top: 10px; }
        .device-info dd { margin-left: 0; color: #666; }
        .actions { text-align: center; margin-top: 30px; }
        button { font-size: 16px; padding: 12px 30px; margin: 0 10px; border: none; border-radius: 4px; cursor: pointer; }
        .approve { background: #4caf50; color: white; }
        .approve:hover { background: #45a049; }
        .deny { background: #f44336; color: white; }
        .deny:hover { background: #da190b; }
    </style>
</head>
<body>
    <div class="approval-box">
        <h1>Authorize Device</h1>
        <p>User: <strong>{{.Handle}}</strong></p>

        <div class="user-code">{{.UserCode}}</div>

        <div class="device-info">
            <dl>
                <dt>Device Name:</dt>
                <dd>{{.DeviceName}}</dd>
                <dt>IP Address:</dt>
                <dd>{{.IPAddress}}</dd>
            </dl>
        </div>

        <p><strong>Do you want to authorize this device?</strong></p>
        <p>This device will be able to push and pull container images to your registry.</p>

        <div class="actions">
            <button class="approve" onclick="approve(true)">Approve</button>
            <button class="deny" onclick="approve(false)">Deny</button>
        </div>
    </div>

    <script>
    async function approve(approved) {
        const resp = await fetch('/device/approve', {
            method: 'POST',
            headers: {'Content-Type': 'application/json'},
            body: JSON.stringify({
                user_code: '{{.UserCode}}',
                approve: approved
            })
        });

        if (resp.ok) {
            if (approved) {
                window.location.href = '/device?user_code={{.UserCode}}';
            } else {
                alert('Device authorization denied');
                window.location.href = '/';
            }
        } else {
            alert('Failed to process authorization');
        }
    }
    </script>
</body>
</html>
`

const deviceSuccessTemplate = `
<!DOCTYPE html>
<html>
<head>
    <title>Device Authorized - ATCR</title>
    <style>
        body { font-family: sans-serif; max-width: 600px; margin: 50px auto; padding: 20px; }
        .success { background: #d4edda; border: 1px solid #c3e6cb; padding: 30px; border-radius: 8px; }
        h1 { color: #155724; }
        a { color: #007bff; }
    </style>
</head>
<body>
    <div class="success">
        <h1>✓ Device Authorized!</h1>
        <p>Device <strong>{{.DeviceName}}</strong> has been successfully authorized.</p>
        <p>You can now close this window and return to your terminal.</p>
        <p><a href="/settings#devices">View your authorized devices</a></p>
    </div>
</body>
</html>
`

const deviceErrorTemplate = `
<!DOCTYPE html>
<html>
<head>
    <title>Authorization Error - ATCR</title>
    <style>
        body { font-family: sans-serif; max-width: 600px; margin: 50px auto; padding: 20px; }
        .error { background: #f8d7da; border: 1px solid #f5c6cb; padding: 30px; border-radius: 8px; }
        h1 { color: #721c24; }
        a { color: #007bff; }
    </style>
</head>
<body>
    <div class="error">
        <h1>✗ Authorization Error</h1>
        <p>{{.Message}}</p>
        <p><a href="/">Return to home</a></p>
    </div>
</body>
</html>
`

@@ -183,14 +183,18 @@ func computeDiffSummary(fromLayers, toLayers []LayerDetail, vulnDiff []VulnDiffE
}

func addToSevCount(s *vulnSummary, severity string) {
    switch severity {
    case "Critical":
    // Normalize to canonical casing so "CRITICAL", "critical", "Crit" all land
    // in the same bucket. Unknown severities count toward the total but don't
    // bump any bucket — the template renders them as "Unknown" via the
    // severityLabel helper.
    switch strings.ToLower(strings.TrimSpace(severity)) {
    case "critical", "crit", "c":
        s.Critical++
    case "High":
    case "high", "h":
        s.High++
    case "Medium":
    case "medium", "med", "m":
        s.Medium++
    case "Low":
    case "low", "l":
        s.Low++
    }
    s.Total++

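// A few concrete inputs pin down the normalization contract (a sketch
// against the buckets shown above):
//
//	s := &vulnSummary{}
//	for _, sev := range []string{"CRITICAL", "crit", " High ", "med", "garbage"} {
//		addToSevCount(s, sev)
//	}
//	// Critical=2, High=1, Medium=1, Low=0, Total=5 — "garbage" bumps
//	// only Total and renders as "Unknown" in the template.
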
@@ -230,7 +234,7 @@ func (h *ManifestDiffHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
        return
    }
    if owner.Handle != resolvedHandle {
        _ = db.UpdateUserHandle(h.ReadOnlyDB, did, resolvedHandle)
        _ = db.UpdateUserHandle(h.DB, did, resolvedHandle)
        owner.Handle = resolvedHandle
    }

@@ -329,6 +333,26 @@ func (h *ManifestDiffHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
    fromPlatformDigest := ""
    toPlatformDigest := ""

    // platKey returns "os/arch[/variant]" for a platform.
    platKey := func(os, arch, variant string) string {
        k := os + "/" + arch
        if variant != "" {
            k += "/" + variant
        }
        return k
    }

    // pickPlatformChild returns the child digest from a manifest list whose
    // platform matches the given key. Returns "" if no match.
    pickPlatformChild := func(m *db.ManifestWithMetadata, key string) string {
        for _, p := range m.Platforms {
            if platKey(p.OS, p.Architecture, p.Variant) == key {
                return p.Digest
            }
        }
        return ""
    }

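    // Illustrative key shapes (values not from this request):
    //   platKey("linux", "arm", "v7") == "linux/arm/v7"
    //   platKey("linux", "amd64", "") == "linux/amd64"
    // pickPlatformChild(m, "linux/arm/v7") then returns the digest of the
    // matching child in m.Platforms, or "" when the index has no such entry.
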
    if isMultiArch {
        // Build intersection of platforms
        for _, fp := range fromManifest.Platforms {
@@ -344,33 +368,47 @@ func (h *ManifestDiffHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
        selectedPlatform = r.URL.Query().Get("platform")
        if len(commonPlatforms) > 0 {
            if selectedPlatform == "" {
                selectedPlatform = commonPlatforms[0].OS + "/" + commonPlatforms[0].Architecture
                if commonPlatforms[0].Variant != "" {
                    selectedPlatform += "/" + commonPlatforms[0].Variant
                }
                selectedPlatform = platKey(commonPlatforms[0].OS, commonPlatforms[0].Architecture, commonPlatforms[0].Variant)
            }
            // Find matching platform digests
            for _, fp := range fromManifest.Platforms {
                platKey := fp.OS + "/" + fp.Architecture
                if fp.Variant != "" {
                    platKey += "/" + fp.Variant
                }
                if platKey == selectedPlatform {
                    fromPlatformDigest = fp.Digest
                    break
                }
            }
            for _, tp := range toManifest.Platforms {
                platKey := tp.OS + "/" + tp.Architecture
                if tp.Variant != "" {
                    platKey += "/" + tp.Variant
                }
                if platKey == selectedPlatform {
                    toPlatformDigest = tp.Digest
            fromPlatformDigest = pickPlatformChild(fromManifest, selectedPlatform)
            toPlatformDigest = pickPlatformChild(toManifest, selectedPlatform)
        }
    } else if fromManifest.IsManifestList != toManifest.IsManifestList {
        // Mixed: one side is a manifest list, the other is a platform child.
        // Match them by looking up the single-arch side's platform via its
        // parent manifest_references row and picking the matching child from
        // the manifest list side.
        var listSide *db.ManifestWithMetadata
        var childDigest string
        if fromManifest.IsManifestList {
            listSide = fromManifest
            childDigest = toDigest
        } else {
            listSide = toManifest
            childDigest = fromDigest
        }

        plat, _ := db.GetChildManifestPlatform(h.ReadOnlyDB, owner.DID, repo, childDigest)
        var listChildDigest string
        if plat != nil {
            listChildDigest = pickPlatformChild(listSide, platKey(plat.OS, plat.Architecture, plat.Variant))
        }
        // Fallback: if we couldn't determine the platform (or no match),
        // default to the first non-attestation child of the manifest list so
        // the diff at least shows real layers instead of an empty index.
        if listChildDigest == "" {
            for _, p := range listSide.Platforms {
                if p.Digest != "" {
                    listChildDigest = p.Digest
                    break
                }
            }
        }
        if fromManifest.IsManifestList {
            fromPlatformDigest = listChildDigest
        } else {
            toPlatformDigest = listChildDigest
        }
    }

    // Fetch layer/vuln data in parallel

@@ -387,17 +425,47 @@ func (h *ManifestDiffHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
    }()
    wg.Wait()

    if fromData.err != nil || toData.err != nil {
        RenderNotFound(w, r, &h.BaseUIHandler)
        return
    // Track per-side fetch failures so we render the page with an inline
    // alert naming which tag failed, instead of a generic 404 that makes
    // users guess whether they typoed a tag or hit a transient outage.
    // fromData.manifest / toData.manifest is nil only when the re-fetch at
    // the top of fetchManifest hit a DB error (the tag resolution earlier
    // already ruled out typos).
    fromFailed := fromData.err != nil || fromData.manifest == nil
    toFailed := toData.err != nil || toData.manifest == nil

    // Fall back to the top-level manifest we already fetched so the page
    // still has something to render for tag labels and metadata.
    if fromFailed {
        fromData.manifest = fromManifest
    }
    if toFailed {
        toData.manifest = toManifest
    }

    // Compute diffs
    layerDiff := computeLayerDiff(fromData.layers, toData.layers)

    // ScanStatus distinguishes why vuln data may be missing: "ok" when both
    // sides returned clean scan results; "no-data" when a scan was never
    // recorded; "hold-unreachable" when we couldn't reach the hold to ask.
    // The template branches on these so users can tell "not scanned yet"
    // from "hold offline" at a glance.
    fromScanStatus := "ok"
    toScanStatus := "ok"
    if fromData.vulnData == nil {
        fromScanStatus = "hold-unreachable"
    } else if fromData.vulnData.Error != "" {
        fromScanStatus = "no-data"
    }
    if toData.vulnData == nil {
        toScanStatus = "hold-unreachable"
    } else if toData.vulnData.Error != "" {
        toScanStatus = "no-data"
    }

    var vulnDiff []VulnDiffEntry
    hasVulnData := fromData.vulnData != nil && toData.vulnData != nil &&
        fromData.vulnData.Error == "" && toData.vulnData.Error == ""
    hasVulnData := fromScanStatus == "ok" && toScanStatus == "ok"
    if hasVulnData {
        vulnDiff = computeVulnDiff(fromData.vulnData.Matches, toData.vulnData.Matches)
    }

@@ -448,6 +516,10 @@ func (h *ManifestDiffHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
        NewVulns         []vulnMatch
        UnchangedVulns   []vulnMatch
        HasVulnData      bool
        FromScanStatus   string
        ToScanStatus     string
        FromFailed       bool
        ToFailed         bool
        IsMultiArch      bool
        CommonPlatforms  []db.PlatformInfo
        SelectedPlatform string
@@ -468,6 +540,10 @@ func (h *ManifestDiffHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
        NewVulns:         newVulns,
        UnchangedVulns:   unchangedVulns,
        HasVulnData:      hasVulnData,
        FromScanStatus:   fromScanStatus,
        ToScanStatus:     toScanStatus,
        FromFailed:       fromFailed,
        ToFailed:         toFailed,
        IsMultiArch:      isMultiArch,
        CommonPlatforms:  commonPlatforms,
        SelectedPlatform: selectedPlatform,

@@ -1,6 +1,7 @@
package handlers

import (
    "context"
    "log/slog"
    "net/http"
    "strings"
@@ -21,6 +22,53 @@ type LayerDetail struct {
    EmptyLayer bool // ENV, LABEL, etc. — no actual layer blob
}

// HelmChartContent is the data the helm-aware digest content needs: parsed
// Chart.yaml metadata + a single chart-tarball "layer" pulled from the DB.
type HelmChartContent struct {
    Meta            *holdclient.HelmChartMeta
    Tarball         *LayerDetail
    MetaFetchFailed bool // hold reachable but config blob couldn't be parsed
    HoldUnreachable bool
}

// buildHelmContent fetches helm chart metadata + the single chart-tarball layer.
// Returns a populated HelmChartContent even when the meta fetch fails so the
// page can still render the artifact card.
func buildHelmContent(ctx context.Context, holdURL string, digest string, dbLayers []db.Layer) *HelmChartContent {
    content := &HelmChartContent{}
    if holdURL == "" {
        content.HoldUnreachable = true
    } else {
        meta, err := holdclient.FetchHelmChartMeta(ctx, holdURL, digest)
        if err != nil {
            slog.Warn("Failed to fetch helm chart meta", "error", err, "digest", digest)
            content.MetaFetchFailed = true
        } else {
            content.Meta = meta
        }
    }
    if len(dbLayers) > 0 {
        // Helm charts are always single-layer (the chart tarball). If somehow
        // multiple are present, pick the one with helm chart content media
        // type, falling back to the first.
        chosen := 0
        for i, l := range dbLayers {
            if strings.Contains(l.MediaType, "helm.chart.content") {
                chosen = i
                break
            }
        }
        l := dbLayers[chosen]
        content.Tarball = &LayerDetail{
            Index:     l.LayerIndex + 1,
            Digest:    l.Digest,
            Size:      l.Size,
            MediaType: l.MediaType,
        }
    }
    return content
}

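// Callers branch on three outcomes; roughly (a sketch of the consuming
// side, not code from this change):
//
//	helm := buildHelmContent(ctx, holdURL, digest, dbLayers)
//	switch {
//	case helm.HoldUnreachable: // hold down: tarball card only, no Chart.yaml facts
//	case helm.MetaFetchFailed: // hold up, config blob unparseable: same degraded card
//	case helm.Meta != nil:     // full card: chart name/version plus tarball layer
//	}
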
// DigestDetailHandler renders the digest detail page with layers + vulnerabilities.
type DigestDetailHandler struct {
    BaseUIHandler
@@ -45,13 +93,19 @@ func (h *DigestDetailHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
        return
    }

    // Check for takedown labels
    if taken, _ := db.IsTakenDown(h.ReadOnlyDB, did, repository); taken {
        RenderNotFound(w, r, &h.BaseUIHandler)
        return
    }

    owner, err := db.GetUserByDID(h.ReadOnlyDB, did)
    if err != nil || owner == nil {
        RenderNotFound(w, r, &h.BaseUIHandler)
        return
    }
    if owner.Handle != resolvedHandle {
        _ = db.UpdateUserHandle(h.ReadOnlyDB, did, resolvedHandle)
        _ = db.UpdateUserHandle(h.DB, did, resolvedHandle)
        owner.Handle = resolvedHandle
    }

@@ -66,10 +120,28 @@ func (h *DigestDetailHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
    var layers []LayerDetail
    var vulnData *vulnDetailsData
    var sbomData *sbomDetailsData
    var helmContent *HelmChartContent

    if manifest.IsManifestList {
        // Manifest list: no layers, show platform picker
        // Platforms are already populated by GetManifestDetail
    } else if manifest.ArtifactType == db.ArtifactTypeHelmChart {
        // Helm chart: skip OCI history / vuln / SBOM entirely. Fetch helm
        // chart metadata from the same config blob and the single tarball
        // layer from the DB.
        dbLayers, err := db.GetLayersForManifest(h.ReadOnlyDB, manifest.ID)
        if err != nil {
            slog.Warn("Failed to fetch layers", "error", err)
        }
        hold, holdErr := ResolveHold(r.Context(), h.ReadOnlyDB, manifest.HoldEndpoint)
        holdURL := ""
        if holdErr == nil {
            holdURL = hold.URL
        }
        helmContent = buildHelmContent(r.Context(), holdURL, digest, dbLayers)
        if holdErr != nil {
            helmContent.HoldUnreachable = true
        }
    } else {
        // Single manifest: fetch layers from DB
        dbLayers, err := db.GetLayersForManifest(h.ReadOnlyDB, manifest.ID)
@@ -124,6 +196,7 @@ func (h *DigestDetailHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
        WithCanonical("https://" + h.SiteURL + "/d/" + owner.Handle + "/" + repository + "/" + digest).
        WithSiteName(h.ClientShortName)

    pageData := NewPageData(r, &h.BaseUIHandler)
    data := struct {
        PageData
        Meta *PageMeta
@@ -133,9 +206,12 @@ func (h *DigestDetailHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
        Layers           []LayerDetail
        VulnData         *vulnDetailsData
        SbomData         *sbomDetailsData
        HelmContent      *HelmChartContent
        SelectedPlatform string
        RegistryURL      string
        OciClient        string
    }{
        PageData: NewPageData(r, &h.BaseUIHandler),
        PageData: pageData,
        Meta:     meta,
        Owner:    owner,
        Repository: repository,
@@ -143,7 +219,10 @@ func (h *DigestDetailHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
        Layers:           layers,
        VulnData:         vulnData,
        SbomData:         sbomData,
        HelmContent:      helmContent,
        SelectedPlatform: selectedPlatform,
        RegistryURL:      h.RegistryURL,
        OciClient:        pageData.OciClient,
    }

    if err := h.Templates.ExecuteTemplate(w, "digest", data); err != nil {

@@ -1,12 +1,15 @@
package handlers

import (
    "fmt"
    "log/slog"
    "net/http"
    "strings"
    "sync"

    "atcr.io/pkg/appview/db"
    "atcr.io/pkg/appview/holdclient"
    "atcr.io/pkg/appview/middleware"
    "atcr.io/pkg/atproto"
    "github.com/go-chi/chi/v5"
)

@@ -21,99 +24,202 @@ func (h *DigestContentHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
    identifier := chi.URLParam(r, "handle")
    wildcard := strings.TrimPrefix(chi.URLParam(r, "*"), "/")

    // The wildcard is the repository name
    repository := wildcard

    // The platform digest comes from query param
    digest := r.URL.Query().Get("digest")
    if digest == "" || repository == "" {
        http.Error(w, "missing parameters", http.StatusBadRequest)
        return
    }

    // Resolve identity
    did, _, _, err := atproto.ResolveIdentity(r.Context(), identifier)
    if err != nil {
        http.Error(w, "not found", http.StatusNotFound)
        return
    }

    // Fetch manifest details for the platform digest
    manifest, err := db.GetManifestDetail(h.ReadOnlyDB, did, repository, digest)
    if err != nil {
        http.Error(w, "manifest not found", http.StatusNotFound)
        return
    }

    // Fetch layers from DB
    var layers []LayerDetail
    var vulnData *vulnDetailsData

    dbLayers, err := db.GetLayersForManifest(h.ReadOnlyDB, manifest.ID)
    if err != nil {
        slog.Warn("Failed to fetch layers", "error", err)
    }

    // Resolve hold endpoint (follow successor if migrated)
    hold, holdErr := ResolveHold(r.Context(), h.ReadOnlyDB, manifest.HoldEndpoint)
    holdReachable := holdErr == nil

    // Fetch OCI image config from hold for layer history
    if holdErr == nil {
        config, err := holdclient.FetchImageConfig(r.Context(), hold.URL, digest)
        if err == nil {
            layers = buildLayerDetails(config.History, dbLayers)
        } else {
            slog.Warn("Failed to fetch image config", "error", err,
                "holdEndpoint", manifest.HoldEndpoint, "manifestDigest", digest)
            layers = buildLayerDetails(nil, dbLayers)
    // Helm charts have no scannable layers / vulns / SBOM. Render helm-aware
    // content for the default + "chart" sections, and a not-applicable
    // placeholder for the legacy layers / vulns / sbom sections (which
    // shouldn't be requested for helm but might be if a stale tab fires).
    if manifest.ArtifactType == db.ArtifactTypeHelmChart {
        holdURL := ""
        if holdReachable {
            holdURL = hold.URL
        }
        helm := buildHelmContent(r.Context(), holdURL, digest, dbLayers)
        if !holdReachable {
            helm.HoldUnreachable = true
        }
        helmData := struct {
            Manifest    *db.ManifestWithMetadata
            HelmContent *HelmChartContent
            RegistryURL string
            OwnerHandle string
            RepoName    string
            OciClient   string
            IsLoggedIn  bool
        }{
            Manifest:    manifest,
            HelmContent: helm,
            RegistryURL: h.RegistryURL,
            OwnerHandle: identifier,
            RepoName:    repository,
            OciClient:   "", // helm switcher ignores this field
            IsLoggedIn:  middleware.GetUser(r) != nil,
        }
        w.Header().Set("Content-Type", "text/html")
        section := r.URL.Query().Get("section")
        switch section {
        case "chart":
            // Used by the repo page's chart tab — no install card here
            // because repo-tag-section already renders one at the top.
            if err := h.Templates.ExecuteTemplate(w, "helm-chart-info", helmData); err != nil {
                slog.Warn("Failed to render helm chart info", "error", err)
                RenderHTMXError(w, r, http.StatusInternalServerError, "Could not render helm chart", err)
            }
        case "layers", "vulns", "sbom":
            // Defensive fallback if a stale tab somehow fires. The repo page
            // hides these tabs for helm; this should be unreachable.
            fmt.Fprint(w, `<p class="text-base-content/70 py-8">Helm charts don't have layers, vulnerabilities, or SBOMs.</p>`)
        default:
            // Digest detail page (full helm view, with install card).
            if err := h.Templates.ExecuteTemplate(w, "helm-digest-content", helmData); err != nil {
                slog.Warn("Failed to render helm digest content", "error", err)
                RenderHTMXError(w, r, http.StatusInternalServerError, "Could not render helm chart", err)
            }
        }
        return
    }

    // Parallelize the three hold fetches. They're independent and each
    // takes a network round-trip; serial runs add up on slow links.
    var (
        layers           []LayerDetail
        vulnData         *vulnDetailsData
        sbomData         *sbomDetailsData
        configFetchError bool
    )

    if holdReachable {
        var wg sync.WaitGroup
        wg.Add(3)

        go func() {
            defer wg.Done()
            config, err := holdclient.FetchImageConfig(r.Context(), hold.URL, digest)
            if err == nil {
                layers = buildLayerDetails(config.History, dbLayers)
            } else {
                slog.Warn("Failed to fetch image config", "error", err,
                    "holdEndpoint", manifest.HoldEndpoint, "manifestDigest", digest)
                layers = buildLayerDetails(nil, dbLayers)
                configFetchError = true
            }
        }()

        go func() {
            defer wg.Done()
            vd := FetchVulnDetails(r.Context(), hold.DID, digest)
            vulnData = &vd
        }()

        go func() {
            defer wg.Done()
            sd := FetchSbomDetails(r.Context(), hold.DID, digest)
            sbomData = &sd
        }()

        wg.Wait()
    } else {
        layers = buildLayerDetails(nil, dbLayers)
    }

    // Fetch vulnerability and SBOM details
    var sbomData *sbomDetailsData
    if holdErr == nil {
        vd := FetchVulnDetails(r.Context(), hold.DID, digest)
        vulnData = &vd
        sd := FetchSbomDetails(r.Context(), hold.DID, digest)
        sbomData = &sd
    // VulnReason / SbomReason let the template branch distinctly on why
    // data is missing instead of collapsing causes into a generic message.
    //   ok               — data is present
    //   hold-unreachable — we couldn't reach the hold
    //   not-scanned      — hold is up but no scan record exists
    //   not-applicable   — scan record exists with status="skipped" (artifact
    //                      type isn't scanned, e.g. in-toto, DSSE — helm
    //                      charts go through a separate code path)
    //   fetch-failed     — scan record fetch failed on the hold
    vulnReason := "ok"
    if !holdReachable {
        vulnReason = "hold-unreachable"
    } else if vulnData == nil || vulnData.Error == "never-scanned" {
        vulnReason = "not-scanned"
    } else if vulnData.Status == atproto.ScanStatusSkipped {
        vulnReason = "not-applicable"
    } else if vulnData.Error != "" {
        vulnReason = "fetch-failed"
    }

    sbomReason := "ok"
    if !holdReachable {
        sbomReason = "hold-unreachable"
    } else if sbomData == nil || sbomData.Error == "never-scanned" {
        sbomReason = "not-scanned"
    } else if sbomData.Status == atproto.ScanStatusSkipped {
        sbomReason = "not-applicable"
    } else if sbomData.Error != "" {
        sbomReason = "fetch-failed"
    }

    data := struct {
        Layers   []LayerDetail
        VulnData *vulnDetailsData
        SbomData *sbomDetailsData
        Layers           []LayerDetail
        VulnData         *vulnDetailsData
        SbomData         *sbomDetailsData
        HoldReachable    bool
        ConfigFetchError bool
        VulnReason       string
        SbomReason       string
    }{
        Layers:   layers,
        VulnData: vulnData,
        SbomData: sbomData,
        Layers:           layers,
        VulnData:         vulnData,
        SbomData:         sbomData,
        HoldReachable:    holdReachable,
        ConfigFetchError: configFetchError,
        VulnReason:       vulnReason,
        SbomReason:       sbomReason,
    }

    w.Header().Set("Content-Type", "text/html")

    // Support rendering individual sections for repo page tabs
    section := r.URL.Query().Get("section")
    switch section {
    case "layers":
        if err := h.Templates.ExecuteTemplate(w, "layers-section", data); err != nil {
            slog.Warn("Failed to render layers section", "error", err)
            http.Error(w, err.Error(), http.StatusInternalServerError)
            RenderHTMXError(w, r, http.StatusInternalServerError, "Could not render layers", err)
        }
    case "vulns":
        if err := h.Templates.ExecuteTemplate(w, "vulns-section", data); err != nil {
            slog.Warn("Failed to render vulns section", "error", err)
            http.Error(w, err.Error(), http.StatusInternalServerError)
            RenderHTMXError(w, r, http.StatusInternalServerError, "Could not render vulnerabilities", err)
        }
    case "sbom":
        if err := h.Templates.ExecuteTemplate(w, "sbom-section", data); err != nil {
            slog.Warn("Failed to render sbom section", "error", err)
            http.Error(w, err.Error(), http.StatusInternalServerError)
            RenderHTMXError(w, r, http.StatusInternalServerError, "Could not render SBOM", err)
        }
    default:
        if err := h.Templates.ExecuteTemplate(w, "digest-content", data); err != nil {
            slog.Warn("Failed to render digest content", "error", err)
            http.Error(w, err.Error(), http.StatusInternalServerError)
            RenderHTMXError(w, r, http.StatusInternalServerError, "Could not render content", err)
        }
    }
}

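// For reference, a section template can branch on the reason codes above
// roughly like this (hypothetical markup — the real vulns-section template
// is not part of this diff):
//
//	{{if eq .VulnReason "ok"}} …match table…
//	{{else if eq .VulnReason "hold-unreachable"}} Hold offline — scan data unavailable.
//	{{else if eq .VulnReason "not-scanned"}} Not scanned yet.
//	{{else if eq .VulnReason "not-applicable"}} This artifact type isn't scanned.
//	{{else}} Couldn't load scan results. {{end}}
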
@@ -1,6 +1,8 @@
package handlers

import (
    "encoding/json"
    "log/slog"
    "net/http"
)

@@ -36,3 +38,33 @@ func RenderNotFound(w http.ResponseWriter, r *http.Request, h *BaseUIHandler) {
        http.Error(w, "Page not found", http.StatusNotFound)
    }
}

// RenderHTMXError sends an error response suitable for htmx. For htmx requests
// it sets an HX-Trigger header so the client fires a toast event; the JS
// fallback in app.js will show a generic toast even without the header.
// For non-htmx requests it falls back to http.Error. serverErr is logged but
// never exposed to the user — pass userMsg for anything screen-readable.
func RenderHTMXError(w http.ResponseWriter, r *http.Request, status int, userMsg string, serverErr error) {
    if serverErr != nil {
        slog.Error("htmx handler error",
            "path", r.URL.Path,
            "status", status,
            "err", serverErr,
        )
    }
    if userMsg == "" {
        userMsg = http.StatusText(status)
    }
    if r.Header.Get("HX-Request") == "true" {
        trigger := map[string]map[string]string{
            "toast": {"message": userMsg, "type": "error"},
        }
        if b, err := json.Marshal(trigger); err == nil {
            w.Header().Set("HX-Trigger", string(b))
        }
        w.Header().Set("HX-Reswap", "none")
        w.WriteHeader(status)
        return
    }
    http.Error(w, userMsg, status)
}

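// Wire-level effect (derived from the code above): for
// userMsg = "Could not render layers", an htmx request gets back
//
//	HX-Trigger: {"toast":{"message":"Could not render layers","type":"error"}}
//	HX-Reswap: none
//
// with an empty body at the given status — htmx fires the "toast" event
// client-side instead of swapping any content.
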
pkg/appview/handlers/hold_purge.go (new file, 118 lines)
@@ -0,0 +1,118 @@
package handlers

import (
    "bytes"
    "context"
    "encoding/json"
    "io"
    "log/slog"
    "net/http"
    "time"

    "atcr.io/pkg/atproto"
    "atcr.io/pkg/auth"
    "atcr.io/pkg/auth/oauth"
)

// purgeManifestRequest is the JSON body sent to io.atcr.hold.purgeManifest.
type purgeManifestRequest struct {
    ManifestURI string `json:"manifestUri"`
}

// purgeOnHold tells the hold to delete the layer, scan, and image-config
// records associated with a single manifest. This is best-effort: callers
// should treat all errors as "log and continue" because lazy GC on the hold
// will catch up either way (and on third-party holds the user may not even
// have the captain/crew-admin permission needed for the call to succeed).
//
// holdDID identifies which hold owns the manifest's blobs (typically the
// `hold_endpoint` column on the manifests row, or a freshly-resolved value
// from the manifest record). userDID + pdsEndpoint are the OAuth-acting
// user — the service token is minted from their PDS with audience = holdDID.
func purgeOnHold(ctx context.Context, refresher *oauth.Refresher, userDID, pdsEndpoint, holdDID, manifestURI string) {
    if holdDID == "" || manifestURI == "" {
        return
    }
    if refresher == nil {
        slog.Debug("purgeOnHold: OAuth refresher unavailable; skipping",
            "hold_did", holdDID, "manifest", manifestURI)
        return
    }

    timeoutCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
    defer cancel()

    holdURL, err := atproto.ResolveHoldURL(timeoutCtx, holdDID)
    if err != nil {
        slog.Warn("purgeOnHold: failed to resolve hold URL",
            "hold_did", holdDID, "error", err)
        return
    }

    serviceToken, err := auth.GetOrFetchServiceToken(timeoutCtx, refresher, userDID, holdDID, pdsEndpoint)
    if err != nil {
        slog.Warn("purgeOnHold: failed to mint service token",
            "hold_did", holdDID, "user_did", userDID, "error", err)
        return
    }

    body, err := json.Marshal(purgeManifestRequest{ManifestURI: manifestURI})
    if err != nil {
        slog.Warn("purgeOnHold: failed to marshal request",
            "hold_did", holdDID, "error", err)
        return
    }

    req, err := http.NewRequestWithContext(timeoutCtx, http.MethodPost,
        holdURL+atproto.HoldPurgeManifest, bytes.NewReader(body))
    if err != nil {
        slog.Warn("purgeOnHold: failed to create request",
            "hold_did", holdDID, "error", err)
        return
    }
    req.Header.Set("Authorization", "Bearer "+serviceToken)
    req.Header.Set("Content-Type", "application/json")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        slog.Warn("purgeOnHold: request failed",
            "hold_did", holdDID, "manifest", manifestURI, "error", err)
        return
    }
    defer resp.Body.Close()

    if resp.StatusCode == http.StatusForbidden || resp.StatusCode == http.StatusUnauthorized {
        // Sailor pushing to a third-party hold won't have captain/crew-admin
        // rights; that's expected. Lazy GC on that hold will reclaim later.
        slog.Debug("purgeOnHold: not authorized on hold (lazy GC will handle)",
            "hold_did", holdDID, "manifest", manifestURI, "status", resp.StatusCode)
        return
    }
    if resp.StatusCode != http.StatusOK {
        body, _ := io.ReadAll(io.LimitReader(resp.Body, 4096))
        slog.Warn("purgeOnHold: hold returned non-OK status",
            "hold_did", holdDID, "manifest", manifestURI,
            "status", resp.StatusCode, "body", string(body))
        return
    }

    var out struct {
        Success            bool `json:"success"`
        LayersDeleted      int  `json:"layersDeleted"`
        ScanDeleted        bool `json:"scanDeleted"`
        ImageConfigDeleted bool `json:"imageConfigDeleted"`
    }
    if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
        slog.Warn("purgeOnHold: failed to parse response",
            "hold_did", holdDID, "manifest", manifestURI, "error", err)
        return
    }

    slog.Info("purgeOnHold: purge succeeded",
        "hold_did", holdDID,
        "manifest", manifestURI,
        "layers_deleted", out.LayersDeleted,
        "scan_deleted", out.ScanDeleted,
        "image_config_deleted", out.ImageConfigDeleted,
    )
}

@@ -4,7 +4,7 @@
package handlers

import (
    "log"
    "log/slog"
    "net/http"

    "atcr.io/pkg/appview/db"
@@ -17,25 +17,29 @@ type HomeHandler struct {
}

func (h *HomeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    // Get current user DID (empty string if not logged in)
    var currentUserDID string
    if user := middleware.GetUser(r); user != nil {
        currentUserDID = user.DID
    }

    // Fetch featured repositories (top 6 by score - carousel cycles through them)
    // Track whether either card query failed so the page can surface a
    // distinct error banner instead of the "no repos yet" empty state.
    // Partial failures still render whatever did succeed.
    var queryError bool

    featuredCards, err := db.GetRepoCards(h.ReadOnlyDB, 6, currentUserDID, db.SortByScore)
    if err != nil {
        log.Printf("Error fetching featured repos: %v", err)
        slog.Error("home: fetch featured repos", "err", err)
        featuredCards = []db.RepoCardData{}
        queryError = true
    }
    db.SetRegistryURL(featuredCards, h.RegistryURL)

    // Fetch recently updated repositories (top 18 by last push - 6 rows)
    recentCards, err := db.GetRepoCards(h.ReadOnlyDB, 18, currentUserDID, db.SortByLastUpdate)
    if err != nil {
        log.Printf("Error fetching recent repos: %v", err)
        slog.Error("home: fetch recent repos", "err", err)
        recentCards = []db.RepoCardData{}
        queryError = true
    }
    db.SetRegistryURL(recentCards, h.RegistryURL)

@@ -48,6 +52,7 @@ func (h *HomeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
        Meta          *PageMeta
        FeaturedRepos []db.RepoCardData
        RecentRepos   []db.RepoCardData
        HasError      bool
    }{
        PageData: pageData,
        Meta: NewPageMeta(
@@ -63,6 +68,7 @@ func (h *HomeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
        ),
        FeaturedRepos: featuredCards,
        RecentRepos:   recentCards,
        HasError:      queryError,
    }

    if err := h.Templates.ExecuteTemplate(w, "home", data); err != nil {

@@ -40,8 +40,16 @@ type advisorSuggestion struct {
type imageAdvisorData struct {
    Suggestions []advisorSuggestion
    Error       string
    // Model is shown in the results footer so users can attribute the
    // suggestions to a specific model without us hardcoding it in the template.
    Model string
}

// advisorModel is the Claude model used for image suggestions. Kept in one
// place so the API call and the template footer stay in sync.
const advisorModel = "claude-haiku-4-5-20251001"
const advisorModelDisplay = "Claude Haiku 4.5"

// OCI config types for full image config parsing
type advisorOCIConfig struct {
    Architecture string `json:"architecture"`
@@ -168,7 +176,7 @@ func (h *ImageAdvisorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
        suggestions, err := parseAdvisorResponse(cachedJSON)
        if err == nil {
            slog.Debug("Serving cached advisor suggestions", "digest", digest)
            h.renderResults(w, imageAdvisorData{Suggestions: suggestions})
            h.renderResults(w, imageAdvisorData{Suggestions: suggestions, Model: advisorModelDisplay})
            return
        }
        slog.Debug("Cached advisor data unparseable, fetching fresh", "digest", digest)
@@ -217,11 +225,13 @@ func (h *ImageAdvisorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
    var promptBuf strings.Builder
    generateAdvisorPrompt(&promptBuf, report)

    // Call Claude API
    // Call Claude API. The raw error often contains upstream HTTP body text
    // which we must not surface to the user (potential secrets/PII). Log the
    // detail; show a stable, sanitized message.
    responseText, err := callClaudeAPI(ctx, h.ClaudeAPIKey, promptBuf.String())
    if err != nil {
        slog.Warn("Claude API call failed", "error", err)
        h.renderResults(w, imageAdvisorData{Error: "AI service request failed: " + err.Error()})
        h.renderResults(w, imageAdvisorData{Error: "The AI service couldn't generate suggestions right now. Please try again in a minute."})
        return
    }

@@ -229,7 +239,7 @@ func (h *ImageAdvisorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
    suggestions, err := parseAdvisorResponse(responseText)
    if err != nil {
        slog.Warn("Failed to parse advisor response", "error", err, "response", responseText)
        h.renderResults(w, imageAdvisorData{Error: "Failed to parse AI response"})
        h.renderResults(w, imageAdvisorData{Error: "We got a response from the AI service but couldn't read it. Please try again."})
        return
    }

@@ -238,7 +248,7 @@ func (h *ImageAdvisorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
        slog.Warn("Failed to cache advisor suggestions", "error", err)
    }

    h.renderResults(w, imageAdvisorData{Suggestions: suggestions})
    h.renderResults(w, imageAdvisorData{Suggestions: suggestions, Model: advisorModelDisplay})
}

func (h *ImageAdvisorHandler) renderResults(w http.ResponseWriter, data imageAdvisorData) {
@@ -583,7 +593,7 @@ func generateAdvisorPrompt(w io.Writer, r *advisorReportData) {
// callClaudeAPI sends the prompt to Claude Haiku using tool use and returns the structured JSON.
func callClaudeAPI(ctx context.Context, apiKey, prompt string) (string, error) {
    reqBody := map[string]any{
        "model": "claude-haiku-4-5-20251001",
        "model":      advisorModel,
        "max_tokens": 2048,
        "system":     "Analyze the container image data. Provide actionable suggestions sorted by impact (highest first).",
        "tools": []map[string]any{{

@@ -156,6 +156,16 @@ func (h *DeleteManifestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
        }
    }

    // Read the appview's cached manifest row before deleting it so we know
    // which hold owned the blobs. Best-effort — if not cached, the manifest
    // record from the PDS would also have it but we don't pre-fetch the PDS
    // record just for this. Without a hold DID we just skip the eager purge
    // and fall back to lazy GC on the hold.
    var holdDID string
    if cached, err := db.GetManifestDetail(h.ReadOnlyDB, user.DID, repo, digest); err == nil && cached != nil {
        holdDID = cached.HoldEndpoint
    }

    // Compute rkey for manifest record (digest without "sha256:" prefix)
    rkey := strings.TrimPrefix(digest, "sha256:")

@@ -176,6 +186,12 @@ func (h *DeleteManifestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
        return
    }

    // Tell the hold to drop its layer/scan/image-config records for this
    // manifest. Best-effort — failures here only mean the hold's lazy GC
    // will clean up later, so we don't reflect the failure to the user.
    manifestURI := atproto.BuildManifestURI(user.DID, digest)
    purgeOnHold(r.Context(), h.Refresher, user.DID, user.PDSEndpoint, holdDID, manifestURI)

    w.WriteHeader(http.StatusOK)
}

@@ -192,59 +208,85 @@ type DeleteUntaggedManifestsHandler struct {
}

func (h *DeleteUntaggedManifestsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    user := middleware.GetUser(r)
    if user == nil {
        http.Error(w, "Unauthorized", http.StatusUnauthorized)
        render.Status(r, http.StatusUnauthorized)
        render.JSON(w, r, map[string]string{"error": "Unauthorized"})
        return
    }

    var req deleteUntaggedRequest
    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
        http.Error(w, "Invalid request body", http.StatusBadRequest)
        render.Status(r, http.StatusBadRequest)
        render.JSON(w, r, map[string]string{"error": "Invalid request body"})
        return
    }

    digests, err := db.GetAllUntaggedManifestDigests(h.DB, user.DID, req.Repo)
    if err != nil {
        http.Error(w, fmt.Sprintf("Failed to query untagged manifests: %v", err), http.StatusInternalServerError)
        render.Status(r, http.StatusInternalServerError)
        render.JSON(w, r, map[string]any{
            "error":   fmt.Sprintf("Failed to query untagged manifests: %v", err),
            "deleted": 0,
            "failed":  0,
            "total":   0,
        })
        return
    }

    if len(digests) == 0 {
        render.JSON(w, r, map[string]int{"deleted": 0})
        render.JSON(w, r, map[string]any{"deleted": 0, "failed": 0, "total": 0})
        return
    }

    pdsClient := atproto.NewClientWithSessionProvider(user.PDSEndpoint, user.DID, h.Refresher)

    deleted := 0
    type failure struct {
        Digest string `json:"digest"`
        Error  string `json:"error"`
    }
    var failures []failure

    for _, digest := range digests {
        rkey := strings.TrimPrefix(digest, "sha256:")

        // Snapshot hold ownership before the delete so we can purge after.
        var holdDID string
        if cached, err := db.GetManifestDetail(h.ReadOnlyDB, user.DID, req.Repo, digest); err == nil && cached != nil {
            holdDID = cached.HoldEndpoint
        }

        if err := pdsClient.DeleteRecord(r.Context(), atproto.ManifestCollection, rkey); err != nil {
            if handleOAuthError(r.Context(), h.Refresher, user.DID, err) {
                http.Error(w, "Authentication failed, please log in again", http.StatusUnauthorized)
                render.Status(r, http.StatusUnauthorized)
                render.JSON(w, r, map[string]any{
                    "error":   "Authentication failed, please log in again",
                    "deleted": deleted,
                    "failed":  len(failures),
                    "total":   len(digests),
                })
                return
            }
            render.Status(r, http.StatusInternalServerError)
            render.JSON(w, r, map[string]any{
                "error":   fmt.Sprintf("Failed to delete manifest %s from PDS: %v", digest, err),
                "deleted": deleted,
            })
            return
            failures = append(failures, failure{Digest: digest, Error: fmt.Sprintf("PDS: %v", err)})
            continue
        }

        if err := db.DeleteManifest(h.DB, user.DID, req.Repo, digest); err != nil {
            render.Status(r, http.StatusInternalServerError)
            render.JSON(w, r, map[string]any{
                "error":   fmt.Sprintf("Failed to delete manifest %s from cache: %v", digest, err),
                "deleted": deleted,
            })
            return
            failures = append(failures, failure{Digest: digest, Error: fmt.Sprintf("cache: %v", err)})
            continue
        }

        manifestURI := atproto.BuildManifestURI(user.DID, digest)
        purgeOnHold(r.Context(), h.Refresher, user.DID, user.PDSEndpoint, holdDID, manifestURI)

        deleted++
    }

    render.JSON(w, r, map[string]int{"deleted": deleted})
    render.JSON(w, r, map[string]any{
        "deleted":  deleted,
        "failed":   len(failures),
        "total":    len(digests),
        "failures": failures,
    })
}

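// The endpoint now reports per-digest outcomes instead of aborting on the
// first failure. A client-side mirror of the response shape (illustrative;
// field names are taken from the handler above):
//
//	type deleteUntaggedResponse struct {
//		Deleted  int `json:"deleted"`
//		Failed   int `json:"failed"`
//		Total    int `json:"total"`
//		Failures []struct {
//			Digest string `json:"digest"`
//			Error  string `json:"error"`
//		} `json:"failures"`
//	}
//
// so a partial success might decode as
// {"deleted": 4, "failed": 1, "total": 5, "failures": [{"digest": "sha256:…", "error": "PDS: …"}]}.
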
// UploadAvatarHandler handles uploading/updating a repository avatar

@@ -2,14 +2,49 @@ package handlers

import (
    "net/http"
    "time"
)

// LegalPageData contains data for legal pages (terms, privacy)
// LegalPageData contains data for legal pages (terms, privacy).
type LegalPageData struct {
    PageData
    Meta         *PageMeta
    CompanyName  string
    Jurisdiction string
    LastUpdated  string
}

// legalDefaults applies sensible fallbacks for operators who haven't set
// CompanyName/Jurisdiction in config.
func legalDefaults(company, jurisdiction string) (string, string) {
    if company == "" {
        company = "the Service"
    }
    if jurisdiction == "" {
        jurisdiction = "United States"
    }
    return company, jurisdiction
}

// Stamped at build time from the git commit date of the corresponding page
// template via -ldflags -X (see Makefile). Empty falls back to legalFallbackDate
// for bare `go build` / builds without a .git directory.
var (
    privacyLastUpdated string
    termsLastUpdated   string
)

const legalFallbackDate = "April 2026"

func formatLegalDate(raw string) string {
    if raw == "" {
        return legalFallbackDate
    }
    t, err := time.Parse("2006-01-02", raw)
    if err != nil {
        return raw
    }
    return t.Format("January 2, 2006")
}

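// The stamping described above would be a linker flag of roughly this shape
// (a sketch — the exact Makefile target is an assumption; the package path
// follows this file's package):
//
//	go build -ldflags "\
//	  -X 'atcr.io/pkg/appview/handlers.privacyLastUpdated=2026-04-01' \
//	  -X 'atcr.io/pkg/appview/handlers.termsLastUpdated=2026-04-01'"
//
// formatLegalDate("2026-04-01") then renders "April 1, 2026"; an unstamped
// build falls back to legalFallbackDate ("April 2026").
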
// PrivacyPolicyHandler handles the /privacy page
@@ -24,11 +59,13 @@ func (h *PrivacyPolicyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
    ).WithCanonical("https://" + h.SiteURL + "/privacy").
        WithSiteName(h.ClientShortName)

    company, jurisdiction := legalDefaults(h.CompanyName, h.Jurisdiction)
    data := LegalPageData{
        PageData: NewPageData(r, &h.BaseUIHandler),
        Meta:     meta,
        CompanyName:  h.CompanyName,
        Jurisdiction: h.Jurisdiction,
        CompanyName:  company,
        Jurisdiction: jurisdiction,
        LastUpdated:  formatLegalDate(privacyLastUpdated),
    }

    if err := h.Templates.ExecuteTemplate(w, "privacy", data); err != nil {
@@ -49,11 +86,13 @@ func (h *TermsOfServiceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
    ).WithCanonical("https://" + h.SiteURL + "/terms").
        WithSiteName(h.ClientShortName)

    company, jurisdiction := legalDefaults(h.CompanyName, h.Jurisdiction)
    data := LegalPageData{
        PageData: NewPageData(r, &h.BaseUIHandler),
        Meta:     meta,
        CompanyName:  h.CompanyName,
        Jurisdiction: h.Jurisdiction,
        CompanyName:  company,
        Jurisdiction: jurisdiction,
        LastUpdated:  formatLegalDate(termsLastUpdated),
    }

    if err := h.Templates.ExecuteTemplate(w, "terms", data); err != nil {

@@ -2,12 +2,45 @@ package handlers

import (
    "context"
    "errors"
    "log/slog"
    "net"
    "net/http"
    "net/url"
    "strings"
    "time"
)

// classifyHealthError maps a CheckHealth error into a short reason code that
// the template turns into a distinct tooltip. Prevents the badge from
// collapsing every failure mode into a generic "Offline".
//
// Returns one of: "dns", "tls", "refused", "timeout", "http", "unknown"
// (empty string when err is nil).
func classifyHealthError(err error) string {
    if err == nil {
        return ""
    }
    var dnsErr *net.DNSError
    if errors.As(err, &dnsErr) {
        return "dns"
    }
    msg := strings.ToLower(err.Error())
    if strings.Contains(msg, "x509") || strings.Contains(msg, "tls:") || strings.Contains(msg, "certificate") {
        return "tls"
    }
    if strings.Contains(msg, "connection refused") {
        return "refused"
    }
    if strings.Contains(msg, "timeout") || strings.Contains(msg, "deadline exceeded") {
        return "timeout"
    }
    if strings.Contains(msg, "status") || strings.Contains(msg, "http") {
        return "http"
    }
    return "unknown"
}

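// Representative mappings (inputs are illustrative, not exhaustive):
//
//	&net.DNSError{...}                                    -> "dns"
//	"x509: certificate signed by unknown authority"       -> "tls"
//	"dial tcp 10.0.0.1:443: connect: connection refused"  -> "refused"
//	"context deadline exceeded"                           -> "timeout"
//	"health check returned status 503"                    -> "http"
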
// ManifestHealthHandler handles HTMX polling for manifest health status
type ManifestHealthHandler struct {
BaseUIHandler
@@ -32,7 +65,7 @@ func (h *ManifestHealthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
cached := h.HealthChecker.GetCachedStatus(endpoint)
if cached != nil {
// Cache hit - return final status
h.renderBadge(w, endpoint, cached.Reachable, false)
h.renderBadge(w, endpoint, cached.Reachable, false, "")
return
}

@@ -43,30 +76,31 @@ func (h *ManifestHealthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
reachable, err := h.HealthChecker.CheckHealth(ctx, endpoint)

// Check for HTTP errors first (connection refused, network unreachable, etc.)
// This ensures we catch real failures even when timing aligns with context timeout
// This ensures we catch real failures even when timing aligns with context timeout.
if err != nil {
// Error - mark as unreachable
h.renderBadge(w, endpoint, false, false)
h.renderBadge(w, endpoint, false, false, classifyHealthError(err))
} else if ctx.Err() == context.DeadlineExceeded {
// Context timed out but no HTTP error yet - still pending
h.renderBadge(w, endpoint, false, true)
h.renderBadge(w, endpoint, false, true, "")
} else {
// Success
h.renderBadge(w, endpoint, reachable, false)
h.renderBadge(w, endpoint, reachable, false, "")
}
}

// renderBadge renders the appropriate badge HTML snippet
func (h *ManifestHealthHandler) renderBadge(w http.ResponseWriter, endpoint string, reachable, pending bool) {
// renderBadge renders the appropriate badge HTML snippet. Reason is one of the
// classifyHealthError codes ("dns", "tls", "refused", "timeout", "http",
// "unknown") or empty for success / pending states.
func (h *ManifestHealthHandler) renderBadge(w http.ResponseWriter, endpoint string, reachable, pending bool, reason string) {
w.Header().Set("Content-Type", "text/html")

data := struct {
Pending bool
Reachable bool
Reason string
RetryURL string
}{
Pending: pending,
Reachable: reachable,
Reason: reason,
RetryURL: url.QueryEscape(endpoint),
}
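The badge template that consumes Reason is not included in this diff. As a rough sketch of the intent, the template (or a helper) can map each code to distinct tooltip copy along these lines; the wording below is illustrative, not the actual strings.

// Illustrative only: the real copy lives in the badge template, not in Go.
var healthReasonTips = map[string]string{
	"dns":     "DNS lookup failed",
	"tls":     "TLS or certificate error",
	"refused": "Connection refused",
	"timeout": "Health check timed out",
	"http":    "Unexpected HTTP response",
	"unknown": "Offline",
}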
@@ -3,18 +3,23 @@ package handlers
// PageMeta holds all metadata for a page's <head> section.
// Use the builder methods to construct it with a fluent API.
type PageMeta struct {
Title string // Page title (required)
Description string // Meta description (required)
Title string // Page title (required; empty falls back to SiteName in template)
Description string // Meta description (required; empty omits the tag entirely)
Canonical string // Canonical URL (optional)
Robots string // Robots directive, e.g. "noindex" (optional, defaults to "index, follow")
OGType string // OpenGraph type, defaults to "website"
OGImage string // OpenGraph image URL (optional)
OGImageAlt string // OpenGraph image alt text — improves social-share a11y
OGLocale string // OpenGraph locale (e.g. "en_US"); blank falls back in template
TwitterCard string // Twitter card type, defaults to "summary_large_image"
SiteName string // Site name for og:site_name (optional, defaults to "ATCR")
SiteName string // Site name for og:site_name (falls back to "ATCR" in template)
JSONLD []any // JSON-LD structured data objects (optional)
}

// NewPageMeta creates a new PageMeta with required fields and sensible defaults.
// Callers should not pass empty title/description — the template falls back to
// the SiteName for missing title and omits missing description, but those are
// last-resort defenses.
func NewPageMeta(title, description string) *PageMeta {
return &PageMeta{
Title: title,
@@ -36,6 +41,19 @@ func (m *PageMeta) WithOGImage(url string) *PageMeta {
return m
}

// WithOGImageAlt sets the alt text for the OpenGraph image. Strongly recommended
// when OGImage is set — screen readers on social platforms read this out.
func (m *PageMeta) WithOGImageAlt(alt string) *PageMeta {
m.OGImageAlt = alt
return m
}

// WithOGLocale overrides the default "en_US" locale.
func (m *PageMeta) WithOGLocale(locale string) *PageMeta {
m.OGLocale = locale
return m
}

// WithOGType sets the OpenGraph type (e.g., "website", "profile", "article").
func (m *PageMeta) WithOGType(ogType string) *PageMeta {
m.OGType = ogType
@@ -54,7 +72,9 @@ func (m *PageMeta) WithJSONLD(data ...any) *PageMeta {
return m
}

// WithSiteName sets the site name for og:site_name.
// WithSiteName sets the site name for og:site_name. Pass the caller's
// ClientShortName — forgetting this on a branded deployment (e.g. Seamark)
// leaks "ATCR" into social previews.
func (m *PageMeta) WithSiteName(name string) *PageMeta {
m.SiteName = name
return m
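Taken together, the builder methods in this file compose like this. A usage sketch only: the repository name, URLs, and locale are placeholders, and "Seamark" is simply the branded-deployment example from the comment above.

func examplePageMeta() *PageMeta {
	return NewPageMeta(
		"example/app tags",
		"Container image tags for example/app",
	).
		WithCanonical("https://registry.example.com/example/app").
		WithSiteName("Seamark").
		WithOGImage("https://registry.example.com/og/example-app.png").
		WithOGImageAlt("Repository card for example/app").
		WithOGLocale("en_GB")
}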
@@ -170,8 +170,8 @@ func (h *UserOGHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
user = &db.User{DID: did, Handle: resolvedHandle}
}

// Get repository count
repos, err := db.GetUserRepositories(h.ReadOnlyDB, did)
// Get repository count (OG cards render for anonymous crawlers)
repos, err := db.GetUserRepositories(h.ReadOnlyDB, did, "")
repoCount := 0
if err == nil {
repoCount = len(repos)
@@ -44,6 +44,12 @@ func (h *RepositoryPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
return
}

// Check for takedown labels
if taken, _ := db.IsTakenDown(h.ReadOnlyDB, did, repository); taken {
RenderNotFound(w, r, &h.BaseUIHandler)
return
}

// Look up user by DID
owner, err := db.GetUserByDID(h.ReadOnlyDB, did)
if err != nil {
@@ -57,7 +63,7 @@ func (h *RepositoryPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request

// Opportunistically update cached handle if it changed
if owner.Handle != resolvedHandle {
_ = db.UpdateUserHandle(h.ReadOnlyDB, did, resolvedHandle)
_ = db.UpdateUserHandle(h.DB, did, resolvedHandle)
owner.Handle = resolvedHandle
}

@@ -72,25 +78,26 @@ func (h *RepositoryPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
return
}

// Fetch all tag names for the selector dropdown
allTags, err := db.GetAllTagNames(h.ReadOnlyDB, owner.DID, repository)
// Resolve viewer DID for hold-access filtering (empty string = anonymous)
var viewerDID string
if vu := middleware.GetUser(r); vu != nil {
viewerDID = vu.DID
}

// Fetch all tags (with digests) for the dropdown and default-selection heuristics.
tagPairs, err := db.GetAllTagsWithDigests(h.ReadOnlyDB, owner.DID, repository, viewerDID)
if err != nil {
slog.Warn("Failed to fetch tag names", "error", err)
}
allTags := make([]string, len(tagPairs))
for i, p := range tagPairs {
allTags[i] = p.Name
}

// Determine which tag to show
selectedTagName := r.URL.Query().Get("tag")
if selectedTagName == "" {
// Default: "latest" if it exists, otherwise most recent
for _, t := range allTags {
if t == "latest" {
selectedTagName = "latest"
break
}
}
if selectedTagName == "" && len(allTags) > 0 {
selectedTagName = allTags[0] // most recent (already sorted DESC)
}
if selectedTagName == "" && len(tagPairs) > 0 {
selectedTagName = db.PickDefaultTag(tagPairs)
}

// Fetch the selected tag's full data
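db.PickDefaultTag itself is not shown in this part of the diff. Judging from the inline heuristic it replaces ("latest" if present, otherwise the most recent tag), a plausible shape is the sketch below; this is an assumption, not the real pkg/db code, and the element type name TagDigest is likewise invented for illustration.

// Hypothetical sketch of PickDefaultTag; NOT the pkg/db implementation.
// Prefers "latest" when present, otherwise the first (most recent) tag.
func PickDefaultTag(tagPairs []TagDigest) string {
	for _, p := range tagPairs {
		if p.Name == "latest" {
			return "latest"
		}
	}
	if len(tagPairs) > 0 {
		return tagPairs[0].Name
	}
	return ""
}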
@@ -181,8 +188,11 @@ func (h *RepositoryPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
repo.Version = metadata["org.opencontainers.image.version"]
}

// Fetch stats
// Fetch stats. Track availability separately so the template can render
// "—" or hide the stats row instead of showing zeros that masquerade as
// real counts.
stats, err := db.GetRepositoryStats(h.ReadOnlyDB, owner.DID, repository)
statsAvailable := err == nil
if err != nil {
slog.Warn("Failed to fetch repository stats", "error", err)
stats = &db.RepositoryStats{StarCount: 0}
@@ -209,9 +219,13 @@ func (h *RepositoryPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
isOwner = (user.DID == owner.DID)
}

// Fetch README content from repo page record or annotations
// Fetch README content from repo page record or annotations.
// ReadmeFetchFailed distinguishes "owner never provided a README" (show
// CTA to add one) from "we tried to fetch the configured README and it
// failed" (show retry CTA instead).
var readmeHTML template.HTML
var rawDescription string
var readmeFetchFailed bool

repoPage, err := db.GetRepoPage(h.ReadOnlyDB, owner.DID, repository)
if err == nil && repoPage != nil {
@@ -237,15 +251,16 @@ func (h *RepositoryPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
}
}
if readmeURL != "" {
// Fetch raw markdown for editor pre-fill, then render
rawBytes, fetchErr := h.ReadmeFetcher.FetchRaw(r.Context(), readmeURL)
if fetchErr != nil {
slog.Debug("Failed to fetch README from URL", "url", readmeURL, "error", fetchErr)
readmeFetchFailed = true
} else {
rawDescription = string(rawBytes)
html, renderErr := h.ReadmeFetcher.RenderMarkdown(rawBytes)
if renderErr != nil {
slog.Debug("Failed to render fetched README", "url", readmeURL, "error", renderErr)
readmeFetchFailed = true
} else {
readmeHTML = template.HTML(html)
}
@@ -277,34 +292,59 @@ func (h *RepositoryPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
h.ClientShortName,
))

// Compute cross-hold badge: if the viewer has a default hold set and this
// repo has tags on any other accessible hold, flag them so the template
// can show an informational chip.
var nonDefaultHolds []string
if viewerDID != "" {
viewerDefaultHold := db.GetUserHoldDID(h.ReadOnlyDB, viewerDID)
if viewerDefaultHold != "" {
repoHolds, herr := db.GetRepoHoldDIDs(h.ReadOnlyDB, owner.DID, repository, viewerDID)
if herr != nil {
slog.Warn("Failed to fetch repo hold DIDs", "error", herr)
}
for _, rh := range repoHolds {
if rh != viewerDefaultHold {
nonDefaultHolds = append(nonDefaultHolds, rh)
}
}
}
}

data := struct {
PageData
Meta *PageMeta
Owner *db.User
Repository *db.Repository
AllTags []string
SelectedTag *SelectedTagData
Stats *db.RepositoryStats
TagCount int
IsStarred bool
IsOwner bool
ReadmeHTML template.HTML
RawDescription string
ArtifactType string
Meta *PageMeta
Owner *db.User
Repository *db.Repository
AllTags []string
SelectedTag *SelectedTagData
Stats *db.RepositoryStats
StatsAvailable bool
TagCount int
IsStarred bool
IsOwner bool
ReadmeHTML template.HTML
ReadmeFetchFailed bool
RawDescription string
ArtifactType string
NonDefaultHolds []string
}{
PageData: NewPageData(r, &h.BaseUIHandler),
Meta: meta,
Owner: owner,
Repository: repo,
AllTags: allTags,
SelectedTag: selectedTag,
Stats: stats,
TagCount: tagCount,
IsStarred: isStarred,
IsOwner: isOwner,
ReadmeHTML: readmeHTML,
RawDescription: rawDescription,
ArtifactType: artifactType,
PageData: NewPageData(r, &h.BaseUIHandler),
Meta: meta,
Owner: owner,
Repository: repo,
AllTags: allTags,
SelectedTag: selectedTag,
Stats: stats,
StatsAvailable: statsAvailable,
TagCount: tagCount,
IsStarred: isStarred,
IsOwner: isOwner,
ReadmeHTML: readmeHTML,
ReadmeFetchFailed: readmeFetchFailed,
RawDescription: rawDescription,
ArtifactType: artifactType,
NonDefaultHolds: nonDefaultHolds,
}

// If the owner has disabled AI advisor in their profile, hide the button
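The cross-hold loop above is small enough to read inline. If it grows, the filter could be lifted into a pure helper, which would also make it unit-testable. An illustrative refactor sketch, not part of the diff:

// Illustrative only: the same filtering as the inline loop above.
func filterNonDefaultHolds(repoHolds []string, viewerDefaultHold string) []string {
	var out []string
	for _, rh := range repoHolds {
		if rh != viewerDefaultHold {
			out = append(out, rh)
		}
	}
	return out
}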
@@ -388,6 +428,12 @@ func (h *RepositoryTagsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
}
}

// Resolve viewer DID for hold-access filtering (empty string = anonymous)
var viewerDID string
if vu := middleware.GetUser(r); vu != nil {
viewerDID = vu.DID
}

// Count total tags for pagination
totalTags, err := db.CountTags(h.ReadOnlyDB, owner.DID, repository)
if err != nil {
@@ -396,7 +442,7 @@ func (h *RepositoryTagsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
}

// Fetch tags with platform information and compressed sizes
tagsWithPlatforms, err := db.GetTagsWithPlatforms(h.ReadOnlyDB, owner.DID, repository, pageSize, offset)
tagsWithPlatforms, err := db.GetTagsWithPlatforms(h.ReadOnlyDB, owner.DID, repository, pageSize, offset, viewerDID)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
@@ -405,7 +451,7 @@ func (h *RepositoryTagsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
// Fetch untagged manifests only on first page
var manifests []db.ManifestWithMetadata
if offset == 0 {
manifests, err = db.GetTopLevelManifests(h.ReadOnlyDB, owner.DID, repository, 50, 0)
manifests, err = db.GetTopLevelManifests(h.ReadOnlyDB, owner.DID, repository, 50, 0, viewerDID)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
@@ -561,28 +607,36 @@ func (h *RepositoryTagsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
ociClient = user.OciClient
}

// Resolve viewer's default hold for per-entry badges
var viewerDefaultHold string
if viewerDID != "" {
viewerDefaultHold = db.GetUserHoldDID(h.ReadOnlyDB, viewerDID)
}

data := struct {
Owner *db.User
Repository *db.Repository
Entries []db.ManifestEntry
IsOwner bool
ScanBatchParams []template.HTML
RegistryURL string
OciClient string
HasMore bool
NextOffset int
IsFirstPage bool
Owner *db.User
Repository *db.Repository
Entries []db.ManifestEntry
IsOwner bool
ScanBatchParams []template.HTML
RegistryURL string
OciClient string
HasMore bool
NextOffset int
IsFirstPage bool
ViewerDefaultHold string
}{
Owner: owner,
Repository: &db.Repository{Name: repository},
Entries: entries,
IsOwner: isOwner,
ScanBatchParams: scanBatchParams,
RegistryURL: h.RegistryURL,
OciClient: ociClient,
HasMore: hasMore,
NextOffset: offset + pageSize,
IsFirstPage: isFirstPage,
Owner: owner,
Repository: &db.Repository{Name: repository},
Entries: entries,
IsOwner: isOwner,
ScanBatchParams: scanBatchParams,
RegistryURL: h.RegistryURL,
OciClient: ociClient,
HasMore: hasMore,
NextOffset: offset + pageSize,
IsFirstPage: isFirstPage,
ViewerDefaultHold: viewerDefaultHold,
}

w.Header().Set("Content-Type", "text/html; charset=utf-8")
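The recurring change across these hunks is threading the viewer's DID into the read queries so hold-restricted entries are filtered in the query layer; an empty string means an anonymous viewer. A sketch of the convention (illustrative only; the wrapper function and its parameters are placeholders, the signatures are the ones introduced above):

func exampleViewerScoping(h *RepositoryTagsHandler, ownerDID, repoName, viewerDID string) {
	// Anonymous request: pass "" so hold-restricted entries are excluded.
	_, _ = db.GetTagsWithPlatforms(h.ReadOnlyDB, ownerDID, repoName, 50, 0, "")
	// Authenticated request: pass the viewer's DID to include holds they can access.
	_, _ = db.GetTagsWithPlatforms(h.ReadOnlyDB, ownerDID, repoName, 50, 0, viewerDID)
}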
@@ -36,10 +36,14 @@ type spdxPackage struct {

// sbomDetailsData is the template data for the sbom-details partial.
type sbomDetailsData struct {
Packages []sbomPackage
Total int
Error string
ScannedAt string
Packages []sbomPackage
Total int
Error string
Status string // scan record's status field (ok | failed | skipped); empty for legacy records
Reason string // scan record's reason field (only meaningful when Status != ok)
ScannedAt string
Digest string // image digest (for download URLs)
HoldEndpoint string // hold DID (for download URLs)
}

type sbomPackage struct {
@@ -128,10 +132,20 @@ func FetchSbomDetails(ctx context.Context, holdEndpoint, digest string) sbomDeta
return sbomDetailsData{Error: "Failed to parse scan record"}
}

if scanRecord.Status == atproto.ScanStatusSkipped {
return sbomDetailsData{
Status: scanRecord.Status,
Reason: scanRecord.Reason,
ScannedAt: scanRecord.ScannedAt,
}
}

// Fetch the SBOM blob
if scanRecord.SbomBlob == nil || scanRecord.SbomBlob.Ref.String() == "" {
return sbomDetailsData{
ScannedAt: scanRecord.ScannedAt,
Status: scanRecord.Status,
Reason: scanRecord.Reason,
Error: "No SBOM data available",
}
}
@@ -199,9 +213,12 @@ func FetchSbomDetails(ctx context.Context, holdEndpoint, digest string) sbomDeta
})

return sbomDetailsData{
Packages: packages,
Total: len(packages),
ScannedAt: scanRecord.ScannedAt,
Packages: packages,
Total: len(packages),
Status: scanRecord.Status,
ScannedAt: scanRecord.ScannedAt,
Digest: digest,
HoldEndpoint: holdEndpoint,
}
}
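For reference, the three shapes the sbom-details partial can now receive under the new Status and Reason fields. Field values below are illustrative; the status constants are the ones referenced in this diff.

// Illustrative values only; field names and constants are the ones above.
func exampleSbomDetailsShapes() []sbomDetailsData {
	return []sbomDetailsData{
		// Skipped: artifact type the scanner intentionally does not scan.
		{Status: atproto.ScanStatusSkipped, Reason: "unsupported artifact type", ScannedAt: "2025-01-01T00:00:00Z"},
		// Failed or blob-less record: the partial shows the error instead of packages.
		{Status: atproto.ScanStatusFailed, Reason: "scanner error", Error: "No SBOM data available", ScannedAt: "2025-01-01T00:00:00Z"},
		// Successful scan: package list plus digest/hold for the download links.
		{Status: atproto.ScanStatusOK, Total: 2, Packages: []sbomPackage{{}, {}}, Digest: "sha256:0123abcd", HoldEndpoint: "did:web:hold.example.com"},
	}
}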
155 pkg/appview/handlers/scan_download.go Normal file
@@ -0,0 +1,155 @@
package handlers

import (
"context"
"encoding/json"
"fmt"
"io"
"log/slog"
"net/http"
"net/url"
"strings"
"time"

"atcr.io/pkg/atproto"
)

// ScanDownloadHandler serves raw scan blobs (SPDX JSON or Grype JSON) as file downloads.
// GET /api/scan-download?digest=sha256:...&holdEndpoint=did:web:...&type=vuln|sbom
type ScanDownloadHandler struct {
BaseUIHandler
}

func (h *ScanDownloadHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
digest := r.URL.Query().Get("digest")
holdEndpoint := r.URL.Query().Get("holdEndpoint")
blobType := r.URL.Query().Get("type") // "vuln" or "sbom"

if digest == "" || holdEndpoint == "" || (blobType != "vuln" && blobType != "sbom") {
http.Error(w, "missing or invalid parameters", http.StatusBadRequest)
return
}

hold, err := ResolveHold(r.Context(), h.ReadOnlyDB, holdEndpoint)
if err != nil {
slog.Debug("Failed to resolve hold for download", "holdEndpoint", holdEndpoint, "error", err)
http.Error(w, "could not resolve hold", http.StatusBadGateway)
return
}

data, err := fetchScanBlob(r.Context(), hold.DID, hold.URL, digest, blobType)
if err != nil {
slog.Debug("Failed to fetch scan blob", "type", blobType, "error", err)
http.Error(w, err.Error(), http.StatusNotFound)
return
}

shortDigest := strings.TrimPrefix(digest, "sha256:")
if len(shortDigest) > 12 {
shortDigest = shortDigest[:12]
}

var filename, contentType string
switch blobType {
case "vuln":
filename = fmt.Sprintf("vulnerabilities-%s.json", shortDigest)
contentType = "application/json"
case "sbom":
filename = fmt.Sprintf("sbom-spdx-%s.json", shortDigest)
contentType = "application/spdx+json"
}

w.Header().Set("Content-Type", contentType)
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", filename))
if _, err := w.Write(data); err != nil {
slog.Debug("Failed to write scan download response", "error", err)
}
}

// fetchScanBlob fetches the raw scan blob bytes from a hold.
// blobType is "vuln" or "sbom".
func fetchScanBlob(ctx context.Context, holdDID, holdURL, digest, blobType string) ([]byte, error) {
rkey := strings.TrimPrefix(digest, "sha256:")

ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()

// Fetch the scan record
scanURL := fmt.Sprintf("%s/xrpc/com.atproto.repo.getRecord?repo=%s&collection=%s&rkey=%s",
holdURL,
url.QueryEscape(holdDID),
url.QueryEscape(atproto.ScanCollection),
url.QueryEscape(rkey),
)

req, err := http.NewRequestWithContext(ctx, "GET", scanURL, nil)
if err != nil {
return nil, fmt.Errorf("build request: %w", err)
}

resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, fmt.Errorf("hold unreachable: %w", err)
}
defer resp.Body.Close()

if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("no scan record found")
}

var envelope struct {
Value json.RawMessage `json:"value"`
}
if err := json.NewDecoder(resp.Body).Decode(&envelope); err != nil {
return nil, fmt.Errorf("parse scan record: %w", err)
}

var scanRecord atproto.ScanRecord
if err := json.Unmarshal(envelope.Value, &scanRecord); err != nil {
return nil, fmt.Errorf("parse scan record: %w", err)
}

// Select the appropriate blob CID
var blobCID string
switch blobType {
case "vuln":
if scanRecord.VulnReportBlob == nil || scanRecord.VulnReportBlob.Ref.String() == "" {
return nil, fmt.Errorf("no vulnerability report available")
}
blobCID = scanRecord.VulnReportBlob.Ref.String()
case "sbom":
if scanRecord.SbomBlob == nil || scanRecord.SbomBlob.Ref.String() == "" {
return nil, fmt.Errorf("no SBOM available")
}
blobCID = scanRecord.SbomBlob.Ref.String()
}

// Fetch the blob
blobURL := fmt.Sprintf("%s/xrpc/com.atproto.sync.getBlob?did=%s&cid=%s",
holdURL,
url.QueryEscape(holdDID),
url.QueryEscape(blobCID),
)

blobReq, err := http.NewRequestWithContext(ctx, "GET", blobURL, nil)
if err != nil {
return nil, fmt.Errorf("build blob request: %w", err)
}

blobResp, err := http.DefaultClient.Do(blobReq)
if err != nil {
return nil, fmt.Errorf("fetch blob: %w", err)
}
defer blobResp.Body.Close()

if blobResp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("blob not accessible")
}

data, err := io.ReadAll(blobResp.Body)
if err != nil {
return nil, fmt.Errorf("read blob: %w", err)
}

return data, nil
}
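Usage sketch for the new endpoint, not part of the diff: the host, digest, and hold DID below are placeholders; the route and query parameters come from the handler comment above.

package main

import (
	"io"
	"net/http"
	"net/url"
	"os"
)

// Illustrative only: download the SPDX SBOM for one image digest through the
// /api/scan-download endpoint. Host, digest, and hold DID are placeholders.
func main() {
	q := url.Values{
		"digest":       {"sha256:0123abcd"},
		"holdEndpoint": {"did:web:hold.example.com"},
		"type":         {"sbom"},
	}
	resp, err := http.Get("https://registry.example.com/api/scan-download?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, err := os.Create("sbom-spdx-0123abcd.json")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, resp.Body); err != nil {
		panic(err)
	}
}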
@@ -25,6 +25,16 @@ type ScanResultHandler struct {
}

// vulnBadgeData is the template data for the vuln-badge partial.
// The badge renders one of five states, in priority order:
// 1. Error — we couldn't reach the hold at all (network/5xx)
// 2. NotScanned — hold reachable, no scan record for this digest (404)
// 3. Skipped — scan record explicitly marks this artifact as not-scannable
// 4. ScanFailed — scan record exists but the scanner errored
// 5. Found — scan succeeded; render tier counts (or "Clean" when zero)
//
// These states must stay distinct so users can tell "hold is down" from
// "this hasn't been scanned yet" from "scanner errored on this image" from
// "this artifact type is intentionally not scanned".
type vulnBadgeData struct {
Critical int64
High int64
@@ -32,13 +42,36 @@ type vulnBadgeData struct {
Low int64
Total int64
ScannedAt string
Found bool // true if scan record exists
Error bool // true if hold unreachable or error
ScanFailed bool // true if scan record exists but scan failed (no blobs)
Found bool // true if scan record exists and succeeded
Error bool // true if hold unreachable (network/5xx)
NotScanned bool // true if hold is up but no scan record (404)
ScanFailed bool // true if scan record exists but scan failed
Skipped bool // true if scan record marks the artifact as intentionally not scanned (helm, in-toto, etc.)
Digest string // for the detail modal link
HoldEndpoint string // for the detail modal link
}

// classifyScanRecord maps a scan record's Status field to badge data flags.
// An empty Status is treated as a legacy record from before the status field
// existed: nil-blob + zero-counts = treat as failed (preserves the prior badge
// for un-backfilled holds); otherwise treat as success.
func classifyScanRecord(scanRecord *atproto.ScanRecord) (found, skipped, failed bool) {
switch scanRecord.Status {
case atproto.ScanStatusSkipped:
return false, true, false
case atproto.ScanStatusFailed:
return false, false, true
case atproto.ScanStatusOK:
return true, false, false
default:
// Legacy record (status field didn't exist when this was written).
if scanRecord.SbomBlob == nil && scanRecord.Total == 0 {
return false, false, true
}
return true, false, false
}
}
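A sketch of how classifyScanRecord's branches, including the legacy fallback, could be pinned down in a test. Illustrative only and not part of the diff; the records below carry just the fields the classifier reads.

func TestClassifyScanRecord_Sketch(t *testing.T) {
	legacyFailed := &atproto.ScanRecord{}     // no status, nil blob, zero counts
	legacyOK := &atproto.ScanRecord{Total: 3} // no status, but counts present
	skippedRec := &atproto.ScanRecord{Status: atproto.ScanStatusSkipped}
	failedRec := &atproto.ScanRecord{Status: atproto.ScanStatusFailed}
	okRec := &atproto.ScanRecord{Status: atproto.ScanStatusOK}

	cases := []struct {
		rec                    *atproto.ScanRecord
		found, skipped, failed bool
	}{
		{legacyFailed, false, false, true},
		{legacyOK, true, false, false},
		{skippedRec, false, true, false},
		{failedRec, false, false, true},
		{okRec, true, false, false},
	}
	for _, c := range cases {
		f, s, fl := classifyScanRecord(c.rec)
		if f != c.found || s != c.skipped || fl != c.failed {
			t.Errorf("classifyScanRecord(%+v) = (%v,%v,%v), want (%v,%v,%v)",
				c.rec, f, s, fl, c.found, c.skipped, c.failed)
		}
	}
}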
func (h *ScanResultHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
digest := r.URL.Query().Get("digest")
holdEndpoint := r.URL.Query().Get("holdEndpoint")
@@ -87,8 +120,9 @@ func (h *ScanResultHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
defer resp.Body.Close()

if resp.StatusCode == http.StatusNotFound {
// No scan record — scanning disabled or not yet scanned. Render nothing.
h.renderBadge(w, vulnBadgeData{Error: true})
// Hold is reachable but has no scan record — not yet scanned, or
// the image was pushed before scanning was enabled.
h.renderBadge(w, vulnBadgeData{NotScanned: true})
return
}

@@ -112,10 +146,7 @@ func (h *ScanResultHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}

// A failed scan has nil blobs (no SBOM generated) and zero counts.
// Successful scans always have an SBOM blob even with 0 vulnerabilities.
scanFailed := scanRecord.SbomBlob == nil && scanRecord.Total == 0

found, skipped, failed := classifyScanRecord(&scanRecord)
h.renderBadge(w, vulnBadgeData{
Critical: scanRecord.Critical,
High: scanRecord.High,
@@ -123,8 +154,9 @@ func (h *ScanResultHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
Low: scanRecord.Low,
Total: scanRecord.Total,
ScannedAt: scanRecord.ScannedAt,
Found: true,
ScanFailed: scanFailed,
Found: found,
Skipped: skipped,
ScanFailed: failed,
Digest: digest,
HoldEndpoint: holdDID,
})
@@ -160,6 +192,9 @@ func fetchScanRecord(ctx context.Context, holdEndpoint, holdDID, hexDigest strin
}
defer resp.Body.Close()

if resp.StatusCode == http.StatusNotFound {
return vulnBadgeData{NotScanned: true}
}
if resp.StatusCode != http.StatusOK {
return vulnBadgeData{Error: true}
}
@@ -176,6 +211,7 @@ func fetchScanRecord(ctx context.Context, holdEndpoint, holdDID, hexDigest strin
return vulnBadgeData{Error: true}
}

found, skipped, failed := classifyScanRecord(&scanRecord)
return vulnBadgeData{
Critical: scanRecord.Critical,
High: scanRecord.High,
@@ -183,8 +219,9 @@ func fetchScanRecord(ctx context.Context, holdEndpoint, holdDID, hexDigest strin
Low: scanRecord.Low,
Total: scanRecord.Total,
ScannedAt: scanRecord.ScannedAt,
Found: true,
ScanFailed: scanRecord.SbomBlob == nil && scanRecord.Total == 0,
Found: found,
Skipped: skipped,
ScanFailed: failed,
Digest: fullDigest,
HoldEndpoint: holdDID,
}
@@ -214,8 +251,14 @@ func (h *BatchScanResultHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
if err != nil {
slog.Debug("Failed to resolve hold for batch scan", "holdEndpoint", holdEndpoint, "error", err)
w.Header().Set("Content-Type", "text/html")
// Emit "not scanned" badge for every digest so the placeholder resolves visibly.
|
||||
var buf bytes.Buffer
if err := h.Templates.ExecuteTemplate(&buf, "vuln-badge", vulnBadgeData{Error: true}); err != nil {
slog.Warn("Failed to render vuln-badge placeholder", "error", err)
}
for _, d := range digests {
fmt.Fprintf(w, `<span id="scan-badge-%s" hx-swap-oob="outerHTML"></span>`, template.HTMLEscapeString(d))
fmt.Fprintf(w, `<span id="scan-badge-%s" hx-swap-oob="outerHTML">%s</span>`,
template.HTMLEscapeString(d), buf.String())
}
return
}
@@ -95,19 +95,19 @@ func TestScanResult_WithVulnerabilities(t *testing.T) {
if !strings.Contains(body, "vuln-box-critical") {
t.Error("Expected body to contain vuln-box-critical for critical vulnerabilities")
}
if !strings.Contains(body, `data-tip="Critical">2<`) {
if !strings.Contains(body, `aria-label="2 critical">2<`) {
t.Error("Expected critical count of 2")
}
if !strings.Contains(body, "vuln-box-high") {
t.Error("Expected body to contain vuln-box-high for high vulnerabilities")
}
if !strings.Contains(body, `data-tip="High">5<`) {
if !strings.Contains(body, `aria-label="5 high">5<`) {
t.Error("Expected high count of 5")
}
if !strings.Contains(body, `data-tip="Medium">10<`) {
if !strings.Contains(body, `aria-label="10 medium">10<`) {
t.Error("Expected medium count of 10")
}
if !strings.Contains(body, `data-tip="Low">3<`) {
if !strings.Contains(body, `aria-label="3 low">3<`) {
t.Error("Expected low count of 3")
}
// Should show vulnerability strip with tooltip
@@ -165,9 +165,10 @@ func TestScanResult_NotFound(t *testing.T) {

body := strings.TrimSpace(rr.Body.String())

// 404 = no scan record. Should render NOTHING — not "Scan pending".
if body != "" {
t.Errorf("Expected empty body for 404, got: %q", body)
// 404 = no scan record yet. Renders a visible "Not scanned" placeholder
// so the htmx target resolves instead of staying empty forever.
if !strings.Contains(body, "Not scanned") {
t.Errorf("Expected 'Not scanned' placeholder for 404, got: %q", body)
}
}

@@ -189,8 +190,9 @@ func TestScanResult_HoldError(t *testing.T) {

body := strings.TrimSpace(rr.Body.String())

if body != "" {
t.Errorf("Expected empty body for hold error, got: %q", body)
// Hold reachable but returned 5xx — distinct from "not scanned".
if !strings.Contains(body, "Hold offline") {
t.Errorf("Expected 'Hold offline' badge for hold error, got: %q", body)
}
}

@@ -207,8 +209,9 @@ func TestScanResult_HoldUnreachable(t *testing.T) {

body := strings.TrimSpace(rr.Body.String())

if body != "" {
t.Errorf("Expected empty body for unreachable hold, got: %q", body)
// Network-unreachable hold — also distinct from "not scanned".
if !strings.Contains(body, "Hold offline") {
t.Errorf("Expected 'Hold offline' badge for unreachable hold, got: %q", body)
}
}

@@ -274,7 +277,7 @@ func TestScanResult_OnlyCriticalShown(t *testing.T) {

body := rr.Body.String()

if !strings.Contains(body, `data-tip="Critical">3<`) {
if !strings.Contains(body, `aria-label="3 critical">3<`) {
t.Error("Expected critical count of 3")
}
// Zero-count badges should NOT appear
@@ -353,7 +356,7 @@ func TestBatchScanResult_MultipleDigests(t *testing.T) {
}

// abc123 should have vulnerability badges
if !strings.Contains(body, `data-tip="Critical">2<`) {
if !strings.Contains(body, `aria-label="2 critical">2<`) {
t.Error("Expected critical count of 2 for abc123")
}
// def456 should have clean badge
@@ -496,7 +499,7 @@ func TestBatchScanResult_SingleDigest(t *testing.T) {
if !strings.Contains(body, `id="scan-badge-abc123"`) {
t.Error("Expected OOB span for abc123")
}
if !strings.Contains(body, `data-tip="Critical">1<`) {
if !strings.Contains(body, `aria-label="1 critical">1<`) {
t.Error("Expected critical count of 1")
}
}
Some files were not shown because too many files have changed in this diff