Files
at-container-registry/deploy/upcloud/cloudinit.go
Evan Jarrett de02e1f046 remove distribution from hold, add vulnerability scanning in appview.
1. Removing distribution/distribution from the Hold Service (biggest change)
  The hold service previously used distribution's StorageDriver interface for all blob operations. This replaces it with direct AWS SDK v2 calls through ATCR's own pkg/s3.S3Service:
  - New S3Service methods: Stat(), PutBytes(), Move(), Delete(), WalkBlobs(), ListPrefix() added to pkg/s3/types.go
  - Pull zone fix: Presigned URLs are now generated against the real S3 endpoint, then the host is swapped to the CDN URL post-signing (previously the CDN URL was set as the endpoint, which
  broke SigV4 signatures)
  - All hold subsystems migrated: GC, OCI uploads, XRPC handlers, profile uploads, scan broadcaster, manifest posts — all now use *s3.S3Service instead of storagedriver.StorageDriver
  - Config simplified: Removed configuration.Storage type and buildStorageConfigFromFields(); replaced with a simple S3Params() method
  - Mock expanded: MockS3Client gains an in-memory object store + 5 new methods, replacing duplicate mockStorageDriver implementations in tests (~160 lines deleted from each test file)
2. Vulnerability Scan UI in AppView (new feature)
  Displays scan results from the hold's PDS on the repository page:
  - New lexicon: io/atcr/hold/scan.json with vulnReportBlob field for storing full Grype reports
  - Two new HTMX endpoints: /api/scan-result (badge) and /api/vuln-details (modal with CVE table)
  - New templates: vuln-badge.html (severity count chips) and vuln-details.html (full CVE table with NVD/GHSA links)
  - Repository page: Lazy-loads scan badges per manifest via HTMX
  - Tests: ~590 lines of test coverage for both handlers
3. S3 Diagnostic Tool
  New cmd/s3-test/main.go (418 lines) — tests S3 connectivity with both SDK v1 and v2, including presigned URL generation, pull zone host swapping, and verbose signing debug output.
4. Deployment Tooling
  - New syncServiceUnit() for comparing/updating systemd units on servers
  - Update command now syncs config keys (adds missing keys from template) and service units with daemon-reload
5. DB Migration
  0011_fix_captain_successor_column.yaml — rebuilds hold_captain_records to add the successor column that was missed in a previous migration.
6. Documentation
  - APPVIEW-UI-FUTURE.md rewritten as a status-tracked feature inventory
  - DISTRIBUTION.md renamed to CREDENTIAL_HELPER.md
  - New REMOVING_DISTRIBUTION.md — 480-line analysis of fully removing distribution from the appview side
7. go.mod
  aws-sdk-go v1 moved from indirect to direct (needed by cmd/s3-test).
2026-02-13 15:26:24 -06:00

330 lines
9.8 KiB
Go

package main
import (
"bytes"
_ "embed"
"fmt"
"strings"
"text/template"
"go.yaml.in/yaml/v3"
)
// Embedded deployment templates. The systemd unit templates are rendered by
// renderServiceUnit, the YAML config templates by renderConfig, and the
// cloud-init shell script by generateCloudInit.

//go:embed systemd/appview.service.tmpl
var appviewServiceTmpl string

//go:embed systemd/hold.service.tmpl
var holdServiceTmpl string

//go:embed configs/appview.yaml.tmpl
var appviewConfigTmpl string

//go:embed configs/hold.yaml.tmpl
var holdConfigTmpl string

//go:embed configs/cloudinit.sh.tmpl
var cloudInitTmpl string
// ConfigValues holds values injected into config YAML templates.
// Only truly dynamic/computed values belong here — deployment-specific
// values like client_name, owner_did, etc. are literal in the templates.
type ConfigValues struct {
	// S3 / Object Storage connection parameters.
	S3Endpoint  string
	S3Region    string
	S3Bucket    string
	S3AccessKey string
	S3SecretKey string

	// Infrastructure (computed from zone + config).
	Zone       string // e.g. "us-chi1"
	HoldDomain string // e.g. "us-chi1.cove.seamark.dev"
	HoldDid    string // e.g. "did:web:us-chi1.cove.seamark.dev"
	BasePath   string // e.g. "/var/lib/seamark"
}
// renderConfig executes a Go template with the given values.
//
// vals is any data accepted by text/template — in this file it is a
// *ConfigValues, but the helper does not depend on that type, so it can
// render any config-style template. Returns the rendered text, or an error
// wrapping the parse/execute failure.
func renderConfig(tmplStr string, vals any) (string, error) {
	t, err := template.New("config").Parse(tmplStr)
	if err != nil {
		return "", fmt.Errorf("parse config template: %w", err)
	}
	var buf bytes.Buffer
	if err := t.Execute(&buf, vals); err != nil {
		return "", fmt.Errorf("render config template: %w", err)
	}
	return buf.String(), nil
}
// serviceUnitParams holds values for rendering systemd service unit templates.
// One instance is built per service (appview, hold) from the Naming() helpers.
type serviceUnitParams struct {
	DisplayName string // e.g. "Seamark"
	User        string // e.g. "seamark"
	BinaryPath  string // e.g. "/opt/seamark/bin/seamark-appview"
	ConfigPath  string // e.g. "/etc/seamark/appview.yaml"
	DataDir     string // e.g. "/var/lib/seamark"
	ServiceName string // e.g. "seamark-appview"
}
// renderServiceUnit executes a systemd unit template with the given values.
//
// p is any data accepted by text/template — in this file it is a
// serviceUnitParams, but the helper does not depend on that type
// (consistent with renderConfig). Returns the rendered unit text, or an
// error wrapping the parse/execute failure.
func renderServiceUnit(tmplStr string, p any) (string, error) {
	t, err := template.New("service").Parse(tmplStr)
	if err != nil {
		return "", fmt.Errorf("parse service template: %w", err)
	}
	var buf bytes.Buffer
	if err := t.Execute(&buf, p); err != nil {
		return "", fmt.Errorf("render service template: %w", err)
	}
	return buf.String(), nil
}
// generateAppviewCloudInit generates the cloud-init user-data script for the
// appview server: it renders the appview YAML config and systemd unit from
// their embedded templates, then assembles the full script.
func generateAppviewCloudInit(cfg *InfraConfig, vals *ConfigValues, goVersion string) (string, error) {
	nm := cfg.Naming()

	// Render the appview's YAML config.
	cfgYAML, err := renderConfig(appviewConfigTmpl, vals)
	if err != nil {
		return "", fmt.Errorf("appview config: %w", err)
	}

	// Render the systemd unit that runs the appview binary.
	unit, err := renderServiceUnit(appviewServiceTmpl, serviceUnitParams{
		DisplayName: nm.DisplayName(),
		User:        nm.SystemUser(),
		BinaryPath:  nm.InstallDir() + "/bin/" + nm.Appview(),
		ConfigPath:  nm.AppviewConfigPath(),
		DataDir:     nm.BasePath(),
		ServiceName: nm.Appview(),
	})
	if err != nil {
		return "", fmt.Errorf("appview service unit: %w", err)
	}

	// Combine everything into the cloud-init user-data script.
	return generateCloudInit(cloudInitParams{
		GoVersion:   goVersion,
		BinaryName:  nm.Appview(),
		BuildCmd:    "appview",
		ServiceUnit: unit,
		ConfigYAML:  cfgYAML,
		ConfigPath:  nm.AppviewConfigPath(),
		ServiceName: nm.Appview(),
		DataDir:     nm.BasePath(),
		RepoURL:     cfg.RepoURL,
		RepoBranch:  cfg.RepoBranch,
		InstallDir:  nm.InstallDir(),
		SystemUser:  nm.SystemUser(),
		ConfigDir:   nm.ConfigDir(),
		LogFile:     nm.LogFile(),
		DisplayName: nm.DisplayName(),
	})
}
// generateHoldCloudInit generates the cloud-init user-data script for the
// hold server: it renders the hold YAML config and systemd unit from their
// embedded templates, then assembles the full script.
func generateHoldCloudInit(cfg *InfraConfig, vals *ConfigValues, goVersion string) (string, error) {
	nm := cfg.Naming()

	// Render the hold's YAML config.
	cfgYAML, err := renderConfig(holdConfigTmpl, vals)
	if err != nil {
		return "", fmt.Errorf("hold config: %w", err)
	}

	// Render the systemd unit that runs the hold binary.
	unit, err := renderServiceUnit(holdServiceTmpl, serviceUnitParams{
		DisplayName: nm.DisplayName(),
		User:        nm.SystemUser(),
		BinaryPath:  nm.InstallDir() + "/bin/" + nm.Hold(),
		ConfigPath:  nm.HoldConfigPath(),
		DataDir:     nm.BasePath(),
		ServiceName: nm.Hold(),
	})
	if err != nil {
		return "", fmt.Errorf("hold service unit: %w", err)
	}

	// Combine everything into the cloud-init user-data script.
	return generateCloudInit(cloudInitParams{
		GoVersion:   goVersion,
		BinaryName:  nm.Hold(),
		BuildCmd:    "hold",
		ServiceUnit: unit,
		ConfigYAML:  cfgYAML,
		ConfigPath:  nm.HoldConfigPath(),
		ServiceName: nm.Hold(),
		DataDir:     nm.BasePath(),
		RepoURL:     cfg.RepoURL,
		RepoBranch:  cfg.RepoBranch,
		InstallDir:  nm.InstallDir(),
		SystemUser:  nm.SystemUser(),
		ConfigDir:   nm.ConfigDir(),
		LogFile:     nm.LogFile(),
		DisplayName: nm.DisplayName(),
	})
}
// cloudInitParams holds every value substituted into the cloud-init shell
// script template (cloudinit.sh.tmpl). ServiceUnit and ConfigYAML carry
// fully rendered file contents; generateCloudInit escapes single quotes in
// them before templating.
type cloudInitParams struct {
	GoVersion   string // Go toolchain version to install on the server
	BinaryName  string // name of the built binary
	BuildCmd    string // build target, e.g. "appview" or "hold"
	ServiceUnit string // rendered systemd unit file contents
	ConfigYAML  string // rendered YAML config file contents
	ConfigPath  string // destination path of the config file
	ServiceName string // systemd service name
	DataDir     string // service data directory
	RepoURL     string // git repository to clone
	RepoBranch  string // git branch to check out
	InstallDir  string // install prefix for the binary
	SystemUser  string // system user the service runs as
	ConfigDir   string // directory holding the config file
	LogFile     string // log file path
	DisplayName string // human-readable service name
}
// generateCloudInit renders the cloud-init user-data script from the
// embedded cloudinit.sh.tmpl using the supplied parameters.
func generateCloudInit(p cloudInitParams) (string, error) {
	// Escape single quotes in the embedded file contents so they survive the
	// shell quoting in the template ('\'' idiom).
	// NOTE(review): a quoted heredoc (<< 'EOF') would take content literally
	// without this — confirm cloudinit.sh.tmpl embeds these inside
	// single-quoted strings rather than quoted heredocs.
	for _, field := range []*string{&p.ServiceUnit, &p.ConfigYAML} {
		*field = strings.ReplaceAll(*field, "'", "'\\''")
	}

	tmpl, err := template.New("cloudinit").Parse(cloudInitTmpl)
	if err != nil {
		return "", fmt.Errorf("parse cloudinit template: %w", err)
	}

	var out bytes.Buffer
	if err := tmpl.Execute(&out, p); err != nil {
		return "", fmt.Errorf("render cloudinit template: %w", err)
	}
	return out.String(), nil
}
// syncServiceUnit compares a rendered systemd service unit against what's on
// the server. If they differ, it writes the new unit file. Returns true if the
// unit was updated (caller should daemon-reload before restart).
func syncServiceUnit(name, ip, serviceName, renderedUnit string) (bool, error) {
	unitPath := "/etc/systemd/system/" + serviceName + ".service"

	// Fetch the current unit; the sentinel marks a file that doesn't exist.
	current, err := runSSH(ip, fmt.Sprintf("cat %s 2>/dev/null || echo '__MISSING__'", unitPath), false)
	if err != nil {
		// Unreachable host is deliberately non-fatal: report and move on.
		fmt.Printf(" service unit sync: could not reach %s (%v)\n", name, err)
		return false, nil
	}

	want := strings.TrimSpace(renderedUnit)
	switch strings.TrimSpace(current) {
	case "__MISSING__":
		fmt.Printf(" service unit: %s not found (cloud-init will handle it)\n", name)
		return false, nil
	case want:
		fmt.Printf(" service unit: %s up to date\n", name)
		return false, nil
	}

	// Overwrite via a quoted heredoc so the unit content is taken literally.
	script := fmt.Sprintf("cat > %s << 'SVCEOF'\n%s\nSVCEOF", unitPath, want)
	if _, err := runSSH(ip, script, false); err != nil {
		return false, fmt.Errorf("write service unit: %w", err)
	}
	fmt.Printf(" service unit: %s updated\n", name)
	return true, nil
}
// syncConfigKeys fetches the existing config from a server and merges in any
// missing keys from the rendered template. Existing values are never overwritten.
//
// name labels progress output, ip is the SSH target, configPath is the
// remote YAML file, and templateYAML is the freshly rendered template that
// supplies new keys. Connectivity failures and a missing remote file are
// reported but return nil, so an update run continues best-effort; only
// parse/write failures are returned as errors.
func syncConfigKeys(name, ip, configPath, templateYAML string) error {
	// "__MISSING__" is a sentinel distinguishing an absent file from an empty one.
	remote, err := runSSH(ip, fmt.Sprintf("cat %s 2>/dev/null || echo '__MISSING__'", configPath), false)
	if err != nil {
		fmt.Printf(" config sync: could not reach %s (%v)\n", name, err)
		return nil
	}
	remote = strings.TrimSpace(remote)
	if remote == "__MISSING__" {
		fmt.Printf(" config sync: %s not yet created (cloud-init will handle it)\n", name)
		return nil
	}
	// Parse both into yaml.Node trees. Node trees (rather than maps) preserve
	// key order and comments in the existing file when re-marshalled.
	var templateDoc yaml.Node
	if err := yaml.Unmarshal([]byte(templateYAML), &templateDoc); err != nil {
		return fmt.Errorf("parse template yaml: %w", err)
	}
	var existingDoc yaml.Node
	if err := yaml.Unmarshal([]byte(remote), &existingDoc); err != nil {
		return fmt.Errorf("parse remote yaml: %w", err)
	}
	// Unwrap document nodes to get the root mapping
	templateRoot := unwrapDocNode(&templateDoc)
	existingRoot := unwrapDocNode(&existingDoc)
	if templateRoot == nil || existingRoot == nil {
		fmt.Printf(" config sync: %s skipped (unexpected YAML structure)\n", name)
		return nil
	}
	// mergeYAMLNodes mutates existingRoot (and thus existingDoc) in place.
	added := mergeYAMLNodes(templateRoot, existingRoot)
	if !added {
		fmt.Printf(" config sync: %s up to date\n", name)
		return nil
	}
	// Marshal the modified tree back. yaml.Marshal output always ends with a
	// newline, so CFGEOF below lands on its own line.
	merged, err := yaml.Marshal(&existingDoc)
	if err != nil {
		return fmt.Errorf("marshal merged yaml: %w", err)
	}
	// Write back to server via a quoted heredoc (content taken literally).
	script := fmt.Sprintf("cat > %s << 'CFGEOF'\n%sCFGEOF", configPath, string(merged))
	if _, err := runSSH(ip, script, false); err != nil {
		return fmt.Errorf("write merged config: %w", err)
	}
	fmt.Printf(" config sync: %s updated with new keys\n", name)
	return nil
}
// unwrapDocNode returns the root mapping node, unwrapping a DocumentNode
// wrapper if present. Returns nil if n is nil or the (unwrapped) node is not
// a mapping — previously a document wrapping a non-mapping (e.g. a sequence
// or scalar) was returned as-is, contradicting this contract and the direct
// non-document path, which already required a MappingNode.
func unwrapDocNode(n *yaml.Node) *yaml.Node {
	if n == nil {
		return nil
	}
	if n.Kind == yaml.DocumentNode && len(n.Content) > 0 {
		n = n.Content[0]
	}
	if n.Kind == yaml.MappingNode {
		return n
	}
	return nil
}
// mergeYAMLNodes recursively adds keys from base into existing that are not
// already present. Existing values are never overwritten. Returns true if any
// new keys were added.
func mergeYAMLNodes(base, existing *yaml.Node) bool {
	if base.Kind != yaml.MappingNode || existing.Kind != yaml.MappingNode {
		return false
	}

	// valueIndexOf returns the index of the value node for key in mapping m,
	// or -1 if the key is absent. Mapping Content alternates key, value pairs.
	valueIndexOf := func(m *yaml.Node, key string) int {
		for j := 0; j+1 < len(m.Content); j += 2 {
			if m.Content[j].Value == key {
				return j + 1
			}
		}
		return -1
	}

	changed := false
	for i := 0; i+1 < len(base.Content); i += 2 {
		key, val := base.Content[i], base.Content[i+1]
		idx := valueIndexOf(existing, key.Value)
		if idx < 0 {
			// Key missing from existing: append the key+value pair.
			existing.Content = append(existing.Content, key, val)
			changed = true
			continue
		}
		// Key present: only recurse when both values are mappings, so
		// existing scalar/sequence values are never touched.
		if val.Kind == yaml.MappingNode && existing.Content[idx].Kind == yaml.MappingNode {
			changed = mergeYAMLNodes(val, existing.Content[idx]) || changed
		}
	}
	return changed
}