Mirror of https://tangled.org/evan.jarrett.net/at-container-registry (synced 2026-04-30 13:07:00 +00:00)
improvements to how scanning works, and helm chart UI
cmd/hold/scan_backfill.go (new file, 176 lines)
@@ -0,0 +1,176 @@
package main

import (
    "context"
    "fmt"
    "strings"

    "atcr.io/pkg/atproto"
    "atcr.io/pkg/hold"

    "github.com/spf13/cobra"
)

// Media-type fragments that identify artifact types the scanner intentionally
// skips. Keep this list in sync with scanner/internal/scan/worker.go's
// unscannableConfigTypes — that map keys on config media types; here we look
// at *layer* media types because the backfill walks the hold's layer index
// (which has manifest AT-URIs we can join against scan records).
//
// Detection by layer media type is reliable: helm charts always have a single
// layer with media type application/vnd.cncf.helm.chart.content.v1.tar+gzip;
// in-toto / DSSE attestations use distinct layer types too.
var unscannableLayerMediaSubstrings = []string{
    "helm.chart.content",
    "in-toto",
    "dsse.envelope",
}

var scanBackfillConfigFile string

var scanBackfillCmd = &cobra.Command{
    Use:   "scan-backfill",
    Short: "Rewrite legacy scan records to use the status field",
    Long: `Walks every io.atcr.hold.scan record on this hold and assigns a status
("skipped" or "failed") to records that pre-date the status field.

A legacy record is one with an empty status, no SBOM blob, and zero vulnerability
counts. The tool inspects each record's manifest's layers to decide:

- layer media type matches helm/in-toto/DSSE → status="skipped"
- everything else → status="failed"

The tool is idempotent: records that already have a status are left alone.
Run once per hold after upgrading.`,
    Args: cobra.NoArgs,
    RunE: func(cmd *cobra.Command, args []string) error {
        cfg, err := hold.LoadConfig(scanBackfillConfigFile)
        if err != nil {
            return fmt.Errorf("load config: %w", err)
        }

        ctx := context.Background()
        holdPDS, cleanup, err := openHoldPDS(ctx, cfg)
        if err != nil {
            return err
        }
        defer cleanup()

        ri := holdPDS.RecordsIndex()
        if ri == nil {
            return fmt.Errorf("records index not available")
        }

        const batchSize = 200
        var (
            cursor      string
            scanned     int
            rewritten   int
            markSkipped int
            markFailed  int
            alreadyOK   int
        )

        for {
            records, nextCursor, err := ri.ListRecords(atproto.ScanCollection, batchSize, cursor, true)
            if err != nil {
                return fmt.Errorf("list scan records: %w", err)
            }

            for _, rec := range records {
                scanned++
                manifestDigest := "sha256:" + rec.Rkey

                _, scanRecord, err := holdPDS.GetScanRecord(ctx, manifestDigest)
                if err != nil {
                    fmt.Fprintf(cmd.ErrOrStderr(), "  skip rkey=%s: get failed: %v\n", rec.Rkey, err)
                    continue
                }

                // Already classified — nothing to do.
                if scanRecord.Status != "" {
                    alreadyOK++
                    continue
                }

                // Only legacy records that signal failure (nil blob + zero
                // counts) are candidates. Records with real data don't need
                // rewriting; their absent status will be treated as "ok".
                if scanRecord.SbomBlob != nil || scanRecord.Total != 0 {
                    alreadyOK++
                    continue
                }

                // Determine artifact type from layer media types.
                layers, err := holdPDS.ListLayerRecordsForManifest(ctx, scanRecord.Manifest)
                if err != nil {
                    fmt.Fprintf(cmd.ErrOrStderr(), "  skip rkey=%s: list layers failed: %v\n", rec.Rkey, err)
                    continue
                }

                skipped := false
                for _, l := range layers {
                    for _, frag := range unscannableLayerMediaSubstrings {
                        if strings.Contains(l.MediaType, frag) {
                            skipped = true
                            break
                        }
                    }
                    if skipped {
                        break
                    }
                }

                var rewrite *atproto.ScanRecord
                if skipped {
                    rewrite = atproto.NewSkippedScanRecord(
                        manifestDigest,
                        scanRecord.Repository,
                        scanRecord.UserDID,
                        "backfilled: unscannable artifact type",
                        scanRecord.ScannerVersion,
                    )
                    markSkipped++
                } else {
                    rewrite = atproto.NewFailedScanRecord(
                        manifestDigest,
                        scanRecord.Repository,
                        scanRecord.UserDID,
                        "backfilled: legacy record (no SBOM and zero counts)",
                        scanRecord.ScannerVersion,
                    )
                    markFailed++
                }
                // Preserve the original ScannedAt — rewriting it would either
                // reset the rescan timer or invalidate audit signals.
                if scanRecord.ScannedAt != "" {
                    rewrite.ScannedAt = scanRecord.ScannedAt
                }

                if _, _, err := holdPDS.CreateScanRecord(ctx, rewrite); err != nil {
                    fmt.Fprintf(cmd.ErrOrStderr(), "  rewrite rkey=%s failed: %v\n", rec.Rkey, err)
                    continue
                }
                rewritten++
            }

            if nextCursor == "" || len(records) == 0 {
                break
            }
            cursor = nextCursor
        }

        fmt.Fprintf(cmd.OutOrStdout(), "Backfill complete:\n")
        fmt.Fprintf(cmd.OutOrStdout(), "  scanned:        %d\n", scanned)
        fmt.Fprintf(cmd.OutOrStdout(), "  already-tagged: %d\n", alreadyOK)
        fmt.Fprintf(cmd.OutOrStdout(), "  → skipped:      %d\n", markSkipped)
        fmt.Fprintf(cmd.OutOrStdout(), "  → failed:       %d\n", markFailed)
        fmt.Fprintf(cmd.OutOrStdout(), "  rewritten:      %d\n", rewritten)
        return nil
    },
}

func init() {
    scanBackfillCmd.Flags().StringVarP(&scanBackfillConfigFile, "config", "c", "", "path to YAML configuration file")
    rootCmd.AddCommand(scanBackfillCmd)
}
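The docs hunk later in this commit gives the invocation; for reference, the command is run once per hold, for example (config path illustrative):

    atcr-hold scan-backfill --config /etc/atcr/hold.yaml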
@@ -45,3 +45,5 @@ auth:
 legal:
   company_name: Seamark
   jurisdiction: State of Texas, United States
+ai:
+  api_key: ""
@@ -202,6 +202,52 @@ SBOM results are stored in two places:
   - Record key: SBOM manifest digest
   - Contains reference to subject image
 
+## Scan Record Status
+
+Every scan attempt produces an `io.atcr.hold.scan` record. The `status` field
+tells the appview how to render the result:
+
+| Status | Meaning | Stale-loop behavior |
+|--------|---------|---------------------|
+| `ok` (or empty) | Scanner produced an SBOM. Vulnerability counts populated; SBOM blob populated. | Re-scanned on the rescan interval (default 7d). |
+| `failed` | Scanner ran but errored (network, OOM, parse failure). No SBOM, no counts. | Re-scanned on the rescan interval — failures may be transient. |
+| `skipped` | Scanner intentionally bypassed the artifact (helm chart, in-toto attestation, DSSE envelope). The `reason` field explains why. | **Never re-queued.** A skipped record won't change without a code change in the scanner. |
+
+Records written before the `status` field existed have an empty status. The
+appview treats empty + nil-blob + zero-count as failed (legacy fallback).
+
+### Unscannable artifact types
+
+The scanner skips artifacts whose config media type appears in
+`unscannableConfigTypes` (`scanner/internal/scan/worker.go`). Currently:
+
+- `application/vnd.cncf.helm.config.v1+json` — Helm charts. Rendered with a
+  helm-aware digest page (`pkg/appview/handlers/digest.go`) that shows
+  Chart.yaml metadata instead of layers / vulns / SBOM.
+- `application/vnd.in-toto+json` — in-toto attestations.
+- `application/vnd.dsse.envelope.v1+json` — DSSE envelopes (SLSA provenance).
+
+For these types the appview's vuln/SBOM tabs render
+*"Vulnerability scanning isn't applied to this artifact type."* — no retry hint.
+
+To add a new unscannable type: append the media type to
+`unscannableConfigTypes`. Existing records won't auto-rewrite — run
+`atcr-hold scan-backfill` once to convert any pre-existing failure records
+into skipped records (see below).
+
+### Backfill tool
+
+`atcr-hold scan-backfill --config <path>` walks every scan record on the
+hold and rewrites legacy ones (empty status + nil blob + zero counts) using
+the manifest's layer media types as a signal:
+
+- Layer media type contains `helm.chart.content`, `in-toto`, or
+  `dsse.envelope` → `status="skipped"`.
+- Otherwise → `status="failed"`.
+
+The tool is idempotent and preserves the original `scannedAt`, so it can be
+re-run safely. Run once per hold after upgrading.
+
 ## Accessing SBOMs
 
 Multiple methods for discovering and retrieving SBOM data.
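The legacy fallback described above is simple enough to state as code. A minimal sketch, assuming the `ScanRecord` fields used elsewhere in this commit (the `classifyScanRecord` function further down in this diff implements the same rule):

    // statusForLegacy applies the legacy fallback: an empty status with no
    // SBOM blob and zero counts means the scan never produced output.
    func statusForLegacy(rec *atproto.ScanRecord) string {
        if rec.SbomBlob == nil && rec.Total == 0 {
            return "failed" // no output at all: treat as a failed scan
        }
        return "ok" // real data present; the absent status means success
    }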
@@ -69,6 +69,17 @@
       "type": "string",
       "format": "datetime",
       "description": "RFC3339 timestamp of when the scan completed"
     },
+    "status": {
+      "type": "string",
+      "knownValues": ["ok", "failed", "skipped"],
+      "description": "Outcome of the scan attempt. 'ok' (or omitted, for back-compat) means the scanner produced an SBOM. 'failed' means the scanner ran but errored. 'skipped' means the scanner intentionally bypassed this artifact type (e.g. helm charts).",
+      "maxLength": 32
+    },
+    "reason": {
+      "type": "string",
+      "description": "Optional human-readable explanation for non-ok status (e.g. 'unscannable artifact type application/vnd.cncf.helm.config.v1+json').",
+      "maxLength": 256
+    }
   }
 }
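For illustration, the constructor used by the backfill above populates the two new fields together. A sketch using `NewSkippedScanRecord` as called in `scan_backfill.go`; the repository, DID, and version values here are hypothetical:

    // Yields a record with status="skipped" and the given reason string.
    rec := atproto.NewSkippedScanRecord(
        "sha256:abc123",       // manifest digest / record key (placeholder)
        "examples/my-chart",   // repository (hypothetical)
        "did:plc:exampleuser", // owner DID (hypothetical)
        "unscannable artifact type application/vnd.cncf.helm.config.v1+json",
        "v0.1.0",              // scanner version (hypothetical)
    )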
@@ -30,20 +30,29 @@ const accessibleHoldsSubquery = `(
     OR hold_did IN (SELECT hold_did FROM hold_crew_members WHERE member_did = ?)
 )`
 
+// Artifact type values stored in manifests.artifact_type and returned by
+// GetArtifactType. Container-image is the default for OCI/Docker images and
+// manifest lists; helm-chart is for OCI helm chart artifacts.
+const (
+    ArtifactTypeContainerImage = "container-image"
+    ArtifactTypeHelmChart      = "helm-chart"
+    ArtifactTypeUnknown        = "unknown"
+)
+
 // GetArtifactType determines the artifact type based on config media type
 // Returns: "helm-chart", "container-image", or "unknown"
 func GetArtifactType(configMediaType string) string {
     switch {
     case strings.Contains(configMediaType, "helm.config"):
-        return "helm-chart"
+        return ArtifactTypeHelmChart
     case strings.Contains(configMediaType, "oci.image.config") ||
         strings.Contains(configMediaType, "docker.container.image"):
-        return "container-image"
+        return ArtifactTypeContainerImage
     case configMediaType == "":
         // Manifest lists don't have a config - treat as container-image
-        return "container-image"
+        return ArtifactTypeContainerImage
     default:
-        return "unknown"
+        return ArtifactTypeUnknown
     }
 }
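As a quick illustration of the mapping (a sketch; it assumes the media types documented earlier in this commit and that the function is exported from this `db` package):

    db.GetArtifactType("application/vnd.cncf.helm.config.v1+json") // "helm-chart"
    db.GetArtifactType("application/vnd.oci.image.config.v1+json") // "container-image"
    db.GetArtifactType("")                                         // "container-image" (manifest list)
    db.GetArtifactType("application/vnd.in-toto+json")             // "unknown"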
@@ -1,6 +1,7 @@
 package handlers
 
 import (
+    "context"
     "log/slog"
     "net/http"
     "strings"
@@ -21,6 +22,53 @@ type LayerDetail struct {
     EmptyLayer bool // ENV, LABEL, etc. — no actual layer blob
 }
 
+// HelmChartContent is the data the helm-aware digest content needs: parsed
+// Chart.yaml metadata + a single chart-tarball "layer" pulled from the DB.
+type HelmChartContent struct {
+    Meta            *holdclient.HelmChartMeta
+    Tarball         *LayerDetail
+    MetaFetchFailed bool // hold reachable but config blob couldn't be parsed
+    HoldUnreachable bool
+}
+
+// buildHelmContent fetches helm chart metadata + the single chart-tarball layer.
+// Returns a populated HelmChartContent even when the meta fetch fails so the
+// page can still render the artifact card.
+func buildHelmContent(ctx context.Context, holdURL string, digest string, dbLayers []db.Layer) *HelmChartContent {
+    content := &HelmChartContent{}
+    if holdURL == "" {
+        content.HoldUnreachable = true
+    } else {
+        meta, err := holdclient.FetchHelmChartMeta(ctx, holdURL, digest)
+        if err != nil {
+            slog.Warn("Failed to fetch helm chart meta", "error", err, "digest", digest)
+            content.MetaFetchFailed = true
+        } else {
+            content.Meta = meta
+        }
+    }
+    if len(dbLayers) > 0 {
+        // Helm charts are always single-layer (the chart tarball). If somehow
+        // multiple are present, pick the one with helm chart content media
+        // type, falling back to the first.
+        chosen := 0
+        for i, l := range dbLayers {
+            if strings.Contains(l.MediaType, "helm.chart.content") {
+                chosen = i
+                break
+            }
+        }
+        l := dbLayers[chosen]
+        content.Tarball = &LayerDetail{
+            Index:     l.LayerIndex + 1,
+            Digest:    l.Digest,
+            Size:      l.Size,
+            MediaType: l.MediaType,
+        }
+    }
+    return content
+}
+
 // DigestDetailHandler renders the digest detail page with layers + vulnerabilities.
 type DigestDetailHandler struct {
     BaseUIHandler
@@ -66,10 +114,28 @@ func (h *DigestDetailHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
     var layers []LayerDetail
     var vulnData *vulnDetailsData
     var sbomData *sbomDetailsData
+    var helmContent *HelmChartContent
 
     if manifest.IsManifestList {
         // Manifest list: no layers, show platform picker
         // Platforms are already populated by GetManifestDetail
+    } else if manifest.ArtifactType == db.ArtifactTypeHelmChart {
+        // Helm chart: skip OCI history / vuln / SBOM entirely. Fetch helm
+        // chart metadata from the same config blob and the single tarball
+        // layer from the DB.
+        dbLayers, err := db.GetLayersForManifest(h.ReadOnlyDB, manifest.ID)
+        if err != nil {
+            slog.Warn("Failed to fetch layers", "error", err)
+        }
+        hold, holdErr := ResolveHold(r.Context(), h.ReadOnlyDB, manifest.HoldEndpoint)
+        holdURL := ""
+        if holdErr == nil {
+            holdURL = hold.URL
+        }
+        helmContent = buildHelmContent(r.Context(), holdURL, digest, dbLayers)
+        if holdErr != nil {
+            helmContent.HoldUnreachable = true
+        }
     } else {
         // Single manifest: fetch layers from DB
         dbLayers, err := db.GetLayersForManifest(h.ReadOnlyDB, manifest.ID)
@@ -124,6 +190,7 @@ func (h *DigestDetailHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
         WithCanonical("https://" + h.SiteURL + "/d/" + owner.Handle + "/" + repository + "/" + digest).
         WithSiteName(h.ClientShortName)
 
+    pageData := NewPageData(r, &h.BaseUIHandler)
     data := struct {
         PageData
         Meta *PageMeta
@@ -133,9 +200,12 @@ func (h *DigestDetailHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
         Layers           []LayerDetail
         VulnData         *vulnDetailsData
         SbomData         *sbomDetailsData
+        HelmContent      *HelmChartContent
         SelectedPlatform string
         RegistryURL      string
+        OciClient        string
     }{
-        PageData:   NewPageData(r, &h.BaseUIHandler),
+        PageData:   pageData,
         Meta:       meta,
         Owner:      owner,
         Repository: repository,
@@ -143,7 +213,10 @@ func (h *DigestDetailHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
         Layers:           layers,
         VulnData:         vulnData,
         SbomData:         sbomData,
+        HelmContent:      helmContent,
         SelectedPlatform: selectedPlatform,
         RegistryURL:      h.RegistryURL,
+        OciClient:        pageData.OciClient,
     }
 
     if err := h.Templates.ExecuteTemplate(w, "digest", data); err != nil {
@@ -1,6 +1,7 @@
 package handlers
 
 import (
+    "fmt"
     "log/slog"
     "net/http"
     "strings"
@@ -8,6 +9,7 @@ import (
 
     "atcr.io/pkg/appview/db"
     "atcr.io/pkg/appview/holdclient"
+    "atcr.io/pkg/appview/middleware"
     "atcr.io/pkg/atproto"
     "github.com/go-chi/chi/v5"
 )
@@ -49,6 +51,60 @@ func (h *DigestContentHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
     hold, holdErr := ResolveHold(r.Context(), h.ReadOnlyDB, manifest.HoldEndpoint)
     holdReachable := holdErr == nil
 
+    // Helm charts have no scannable layers / vulns / SBOM. Render helm-aware
+    // content for the default + "chart" sections, and a not-applicable
+    // placeholder for the legacy layers / vulns / sbom sections (which
+    // shouldn't be requested for helm but might be if a stale tab fires).
+    if manifest.ArtifactType == db.ArtifactTypeHelmChart {
+        holdURL := ""
+        if holdReachable {
+            holdURL = hold.URL
+        }
+        helm := buildHelmContent(r.Context(), holdURL, digest, dbLayers)
+        if !holdReachable {
+            helm.HoldUnreachable = true
+        }
+        helmData := struct {
+            Manifest    *db.ManifestWithMetadata
+            HelmContent *HelmChartContent
+            RegistryURL string
+            OwnerHandle string
+            RepoName    string
+            OciClient   string
+            IsLoggedIn  bool
+        }{
+            Manifest:    manifest,
+            HelmContent: helm,
+            RegistryURL: h.RegistryURL,
+            OwnerHandle: identifier,
+            RepoName:    repository,
+            OciClient:   "", // helm switcher ignores this field
+            IsLoggedIn:  middleware.GetUser(r) != nil,
+        }
+        w.Header().Set("Content-Type", "text/html")
+        section := r.URL.Query().Get("section")
+        switch section {
+        case "chart":
+            // Used by the repo page's chart tab — no install card here
+            // because repo-tag-section already renders one at the top.
+            if err := h.Templates.ExecuteTemplate(w, "helm-chart-info", helmData); err != nil {
+                slog.Warn("Failed to render helm chart info", "error", err)
+                RenderHTMXError(w, r, http.StatusInternalServerError, "Could not render helm chart", err)
+            }
+        case "layers", "vulns", "sbom":
+            // Defensive fallback if a stale tab somehow fires. The repo page
+            // hides these tabs for helm; this should be unreachable.
+            fmt.Fprint(w, `<p class="text-base-content/70 py-8">Helm charts don't have layers, vulnerabilities, or SBOMs.</p>`)
+        default:
+            // Digest detail page (full helm view, with install card).
+            if err := h.Templates.ExecuteTemplate(w, "helm-digest-content", helmData); err != nil {
+                slog.Warn("Failed to render helm digest content", "error", err)
+                RenderHTMXError(w, r, http.StatusInternalServerError, "Could not render helm chart", err)
+            }
+        }
+        return
+    }
+
     // Parallelize the three hold fetches. They're independent and each
     // takes a network round-trip; serial runs add up on slow links.
     var (
@@ -93,17 +149,21 @@ func (h *DigestContentHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
     }
 
     // VulnReason / SbomReason let the template branch distinctly on why
-    // data is missing instead of collapsing three causes into a generic
-    // "not available" message.
-    //   ok — data is present
+    // data is missing instead of collapsing causes into a generic message.
+    //   ok               — data is present
     //   hold-unreachable — we couldn't reach the hold
-    //   not-scanned — hold is up but no scan record exists
-    //   fetch-failed — scan record fetch failed on the hold
+    //   not-scanned      — hold is up but no scan record exists
+    //   not-applicable   — scan record exists with status="skipped" (artifact
+    //                      type isn't scanned, e.g. in-toto, DSSE — helm
+    //                      charts go through a separate code path)
+    //   fetch-failed     — scan record fetch failed on the hold
     vulnReason := "ok"
     if !holdReachable {
         vulnReason = "hold-unreachable"
     } else if vulnData == nil || vulnData.Error == "never-scanned" {
         vulnReason = "not-scanned"
+    } else if vulnData.Status == atproto.ScanStatusSkipped {
+        vulnReason = "not-applicable"
     } else if vulnData.Error != "" {
         vulnReason = "fetch-failed"
     }
@@ -113,6 +173,8 @@ func (h *DigestContentHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
         sbomReason = "hold-unreachable"
     } else if sbomData == nil || sbomData.Error == "never-scanned" {
         sbomReason = "not-scanned"
+    } else if sbomData.Status == atproto.ScanStatusSkipped {
+        sbomReason = "not-applicable"
     } else if sbomData.Error != "" {
         sbomReason = "fetch-failed"
     }
@@ -192,59 +192,76 @@ type DeleteUntaggedManifestsHandler struct {
 func (h *DeleteUntaggedManifestsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
     user := middleware.GetUser(r)
     if user == nil {
-        http.Error(w, "Unauthorized", http.StatusUnauthorized)
+        render.Status(r, http.StatusUnauthorized)
+        render.JSON(w, r, map[string]string{"error": "Unauthorized"})
         return
     }
 
     var req deleteUntaggedRequest
     if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-        http.Error(w, "Invalid request body", http.StatusBadRequest)
+        render.Status(r, http.StatusBadRequest)
+        render.JSON(w, r, map[string]string{"error": "Invalid request body"})
         return
     }
 
     digests, err := db.GetAllUntaggedManifestDigests(h.DB, user.DID, req.Repo)
     if err != nil {
-        http.Error(w, fmt.Sprintf("Failed to query untagged manifests: %v", err), http.StatusInternalServerError)
+        render.Status(r, http.StatusInternalServerError)
+        render.JSON(w, r, map[string]any{
+            "error":   fmt.Sprintf("Failed to query untagged manifests: %v", err),
+            "deleted": 0,
+            "failed":  0,
+            "total":   0,
+        })
         return
     }
 
     if len(digests) == 0 {
-        render.JSON(w, r, map[string]int{"deleted": 0})
+        render.JSON(w, r, map[string]any{"deleted": 0, "failed": 0, "total": 0})
         return
     }
 
     pdsClient := atproto.NewClientWithSessionProvider(user.PDSEndpoint, user.DID, h.Refresher)
 
     deleted := 0
+    type failure struct {
+        Digest string `json:"digest"`
+        Error  string `json:"error"`
+    }
+    var failures []failure
 
     for _, digest := range digests {
        rkey := strings.TrimPrefix(digest, "sha256:")
 
        if err := pdsClient.DeleteRecord(r.Context(), atproto.ManifestCollection, rkey); err != nil {
            if handleOAuthError(r.Context(), h.Refresher, user.DID, err) {
-                http.Error(w, "Authentication failed, please log in again", http.StatusUnauthorized)
+                render.Status(r, http.StatusUnauthorized)
+                render.JSON(w, r, map[string]any{
+                    "error":   "Authentication failed, please log in again",
+                    "deleted": deleted,
+                    "failed":  len(failures),
+                    "total":   len(digests),
+                })
                 return
             }
-            render.Status(r, http.StatusInternalServerError)
-            render.JSON(w, r, map[string]any{
-                "error":   fmt.Sprintf("Failed to delete manifest %s from PDS: %v", digest, err),
-                "deleted": deleted,
-            })
-            return
+            failures = append(failures, failure{Digest: digest, Error: fmt.Sprintf("PDS: %v", err)})
+            continue
         }
 
         if err := db.DeleteManifest(h.DB, user.DID, req.Repo, digest); err != nil {
-            render.Status(r, http.StatusInternalServerError)
-            render.JSON(w, r, map[string]any{
-                "error":   fmt.Sprintf("Failed to delete manifest %s from cache: %v", digest, err),
-                "deleted": deleted,
-            })
-            return
+            failures = append(failures, failure{Digest: digest, Error: fmt.Sprintf("cache: %v", err)})
+            continue
         }
 
         deleted++
     }
 
-    render.JSON(w, r, map[string]int{"deleted": deleted})
+    render.JSON(w, r, map[string]any{
+        "deleted":  deleted,
+        "failed":   len(failures),
+        "total":    len(digests),
+        "failures": failures,
+    })
 }
 
 // UploadAvatarHandler handles uploading/updating a repository avatar
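With this change a bulk delete reports partial failures instead of aborting on the first error; a response might now look like `{"deleted": 3, "failed": 1, "total": 4, "failures": [{"digest": "sha256:...", "error": "PDS: ..."}]}` (values illustrative).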
@@ -39,6 +39,8 @@ type sbomDetailsData struct {
     Packages []sbomPackage
     Total    int
     Error    string
+    Status   string // scan record's status field (ok | failed | skipped); empty for legacy records
+    Reason   string // scan record's reason field (only meaningful when Status != ok)
     ScannedAt    string
     Digest       string // image digest (for download URLs)
     HoldEndpoint string // hold DID (for download URLs)
@@ -130,10 +132,20 @@ func FetchSbomDetails(ctx context.Context, holdEndpoint, digest string) sbomDeta
         return sbomDetailsData{Error: "Failed to parse scan record"}
     }
 
+    if scanRecord.Status == atproto.ScanStatusSkipped {
+        return sbomDetailsData{
+            Status:    scanRecord.Status,
+            Reason:    scanRecord.Reason,
+            ScannedAt: scanRecord.ScannedAt,
+        }
+    }
+
     // Fetch the SBOM blob
     if scanRecord.SbomBlob == nil || scanRecord.SbomBlob.Ref.String() == "" {
         return sbomDetailsData{
             ScannedAt: scanRecord.ScannedAt,
+            Status:    scanRecord.Status,
+            Reason:    scanRecord.Reason,
             Error:     "No SBOM data available",
         }
     }
@@ -203,6 +215,7 @@ func FetchSbomDetails(ctx context.Context, holdEndpoint, digest string) sbomDeta
     return sbomDetailsData{
         Packages:     packages,
         Total:        len(packages),
+        Status:       scanRecord.Status,
         ScannedAt:    scanRecord.ScannedAt,
         Digest:       digest,
         HoldEndpoint: holdEndpoint,
@@ -25,14 +25,16 @@ type ScanResultHandler struct {
 }
 
 // vulnBadgeData is the template data for the vuln-badge partial.
-// The badge renders one of four states, in priority order:
+// The badge renders one of five states, in priority order:
 //  1. Error — we couldn't reach the hold at all (network/5xx)
 //  2. NotScanned — hold reachable, no scan record for this digest (404)
-//  3. ScanFailed — scan record exists but the scanner didn't produce an SBOM
-//  4. Found — scan succeeded; render tier counts (or "Clean" when zero)
+//  3. Skipped — scan record explicitly marks this artifact as not-scannable
+//  4. ScanFailed — scan record exists but the scanner errored
+//  5. Found — scan succeeded; render tier counts (or "Clean" when zero)
 //
 // These states must stay distinct so users can tell "hold is down" from
-// "this hasn't been scanned yet" from "scanner errored on this image".
+// "this hasn't been scanned yet" from "scanner errored on this image" from
+// "this artifact type is intentionally not scanned".
 type vulnBadgeData struct {
     Critical int64
     High     int64
@@ -43,11 +45,33 @@ type vulnBadgeData struct {
     Found        bool   // true if scan record exists and succeeded
     Error        bool   // true if hold unreachable (network/5xx)
     NotScanned   bool   // true if hold is up but no scan record (404)
-    ScanFailed   bool   // true if scan record exists but scan failed (no SBOM)
+    ScanFailed   bool   // true if scan record exists but scan failed
+    Skipped      bool   // true if scan record marks the artifact as intentionally not scanned (helm, in-toto, etc.)
     Digest       string // for the detail modal link
     HoldEndpoint string // for the detail modal link
 }
 
+// classifyScanRecord maps a scan record's Status field to badge data flags.
+// An empty Status is treated as a legacy record from before the status field
+// existed: nil-blob + zero-counts = treat as failed (preserves the prior badge
+// for un-backfilled holds); otherwise treat as success.
+func classifyScanRecord(scanRecord *atproto.ScanRecord) (found, skipped, failed bool) {
+    switch scanRecord.Status {
+    case atproto.ScanStatusSkipped:
+        return false, true, false
+    case atproto.ScanStatusFailed:
+        return false, false, true
+    case atproto.ScanStatusOK:
+        return true, false, false
+    default:
+        // Legacy record (status field didn't exist when this was written).
+        if scanRecord.SbomBlob == nil && scanRecord.Total == 0 {
+            return false, false, true
+        }
+        return true, false, false
+    }
+}
+
 func (h *ScanResultHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
     digest := r.URL.Query().Get("digest")
     holdEndpoint := r.URL.Query().Get("holdEndpoint")
@@ -122,10 +146,7 @@ func (h *ScanResultHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
         return
     }
 
-    // A failed scan has nil blobs (no SBOM generated) and zero counts.
-    // Successful scans always have an SBOM blob even with 0 vulnerabilities.
-    scanFailed := scanRecord.SbomBlob == nil && scanRecord.Total == 0
-
+    found, skipped, failed := classifyScanRecord(&scanRecord)
     h.renderBadge(w, vulnBadgeData{
         Critical: scanRecord.Critical,
         High:     scanRecord.High,
@@ -133,8 +154,9 @@ func (h *ScanResultHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
         Low:       scanRecord.Low,
         Total:     scanRecord.Total,
         ScannedAt: scanRecord.ScannedAt,
-        Found:      true,
-        ScanFailed: scanFailed,
+        Found:      found,
+        Skipped:    skipped,
+        ScanFailed: failed,
         Digest:       digest,
         HoldEndpoint: holdDID,
     })
@@ -189,6 +211,7 @@ func fetchScanRecord(ctx context.Context, holdEndpoint, holdDID, hexDigest strin
         return vulnBadgeData{Error: true}
     }
 
+    found, skipped, failed := classifyScanRecord(&scanRecord)
     return vulnBadgeData{
         Critical: scanRecord.Critical,
         High:     scanRecord.High,
@@ -196,8 +219,9 @@ func fetchScanRecord(ctx context.Context, holdEndpoint, holdDID, hexDigest strin
         Low:       scanRecord.Low,
         Total:     scanRecord.Total,
         ScannedAt: scanRecord.ScannedAt,
-        Found:      true,
-        ScanFailed: scanRecord.SbomBlob == nil && scanRecord.Total == 0,
+        Found:      found,
+        Skipped:    skipped,
+        ScanFailed: failed,
         Digest:       fullDigest,
         HoldEndpoint: holdDID,
     }
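The three flags returned by `classifyScanRecord` are mutually exclusive by construction, which keeps the badge states distinct. A minimal sketch of the mapping, assuming only the `Status` field matters for non-legacy records:

    // A record the scanner intentionally bypassed renders the Skipped badge.
    found, skipped, failed := classifyScanRecord(&atproto.ScanRecord{Status: atproto.ScanStatusSkipped})
    // found=false, skipped=true, failed=false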
@@ -57,6 +57,8 @@ type vulnDetailsData struct {
     Matches []vulnMatch
     Summary vulnSummary
     Error   string // non-empty if something went wrong
+    Status  string // scan record's status field (ok | failed | skipped); empty for legacy records
+    Reason  string // scan record's reason field (only meaningful when Status != ok)
     ScannedAt    string
     Digest       string // image digest (for download URLs)
     HoldEndpoint string // hold DID (for download URLs)
@@ -317,11 +319,26 @@ func FetchVulnDetails(ctx context.Context, holdEndpoint, digest string) vulnDeta
         Total:    scanRecord.Total,
     }
 
+    // Skipped scan records have no blobs and a non-failure status. The caller
+    // classifies these as "not-applicable" rather than "fetch-failed" so the
+    // template can show "scanning isn't applied to this artifact" instead of a
+    // retry hint.
+    if scanRecord.Status == atproto.ScanStatusSkipped {
+        return vulnDetailsData{
+            Summary:   summary,
+            ScannedAt: scanRecord.ScannedAt,
+            Status:    scanRecord.Status,
+            Reason:    scanRecord.Reason,
+        }
+    }
+
     // Fetch the vulnerability report blob
     if scanRecord.VulnReportBlob == nil || scanRecord.VulnReportBlob.Ref.String() == "" {
         return vulnDetailsData{
             Summary:   summary,
             ScannedAt: scanRecord.ScannedAt,
+            Status:    scanRecord.Status,
+            Reason:    scanRecord.Reason,
             Error:     "No detailed vulnerability report available. Only summary counts were recorded.",
         }
     }
@@ -335,22 +352,22 @@ func FetchVulnDetails(ctx context.Context, holdEndpoint, digest string) vulnDeta
 
     blobReq, err := http.NewRequestWithContext(ctx, "GET", blobURL, nil)
     if err != nil {
-        return vulnDetailsData{Summary: summary, ScannedAt: scanRecord.ScannedAt, Error: "Failed to build blob request"}
+        return vulnDetailsData{Summary: summary, ScannedAt: scanRecord.ScannedAt, Status: scanRecord.Status, Error: "Failed to build blob request"}
     }
 
     blobResp, err := http.DefaultClient.Do(blobReq)
     if err != nil {
-        return vulnDetailsData{Summary: summary, ScannedAt: scanRecord.ScannedAt, Error: "Failed to fetch vulnerability report"}
+        return vulnDetailsData{Summary: summary, ScannedAt: scanRecord.ScannedAt, Status: scanRecord.Status, Error: "Failed to fetch vulnerability report"}
     }
     defer blobResp.Body.Close()
 
     if blobResp.StatusCode != http.StatusOK {
-        return vulnDetailsData{Summary: summary, ScannedAt: scanRecord.ScannedAt, Error: "Vulnerability report not accessible"}
+        return vulnDetailsData{Summary: summary, ScannedAt: scanRecord.ScannedAt, Status: scanRecord.Status, Error: "Vulnerability report not accessible"}
     }
 
     var report grypeReport
     if err := json.NewDecoder(blobResp.Body).Decode(&report); err != nil {
-        return vulnDetailsData{Summary: summary, ScannedAt: scanRecord.ScannedAt, Error: "Failed to parse vulnerability report"}
+        return vulnDetailsData{Summary: summary, ScannedAt: scanRecord.ScannedAt, Status: scanRecord.Status, Error: "Failed to parse vulnerability report"}
     }
 
     matches := make([]vulnMatch, 0, len(report.Matches))
@@ -390,6 +407,7 @@ func FetchVulnDetails(ctx context.Context, holdEndpoint, digest string) vulnDeta
     return vulnDetailsData{
         Matches:   matches,
         Summary:   summary,
+        Status:    scanRecord.Status,
         ScannedAt: scanRecord.ScannedAt,
         Digest:    digest,
         HoldEndpoint: holdEndpoint,
pkg/appview/holdclient/helm_config.go (new file, 148 lines)
@@ -0,0 +1,148 @@
package holdclient

import (
    "context"
    "encoding/json"
    "fmt"
    "net/http"
    "net/url"
    "strings"
    "time"

    "atcr.io/pkg/atproto"
)

// HelmChartMeta is the parsed Chart.yaml-equivalent metadata extracted from a
// helm chart's OCI config blob (media type
// application/vnd.cncf.helm.config.v1+json). The helm config blob is Chart.yaml
// rendered as JSON, so the field names mirror Chart.yaml.
type HelmChartMeta struct {
    Name         string
    Version      string
    AppVersion   string
    Type         string // "application" | "library" (empty in older charts; treat as "application")
    Description  string
    KubeVersion  string
    Home         string
    Icon         string
    Sources      []string
    Keywords     []string
    Maintainers  []HelmMaintainer
    Dependencies []HelmDependency
    Annotations  map[string]string
    Deprecated   bool
}

type HelmMaintainer struct {
    Name  string
    Email string
    URL   string
}

type HelmDependency struct {
    Name       string
    Version    string
    Repository string
    Alias      string
    Condition  string
}

// helmConfigJSON matches the on-the-wire shape of a helm OCI config blob.
// Field names track Chart.yaml's JSON form (lowerCamelCase for some fields,
// kebab-case for none — helm's CLI marshals Chart.yaml struct directly).
type helmConfigJSON struct {
    Name         string               `json:"name"`
    Version      string               `json:"version"`
    AppVersion   string               `json:"appVersion"`
    Type         string               `json:"type"`
    Description  string               `json:"description"`
    KubeVersion  string               `json:"kubeVersion"`
    Home         string               `json:"home"`
    Icon         string               `json:"icon"`
    Sources      []string             `json:"sources"`
    Keywords     []string             `json:"keywords"`
    Maintainers  []helmMaintainerJSON `json:"maintainers"`
    Dependencies []helmDependencyJSON `json:"dependencies"`
    Annotations  map[string]string    `json:"annotations"`
    Deprecated   bool                 `json:"deprecated"`
}

type helmMaintainerJSON struct {
    Name  string `json:"name"`
    Email string `json:"email"`
    URL   string `json:"url"`
}

type helmDependencyJSON struct {
    Name       string `json:"name"`
    Version    string `json:"version"`
    Repository string `json:"repository"`
    Alias      string `json:"alias"`
    Condition  string `json:"condition"`
}

// FetchHelmChartMeta fetches a helm chart's config blob from the hold and
// parses it as Chart.yaml metadata. Uses the same getImageConfig XRPC as
// FetchImageConfig but applies a helm-specific schema to the JSON.
func FetchHelmChartMeta(ctx context.Context, holdURL, manifestDigest string) (*HelmChartMeta, error) {
    reqURL := fmt.Sprintf("%s%s?digest=%s",
        strings.TrimSuffix(holdURL, "/"),
        atproto.HoldGetImageConfig,
        url.QueryEscape(manifestDigest),
    )

    ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
    defer cancel()

    req, err := http.NewRequestWithContext(ctx, "GET", reqURL, nil)
    if err != nil {
        return nil, fmt.Errorf("build request: %w", err)
    }

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return nil, fmt.Errorf("fetch helm chart config: %w", err)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("hold returned status %d for %s", resp.StatusCode, reqURL)
    }

    var record struct {
        ConfigJSON string `json:"configJson"`
    }
    if err := json.NewDecoder(resp.Body).Decode(&record); err != nil {
        return nil, fmt.Errorf("parse image config response: %w", err)
    }

    var raw helmConfigJSON
    if err := json.Unmarshal([]byte(record.ConfigJSON), &raw); err != nil {
        return nil, fmt.Errorf("parse helm chart config JSON: %w", err)
    }

    meta := &HelmChartMeta{
        Name:        raw.Name,
        Version:     raw.Version,
        AppVersion:  raw.AppVersion,
        Type:        raw.Type,
        Description: raw.Description,
        KubeVersion: raw.KubeVersion,
        Home:        raw.Home,
        Icon:        raw.Icon,
        Sources:     raw.Sources,
        Keywords:    raw.Keywords,
        Annotations: raw.Annotations,
        Deprecated:  raw.Deprecated,
    }
    if meta.Type == "" {
        meta.Type = "application"
    }
    for _, m := range raw.Maintainers {
        meta.Maintainers = append(meta.Maintainers, HelmMaintainer(m))
    }
    for _, d := range raw.Dependencies {
        meta.Dependencies = append(meta.Dependencies, HelmDependency(d))
    }
    return meta, nil
}
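A sketch of a call site for `FetchHelmChartMeta` (the hold URL and digest are illustrative):

    meta, err := holdclient.FetchHelmChartMeta(ctx, "https://hold.example.com", "sha256:abc123")
    if err != nil {
        return err
    }
    fmt.Printf("%s %s (app %s)\n", meta.Name, meta.Version, meta.AppVersion)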
pkg/appview/public/js/bundle.min.js (vendored, 10 lines): file diff suppressed because one or more lines are too long
@@ -438,6 +438,11 @@ async function deleteUntaggedManifests(repository) {
   const confirmBtn = document.getElementById('confirm-untagged-delete-btn');
   const originalText = confirmBtn.textContent;
 
+  const restoreButton = () => {
+    confirmBtn.disabled = false;
+    confirmBtn.textContent = originalText;
+  };
+
   try {
     confirmBtn.disabled = true;
     confirmBtn.textContent = 'Deleting...';
@@ -449,26 +454,40 @@ async function deleteUntaggedManifests(repository) {
       body: JSON.stringify({ repo: repository }),
     });
 
-    const data = await response.json();
+    const raw = await response.text();
+    let data = null;
+    try {
+      data = raw ? JSON.parse(raw) : null;
+    } catch (_) {
+      // Non-JSON body (e.g., upstream proxy error page) — fall through.
+    }
 
     if (response.ok) {
+      const deleted = (data && data.deleted) || 0;
+      const failed = (data && data.failed) || 0;
       closeDialog(document.getElementById('untagged-delete-modal'));
-      showToast(`Deleted ${data.deleted} untagged manifest(s)`, 'success');
-      if (data.deleted > 0) {
-        location.reload();
+      if (failed > 0) {
+        showToast(`Deleted ${deleted} of ${deleted + failed} untagged manifest(s); ${failed} failed`, 'error');
+      } else if (deleted > 0) {
+        showToast(`Deleted ${deleted} untagged manifest(s)`, 'success');
+      } else {
+        showToast('No untagged manifests to delete', 'info');
       }
-      confirmBtn.disabled = false;
-      confirmBtn.textContent = originalText;
+      if (deleted > 0) {
+        location.reload();
+        return;
+      }
+      restoreButton();
     } else {
-      showToast(`Failed to delete untagged manifests: ${data.error || 'Unknown error'}`, 'error');
-      confirmBtn.disabled = false;
-      confirmBtn.textContent = originalText;
+      const errMsg = (data && data.error) || raw || `HTTP ${response.status}`;
+      const partial = data && data.deleted ? ` (${data.deleted} succeeded before failure)` : '';
+      showToast(`Failed to delete untagged manifests: ${errMsg}${partial}`, 'error');
+      restoreButton();
     }
   } catch (err) {
     console.error('Error deleting untagged manifests:', err);
     showToast(`Error: ${err.message}`, 'error');
-    confirmBtn.disabled = false;
-    confirmBtn.textContent = originalText;
+    restoreButton();
   }
 }
@@ -613,6 +632,51 @@ document.addEventListener('DOMContentLoaded', () => {
   if (sel) sel.addEventListener('change', () => updatePullCommand(sel.value));
 });
 
+// Helm install/pull switcher. Persisted via localStorage (no profile setting
+// — too minor to round-trip through the API). Uses event delegation +
+// htmx:afterSettle so it survives the repo page's HTMX tag swaps; binding
+// on DOMContentLoaded alone would only catch the first render.
+function helmCmdSwitcher_render(mode) {
+  const container = document.getElementById('helm-cmd-container');
+  if (!container) return;
+  const registryURL = container.dataset.registryUrl;
+  const ownerHandle = container.dataset.ownerHandle;
+  const repoName = container.dataset.repoName;
+  const tag = container.dataset.tag || '';
+  const ociRef = 'oci://' + registryURL + '/' + ownerHandle + '/' + repoName;
+  const versionFlag = tag ? ' --version ' + tag : '';
+  const cmd = mode === 'pull'
+    ? 'helm pull ' + ociRef + versionFlag
+    : 'helm install ' + repoName + ' ' + ociRef + versionFlag;
+  const display = document.getElementById('helm-cmd-display');
+  if (!display) return;
+  const code = display.querySelector('code');
+  if (code) code.textContent = cmd;
+  const btn = display.querySelector('[data-cmd]');
+  if (btn) btn.dataset.cmd = cmd;
+}
+
+function helmCmdSwitcher_apply() {
+  const sel = document.getElementById('helm-cmd-switcher');
+  if (!sel) return;
+  const saved = lsGet('helm-cmd');
+  if (saved === 'install' || saved === 'pull') {
+    sel.value = saved;
+  }
+  helmCmdSwitcher_render(sel.value);
+}
+
+document.addEventListener('DOMContentLoaded', helmCmdSwitcher_apply);
+document.body.addEventListener('htmx:afterSettle', helmCmdSwitcher_apply);
+
+// Delegated change handler: works on the initial render AND on any switcher
+// HTMX swaps into the DOM later (repo page tag switch).
+document.addEventListener('change', (e) => {
+  if (!e.target || e.target.id !== 'helm-cmd-switcher') return;
+  lsSet('helm-cmd', e.target.value);
+  helmCmdSwitcher_render(e.target.value);
+});
+
 // Install page: platform tab switcher. Each .platform-tab has data-platform
 // pointing at a sibling panel (#<platform>-content). No-op off the install page.
 document.addEventListener('DOMContentLoaded', () => {
@@ -202,7 +202,7 @@ document.body.addEventListener('htmx:beforeSwap', () => {
 function initTabController() {
   if (!document.getElementById('tag-content')) return;
 
-  const validTabs = ['overview', 'layers', 'vulns', 'sbom', 'artifacts'];
+  const validTabs = ['overview', 'layers', 'vulns', 'sbom', 'artifacts', 'chart'];
   // State per target id: 'loading' while a request is in-flight, 'loaded'
   // on success. On error we clear the entry so the retry button can
   // trigger a fresh fetch; without a separate 'loading' marker, a failing
@@ -328,6 +328,7 @@ function initTabController() {
     if (tabId === 'layers') { const u = contentUrl('layers'); if (u) lazyLoad('layers-content', u); }
     if (tabId === 'vulns') { const u = contentUrl('vulns'); if (u) lazyLoad('vulns-content', u); }
     if (tabId === 'sbom') { const u = contentUrl('sbom'); if (u) lazyLoad('sbom-content', u); }
+    if (tabId === 'chart') { const u = contentUrl('chart'); if (u) lazyLoad('chart-content', u); }
   };
 
   function initTabs() {
@@ -338,6 +339,7 @@ function initTabController() {
       ['layers-tab-btn', 'layers-content', () => contentUrl('layers')],
       ['vulns-tab-btn', 'vulns-content', () => contentUrl('vulns')],
       ['sbom-tab-btn', 'sbom-content', () => contentUrl('sbom')],
+      ['chart-tab-btn', 'chart-content', () => contentUrl('chart')],
     ];
     prefetch.forEach(([btnId, targetId, urlFn]) => {
       const btn = document.getElementById(btnId);
@@ -386,7 +388,7 @@ function initTabController() {
     if (e.target.tagName === 'INPUT' || e.target.tagName === 'TEXTAREA' ||
         e.target.tagName === 'SELECT' || e.target.isContentEditable) return;
     if (e.ctrlKey || e.metaKey || e.altKey) return;
-    const map = { o: 'overview', l: 'layers', v: 'vulns', s: 'sbom', a: 'artifacts' };
+    const map = { o: 'overview', l: 'layers', v: 'vulns', s: 'sbom', a: 'artifacts', c: 'chart' };
     const tab = map[e.key.toLowerCase()];
     if (tab && validTabs.indexOf(tab) !== -1) window.switchRepoTab(tab);
   });
@@ -9,13 +9,26 @@
   Anonymous users: saves to localStorage.
 */}}
 {{ if eq .ArtifactType "helm-chart" }}
-  <div class="space-y-2">
-    <p class="text-sm font-medium text-base-content/70">Pull this chart</p>
-    {{ if .Tag }}
-      {{ template "docker-command" (print "helm pull oci://" .RegistryURL "/" .OwnerHandle "/" .RepoName " --version " .Tag) }}
-    {{ else }}
-      {{ template "docker-command" (print "helm pull oci://" .RegistryURL "/" .OwnerHandle "/" .RepoName) }}
-    {{ end }}
+  {{/* Helm chart: small install/pull toggle. JS in app.js wires the
+       switcher and persists the choice in localStorage. Default = install. */}}
+  {{ $versionFlag := "" }}
+  {{ if .Tag }}{{ $versionFlag = print " --version " .Tag }}{{ end }}
+  {{ $ociRef := print "oci://" .RegistryURL "/" .OwnerHandle "/" .RepoName }}
+  <div class="space-y-2" id="helm-cmd-container"
+       data-registry-url="{{ .RegistryURL }}"
+       data-owner-handle="{{ .OwnerHandle }}"
+       data-repo-name="{{ .RepoName }}"
+       data-tag="{{ if .Tag }}{{ .Tag }}{{ end }}">
+    <label for="helm-cmd-switcher" class="text-sm font-medium text-base-content/70">Use this chart</label>
+    <div class="flex items-center gap-2">
+      <select id="helm-cmd-switcher" class="select select-xs select-bordered w-auto">
+        <option value="install" selected>helm install</option>
+        <option value="pull">helm pull</option>
+      </select>
+      <div id="helm-cmd-display" class="flex-1 min-w-0" aria-live="polite">
+        {{ template "docker-command" (print "helm install " .RepoName " " $ociRef $versionFlag) }}
+      </div>
+    </div>
   </div>
 {{ else }}
   <div class="space-y-2" id="pull-cmd-container"
@@ -23,22 +23,44 @@
 <div class="card bg-base-200 shadow-sm border border-base-300 p-6 space-y-3">
   <div class="flex flex-wrap items-start justify-between gap-4">
     <div class="space-y-2 min-w-0">
-      <!-- Title: tags or truncated digest -->
+      {{/* Helm chart with parsed metadata: lead with chart name + version so the page reads as a chart, not a digest. */}}
+      {{ $helmMeta := "" }}
+      {{ if and .HelmContent .HelmContent.Meta }}{{ $helmMeta = .HelmContent.Meta }}{{ end }}
+      <!-- Title -->
       <div class="flex flex-wrap items-center gap-2">
-        {{ if .Manifest.Tags }}
+        {{ if $helmMeta }}
+          <h1 class="text-xl font-bold flex items-center gap-2 min-w-0">
+            {{ icon "helm" "size-5 text-base-content/70 shrink-0" }}
+            <span class="truncate" title="{{ $helmMeta.Name }}">{{ $helmMeta.Name }}</span>
+            {{ if $helmMeta.Version }}<span class="text-base-content/70 font-normal">{{ $helmMeta.Version }}</span>{{ end }}
+          </h1>
+          <span class="badge badge-md badge-soft badge-helm">Helm chart</span>
+        {{ else if .Manifest.Tags }}
           <h1 class="text-xl font-bold flex flex-wrap gap-x-1 items-center min-w-0">{{ range $i, $tag := .Manifest.Tags }}{{ if lt $i 3 }}{{ if $i }}<span aria-hidden="true">,</span>{{ end }}<span class="inline-block max-w-[24ch] truncate align-baseline" title="{{ $tag }}">{{ $tag }}</span>{{ end }}{{ end }}{{ if gt (len .Manifest.Tags) 3 }}<span class="text-sm font-normal text-base-content/60" title="{{ range $i, $tag := .Manifest.Tags }}{{ if $i }}, {{ end }}{{ $tag }}{{ end }}">+{{ sub (len .Manifest.Tags) 3 }} more</span>{{ end }}</h1>
         {{ else }}
           <h1 class="text-xl font-bold font-mono" title="{{ .Manifest.Digest }}">{{ truncateDigest (trimPrefix "sha256:" .Manifest.Digest) 16 }}</h1>
         {{ end }}
         {{ if .Manifest.IsManifestList }}
           <span class="badge badge-md badge-soft badge-accent">Multi-arch</span>
-        {{ else if eq .Manifest.ArtifactType "helm-chart" }}
+        {{ else if and (eq .Manifest.ArtifactType "helm-chart") (not $helmMeta) }}
           <span class="badge badge-md badge-soft badge-helm">{{ icon "helm" "size-3" }} Helm</span>
         {{ end }}
         {{ if .Manifest.HasAttestations }}
           <span class="badge badge-md badge-soft badge-success">{{ icon "shield-check" "size-3" }} Attested</span>
         {{ end }}
       </div>
+      {{ if $helmMeta }}
+        {{/* Subtitle: appVersion + chart type */}}
+        <div class="text-sm text-base-content/70">
+          {{ if $helmMeta.AppVersion }}appVersion {{ $helmMeta.AppVersion }}{{ if $helmMeta.Type }} · {{ end }}{{ end }}{{ if $helmMeta.Type }}{{ $helmMeta.Type }} chart{{ end }}
+          {{ if $helmMeta.Deprecated }}<span class="badge badge-sm badge-warning ml-2">Deprecated</span>{{ end }}
+        </div>
+        {{ if .Manifest.Tags }}
+          <div class="flex flex-wrap gap-1 text-xs">
+            {{ range .Manifest.Tags }}<span class="badge badge-ghost badge-sm font-mono" title="tag">{{ . }}</span>{{ end }}
+          </div>
+        {{ end }}
+      {{ end }}
       <!-- Digest (small) -->
       <div class="flex items-center gap-2 text-base-content/70">
         <code class="font-mono text-xs" title="{{ .Manifest.Digest }}">{{ truncateDigest (trimPrefix "sha256:" .Manifest.Digest) 16 }}</code>
@@ -81,7 +103,7 @@
      hx-swap="innerHTML">
 </div>
 
-<!-- Content: Layers + Vulnerabilities -->
+<!-- Content: Layers + Vulnerabilities (or chart info for helm) -->
 <div id="digest-content" aria-live="polite" aria-busy="false">
   {{ if .Manifest.IsManifestList }}
     {{ if .Manifest.Platforms }}
@@ -98,6 +120,8 @@
     {{ else }}
       <p class="py-12 text-center text-base-content/60">No platform manifests found for this image index.</p>
     {{ end }}
+  {{ else if eq .Manifest.ArtifactType "helm-chart" }}
+    {{ template "helm-digest-content" (dict "Manifest" .Manifest "HelmContent" .HelmContent "RegistryURL" .RegistryURL "OwnerHandle" .Owner.Handle "RepoName" .Repository "OciClient" .OciClient "IsLoggedIn" (ne .User nil)) }}
   {{ else }}
     {{ template "digest-content" . }}
   {{ end }}
@@ -120,29 +120,9 @@
       </div>
     {{ end }}
   </div>
-  {{ if .SelectedTag.Info.IsMultiArch }}
-    <div id="platform-badges" class="flex flex-wrap items-center gap-1">
-      {{ range .SelectedTag.Info.Platforms }}
-        <span class="badge badge-sm badge-outline font-mono">{{ .OS }}/{{ .Architecture }}{{ if .Variant }}/{{ .Variant }}{{ end }}</span>
-      {{ end }}
-    </div>
-  {{ else if gt (len .SelectedTag.Info.Platforms) 0 }}
-    {{ $p := index .SelectedTag.Info.Platforms 0 }}
-    {{ if $p.OS }}
-      <div id="platform-badges">
-        <span class="badge badge-sm badge-outline font-mono">{{ $p.OS }}/{{ $p.Architecture }}{{ if $p.Variant }}/{{ $p.Variant }}{{ end }}</span>
-      </div>
-    {{ end }}
-  {{ end }}
-  {{ if .SelectedTag.Info.HasAttestations }}
-    <button class="badge badge-sm badge-soft badge-success cursor-pointer hover:opacity-80"
-            hx-get="/api/attestation-details?digest={{ .SelectedTag.Info.Digest | urlquery }}&did={{ .Owner.DID | urlquery }}&repo={{ .Repository.Name | urlquery }}"
-            hx-target="#attestation-modal-body"
-            hx-swap="innerHTML"
-            data-action="show-modal" data-modal-id="attestation-detail-modal">
-      {{ icon "shield-check" "size-3" }} Attested
-    </button>
-  {{ end }}
+  {{/* Platform / attestation badges live INSIDE repo-tag-section
+       so they re-render with each tag swap. Putting them here
+       would freeze them at the page's initial-load tag. */}}
 </div>
 {{ end }}
pkg/appview/templates/partials/helm-digest-content.html (new file, 92 lines)
@@ -0,0 +1,92 @@
{{ define "helm-digest-content" }}
{{/*
  Helm-aware digest content WITH the install/pull command. Used on the
  digest detail page (/d/<handle>/<repo>/<digest>) where there's no other
  install switcher above. The repo page's chart tab uses helm-chart-info
  (without the install card) since repo-tag-section already renders an
  install switcher at the top.

  Expects dict: Manifest, HelmContent, RegistryURL, OwnerHandle, RepoName,
  OciClient, IsLoggedIn.
*/}}
<div class="space-y-6">
  {{ if .HelmContent.HoldUnreachable }}
    <div class="alert alert-warning" role="status">
      {{ icon "wifi-off" "size-4 shrink-0" }}
      <div>
        <p class="font-medium">We couldn't reach the hold</p>
        <p class="text-sm">Chart metadata is stored on the hold. It may be offline right now.</p>
      </div>
    </div>
  {{ end }}

  {{/* Install / Pull command — reuses the existing switcher component
       (helm branch renders an install/pull toggle). */}}
  {{ $tag := "" }}
  {{ if .Manifest.Tags }}{{ $tag = index .Manifest.Tags 0 }}{{ end }}
  {{ if and (not $tag) .HelmContent.Meta }}{{ $tag = .HelmContent.Meta.Version }}{{ end }}
  <div class="card bg-base-200 border border-base-300 p-6">
    {{ template "pull-command-switcher" (dict
      "RegistryURL" .RegistryURL
      "OwnerHandle" .OwnerHandle
      "RepoName" .RepoName
      "Tag" $tag
      "ArtifactType" "helm-chart"
      "OciClient" .OciClient
      "IsLoggedIn" .IsLoggedIn
    ) }}
  </div>

  {{ template "helm-chart-info" . }}
</div>
{{ end }}

{{ define "helm-chart-info" }}
{{/*
  Chart metadata + artifact card + scanning note. No install switcher —
  callers are expected to render one elsewhere on the page if needed.
  Standard tab-content padding (space-y-4 min-w-0 pt-6) matches the
  layers/vulns/sbom partials so the chart tab lines up visually.

  Expects dict: HelmContent (and the rest is ignored).
*/}}
<div class="space-y-4 min-w-0 pt-6">
  <div class="grid grid-cols-1 lg:grid-cols-3 gap-6">
    {{/* Metadata (left, ~2/3) */}}
    <div class="lg:col-span-2 space-y-4 min-w-0">
      {{ if .HelmContent.Meta }}
        {{ template "helm-metadata" .HelmContent.Meta }}
      {{ else if .HelmContent.MetaFetchFailed }}
        <div class="card bg-base-200 shadow-sm border border-base-300 p-6">
          <p class="font-medium text-base-content">Couldn't read chart metadata</p>
          <p class="text-sm text-base-content/70 mt-1">The hold is reachable but didn't return a valid Chart.yaml for this digest. The chart tarball is still pullable below.</p>
        </div>
      {{ end }}
    </div>

    {{/* Artifact + scanning note (right, ~1/3) */}}
    <div class="space-y-4 min-w-0">
      <div class="card bg-base-200 shadow-sm border border-base-300 p-6 space-y-3">
        <h2 class="text-sm font-semibold uppercase tracking-wider text-base-content/70">Chart artifact</h2>
        {{ if .HelmContent.Tarball }}
          <div class="space-y-2">
            <p class="font-mono text-xs break-all text-base-content/70" title="{{ .HelmContent.Tarball.MediaType }}">{{ .HelmContent.Tarball.MediaType }}</p>
            <p class="text-lg font-bold">{{ humanizeBytes .HelmContent.Tarball.Size }}</p>
            <div class="flex items-center gap-2 text-xs">
              <code class="font-mono break-all" title="{{ .HelmContent.Tarball.Digest }}">{{ truncateDigest (trimPrefix "sha256:" .HelmContent.Tarball.Digest) 16 }}</code>
              <button class="btn btn-ghost btn-xs" data-action="copy" data-copy="{{ .HelmContent.Tarball.Digest }}" aria-label="Copy digest">{{ icon "copy" "size-3" }}</button>
            </div>
          </div>
        {{ else }}
          <p class="text-sm text-base-content/70">No chart tarball recorded for this digest.</p>
        {{ end }}
      </div>

      <div class="card bg-base-200 shadow-sm border border-base-300 p-6 space-y-2">
        <h2 class="text-sm font-semibold uppercase tracking-wider text-base-content/70">About scanning</h2>
        <p class="text-sm text-base-content/70">ATCR doesn't scan helm charts for vulnerabilities. Run a chart linter such as <a class="link link-primary" href="https://github.com/stackrox/kube-linter" rel="noopener noreferrer">kube-linter</a> in your CI before publishing.</p>
      </div>
    </div>
  </div>
</div>
{{ end }}
115
pkg/appview/templates/partials/helm-metadata.html
Normal file
@@ -0,0 +1,115 @@
{{ define "helm-metadata" }}
|
||||
{{/*
|
||||
Renders parsed Chart.yaml metadata as a description list. Expects a
|
||||
HelmChartMeta value. Rows are skipped silently when fields are empty so
|
||||
sparse charts don't render with a wall of "—" cells.
|
||||
*/}}
|
||||
<div class="card bg-base-200 shadow-sm border border-base-300 p-6 space-y-4 min-w-0">
|
||||
{{ if .Description }}
|
||||
<div>
|
||||
<h2 class="text-sm font-semibold uppercase tracking-wider text-base-content/70 mb-2">Description</h2>
|
||||
<p class="text-sm leading-relaxed">{{ .Description }}</p>
|
||||
</div>
|
||||
{{ end }}
|
||||
|
||||
<dl class="grid grid-cols-[max-content_1fr] gap-x-6 gap-y-2 text-sm">
|
||||
{{ if .Type }}
|
||||
<dt class="font-medium text-base-content/70">Type</dt>
|
||||
<dd>{{ .Type }}</dd>
|
||||
{{ end }}
|
||||
{{ if .AppVersion }}
|
||||
<dt class="font-medium text-base-content/70">App version</dt>
|
||||
<dd class="font-mono text-xs">{{ .AppVersion }}</dd>
|
||||
{{ end }}
|
||||
{{ if .KubeVersion }}
|
||||
<dt class="font-medium text-base-content/70">Kube version</dt>
|
||||
<dd class="font-mono text-xs">{{ .KubeVersion }}</dd>
|
||||
{{ end }}
|
||||
{{ if .Home }}
|
||||
<dt class="font-medium text-base-content/70">Home</dt>
|
||||
<dd class="min-w-0 truncate">
|
||||
{{ if or (hasPrefix .Home "http://") (hasPrefix .Home "https://") }}
|
||||
<a href="{{ .Home }}" class="link link-primary" rel="noopener noreferrer" title="{{ .Home }}">{{ .Home }}</a>
|
||||
{{ else }}
|
||||
{{/* Non-http schemes (oci://, etc.) get sanitized to #ZgotmplZ
|
||||
in href context. Render as plain text so the user can
|
||||
still see and copy the value. */}}
|
||||
<code class="font-mono text-xs break-all" title="{{ .Home }}">{{ .Home }}</code>
|
||||
{{ end }}
|
||||
</dd>
|
||||
{{ end }}
|
||||
{{ if .Sources }}
|
||||
<dt class="font-medium text-base-content/70">Sources</dt>
|
||||
<dd class="space-y-1 min-w-0">
|
||||
{{ range .Sources }}
|
||||
<div class="truncate">
|
||||
{{ if or (hasPrefix . "http://") (hasPrefix . "https://") }}
|
||||
<a href="{{ . }}" class="link link-primary" rel="noopener noreferrer" title="{{ . }}">{{ . }}</a>
|
||||
{{ else }}
|
||||
<code class="font-mono text-xs break-all" title="{{ . }}">{{ . }}</code>
|
||||
{{ end }}
|
||||
</div>
|
||||
{{ end }}
|
||||
</dd>
|
||||
{{ end }}
|
||||
{{ if .Keywords }}
|
||||
<dt class="font-medium text-base-content/70">Keywords</dt>
|
||||
<dd class="flex flex-wrap gap-1">
|
||||
{{ range .Keywords }}<span class="badge badge-ghost badge-sm">{{ . }}</span>{{ end }}
|
||||
</dd>
|
||||
{{ end }}
|
||||
{{ if .Maintainers }}
|
||||
<dt class="font-medium text-base-content/70">Maintainers</dt>
|
||||
<dd class="space-y-1">
|
||||
{{ range .Maintainers }}
|
||||
<div class="text-sm">
|
||||
{{ if and .URL (or (hasPrefix .URL "http://") (hasPrefix .URL "https://")) }}
|
||||
<a href="{{ .URL }}" class="link link-primary" rel="noopener noreferrer">{{ if .Name }}{{ .Name }}{{ else }}{{ .URL }}{{ end }}</a>
|
||||
{{ else if .Name }}
|
||||
{{ .Name }}
|
||||
{{ else if .URL }}
|
||||
<code class="font-mono text-xs">{{ .URL }}</code>
|
||||
{{ end }}
|
||||
{{ if .Email }}<span class="text-base-content/60"><{{ .Email }}></span>{{ end }}
|
||||
</div>
|
||||
{{ end }}
|
||||
</dd>
|
||||
{{ end }}
|
||||
</dl>
|
||||
|
||||
{{ if .Dependencies }}
|
||||
<div class="pt-2 border-t border-base-300">
|
||||
<h2 class="text-sm font-semibold uppercase tracking-wider text-base-content/70 mb-2">Dependencies</h2>
|
||||
<div class="overflow-x-auto">
|
||||
<table class="table table-xs">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Name</th>
|
||||
<th>Version</th>
|
||||
<th>Repository</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{ range .Dependencies }}
|
||||
<tr>
|
||||
<td class="font-medium">{{ .Name }}{{ if .Alias }} <span class="text-base-content/60">({{ .Alias }})</span>{{ end }}</td>
|
||||
<td class="font-mono text-xs">{{ .Version }}</td>
|
||||
<td class="text-xs">
|
||||
{{ if .Repository }}
|
||||
{{ if or (hasPrefix .Repository "http://") (hasPrefix .Repository "https://") }}
|
||||
<a href="{{ .Repository }}" class="link link-primary" rel="noopener noreferrer">{{ .Repository }}</a>
|
||||
{{ else }}
|
||||
{{/* oci://, file://, etc. — render as code so it's copyable. */}}
|
||||
<code class="font-mono break-all">{{ .Repository }}</code>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
{{ end }}
|
||||
</div>
|
||||
{{ end }}
|
||||
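A runnable aside (not part of the commit) showing the html/template behavior the #ZgotmplZ comments above work around: in an href attribute context, Go's html/template rewrites URLs whose scheme it can't prove safe to the literal "#ZgotmplZ", which is why oci:// values are rendered as plain code instead of links.

package main

import (
	"html/template"
	"os"
)

func main() {
	t := template.Must(template.New("x").Parse(`<a href="{{.}}">link</a>` + "\n"))
	t.Execute(os.Stdout, "https://example.com/chart")    // <a href="https://example.com/chart">link</a>
	t.Execute(os.Stdout, "oci://registry.example/chart") // <a href="#ZgotmplZ">link</a>
}
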
@@ -1,6 +1,34 @@
{{ define "repo-tag-section" }}
<div id="tag-content" data-owner="{{ .Owner.Handle }}" data-repo="{{ .Repository.Name }}"{{ if .SelectedTag }} data-digest="{{ if and .SelectedTag.Info.IsMultiArch .SelectedTag.Info.Platforms }}{{ (index .SelectedTag.Info.Platforms 0).Digest }}{{ else }}{{ .SelectedTag.Info.Digest }}{{ end }}"{{ end }}>
{{ if .SelectedTag }}
{{/* Tag-scoped badges (platform / attested). These live INSIDE the
HTMX-swapped section so they re-render on each tag change.
Putting them in pages/repository.html freezes them at page load. */}}
<div class="flex flex-wrap items-center gap-2 mb-2 min-h-6">
{{ if .SelectedTag.Info.IsMultiArch }}
{{ range .SelectedTag.Info.Platforms }}
<span class="badge badge-sm badge-outline font-mono">{{ .OS }}/{{ .Architecture }}{{ if .Variant }}/{{ .Variant }}{{ end }}</span>
{{ end }}
<span class="badge badge-sm badge-soft badge-accent">Multi-arch</span>
{{ else if gt (len .SelectedTag.Info.Platforms) 0 }}
{{ $p := index .SelectedTag.Info.Platforms 0 }}
{{ if $p.OS }}
<span class="badge badge-sm badge-outline font-mono">{{ $p.OS }}/{{ $p.Architecture }}{{ if $p.Variant }}/{{ $p.Variant }}{{ end }}</span>
{{ end }}
{{ end }}
{{ if eq .ArtifactType "helm-chart" }}
<span class="badge badge-sm badge-soft badge-helm">{{ icon "helm" "size-3" }} Helm</span>
{{ end }}
{{ if .SelectedTag.Info.HasAttestations }}
<button class="badge badge-sm badge-soft badge-success cursor-pointer hover:opacity-80"
hx-get="/api/attestation-details?digest={{ .SelectedTag.Info.Digest | urlquery }}&did={{ .Owner.DID | urlquery }}&repo={{ .Repository.Name | urlquery }}"
hx-target="#attestation-modal-body"
hx-swap="innerHTML"
data-action="show-modal" data-modal-id="attestation-detail-modal">
{{ icon "shield-check" "size-3" }} Attested
</button>
{{ end }}
</div>
<!-- Pull Command with Client Switcher -->
{{ template "pull-command-switcher" (dict "RegistryURL" .RegistryURL "OwnerHandle" .Owner.Handle "RepoName" .Repository.Name "Tag" .SelectedTag.Info.Tag.Tag "ArtifactType" .ArtifactType "OciClient" .OciClient "IsLoggedIn" (ne .User nil)) }}

@@ -87,6 +115,19 @@
Overview
</button>
{{ if .SelectedTag }}
{{ if eq .ArtifactType "helm-chart" }}
{{/* Helm charts: a single Chart tab replaces Layers/Vulns/SBOM —
those concepts don't apply. */}}
<button class="repo-tab shrink-0 whitespace-nowrap px-4 sm:px-6 py-3 text-sm font-medium border-b-2 border-transparent text-base-content/60 transition-colors cursor-pointer"
data-tab="chart"
role="tab"
aria-selected="false"
aria-controls="tab-chart"
id="chart-tab-btn"
data-action="switch-repo-tab">
Chart
</button>
{{ else }}
<button class="repo-tab shrink-0 whitespace-nowrap px-4 sm:px-6 py-3 text-sm font-medium border-b-2 border-transparent text-base-content/60 transition-colors cursor-pointer"
data-tab="layers"
role="tab"
@@ -115,6 +156,7 @@
SBOM
</button>
{{ end }}
{{ end }}
<button class="repo-tab shrink-0 whitespace-nowrap px-4 sm:px-6 py-3 text-sm font-medium border-b-2 border-transparent text-base-content/60 transition-colors cursor-pointer"
data-tab="artifacts"
role="tab"
@@ -197,6 +239,15 @@
</div>

{{ if .SelectedTag }}
{{ if eq .ArtifactType "helm-chart" }}
<div id="tab-chart" class="repo-panel hidden" role="tabpanel" aria-labelledby="chart-tab-btn" tabindex="0">
<div id="chart-content">
<div class="flex justify-center py-12">
<span class="loading loading-spinner loading-lg"></span>
</div>
</div>
</div>
{{ else }}
<div id="tab-layers" class="repo-panel hidden" role="tabpanel" aria-labelledby="layers-tab-btn" tabindex="0">
<div id="layers-content">
<div class="flex justify-center py-12">
@@ -221,6 +272,7 @@
</div>
</div>
{{ end }}
{{ end }}

<div id="tab-artifacts" class="repo-panel hidden" role="tabpanel" aria-labelledby="artifacts-tab-btn" tabindex="0">
<div id="artifacts-content">

@@ -7,6 +7,8 @@
{{ icon "wifi-off" "size-4 shrink-0" }}
<span>We couldn't reach the hold to load the SBOM.</span>
</div>
{{ else if eq .SbomReason "not-applicable" }}
<p class="text-base-content/70">SBOMs aren't generated for this artifact type. ATCR only produces SBOMs for container images.</p>
{{ else if eq .SbomReason "fetch-failed" }}
<p class="text-base-content/70">SBOM data couldn't be loaded. Try refreshing in a minute.</p>
{{ else }}

@@ -6,6 +6,12 @@
</span>
{{ else if .NotScanned }}
<span class="badge badge-sm badge-ghost" title="No scan recorded yet">Not scanned</span>
{{ else if .Skipped }}
{{/* Artifact is intentionally not scanned (helm chart, in-toto, DSSE).
Render an empty span — the artifact-type badge already tells the user
this isn't a container image, so a separate "not scannable" pill is
redundant noise on listings. */}}
<span></span>
{{ else if .ScanFailed }}
<span class="badge badge-sm badge-warning" title="Scanner ran but produced no SBOM">{{ icon "alert-triangle" "size-3" }} Scan failed</span>
{{ else if eq .Total 0 }}

@@ -10,6 +10,11 @@
<p class="text-sm">Scan data is stored on the hold. It may be offline or unreachable right now.</p>
</div>
</div>
{{ else if eq .VulnReason "not-applicable" }}
<div class="py-8 text-sm text-base-content/70 max-w-prose">
<p class="font-medium text-base-content">Vulnerability scanning isn't applied to this artifact type</p>
<p class="mt-1">ATCR's scanner only runs on container images. This artifact was intentionally skipped.</p>
</div>
{{ else if eq .VulnReason "fetch-failed" }}
<div class="py-8 text-sm text-base-content/70 max-w-prose">
<p class="font-medium text-base-content">Scan data couldn't be loaded</p>

@@ -296,6 +296,8 @@ func Templates(overrides *BrandingOverrides) (*template.Template, error) {
return s
},

"hasPrefix": strings.HasPrefix,

"displayHoldDID": func(holdDID string) string {
// did:web:hold01.atcr.io → hold01.atcr.io
if strings.HasPrefix(holdDID, "did:web:") {
@@ -2182,8 +2182,17 @@ func (t *ScanRecord) MarshalCBOR(w io.Writer) error {
}

cw := cbg.NewCborWriter(w)
fieldCount := 15

if _, err := cw.Write([]byte{173}); err != nil {
if t.Status == "" {
fieldCount--
}

if t.Reason == "" {
fieldCount--
}

if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
return err
}

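Why the old hard-coded byte had to go: 173 is 0xAD, a CBOR map header that encodes the entry count directly (major type 5 in the top three bits, 13 entries in the low bits), so it could only ever describe the original 13-field record. A one-line illustration of that arithmetic, as an aside rather than project code:

header := byte(5<<5 | 13) // 0xA0 | 13 == 0xAD == 173: "map with exactly 13 entries"

With status and reason optional, the map size varies between 13 and 15, so the header must be computed per record as above.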
@@ -2298,6 +2307,58 @@ func (t *ScanRecord) MarshalCBOR(w io.Writer) error {
}
}

// t.Reason (string) (string)
if t.Reason != "" {

if len("reason") > 8192 {
return xerrors.Errorf("Value in field \"reason\" was too long")
}

if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("reason"))); err != nil {
return err
}
if _, err := cw.WriteString(string("reason")); err != nil {
return err
}

if len(t.Reason) > 8192 {
return xerrors.Errorf("Value in field t.Reason was too long")
}

if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Reason))); err != nil {
return err
}
if _, err := cw.WriteString(string(t.Reason)); err != nil {
return err
}
}

// t.Status (string) (string)
if t.Status != "" {

if len("status") > 8192 {
return xerrors.Errorf("Value in field \"status\" was too long")
}

if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("status"))); err != nil {
return err
}
if _, err := cw.WriteString(string("status")); err != nil {
return err
}

if len(t.Status) > 8192 {
return xerrors.Errorf("Value in field t.Status was too long")
}

if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Status))); err != nil {
return err
}
if _, err := cw.WriteString(string(t.Status)); err != nil {
return err
}
}

// t.UserDID (string) (string)
if len("userDid") > 8192 {
return xerrors.Errorf("Value in field \"userDid\" was too long")
@@ -2625,6 +2686,28 @@ func (t *ScanRecord) UnmarshalCBOR(r io.Reader) (err error) {

t.Medium = int64(extraI)
}
// t.Reason (string) (string)
case "reason":

{
sval, err := cbg.ReadStringWithMax(cr, 8192)
if err != nil {
return err
}

t.Reason = string(sval)
}
// t.Status (string) (string)
case "status":

{
sval, err := cbg.ReadStringWithMax(cr, 8192)
if err != nil {
return err
}

t.Status = string(sval)
}
// t.UserDID (string) (string)
case "userDid":

@@ -848,8 +848,18 @@ type ScanRecord struct {
Total int64 `json:"total" cborgen:"total"` // Total vulnerability count
ScannerVersion string `json:"scannerVersion" cborgen:"scannerVersion"` // Scanner version (e.g., "atcr-scanner-v1.0.0")
ScannedAt string `json:"scannedAt" cborgen:"scannedAt"` // RFC3339 timestamp of scan completion
Status string `json:"status,omitempty" cborgen:"status,omitempty"` // Scan outcome: "ok" (default if empty), "failed", or "skipped"
Reason string `json:"reason,omitempty" cborgen:"reason,omitempty"` // Optional reason for non-ok status (e.g., unscannable artifact type)
}

// Scan record status values. An empty Status field is treated as ScanStatusOK
// for back-compat with records written before the field was introduced.
const (
ScanStatusOK = "ok"
ScanStatusFailed = "failed"
ScanStatusSkipped = "skipped"
)

// NewScanRecord creates a new scan record
// manifestDigest: the manifest digest (e.g., "sha256:abc123...")
// userDID: the DID of the image owner (used to build the manifest AT-URI)
@@ -870,6 +880,41 @@ func NewScanRecord(manifestDigest, repository, userDID string, sbomBlob, vulnRep
Total: int64(total),
ScannerVersion: scannerVersion,
ScannedAt: time.Now().Format(time.RFC3339),
Status: ScanStatusOK,
}
}

// NewSkippedScanRecord creates a scan record marking an artifact as intentionally
// not scanned (e.g., helm charts, in-toto attestations). The stale-scan loop
// leaves these records alone since the outcome won't change without a code
// change in the scanner.
func NewSkippedScanRecord(manifestDigest, repository, userDID, reason, scannerVersion string) *ScanRecord {
return &ScanRecord{
Type: ScanCollection,
Manifest: BuildManifestURI(userDID, manifestDigest),
Repository: repository,
UserDID: userDID,
ScannerVersion: scannerVersion,
ScannedAt: time.Now().Format(time.RFC3339),
Status: ScanStatusSkipped,
Reason: reason,
}
}

// NewFailedScanRecord creates a scan record marking a scan attempt as failed
// (e.g., scanner crash, OOM, network error during fetch). The stale-scan loop
// will re-queue these records on the rescan interval — failures may be
// transient.
func NewFailedScanRecord(manifestDigest, repository, userDID, reason, scannerVersion string) *ScanRecord {
return &ScanRecord{
Type: ScanCollection,
Manifest: BuildManifestURI(userDID, manifestDigest),
Repository: repository,
UserDID: userDID,
ScannerVersion: scannerVersion,
ScannedAt: time.Now().Format(time.RFC3339),
Status: ScanStatusFailed,
Reason: reason,
}
}

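A minimal sketch of how a reader can centralize the back-compat rule documented above (EffectiveStatus is a hypothetical helper, not part of this commit): an empty Status on a legacy record must be read as "ok".

func EffectiveStatus(r *ScanRecord) string {
	// Records written before the status field existed carry an empty
	// string; treat them as successful scans.
	if r.Status == "" {
		return ScanStatusOK
	}
	return r.Status
}
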
@@ -522,6 +522,7 @@ func (ui *AdminUI) RegisterRoutes(r chi.Router) {
r.Post("/admin/api/gc/reconcile", ui.handleGCReconcile)
r.Post("/admin/api/gc/delete-records", ui.handleGCDeleteRecords)
r.Post("/admin/api/gc/delete-blobs", ui.handleGCDeleteBlobs)
r.Post("/admin/api/gc/backfill-configs-preview", ui.handleGCBackfillConfigsPreview)
r.Post("/admin/api/gc/backfill-configs", ui.handleGCBackfillConfigs)
r.Get("/admin/api/gc/status", ui.handleGCStatus)

@@ -137,6 +137,28 @@ func (ui *AdminUI) handleGCDeleteRecords(w http.ResponseWriter, r *http.Request)
})
}

// handleGCBackfillConfigsPreview starts a dry-run scan that counts how many
// manifests are missing an image config record without writing anything.
func (ui *AdminUI) handleGCBackfillConfigsPreview(w http.ResponseWriter, r *http.Request) {
if ui.gc == nil {
ui.renderTemplate(w, "partials/gc_error.html", struct{ Error string }{"GC not available"})
return
}

session := getSessionFromContext(r.Context())

if ui.gc.StartBackfillConfigsPreview() {
slog.Info("GC backfill configs preview started via admin panel", "by", session.DID)
}

progress := ui.gc.GetProgress()
ui.renderTemplate(w, "partials/gc_progress.html", gcProgressData{
Phase: progress.Phase,
Message: progress.Message,
OpType: progress.OperationType,
})
}

// handleGCBackfillConfigs starts image config backfill in the background
func (ui *AdminUI) handleGCBackfillConfigs(w http.ResponseWriter, r *http.Request) {
if ui.gc == nil {
@@ -222,6 +244,35 @@ func (ui *AdminUI) handleGCStatus(w http.ResponseWriter, r *http.Request) {
Preview *gc.GCPreview
}{Preview: preview})

case "backfill-configs-preview":
preview, _ := ui.gc.LastBackfillPreview()
if preview == nil {
ui.renderTemplate(w, "partials/gc_error.html", struct{ Error string }{"No backfill preview results available"})
return
}
slog.Info("GC backfill preview completed via admin panel",
"missing", preview.MissingCount,
"present", preview.PresentCount,
"usersAffected", preview.UsersAffected,
"duration", preview.Duration)
ui.renderTemplate(w, "partials/gc_backfill_preview.html", struct {
Preview *gc.BackfillConfigsPreview
}{Preview: preview})

case "backfill-configs":
result, _ := ui.gc.LastResult()
if result == nil {
ui.renderTemplate(w, "partials/gc_error.html", struct{ Error string }{"No results available"})
return
}
slog.Info("GC backfill completed via admin panel",
"created", result.RecordsReconciled,
"skipped", result.RecordsSkipped,
"duration", result.Duration)
ui.renderTemplate(w, "partials/gc_backfill_result.html", struct {
Result *gc.GCResult
}{Result: result})

default:
result, _ := ui.gc.LastResult()
if result == nil {

90
pkg/hold/admin/templates/partials/gc_backfill_preview.html
Normal file
@@ -0,0 +1,90 @@
{{define "partials/gc_backfill_preview.html"}}
|
||||
<div class="space-y-6">
|
||||
<div class="grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-4 gap-4">
|
||||
<div class="stats shadow bg-base-100">
|
||||
<div class="stat">
|
||||
<div class="stat-title">Missing</div>
|
||||
<div class="stat-value {{if .Preview.MissingCount}}text-warning{{end}}">{{.Preview.MissingCount}}</div>
|
||||
<div class="stat-desc">Would be created</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="stats shadow bg-base-100">
|
||||
<div class="stat">
|
||||
<div class="stat-title">Already Present</div>
|
||||
<div class="stat-value text-success">{{.Preview.PresentCount}}</div>
|
||||
<div class="stat-desc">Would be skipped</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="stats shadow bg-base-100">
|
||||
<div class="stat">
|
||||
<div class="stat-title">Manifests Checked</div>
|
||||
<div class="stat-value">{{.Preview.ManifestsChecked}}</div>
|
||||
<div class="stat-desc">Distinct from layer records</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="stats shadow bg-base-100">
|
||||
<div class="stat">
|
||||
<div class="stat-title">Users Affected</div>
|
||||
<div class="stat-value">{{.Preview.UsersAffected}}</div>
|
||||
<div class="stat-desc">{{formatDuration .Preview.Duration}}</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{{if .Preview.Missing}}
|
||||
<div class="collapse collapse-arrow bg-base-100 shadow-sm">
|
||||
<input type="checkbox" aria-label="Show manifests missing image configs" />
|
||||
<div class="collapse-title font-medium">
|
||||
{{ icon "file-plus" "size-4" }} Missing Image Configs ({{.Preview.MissingCount}}{{if gt .Preview.MissingCount (len .Preview.Missing)}}, showing first {{len .Preview.Missing}}{{end}})
|
||||
</div>
|
||||
<div class="collapse-content">
|
||||
<div class="overflow-x-auto">
|
||||
<table class="table table-sm table-fixed">
|
||||
<caption class="sr-only">Manifests missing image config records</caption>
|
||||
<colgroup>
|
||||
<col style="width: 30%">
|
||||
<col style="width: 30%">
|
||||
<col style="width: 40%">
|
||||
</colgroup>
|
||||
<thead>
|
||||
<tr>
|
||||
<th scope="col">Digest</th>
|
||||
<th scope="col">User</th>
|
||||
<th scope="col">Manifest</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{range .Preview.Missing}}
|
||||
<tr>
|
||||
<td><code class="text-xs font-mono truncate block max-w-full" title="{{.Digest}}">{{truncate .Digest 24}}</code></td>
|
||||
<td><code class="text-xs font-mono truncate block max-w-full" title="{{.UserDID}}">{{truncate .UserDID 24}}</code></td>
|
||||
<td><code class="text-xs font-mono truncate block max-w-full" title="{{.ManifestURI}}">{{truncate .ManifestURI 50}}</code></td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{{end}}
|
||||
|
||||
{{if .Preview.MissingCount}}
|
||||
<div class="flex flex-wrap items-center gap-3 mt-6">
|
||||
<button class="btn btn-warning gap-2"
|
||||
hx-post="/admin/api/gc/backfill-configs"
|
||||
hx-target="#gc-results"
|
||||
hx-swap="innerHTML"
|
||||
hx-confirm="Create {{.Preview.MissingCount}} image config records by reading config blobs from S3?">
|
||||
{{ icon "refresh-cw" "size-4" }}
|
||||
Backfill {{.Preview.MissingCount}} Image Configs
|
||||
</button>
|
||||
</div>
|
||||
<p class="text-sm text-base-content/50 mt-2">Backfill is idempotent: present records are left alone.</p>
|
||||
{{else}}
|
||||
<div class="alert alert-success">
|
||||
{{ icon "check-circle" "size-5" }}
|
||||
<span>Every manifest already has an image config record. Nothing to backfill.</span>
|
||||
</div>
|
||||
{{end}}
|
||||
</div>
|
||||
{{end}}
|
||||
31
pkg/hold/admin/templates/partials/gc_backfill_result.html
Normal file
@@ -0,0 +1,31 @@
{{define "partials/gc_backfill_result.html"}}
|
||||
<div class="space-y-6">
|
||||
<div class="grid grid-cols-1 sm:grid-cols-3 gap-4">
|
||||
<div class="stats shadow bg-base-100">
|
||||
<div class="stat">
|
||||
<div class="stat-title">Created</div>
|
||||
<div class="stat-value text-success">{{.Result.RecordsReconciled}}</div>
|
||||
<div class="stat-desc">New image config records</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="stats shadow bg-base-100">
|
||||
<div class="stat">
|
||||
<div class="stat-title">Skipped</div>
|
||||
<div class="stat-value">{{.Result.RecordsSkipped}}</div>
|
||||
<div class="stat-desc">Already had a record</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="stats shadow bg-base-100">
|
||||
<div class="stat">
|
||||
<div class="stat-title">Duration</div>
|
||||
<div class="stat-value text-lg">{{formatDuration .Result.Duration}}</div>
|
||||
<div class="stat-desc">Wall-clock time</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<p class="text-sm text-base-content/50">
|
||||
Run Scan again to verify nothing else is missing.
|
||||
</p>
|
||||
</div>
|
||||
{{end}}
|
||||
@@ -42,13 +42,12 @@
Scan for Orphans
</button>
<button class="btn btn-outline gap-2"
hx-post="/admin/api/gc/backfill-configs"
hx-post="/admin/api/gc/backfill-configs-preview"
hx-target="#gc-results"
hx-swap="innerHTML"
hx-confirm="Backfill image config records from OCI config blobs in S3?"
{{if .Running}}disabled{{end}}>
{{ icon "refresh-cw" "size-4" }}
Backfill Image Configs
Scan Image Configs
</button>
</div>

@@ -63,6 +63,26 @@ type GCPreview struct {
Duration time.Duration `json:"duration"`
}

// BackfillConfigCandidate identifies one manifest that's missing its image
// config record on the hold.
type BackfillConfigCandidate struct {
ManifestURI string `json:"manifestUri"`
UserDID string `json:"userDid"`
Digest string `json:"digest"`
}

// BackfillConfigsPreview is the dry-run output for the image-config backfill.
// No PDS writes or S3 fetches happen during preview — we only check which
// manifest digests already have an io.atcr.hold.image.config record.
type BackfillConfigsPreview struct {
Missing []BackfillConfigCandidate `json:"missing"`
MissingCount int `json:"missingCount"`
PresentCount int `json:"presentCount"`
ManifestsChecked int `json:"manifestsChecked"`
UsersAffected int `json:"usersAffected"`
Duration time.Duration `json:"duration"`
}

// GarbageCollector handles cleanup of orphaned blobs from storage
type GarbageCollector struct {
pds *pds.HoldPDS
@@ -80,10 +100,12 @@ type GarbageCollector struct {
running bool

// Last results (for admin panel display)
lastPreview *GCPreview
lastPreviewAt time.Time
lastResult *GCResult
lastResultAt time.Time
lastPreview *GCPreview
lastPreviewAt time.Time
lastResult *GCResult
lastResultAt time.Time
lastBackfillPreview *BackfillConfigsPreview
lastBackfillPreviewAt time.Time

// Progress tracking for background operations
phase string // "manifests", "records", "blobs", "deleting", "complete", "error"
@@ -105,6 +127,7 @@ type GCResult struct {
OrphanedBlobs int64 `json:"orphaned_blobs"`
ReferencedBlobs int64 `json:"referenced_blobs"`
RecordsReconciled int64 `json:"records_reconciled"`
RecordsSkipped int64 `json:"records_skipped"`
ManifestsChecked int64 `json:"manifests_checked"`
UsersChecked int64 `json:"users_checked"`
Duration time.Duration `json:"duration"`
@@ -174,7 +197,7 @@ func (gc *GarbageCollector) setProgress(phase, msg, opType string) {
type GCProgress struct {
Phase string // "manifests", "records", "blobs", "deleting", "complete", "error"
Message string
OperationType string // "preview", "run", "reconcile", "delete-records", "delete-blobs"
OperationType string // "preview", "run", "reconcile", "delete-records", "delete-blobs", "backfill-configs", "backfill-configs-preview"
Running bool
Error string
}
@@ -815,6 +838,16 @@ func (gc *GarbageCollector) reconcileMissingRecords(ctx context.Context, missing
}
}

// StartBackfillConfigsPreview launches a dry-run scan that classifies every
// manifest URI referenced from layer records as either already having an
// image config record or missing one. No PDS or S3 writes happen.
func (gc *GarbageCollector) StartBackfillConfigsPreview() bool {
return gc.startBackground("backfill-configs-preview", "records", "Scanning for manifests missing image config records...", func(ctx context.Context) error {
_, err := gc.doBackfillConfigsPreview(ctx)
return err
})
}

// StartBackfillConfigs launches image config backfill in the background.
// Creates io.atcr.hold.image.config records for manifests that don't have one yet
// by fetching OCI config blobs from S3.
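The startBackground guard itself isn't part of this diff; a rough sketch of how such a single-flight launcher typically looks (an assumption about its shape, not the project's actual implementation), so the two Start* methods above read in context:

// Assumed shape: refuse to start while another GC operation is running,
// otherwise mark running, record initial progress, and run fn on a goroutine.
func (gc *GarbageCollector) startBackground(opType, phase, msg string, fn func(context.Context) error) bool {
	gc.mu.Lock()
	if gc.running {
		gc.mu.Unlock()
		return false
	}
	gc.running = true
	gc.mu.Unlock()
	gc.setProgress(phase, msg, opType)
	go func() {
		defer func() {
			gc.mu.Lock()
			gc.running = false
			gc.mu.Unlock()
		}()
		if err := fn(context.Background()); err != nil {
			gc.setProgress("error", err.Error(), opType)
		}
	}()
	return true
}
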
@@ -825,33 +858,34 @@ func (gc *GarbageCollector) StartBackfillConfigs() bool {
})
}

// doBackfillConfigs creates image config records for manifests that are missing them.
func (gc *GarbageCollector) doBackfillConfigs(ctx context.Context) (*GCResult, error) {
// scanBackfillCandidates walks every layer record, dedupes the manifest URIs
// they reference, and buckets each one as either already having an image
// config record or missing it. Returns the missing candidates and a count of
// those already present.
//
// opType is the GC operationType used for progress messages so this helper
// can serve both the preview and the run.
func (gc *GarbageCollector) scanBackfillCandidates(ctx context.Context, opType string) (missing []BackfillConfigCandidate, presentCount int, err error) {
recordsIndex := gc.pds.RecordsIndex()
if recordsIndex == nil {
return nil, fmt.Errorf("records index not available")
return nil, 0, fmt.Errorf("records index not available")
}

// Step 1: Collect unique manifest URIs from layer records
manifestURIs := make(map[string]bool)
cursor := ""
totalScanned := 0

for {
records, nextCursor, err := recordsIndex.ListRecords(atproto.LayerCollection, 1000, cursor, true)
if err != nil {
return nil, fmt.Errorf("list layer records: %w", err)
records, nextCursor, listErr := recordsIndex.ListRecords(atproto.LayerCollection, 1000, cursor, true)
if listErr != nil {
return nil, 0, fmt.Errorf("list layer records: %w", listErr)
}

for _, rec := range records {
totalScanned++
layer, err := gc.decodeLayerRecord(ctx, rec)
if err != nil {
layer, decodeErr := gc.decodeLayerRecord(ctx, rec)
if decodeErr != nil {
continue
}
manifestURIs[layer.Manifest] = true
}

if nextCursor == "" {
break
}
@@ -862,36 +896,99 @@ func (gc *GarbageCollector) doBackfillConfigs(ctx context.Context) (*GCResult, e
"manifests", len(manifestURIs),
"layersScanned", totalScanned)

// Step 2: For each manifest, check if config record exists, create if not
start := time.Now()
result := &GCResult{}
created := int64(0)
skipped := int64(0)
processed := 0
httpClient := &http.Client{Timeout: 30 * time.Second}

for manifestURI := range manifestURIs {
processed++
gc.setProgress("records",
fmt.Sprintf("Backfilling configs (%d/%d manifests)...", processed, len(manifestURIs)),
"backfill-configs")
fmt.Sprintf("Checking image configs (%d/%d manifests)...", processed, len(manifestURIs)),
opType)

aturi, err := syntax.ParseATURI(manifestURI)
if err != nil {
gc.logger.Warn("Invalid manifest URI", "uri", manifestURI, "error", err)
aturi, parseErr := syntax.ParseATURI(manifestURI)
if parseErr != nil {
gc.logger.Warn("Invalid manifest URI", "uri", manifestURI, "error", parseErr)
continue
}

manifestDigest := "sha256:" + aturi.RecordKey().String()

// Check if config record already exists
if _, _, err := gc.pds.GetImageConfigRecord(ctx, manifestDigest); err == nil {
skipped++
if _, _, getErr := gc.pds.GetImageConfigRecord(ctx, manifestDigest); getErr == nil {
presentCount++
continue
}
missing = append(missing, BackfillConfigCandidate{
ManifestURI: manifestURI,
UserDID: aturi.Authority().String(),
Digest: manifestDigest,
})
}
return missing, presentCount, nil
}

userDID := aturi.Authority().String()
manifestRkey := aturi.RecordKey().String()
// doBackfillConfigsPreview runs scanBackfillCandidates and stores the result
// for the admin UI to display. The stored Missing slice is truncated to
// maxPreviewItems so the rendered preview stays small; the counts still
// reflect the full set.
func (gc *GarbageCollector) doBackfillConfigsPreview(ctx context.Context) (*BackfillConfigsPreview, error) {
start := time.Now()

missing, presentCount, err := gc.scanBackfillCandidates(ctx, "backfill-configs-preview")
if err != nil {
return nil, err
}

users := make(map[string]struct{}, len(missing))
for _, c := range missing {
users[c.UserDID] = struct{}{}
}

missingCount := len(missing)
display := missing
if len(display) > maxPreviewItems {
display = display[:maxPreviewItems]
}

preview := &BackfillConfigsPreview{
Missing: display,
MissingCount: missingCount,
PresentCount: presentCount,
ManifestsChecked: missingCount + presentCount,
UsersAffected: len(users),
Duration: time.Since(start),
}

gc.mu.Lock()
gc.lastBackfillPreview = preview
gc.lastBackfillPreviewAt = time.Now()
gc.mu.Unlock()

gc.logger.Info("Image config backfill preview complete",
"missing", missingCount,
"present", presentCount,
"usersAffected", preview.UsersAffected,
"duration", preview.Duration)
return preview, nil
}

// doBackfillConfigs creates image config records for manifests that are missing them.
func (gc *GarbageCollector) doBackfillConfigs(ctx context.Context) (*GCResult, error) {
start := time.Now()

missing, presentCount, err := gc.scanBackfillCandidates(ctx, "backfill-configs")
if err != nil {
return nil, err
}

result := &GCResult{RecordsSkipped: int64(presentCount)}
created := int64(0)
httpClient := &http.Client{Timeout: 30 * time.Second}

for i, candidate := range missing {
gc.setProgress("records",
fmt.Sprintf("Backfilling configs (%d/%d missing)...", i+1, len(missing)),
"backfill-configs")

userDID := candidate.UserDID
manifestRkey := strings.TrimPrefix(candidate.Digest, "sha256:")
manifestURI := candidate.ManifestURI
manifestDigest := candidate.Digest

pdsEndpoint, err := atproto.ResolveDIDToPDS(ctx, userDID)
if err != nil {
@@ -899,7 +996,6 @@ func (gc *GarbageCollector) doBackfillConfigs(ctx context.Context) (*GCResult, e
continue
}

// Fetch manifest via getRecord to get config digest
reqURL := fmt.Sprintf("%s/xrpc/com.atproto.repo.getRecord?repo=%s&collection=%s&rkey=%s",
pdsEndpoint,
url.QueryEscape(userDID),
@@ -938,14 +1034,12 @@ func (gc *GarbageCollector) doBackfillConfigs(ctx context.Context) (*GCResult, e
continue
}

// Fetch config blob from S3
configBytes, err := gc.s3.GetBytes(ctx, s3.BlobPath(manifest.Config.Digest))
if err != nil {
gc.logger.Warn("Failed to fetch config blob", "digest", manifest.Config.Digest, "error", err)
continue
}

// Create image config record
configRecord := atproto.NewImageConfigRecord(manifestURI, string(configBytes))
if _, _, err := gc.pds.CreateImageConfigRecord(ctx, configRecord, manifestDigest); err != nil {
gc.logger.Warn("Failed to create image config record", "manifest", manifestURI, "error", err)
@@ -963,7 +1057,9 @@ func (gc *GarbageCollector) doBackfillConfigs(ctx context.Context) (*GCResult, e
gc.lastResultAt = time.Now()
gc.mu.Unlock()

gc.logger.Info("Image config backfill complete", "created", created, "skipped", skipped)
gc.logger.Info("Image config backfill complete",
"created", created,
"skipped", result.RecordsSkipped)
return result, nil
}

@@ -1510,6 +1606,14 @@ func (gc *GarbageCollector) LastResult() (*GCResult, time.Time) {
return gc.lastResult, gc.lastResultAt
}

// LastBackfillPreview returns the most recent image-config backfill preview
// and when it was generated.
func (gc *GarbageCollector) LastBackfillPreview() (*BackfillConfigsPreview, time.Time) {
gc.mu.Lock()
defer gc.mu.Unlock()
return gc.lastBackfillPreview, gc.lastBackfillPreviewAt
}

// IsRunning returns whether a GC operation is currently in progress
func (gc *GarbageCollector) IsRunning() bool {
gc.mu.Lock()

@@ -79,12 +79,13 @@ type ScanJobEvent struct {

// ScannerMessage is a message received from scanner over WebSocket
type ScannerMessage struct {
Type string `json:"type"` // "ack", "result", "error"
Type string `json:"type"` // "ack", "result", "error", "skipped"
Seq int64 `json:"seq"` // Job sequence number
SBOM string `json:"sbom,omitempty"`
VulnReport string `json:"vulnReport,omitempty"`
Summary *VulnerabilitySummary `json:"summary,omitempty"`
Error string `json:"error,omitempty"`
Reason string `json:"reason,omitempty"` // Populated for "skipped" messages
}

// VulnerabilitySummary contains counts of vulnerabilities by severity
@@ -447,6 +448,8 @@ func (sb *ScanBroadcaster) handleReader(sub *ScanSubscriber) {
sb.handleResult(sub, msg)
case "error":
sb.handleError(sub, msg)
case "skipped":
sb.handleSkipped(sub, msg)
default:
slog.Warn("Unknown scanner message type",
"type", msg.Type,
@@ -579,12 +582,12 @@ func (sb *ScanBroadcaster) handleResult(sub *ScanSubscriber, msg ScannerMessage)
"total", msg.Summary.Total)
}

// handleError marks a job as failed and creates a scan record so the proactive
// scanner treats it as "stale" rather than "never scanned" (avoids retry loops).
// handleError marks a job as failed and creates a scan record so the stale
// loop won't immediately retry. Failed records still get retried on the
// rescan interval since failures may be transient (network, OOM, etc.).
func (sb *ScanBroadcaster) handleError(sub *ScanSubscriber, msg ScannerMessage) {
ctx := context.Background()

// Get job details to create failure scan record
var manifestDigest, repository, userDID string
err := sb.db.QueryRow(`
SELECT manifest_digest, repository, user_did
@@ -594,13 +597,9 @@ func (sb *ScanBroadcaster) handleError(sub *ScanSubscriber, msg ScannerMessage)
slog.Error("Failed to get job details for failure record",
"seq", msg.Seq, "error", err)
} else {
// Create a scan record with zero counts and nil blobs — marks it as
// "scanned" so the proactive scheduler won't retry until rescan interval.
// Nil blobs signal failure to the appview (successful scans always have blobs).
scanRecord := atproto.NewScanRecord(
scanRecord := atproto.NewFailedScanRecord(
manifestDigest, repository, userDID,
nil, nil, // no SBOM or vuln report — signals scan failure
0, 0, 0, 0, 0,
msg.Error,
"atcr-scanner-v1.0.0",
)
if _, _, err := sb.pds.CreateScanRecord(ctx, scanRecord); err != nil {
@@ -609,7 +608,6 @@ func (sb *ScanBroadcaster) handleError(sub *ScanSubscriber, msg ScannerMessage)
}
}

// Mark job as failed
_, err = sb.db.Exec(`
UPDATE scan_jobs SET status = 'failed', completed_at = ?
WHERE seq = ?
@@ -620,7 +618,6 @@ func (sb *ScanBroadcaster) handleError(sub *ScanSubscriber, msg ScannerMessage)
"error", err)
}

// Remove from in-flight tracking and wake dispatch loop
sb.removeInflight(manifestDigest)
sb.signalCompletion()

@@ -630,6 +627,51 @@ func (sb *ScanBroadcaster) handleError(sub *ScanSubscriber, msg ScannerMessage)
"error", msg.Error)
}

// handleSkipped marks a job complete and creates a scan record with
// status="skipped". The stale-scan loop will leave these records alone — the
// outcome won't change until the scanner gains support for the artifact type.
func (sb *ScanBroadcaster) handleSkipped(sub *ScanSubscriber, msg ScannerMessage) {
ctx := context.Background()

var manifestDigest, repository, userDID string
err := sb.db.QueryRow(`
SELECT manifest_digest, repository, user_did
FROM scan_jobs WHERE seq = ?
`, msg.Seq).Scan(&manifestDigest, &repository, &userDID)
if err != nil {
slog.Error("Failed to get job details for skip record",
"seq", msg.Seq, "error", err)
} else {
scanRecord := atproto.NewSkippedScanRecord(
manifestDigest, repository, userDID,
msg.Reason,
"atcr-scanner-v1.0.0",
)
if _, _, err := sb.pds.CreateScanRecord(ctx, scanRecord); err != nil {
slog.Error("Failed to store skipped scan record",
"seq", msg.Seq, "error", err)
}
}

_, err = sb.db.Exec(`
UPDATE scan_jobs SET status = 'completed', completed_at = ?
WHERE seq = ?
`, time.Now(), msg.Seq)
if err != nil {
slog.Error("Failed to mark scan job as completed (skipped)",
"seq", msg.Seq,
"error", err)
}

sb.removeInflight(manifestDigest)
sb.signalCompletion()

slog.Info("Scan job skipped",
"seq", msg.Seq,
"subscriberId", sub.id,
"reason", msg.Reason)
}

// drainPendingJobs sends pending/timed-out jobs to a newly connected scanner.
// Collects all pending rows first, closes cursor, then assigns and dispatches
// to avoid holding a SELECT cursor open during UPDATEs (prevents SQLite BUSY).
@@ -1051,6 +1093,14 @@ func (sb *ScanBroadcaster) runStalePass() {
continue
}

// Permanently-skipped records (helm charts, in-toto, etc.) won't
// change outcome on retry — leave them alone. Failed records still
// get retried since failures may be transient.
if scanRecord.Status == atproto.ScanStatusSkipped {
sb.removeInflight(manifestDigest)
continue
}

scannedAt, err := time.Parse(time.RFC3339, scanRecord.ScannedAt)
if err != nil {
sb.removeInflight(manifestDigest)

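A compact sketch of the retry policy this pass implements (shouldRescan is a hypothetical helper, not code from the commit): skips are permanent, while "ok" and "failed" records both age out on the rescan interval.

func shouldRescan(status string, scannedAt time.Time, rescanInterval time.Duration) bool {
	if status == atproto.ScanStatusSkipped {
		return false // outcome can't change without scanner support for the type
	}
	// "ok" (including legacy empty status) and "failed" both become stale.
	return time.Since(scannedAt) > rescanInterval
}
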
@@ -194,6 +194,13 @@ func (c *HoldClient) SendError(seq int64, errMsg string) {
c.sendJSON(scanner.ErrorMessage{Type: "error", Seq: seq, Error: errMsg})
}

// SendSkipped sends a skipped message for an artifact the scanner intentionally
// won't process (e.g., helm charts). Distinct from SendError so the hold can
// distinguish a permanent skip from a retryable failure.
func (c *HoldClient) SendSkipped(seq int64, reason string) {
c.sendJSON(scanner.SkippedMessage{Type: "skipped", Seq: seq, Reason: reason})
}

func (c *HoldClient) sendJSON(v any) {
c.mu.Lock()
defer c.mu.Unlock()

@@ -4,11 +4,11 @@ package scan

import (
"context"
"errors"
"fmt"
"log/slog"
"os"
"runtime"
"strings"
"sync"
"time"

@@ -18,6 +18,17 @@
"atcr.io/scanner/internal/queue"
)

// SkipError is returned by processJob when the scanner intentionally bypasses
// an artifact type it can't analyze (helm charts, in-toto attestations, DSSE).
// The worker dispatches these to hold via SendSkipped so the hold can mark
// the scan record "skipped" instead of "failed". Skipped records are never
// retried by the stale-scan loop; failures are.
type SkipError struct {
Reason string
}

func (e *SkipError) Error() string { return "skipped: " + e.Reason }

// WorkerPool manages a pool of scan workers
type WorkerPool struct {
cfg *config.Config
@@ -94,15 +105,20 @@ func (wp *WorkerPool) worker(ctx context.Context, id int) {

result, err := wp.processJob(ctx, job)
if err != nil {
logLevel := slog.LevelError
if strings.HasPrefix(err.Error(), "skipped:") {
logLevel = slog.LevelInfo
var skipErr *SkipError
if errors.As(err, &skipErr) {
slog.Info("Scan job skipped",
"worker_id", id,
"repository", job.Repository,
"reason", skipErr.Reason)
wp.client.SendSkipped(job.Seq, skipErr.Reason)
} else {
slog.Error("Scan job failed",
"worker_id", id,
"repository", job.Repository,
"error", err)
wp.client.SendError(job.Seq, err.Error())
}
slog.Log(ctx, logLevel, "Scan job failed",
"worker_id", id,
"repository", job.Repository,
"error", err)
wp.client.SendError(job.Seq, err.Error())
} else {
wp.client.SendResult(job.Seq, result)

@@ -138,9 +154,12 @@ var unscannableConfigTypes = map[string]bool{
func (wp *WorkerPool) processJob(ctx context.Context, job *scanner.ScanJob) (*scanner.ScanResult, error) {
startTime := time.Now()

// Skip non-container OCI artifacts (Helm charts, WASM modules, etc.)
// Skip non-container OCI artifacts (Helm charts, in-toto, DSSE, etc.).
// Returning *SkipError tells the worker dispatch loop to send a "skipped"
// message rather than an "error" — the hold marks these records as
// permanently skipped and won't retry them on the rescan interval.
if unscannableConfigTypes[job.Config.MediaType] {
return nil, fmt.Errorf("skipped: unscannable artifact type %s", job.Config.MediaType)
return nil, &SkipError{Reason: fmt.Sprintf("unscannable artifact type %s", job.Config.MediaType)}
}

// Ensure tmp dir exists

@@ -82,3 +82,14 @@ type ErrorMessage struct {
Seq int64 `json:"seq"`
Error string `json:"error"`
}

// SkippedMessage is sent from scanner to hold when an artifact is intentionally
// not scanned (e.g., helm charts, in-toto attestations). Distinct from
// ErrorMessage so the hold can mark the scan record as "skipped" rather than
// "failed" — the stale-scan loop will leave skipped records alone since the
// outcome won't change without a code change in the scanner.
type SkippedMessage struct {
Type string `json:"type"` // "skipped"
Seq int64 `json:"seq"`
Reason string `json:"reason"`
}
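For reference, an illustrative wire frame (seq invented; the reason string follows the processJob format above, using the standard Helm OCI config media type): a helm-chart skip would marshal as

// {"type":"skipped","seq":42,"reason":"unscannable artifact type application/vnd.cncf.helm.config.v1+json"}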