mirror of https://tangled.org/evan.jarrett.net/at-container-registry
synced 2026-04-20 16:40:29 +00:00
823 lines · 22 KiB · Go
// image-advisor fetches OCI image config, SBOM, and vulnerability data from
// the ATCR system and outputs a structured markdown report suitable for LLM
// analysis of container image improvements.
//
// Usage:
//
// go run ./cmd/image-advisor --url https://seamark.dev/r/therobbiedavis.com/listenarr --tag latest
// go run ./cmd/image-advisor --url https://seamark.dev/r/therobbiedavis.com/listenarr --digest sha256:abc...
// go run ./cmd/image-advisor --url https://seamark.dev/r/therobbiedavis.com/listenarr --platform linux/arm64
package main
|
|
|
|
import (
|
|
"context"
|
|
"encoding/json"
|
|
"flag"
|
|
"fmt"
|
|
"io"
|
|
"log"
|
|
"net/http"
|
|
"net/url"
|
|
"os"
|
|
"sort"
|
|
"strings"
|
|
"time"
|
|
|
|
"atcr.io/pkg/atproto"
|
|
)
|
|
|
|
// OCI config types (full config, not just history)

// ociFullConfig mirrors the parts of the OCI image config document that the
// report consumes: platform fields, the runtime config section, the build
// history, and the rootfs diff IDs.
type ociFullConfig struct {
	Architecture string             `json:"architecture"`
	OS           string             `json:"os"`
	Config       ociContainerConfig `json:"config"`
	History      []ociHistoryEntry  `json:"history"`
	RootFS       ociRootFS          `json:"rootfs"`
}

// ociContainerConfig is the runtime configuration section ("config") of the
// OCI image config.
type ociContainerConfig struct {
	Env          []string            `json:"Env"`
	Cmd          []string            `json:"Cmd"`
	Entrypoint   []string            `json:"Entrypoint"`
	WorkingDir   string              `json:"WorkingDir"`
	ExposedPorts map[string]struct{} `json:"ExposedPorts"`
	Labels       map[string]string   `json:"Labels"`
	User         string              `json:"User"`
	Volumes      map[string]struct{} `json:"Volumes"`
}

// ociHistoryEntry is one build step from the image config history. Entries
// with EmptyLayer set do not correspond to a layer blob in the manifest.
type ociHistoryEntry struct {
	Created    string `json:"created"`
	CreatedBy  string `json:"created_by"`
	EmptyLayer bool   `json:"empty_layer"`
	Comment    string `json:"comment"`
}

// ociRootFS lists the uncompressed layer digests of the image filesystem.
type ociRootFS struct {
	Type    string   `json:"type"`
	DiffIDs []string `json:"diff_ids"`
}
|
// Grype vulnerability report types

// grypeReport is the top-level Grype JSON scan output; only the list of
// matches is consumed here.
type grypeReport struct {
	Matches []grypeMatch `json:"matches"`
}

// grypeMatch pairs one vulnerability with the package it was found in.
type grypeMatch struct {
	Vulnerability grypeVuln    `json:"Vulnerability"`
	Package       grypePackage `json:"Package"`
}

// grypeVuln holds the vulnerability identifier plus its severity metadata
// and fix information.
type grypeVuln struct {
	ID       string        `json:"ID"`
	Metadata grypeMetadata `json:"Metadata"`
	Fix      grypeFix      `json:"Fix"`
}

// grypeMetadata carries the severity string (e.g. "Critical", "High").
type grypeMetadata struct {
	Severity string `json:"Severity"`
}

// grypeFix lists the package versions that resolve the vulnerability, if any;
// an empty Versions slice is treated as "unfixable" by the report.
type grypeFix struct {
	Versions []string `json:"Versions"`
	State    string   `json:"State"`
}

// grypePackage identifies the affected package by name, version, and
// ecosystem type.
type grypePackage struct {
	Name    string `json:"Name"`
	Version string `json:"Version"`
	Type    string `json:"Type"`
}
|
// SPDX SBOM types

// spdxDocument is the subset of an SPDX JSON document needed for package
// counting; only the package list is read.
type spdxDocument struct {
	Packages []spdxPackage `json:"packages"`
}

// spdxPackage describes one catalogued package from the SBOM.
type spdxPackage struct {
	SPDXID           string `json:"SPDXID"`
	Name             string `json:"name"`
	VersionInfo      string `json:"versionInfo"`
	Supplier         string `json:"supplier"`
	LicenseConcluded string `json:"licenseConcluded"`
}
|
|
// reportData holds all fetched data for markdown generation
type reportData struct {
	Handle     string // resolved handle of the image owner
	Repository string // repository name under the handle
	Tag        string // tag name; empty when looked up by --digest
	Digest     string // manifest digest (platform-specific child if an index was followed)
	Platform   string // os/arch[/variant] of the selected manifest
	HoldURL    string // base URL of the hold service
	ScannedAt  string // timestamp from the scan record, when available

	Config     *ociFullConfig // parsed OCI image config; nil on fetch failure
	ConfigErr  string         // error text when Config could not be fetched
	Layers     []atproto.BlobReference // from manifest record
	VulnReport *grypeReport        // Grype matches; nil when unavailable
	VulnErr    string              // error text when VulnReport is missing
	ScanRecord *atproto.ScanRecord // scan summary record from the hold
	SBOM       *spdxDocument       // SPDX SBOM; nil when unavailable
	SBOMErr    string              // error text when SBOM is missing
}
|
// main drives the end-to-end flow: parse flags, resolve the owner identity
// and hold DID, locate the manifest (following a multi-platform index when
// present), fetch the image config and scan data, and emit the LLM prompt on
// stdout. All progress/diagnostic output goes to stderr so stdout stays clean.
func main() {
	registryURL := flag.String("url", "", "Registry URL (e.g., https://seamark.dev/r/therobbiedavis.com/listenarr)")
	tag := flag.String("tag", "latest", "Image tag to look up")
	digest := flag.String("digest", "", "Manifest digest (overrides --tag)")
	platform := flag.String("platform", "linux/amd64", "Platform to select from manifest index (os/arch)")
	holdURL := flag.String("hold", "https://us-chi1.cove.seamark.dev", "Hold service URL")
	flag.Parse()

	if *registryURL == "" {
		fmt.Fprintln(os.Stderr, "error: --url is required")
		flag.Usage()
		os.Exit(1)
	}

	// One shared deadline bounds every network call below.
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	handle, repository, err := parseRegistryURL(*registryURL)
	if err != nil {
		log.Fatalf("Failed to parse URL: %v", err)
	}

	fmt.Fprintf(os.Stderr, "Resolving identity for %s...\n", handle)
	did, resolvedHandle, pdsEndpoint, err := atproto.ResolveIdentity(ctx, handle)
	if err != nil {
		log.Fatalf("Failed to resolve identity %q: %v", handle, err)
	}
	fmt.Fprintf(os.Stderr, " DID: %s\n Handle: %s\n PDS: %s\n", did, resolvedHandle, pdsEndpoint)

	// Resolve hold DID
	fmt.Fprintf(os.Stderr, "Resolving hold DID for %s...\n", *holdURL)
	holdDID, err := atproto.ResolveHoldDID(ctx, *holdURL)
	if err != nil {
		log.Fatalf("Failed to resolve hold DID: %v", err)
	}
	fmt.Fprintf(os.Stderr, " Hold DID: %s\n", holdDID)

	// Resolve manifest digest: an explicit --digest wins; otherwise look the
	// tag record up on the owner's PDS.
	manifestDigest := *digest
	tagName := *tag
	if manifestDigest == "" {
		fmt.Fprintf(os.Stderr, "Looking up tag %q for %s/%s...\n", tagName, resolvedHandle, repository)
		tagRecord, err := fetchTagRecord(ctx, pdsEndpoint, did, repository, tagName)
		if err != nil {
			log.Fatalf("Failed to fetch tag record: %v", err)
		}
		manifestDigest, err = tagRecord.GetManifestDigest()
		if err != nil {
			log.Fatalf("Failed to get manifest digest from tag: %v", err)
		}
		fmt.Fprintf(os.Stderr, " Digest: %s\n", manifestDigest)
	} else {
		// A digest lookup has no meaningful tag name for the report.
		tagName = ""
	}

	// Fetch manifest record
	fmt.Fprintf(os.Stderr, "Fetching manifest record...\n")
	manifest, err := fetchManifestRecord(ctx, pdsEndpoint, did, manifestDigest)
	if err != nil {
		log.Fatalf("Failed to fetch manifest record: %v", err)
	}

	// Handle manifest index: list the available platforms, pick the one
	// matching --platform (falling back to the first entry), then re-fetch
	// that child manifest.
	if len(manifest.Manifests) > 0 {
		fmt.Fprintf(os.Stderr, "Manifest is an index with %d platforms:\n", len(manifest.Manifests))
		for _, m := range manifest.Manifests {
			if m.Platform != nil {
				p := m.Platform
				platStr := p.OS + "/" + p.Architecture
				if p.Variant != "" {
					platStr += "/" + p.Variant
				}
				fmt.Fprintf(os.Stderr, " - %s (%s)\n", platStr, truncate(m.Digest, 24))
			}
		}

		child, err := selectPlatform(manifest.Manifests, *platform)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Warning: %v, using first platform\n", err)
			child = &manifest.Manifests[0]
		}

		manifestDigest = child.Digest
		// Record the platform actually chosen so the report reflects it even
		// after a fallback.
		if child.Platform != nil {
			*platform = child.Platform.OS + "/" + child.Platform.Architecture
			if child.Platform.Variant != "" {
				*platform += "/" + child.Platform.Variant
			}
		}
		fmt.Fprintf(os.Stderr, "Selected platform %s → %s\n", *platform, truncate(manifestDigest, 24))

		// Re-fetch the child manifest record
		manifest, err = fetchManifestRecord(ctx, pdsEndpoint, did, manifestDigest)
		if err != nil {
			log.Fatalf("Failed to fetch child manifest: %v", err)
		}
	}

	report := &reportData{
		Handle:     resolvedHandle,
		Repository: repository,
		Tag:        tagName,
		Digest:     manifestDigest,
		Platform:   *platform,
		HoldURL:    *holdURL,
		Layers:     manifest.Layers,
	}

	// Fetch image config; failures are recorded in the report, not fatal.
	fmt.Fprintf(os.Stderr, "Fetching image config...\n")
	config, err := fetchFullImageConfig(ctx, *holdURL, manifestDigest)
	if err != nil {
		fmt.Fprintf(os.Stderr, " Warning: %v\n", err)
		report.ConfigErr = err.Error()
	} else {
		report.Config = config
	}

	// Fetch scan data (scan record + SBOM blob + vuln blob); also non-fatal.
	fmt.Fprintf(os.Stderr, "Fetching scan data...\n")
	scanRecord, sbom, vulnReport, scanErr := fetchScanData(ctx, *holdURL, holdDID, manifestDigest)
	if scanErr != nil {
		fmt.Fprintf(os.Stderr, " Warning: %v\n", scanErr)
		report.VulnErr = scanErr.Error()
		report.SBOMErr = scanErr.Error()
	} else {
		report.ScanRecord = scanRecord
		report.ScannedAt = scanRecord.ScannedAt
		if vulnReport != nil {
			report.VulnReport = vulnReport
		} else {
			report.VulnErr = "No vulnerability report blob available"
		}
		if sbom != nil {
			report.SBOM = sbom
		} else {
			report.SBOMErr = "No SBOM blob available"
		}
	}

	fmt.Fprintf(os.Stderr, "Generating prompt...\n")
	generatePrompt(os.Stdout, report)
}
|
// parseRegistryURL extracts the handle and repository from a registry URL of
// the form https://domain/r/<handle>/<repository>. The "r/" prefix and a
// single trailing slash are optional; anything after the handle's first "/"
// becomes the repository (which may itself contain slashes).
//
// It returns an error for unparsable URLs and — unlike before — also when
// either the handle or the repository segment is empty (e.g. "/r//repo"),
// which previously slipped through and produced a useless empty identifier.
func parseRegistryURL(rawURL string) (handle, repository string, err error) {
	u, err := url.Parse(rawURL)
	if err != nil {
		return "", "", fmt.Errorf("invalid URL: %w", err)
	}

	path := strings.TrimPrefix(u.Path, "/")
	path = strings.TrimPrefix(path, "r/")
	path = strings.TrimSuffix(path, "/")

	parts := strings.SplitN(path, "/", 2)
	if len(parts) < 2 || parts[0] == "" || parts[1] == "" {
		return "", "", fmt.Errorf("URL must be in format: https://domain/r/<handle>/<repository>")
	}

	return parts[0], parts[1], nil
}
|
func fetchTagRecord(ctx context.Context, pdsEndpoint, did, repository, tag string) (*atproto.TagRecord, error) {
|
|
rkey := atproto.RepositoryTagToRKey(repository, tag)
|
|
reqURL := fmt.Sprintf("%s/xrpc/com.atproto.repo.getRecord?repo=%s&collection=%s&rkey=%s",
|
|
strings.TrimSuffix(pdsEndpoint, "/"),
|
|
url.QueryEscape(did),
|
|
url.QueryEscape(atproto.TagCollection),
|
|
url.QueryEscape(rkey),
|
|
)
|
|
|
|
resp, err := httpGet(ctx, reqURL)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer resp.Body.Close()
|
|
|
|
if resp.StatusCode != http.StatusOK {
|
|
return nil, fmt.Errorf("tag %q not found (HTTP %d)", tag, resp.StatusCode)
|
|
}
|
|
|
|
var envelope struct {
|
|
Value json.RawMessage `json:"value"`
|
|
}
|
|
if err := json.NewDecoder(resp.Body).Decode(&envelope); err != nil {
|
|
return nil, fmt.Errorf("parse response: %w", err)
|
|
}
|
|
|
|
var tagRecord atproto.TagRecord
|
|
if err := json.Unmarshal(envelope.Value, &tagRecord); err != nil {
|
|
return nil, fmt.Errorf("parse tag record: %w", err)
|
|
}
|
|
|
|
return &tagRecord, nil
|
|
}
|
|
|
|
func fetchManifestRecord(ctx context.Context, pdsEndpoint, did, digest string) (*atproto.ManifestRecord, error) {
|
|
rkey := strings.TrimPrefix(digest, "sha256:")
|
|
reqURL := fmt.Sprintf("%s/xrpc/com.atproto.repo.getRecord?repo=%s&collection=%s&rkey=%s",
|
|
strings.TrimSuffix(pdsEndpoint, "/"),
|
|
url.QueryEscape(did),
|
|
url.QueryEscape(atproto.ManifestCollection),
|
|
url.QueryEscape(rkey),
|
|
)
|
|
|
|
resp, err := httpGet(ctx, reqURL)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer resp.Body.Close()
|
|
|
|
if resp.StatusCode != http.StatusOK {
|
|
return nil, fmt.Errorf("manifest not found (HTTP %d)", resp.StatusCode)
|
|
}
|
|
|
|
var envelope struct {
|
|
Value json.RawMessage `json:"value"`
|
|
}
|
|
if err := json.NewDecoder(resp.Body).Decode(&envelope); err != nil {
|
|
return nil, fmt.Errorf("parse response: %w", err)
|
|
}
|
|
|
|
var manifest atproto.ManifestRecord
|
|
if err := json.Unmarshal(envelope.Value, &manifest); err != nil {
|
|
return nil, fmt.Errorf("parse manifest record: %w", err)
|
|
}
|
|
|
|
return &manifest, nil
|
|
}
|
|
|
|
func selectPlatform(manifests []atproto.ManifestReference, platform string) (*atproto.ManifestReference, error) {
|
|
parts := strings.Split(platform, "/")
|
|
wantOS := parts[0]
|
|
wantArch := ""
|
|
wantVariant := ""
|
|
if len(parts) > 1 {
|
|
wantArch = parts[1]
|
|
}
|
|
if len(parts) > 2 {
|
|
wantVariant = parts[2]
|
|
}
|
|
|
|
for i := range manifests {
|
|
m := &manifests[i]
|
|
if m.Platform == nil {
|
|
continue
|
|
}
|
|
if m.Platform.OS == wantOS && m.Platform.Architecture == wantArch {
|
|
if wantVariant == "" || m.Platform.Variant == wantVariant {
|
|
return m, nil
|
|
}
|
|
}
|
|
}
|
|
|
|
return nil, fmt.Errorf("no platform matching %s found", platform)
|
|
}
|
|
|
|
func fetchFullImageConfig(ctx context.Context, holdURL, manifestDigest string) (*ociFullConfig, error) {
|
|
reqURL := fmt.Sprintf("%s%s?digest=%s",
|
|
strings.TrimSuffix(holdURL, "/"),
|
|
atproto.HoldGetImageConfig,
|
|
url.QueryEscape(manifestDigest),
|
|
)
|
|
|
|
resp, err := httpGet(ctx, reqURL)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer resp.Body.Close()
|
|
|
|
if resp.StatusCode != http.StatusOK {
|
|
return nil, fmt.Errorf("image config not found (HTTP %d)", resp.StatusCode)
|
|
}
|
|
|
|
var record struct {
|
|
ConfigJSON string `json:"configJson"`
|
|
}
|
|
if err := json.NewDecoder(resp.Body).Decode(&record); err != nil {
|
|
return nil, fmt.Errorf("parse response: %w", err)
|
|
}
|
|
|
|
var config ociFullConfig
|
|
if err := json.Unmarshal([]byte(record.ConfigJSON), &config); err != nil {
|
|
return nil, fmt.Errorf("parse OCI config: %w", err)
|
|
}
|
|
|
|
return &config, nil
|
|
}
|
|
|
|
func fetchScanData(ctx context.Context, holdURL, holdDID, manifestDigest string) (*atproto.ScanRecord, *spdxDocument, *grypeReport, error) {
|
|
rkey := strings.TrimPrefix(manifestDigest, "sha256:")
|
|
|
|
// Fetch scan record
|
|
scanURL := fmt.Sprintf("%s/xrpc/com.atproto.repo.getRecord?repo=%s&collection=%s&rkey=%s",
|
|
strings.TrimSuffix(holdURL, "/"),
|
|
url.QueryEscape(holdDID),
|
|
url.QueryEscape(atproto.ScanCollection),
|
|
url.QueryEscape(rkey),
|
|
)
|
|
|
|
resp, err := httpGet(ctx, scanURL)
|
|
if err != nil {
|
|
return nil, nil, nil, fmt.Errorf("fetch scan record: %w", err)
|
|
}
|
|
defer resp.Body.Close()
|
|
|
|
if resp.StatusCode != http.StatusOK {
|
|
return nil, nil, nil, fmt.Errorf("no scan record found (HTTP %d)", resp.StatusCode)
|
|
}
|
|
|
|
var envelope struct {
|
|
Value json.RawMessage `json:"value"`
|
|
}
|
|
if err := json.NewDecoder(resp.Body).Decode(&envelope); err != nil {
|
|
return nil, nil, nil, fmt.Errorf("parse scan response: %w", err)
|
|
}
|
|
|
|
var scanRecord atproto.ScanRecord
|
|
if err := json.Unmarshal(envelope.Value, &scanRecord); err != nil {
|
|
return nil, nil, nil, fmt.Errorf("parse scan record: %w", err)
|
|
}
|
|
|
|
// Fetch SBOM blob
|
|
var sbom *spdxDocument
|
|
if scanRecord.SbomBlob != nil && scanRecord.SbomBlob.Ref.String() != "" {
|
|
blobURL := fmt.Sprintf("%s/xrpc/com.atproto.sync.getBlob?did=%s&cid=%s",
|
|
strings.TrimSuffix(holdURL, "/"),
|
|
url.QueryEscape(holdDID),
|
|
url.QueryEscape(scanRecord.SbomBlob.Ref.String()),
|
|
)
|
|
blobResp, err := httpGet(ctx, blobURL)
|
|
if err == nil {
|
|
defer blobResp.Body.Close()
|
|
if blobResp.StatusCode == http.StatusOK {
|
|
var doc spdxDocument
|
|
if err := json.NewDecoder(blobResp.Body).Decode(&doc); err == nil {
|
|
sbom = &doc
|
|
} else {
|
|
fmt.Fprintf(os.Stderr, " Warning: failed to parse SBOM: %v\n", err)
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
// Fetch vuln report blob
|
|
var vulnReport *grypeReport
|
|
if scanRecord.VulnReportBlob != nil && scanRecord.VulnReportBlob.Ref.String() != "" {
|
|
blobURL := fmt.Sprintf("%s/xrpc/com.atproto.sync.getBlob?did=%s&cid=%s",
|
|
strings.TrimSuffix(holdURL, "/"),
|
|
url.QueryEscape(holdDID),
|
|
url.QueryEscape(scanRecord.VulnReportBlob.Ref.String()),
|
|
)
|
|
blobResp, err := httpGet(ctx, blobURL)
|
|
if err == nil {
|
|
defer blobResp.Body.Close()
|
|
if blobResp.StatusCode == http.StatusOK {
|
|
var report grypeReport
|
|
if err := json.NewDecoder(blobResp.Body).Decode(&report); err == nil {
|
|
vulnReport = &report
|
|
} else {
|
|
fmt.Fprintf(os.Stderr, " Warning: failed to parse vuln report: %v\n", err)
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
return &scanRecord, sbom, vulnReport, nil
|
|
}
|
|
|
|
func httpGet(ctx context.Context, rawURL string) (*http.Response, error) {
|
|
req, err := http.NewRequestWithContext(ctx, "GET", rawURL, nil)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("build request: %w", err)
|
|
}
|
|
return http.DefaultClient.Do(req)
|
|
}
|
|
|
|
// --- Output generation ---
|
|
|
|
// generatePrompt writes the LLM prompt to w: a fixed "respond in YAML only"
// instruction header followed by a compact, fact-only data block describing
// the image, its runtime config, build history, vulnerability summary, and
// SBOM package counts. Output order is the contract consumed downstream.
func generatePrompt(w io.Writer, r *reportData) {
	// System instruction
	fmt.Fprintln(w, `Respond ONLY with raw YAML. No markdown fences, no explanation, no preamble.
Analyze the container image data below. Output a list of actionable suggestions sorted by impact (highest first).

schema:
suggestions:
  - action: "<specific actionable step>"
    category: vulnerability|size|cache|security|best-practice
    impact: high|medium|low
    effort: low|medium|high
    cves_fixed: <int or 0>
    size_saved_mb: <int or 0>
    detail: "<one sentence with specific package names, versions, or commands>"
---`)

	// Compact data block - no markdown formatting, just facts
	ref := r.Handle + "/" + r.Repository
	if r.Tag != "" {
		ref += ":" + r.Tag
	}
	// Total compressed size is the sum of manifest layer sizes.
	totalSize := int64(0)
	for _, l := range r.Layers {
		totalSize += l.Size
	}

	fmt.Fprintf(w, "\nimage: %s\ndigest: %s\nplatform: %s\ntotal_size: %s\nlayers: %d\n",
		ref, r.Digest, r.Platform, humanSize(totalSize), len(r.Layers))

	// Config
	if r.Config != nil {
		c := r.Config.Config
		// An unset User means the container runs as root.
		user := c.User
		if user == "" {
			user = "root"
		}
		fmt.Fprintf(w, "user: %s\n", user)
		if c.WorkingDir != "" {
			fmt.Fprintf(w, "workdir: %s\n", c.WorkingDir)
		}
		if len(c.Entrypoint) > 0 {
			fmt.Fprintf(w, "entrypoint: %s\n", strings.Join(c.Entrypoint, " "))
		}
		if len(c.Cmd) > 0 {
			fmt.Fprintf(w, "cmd: %s\n", strings.Join(c.Cmd, " "))
		}
		if len(c.ExposedPorts) > 0 {
			ports := make([]string, 0, len(c.ExposedPorts))
			for p := range c.ExposedPorts {
				ports = append(ports, p)
			}
			fmt.Fprintf(w, "ports: %s\n", strings.Join(ports, ","))
		}
		if len(c.Env) > 0 {
			fmt.Fprintln(w, "env:")
			for _, env := range c.Env {
				// Hide values of secret-looking variables before they reach
				// the prompt.
				parts := strings.SplitN(env, "=", 2)
				if shouldRedact(parts[0]) {
					fmt.Fprintf(w, "  - %s=[REDACTED]\n", parts[0])
				} else {
					fmt.Fprintf(w, "  - %s\n", env)
				}
			}
		}
		if len(c.Labels) > 0 {
			fmt.Fprintln(w, "labels:")
			// Sort keys so the output is deterministic (map order is random).
			keys := make([]string, 0, len(c.Labels))
			for k := range c.Labels {
				keys = append(keys, k)
			}
			sort.Strings(keys)
			for _, k := range keys {
				v := c.Labels[k]
				// Cap long label values at 80 characters.
				if len(v) > 80 {
					v = v[:77] + "..."
				}
				fmt.Fprintf(w, "  %s: %s\n", k, v)
			}
		}

		// History as compact list
		fmt.Fprintln(w, "history:")
		// Non-empty history entries pair up, in order, with manifest layers;
		// layerIdx walks r.Layers alongside the history.
		layerIdx := 0
		for _, h := range r.Config.History {
			cmd := cleanCommand(h.CreatedBy)
			if len(cmd) > 100 {
				cmd = cmd[:97] + "..."
			}
			if !h.EmptyLayer && layerIdx < len(r.Layers) {
				fmt.Fprintf(w, "  - [%s] %s\n", humanSize(r.Layers[layerIdx].Size), cmd)
				layerIdx++
			} else {
				fmt.Fprintf(w, "  - %s\n", cmd)
			}
		}
	}

	// Vuln summary
	if r.ScanRecord != nil {
		sr := r.ScanRecord
		fmt.Fprintf(w, "vulns: {critical: %d, high: %d, medium: %d, low: %d, total: %d}\n",
			sr.Critical, sr.High, sr.Medium, sr.Low, sr.Total)
	}

	// Fixable vulns - compact list
	if r.VulnReport != nil {
		// Group by package: name -> {version, type, fixes[], cves[]}
		type pkgInfo struct {
			version string
			typ     string
			fixes   map[string]bool
			cves    []string
			maxSev  int
		}
		pkgs := map[string]*pkgInfo{}

		// Only Critical/High matches are listed individually.
		for _, m := range r.VulnReport.Matches {
			sev := m.Vulnerability.Metadata.Severity
			if sev != "Critical" && sev != "High" {
				continue
			}
			key := m.Package.Name
			p, ok := pkgs[key]
			if !ok {
				// maxSev starts at 5 (worst rank) and is lowered as matches
				// with higher severities are seen.
				p = &pkgInfo{version: m.Package.Version, typ: m.Package.Type, fixes: map[string]bool{}, maxSev: 5}
				pkgs[key] = p
			}
			p.cves = append(p.cves, m.Vulnerability.ID)
			for _, f := range m.Vulnerability.Fix.Versions {
				p.fixes[f] = true
			}
			if s := severityOrder(sev); s < p.maxSev {
				p.maxSev = s
			}
		}

		if len(pkgs) > 0 {
			fmt.Fprintln(w, "fixable_critical_high:")
			// Sort by severity then CVE count
			type entry struct {
				name string
				info *pkgInfo
			}
			sorted := make([]entry, 0, len(pkgs))
			for n, p := range pkgs {
				sorted = append(sorted, entry{n, p})
			}
			sort.Slice(sorted, func(i, j int) bool {
				if sorted[i].info.maxSev != sorted[j].info.maxSev {
					return sorted[i].info.maxSev < sorted[j].info.maxSev
				}
				return len(sorted[i].info.cves) > len(sorted[j].info.cves)
			})

			for _, e := range sorted {
				// Deterministic order for the fix-version list too.
				fixes := make([]string, 0, len(e.info.fixes))
				for f := range e.info.fixes {
					fixes = append(fixes, f)
				}
				sort.Strings(fixes)
				fmt.Fprintf(w, "  - pkg: %s@%s (%s) cves: %d fix: %s\n",
					e.name, e.info.version, e.info.typ, len(e.info.cves), strings.Join(fixes, ","))
			}
		}

		// Unfixable counts
		unfixable := map[string]int{}
		for _, m := range r.VulnReport.Matches {
			if len(m.Vulnerability.Fix.Versions) == 0 {
				unfixable[m.Vulnerability.Metadata.Severity]++
			}
		}
		if len(unfixable) > 0 {
			fmt.Fprintf(w, "unfixable:")
			// Fixed severity order keeps the line deterministic.
			for _, sev := range []string{"Critical", "High", "Medium", "Low", "Negligible", "Unknown"} {
				if c, ok := unfixable[sev]; ok {
					fmt.Fprintf(w, " %s=%d", strings.ToLower(sev), c)
				}
			}
			fmt.Fprintln(w)
		}
	}

	// SBOM summary - just type counts
	if r.SBOM != nil {
		typeCounts := map[string]int{}
		total := 0
		for _, p := range r.SBOM.Packages {
			// Skip SPDX document/root placeholder entries.
			if strings.HasPrefix(p.SPDXID, "SPDXRef-DocumentRoot") || p.SPDXID == "SPDXRef-DOCUMENT" {
				continue
			}
			total++
			pkgType := extractPackageType(p.Supplier)
			if pkgType == "" {
				pkgType = "other"
			}
			typeCounts[pkgType]++
		}
		fmt.Fprintf(w, "sbom_packages: %d", total)
		for t, c := range typeCounts {
			fmt.Fprintf(w, " %s=%d", t, c)
		}
		fmt.Fprintln(w)

		// Top vulnerable packages
		if r.VulnReport != nil {
			vulnPkgs := map[string]int{}
			for _, m := range r.VulnReport.Matches {
				vulnPkgs[m.Package.Name]++
			}
			type pv struct {
				name  string
				count int
			}
			sorted := make([]pv, 0, len(vulnPkgs))
			for n, c := range vulnPkgs {
				sorted = append(sorted, pv{n, c})
			}
			sort.Slice(sorted, func(i, j int) bool { return sorted[i].count > sorted[j].count })
			// Top 10 only
			if len(sorted) > 10 {
				sorted = sorted[:10]
			}
			fmt.Fprintln(w, "top_vulnerable_packages:")
			for _, p := range sorted {
				fmt.Fprintf(w, "  - %s: %d\n", p.name, p.count)
			}
		}
	}
}
|
// --- Helpers ---
|
|
|
|
// severityOrder maps a Grype severity string to a sort rank, 0 (Critical)
// through 4 (Negligible); any unrecognized value ranks last at 5.
func severityOrder(s string) int {
	ranks := map[string]int{
		"Critical":   0,
		"High":       1,
		"Medium":     2,
		"Low":        3,
		"Negligible": 4,
	}
	if rank, ok := ranks[s]; ok {
		return rank
	}
	return 5
}
|
// humanSize renders a byte count with one decimal place in the largest
// binary unit that fits (GB, MB, KB), or as plain bytes below 1 KiB.
func humanSize(bytes int64) string {
	units := []struct {
		limit int64
		name  string
	}{
		{1 << 30, "GB"},
		{1 << 20, "MB"},
		{1 << 10, "KB"},
	}
	for _, u := range units {
		if bytes >= u.limit {
			return fmt.Sprintf("%.1f %s", float64(bytes)/float64(u.limit), u.name)
		}
	}
	return fmt.Sprintf("%d B", bytes)
}
|
// cleanCommand strips the shell-invocation and Docker no-op prefixes from a
// history "created_by" entry so only the meaningful instruction remains.
func cleanCommand(cmd string) string {
	// Remove common prefixes that add noise
	for _, noise := range []string{"/bin/sh -c ", "#(nop) "} {
		cmd = strings.TrimPrefix(cmd, noise)
	}
	return strings.TrimSpace(cmd)
}
|
// shouldRedact reports whether an environment variable name looks like it
// carries a secret and should have its value hidden in the report.
//
// Matching is case-insensitive. In addition to the original suffix check
// (FOO_KEY, FOO_SECRET, ...), a name that IS a bare sensitive word
// ("PASSWORD", "TOKEN", "SECRET", "KEY", "CREDENTIALS", "PASSWD") is now
// also redacted — previously such names leaked through because they lack
// the underscore prefix. The change only redacts more, never less.
func shouldRedact(envName string) bool {
	upper := strings.ToUpper(envName)
	for _, word := range []string{"KEY", "SECRET", "PASSWORD", "TOKEN", "CREDENTIALS", "API_KEY", "PASSWD"} {
		if upper == word || strings.HasSuffix(upper, "_"+word) {
			return true
		}
	}
	return false
}
|
// truncate shortens s to its first n bytes and appends "..." when it is
// longer than n; shorter strings are returned unchanged. Byte-based, which
// is fine for the ASCII digests it is used on.
func truncate(s string, n int) string {
	if len(s) > n {
		return s[:n] + "..."
	}
	return s
}
|
// extractPackageType guesses a package ecosystem from an SPDX supplier
// string, returning "" when nothing matches.
//
// Fix: the old check `strings.Contains(s, "go")` misclassified any supplier
// containing "go" as a substring ("Google", "django", "mongodb", ...) as a
// Go package. The Go case now requires either the word "golang" or "go" as
// a standalone token. All other heuristics are unchanged.
func extractPackageType(supplier string) string {
	s := strings.ToLower(supplier)

	// containsWord reports whether word appears as a standalone
	// alphanumeric token in s (not merely a substring).
	containsWord := func(word string) bool {
		fields := strings.FieldsFunc(s, func(r rune) bool {
			return !('a' <= r && r <= 'z') && !('0' <= r && r <= '9')
		})
		for _, f := range fields {
			if f == word {
				return true
			}
		}
		return false
	}

	switch {
	case strings.Contains(s, "npmjs") || strings.Contains(s, "npm"):
		return "npm"
	case strings.Contains(s, "pypi") || strings.Contains(s, "python"):
		return "python"
	case strings.Contains(s, "rubygems"):
		return "gem"
	case strings.Contains(s, "golang") || containsWord("go"):
		return "go"
	case strings.Contains(s, "debian") || strings.Contains(s, "ubuntu"):
		return "deb"
	case strings.Contains(s, "alpine"):
		return "apk"
	case strings.Contains(s, "redhat") || strings.Contains(s, "fedora") || strings.Contains(s, "centos"):
		return "rpm"
	case strings.Contains(s, "maven") || strings.Contains(s, "java"):
		return "java"
	case strings.Contains(s, "nuget") || strings.Contains(s, ".net"):
		return "nuget"
	case strings.Contains(s, "cargo") || strings.Contains(s, "rust"):
		return "rust"
	default:
		return ""
	}
}