// mirror of https://tangled.org/evan.jarrett.net/at-container-registry
// synced 2026-04-20 16:40:29 +00:00
// 567 lines, 18 KiB, Go
package pds
|
|
|
|
import (
	"context"
	"database/sql"
	"fmt"
	"io"
	"log/slog"
	"os"
	"path/filepath"
	"slices"
	"strings"

	"atcr.io/pkg/atproto"
	"atcr.io/pkg/auth/oauth"
	holddb "atcr.io/pkg/hold/db"
	"atcr.io/pkg/s3"
	"github.com/bluesky-social/indigo/atproto/atcrypto"
	lexutil "github.com/bluesky-social/indigo/lex/util"
	"github.com/bluesky-social/indigo/models"
	"github.com/bluesky-social/indigo/repo"
	"github.com/ipfs/go-cid"
)
|
|
|
|
// init registers our custom ATProto types with indigo's lexutil type registry.
// This allows repomgr.GetRecord to automatically unmarshal our types into the
// concrete record structs instead of generic CBOR maps.
func init() {
	// Register captain, crew, tangled profile, layer, and stats record types.
	// The collection constant passed here must match the $type field carried
	// inside the stored records, or unmarshalling will not resolve the type.
	lexutil.RegisterType(atproto.CaptainCollection, &atproto.CaptainRecord{})
	lexutil.RegisterType(atproto.CrewCollection, &atproto.CrewRecord{})
	lexutil.RegisterType(atproto.LayerCollection, &atproto.LayerRecord{})
	lexutil.RegisterType(atproto.TangledProfileCollection, &atproto.TangledProfileRecord{})
	lexutil.RegisterType(atproto.StatsCollection, &atproto.StatsRecord{})
	lexutil.RegisterType(atproto.DailyStatsCollection, &atproto.DailyStatsRecord{})
	lexutil.RegisterType(atproto.ScanCollection, &atproto.ScanRecord{})
	lexutil.RegisterType(atproto.ImageConfigCollection, &atproto.ImageConfigRecord{})
}
|
|
|
|
// HoldPDS is a minimal ATProto PDS implementation for a hold service.
// It wraps a SQLite-backed carstore, a repo operator, and an optional
// records index, all scoped to a single fixed user (uid 1).
type HoldPDS struct {
	// did is the hold's own DID (exposed via DID()).
	did string
	// PublicURL is the externally visible base URL of this PDS.
	PublicURL string
	// appviewURL is the appview base URL used for links in Bluesky posts.
	appviewURL string
	// appviewMeta caches appview metadata set via SetAppviewMeta; nil until fetched.
	appviewMeta *atproto.AppviewMetadata
	// carstore is the CAR-shard storage backing the repo.
	carstore holddb.CarStore
	// repomgr performs repo lifecycle operations (e.g. InitNewActor).
	repomgr RepoOperator
	// dbPath is the path (or ":memory:") the carstore was opened with.
	dbPath string
	// uid is the fixed single-user ID (1) used for all carstore calls.
	uid models.Uid
	// signingKey signs repo commits; loaded/generated at construction.
	signingKey *atcrypto.PrivateKeyK256
	// enableBlueskyPosts is synced into the captain record during Bootstrap.
	enableBlueskyPosts bool
	// recordsIndex accelerates listing queries; nil in in-memory test mode.
	recordsIndex *RecordsIndex
}
|
|
|
|
// AppviewURL returns the configured appview base URL for links in Bluesky posts.
|
|
func (p *HoldPDS) AppviewURL() string { return p.appviewURL }
|
|
|
|
// AppviewMeta returns cached appview metadata, or defaults derived from the appview URL.
|
|
func (p *HoldPDS) AppviewMeta() atproto.AppviewMetadata {
|
|
if p.appviewMeta != nil {
|
|
return *p.appviewMeta
|
|
}
|
|
return atproto.DefaultAppviewMetadata(p.appviewURL)
|
|
}
|
|
|
|
// SetAppviewMeta caches appview metadata fetched on startup.
|
|
func (p *HoldPDS) SetAppviewMeta(m *atproto.AppviewMetadata) {
|
|
p.appviewMeta = m
|
|
}
|
|
|
|
// NewHoldPDS creates or opens a hold PDS with a SQLite carstore.
//
// did is the hold's own DID; publicURL and appviewURL are the externally
// visible base URLs stored on the resulting HoldPDS. dbPath is either
// ":memory:" (test mode, no records index) or a directory in which the
// carstore manages db.sqlite3. keyPath is handed to
// oauth.GenerateOrLoadPDSKey, which loads or creates the signing key.
// enableBlueskyPosts is stored and later synced into the captain record
// during Bootstrap. Returns an error if the key, carstore, or records
// index cannot be initialized.
func NewHoldPDS(ctx context.Context, did, publicURL, appviewURL, dbPath, keyPath string, enableBlueskyPosts bool) (*HoldPDS, error) {
	// Generate or load signing key
	signingKey, err := oauth.GenerateOrLoadPDSKey(keyPath)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize signing key: %w", err)
	}

	// Create SQLite-backed carstore (using vendored libsql-based store)
	var sqlStore *holddb.SQLiteStore

	if dbPath == ":memory:" {
		// In-memory mode for tests: create carstore manually and open with :memory:
		sqlStore = new(holddb.SQLiteStore)
		if err := sqlStore.Open(":memory:"); err != nil {
			return nil, fmt.Errorf("failed to open in-memory sqlite store: %w", err)
		}
	} else {
		// File mode for production: create directory and use NewSqliteStore
		dir := filepath.Dir(dbPath)
		if err := os.MkdirAll(dir, 0755); err != nil {
			return nil, fmt.Errorf("failed to create database directory: %w", err)
		}

		// dbPath is the directory, carstore creates and opens db.sqlite3 inside it
		sqlStore, err = holddb.NewSqliteStore(dbPath)
		if err != nil {
			return nil, fmt.Errorf("failed to create sqlite store: %w", err)
		}
	}

	// Use SQLiteStore directly, not the CarStore() wrapper.
	// The wrapper has a bug where GetUserRepoHead checks CarShard.ID which SQLite doesn't populate.
	cs := sqlStore

	// For a single-user hold, we use a fixed UID (1)
	uid := models.Uid(1)

	// Create KeyManager wrapper for our signing key
	kmgr := NewHoldKeyManager(signingKey)

	// Create repo operator - handles all session/repo lifecycle
	rm := NewDirectRepoOperator(cs, kmgr)

	// Check if repo already exists, if not create initial commit.
	// An error or undefined head both mean "no usable repo yet".
	head, err := cs.GetUserRepoHead(ctx, uid)
	hasValidRepo := (err == nil && head.Defined())

	if !hasValidRepo {
		// Initialize empty repo with first commit.
		// RepoManager requires at least one commit to exist.
		// We'll create this by doing a dummy operation in Bootstrap.
		slog.Info("New hold repo - will be initialized in Bootstrap")
	}

	// Initialize records index for efficient listing queries.
	// Uses same database as carstore for simplicity; skipped entirely in
	// in-memory test mode (recordsIndex stays nil).
	var recordsIndex *RecordsIndex
	if dbPath != ":memory:" {
		recordsIndex, err = NewRecordsIndex(dbPath + "/db.sqlite3")
		if err != nil {
			return nil, fmt.Errorf("failed to create records index: %w", err)
		}
	}

	return &HoldPDS{
		did:                did,
		PublicURL:          publicURL,
		appviewURL:         appviewURL,
		carstore:           cs,
		repomgr:            rm,
		dbPath:             dbPath,
		uid:                uid,
		signingKey:         signingKey,
		enableBlueskyPosts: enableBlueskyPosts,
		recordsIndex:       recordsIndex,
	}, nil
}
|
|
|
|
// NewHoldPDSWithDB creates or opens a hold PDS using an existing *sql.DB connection.
// The caller is responsible for the DB lifecycle. Used when the database is
// centrally managed (e.g., with libsql embedded replicas).
//
// Parameters mirror NewHoldPDS, with db supplying the shared connection used
// for both the carstore and the records index. Returns an error if the
// signing key, carstore, or records index cannot be initialized.
func NewHoldPDSWithDB(ctx context.Context, did, publicURL, appviewURL, dbPath, keyPath string, enableBlueskyPosts bool, db *sql.DB) (*HoldPDS, error) {
	signingKey, err := oauth.GenerateOrLoadPDSKey(keyPath)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize signing key: %w", err)
	}

	// Use shared DB for carstore
	sqlStore, err := holddb.NewSQLiteStoreWithDB(dbPath, db)
	if err != nil {
		return nil, fmt.Errorf("failed to create sqlite store with shared DB: %w", err)
	}

	cs := sqlStore
	// Single-user hold: fixed UID, same convention as NewHoldPDS.
	uid := models.Uid(1)
	kmgr := NewHoldKeyManager(signingKey)
	rm := NewDirectRepoOperator(cs, kmgr)

	// An error or undefined head both mean "no usable repo yet";
	// the first commit is created later in Bootstrap.
	head, err := cs.GetUserRepoHead(ctx, uid)
	hasValidRepo := (err == nil && head.Defined())
	if !hasValidRepo {
		slog.Info("New hold repo - will be initialized in Bootstrap")
	}

	// Use shared DB for records index
	recordsIndex, err := NewRecordsIndexWithDB(db)
	if err != nil {
		return nil, fmt.Errorf("failed to create records index with shared DB: %w", err)
	}

	return &HoldPDS{
		did:                did,
		PublicURL:          publicURL,
		appviewURL:         appviewURL,
		carstore:           cs,
		repomgr:            rm,
		dbPath:             dbPath,
		uid:                uid,
		signingKey:         signingKey,
		enableBlueskyPosts: enableBlueskyPosts,
		recordsIndex:       recordsIndex,
	}, nil
}
|
|
|
|
// DID returns the hold's DID
|
|
func (p *HoldPDS) DID() string {
|
|
return p.did
|
|
}
|
|
|
|
// SigningKey returns the hold's signing key
|
|
func (p *HoldPDS) SigningKey() *atcrypto.PrivateKeyK256 {
|
|
return p.signingKey
|
|
}
|
|
|
|
// RepomgrRef returns a reference to the RepoOperator for event handler setup
|
|
func (p *HoldPDS) RepomgrRef() RepoOperator {
|
|
return p.repomgr
|
|
}
|
|
|
|
// RecordsIndex returns the records index for efficient listing
|
|
func (p *HoldPDS) RecordsIndex() *RecordsIndex {
|
|
return p.recordsIndex
|
|
}
|
|
|
|
// Carstore returns the carstore for repo operations
|
|
func (p *HoldPDS) Carstore() holddb.CarStore {
|
|
return p.carstore
|
|
}
|
|
|
|
// UID returns the user ID for this hold
|
|
func (p *HoldPDS) UID() models.Uid {
|
|
return p.uid
|
|
}
|
|
|
|
// GetRecordBytes retrieves raw CBOR bytes for a record
|
|
// recordPath format: "collection/rkey"
|
|
func (p *HoldPDS) GetRecordBytes(ctx context.Context, recordPath string) (cid.Cid, *[]byte, error) {
|
|
session, err := p.carstore.ReadOnlySession(p.uid)
|
|
if err != nil {
|
|
return cid.Undef, nil, fmt.Errorf("failed to create session: %w", err)
|
|
}
|
|
|
|
head, err := p.carstore.GetUserRepoHead(ctx, p.uid)
|
|
if err != nil {
|
|
return cid.Undef, nil, fmt.Errorf("failed to get repo head: %w", err)
|
|
}
|
|
|
|
if !head.Defined() {
|
|
return cid.Undef, nil, fmt.Errorf("repo is empty")
|
|
}
|
|
|
|
repoHandle, err := repo.OpenRepo(ctx, session, head)
|
|
if err != nil {
|
|
return cid.Undef, nil, fmt.Errorf("failed to open repo: %w", err)
|
|
}
|
|
|
|
recordCID, recBytes, err := repoHandle.GetRecordBytes(ctx, recordPath)
|
|
if err != nil {
|
|
return cid.Undef, nil, fmt.Errorf("failed to get record: %w", err)
|
|
}
|
|
|
|
return recordCID, recBytes, nil
|
|
}
|
|
|
|
// BootstrapConfig holds all configuration needed for Bootstrap.
// Defined in the pds package to avoid circular imports with the hold package.
type BootstrapConfig struct {
	OwnerDID           string // DID of the hold captain; Bootstrap is a no-op when empty
	Public             bool   // Allow unauthenticated blob reads
	AllowAllCrew       bool   // Create wildcard crew record
	ProfileAvatarURL   string // URL to fetch avatar image from (optional)
	ProfileDisplayName string // Bluesky profile display name (optional)
	ProfileDescription string // Bluesky profile description (optional)
	Region             string // Deployment region, recorded on the captain record
}
|
|
|
|
// Bootstrap initializes the hold with the captain record, owner as first crew
// member, and Bluesky profile. It is idempotent: a pre-existing captain record
// skips captain/crew creation and instead syncs its settings from cfg, and a
// pre-existing profile is likewise synced rather than recreated.
//
// s3svc may be nil (e.g. in tests), in which case profile handling is skipped.
// Returns nil immediately when cfg.OwnerDID is empty.
func (p *HoldPDS) Bootstrap(ctx context.Context, s3svc *s3.S3Service, cfg BootstrapConfig) error {
	if cfg.OwnerDID == "" {
		return nil
	}

	// Check if captain record already exists (idempotent bootstrap)
	_, _, err := p.GetCaptainRecord(ctx)
	captainExists := (err == nil)

	if captainExists {
		// Captain record exists, skip captain/crew setup but still create profile if needed
		slog.Info("Captain record exists, skipping captain/crew setup")
	} else {
		slog.Info("Bootstrapping hold PDS", "owner", cfg.OwnerDID)
	}

	if !captainExists {

		// Initialize repo if it doesn't exist yet.
		// Check if repo exists by trying to get the head.
		head, err := p.carstore.GetUserRepoHead(ctx, p.uid)
		if err != nil || !head.Defined() {
			// Repo doesn't exist, initialize it.
			// InitNewActor creates an empty repo with initial commit.
			err = p.repomgr.InitNewActor(ctx, p.uid, "", p.did, "", "", "")
			if err != nil {
				return fmt.Errorf("failed to initialize repo: %w", err)
			}
			slog.Info("Initialized empty repo")
		}

		// Create captain record (hold ownership and settings)
		_, err = p.CreateCaptainRecord(ctx, cfg.OwnerDID, cfg.Public, cfg.AllowAllCrew, p.enableBlueskyPosts, cfg.Region)
		if err != nil {
			return fmt.Errorf("failed to create captain record: %w", err)
		}

		slog.Info("Created captain record",
			"public", cfg.Public,
			"allowAllCrew", cfg.AllowAllCrew,
			"enableBlueskyPosts", p.enableBlueskyPosts,
			"region", cfg.Region)

		// Add hold owner as first crew member with admin role
		_, err = p.AddCrewMember(ctx, cfg.OwnerDID, "admin", []string{"blob:read", "blob:write", "crew:admin"}, "")
		if err != nil {
			return fmt.Errorf("failed to add owner as crew member: %w", err)
		}

		slog.Info("Added owner as hold admin", "did", cfg.OwnerDID)
	} else {
		// Captain record exists, check if we need to sync settings from config
		_, existingCaptain, err := p.GetCaptainRecord(ctx)
		if err == nil {
			// Check if any settings need updating
			needsUpdate := existingCaptain.Public != cfg.Public ||
				existingCaptain.AllowAllCrew != cfg.AllowAllCrew ||
				existingCaptain.EnableBlueskyPosts != p.enableBlueskyPosts

			if needsUpdate {
				// Update captain record to match config (preserves other fields like Successor)
				existingCaptain.Public = cfg.Public
				existingCaptain.AllowAllCrew = cfg.AllowAllCrew
				existingCaptain.EnableBlueskyPosts = p.enableBlueskyPosts
				_, err = p.UpdateCaptainRecord(ctx, existingCaptain)
				if err != nil {
					return fmt.Errorf("failed to update captain record: %w", err)
				}
				slog.Info("Synced captain record from config",
					"public", cfg.Public,
					"allowAllCrew", cfg.AllowAllCrew,
					"enableBlueskyPosts", p.enableBlueskyPosts)
			}
		}
	}

	// TODO(crew-migration): Remove this call after all holds have been upgraded (added 2026-01-06)
	// Migrate TID-based crew records to hash-based rkeys for O(1) lookups.
	// Migration failure is deliberately non-fatal: bootstrap continues.
	if migrated, err := p.MigrateCrewRecordsToHashRkeys(ctx); err != nil {
		slog.Warn("Crew record migration failed", "error", err)
	} else if migrated > 0 {
		slog.Info("Migrated crew records to hash-based rkeys", "count", migrated)
	}

	// Create or sync Bluesky profile record from config.
	// This runs even if captain exists (for existing holds being upgraded).
	// Skip if no S3 service (e.g., in tests).
	if s3svc != nil {
		_, existingProfile, profileErr := p.GetProfileRecord(ctx)
		if profileErr != nil {
			// Profile doesn't exist, create it fresh
			_, err = p.CreateProfileRecord(ctx, s3svc, cfg.ProfileDisplayName, cfg.ProfileDescription, cfg.ProfileAvatarURL)
			if err != nil {
				return fmt.Errorf("failed to create bluesky profile record: %w", err)
			}
			slog.Info("Created Bluesky profile record", "displayName", cfg.ProfileDisplayName)
		} else {
			// Profile exists — sync fields from config (like captain record sync above).
			// Empty config values never overwrite existing profile fields.
			needsUpdate := false

			if cfg.ProfileDisplayName != "" && (existingProfile.DisplayName == nil || *existingProfile.DisplayName != cfg.ProfileDisplayName) {
				existingProfile.DisplayName = &cfg.ProfileDisplayName
				needsUpdate = true
			}
			if cfg.ProfileDescription != "" && (existingProfile.Description == nil || *existingProfile.Description != cfg.ProfileDescription) {
				existingProfile.Description = &cfg.ProfileDescription
				needsUpdate = true
			}
			// Avatar is only filled in when missing; download/upload failures
			// are logged and tolerated (best-effort).
			if cfg.ProfileAvatarURL != "" && existingProfile.Avatar == nil {
				imageData, mimeType, dlErr := downloadImage(ctx, cfg.ProfileAvatarURL)
				if dlErr != nil {
					slog.Warn("Failed to download avatar for profile update", "error", dlErr)
				} else {
					avatarBlob, uploadErr := uploadBlobToStorage(ctx, s3svc, p.did, imageData, mimeType)
					if uploadErr != nil {
						slog.Warn("Failed to upload avatar for profile update", "error", uploadErr)
					} else {
						existingProfile.Avatar = avatarBlob
						needsUpdate = true
					}
				}
			}

			if needsUpdate {
				_, err = p.UpdateProfileRecord(ctx, existingProfile)
				if err != nil {
					return fmt.Errorf("failed to update bluesky profile record: %w", err)
				}
				slog.Info("Synced Bluesky profile record from config",
					"displayName", cfg.ProfileDisplayName)
			} else {
				slog.Info("Bluesky profile record already matches config, skipping")
			}
		}
	}

	return nil
}
|
|
|
|
// ListCollections returns all collections present in the hold's repository
|
|
func (p *HoldPDS) ListCollections(ctx context.Context) ([]string, error) {
|
|
session, err := p.carstore.ReadOnlySession(p.uid)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to create read-only session: %w", err)
|
|
}
|
|
|
|
head, err := p.carstore.GetUserRepoHead(ctx, p.uid)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to get repo head: %w", err)
|
|
}
|
|
|
|
if !head.Defined() {
|
|
// Empty repo, no collections
|
|
return []string{}, nil
|
|
}
|
|
|
|
r, err := repo.OpenRepo(ctx, session, head)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to open repo: %w", err)
|
|
}
|
|
|
|
collections := make(map[string]bool)
|
|
|
|
// Walk all records in the repo to discover collections
|
|
err = r.ForEach(ctx, "", func(k string, v cid.Cid) error {
|
|
// k is like "io.atcr.hold.captain/self" or "io.atcr.hold.crew/3m3by7msdln22"
|
|
parts := strings.Split(k, "/")
|
|
if len(parts) >= 1 {
|
|
collections[parts[0]] = true
|
|
}
|
|
return nil
|
|
})
|
|
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to enumerate collections: %w", err)
|
|
}
|
|
|
|
// Convert map to sorted slice
|
|
result := make([]string, 0, len(collections))
|
|
for collection := range collections {
|
|
result = append(result, collection)
|
|
}
|
|
|
|
return result, nil
|
|
}
|
|
|
|
// Close closes the carstore and records index
|
|
func (p *HoldPDS) Close() error {
|
|
if p.recordsIndex != nil {
|
|
if err := p.recordsIndex.Close(); err != nil {
|
|
return fmt.Errorf("failed to close records index: %w", err)
|
|
}
|
|
}
|
|
if closer, ok := p.carstore.(io.Closer); ok {
|
|
if err := closer.Close(); err != nil {
|
|
return fmt.Errorf("failed to close carstore: %w", err)
|
|
}
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// CreateRecordsIndexEventHandler creates an event handler that indexes records
|
|
// and also calls the provided broadcaster handler
|
|
func (p *HoldPDS) CreateRecordsIndexEventHandler(broadcasterHandler func(context.Context, *RepoEvent)) func(context.Context, *RepoEvent) {
|
|
return func(ctx context.Context, event *RepoEvent) {
|
|
// Index/delete records based on event operations
|
|
if p.recordsIndex != nil {
|
|
for _, op := range event.Ops {
|
|
switch op.Kind {
|
|
case EvtKindCreateRecord, EvtKindUpdateRecord:
|
|
// Index the record
|
|
cidStr := ""
|
|
if op.RecCid != nil {
|
|
cidStr = op.RecCid.String()
|
|
}
|
|
// Extract fields from record based on collection type
|
|
did := extractDIDFromOp(op)
|
|
digest, size := extractLayerFieldsFromOp(op)
|
|
if err := p.recordsIndex.IndexRecord(op.Collection, op.Rkey, cidStr, did, digest, size); err != nil {
|
|
slog.Warn("Failed to index record", "collection", op.Collection, "rkey", op.Rkey, "error", err)
|
|
}
|
|
case EvtKindDeleteRecord:
|
|
// Remove from index
|
|
if err := p.recordsIndex.DeleteRecord(op.Collection, op.Rkey); err != nil {
|
|
slog.Warn("Failed to delete record from index", "collection", op.Collection, "rkey", op.Rkey, "error", err)
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
// Call the broadcaster handler
|
|
if broadcasterHandler != nil {
|
|
broadcasterHandler(ctx, event)
|
|
}
|
|
}
|
|
}
|
|
|
|
// extractDIDFromOp extracts the associated DID from a repo operation based on collection type
|
|
func extractDIDFromOp(op RepoOp) string {
|
|
if op.Record == nil {
|
|
return ""
|
|
}
|
|
switch op.Collection {
|
|
case atproto.CrewCollection:
|
|
if rec, ok := op.Record.(*atproto.CrewRecord); ok {
|
|
return rec.Member
|
|
}
|
|
case atproto.LayerCollection:
|
|
if rec, ok := op.Record.(*atproto.LayerRecord); ok {
|
|
return rec.UserDID
|
|
}
|
|
case atproto.StatsCollection:
|
|
if rec, ok := op.Record.(*atproto.StatsRecord); ok {
|
|
return rec.OwnerDID
|
|
}
|
|
}
|
|
return ""
|
|
}
|
|
|
|
// extractLayerFieldsFromOp extracts digest and size from a layer record operation
|
|
func extractLayerFieldsFromOp(op RepoOp) (string, int64) {
|
|
if op.Record == nil || op.Collection != atproto.LayerCollection {
|
|
return "", 0
|
|
}
|
|
if rec, ok := op.Record.(*atproto.LayerRecord); ok {
|
|
return rec.Digest, rec.Size
|
|
}
|
|
return "", 0
|
|
}
|
|
|
|
// BackfillRecordsIndex populates the records index from existing MST data
|
|
func (p *HoldPDS) BackfillRecordsIndex(ctx context.Context) error {
|
|
if p.recordsIndex == nil {
|
|
return nil // No index to backfill
|
|
}
|
|
|
|
// Create session to read repo
|
|
session, err := p.carstore.ReadOnlySession(p.uid)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to create session: %w", err)
|
|
}
|
|
|
|
head, err := p.carstore.GetUserRepoHead(ctx, p.uid)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to get repo head: %w", err)
|
|
}
|
|
|
|
if !head.Defined() {
|
|
slog.Debug("No repo head, skipping backfill")
|
|
return nil
|
|
}
|
|
|
|
repoHandle, err := repo.OpenRepo(ctx, session, head)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to open repo: %w", err)
|
|
}
|
|
|
|
return p.recordsIndex.BackfillFromRepo(ctx, repoHandle)
|
|
}
|