[breaking-change] Redesign environment var configuration overrides.

This is done using reflection to avoid boilerplate and potential desync
of the two configuration interfaces. The `[[wildcards]]` section did
not fit well into the "splat every config key" paradigm, so it is
unmarshalled as a whole from a JSON payload in an environment variable.

This commit also splits up the `Config` type into small per-section
struct types and removes most references to the global `config` in
favor of passing pointers to sections around.

A new option, `-print-config-env-vars`, shows the names and types of
all of the available configuration knobs.
This commit is contained in:
Catherine
2025-09-22 04:28:04 +00:00
parent d81676fea0
commit a159dba0b8
11 changed files with 301 additions and 178 deletions

View File

@@ -17,11 +17,11 @@ You will need [Go](https://go.dev/) 1.25 or newer. Run:
```console
$ mkdir -p data
$ cp conf/config.toml.example config.toml
$ INSECURE=very go run ./src
$ cp conf/config.example.toml config.toml
$ PAGES_INSECURE=1 go run ./src
```
These commands start an HTTP server on `0.0.0.0:3000` and use the `data` directory for persistence. **Authentication is disabled via `INSECURE=very`** to avoid the need to set up a DNS server as well; never set `INSECURE=very` in production.
These commands start an HTTP server on `0.0.0.0:3000` and use the `data` directory for persistence. **Authentication is disabled via `PAGES_INSECURE=1`** to avoid the need to set up a DNS server as well; never enable `PAGES_INSECURE=1` in production.
To publish a site, run the following commands:

View File

@@ -25,10 +25,10 @@
}
storage s3 {
host "{env.S3_ENDPOINT}"
access_id "{env.S3_ACCESS_KEY_ID}"
secret_key "{env.S3_SECRET_ACCESS_KEY}"
bucket "{env.S3_BUCKET}"
host "{env.PAGES_STORAGE_S3_ENDPOINT}"
access_id "{env.PAGES_STORAGE_S3_ACCESS_KEY_ID}"
secret_key "{env.PAGES_STORAGE_S3_SECRET_ACCESS_KEY}"
bucket "{env.PAGES_STORAGE_S3_BUCKET}"
prefix "ssl"
}
@@ -68,7 +68,7 @@
on_demand
}
@h2c `{env.FEATURES}.matches(r"\bh2c\b")`
@h2c `{env.PAGES_FEATURES}.matches(r"\bh2c\b")`
reverse_proxy @h2c h2c://{$GIT_PAGES_ADDRESS:localhost}:3000
reverse_proxy http://{$GIT_PAGES_ADDRESS:localhost}:3000

View File

@@ -3,8 +3,8 @@
log-format = "datetime+message"
[listen]
# Use "-" to turn off the listener.
[server]
# Use "-" to disable the handler.
pages = "tcp/:3000"
caddy = "tcp/:3001"
health = "tcp/:3002"
@@ -15,25 +15,25 @@ clone-url = "https://codeberg.org/<user>/<project>.git"
index-repos = ["<user>.codeberg.page", "pages"]
fallback-proxy-to = "https://codeberg.page"
[backend]
[storage]
type = "fs"
[backend.fs]
[storage.fs]
root = "./data"
[backend.s3] # non-default bucket configuration
[storage.s3] # non-default bucket configuration
endpoint = "play.min.io"
access-key-id = "Q3AM3UQ867SPQQA43P2F"
secret-access-key = "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
region = "us-east-1"
bucket = "git-pages-demo"
[backend.s3.blob-cache]
[storage.s3.blob-cache]
max-size = "256MB"
[backend.s3.site-cache]
[storage.s3.site-cache]
max-size = "16MB"
max-age = "60s"
max-age = 60 # seconds
[limits]
max-site-size = "128M"
@@ -41,5 +41,5 @@ max-manifest-size = "1M"
max-inline-file-size = "256B"
git-large-object-threshold = "1M"
max-symlink-depth = 16
update-timeout = "60s"
max-heap-size-ratio = 0.5
update-timeout = 60 # seconds
max-heap-size-ratio = 0.5 # * RAM_size

View File

@@ -1,10 +1,10 @@
# Requires secrets to be set:
# - ACME_EMAIL
# - S3_ENDPOINT
# - S3_ACCESS_KEY_ID
# - S3_SECRET_ACCESS_KEY
# - S3_BUCKET
# - PAGES_CONFIG
# - PAGES_CONFIG_FILE
# - PAGES_STORAGE_S3_ENDPOINT
# - PAGES_STORAGE_S3_ACCESS_KEY_ID
# - PAGES_STORAGE_S3_SECRET_ACCESS_KEY
# - PAGES_STORAGE_S3_BUCKET
[build]
dockerfile = "Dockerfile"
@@ -24,7 +24,7 @@ AUTOMEMLIMIT = "0.25"
[[files]]
guest_path = "/app/config.toml"
secret_name = "PAGES_CONFIG"
local_path = "config.fly.toml"
[[services]]
internal_port = 80

View File

@@ -9,7 +9,6 @@ import (
"net"
"net/http"
"net/url"
"os"
"slices"
"strings"
)
@@ -31,10 +30,6 @@ func IsUnauthorized(err error) bool {
return false
}
func InsecureMode() bool {
return os.Getenv("INSECURE") == "very"
}
func GetHost(r *http.Request) (string, error) {
// FIXME: handle IDNA
host, _, err := net.SplitHostPort(r.Host)
@@ -221,7 +216,7 @@ func authorizeWildcardMatchSite(r *http.Request, pattern *WildcardPattern) (*Aut
func AuthorizeMetadataRetrieval(r *http.Request) (*Authorization, error) {
causes := []error{AuthError{http.StatusUnauthorized, "unauthorized"}}
if InsecureMode() {
if config.Insecure {
log.Println("auth: INSECURE mode")
return &Authorization{}, nil // for testing only
}
@@ -257,7 +252,7 @@ func AuthorizeMetadataRetrieval(r *http.Request) (*Authorization, error) {
func AuthorizeUpdateFromRepository(r *http.Request) (*Authorization, error) {
causes := []error{AuthError{http.StatusUnauthorized, "unauthorized"}}
if InsecureMode() {
if config.Insecure {
log.Println("auth: INSECURE mode: allow *")
return &Authorization{}, nil // for testing only
}
@@ -349,7 +344,7 @@ func AuthorizeBranch(branch string, auth *Authorization) error {
func AuthorizeUpdateFromArchive(r *http.Request) (*Authorization, error) {
causes := []error{AuthError{http.StatusUnauthorized, "unauthorized"}}
if InsecureMode() {
if config.Insecure {
log.Println("auth: INSECURE mode")
return &Authorization{}, nil // for testing only
}

View File

@@ -22,7 +22,7 @@ func splitBlobName(name string) []string {
type Backend interface {
// Retrieve a blob. Returns `reader, mtime, err`.
GetBlob(name string) (io.ReadSeeker, time.Time, error)
GetBlob(name string) (reader io.ReadSeeker, mtime time.Time, err error)
// Store a blob. If a blob called `name` already exists, this function returns `nil` without
// regards to the old or new contents. It is expected that blobs are content-addressed, i.e.
@@ -48,33 +48,25 @@ type Backend interface {
DeleteManifest(name string) error
// Check whether a domain has any deployments.
CheckDomain(domain string) (bool, error)
CheckDomain(domain string) (found bool, err error)
}
var backend Backend
func ConfigureBackend() error {
var err error
switch config.Backend.Type {
func ConfigureBackend(config *StorageConfig) (err error) {
switch config.Type {
case "fs":
if backend, err = NewFSBackend(config.Backend.FS.Root); err != nil {
return fmt.Errorf("fs backend: %w", err)
if backend, err = NewFSBackend(&config.FS); err != nil {
err = fmt.Errorf("fs backend: %w", err)
}
case "s3":
if backend, err = NewS3Backend(
config.Backend.S3.Endpoint,
config.Backend.S3.Insecure,
config.Backend.S3.AccessKeyID,
config.Backend.S3.SecretAccessKey,
config.Backend.S3.Region,
config.Backend.S3.Bucket,
); err != nil {
return fmt.Errorf("s3 backend: %w", err)
if backend, err = NewS3Backend(&config.S3); err != nil {
err = fmt.Errorf("s3 backend: %w", err)
}
default:
return fmt.Errorf("unknown backend: %s", config.Backend.Type)
err = fmt.Errorf("unknown backend: %s", config.Type)
}
return nil
return
}

View File

@@ -49,12 +49,12 @@ func createTempInRoot(root *os.Root, name string, data []byte) (string, error) {
return tempPath, nil
}
func NewFSBackend(dir string) (*FSBackend, error) {
blobRoot, err := maybeCreateOpenRoot(dir, "blob")
func NewFSBackend(config *FSConfig) (*FSBackend, error) {
blobRoot, err := maybeCreateOpenRoot(config.Root, "blob")
if err != nil {
return nil, fmt.Errorf("blob: %w", err)
}
siteRoot, err := maybeCreateOpenRoot(dir, "site")
siteRoot, err := maybeCreateOpenRoot(config.Root, "site")
if err != nil {
return nil, fmt.Errorf("site: %w", err)
}

View File

@@ -40,7 +40,7 @@ type S3Backend struct {
}
func makeCacheOptions[K comparable, V any](
config CacheConfig,
config *CacheConfig,
weigher func(K, V) uint32,
) *otter.Options[K, V] {
options := &otter.Options[K, V]{}
@@ -55,47 +55,43 @@ func makeCacheOptions[K comparable, V any](
}
func NewS3Backend(
endpoint string,
insecure bool,
accessKeyID string,
secretAccessKey string,
region string,
bucket string,
config *S3Config,
) (*S3Backend, error) {
ctx := context.Background()
client, err := minio.New(config.Backend.S3.Endpoint, &minio.Options{
client, err := minio.New(config.Endpoint, &minio.Options{
Creds: credentials.NewStaticV4(
config.Backend.S3.AccessKeyID,
config.Backend.S3.SecretAccessKey,
config.AccessKeyID,
config.SecretAccessKey,
"",
),
Secure: !config.Backend.S3.Insecure,
Secure: !config.Insecure,
})
if err != nil {
return nil, err
}
exists, err := client.BucketExists(ctx, config.Backend.S3.Bucket)
bucket := config.Bucket
exists, err := client.BucketExists(ctx, bucket)
if err != nil {
return nil, err
} else if !exists {
log.Printf("s3: create bucket %s\n", config.Backend.S3.Bucket)
log.Printf("s3: create bucket %s\n", bucket)
err = client.MakeBucket(ctx, config.Backend.S3.Bucket,
minio.MakeBucketOptions{Region: config.Backend.S3.Region})
err = client.MakeBucket(ctx, bucket,
minio.MakeBucketOptions{Region: config.Region})
if err != nil {
return nil, err
}
}
blobCache, err := otter.New(makeCacheOptions(config.Backend.S3.BlobCache,
blobCache, err := otter.New(makeCacheOptions(&config.BlobCache,
func(key string, value *CachedBlob) uint32 { return uint32(len(value.blob)) }))
if err != nil {
return nil, err
}
siteCache, err := otter.New(makeCacheOptions(config.Backend.S3.SiteCache,
siteCache, err := otter.New(makeCacheOptions(&config.SiteCache,
func(key string, value *CachedManifest) uint32 { return value.weight }))
if err != nil {
return nil, err

View File

@@ -1,7 +1,14 @@
package main
import (
"bytes"
"encoding/json"
"fmt"
"os"
"reflect"
"slices"
"strconv"
"strings"
"time"
"github.com/c2h5oh/datasize"
@@ -9,93 +16,226 @@ import (
"github.com/pelletier/go-toml/v2"
)
// Config is the root configuration. It is assembled in layers: struct-tag
// defaults first, then the TOML configuration file, then `PAGES_*`
// environment variable overrides (see the walkConfig traversal, which joins
// the uppercased tag names with "_" under the "PAGES" prefix).
type Config struct {
// Insecure disables authentication checks; it is excluded from TOML
// (`toml:"-"`) and settable only through the environment via the `env` tag
// (yielding PAGES_INSECURE). For testing only.
Insecure bool `toml:"-" env:"insecure"`
// Features is a list of opt-in feature names; queried via Config.Feature.
Features []string `toml:"features"`
// LogFormat selects the log line format; "datetime+message" by default.
LogFormat string `toml:"log-format" default:"datetime+message"`
// Server holds the listener addresses for the three HTTP endpoints.
Server ServerConfig `toml:"server"`
// Wildcard lists wildcard domain patterns; as a slice it cannot be splatted
// into per-key env vars, so it is overridden as a whole JSON payload.
Wildcard []WildcardConfig `toml:"wildcard"`
// Storage selects and configures the blob/manifest backend.
Storage StorageConfig `toml:"storage"`
// Limits holds resource and size limits.
Limits LimitsConfig `toml:"limits"`
}
// ServerConfig holds the listener addresses, in "proto/address" form
// (e.g. "tcp/:3000"); the value "-" disables the corresponding listener.
type ServerConfig struct {
// Pages is the main site-serving endpoint.
Pages string `toml:"pages" default:"tcp/:3000"`
// Caddy is the endpoint consulted by Caddy (e.g. for on-demand TLS).
Caddy string `toml:"caddy" default:"tcp/:3001"`
// Health is the health-check endpoint.
Health string `toml:"health" default:"tcp/:3002"`
}
// WildcardConfig describes one `[[wildcard]]` section: a wildcard domain and
// how to resolve sites under it. CloneURL and IndexRepos are templates with
// `<`/`>` placeholders (compiled with fasttemplate in ConfigureWildcards).
type WildcardConfig struct {
// Domain is the wildcard domain pattern, split on "." when matched.
Domain string `toml:"domain"`
// CloneURL is the template for the Git clone URL of a matched site.
CloneURL string `toml:"clone-url"`
// IndexRepos are repository name templates tried for the domain apex.
IndexRepos []string `toml:"index-repos" default:"[]"`
// FallbackProxyTo, if set, is a URL to reverse-proxy unmatched requests to.
FallbackProxyTo string `toml:"fallback-proxy-to"`
}
// CacheConfig bounds an in-memory cache (used for the S3 blob and site
// caches via makeCacheOptions).
type CacheConfig struct {
// MaxSize is the maximum total weight of cached entries.
MaxSize datasize.ByteSize `toml:"max-size"`
MaxAge uint `toml:"max-age"` // in seconds
}
type Config struct {
LogFormat string `toml:"log-format" default:"datetime+message"`
Listen struct {
Pages string `toml:"pages" default:"tcp/:3000"`
Caddy string `toml:"caddy" default:"tcp/:3001"`
Health string `toml:"health" default:"tcp/:3002"`
} `toml:"listen"`
Wildcard []struct {
Domain string `toml:"domain"`
CloneURL string `toml:"clone-url"`
IndexRepos []string `toml:"index-repos"`
FallbackProxyTo string `toml:"fallback-proxy-to"`
} `toml:"wildcard"`
Backend struct {
Type string `toml:"type" default:"fs"`
FS struct {
Root string `toml:"root" default:"./data"`
} `toml:"fs"`
S3 struct {
Endpoint string `toml:"endpoint"`
Insecure bool `toml:"insecure"`
AccessKeyID string `toml:"access-key-id"`
SecretAccessKey string `toml:"secret-access-key"`
Region string `toml:"region"`
Bucket string `toml:"bucket"`
BlobCache CacheConfig `toml:"blob-cache" default:"{\"MaxSize\":\"256MB\"}"`
SiteCache CacheConfig `toml:"site-cache" default:"{\"MaxAge\":60,\"MaxSize\":\"16MB\"}"`
}
} `toml:"backend"`
Limits struct {
// Maximum size of a single published site. Also used to limit the size of archive
// uploads and other similar overconsumption conditions.
MaxSiteSize datasize.ByteSize `toml:"max-site-size" default:"128M"`
// Maximum size of a single site manifest, computed over its binary Protobuf
// serialization.
MaxManifestSize datasize.ByteSize `toml:"max-manifest-size" default:"1M"`
// Maximum size of a file that will still be inlined into the site manifest.
MaxInlineFileSize datasize.ByteSize `toml:"max-inline-file-size" default:"256B"`
// Maximum size of a Git object that will be cached in memory during Git operations.
GitLargeObjectThreshold datasize.ByteSize `toml:"git-large-object-threshold" default:"1M"`
// Maximum number of symbolic link traversals before the path is considered unreachable.
MaxSymlinkDepth uint `toml:"max-symlink-depth" default:"16"`
// Maximum time that an update operation (PUT or POST request) could take before being
// interrupted.
UpdateTimeout time.Duration `toml:"update-timeout" default:"60s"`
// Soft limit on Go heap size, expressed as a fraction of total available RAM.
MaxHeapSizeRatio float64 `toml:"max-heap-size-ratio" default:"0.5"`
} `toml:"limits"`
// StorageConfig selects the storage backend and carries the per-backend
// settings; Type must be "fs" or "s3" (see ConfigureBackend).
type StorageConfig struct {
// Type is the backend selector: "fs" (default) or "s3".
Type string `toml:"type" default:"fs"`
// FS configures the filesystem backend; defaults applied as a JSON payload.
FS FSConfig `toml:"fs" default:"{\"Root\":\"./data\"}"`
// S3 configures the S3-compatible backend.
S3 S3Config `toml:"s3"`
}
var config Config
// FSConfig configures the filesystem backend; "blob" and "site" roots are
// created/opened under Root (see NewFSBackend).
type FSConfig struct {
// Root is the base data directory.
Root string `toml:"root"`
}
func ReadConfig(path string) error {
file, err := os.Open(path)
// S3Config configures the S3-compatible backend (consumed by NewS3Backend
// via the minio client). The bucket is created at startup if missing.
type S3Config struct {
// Endpoint is the S3 API endpoint, host[:port] without a scheme.
Endpoint string `toml:"endpoint"`
// Insecure switches the client to plain HTTP (Secure: !Insecure).
Insecure bool `toml:"insecure"`
AccessKeyID string `toml:"access-key-id"`
SecretAccessKey string `toml:"secret-access-key"`
// Region is used when the bucket has to be created.
Region string `toml:"region"`
Bucket string `toml:"bucket"`
// BlobCache bounds the in-memory blob cache (weighted by blob size).
BlobCache CacheConfig `toml:"blob-cache" default:"{\"MaxSize\":\"256MB\"}"`
// SiteCache bounds the in-memory manifest cache (weighted per manifest).
SiteCache CacheConfig `toml:"site-cache" default:"{\"MaxAge\":60,\"MaxSize\":\"16MB\"}"`
}
// LimitsConfig holds resource and size limits applied across site updates,
// manifest handling, and the Go runtime heap.
type LimitsConfig struct {
// Maximum size of a single published site. Also used to limit the size of archive
// uploads and other similar overconsumption conditions.
MaxSiteSize datasize.ByteSize `toml:"max-site-size" default:"128M"`
// Maximum size of a single site manifest, computed over its binary Protobuf
// serialization.
MaxManifestSize datasize.ByteSize `toml:"max-manifest-size" default:"1M"`
// Maximum size of a file that will still be inlined into the site manifest.
MaxInlineFileSize datasize.ByteSize `toml:"max-inline-file-size" default:"256B"`
// Maximum size of a Git object that will be cached in memory during Git operations.
GitLargeObjectThreshold datasize.ByteSize `toml:"git-large-object-threshold" default:"1M"`
// Maximum number of symbolic link traversals before the path is considered unreachable.
MaxSymlinkDepth uint `toml:"max-symlink-depth" default:"16"`
// Maximum time that an update operation (PUT or POST request) could take before being
// interrupted.
UpdateTimeout time.Duration `toml:"update-timeout" default:"60s"`
// Soft limit on Go heap size, expressed as a fraction of total available RAM.
MaxHeapSizeRatio float64 `toml:"max-heap-size-ratio" default:"0.5"`
}
func (config *Config) DebugJSON() string {
result, err := json.MarshalIndent(config, "", " ")
if err != nil {
return err
panic(err)
}
defer file.Close()
return string(result)
}
decoder := toml.NewDecoder(file)
decoder.DisallowUnknownFields()
if err := decoder.Decode(&config); err != nil {
return err
// Feature reports whether the feature called `name` is enabled in the
// `features` configuration list.
func (config *Config) Feature(name string) bool {
for _, enabled := range config.Features {
if enabled == name {
return true
}
}
return false
}
// walkConfigState carries the traversal state for walkConfigScope:
type walkConfigState struct {
// config is the (addressable) root Config value being walked.
config reflect.Value
// scopeType is the struct type of the scope currently being visited.
scopeType reflect.Type
// index is the field index path from the root to the current scope.
index []int
// segments accumulates the env-var name parts, starting with "PAGES".
segments []string
}
// walkConfigScope recursively visits the fields of one struct scope,
// deriving each field's environment variable name from its `env` (preferred)
// or `toml` struct tag: tags are uppercased, "-" replaced by "_", and joined
// with "_" onto the accumulated segments. Struct-typed fields recurse; all
// other fields are passed to `onKey` along with a settable reflect.Value.
// Traversal stops at the first error returned by `onKey`.
func walkConfigScope(scopeState walkConfigState, onKey func(string, reflect.Value) error) (err error) {
for _, field := range reflect.VisibleFields(scopeState.scopeType) {
fieldState := walkConfigState{config: scopeState.config}
fieldState.scopeType = field.Type
// Extend the index path relative to the root, not the current scope.
fieldState.index = append(scopeState.index, field.Index...)
var tagValue, ok = "", false
if tagValue, ok = field.Tag.Lookup("env"); !ok {
if tagValue, ok = field.Tag.Lookup("toml"); !ok {
continue // implicit skip
}
} else if tagValue == "-" {
continue // explicit skip
}
// NOTE(review): a field with `toml:"-"` and no `env` tag would produce the
// segment "_" rather than being skipped; no such field exists today.
fieldSegment := strings.ReplaceAll(strings.ToUpper(tagValue), "-", "_")
fieldState.segments = append(scopeState.segments, fieldSegment)
switch field.Type.Kind() {
case reflect.Struct:
err = walkConfigScope(fieldState, onKey)
default:
// Leaf field: hand the joined name and the addressable value to the
// callback so it can read or overwrite it.
err = onKey(
strings.Join(fieldState.segments, "_"),
scopeState.config.FieldByIndex(fieldState.index),
)
}
if err != nil {
return
}
}
return
}
// walkConfig walks every leaf configuration field of `config`, calling
// `onKey` with the field's derived environment variable name (prefixed with
// "PAGES") and a settable reflect.Value for it. It returns the first error
// reported by `onKey`, if any.
func walkConfig(config *Config, onKey func(string, reflect.Value) error) error {
root := reflect.ValueOf(config).Elem()
rootState := walkConfigState{
config: root,
scopeType: root.Type(),
index: []int{},
segments: []string{"PAGES"},
}
return walkConfigScope(rootState, onKey)
}
// setConfigValue parses the string representation `repr` (typically an
// environment variable value) according to the dynamic type of `reflValue`
// and stores the result into it. On a parse failure the value is left
// unchanged and the parse error is returned; an unrecognized type is a
// programmer error and panics.
func setConfigValue(reflValue reflect.Value, repr string) (err error) {
valueAny := reflValue.Interface()
switch valueCast := valueAny.(type) {
case string:
reflValue.SetString(repr)
case []string:
// Comma-separated list; no escaping mechanism for embedded commas.
reflValue.Set(reflect.ValueOf(strings.Split(repr, ",")))
case bool:
if valueCast, err = strconv.ParseBool(repr); err == nil {
reflValue.SetBool(valueCast)
}
case uint:
// Bit size matches the platform `uint` so the SetUint below cannot overflow.
var parsed uint64
if parsed, err = strconv.ParseUint(repr, 10, strconv.IntSize); err == nil {
reflValue.SetUint(parsed)
}
case float64:
if valueCast, err = strconv.ParseFloat(repr, 64); err == nil {
reflValue.SetFloat(valueCast)
}
case datasize.ByteSize:
// Human-readable sizes such as "256MB".
if valueCast, err = datasize.ParseString(repr); err == nil {
reflValue.Set(reflect.ValueOf(valueCast))
}
case time.Duration:
// Go duration syntax such as "60s".
if valueCast, err = time.ParseDuration(repr); err == nil {
reflValue.Set(reflect.ValueOf(valueCast))
}
case []WildcardConfig:
// The wildcard list does not fit the one-env-var-per-key scheme, so it is
// decoded as a whole from a strict JSON payload; defaults are applied to
// each decoded entry before assignment.
var parsed []*WildcardConfig
decoder := json.NewDecoder(bytes.NewReader([]byte(repr)))
decoder.DisallowUnknownFields()
if err = decoder.Decode(&parsed); err == nil {
var assigned []WildcardConfig
for _, wildcard := range parsed {
defaults.MustSet(wildcard)
assigned = append(assigned, *wildcard)
}
reflValue.Set(reflect.ValueOf(assigned))
}
default:
panic("unhandled config value type")
}
return err
}
func PrintConfigEnvVars() {
config := Config{}
defaults.MustSet(&config)
return nil
walkConfig(&config, func(envName string, reflValue reflect.Value) (err error) {
value := reflValue.Interface()
reprBefore := fmt.Sprint(value)
fmt.Printf("%s %T = %q\n", envName, value, reprBefore)
// make sure that the value, at least, roundtrips
setConfigValue(reflValue, reprBefore)
reprAfter := fmt.Sprint(value)
if reprBefore != reprAfter {
panic("failed to roundtrip config value")
}
return
})
}
func updateFromEnv(dest *string, key string) {
if value, found := os.LookupEnv(key); found {
*dest = value
func Configure(tomlPath string) (config *Config, err error) {
// start with an all-default configuration
config = new(Config)
defaults.MustSet(config)
// inject values from `config.toml`
if tomlPath != "" {
var file *os.File
file, err = os.Open(tomlPath)
if err != nil {
return
}
defer file.Close()
decoder := toml.NewDecoder(file)
decoder.DisallowUnknownFields()
if err = decoder.Decode(&config); err != nil {
return
}
}
}
func UpdateConfigEnv() {
updateFromEnv(&config.Backend.Type, "BACKEND")
updateFromEnv(&config.Backend.FS.Root, "FS_ROOT")
updateFromEnv(&config.Backend.S3.Endpoint, "S3_ENDPOINT")
updateFromEnv(&config.Backend.S3.AccessKeyID, "S3_ACCESS_KEY_ID")
updateFromEnv(&config.Backend.S3.SecretAccessKey, "S3_SECRET_ACCESS_KEY")
updateFromEnv(&config.Backend.S3.Region, "S3_REGION")
updateFromEnv(&config.Backend.S3.Bucket, "S3_BUCKET")
// inject values from the environment, overriding everything else
err = walkConfig(config, func(envName string, reflValue reflect.Value) error {
if envValue, found := os.LookupEnv(envName); found {
return setConfigValue(reflValue, envValue)
}
return nil
})
return
}

View File

@@ -1,29 +1,19 @@
package main
import (
"encoding/json"
"flag"
"fmt"
"log"
"log/slog"
"net"
"net/http"
"os"
"runtime/debug"
"slices"
"strings"
"github.com/KimMachineGun/automemlimit/memlimit"
)
var features []string
func FeatureActive(feature string) bool {
if features == nil {
features = strings.Split(strings.ToLower(os.Getenv("FEATURES")), ",")
}
return slices.Contains(features, strings.ToLower(feature))
}
var config *Config
func listen(name string, listen string) net.Listener {
if listen == "-" {
@@ -69,7 +59,7 @@ func serve(listener net.Listener, serve func(http.ResponseWriter, *http.Request)
server := http.Server{Handler: handler}
server.Protocols = new(http.Protocols)
server.Protocols.SetHTTP1(true)
if FeatureActive("h2c") {
if config.Feature("h2c") {
server.Protocols.SetUnencryptedHTTP2(true)
}
log.Fatalln(server.Serve(listener))
@@ -79,22 +69,28 @@ func serve(listener net.Listener, serve func(http.ResponseWriter, *http.Request)
func main() {
InitObservability()
configPath := flag.String("config", "config.toml",
"path to configuration file")
checkConfig := flag.Bool("check-config", false,
"validate configuration, print it as JSON, and exit")
printConfigEnvVars := flag.Bool("print-config-env-vars", false,
"print every recognized configuration environment variable and exit")
printConfig := flag.Bool("print-config", false,
"print configuration as JSON and exit")
configTomlPath := flag.String("config", "config.toml",
"set path to configuration file")
getManifest := flag.String("get-manifest", "",
"retrieve manifest for web root as ProtoJSON")
flag.Parse()
if err := ReadConfig(*configPath); err != nil {
if *printConfigEnvVars {
PrintConfigEnvVars()
return
}
var err error
if config, err = Configure(*configTomlPath); err != nil {
log.Fatalln("config:", err)
}
UpdateConfigEnv() // environment takes priority
if *checkConfig {
configJSON, _ := json.MarshalIndent(&config, "", " ")
fmt.Println(string(configJSON))
if *printConfig {
fmt.Println(config.DebugJSON())
return
}
@@ -105,6 +101,10 @@ func main() {
log.SetFlags(log.Ldate | log.Ltime | log.LUTC)
}
if len(config.Features) > 0 {
log.Println("features:", strings.Join(config.Features, ", "))
}
// Avoid being OOM killed by not garbage collecting early enough.
memlimit.SetGoMemLimitWithOpts(
memlimit.WithLogger(slog.Default()),
@@ -118,7 +118,7 @@ func main() {
)
if *getManifest != "" {
if err := ConfigureBackend(); err != nil {
if err := ConfigureBackend(&config.Storage); err != nil {
log.Fatalln(err)
}
@@ -137,15 +137,15 @@ func main() {
// spends some time initializing (which the S3 backend does) a proxy like Caddy can race
// with git-pages on startup and return errors for requests that would have been served
// just 0.5s later.
pagesListener := listen("pages", config.Listen.Pages)
caddyListener := listen("caddy", config.Listen.Caddy)
healthListener := listen("health", config.Listen.Health)
pagesListener := listen("pages", config.Server.Pages)
caddyListener := listen("caddy", config.Server.Caddy)
healthListener := listen("health", config.Server.Health)
if err := ConfigureBackend(); err != nil {
if err := ConfigureBackend(&config.Storage); err != nil {
log.Fatalln(err)
}
if err := ConfigureWildcards(); err != nil {
if err := ConfigureWildcards(config.Wildcard); err != nil {
log.Fatalln(err)
}
@@ -153,10 +153,10 @@ func main() {
go serve(caddyListener, ServeCaddy)
go serve(healthListener, ServeHealth)
if InsecureMode() {
log.Println("ready (INSECURE)")
if config.Insecure {
log.Println("serve: ready (INSECURE)")
} else {
log.Println("ready")
log.Println("serve: ready")
}
select {}
}

View File

@@ -70,20 +70,20 @@ func HandleWildcardFallback(w http.ResponseWriter, r *http.Request) (bool, error
return false, nil
}
func ConfigureWildcards() error {
for _, configWildcard := range config.Wildcard {
func ConfigureWildcards(config []WildcardConfig) error {
for _, wildcardConfig := range config {
wildcardPattern := WildcardPattern{
Domain: strings.Split(configWildcard.Domain, "."),
Domain: strings.Split(wildcardConfig.Domain, "."),
}
template, err := fasttemplate.NewTemplate(configWildcard.CloneURL, "<", ">")
template, err := fasttemplate.NewTemplate(wildcardConfig.CloneURL, "<", ">")
if err != nil {
return fmt.Errorf("wildcard pattern: clone URL: %w", err)
} else {
wildcardPattern.CloneURL = template
}
for _, indexRepo := range configWildcard.IndexRepos {
for _, indexRepo := range wildcardConfig.IndexRepos {
template, err := fasttemplate.NewTemplate(indexRepo, "<", ">")
if err != nil {
return fmt.Errorf("wildcard pattern: clone URL: %w", err)
@@ -92,8 +92,8 @@ func ConfigureWildcards() error {
}
}
if configWildcard.FallbackProxyTo != "" {
wildcardPattern.FallbackURL, err = url.Parse(configWildcard.FallbackProxyTo)
if wildcardConfig.FallbackProxyTo != "" {
wildcardPattern.FallbackURL, err = url.Parse(wildcardConfig.FallbackProxyTo)
if err != nil {
return fmt.Errorf("wildcard pattern: fallback URL: %w", err)
}