mirror of
https://codeberg.org/git-pages/git-pages.git
synced 2026-05-14 11:11:35 +00:00
Compare commits
209 Commits
v0.1.0
...
cat/audit-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
94f51d8138 | ||
|
|
55f87083e5 | ||
|
|
a9fc5780b1 | ||
|
|
ad92847fa0 | ||
|
|
3311fb639d | ||
|
|
93ce4f9671 | ||
|
|
73e47cd8d5 | ||
|
|
dd7268a657 | ||
|
|
edae862551 | ||
|
|
5808e90e5a | ||
|
|
684553ba72 | ||
|
|
89f672beda | ||
|
|
a233cdfbb8 | ||
|
|
4d8e620846 | ||
|
|
e8112c1abe | ||
|
|
b0a674abf4 | ||
|
|
f001107056 | ||
|
|
b7170e3077 | ||
|
|
7f5e02081d | ||
|
|
59cf185143 | ||
|
|
c5c5306688 | ||
|
|
27a6de792c | ||
|
|
2c109a5e1e | ||
|
|
d17c645927 | ||
|
|
57e9d05c7f | ||
|
|
1e6afe6570 | ||
|
|
b3692362d8 | ||
|
|
021c493daa | ||
|
|
b54664258b | ||
|
|
57dc8f8520 | ||
|
|
2b35996f62 | ||
|
|
cf050f505b | ||
|
|
6097a9abb8 | ||
|
|
fe329d748d | ||
|
|
bbdaae7280 | ||
|
|
f400f8d246 | ||
|
|
86259acf9c | ||
|
|
af7657a787 | ||
|
|
ed24f08d5f | ||
|
|
d7651941c0 | ||
|
|
bcd628fa6b | ||
|
|
6a3372a36a | ||
|
|
8d4ea36dec | ||
|
|
6509a8e1d2 | ||
|
|
6775f4aab5 | ||
|
|
1df1402f6b | ||
|
|
8dffd9cf11 | ||
|
|
5258bf756b | ||
|
|
38eb8afd0e | ||
|
|
2fdf0b805d | ||
|
|
e28d8cf0f2 | ||
|
|
ccabfc22a6 | ||
|
|
005e0fefed | ||
|
|
2267ab929c | ||
|
|
338487c048 | ||
|
|
b84a533be7 | ||
|
|
678868f7e6 | ||
|
|
1ca67f0590 | ||
|
|
c74ec4ad23 | ||
|
|
b37ca8cd14 | ||
|
|
310cc7d438 | ||
|
|
ad327b0382 | ||
|
|
b737e1bb9b | ||
|
|
8711e35c8e | ||
|
|
d2b5144182 | ||
|
|
34985c89bf | ||
|
|
050a002ddc | ||
|
|
559f0c6ae8 | ||
|
|
52fa8d1462 | ||
|
|
3830af5392 | ||
|
|
9e9664013b | ||
|
|
3e377986bc | ||
|
|
c85c7327bf | ||
|
|
886ee2ddae | ||
|
|
ac751e23b5 | ||
|
|
ebe7d07b3b | ||
|
|
4f14c345a6 | ||
|
|
7e293d6ef9 | ||
|
|
f7067b939b | ||
|
|
6bf4200f26 | ||
|
|
e9a5a901ec | ||
|
|
d3c8db6229 | ||
|
|
8f811147d6 | ||
|
|
0d33c64372 | ||
|
|
9b25ccdc35 | ||
|
|
18012d46e8 | ||
|
|
750f76aa9d | ||
|
|
6019a64c41 | ||
|
|
890029a98d | ||
|
|
cf26a89026 | ||
|
|
b75c37f651 | ||
|
|
c84e773df1 | ||
|
|
07133df6d2 | ||
|
|
1f1927d95d | ||
|
|
7334b8f637 | ||
|
|
96f210d253 | ||
|
|
a4bfa82388 | ||
|
|
338957eb3f | ||
|
|
26d9d784ba | ||
|
|
f163b9a42a | ||
|
|
04729c1f48 | ||
|
|
121f557048 | ||
|
|
c5df116673 | ||
|
|
71fd1c39df | ||
|
|
d97f5ac056 | ||
|
|
79407ba406 | ||
|
|
937aadc5d3 | ||
|
|
24dbab6813 | ||
|
|
30b6db2758 | ||
|
|
7655400560 | ||
|
|
32ccb0920f | ||
|
|
c88d04c71b | ||
|
|
86845f2505 | ||
|
|
7f112a761c | ||
|
|
a9cf69c04a | ||
|
|
132d093021 | ||
|
|
62917824fa | ||
|
|
62ef4a5366 | ||
|
|
8fa986015d | ||
|
|
8d574e5e7d | ||
|
|
91f05e210e | ||
|
|
bc70cba215 | ||
|
|
8b049da3c7 | ||
|
|
325d6bedda | ||
|
|
fc9e6fcf7b | ||
|
|
3840ba3c98 | ||
|
|
b58fe54c50 | ||
|
|
d1f55d6776 | ||
|
|
9e0267828d | ||
|
|
cf2c8f6270 | ||
|
|
43b6d92492 | ||
|
|
609e5ca452 | ||
|
|
82aebb70bf | ||
|
|
7c3b2248c9 | ||
|
|
9c6f735df0 | ||
|
|
ed2d853cbe | ||
|
|
1e3c39b7f6 | ||
|
|
92dc8f7231 | ||
|
|
e9edfb8f5c | ||
|
|
2cd8b58944 | ||
|
|
1283b4e0eb | ||
|
|
7313ab7d13 | ||
|
|
bd44f65b51 | ||
|
|
8d58793576 | ||
|
|
6076c17c51 | ||
|
|
959715269f | ||
|
|
faa486c779 | ||
|
|
50d28f3c8b | ||
|
|
eb6418b9b6 | ||
|
|
32c449e380 | ||
|
|
9036915ff9 | ||
|
|
6cf49c6093 | ||
|
|
da0758b972 | ||
|
|
8eeaf222ca | ||
|
|
8c29ba3fe7 | ||
|
|
464c40db9c | ||
|
|
93565e4e04 | ||
|
|
5f1ce5d334 | ||
|
|
f59830ae20 | ||
|
|
1c017020c1 | ||
|
|
e730b2bcd2 | ||
|
|
886635ce5e | ||
|
|
4161013fc0 | ||
|
|
8f0712b3ad | ||
|
|
2ebf4400bf | ||
|
|
1ad5d5590c | ||
|
|
75489f563e | ||
|
|
d5360817f3 | ||
|
|
e8c9cf588c | ||
|
|
f5c48d0759 | ||
|
|
92d6796ad9 | ||
|
|
460ff41cc9 | ||
|
|
be75cc82a4 | ||
|
|
e99fade242 | ||
|
|
21b82f8e2c | ||
|
|
6e7b42b942 | ||
|
|
5b8267ace5 | ||
|
|
955af0d589 | ||
|
|
95c4f1041d | ||
|
|
e226f51dd4 | ||
|
|
dcf70dfdda | ||
|
|
cc5f8f608e | ||
|
|
baae1e6560 | ||
|
|
6faf3b1ee3 | ||
|
|
f089208ca7 | ||
|
|
c250922f8d | ||
|
|
32111307eb | ||
|
|
89c57cfadb | ||
|
|
af40848d9f | ||
|
|
689030c28a | ||
|
|
30bde8c1c4 | ||
|
|
e1a2143d22 | ||
|
|
0b82dcbc25 | ||
|
|
f9669e1c69 | ||
|
|
4cca8abaf0 | ||
|
|
d82ae69625 | ||
|
|
fa02595f8b | ||
|
|
80d2a7a792 | ||
|
|
988da5243e | ||
|
|
eda6d8b6f6 | ||
|
|
fcc109c315 | ||
|
|
4d8f6d5e9d | ||
|
|
cb7802df10 | ||
|
|
b01e67f993 | ||
|
|
b5a1626a10 | ||
|
|
b1b8ae26e8 | ||
|
|
eac02e5758 | ||
|
|
7e1185309b | ||
|
|
351d0a0c85 |
@@ -5,18 +5,21 @@ on:
|
||||
pull_request:
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
FORGE: codeberg.org
|
||||
|
||||
jobs:
|
||||
check:
|
||||
runs-on: codeberg-small-lazy
|
||||
runs-on: debian-trixie
|
||||
container:
|
||||
image: docker.io/library/node:24-trixie-slim@sha256:45babd1b4ce0349fb12c4e24bf017b90b96d52806db32e001e3013f341bef0fe
|
||||
image: docker.io/library/node:24-trixie-slim@sha256:28fd420825d8e922eab0fd91740c7cf88ddbdc8116a2b20a82049f0c946feb03
|
||||
steps:
|
||||
- name: Check out source code
|
||||
uses: https://code.forgejo.org/actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
|
||||
uses: https://code.forgejo.org/actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- name: Set up toolchain
|
||||
uses: https://code.forgejo.org/actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6
|
||||
uses: https://code.forgejo.org/actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
|
||||
with:
|
||||
go-version: '>=1.25.0'
|
||||
go-version: '>=1.25.6'
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
apt-get -y update
|
||||
@@ -25,29 +28,36 @@ jobs:
|
||||
- name: Build service
|
||||
run: |
|
||||
go build
|
||||
- name: Run tests
|
||||
run: |
|
||||
go test ./...
|
||||
- name: Run static analysis
|
||||
run: |
|
||||
go vet
|
||||
staticcheck
|
||||
go vet ./...
|
||||
staticcheck ./...
|
||||
|
||||
release:
|
||||
# IMPORTANT: This workflow step will not work without the Releases unit enabled!
|
||||
if: ${{ forge.ref == 'refs/heads/main' || startsWith(forge.event.ref, 'refs/tags/v') }}
|
||||
needs: [check]
|
||||
runs-on: codeberg-medium-lazy
|
||||
runs-on: debian-trixie
|
||||
container:
|
||||
image: docker.io/library/node:24-trixie-slim@sha256:ef4ca6d078dd18322059a1f051225f7bbfc2bb60c16cbb5d8a1ba2cc8964fe8a
|
||||
image: docker.io/library/node:24-trixie-slim@sha256:28fd420825d8e922eab0fd91740c7cf88ddbdc8116a2b20a82049f0c946feb03
|
||||
steps:
|
||||
- name: Check out source code
|
||||
uses: https://code.forgejo.org/actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
|
||||
- name: Set up toolchain
|
||||
uses: https://code.forgejo.org/actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6
|
||||
with:
|
||||
go-version: '>=1.25.0'
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
apt-get -y update
|
||||
apt-get -y install ca-certificates
|
||||
apt-get -y install ca-certificates git
|
||||
# git needs to be installed for build information embedding to work
|
||||
- name: Check out source code
|
||||
uses: https://code.forgejo.org/actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
fetch-tags: true
|
||||
- name: Set up toolchain
|
||||
uses: https://code.forgejo.org/actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
|
||||
with:
|
||||
go-version: '>=1.25.6'
|
||||
- name: Build release assets
|
||||
# If you want more platforms to be represented, send a pull request.
|
||||
run: |
|
||||
@@ -58,7 +68,7 @@ jobs:
|
||||
build linux arm64
|
||||
build darwin arm64
|
||||
- name: Create release
|
||||
uses: https://code.forgejo.org/actions/forgejo-release@v2.7.3
|
||||
uses: https://code.forgejo.org/actions/forgejo-release@6a9510a9ea01b8b9b435933bf3c0fa45597ad530 # v2.11.3
|
||||
with:
|
||||
tag: ${{ startsWith(forge.event.ref, 'refs/tags/v') && forge.ref_name || 'latest' }}
|
||||
release-dir: assets
|
||||
@@ -69,35 +79,36 @@ jobs:
|
||||
package:
|
||||
if: ${{ forge.ref == 'refs/heads/main' || startsWith(forge.event.ref, 'refs/tags/v') }}
|
||||
needs: [check]
|
||||
runs-on: codeberg-medium-lazy
|
||||
runs-on: debian-trixie
|
||||
container:
|
||||
image: docker.io/library/node:24-trixie-slim@sha256:ef4ca6d078dd18322059a1f051225f7bbfc2bb60c16cbb5d8a1ba2cc8964fe8a
|
||||
image: docker.io/library/node:24-trixie-slim@sha256:28fd420825d8e922eab0fd91740c7cf88ddbdc8116a2b20a82049f0c946feb03
|
||||
steps:
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
apt-get -y update
|
||||
apt-get -y install buildah ca-certificates
|
||||
apt-get -y install ca-certificates buildah qemu-user-binfmt
|
||||
- name: Check out source code
|
||||
uses: https://code.forgejo.org/actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
|
||||
uses: https://code.forgejo.org/actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- name: Authenticate with Docker
|
||||
run: |
|
||||
buildah login --authfile=/tmp/authfile-${FORGE}.json \
|
||||
-u ${{ vars.PACKAGES_USER }} -p ${{ secrets.PACKAGES_TOKEN }} ${FORGE}
|
||||
- name: Build container
|
||||
run: |
|
||||
printf '[storage]\ndriver="vfs"\nrunroot="/run/containers/storage"\ngraphroot="/var/lib/containers/storage"\n' | tee /etc/containers/storage.conf
|
||||
buildah build --arch=amd64 --tag=container:${VER}-amd64 .
|
||||
buildah build --arch=arm64 --tag=container:${VER}-arm64 .
|
||||
buildah build ${CACHE} --arch=amd64 --tag=container:${VER}-amd64
|
||||
buildah build ${CACHE} --arch=arm64 --tag=container:${VER}-arm64
|
||||
buildah manifest create container:${VER} \
|
||||
container:${VER}-amd64 \
|
||||
container:${VER}-arm64
|
||||
env:
|
||||
BUILDAH_ISOLATION: chroot
|
||||
VER: ${{ startsWith(forge.event.ref, 'refs/tags/v') && forge.ref_name || 'latest' }}
|
||||
CACHE: ${{ format('--authfile=/tmp/authfile-{0}.json --layers --cache-from {0}/{1}/cache --cache-to {0}/{1}/cache', env.FORGE, forge.repository) }}
|
||||
- if: ${{ forge.repository == 'git-pages/git-pages' }}
|
||||
name: Push container to Codeberg
|
||||
run: |
|
||||
buildah login --authfile=/tmp/authfile-${FORGE}.json \
|
||||
-u ${{ vars.PACKAGES_USER }} -p ${{ secrets.PACKAGES_TOKEN }} ${FORGE}
|
||||
buildah manifest push --authfile=/tmp/authfile-${FORGE}.json \
|
||||
--all container:${VER} "docker://${FORGE}/${{ forge.repository }}:${VER/v/}"
|
||||
env:
|
||||
BUILDAH_ISOLATION: chroot
|
||||
FORGE: codeberg.org
|
||||
VER: ${{ startsWith(forge.event.ref, 'refs/tags/v') && forge.ref_name || 'latest' }}
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -4,3 +4,5 @@
|
||||
/data
|
||||
/config*.toml*
|
||||
/git-pages
|
||||
/site
|
||||
/assets
|
||||
|
||||
25
Dockerfile
25
Dockerfile
@@ -1,35 +1,32 @@
|
||||
# Install CA certificates.
|
||||
FROM docker.io/library/alpine:latest@sha256:4b7ce07002c69e8f3d704a9c5d6fd3053be500b7f1c69fc0d80990c2ad8dd412 AS ca-certificates-builder
|
||||
FROM docker.io/library/alpine:3 AS ca-certificates-builder
|
||||
RUN apk --no-cache add ca-certificates
|
||||
|
||||
# Build supervisor.
|
||||
FROM docker.io/library/golang:1.25-alpine@sha256:aee43c3ccbf24fdffb7295693b6e33b21e01baec1b2a55acc351fde345e9ec34 AS supervisor-builder
|
||||
FROM docker.io/library/golang:1.26-alpine@sha256:f85330846cde1e57ca9ec309382da3b8e6ae3ab943d2739500e08c86393a21b1 AS supervisor-builder
|
||||
RUN apk --no-cache add git
|
||||
WORKDIR /build
|
||||
RUN git clone https://github.com/ochinchina/supervisord . && \
|
||||
git checkout 16cb640325b3a4962b2ba17d68fb5c2b1e1b6b3c
|
||||
RUN GOBIN=/usr/bin go install -ldflags "-s -w" && \
|
||||
go clean -cache -modcache
|
||||
RUN GOBIN=/usr/bin go install -ldflags "-s -w"
|
||||
|
||||
# Build Caddy with S3 storage backend.
|
||||
FROM docker.io/library/caddy:2.10.2-builder@sha256:53f91ad7c5f1ab9a607953199b7c1e10920c570ae002aef913d68ed7464fb19f AS caddy-builder
|
||||
FROM docker.io/library/caddy:2.11.2-builder@sha256:10ed0251c5cd1dbb4db0b71ad43121147961a51adfec35febce2c93ea25c24f4 AS caddy-builder
|
||||
RUN xcaddy build ${CADDY_VERSION} \
|
||||
--with=github.com/ss098/certmagic-s3@v0.0.0-20250922022452-8af482af5f39 && \
|
||||
go clean -cache -modcache
|
||||
--with=github.com/ss098/certmagic-s3@v0.0.0-20250922022452-8af482af5f39
|
||||
|
||||
# Build git-pages.
|
||||
FROM docker.io/library/golang:1.25-alpine@sha256:aee43c3ccbf24fdffb7295693b6e33b21e01baec1b2a55acc351fde345e9ec34 AS git-pages-builder
|
||||
FROM docker.io/library/golang:1.26-alpine@sha256:f85330846cde1e57ca9ec309382da3b8e6ae3ab943d2739500e08c86393a21b1 AS git-pages-builder
|
||||
RUN apk --no-cache add git
|
||||
WORKDIR /build
|
||||
COPY go.mod go.sum ./
|
||||
RUN go mod download
|
||||
COPY *.go ./
|
||||
COPY src/ ./src/
|
||||
RUN go build -ldflags "-s -w" -o git-pages . && \
|
||||
go clean -cache -modcache
|
||||
RUN go build -ldflags "-s -w" -o git-pages .
|
||||
|
||||
# Compose git-pages and Caddy.
|
||||
FROM docker.io/library/busybox:1.37.0-musl@sha256:ef13e7482851632be3faf5bd1d28d4727c0810901d564b35416f309975a12a30
|
||||
FROM docker.io/library/busybox:1.37.0-musl@sha256:19b646668802469d968a05342a601e78da4322a414a7c09b1c9ee25165042138
|
||||
COPY --from=ca-certificates-builder /etc/ssl/cert.pem /etc/ssl/cert.pem
|
||||
COPY --from=supervisor-builder /usr/bin/supervisord /bin/supervisord
|
||||
COPY --from=caddy-builder /usr/bin/caddy /bin/caddy
|
||||
@@ -39,7 +36,7 @@ WORKDIR /app
|
||||
RUN mkdir /app/data
|
||||
COPY conf/supervisord.conf /app/supervisord.conf
|
||||
COPY conf/Caddyfile /app/Caddyfile
|
||||
COPY conf/config.example.toml /app/config.toml
|
||||
COPY conf/config.docker.toml /app/config.toml
|
||||
|
||||
# Caddy ports:
|
||||
EXPOSE 80/tcp 443/tcp 443/udp
|
||||
@@ -49,8 +46,8 @@ EXPOSE 3000/tcp 3001/tcp 3002/tcp
|
||||
# While the default command is to run git-pages standalone, the intended configuration
|
||||
# is to use it with Caddy and store both site data and credentials to an S3-compatible
|
||||
# object store.
|
||||
# * In a standalone configuration, the default, git-caddy listens on port 3000 (http).
|
||||
# * In a combined configuration, supervisord launches both git-caddy and Caddy, and
|
||||
# * In a standalone configuration, the default, git-pages listens on port 3000 (http).
|
||||
# * In a combined configuration, supervisord launches both git-pages and Caddy, and
|
||||
# Caddy listens on ports 80 (http) and 443 (https).
|
||||
CMD ["git-pages"]
|
||||
# CMD ["supervisord"]
|
||||
|
||||
74
README.md
74
README.md
@@ -1,13 +1,23 @@
|
||||
git-pages
|
||||
=========
|
||||
|
||||
_git-pages_ is a static site server for use with Git forges (i.e. a GitHub Pages replacement). It is written with efficiency in mind, scaling horizontally to any number of deployed sites and concurrent requests and serving sites up to hundreds of megabytes in size, while being equally suitable for single-user deployments.
|
||||
<a href="ircs://irc.libera.chat/#git-pages"><img alt="Discuss on IRC at #git-pages on libera.chat" src="https://img.shields.io/badge/irc-libera.chat-blue"></a>
|
||||
<a href="https://matrix.to/#/#git-pages:catircservices.org"><img alt="Discuss on Matrix at #git-pages:catircservices.org" src="https://img.shields.io/matrix/git-pages%3Acatircservices.org?server_fqdn=matrix.org&fetchMode=summary&label=matrix&color=blue"></a>
|
||||
|
||||
It is implemented in Go and has no other mandatory dependencies, although it is designed to be used together with the [Caddy server][caddy] (for TLS termination) and an [Amazon S3](https://aws.amazon.com/s3/) compatible object store (for horizontal scalability of storage).
|
||||
_git-pages_ is a static site server for use with Git forges (i.e. a GitHub Pages replacement). It is written with efficiency in mind, scaling horizontally to any number of machines and serving sites up to multiple gigabytes in size, while being equally suitable for small single-user deployments.
|
||||
|
||||
The included Docker container provides everything needed to deploy a Pages service, including zero-configuration on-demand provisioning of TLS certificates from [Let's Encrypt](https://letsencrypt.org/), and runs on any commodity cloud infrastructure. There is also a first-party deployment of _git-pages_ at [grebedoc.dev](https://grebedoc.dev).
|
||||
It is implemented in Go and has no other mandatory dependencies, although it is designed to be used together with the [Caddy server][caddy] for TLS termination. Site data may be stored on the filesystem or in an [Amazon S3](https://aws.amazon.com/s3/) compatible object store.
|
||||
|
||||
The included Docker container provides everything needed to deploy a Pages service, including zero-configuration on-demand provisioning of TLS certificates from [Let's Encrypt](https://letsencrypt.org/), and runs on any commodity cloud infrastructure.
|
||||
|
||||
> [!TIP]
|
||||
> If you want to publish a site using _git-pages_ to an existing service like Codeberg Pages or [Grebedoc][grebedoc], consider using the [CLI tool][git-pages-cli] or the [Forgejo Action][git-pages-action].
|
||||
|
||||
[caddy]: https://caddyserver.com/
|
||||
[git-pages-cli]: https://codeberg.org/git-pages/git-pages-cli
|
||||
[git-pages-action]: https://codeberg.org/git-pages/action
|
||||
[codeberg-pages]: https://codeberg.page
|
||||
[grebedoc]: https://grebedoc.dev
|
||||
|
||||
|
||||
Quickstart
|
||||
@@ -32,8 +42,6 @@ b70644b523c4aaf4efd206a588087a1d406cb047
|
||||
|
||||
The `pages` branch of the repository is now available at http://localhost:3000/!
|
||||
|
||||
[git-pages-cli]: https://codeberg.org/git-pages/git-pages-cli
|
||||
|
||||
|
||||
Deployment
|
||||
----------
|
||||
@@ -63,21 +71,41 @@ Features
|
||||
- Site URLs that have a path starting with `.git-pages/...` are reserved for _git-pages_ itself.
|
||||
- The `.git-pages/health` URL returns `ok` with the `Last-Modified:` header set to the manifest modification time.
|
||||
- The `.git-pages/manifest.json` URL returns a [ProtoJSON](https://protobuf.dev/programming-guides/json/) representation of the deployed site manifest with the `Last-Modified:` header set to the manifest modification time. It enumerates site structure, redirect rules, and errors that were not severe enough to abort publishing. Note that **the manifest JSON format is not stable and will change without notice**.
|
||||
- **With feature `archive-site`:** The `.git-pages/archive.tar` URL returns a tar archive of all site contents, including `_redirects` and `_headers` files (reconstructed from the manifest), with the `Last-Modified:` header set to the manifest modification time. Compression can be enabled using the `Accept-Encoding:` HTTP header (only).
|
||||
- The `.git-pages/archive.tar` URL returns a tar archive of all site contents, including `_redirects` and `_headers` files (reconstructed from the manifest), with the `Last-Modified:` header set to the manifest modification time. Compression can be enabled using the `Accept-Encoding:` HTTP header (only).
|
||||
* In response to a `PUT` or `POST` request, the server updates a site with new content. The URL of the request must be the root URL of the site that is being published.
|
||||
- If the `PUT` method receives an `application/x-www-form-urlencoded` body, it contains a repository URL to be shallowly cloned. The `Branch` header contains the branch to be checked out; the `pages` branch is used if the header is absent.
|
||||
- If the `PUT` method receives an `application/x-tar`, `application/x-tar+gzip`, `application/x-tar+zstd`, or `application/zip` body, it contains an archive to be extracted.
|
||||
- The `POST` method requires an `application/json` body containing a Forgejo/Gitea/Gogs/GitHub webhook event payload. Requests where the `ref` key contains anything other than `refs/heads/pages` are ignored, and only the `pages` branch is used. The `repository.clone_url` key contains a repository URL to be shallowly cloned.
|
||||
- If the received contents is empty, performs the same action as `DELETE`.
|
||||
* In response to a `PATCH` request, the server partially updates a site with new content. The URL of the request must be the root URL of the site that is being published.
|
||||
- The request must have a `application/x-tar`, `application/x-tar+gzip`, or `application/x-tar+zstd` body, whose contents is *merged* with the existing site contents as follows:
|
||||
- A character device entry with major 0 and minor 0 is treated as a "whiteout marker" (following [unionfs][whiteout]): it causes any existing file or directory with the same name to be deleted.
|
||||
- A directory entry replaces any existing file or directory with the same name (if any), recursively removing the old contents.
|
||||
- A file or symlink entry replaces any existing file or directory with the same name (if any).
|
||||
- If there is no `Create-Parents:` header or a `Create-Parents: no` header is present, the parent path of an entry must exist and refer to a directory.
|
||||
- If a `Create-Parents: yes` header is present, any missing segments in the parent path of an entry will be created (like `mkdir -p`). Any existing segments refer to directories.
|
||||
- The request must have a `Atomic: yes` or `Atomic: no` header. Not every backend configuration makes it possible to perform atomic compare-and-swap operations; on backends without atomic CAS support, `Atomic: yes` requests will fail, while `Atomic: no` requests will provide a best-effort approximation.
|
||||
- If a `PATCH` request loses a race against another content update request, it may return `409 Conflict`. This is true regardless of the `Atomic:` header value. Whenever this happens, resubmit the request as-is.
|
||||
- If the site has no contents after the update is applied, performs the same action as `DELETE`.
|
||||
* In response to a `DELETE` request, the server unpublishes a site. The URL of the request must be the root URL of the site that is being unpublished. Site data remains stored for an indeterminate period of time, but becomes completely inaccessible.
|
||||
* If a `Dry-Run: yes` header is provided with a `PUT`, `PATCH`, `DELETE`, or `POST` request, only the authorization checks are run; no destructive updates are made.
|
||||
* All updates to site content are atomic (subject to consistency guarantees of the storage backend). That is, there is an instantaneous moment during an update before which the server will return the old content and after which it will return the new content.
|
||||
* Files with a certain name, when placed in the root of a site, have special functions:
|
||||
- [Netlify `_redirects`][_redirects] file can be used to specify HTTP redirect and rewrite rules. The _git-pages_ implementation currently does not support placeholders, query parameters, or conditions, and may differ from Netlify in other minor ways. If you find that a supported `_redirects` file feature does not work the same as on Netlify, please file an issue. (Note that _git-pages_ does not perform URL normalization; `/foo` and `/foo/` are *not* the same, unlike with Netlify.)
|
||||
- [Netlify `_headers`][_headers] file can be used to specify custom HTTP response headers (if allowlisted by configuration). In particular, this is useful to enable [CORS requests][cors]. The _git-pages_ implementation may differ from Netlify in minor ways; if you find that a `_headers` file feature does not work the same as on Netlify, please file an issue.
|
||||
- [Netlify `_headers`][_headers] file can be used to specify custom HTTP response headers (if allowlisted by configuration). In particular, this is useful to enable [cross-origin isolation (COOP/COEP)][isolation]. The _git-pages_ implementation may differ from Netlify in minor ways; if you find that a `_headers` file feature does not work the same as on Netlify, please file an issue.
|
||||
- [Netlify `Basic-Auth:`][basic-auth] pseudo-header in the `_headers` file can be used to password-protect parts of a site, if enabled via the `[limits].allow-basic-auth` configuration option. **This is not a security feature: credentials are stored in cleartext and are accessible to anyone who can update the site. *Only* use it in low-stakes applications, e.g. preventing search engines from indexing parts of a site.** The authors of _git-pages_ shall not be held liable for any unauthorized information disclosures resulting from the use of this feature.
|
||||
* Incremental updates can be made using `PUT` or `PATCH` requests where the body contains an archive (both tar and zip are supported).
|
||||
- Any archive entry that is a symlink to `/git/blobs/<git-sha256>` is replaced with an existing manifest entry for the same site whose git blob hash matches `<git-sha256>`. If there is no existing manifest entry with the specified git hash, the update fails with a `422 Unprocessable Entity`.
|
||||
- For this error response only, if the negotiated content type is `application/vnd.git-pages.unresolved`, the response will contain the `<git-sha256>` of each unresolved reference, one per line.
|
||||
* Support for SHA-256 Git hashes is [limited by go-git][go-git-sha256]; once go-git implements the required features, _git-pages_ will automatically gain support for SHA-256 Git hashes. Note that shallow clones (used by _git-pages_ to conserve bandwidth if available) aren't supported yet in the Git protocol as of 2025.
|
||||
* Git LFS is not supported: it is a single-vendor specification/implementation with no stable Go API and a risk of misuse for reflected HTTP DoS attacks. A diagnostic is emitted for any files uploaded have the `filter=lfs` attribute set via `.gitattributes`.
|
||||
|
||||
[_redirects]: https://docs.netlify.com/manage/routing/redirects/overview/
|
||||
[_headers]: https://docs.netlify.com/manage/routing/headers/
|
||||
[cors]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Guides/CORS
|
||||
[basic-auth]: https://docs.netlify.com/manage/security/secure-access-to-sites/basic-authentication-with-custom-http-headers/
|
||||
[isolation]: https://web.dev/articles/cross-origin-isolation-guide
|
||||
[go-git-sha256]: https://github.com/go-git/go-git/issues/706
|
||||
[whiteout]: https://docs.kernel.org/filesystems/overlayfs.html#whiteouts-and-opaque-directories
|
||||
|
||||
|
||||
Authorization
|
||||
@@ -85,24 +113,25 @@ Authorization
|
||||
|
||||
DNS is the primary authorization method, using either TXT records or wildcard matching. In certain cases, git forge authorization is used in addition to DNS.
|
||||
|
||||
The authorization flow for content updates (`PUT`, `DELETE`, `POST` requests) proceeds sequentially in the following order, with the first of multiple applicable rule taking precedence:
|
||||
The authorization flow for content updates (`PUT`, `PATCH`, `DELETE`, `POST` requests) proceeds sequentially in the following order, with the first of multiple applicable rule taking precedence:
|
||||
|
||||
1. **Development Mode:** If the environment variable `PAGES_INSECURE` is set to a truthful value like `1`, the request is authorized.
|
||||
2. **DNS Challenge:** If the method is `PUT`, `DELETE`, `POST`, and a well-formed `Authorization:` header is provided containing a `<token>`, and a TXT record lookup at `_git-pages-challenge.<host>` returns a record whose concatenated value equals `SHA256("<host> <token>")`, the request is authorized.
|
||||
2. **DNS Challenge:** If the method is `PUT`, `PATCH`, `DELETE`, `POST`, and a well-formed `Authorization:` header is provided containing a `<token>`, and a TXT record lookup at `_git-pages-challenge.<host>` returns a record whose concatenated value equals `SHA256("<host> <token>")`, and (for `PUT` and `POST` requests) the requested branch is `pages`, the request is authorized.
|
||||
- **`Pages` scheme:** Request includes an `Authorization: Pages <token>` header.
|
||||
- **`Basic` scheme:** Request includes an `Authorization: Basic <basic>` header, where `<basic>` is equal to `Base64("Pages:<token>")`. (Useful for non-Forgejo forges.)
|
||||
3. **DNS Allowlist:** If the method is `PUT` or `POST`, and a TXT record lookup at `_git-pages-repository.<host>` returns a set of well-formed absolute URLs, and (for `PUT` requests) the body contains a repository URL, and the requested clone URLs is contained in this set of URLs, the request is authorized.
|
||||
4. **Wildcard Match (content):** If the method is `POST`, and a `[[wildcard]]` configuration section exists where the suffix of a hostname (compared label-wise) is equal to `[[wildcard]].domain`, and (for `PUT` requests) the body contains a repository URL, and the requested clone URL is a *matching* clone URL, the request is authorized.
|
||||
- **Index repository:** If the request URL is `scheme://<user>.<host>/`, a *matching* clone URL is computed by templating `[[wildcard]].clone-url` with `<user>` and `<project>`, where `<project>` is computed by templating each element of `[[wildcard]].index-repos` with `<user>`, and `[[wildcard]]` is the section where the match occurred.
|
||||
- **Project repository:** If the request URL is `scheme://<user>.<host>/<project>/`, a *matching* clone URL is computed by templating `[[wildcard]].clone-url` with `<user>` and `<project>`, and `[[wildcard]]` is the section where the match occurred.
|
||||
5. **Forge Authorization:** If the method is `PUT`, and the body contains an archive, and a `[[wildcard]]` configuration section exists where the suffix of a hostname (compared label-wise) is equal to `[[wildcard]].domain`, and `[[wildcard]].authorization` is non-empty, and the request includes a `Forge-Authorization:` header, and the header (when forwarded as `Authorization:`) grants push permissions to a repository at the *matching* clone URL (as defined above) as determined by an API call to the forge, the request is authorized. (This enables publishing a site for a private repository.)
|
||||
5. **Default Deny:** Otherwise, the request is not authorized.
|
||||
3. **DNS Allowlist:** If the method is `PUT` or `POST`, and the request URL is `scheme://<user>.<host>/`, and a TXT record lookup at `_git-pages-repository.<host>` returns a set of well-formed absolute URLs, and (for `PUT` requests) the body contains a repository URL or (for `POST` requests) the body contains a GitHub-style webhook payload, and the requested clone URLs is contained in this set of URLs, and the requested branch is `pages`, the request is authorized.
|
||||
4. **Wildcard Match (content):** If the method is `POST`, and the body contains a GitHub-style webhook payload, and a `[[wildcard]]` configuration section exists such that `[[wildcard]].domain` is a suffix of the site hostname (compared label-wise), and the body contains a repository URL, and the requested clone URL is a *matching* clone URL, and the requested branch is a *matching* branch, the request is authorized.
|
||||
- **Index repository:** If the request URL is `scheme://<user>.<host>/`: a *matching* clone URL is computed by templating `[[wildcard]].clone-url` with `<user>` and `<project>`, where `<project>` is computed by templating `[[wildcard]].index-repo` with `<user>`, and `[[wildcard]]` is the section where the match occurred; and a *matching* branch is specified by `[[wildcard]].index-repo-branch`.
|
||||
- **Project repository:** If the request URL is `scheme://<user>.<host>/<project>/`: a *matching* clone URL is computed by templating `[[wildcard]].clone-url` with `<user>` and `<project>`, and `[[wildcard]]` is the section where the match occurred; and a *matching* branch is `pages`.
|
||||
5. **Forge Authorization (wildcard):** If the method is `PUT` or `PATCH` or `DELETE`, and (unless the method is `DELETE`) the body contains an archive, and a `[[wildcard]]` configuration section exists such that `[[wildcard]].domain` is a suffix of the site hostname (compared label-wise), and `[[wildcard]].authorization` is defined, and the request includes a `Forge-Authorization:` header, and the header (when forwarded as `Authorization:`) grants push permissions to a repository at the *matching* clone URL (as defined above) as determined by an API call to the forge, the request is authorized.
|
||||
6. **Forge Authorization (DNS allowlist):** If the method is `PUT` or `PATCH` or `DELETE`, and (unless the method is `DELETE`) the body contains an archive, and the request URL is `scheme://<host>/`, and a TXT record lookup at `_git-pages-forge-allowlist.<host>` returns a set of well-formed absolute URLs, and the request includes a `Forge-Authorization:` header, and the header (when forwarded as `Authorization:`) grants push permissions to a repository at any of the URLs in the TXT records as determined by an API call to the forge, the request is authorized.
|
||||
7. **Default Deny:** Otherwise, the request is not authorized.
|
||||
|
||||
The authorization flow for metadata retrieval (`GET` requests with site paths starting with `.git-pages/`) in the following order, with the first of multiple applicable rule taking precedence:
|
||||
|
||||
1. **Development Mode:** Same as for content updates.
|
||||
2. **DNS Challenge:** Same as for content updates.
|
||||
3. **Wildcard Match (metadata):** If a `[[wildcard]]` configuration section exists where the suffix of a hostname (compared label-wise) is equal to `[[wildcard]].domain`, the request is authorized.
|
||||
3. **Wildcard Match (metadata):** If a `[[wildcard]]` configuration section exists where the suffix of a hostname (compared label-wise) is equal to `[[wildcard]].domain`, and the site never uses the `Basic-Auth:` pseudo-header, the request is authorized.
|
||||
4. **Default Deny:** Otherwise, the request is not authorized.
|
||||
|
||||
|
||||
@@ -111,10 +140,11 @@ Observability
|
||||
|
||||
_git-pages_ has robust observability features built in:
|
||||
* The metrics endpoint (bound to `:3002` by default) returns Go, pages server, and storage backend metrics in the [Prometheus](https://prometheus.io/) format.
|
||||
* Optional [Sentry](https://sentry.io/) integration allows greater visibility into the application. The `ENVIRONMENT` environment variable configures the deploy environment name (`development` by default).
|
||||
* If `SENTRY_DSN` environment variable is set, panics are reported to Sentry.
|
||||
* If `SENTRY_DSN` and `SENTRY_LOGS=1` environment variables are set, logs are uploaded to Sentry.
|
||||
* If `SENTRY_DSN` and `SENTRY_TRACING=1` environment variables are set, traces are uploaded to Sentry.
|
||||
* Optional syslog integration allows transmitting application logs to a syslog daemon. When present, the `SYSLOG_ADDR` environment variable enables the integration, and the value is used to configure the syslog destination. The value must follow the format `family/address` and is usually one of the following:
|
||||
* a Unix datagram socket: `unixgram//dev/log`;
|
||||
* TLS over TCP: `tcp+tls/host:port`;
|
||||
* plain TCP: `tcp/host:port`;
|
||||
* UDP: `udp/host:port`.
|
||||
|
||||
|
||||
Architecture (v2)
|
||||
@@ -160,4 +190,4 @@ The specific arrangement used is clearly not optimal; at a minimum it is likely
|
||||
License
|
||||
-------
|
||||
|
||||
[0-clause BSD](LICENSE-0BSD.txt)
|
||||
[0-clause BSD](LICENSE.txt)
|
||||
|
||||
@@ -25,11 +25,5 @@ https://, http:// {
|
||||
on_demand
|
||||
}
|
||||
|
||||
# initial PUT/POST for a new domain has to happen over HTTP
|
||||
@upgrade `method('GET') && protocol('http')`
|
||||
redir @upgrade https://{host}{uri} 301
|
||||
|
||||
reverse_proxy http://localhost:3000
|
||||
header Alt-Svc `h3=":443"; persist=1, h2=":443"; persist=1`
|
||||
encode
|
||||
}
|
||||
|
||||
37
conf/config.default.toml
Normal file
37
conf/config.default.toml
Normal file
@@ -0,0 +1,37 @@
|
||||
# This is a configuration containing default values only. The `config.example.toml` file contains
|
||||
# a configuration more useful for demonstration purposes.
|
||||
|
||||
log-format = 'text'
|
||||
|
||||
[server]
|
||||
pages = 'tcp/localhost:3000'
|
||||
caddy = 'tcp/localhost:3001'
|
||||
metrics = 'tcp/localhost:3002'
|
||||
|
||||
[storage]
|
||||
type = 'fs'
|
||||
|
||||
[storage.fs]
|
||||
root = './data'
|
||||
|
||||
[limits]
|
||||
max-site-size = '128MB'
|
||||
max-manifest-size = '1MB'
|
||||
max-inline-file-size = '256B'
|
||||
git-large-object-threshold = '1MB'
|
||||
max-symlink-depth = 16
|
||||
update-timeout = '1m0s'
|
||||
concurrent-uploads = 1024
|
||||
max-heap-size-ratio = 0.5
|
||||
forbidden-domains = []
|
||||
allowed-repository-url-prefixes = []
|
||||
allowed-custom-headers = ['X-Clacks-Overhead']
|
||||
allow-basic-auth = false
|
||||
|
||||
[audit]
|
||||
node-id = 0
|
||||
collect = false
|
||||
include-ip = ''
|
||||
|
||||
[observability]
|
||||
slow-response-threshold = '500ms'
|
||||
4
conf/config.docker.toml
Normal file
4
conf/config.docker.toml
Normal file
@@ -0,0 +1,4 @@
|
||||
[server]
|
||||
pages = "tcp/:3000"
|
||||
caddy = "tcp/:3001"
|
||||
metrics = "tcp/:3002"
|
||||
@@ -1,21 +1,24 @@
|
||||
# Unless otherwise noted, every value in this file is the same
|
||||
# as the intrinsic default value.
|
||||
# This is a configuration used for demonstration purposes. The `config.default.toml` file contains
|
||||
# a configuration corresponding to default values only.
|
||||
|
||||
log-format = "text"
|
||||
|
||||
[server]
|
||||
# Use "-" to disable the handler.
|
||||
pages = "tcp/:3000"
|
||||
caddy = "tcp/:3001"
|
||||
metrics = "tcp/:3002"
|
||||
pages = "tcp/localhost:3000"
|
||||
caddy = "tcp/localhost:3001"
|
||||
metrics = "tcp/localhost:3002"
|
||||
|
||||
[[wildcard]] # non-default section
|
||||
domain = "codeberg.page"
|
||||
clone-url = "https://codeberg.org/<user>/<project>.git"
|
||||
index-repos = ["<user>.codeberg.page", "pages"]
|
||||
index-repo = "pages"
|
||||
index-repo-branch = "main"
|
||||
authorization = "forgejo"
|
||||
fallback-proxy-to = "https://codeberg.page"
|
||||
|
||||
[fallback] # non-default section
|
||||
proxy-to = "https://codeberg.page"
|
||||
insecure = false
|
||||
|
||||
[storage]
|
||||
type = "fs"
|
||||
@@ -23,7 +26,7 @@ type = "fs"
|
||||
[storage.fs]
|
||||
root = "./data"
|
||||
|
||||
[storage.s3] # non-default bucket configuration
|
||||
[storage.s3] # non-default section
|
||||
endpoint = "play.min.io"
|
||||
access-key-id = "Q3AM3UQ867SPQQA43P2F"
|
||||
secret-access-key = "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
|
||||
@@ -45,10 +48,18 @@ max-inline-file-size = "256B"
|
||||
git-large-object-threshold = "1M"
|
||||
max-symlink-depth = 16
|
||||
update-timeout = "60s"
|
||||
concurrent-uploads = 1024
|
||||
max-heap-size-ratio = 0.5 # * RAM_size
|
||||
forbidden-domains = []
|
||||
# allowed-repository-url-prefixes = <nil>
|
||||
allowed-repository-url-prefixes = []
|
||||
allowed-custom-headers = ["X-Clacks-Overhead"]
|
||||
allow-basic-auth = false
|
||||
|
||||
[audit]
|
||||
node-id = 0
|
||||
collect = false
|
||||
include-ip = ""
|
||||
notify-url = ""
|
||||
|
||||
[observability]
|
||||
slow-response-threshold = "500ms"
|
||||
|
||||
24
flake.lock
generated
24
flake.lock
generated
@@ -18,6 +18,29 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"gomod2nix": {
|
||||
"inputs": {
|
||||
"flake-utils": [
|
||||
"flake-utils"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1763982521,
|
||||
"narHash": "sha256-ur4QIAHwgFc0vXiaxn5No/FuZicxBr2p0gmT54xZkUQ=",
|
||||
"owner": "nix-community",
|
||||
"repo": "gomod2nix",
|
||||
"rev": "02e63a239d6eabd595db56852535992c898eba72",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "gomod2nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix-filter": {
|
||||
"locked": {
|
||||
"lastModified": 1757882181,
|
||||
@@ -52,6 +75,7 @@
|
||||
"root": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils",
|
||||
"gomod2nix": "gomod2nix",
|
||||
"nix-filter": "nix-filter",
|
||||
"nixpkgs": "nixpkgs"
|
||||
}
|
||||
|
||||
35
flake.nix
35
flake.nix
@@ -3,6 +3,12 @@
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
|
||||
flake-utils.url = "github:numtide/flake-utils";
|
||||
nix-filter.url = "github:numtide/nix-filter";
|
||||
|
||||
gomod2nix = {
|
||||
url = "github:nix-community/gomod2nix";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
inputs.flake-utils.follows = "flake-utils";
|
||||
};
|
||||
};
|
||||
|
||||
outputs =
|
||||
@@ -11,13 +17,20 @@
|
||||
nixpkgs,
|
||||
flake-utils,
|
||||
nix-filter,
|
||||
}:
|
||||
...
|
||||
}@inputs:
|
||||
flake-utils.lib.eachDefaultSystem (
|
||||
system:
|
||||
let
|
||||
pkgs = nixpkgs.legacyPackages.${system};
|
||||
pkgs = import nixpkgs {
|
||||
inherit system;
|
||||
|
||||
git-pages = pkgs.buildGo125Module {
|
||||
overlays = [
|
||||
inputs.gomod2nix.overlays.default
|
||||
];
|
||||
};
|
||||
|
||||
git-pages = pkgs.buildGoApplication {
|
||||
pname = "git-pages";
|
||||
version = "0";
|
||||
|
||||
@@ -33,17 +46,24 @@
|
||||
];
|
||||
};
|
||||
|
||||
buildInputs = with pkgs; [
|
||||
pkgsStatic.musl
|
||||
];
|
||||
buildInputs = pkgs.lib.optionals pkgs.stdenv.targetPlatform.isLinux (
|
||||
with pkgs;
|
||||
[
|
||||
pkgsStatic.musl
|
||||
]
|
||||
);
|
||||
|
||||
ldflags = [
|
||||
"-linkmode external"
|
||||
"-X main.versionOverride=${self.shortRev or self.dirtyShortRev}"
|
||||
]
|
||||
++ pkgs.lib.optionals pkgs.stdenv.targetPlatform.isLinux [
|
||||
"-extldflags -static"
|
||||
"-s -w"
|
||||
];
|
||||
|
||||
vendorHash = "sha256-oVXELOXbRTzzU8pUGNE4K552thlZXGAX7qpv6ETwz6o=";
|
||||
go = pkgs.go_1_25;
|
||||
modules = ./gomod2nix.toml;
|
||||
};
|
||||
in
|
||||
{
|
||||
@@ -56,6 +76,7 @@
|
||||
|
||||
packages = with pkgs; [
|
||||
caddy
|
||||
gomod2nix
|
||||
];
|
||||
};
|
||||
|
||||
|
||||
62
go.mod
62
go.mod
@@ -3,62 +3,74 @@ module codeberg.org/git-pages/git-pages
|
||||
go 1.25.0
|
||||
|
||||
require (
|
||||
codeberg.org/git-pages/go-headers v1.1.0
|
||||
codeberg.org/git-pages/go-headers v1.1.1
|
||||
codeberg.org/git-pages/go-slog-syslog v0.0.0-20251207093707-892f654e80b7
|
||||
github.com/BurntSushi/toml v1.6.0
|
||||
github.com/KimMachineGun/automemlimit v0.7.5
|
||||
github.com/bits-and-blooms/bloom/v3 v3.7.1
|
||||
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500
|
||||
github.com/creasty/defaults v1.8.0
|
||||
github.com/getsentry/sentry-go v0.36.2
|
||||
github.com/getsentry/sentry-go/slog v0.36.2
|
||||
github.com/go-git/go-billy/v6 v6.0.0-20251026101908-623011986e70
|
||||
github.com/go-git/go-git/v6 v6.0.0-20251029213217-0bbfc0875edd
|
||||
github.com/klauspost/compress v1.18.1
|
||||
github.com/maypok86/otter/v2 v2.2.1
|
||||
github.com/minio/minio-go/v7 v7.0.95
|
||||
github.com/pelletier/go-toml/v2 v2.2.4
|
||||
github.com/fatih/color v1.19.0
|
||||
github.com/go-git/go-billy/v6 v6.0.0-20260410103409-85b6241850b5
|
||||
github.com/go-git/go-git/v6 v6.0.0-alpha.2
|
||||
github.com/jpillora/backoff v1.0.0
|
||||
github.com/kankanreno/go-snowflake v1.2.0
|
||||
github.com/klauspost/compress v1.18.5
|
||||
github.com/maypok86/otter/v2 v2.3.0
|
||||
github.com/minio/minio-go/v7 v7.0.100
|
||||
github.com/pquerna/cachecontrol v0.2.0
|
||||
github.com/prometheus/client_golang v1.23.2
|
||||
github.com/samber/slog-multi v1.5.0
|
||||
github.com/samber/slog-multi v1.8.0
|
||||
github.com/tj/go-redirects v0.0.0-20200911105812-fd1ba1020b37
|
||||
github.com/valyala/fasttemplate v1.2.2
|
||||
google.golang.org/protobuf v1.36.10
|
||||
golang.org/x/net v0.53.0
|
||||
google.golang.org/protobuf v1.36.11
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.3.0 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.4.1 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.24.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cloudflare/circl v1.6.1 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.5.0 // indirect
|
||||
github.com/cloudflare/circl v1.6.3 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.6.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/emirpasic/gods v1.18.1 // indirect
|
||||
github.com/go-git/gcfg/v2 v2.0.2 // indirect
|
||||
github.com/go-ini/ini v1.67.0 // indirect
|
||||
github.com/goccy/go-json v0.10.5 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/kevinburke/ssh_config v1.4.0 // indirect
|
||||
github.com/kevinburke/ssh_config v1.6.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
||||
github.com/minio/crc64nvme v1.0.2 // indirect
|
||||
github.com/klauspost/crc32 v1.3.0 // indirect
|
||||
github.com/leodido/go-syslog/v4 v4.3.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/minio/crc64nvme v1.1.1 // indirect
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
|
||||
github.com/philhofer/fwd v1.2.0 // indirect
|
||||
github.com/pjbgf/sha1cd v0.5.0 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.66.1 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/rs/xid v1.6.0 // indirect
|
||||
github.com/samber/lo v1.51.0 // indirect
|
||||
github.com/samber/slog-common v0.19.0 // indirect
|
||||
github.com/samber/lo v1.53.0 // indirect
|
||||
github.com/samber/slog-common v0.21.0 // indirect
|
||||
github.com/sergi/go-diff v1.4.0 // indirect
|
||||
github.com/tinylib/msgp v1.3.0 // indirect
|
||||
github.com/stretchr/testify v1.11.1 // indirect
|
||||
github.com/tinylib/msgp v1.6.1 // indirect
|
||||
github.com/tj/assert v0.0.3 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
golang.org/x/crypto v0.43.0 // indirect
|
||||
golang.org/x/net v0.46.0 // indirect
|
||||
golang.org/x/sys v0.37.0 // indirect
|
||||
golang.org/x/text v0.30.0 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/crypto v0.50.0 // indirect
|
||||
golang.org/x/sync v0.20.0 // indirect
|
||||
golang.org/x/sys v0.43.0 // indirect
|
||||
golang.org/x/text v0.36.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
142
go.sum
142
go.sum
@@ -1,98 +1,101 @@
|
||||
codeberg.org/git-pages/go-headers v1.0.0 h1:hvGU97hQdXaT5HwCpZJWQdg7akvtOBCSUNL4u2a5uTs=
|
||||
codeberg.org/git-pages/go-headers v1.0.0/go.mod h1:N4gwH0U3YPwmuyxqH7xBA8j44fTPX+vOEP7ejJVBPts=
|
||||
codeberg.org/git-pages/go-headers v1.1.0 h1:rk7/SOSsn+XuL7PUQZFYUaWKHEaj6K8mXmUV9rF2VxE=
|
||||
codeberg.org/git-pages/go-headers v1.1.0/go.mod h1:N4gwH0U3YPwmuyxqH7xBA8j44fTPX+vOEP7ejJVBPts=
|
||||
codeberg.org/git-pages/go-headers v1.1.1 h1:fpIBELKo66Z2k+gCeYl5mCEXVQ99Lmx1iup1nbo2shE=
|
||||
codeberg.org/git-pages/go-headers v1.1.1/go.mod h1:N4gwH0U3YPwmuyxqH7xBA8j44fTPX+vOEP7ejJVBPts=
|
||||
codeberg.org/git-pages/go-slog-syslog v0.0.0-20251207093707-892f654e80b7 h1:+rkrAxhNZo/eKEcKOqVOsF6ohAPv5amz0JLburOeRjs=
|
||||
codeberg.org/git-pages/go-slog-syslog v0.0.0-20251207093707-892f654e80b7/go.mod h1:8NPSXbYcVb71qqNM5cIgn1/uQgMisLbu2dVD1BNxsUw=
|
||||
github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk=
|
||||
github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/KimMachineGun/automemlimit v0.7.5 h1:RkbaC0MwhjL1ZuBKunGDjE/ggwAX43DwZrJqVwyveTk=
|
||||
github.com/KimMachineGun/automemlimit v0.7.5/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
|
||||
github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
|
||||
github.com/ProtonMail/go-crypto v1.4.1 h1:9RfcZHqEQUvP8RzecWEUafnZVtEvrBVL9BiF67IQOfM=
|
||||
github.com/ProtonMail/go-crypto v1.4.1/go.mod h1:e1OaTyu5SYVrO9gKOEhTc+5UcXtTUa+P3uLudwcgPqo=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bits-and-blooms/bitset v1.24.2 h1:M7/NzVbsytmtfHbumG+K2bremQPMJuqv1JD3vOaFxp0=
|
||||
github.com/bits-and-blooms/bitset v1.24.2/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/bits-and-blooms/bloom/v3 v3.7.1 h1:WXovk4TRKZttAMJfoQx6K2DM0zNIt8w+c67UqO+etV0=
|
||||
github.com/bits-and-blooms/bloom/v3 v3.7.1/go.mod h1:rZzYLLje2dfzXfAkJNxQQHsKurAyK55KUnL43Euk0hU=
|
||||
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 h1:6lhrsTEnloDPXyeZBvSYvQf8u86jbKehZPVDDlkgDl4=
|
||||
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
|
||||
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
|
||||
github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8=
|
||||
github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4=
|
||||
github.com/creasty/defaults v1.8.0 h1:z27FJxCAa0JKt3utc0sCImAEb+spPucmKoOdLHvHYKk=
|
||||
github.com/creasty/defaults v1.8.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM=
|
||||
github.com/cyphar/filepath-securejoin v0.5.0 h1:hIAhkRBMQ8nIeuVwcAoymp7MY4oherZdAxD+m0u9zaw=
|
||||
github.com/cyphar/filepath-securejoin v0.5.0/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
|
||||
github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE=
|
||||
github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o=
|
||||
github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=
|
||||
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
|
||||
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
|
||||
github.com/getsentry/sentry-go v0.36.2 h1:uhuxRPTrUy0dnSzTd0LrYXlBYygLkKY0hhlG5LXarzM=
|
||||
github.com/getsentry/sentry-go v0.36.2/go.mod h1:p5Im24mJBeruET8Q4bbcMfCQ+F+Iadc4L48tB1apo2c=
|
||||
github.com/getsentry/sentry-go/slog v0.36.2 h1:PM27JHFE3lsE8fgI/cOueEOtjiktnC3Za2o5oL9PbJQ=
|
||||
github.com/getsentry/sentry-go/slog v0.36.2/go.mod h1:aVFAxnpA3FEtZeSBhBFAnWOlqhiLjaaoOZ0bmBN9IHo=
|
||||
github.com/fatih/color v1.19.0 h1:Zp3PiM21/9Ld6FzSKyL5c/BULoe/ONr9KlbYVOfG8+w=
|
||||
github.com/fatih/color v1.19.0/go.mod h1:zNk67I0ZUT1bEGsSGyCZYZNrHuTkJJB+r6Q9VuMi0LE=
|
||||
github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
|
||||
github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU=
|
||||
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
|
||||
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
|
||||
github.com/go-git/gcfg/v2 v2.0.2 h1:MY5SIIfTGGEMhdA7d7JePuVVxtKL7Hp+ApGDJAJ7dpo=
|
||||
github.com/go-git/gcfg/v2 v2.0.2/go.mod h1:/lv2NsxvhepuMrldsFilrgct6pxzpGdSRC13ydTLSLs=
|
||||
github.com/go-git/go-billy/v6 v6.0.0-20251026101908-623011986e70 h1:TWpNrg9JPxp0q+KG0hoFGBulPIP/kMK1b0mDqjdEB/s=
|
||||
github.com/go-git/go-billy/v6 v6.0.0-20251026101908-623011986e70/go.mod h1:TpCYxdQ0tWZkrnAkd7yqK+z1C8RKcyjcaYAJNAcnUnM=
|
||||
github.com/go-git/go-git-fixtures/v5 v5.1.1 h1:OH8i1ojV9bWfr0ZfasfpgtUXQHQyVS8HXik/V1C099w=
|
||||
github.com/go-git/go-git-fixtures/v5 v5.1.1/go.mod h1:Altk43lx3b1ks+dVoAG2300o5WWUnktvfY3VI6bcaXU=
|
||||
github.com/go-git/go-git/v6 v6.0.0-20251029213217-0bbfc0875edd h1:pn6+tR4O8McyqEr2MbQwqcySovpG8jDd11F/jQ6aAfA=
|
||||
github.com/go-git/go-git/v6 v6.0.0-20251029213217-0bbfc0875edd/go.mod h1:z9pQiXCfyOZIs/8qa5zmozzbcsDPtGN91UD7+qeX3hk=
|
||||
github.com/go-git/go-billy/v6 v6.0.0-20260410103409-85b6241850b5 h1:r5Y4Hn9QwQj+u6vN0Ib1MipHkanYaG8Zj0kxsnv8Bu4=
|
||||
github.com/go-git/go-billy/v6 v6.0.0-20260410103409-85b6241850b5/go.mod h1:CdBVp7CXl9l3sOyNEog46cP1Pvx/hjCe9AD0mtaIUYU=
|
||||
github.com/go-git/go-git-fixtures/v6 v6.0.0-20260405195209-b16dd39735e0 h1:XoTsdvaghuVfIr7HpNTmFDLu2nz3I2iGqyn6Uk6MkJc=
|
||||
github.com/go-git/go-git-fixtures/v6 v6.0.0-20260405195209-b16dd39735e0/go.mod h1:1Lr7/vYEYyl6Ir9Ku0tKrCIRreM5zovv0Jdx2MPSM4s=
|
||||
github.com/go-git/go-git/v6 v6.0.0-alpha.2 h1:T3loNtDuAixNzXtlQxZhnYiYpaQ3CA4vn9RssAniEeI=
|
||||
github.com/go-git/go-git/v6 v6.0.0-alpha.2/go.mod h1:oCD3i19CTz7gBpeb11ZZqL91WzqbMq9avn5KpUYy/Ak=
|
||||
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
|
||||
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
|
||||
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/kevinburke/ssh_config v1.4.0 h1:6xxtP5bZ2E4NF5tuQulISpTO2z8XbtH8cg1PWkxoFkQ=
|
||||
github.com/kevinburke/ssh_config v1.4.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M=
|
||||
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
|
||||
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
|
||||
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/kankanreno/go-snowflake v1.2.0 h1:Zx2SctsH5pivIj9vyhwyDyQS23jcDJx4iT49Bjv81kk=
|
||||
github.com/kankanreno/go-snowflake v1.2.0/go.mod h1:6CZ+10PeVsFXKZUTYyJzPiRIjn1IXbInaWLCX/LDJ0g=
|
||||
github.com/kevinburke/ssh_config v1.6.0 h1:J1FBfmuVosPHf5GRdltRLhPJtJpTlMdKTBjRgTaQBFY=
|
||||
github.com/kevinburke/ssh_config v1.6.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M=
|
||||
github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE=
|
||||
github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ=
|
||||
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/klauspost/crc32 v1.3.0 h1:sSmTt3gUt81RP655XGZPElI0PelVTZ6YwCRnPSupoFM=
|
||||
github.com/klauspost/crc32 v1.3.0/go.mod h1:D7kQaZhnkX/Y0tstFGf8VUzv2UofNGqCjnC3zdHB0Hw=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/maypok86/otter/v2 v2.2.1 h1:hnGssisMFkdisYcvQ8L019zpYQcdtPse+g0ps2i7cfI=
|
||||
github.com/maypok86/otter/v2 v2.2.1/go.mod h1:1NKY9bY+kB5jwCXBJfE59u+zAwOt6C7ni1FTlFFMqVs=
|
||||
github.com/minio/crc64nvme v1.0.2 h1:6uO1UxGAD+kwqWWp7mBFsi5gAse66C4NXO8cmcVculg=
|
||||
github.com/minio/crc64nvme v1.0.2/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
|
||||
github.com/leodido/go-syslog/v4 v4.3.0 h1:bbSpI/41bYK9iSdlYzcwvlxuLOE8yi4VTFmedtnghdA=
|
||||
github.com/leodido/go-syslog/v4 v4.3.0/go.mod h1:eJ8rUfDN5OS6dOkCOBYlg2a+hbAg6pJa99QXXgMrd98=
|
||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/maypok86/otter/v2 v2.3.0 h1:8H8AVVFUSzJwIegKwv1uF5aGitTY+AIrtktg7OcLs8w=
|
||||
github.com/maypok86/otter/v2 v2.3.0/go.mod h1:XgIdlpmL6jYz882/CAx1E4C1ukfgDKSaw4mWq59+7l8=
|
||||
github.com/minio/crc64nvme v1.1.1 h1:8dwx/Pz49suywbO+auHCBpCtlW1OfpcLN7wYgVR6wAI=
|
||||
github.com/minio/crc64nvme v1.1.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v7 v7.0.95 h1:ywOUPg+PebTMTzn9VDsoFJy32ZuARN9zhB+K3IYEvYU=
|
||||
github.com/minio/minio-go/v7 v7.0.95/go.mod h1:wOOX3uxS334vImCNRVyIDdXX9OsXDm89ToynKgqUKlo=
|
||||
github.com/minio/minio-go/v7 v7.0.100 h1:ShkWi8Tyj9RtU57OQB2HIXKz4bFgtVib0bbT1sbtLI8=
|
||||
github.com/minio/minio-go/v7 v7.0.100/go.mod h1:EtGNKtlX20iL2yaYnxEigaIvj0G0GwSDnifnG8ClIdw=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
||||
github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM=
|
||||
github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
|
||||
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
||||
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
|
||||
github.com/pjbgf/sha1cd v0.5.0 h1:a+UkboSi1znleCDUNT3M5YxjOnN1fz2FhN48FlwCxs0=
|
||||
github.com/pjbgf/sha1cd v0.5.0/go.mod h1:lhpGlyHLpQZoxMv8HcgXvZEhcGs0PG/vsZnEJ7H0iCM=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
@@ -109,16 +112,16 @@ github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9Z
|
||||
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
|
||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
|
||||
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
|
||||
github.com/samber/lo v1.51.0 h1:kysRYLbHy/MB7kQZf5DSN50JHmMsNEdeY24VzJFu7wI=
|
||||
github.com/samber/lo v1.51.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
|
||||
github.com/samber/slog-common v0.19.0 h1:fNcZb8B2uOLooeYwFpAlKjkQTUafdjfqKcwcC89G9YI=
|
||||
github.com/samber/slog-common v0.19.0/go.mod h1:dTz+YOU76aH007YUU0DffsXNsGFQRQllPQh9XyNoA3M=
|
||||
github.com/samber/slog-multi v1.5.0 h1:UDRJdsdb0R5vFQFy3l26rpX3rL3FEPJTJ2yKVjoiT1I=
|
||||
github.com/samber/slog-multi v1.5.0/go.mod h1:im2Zi3mH/ivSY5XDj6LFcKToRIWPw1OcjSVSdXt+2d0=
|
||||
github.com/samber/lo v1.53.0 h1:t975lj2py4kJPQ6haz1QMgtId2gtmfktACxIXArw3HM=
|
||||
github.com/samber/lo v1.53.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
|
||||
github.com/samber/slog-common v0.21.0 h1:Wo2hTly1Br5RjYqX/BTWJJeDnTE85oWk/7vqlpZuAUc=
|
||||
github.com/samber/slog-common v0.21.0/go.mod h1:d/6OaSlzdkl9PFpfRLgn8FwY1OW6EFmPtBpsHX4MrU0=
|
||||
github.com/samber/slog-multi v1.8.0 h1:E05c1wnQ+8M58oQDBABlJ4TEIJWssNgtckso3zlaLlI=
|
||||
github.com/samber/slog-multi v1.8.0/go.mod h1:6+3j/ILxDvAcLD75YdQAm6iKWu6AmwlohLgQxL/2aiI=
|
||||
github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
|
||||
github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
@@ -126,12 +129,14 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww=
|
||||
github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0=
|
||||
github.com/tinylib/msgp v1.6.1 h1:ESRv8eL3u+DNHUoSAAQRE50Hm162zqAnBoGv9PzScPY=
|
||||
github.com/tinylib/msgp v1.6.1/go.mod h1:RSp0LW9oSxFut3KzESt5Voq4GVWyS+PSulT77roAqEA=
|
||||
github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk=
|
||||
github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk=
|
||||
github.com/tj/go-redirects v0.0.0-20200911105812-fd1ba1020b37 h1:K11tjwz8zTTSZkz4TUjfLN+y8uJWP38BbyPqZ2yB/Yk=
|
||||
github.com/tj/go-redirects v0.0.0-20200911105812-fd1ba1020b37/go.mod h1:E0E2H2gQA+uoi27VCSU+a/BULPtadQA78q3cpTjZbZw=
|
||||
github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg=
|
||||
github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
|
||||
@@ -140,18 +145,23 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
|
||||
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
|
||||
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
|
||||
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
|
||||
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
|
||||
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
|
||||
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
|
||||
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
|
||||
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
|
||||
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
|
||||
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
|
||||
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
|
||||
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI=
|
||||
golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q=
|
||||
golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA=
|
||||
golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs=
|
||||
golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=
|
||||
golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI=
|
||||
golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
|
||||
golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY=
|
||||
golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY=
|
||||
golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg=
|
||||
golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164=
|
||||
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
||||
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
|
||||
204
gomod2nix.toml
Normal file
204
gomod2nix.toml
Normal file
@@ -0,0 +1,204 @@
|
||||
schema = 3
|
||||
|
||||
[mod]
|
||||
[mod."codeberg.org/git-pages/go-headers"]
|
||||
version = "v1.1.1"
|
||||
hash = "sha256-qgL7l1FHXxcBWhBnBLEI0yENd6P+frvwlKxEAXLA3VY="
|
||||
[mod."codeberg.org/git-pages/go-slog-syslog"]
|
||||
version = "v0.0.0-20251207093707-892f654e80b7"
|
||||
hash = "sha256-ye+DBIyxqTEOViYRrQPWyGJCaLmyKSDwH5btlqDPizM="
|
||||
[mod."github.com/BurntSushi/toml"]
|
||||
version = "v1.6.0"
|
||||
hash = "sha256-ptdUJvuc21ixeLt+M5way/na3aCnCO4MYHWulWp8NEY="
|
||||
[mod."github.com/KimMachineGun/automemlimit"]
|
||||
version = "v0.7.5"
|
||||
hash = "sha256-lH/ip9j2hbYUc2W/XIYve/5TScQPZtEZe3hu76CY//k="
|
||||
[mod."github.com/Microsoft/go-winio"]
|
||||
version = "v0.6.2"
|
||||
hash = "sha256-tVNWDUMILZbJvarcl/E7tpSnkn7urqgSHa2Eaka5vSU="
|
||||
[mod."github.com/ProtonMail/go-crypto"]
|
||||
version = "v1.4.1"
|
||||
hash = "sha256-6iGAFCjoNveY+ipbKqq2gt+RXpi2eQyPXAY01rxPcWc="
|
||||
[mod."github.com/beorn7/perks"]
|
||||
version = "v1.0.1"
|
||||
hash = "sha256-h75GUqfwJKngCJQVE5Ao5wnO3cfKD9lSIteoLp/3xJ4="
|
||||
[mod."github.com/bits-and-blooms/bitset"]
|
||||
version = "v1.24.2"
|
||||
hash = "sha256-hT88EpdWmKnqdxApJhs/aIAptf33HmtSp2KXPI+Ym7o="
|
||||
[mod."github.com/bits-and-blooms/bloom/v3"]
|
||||
version = "v3.7.1"
|
||||
hash = "sha256-KZduCu+k4+xqBcFRTfg8Yc/PEf5jfpjn0I1YoxfnVPo="
|
||||
[mod."github.com/c2h5oh/datasize"]
|
||||
version = "v0.0.0-20231215233829-aa82cc1e6500"
|
||||
hash = "sha256-8MqL7xCvE6fIjanz2jwkaLP1OE5kLu62TOcQx452DHQ="
|
||||
[mod."github.com/cespare/xxhash/v2"]
|
||||
version = "v2.3.0"
|
||||
hash = "sha256-7hRlwSR+fos1kx4VZmJ/7snR7zHh8ZFKX+qqqqGcQpY="
|
||||
[mod."github.com/cloudflare/circl"]
|
||||
version = "v1.6.3"
|
||||
hash = "sha256-XZm4EastgX67Dgm5BpOEW/PY4aLcHM/O8+Xbz26PuTY="
|
||||
[mod."github.com/creasty/defaults"]
|
||||
version = "v1.8.0"
|
||||
hash = "sha256-I1LE1cfOhMS5JxB7+fWTKieefw2Gge1UhIZh+A6pa6s="
|
||||
[mod."github.com/cyphar/filepath-securejoin"]
|
||||
version = "v0.6.1"
|
||||
hash = "sha256-obqip8c1c9mjXFznyXF8aDnpcMw7ttzv+e28anCa/v0="
|
||||
[mod."github.com/davecgh/go-spew"]
|
||||
version = "v1.1.1"
|
||||
hash = "sha256-nhzSUrE1fCkN0+RL04N4h8jWmRFPPPWbCuDc7Ss0akI="
|
||||
[mod."github.com/dustin/go-humanize"]
|
||||
version = "v1.0.1"
|
||||
hash = "sha256-yuvxYYngpfVkUg9yAmG99IUVmADTQA0tMbBXe0Fq0Mc="
|
||||
[mod."github.com/emirpasic/gods"]
|
||||
version = "v1.18.1"
|
||||
hash = "sha256-hGDKddjLj+5dn2woHtXKUdd49/3xdsqnhx7VEdCu1m4="
|
||||
[mod."github.com/fatih/color"]
|
||||
version = "v1.19.0"
|
||||
hash = "sha256-YgMm1nid8yigNLG6aHfuMbsvMI1UYVf/Rkg44pp/NTU="
|
||||
[mod."github.com/go-git/gcfg/v2"]
|
||||
version = "v2.0.2"
|
||||
hash = "sha256-icqMDeC/tEg/3979EuEN67Ml5KjdDA0R3QvR6iLLrSI="
|
||||
[mod."github.com/go-git/go-billy/v6"]
|
||||
version = "v6.0.0-20260410103409-85b6241850b5"
|
||||
hash = "sha256-2qQeUjkswSqI9joCKhvMB1lvnKHL9INbAzy4UBveHsw="
|
||||
[mod."github.com/go-git/go-git/v6"]
|
||||
version = "v6.0.0-alpha.2"
|
||||
hash = "sha256-nUjRn1uIZKhIKqdNXfTirGtm07XCUKF2z3aat9O0dqM="
|
||||
[mod."github.com/go-ini/ini"]
|
||||
version = "v1.67.0"
|
||||
hash = "sha256-V10ahGNGT+NLRdKUyRg1dos5RxLBXBk1xutcnquc/+4="
|
||||
[mod."github.com/google/uuid"]
|
||||
version = "v1.6.0"
|
||||
hash = "sha256-VWl9sqUzdOuhW0KzQlv0gwwUQClYkmZwSydHG2sALYw="
|
||||
[mod."github.com/jpillora/backoff"]
|
||||
version = "v1.0.0"
|
||||
hash = "sha256-uxHg68NN8hrwPCrPfLYYprZHf7dMyEoPoF46JFx0IHU="
|
||||
[mod."github.com/kankanreno/go-snowflake"]
|
||||
version = "v1.2.0"
|
||||
hash = "sha256-713xGEqjwaUGIu2EHII5sldWmcquFpxZmte/7R/O6LA="
|
||||
[mod."github.com/kevinburke/ssh_config"]
|
||||
version = "v1.6.0"
|
||||
hash = "sha256-i/EYNJx0+HbAGFVoiKV4QF/zqb4fWewh+bpBKUkXDCc="
|
||||
[mod."github.com/klauspost/compress"]
|
||||
version = "v1.18.5"
|
||||
hash = "sha256-H9b5iFJf4XbEnkGQCjGQAJ3aYhVDiolKrDewTbhuzQo="
|
||||
[mod."github.com/klauspost/cpuid/v2"]
|
||||
version = "v2.3.0"
|
||||
hash = "sha256-50JhbQyT67BK38HIdJihPtjV7orYp96HknI2VP7A9Yc="
|
||||
[mod."github.com/klauspost/crc32"]
|
||||
version = "v1.3.0"
|
||||
hash = "sha256-RsS/MDJbVzVB+i74whqABgwZJWMw+AutF6HhJBVgbag="
|
||||
[mod."github.com/leodido/go-syslog/v4"]
|
||||
version = "v4.3.0"
|
||||
hash = "sha256-fCJ2rgrrPR/Ey/PoAsJhd8Sl8mblAnnMAmBuoWFBTgg="
|
||||
[mod."github.com/mattn/go-colorable"]
|
||||
version = "v0.1.14"
|
||||
hash = "sha256-JC60PjKj7MvhZmUHTZ9p372FV72I9Mxvli3fivTbxuA="
|
||||
[mod."github.com/mattn/go-isatty"]
|
||||
version = "v0.0.20"
|
||||
hash = "sha256-qhw9hWtU5wnyFyuMbKx+7RB8ckQaFQ8D+8GKPkN3HHQ="
|
||||
[mod."github.com/maypok86/otter/v2"]
|
||||
version = "v2.3.0"
|
||||
hash = "sha256-ELzmi/s2WqDeUmzSGnfx+ys2Hs28XHqF7vlEzyRotIA="
|
||||
[mod."github.com/minio/crc64nvme"]
|
||||
version = "v1.1.1"
|
||||
hash = "sha256-RVVi/gWPBEQqcW4n+KIKxlA3uY5+77e2rhkVk8fFNUo="
|
||||
[mod."github.com/minio/md5-simd"]
|
||||
version = "v1.1.2"
|
||||
hash = "sha256-vykcXvy2VBBAXnJott/XsGTT0gk2UL36JzZKfJ1KAUY="
|
||||
[mod."github.com/minio/minio-go/v7"]
|
||||
version = "v7.0.100"
|
||||
hash = "sha256-MjWYoX4b+OOSOkjsitQQqcTbpQ7CYNghN9XCdrqgYaM="
|
||||
[mod."github.com/munnerz/goautoneg"]
|
||||
version = "v0.0.0-20191010083416-a7dc8b61c822"
|
||||
hash = "sha256-79URDDFenmGc9JZu+5AXHToMrtTREHb3BC84b/gym9Q="
|
||||
[mod."github.com/pbnjay/memory"]
|
||||
version = "v0.0.0-20210728143218-7b4eea64cf58"
|
||||
hash = "sha256-QI+F1oPLOOtwNp8+m45OOoSfYFs3QVjGzE0rFdpF/IA="
|
||||
[mod."github.com/philhofer/fwd"]
|
||||
version = "v1.2.0"
|
||||
hash = "sha256-cGx2/0QQay46MYGZuamFmU0TzNaFyaO+J7Ddzlr/3dI="
|
||||
[mod."github.com/pjbgf/sha1cd"]
|
||||
version = "v0.5.0"
|
||||
hash = "sha256-11XBkhdciQGsQ7jEMZ6PgphRKjruTSc7ZxfOwDuPCr8="
|
||||
[mod."github.com/pkg/errors"]
|
||||
version = "v0.9.1"
|
||||
hash = "sha256-mNfQtcrQmu3sNg/7IwiieKWOgFQOVVe2yXgKBpe/wZw="
|
||||
[mod."github.com/pmezard/go-difflib"]
|
||||
version = "v1.0.0"
|
||||
hash = "sha256-/FtmHnaGjdvEIKAJtrUfEhV7EVo5A/eYrtdnUkuxLDA="
|
||||
[mod."github.com/pquerna/cachecontrol"]
|
||||
version = "v0.2.0"
|
||||
hash = "sha256-tuTERCFfwmqPepw/rs5cyv9fArCD30BqgjZqwMV+vzQ="
|
||||
[mod."github.com/prometheus/client_golang"]
|
||||
version = "v1.23.2"
|
||||
hash = "sha256-3GD4fBFa1tJu8MS4TNP6r2re2eViUE+kWUaieIOQXCg="
|
||||
[mod."github.com/prometheus/client_model"]
|
||||
version = "v0.6.2"
|
||||
hash = "sha256-q6Fh6v8iNJN9ypD47LjWmx66YITa3FyRjZMRsuRTFeQ="
|
||||
[mod."github.com/prometheus/common"]
|
||||
version = "v0.66.1"
|
||||
hash = "sha256-bqHPaV9IV70itx63wqwgy2PtxMN0sn5ThVxDmiD7+Tk="
|
||||
[mod."github.com/prometheus/procfs"]
|
||||
version = "v0.16.1"
|
||||
hash = "sha256-OBCvKlLW2obct35p0L9Q+1ZrxZjpTmbgHMP2rng9hpo="
|
||||
[mod."github.com/rs/xid"]
|
||||
version = "v1.6.0"
|
||||
hash = "sha256-rJB7h3KuH1DPp5n4dY3MiGnV1Y96A10lf5OUl+MLkzU="
|
||||
[mod."github.com/samber/lo"]
|
||||
version = "v1.53.0"
|
||||
hash = "sha256-RCf4Buf357TTWQnMPSWKrfdJ4L/RqOHNBD0g3+VpMw8="
|
||||
[mod."github.com/samber/slog-common"]
|
||||
version = "v0.21.0"
|
||||
hash = "sha256-i9Nl4xRbk8qYM+0n48IQ6+vGZiS7xFe+GgyV3X9/Spc="
|
||||
[mod."github.com/samber/slog-multi"]
|
||||
version = "v1.8.0"
|
||||
hash = "sha256-KsFwNP9QMDr8golYoevpGtcqUuCrIT7zmGwR7/E6gzo="
|
||||
[mod."github.com/sergi/go-diff"]
|
||||
version = "v1.4.0"
|
||||
hash = "sha256-rs9NKpv/qcQEMRg7CmxGdP4HGuFdBxlpWf9LbA9wS4k="
|
||||
[mod."github.com/stretchr/testify"]
|
||||
version = "v1.11.1"
|
||||
hash = "sha256-sWfjkuKJyDllDEtnM8sb/pdLzPQmUYWYtmeWz/5suUc="
|
||||
[mod."github.com/tinylib/msgp"]
|
||||
version = "v1.6.1"
|
||||
hash = "sha256-R2LutHQFZ7HAqeyzHqzMeyAJHxcYc+n1x7ysyrXefmQ="
|
||||
[mod."github.com/tj/assert"]
|
||||
version = "v0.0.3"
|
||||
hash = "sha256-4xhmZcHpUafabaXejE9ucVnGxG/txomvKzBg6cbkusg="
|
||||
[mod."github.com/tj/go-redirects"]
|
||||
version = "v0.0.0-20200911105812-fd1ba1020b37"
|
||||
hash = "sha256-GpYpxdT4F7PkwGXLo7cYVcIRJrzd1sKHtFDH+bRb6Tk="
|
||||
[mod."github.com/valyala/bytebufferpool"]
|
||||
version = "v1.0.0"
|
||||
hash = "sha256-I9FPZ3kCNRB+o0dpMwBnwZ35Fj9+ThvITn8a3Jr8mAY="
|
||||
[mod."github.com/valyala/fasttemplate"]
|
||||
version = "v1.2.2"
|
||||
hash = "sha256-gp+lNXE8zjO+qJDM/YbS6V43HFsYP6PKn4ux1qa5lZ0="
|
||||
[mod."go.yaml.in/yaml/v2"]
|
||||
version = "v2.4.2"
|
||||
hash = "sha256-oC8RWdf1zbMYCtmR0ATy/kCkhIwPR9UqFZSMOKLVF/A="
|
||||
[mod."go.yaml.in/yaml/v3"]
|
||||
version = "v3.0.4"
|
||||
hash = "sha256-NkGFiDPoCxbr3LFsI6OCygjjkY0rdmg5ggvVVwpyDQ4="
|
||||
[mod."golang.org/x/crypto"]
|
||||
version = "v0.50.0"
|
||||
hash = "sha256-vC1BJT7+3UBWLyEE5n3to0NKhMo6m2HGow2HiFgpQLo="
|
||||
[mod."golang.org/x/net"]
|
||||
version = "v0.53.0"
|
||||
hash = "sha256-G9gKLmyaf6lIV429NKX+YlL6oUPJwlv+BrG6qGhzvmU="
|
||||
[mod."golang.org/x/sync"]
|
||||
version = "v0.20.0"
|
||||
hash = "sha256-ybcjhCfK6lroUM0yswUvWooW8MOQZBXyiSqoxG6Uy0Y="
|
||||
[mod."golang.org/x/sys"]
|
||||
version = "v0.43.0"
|
||||
hash = "sha256-aDQXqSTZES2l/132PBxhZN4ywldpPyfm7LByYCHzzwM="
|
||||
[mod."golang.org/x/text"]
|
||||
version = "v0.36.0"
|
||||
hash = "sha256-/0t9C6Mc8kYjxweFB0us2lGKo8GovHhBiq5nlMOppC0="
|
||||
[mod."google.golang.org/protobuf"]
|
||||
version = "v1.36.11"
|
||||
hash = "sha256-7W+6jntfI/awWL3JP6yQedxqP5S9o3XvPgJ2XxxsIeE="
|
||||
[mod."gopkg.in/yaml.v3"]
|
||||
version = "v3.0.1"
|
||||
hash = "sha256-FqL9TKYJ0XkNwJFnq9j0VvJ5ZUU1RvH/52h/f5bkYAU="
|
||||
25
main.go
25
main.go
@@ -2,6 +2,27 @@
|
||||
|
||||
package main
|
||||
|
||||
import gitpages "codeberg.org/git-pages/git-pages/src"
|
||||
import (
|
||||
"runtime/debug"
|
||||
|
||||
func main() { gitpages.Main() }
|
||||
git_pages "codeberg.org/git-pages/git-pages/src"
|
||||
)
|
||||
|
||||
// By default the version information is retrieved from VCS. If not available during build,
|
||||
// override this variable using linker flags to change the displayed version.
|
||||
// Example: `-ldflags "-X main.versionOverride=v1.2.3"`
|
||||
var versionOverride = ""
|
||||
|
||||
func extractVersion() string {
|
||||
if versionOverride != "" {
|
||||
return versionOverride
|
||||
} else if buildInfo, ok := debug.ReadBuildInfo(); ok {
|
||||
return buildInfo.Main.Version
|
||||
} else {
|
||||
panic("version information not available")
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
git_pages.Main(extractVersion())
|
||||
}
|
||||
|
||||
@@ -14,5 +14,7 @@
|
||||
"lockFileMaintenance": {
|
||||
"enabled": true,
|
||||
"automerge": false
|
||||
}
|
||||
},
|
||||
"semanticCommits": "disabled",
|
||||
"commitMessagePrefix": "[Renovate]"
|
||||
}
|
||||
|
||||
413
src/audit.go
Normal file
413
src/audit.go
Normal file
@@ -0,0 +1,413 @@
|
||||
package git_pages
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
exponential "github.com/jpillora/backoff"
|
||||
"github.com/kankanreno/go-snowflake"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
"google.golang.org/protobuf/proto"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
var (
|
||||
auditNotifyOkCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "git_pages_audit_notify_ok",
|
||||
Help: "Count of successful audit notifications",
|
||||
})
|
||||
auditNotifyErrorCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "git_pages_audit_notify_error",
|
||||
Help: "Count of failed audit notifications",
|
||||
})
|
||||
)
|
||||
|
||||
type principalKey struct{}
|
||||
|
||||
var PrincipalKey = principalKey{}
|
||||
|
||||
func WithPrincipal(ctx context.Context) context.Context {
|
||||
principal := &Principal{}
|
||||
return context.WithValue(ctx, PrincipalKey, principal)
|
||||
}
|
||||
|
||||
func GetPrincipal(ctx context.Context) *Principal {
|
||||
if principal, ok := ctx.Value(PrincipalKey).(*Principal); ok {
|
||||
return principal
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var AuditSnowflakeStartTime = time.Date(2025, 12, 1, 0, 0, 0, 0, time.UTC)
|
||||
|
||||
type AuditID int64
|
||||
|
||||
func GenerateAuditID() AuditID {
|
||||
inner, err := snowflake.NextID()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return AuditID(inner)
|
||||
}
|
||||
|
||||
func ParseAuditID(repr string) (AuditID, error) {
|
||||
inner, err := strconv.ParseInt(repr, 16, 64)
|
||||
if err != nil {
|
||||
return AuditID(0), err
|
||||
}
|
||||
return AuditID(inner), nil
|
||||
}
|
||||
|
||||
func (id AuditID) String() string {
|
||||
return fmt.Sprintf("%016x", int64(id))
|
||||
}
|
||||
|
||||
func (id AuditID) CompareTime(when time.Time) int {
|
||||
idMillis := int64(id) >> (snowflake.MachineIDLength + snowflake.SequenceLength)
|
||||
idMillis += AuditSnowflakeStartTime.UnixMilli()
|
||||
whenMillis := when.UTC().UnixNano() / 1e6
|
||||
return cmp.Compare(idMillis, whenMillis)
|
||||
}
|
||||
|
||||
func EncodeAuditRecord(record *AuditRecord) (data []byte) {
|
||||
data, err := proto.MarshalOptions{Deterministic: true}.Marshal(record)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func DecodeAuditRecord(data []byte) (record *AuditRecord, err error) {
|
||||
record = &AuditRecord{}
|
||||
err = proto.Unmarshal(data, record)
|
||||
return
|
||||
}
|
||||
|
||||
func (record *AuditRecord) GetAuditID() AuditID {
|
||||
return AuditID(record.GetId())
|
||||
}
|
||||
|
||||
func (record *AuditRecord) DescribePrincipal() string {
|
||||
var items []string
|
||||
if record.Principal != nil {
|
||||
if record.Principal.GetIpAddress() != "" {
|
||||
items = append(items, record.Principal.GetIpAddress())
|
||||
}
|
||||
if record.Principal.GetForgeUser() != nil {
|
||||
items = append(items, fmt.Sprintf("%s/%s(%d)",
|
||||
record.Principal.GetForgeUser().GetOrigin(),
|
||||
record.Principal.GetForgeUser().GetHandle(),
|
||||
record.Principal.GetForgeUser().GetId()))
|
||||
}
|
||||
if record.Principal.GetRepoUrl() != "" {
|
||||
items = append(items, record.Principal.GetRepoUrl())
|
||||
}
|
||||
if record.Principal.GetCliAdmin() {
|
||||
items = append(items, "<cli-admin>")
|
||||
}
|
||||
}
|
||||
if len(items) > 0 {
|
||||
return strings.Join(items, ",")
|
||||
} else {
|
||||
return "<unknown>"
|
||||
}
|
||||
}
|
||||
|
||||
func (record *AuditRecord) DescribeResource() string {
|
||||
desc := "<unknown>"
|
||||
if record.Domain != nil && record.Project != nil {
|
||||
desc = path.Join(*record.Domain, *record.Project)
|
||||
} else if record.Domain != nil {
|
||||
desc = *record.Domain
|
||||
}
|
||||
return desc
|
||||
}
|
||||
|
||||
func (record *AuditRecord) IsDetachable() bool {
|
||||
return record.GetEvent() == AuditEvent_CommitManifest
|
||||
}
|
||||
|
||||
func (record *AuditRecord) IsDetached() bool {
|
||||
return record.IsDetachable() && record.Manifest == nil
|
||||
}
|
||||
|
||||
type AuditRecordScope int
|
||||
|
||||
const (
|
||||
AuditRecordComplete AuditRecordScope = iota
|
||||
AuditRecordNoManifest
|
||||
)
|
||||
|
||||
func AuditRecordJSON(record *AuditRecord, scope AuditRecordScope) []byte {
|
||||
switch scope {
|
||||
case AuditRecordComplete:
|
||||
// as-is
|
||||
case AuditRecordNoManifest:
|
||||
// trim the manifest
|
||||
newRecord := &AuditRecord{}
|
||||
proto.Merge(newRecord, record)
|
||||
newRecord.Manifest = nil
|
||||
record = newRecord
|
||||
}
|
||||
|
||||
json, err := protojson.MarshalOptions{
|
||||
Multiline: true,
|
||||
EmitDefaultValues: true,
|
||||
}.Marshal(record)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return json
|
||||
}
|
||||
|
||||
// This function receives `id` and `record` separately because the record itself may have its
|
||||
// ID missing or mismatched. While this is very unlikely, using the actual primary key as
|
||||
// the filename is more robust.
|
||||
func ExtractAuditRecord(ctx context.Context, id AuditID, record *AuditRecord, dest string) error {
|
||||
const mode = 0o400 // readable by current user, not writable
|
||||
|
||||
err := os.WriteFile(filepath.Join(dest, fmt.Sprintf("%s-event.json", id)),
|
||||
AuditRecordJSON(record, AuditRecordNoManifest), mode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if record.Manifest != nil {
|
||||
err = os.WriteFile(filepath.Join(dest, fmt.Sprintf("%s-manifest.json", id)),
|
||||
ManifestJSON(record.Manifest), mode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
archive, err := os.OpenFile(filepath.Join(dest, fmt.Sprintf("%s-archive.tar", id)),
|
||||
os.O_CREATE|os.O_TRUNC|os.O_WRONLY, mode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer archive.Close()
|
||||
|
||||
err = CollectTar(ctx, archive, record.Manifest, ManifestMetadata{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func AuditEventProcessor(command string, args []string) (http.Handler, error) {
|
||||
var err error
|
||||
|
||||
// Resolve the command to an absolute path, as it will be run from a different current
|
||||
// directory, which would break e.g. `git-pages -audit-server tcp/:3004 ./handler.sh`.
|
||||
if command, err = exec.LookPath(command); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if command, err = filepath.Abs(command); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
router := http.NewServeMux()
|
||||
router.Handle("GET /", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Go will cancel the request context if the client drops the connection. We don't want
|
||||
// that to interrupt processing. However, we also want the client (not the server) to
|
||||
// handle retries, so instead of spawning a goroutine to process the event, we do this
|
||||
// within the HTTP handler. If an error is returned, the notify goroutine in the worker
|
||||
// will retry the HTTP request (with backoff) until it succeeds.
|
||||
//
|
||||
// This is a somewhat idiosyncratic design and it's not clear that this is the best
|
||||
// possible approach (e.g. if the worker gets restarted and the event processing fails,
|
||||
// it will not be retried), but it should do the job for now. It is expected that
|
||||
// some form of observability is used to highlight event processor errors.
|
||||
ctx := context.WithoutCancel(r.Context())
|
||||
|
||||
id, err := ParseAuditID(r.URL.RawQuery)
|
||||
if err != nil {
|
||||
logc.Printf(ctx, "audit process err: malformed query\n")
|
||||
http.Error(w, "malformed query", http.StatusBadRequest)
|
||||
return
|
||||
} else {
|
||||
logc.Printf(ctx, "audit process %s", id)
|
||||
}
|
||||
|
||||
record, err := backend.QueryAuditLog(ctx, id)
|
||||
if err != nil {
|
||||
logc.Printf(ctx, "audit process err: missing record\n")
|
||||
http.Error(w, "missing record", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
args := append(args, id.String(), record.GetEvent().String())
|
||||
cmd := exec.CommandContext(ctx, command, args...)
|
||||
if cmd.Dir, err = os.MkdirTemp("", "auditRecord"); err != nil {
|
||||
panic(fmt.Errorf("mkdtemp: %w", err))
|
||||
}
|
||||
defer os.RemoveAll(cmd.Dir)
|
||||
|
||||
if err = ExtractAuditRecord(ctx, id, record, cmd.Dir); err != nil {
|
||||
logc.Printf(ctx, "audit process %s err: %s\n", id, err)
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
logc.Printf(ctx, "audit process %s err: %s; %s\n", id, err, string(output))
|
||||
w.WriteHeader(http.StatusServiceUnavailable)
|
||||
if len(output) == 0 {
|
||||
fmt.Fprintln(w, err.Error())
|
||||
}
|
||||
} else {
|
||||
logc.Printf(ctx, "audit process %s ok: %s\n", id, string(output))
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
w.Write(output)
|
||||
}))
|
||||
return router, nil
|
||||
}
|
||||
|
||||
type auditedBackend struct {
|
||||
Backend
|
||||
}
|
||||
|
||||
var _ Backend = (*auditedBackend)(nil)
|
||||
|
||||
func NewAuditedBackend(backend Backend) Backend {
|
||||
return &auditedBackend{backend}
|
||||
}
|
||||
|
||||
// This function does not retry appending audit records; as such, if it returns an error,
|
||||
// this error must interrupt whatever operation it was auditing. A corollary is that it is
|
||||
// possible that appending an audit record succeeds but the audited operation fails.
|
||||
// This is considered fine since the purpose of auditing is to record end user intent, not
|
||||
// to be a 100% accurate reflection of performed actions. When in doubt, the audit records
|
||||
// should be examined together with the application logs.
|
||||
func (audited *auditedBackend) appendNewAuditRecord(ctx context.Context, record *AuditRecord) (err error) {
|
||||
if config.Audit.Collect {
|
||||
id := GenerateAuditID()
|
||||
record.Id = proto.Int64(int64(id))
|
||||
record.Timestamp = timestamppb.Now()
|
||||
record.Principal = GetPrincipal(ctx)
|
||||
|
||||
err = audited.Backend.AppendAuditLog(ctx, id, record)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("audit: %w", err)
|
||||
} else {
|
||||
var subject string
|
||||
if record.Project == nil {
|
||||
subject = *record.Domain
|
||||
} else {
|
||||
subject = path.Join(*record.Domain, *record.Project)
|
||||
}
|
||||
logc.Printf(ctx, "audit %s ok: %s %s\n", subject, id, record.Event.String())
|
||||
|
||||
// Send a notification to the audit server, if configured, and try to make sure
|
||||
// it is delivered by retrying with exponential backoff on errors.
|
||||
notifyAudit(context.WithoutCancel(ctx), id)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func notifyAudit(ctx context.Context, id AuditID) {
|
||||
if config.Audit.NotifyURL != nil {
|
||||
notifyURL := config.Audit.NotifyURL.URL
|
||||
notifyURL.RawQuery = id.String()
|
||||
|
||||
// See also the explanation in `AuditEventProcessor` above.
|
||||
go func() {
|
||||
backoff := exponential.Backoff{
|
||||
Jitter: true,
|
||||
Min: time.Second * 1,
|
||||
Max: time.Second * 60,
|
||||
}
|
||||
for {
|
||||
resp, err := http.Get(notifyURL.String())
|
||||
var body []byte
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
body, _ = io.ReadAll(resp.Body)
|
||||
}
|
||||
if err == nil && resp.StatusCode == http.StatusOK {
|
||||
logc.Printf(ctx, "audit notify %s ok: %s\n", id, string(body))
|
||||
auditNotifyOkCount.Inc()
|
||||
break
|
||||
} else {
|
||||
sleepFor := backoff.Duration()
|
||||
if err != nil {
|
||||
logc.Printf(ctx, "audit notify %s err: %s (retry in %s)",
|
||||
id, err, sleepFor)
|
||||
} else {
|
||||
logc.Printf(ctx, "audit notify %s fail: %s (retry in %s); %s",
|
||||
id, resp.Status, sleepFor, string(body))
|
||||
}
|
||||
auditNotifyErrorCount.Inc()
|
||||
time.Sleep(sleepFor)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func (audited *auditedBackend) CommitManifest(
|
||||
ctx context.Context, name string, manifest *Manifest, opts ModifyManifestOptions,
|
||||
) (err error) {
|
||||
domain, project, ok := strings.Cut(name, "/")
|
||||
if !ok {
|
||||
panic("malformed manifest name")
|
||||
}
|
||||
audited.appendNewAuditRecord(ctx, &AuditRecord{
|
||||
Event: AuditEvent_CommitManifest.Enum(),
|
||||
Domain: proto.String(domain),
|
||||
Project: proto.String(project),
|
||||
Manifest: manifest,
|
||||
})
|
||||
|
||||
return audited.Backend.CommitManifest(ctx, name, manifest, opts)
|
||||
}
|
||||
|
||||
func (audited *auditedBackend) DeleteManifest(
|
||||
ctx context.Context, name string, opts ModifyManifestOptions,
|
||||
) (err error) {
|
||||
domain, project, ok := strings.Cut(name, "/")
|
||||
if !ok {
|
||||
panic("malformed manifest name")
|
||||
}
|
||||
audited.appendNewAuditRecord(ctx, &AuditRecord{
|
||||
Event: AuditEvent_DeleteManifest.Enum(),
|
||||
Domain: proto.String(domain),
|
||||
Project: proto.String(project),
|
||||
})
|
||||
|
||||
return audited.Backend.DeleteManifest(ctx, name, opts)
|
||||
}
|
||||
|
||||
func (audited *auditedBackend) FreezeDomain(ctx context.Context, domain string) (err error) {
|
||||
audited.appendNewAuditRecord(ctx, &AuditRecord{
|
||||
Event: AuditEvent_FreezeDomain.Enum(),
|
||||
Domain: proto.String(domain),
|
||||
})
|
||||
|
||||
return audited.Backend.FreezeDomain(ctx, domain)
|
||||
}
|
||||
|
||||
func (audited *auditedBackend) UnfreezeDomain(ctx context.Context, domain string) (err error) {
|
||||
audited.appendNewAuditRecord(ctx, &AuditRecord{
|
||||
Event: AuditEvent_UnfreezeDomain.Enum(),
|
||||
Domain: proto.String(domain),
|
||||
})
|
||||
|
||||
return audited.Backend.UnfreezeDomain(ctx, domain)
|
||||
}
|
||||
402
src/auth.go
402
src/auth.go
@@ -6,13 +6,14 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/idna"
|
||||
)
|
||||
|
||||
type AuthError struct {
|
||||
@@ -32,9 +33,9 @@ func IsUnauthorized(err error) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func authorizeInsecure() *Authorization {
|
||||
func authorizeInsecure(r *http.Request) *Authorization {
|
||||
if config.Insecure { // for testing only
|
||||
log.Println("auth: INSECURE mode")
|
||||
logc.Println(r.Context(), "auth: INSECURE mode")
|
||||
return &Authorization{
|
||||
repoURLs: nil,
|
||||
branch: "pages",
|
||||
@@ -43,27 +44,59 @@ func authorizeInsecure() *Authorization {
|
||||
return nil
|
||||
}
|
||||
|
||||
var idnaProfile = idna.New(idna.MapForLookup(), idna.BidiRule())
|
||||
|
||||
func GetHost(r *http.Request) (string, error) {
|
||||
// FIXME: handle IDNA
|
||||
host, _, err := net.SplitHostPort(r.Host)
|
||||
if err != nil {
|
||||
// dirty but the go stdlib doesn't have a "split port if present" function
|
||||
host = r.Host
|
||||
}
|
||||
if strings.HasPrefix(host, ".") {
|
||||
// this also rejects invalid characters and labels
|
||||
host, err = idnaProfile.ToASCII(host)
|
||||
if err != nil {
|
||||
if config.Feature("relaxed-idna") {
|
||||
// unfortunately, the go IDNA library has some significant issues around its
|
||||
// Unicode TR46 implementation: https://github.com/golang/go/issues/76804
|
||||
// we would like to allow *just* the _ here, but adding `idna.StrictDomainName(false)`
|
||||
// would also accept domains like `*.foo.bar` which should clearly be disallowed.
|
||||
// as a workaround, accept a domain name if it is valid with all `_` characters
|
||||
// replaced with an alphanumeric character (we use `a`); this allows e.g. `foo_bar.xxx`
|
||||
// and `foo__bar.xxx`, as well as `_foo.xxx` and `foo_.xxx`. labels starting with
|
||||
// an underscore are explicitly rejected below.
|
||||
_, err = idnaProfile.ToASCII(strings.ReplaceAll(host, "_", "a"))
|
||||
}
|
||||
if err != nil {
|
||||
return "", AuthError{http.StatusBadRequest,
|
||||
fmt.Sprintf("malformed host name %q", host)}
|
||||
}
|
||||
}
|
||||
if strings.HasPrefix(host, ".") || strings.HasPrefix(host, "_") {
|
||||
return "", AuthError{http.StatusBadRequest,
|
||||
fmt.Sprintf("host name %q is reserved", host)}
|
||||
fmt.Sprintf("reserved host name %q", host)}
|
||||
}
|
||||
host = strings.TrimSuffix(host, ".")
|
||||
return host, nil
|
||||
}
|
||||
|
||||
func ValidateProjectName(name string) error {
|
||||
if strings.HasPrefix(name, ".") {
|
||||
return fmt.Errorf("must not start with %q", ".")
|
||||
}
|
||||
|
||||
forbiddenChars := "%*"
|
||||
if strings.ContainsAny(name, forbiddenChars) {
|
||||
return fmt.Errorf("must not contain any of %q", forbiddenChars)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetProjectName(r *http.Request) (string, error) {
|
||||
// path must be either `/` or `/foo/` (`/foo` is accepted as an alias)
|
||||
path := strings.TrimPrefix(strings.TrimSuffix(r.URL.Path, "/"), "/")
|
||||
if path == ".index" || strings.HasPrefix(path, ".index/") {
|
||||
if err := ValidateProjectName(path); err != nil {
|
||||
return "", AuthError{http.StatusBadRequest,
|
||||
fmt.Sprintf("directory name %q is reserved", ".index")}
|
||||
fmt.Sprintf("directory name: %v", err)}
|
||||
} else if strings.Contains(path, "/") {
|
||||
return "", AuthError{http.StatusBadRequest,
|
||||
"directories nested too deep"}
|
||||
@@ -82,6 +115,15 @@ type Authorization struct {
|
||||
repoURLs []string
|
||||
// Only the exact branch is allowed.
|
||||
branch string
|
||||
// The authorized forge user.
|
||||
forgeUser *ForgeUser
|
||||
}
|
||||
|
||||
func (auth *Authorization) ForgeRepoURL() string {
|
||||
if auth.forgeUser != nil && len(auth.repoURLs) == 1 {
|
||||
return auth.repoURLs[0]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func authorizeDNSChallenge(r *http.Request) (*Authorization, error) {
|
||||
@@ -153,19 +195,29 @@ func authorizeDNSChallenge(r *http.Request) (*Authorization, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func authorizeDNSAllowlist(r *http.Request) (*Authorization, error) {
|
||||
func authorizeDNSAllowlist(r *http.Request, scope string) (*Authorization, error) {
|
||||
host, err := GetHost(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
allowlistHostname := fmt.Sprintf("_git-pages-repository.%s", host)
|
||||
projectName, err := GetProjectName(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
allowlistHostname := fmt.Sprintf("_%s.%s", scope, host)
|
||||
records, err := net.LookupTXT(allowlistHostname)
|
||||
if err != nil {
|
||||
return nil, AuthError{http.StatusUnauthorized,
|
||||
fmt.Sprintf("failed to look up DNS repository allowlist: %s TXT", allowlistHostname)}
|
||||
}
|
||||
|
||||
if projectName != ".index" {
|
||||
return nil, AuthError{http.StatusUnauthorized,
|
||||
"DNS repository allowlist only authorizes index site"}
|
||||
}
|
||||
|
||||
var (
|
||||
repoURLs []string
|
||||
errs []error
|
||||
@@ -231,8 +283,8 @@ func authorizeWildcardMatchSite(r *http.Request, pattern *WildcardPattern) (*Aut
|
||||
}
|
||||
|
||||
if userName, found := pattern.Matches(host); found {
|
||||
repoURLs, branch := pattern.ApplyTemplate(userName, projectName)
|
||||
return &Authorization{repoURLs, branch}, nil
|
||||
repoURL, branch := pattern.ApplyTemplate(userName, projectName)
|
||||
return &Authorization{repoURLs: []string{repoURL}, branch: branch}, nil
|
||||
} else {
|
||||
return nil, AuthError{
|
||||
http.StatusUnauthorized,
|
||||
@@ -266,7 +318,7 @@ func authorizeCodebergPagesV2(r *http.Request) (*Authorization, error) {
|
||||
}
|
||||
|
||||
if len(dnsRecords) > 0 {
|
||||
log.Printf("auth: %s TXT/CNAME: %q\n", host, dnsRecords)
|
||||
logc.Printf(r.Context(), "auth: %s TXT/CNAME: %q\n", host, dnsRecords)
|
||||
}
|
||||
|
||||
for _, dnsRecord := range dnsRecords {
|
||||
@@ -279,17 +331,16 @@ func authorizeCodebergPagesV2(r *http.Request) (*Authorization, error) {
|
||||
if domainParts[0] == "page" && domainParts[1] == "codeberg" {
|
||||
// map of domain names to allowed repository and branch:
|
||||
// * {username}.codeberg.page =>
|
||||
// https://codeberg.org/{username}/pages.git#main
|
||||
// https://codeberg.org/{username}/pages.git#pages
|
||||
// * {reponame}.{username}.codeberg.page =>
|
||||
// https://codeberg.org/{username}/{reponame}.git#pages
|
||||
// * {branch}.{reponame}.{username}.codeberg.page =>
|
||||
// https://codeberg.org/{username}/{reponame}.git#{branch}
|
||||
username := domainParts[2]
|
||||
reponame := "pages"
|
||||
branch := "main"
|
||||
branch := "pages"
|
||||
if len(domainParts) >= 4 {
|
||||
reponame = domainParts[3]
|
||||
branch = "pages"
|
||||
}
|
||||
if len(domainParts) == 5 {
|
||||
branch = domainParts[4]
|
||||
@@ -311,10 +362,10 @@ func authorizeCodebergPagesV2(r *http.Request) (*Authorization, error) {
|
||||
}
|
||||
|
||||
// Checks whether an operation that enables enumerating site contents is allowed.
|
||||
func AuthorizeMetadataRetrieval(r *http.Request) (*Authorization, error) {
|
||||
func AuthorizeMetadataRetrieval(r *http.Request, hasBasicAuth bool) (*Authorization, error) {
|
||||
causes := []error{AuthError{http.StatusUnauthorized, "unauthorized"}}
|
||||
|
||||
auth := authorizeInsecure()
|
||||
auth := authorizeInsecure(r)
|
||||
if auth != nil {
|
||||
return auth, nil
|
||||
}
|
||||
@@ -325,40 +376,42 @@ func AuthorizeMetadataRetrieval(r *http.Request) (*Authorization, error) {
|
||||
} else if err != nil { // bad request
|
||||
return nil, err
|
||||
} else {
|
||||
log.Println("auth: DNS challenge")
|
||||
logc.Println(r.Context(), "auth: DNS challenge")
|
||||
return auth, nil
|
||||
}
|
||||
|
||||
for _, pattern := range wildcards {
|
||||
auth, err = authorizeWildcardMatchHost(r, pattern)
|
||||
if err != nil && IsUnauthorized(err) {
|
||||
causes = append(causes, err)
|
||||
} else if err != nil { // bad request
|
||||
return nil, err
|
||||
} else {
|
||||
log.Printf("auth: wildcard %s\n", pattern.GetHost())
|
||||
return auth, nil
|
||||
// Normally, sites that correspond to a forge via a wildcard match are considered completely
|
||||
// public and safe to retrieve without authorization. However, this is no longer the case if
|
||||
// they have password-protected sections.
|
||||
if !hasBasicAuth {
|
||||
for _, pattern := range wildcards {
|
||||
auth, err = authorizeWildcardMatchHost(r, pattern)
|
||||
if err != nil && IsUnauthorized(err) {
|
||||
causes = append(causes, err)
|
||||
} else if err != nil { // bad request
|
||||
return nil, err
|
||||
} else {
|
||||
logc.Printf(r.Context(), "auth: wildcard %s\n", pattern.GetHost())
|
||||
return auth, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if config.Feature("codeberg-pages-compat") {
|
||||
auth, err = authorizeCodebergPagesV2(r)
|
||||
if err != nil && IsUnauthorized(err) {
|
||||
causes = append(causes, err)
|
||||
} else if err != nil { // bad request
|
||||
return nil, err
|
||||
} else {
|
||||
log.Printf("auth: codeberg %s\n", r.Host)
|
||||
return auth, nil
|
||||
if config.Feature("codeberg-pages-compat") {
|
||||
auth, err = authorizeCodebergPagesV2(r)
|
||||
if err != nil && IsUnauthorized(err) {
|
||||
causes = append(causes, err)
|
||||
} else if err != nil { // bad request
|
||||
return nil, err
|
||||
} else {
|
||||
logc.Printf(r.Context(), "auth: codeberg %s\n", r.Host)
|
||||
return auth, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil, joinErrors(causes...)
|
||||
}
|
||||
|
||||
// Returns `repoURLs, err` where if `err == nil` then the request is authorized to clone from
|
||||
// any repository URL included in `repoURLs` (by case-insensitive comparison), or any URL at all
|
||||
// if `repoURLs == nil`.
|
||||
func AuthorizeUpdateFromRepository(r *http.Request) (*Authorization, error) {
|
||||
causes := []error{AuthError{http.StatusUnauthorized, "unauthorized"}}
|
||||
|
||||
@@ -366,7 +419,7 @@ func AuthorizeUpdateFromRepository(r *http.Request) (*Authorization, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
auth := authorizeInsecure()
|
||||
auth := authorizeInsecure(r)
|
||||
if auth != nil {
|
||||
return auth, nil
|
||||
}
|
||||
@@ -378,19 +431,19 @@ func AuthorizeUpdateFromRepository(r *http.Request) (*Authorization, error) {
|
||||
} else if err != nil { // bad request
|
||||
return nil, err
|
||||
} else {
|
||||
log.Println("auth: DNS challenge: allow *")
|
||||
logc.Println(r.Context(), "auth: DNS challenge: allow *")
|
||||
return auth, nil
|
||||
}
|
||||
|
||||
// DNS allowlist gives authority to update but not delete.
|
||||
if r.Method == http.MethodPut || r.Method == http.MethodPost {
|
||||
auth, err = authorizeDNSAllowlist(r)
|
||||
auth, err = authorizeDNSAllowlist(r, "git-pages-repository")
|
||||
if err != nil && IsUnauthorized(err) {
|
||||
causes = append(causes, err)
|
||||
} else if err != nil { // bad request
|
||||
return nil, err
|
||||
} else {
|
||||
log.Printf("auth: DNS allowlist: allow %v\n", auth.repoURLs)
|
||||
logc.Printf(r.Context(), "auth: DNS allowlist: allow %v\n", auth.repoURLs)
|
||||
return auth, nil
|
||||
}
|
||||
}
|
||||
@@ -404,7 +457,7 @@ func AuthorizeUpdateFromRepository(r *http.Request) (*Authorization, error) {
|
||||
} else if err != nil { // bad request
|
||||
return nil, err
|
||||
} else {
|
||||
log.Printf("auth: wildcard %s: allow %v\n", pattern.GetHost(), auth.repoURLs)
|
||||
logc.Printf(r.Context(), "auth: wildcard %s: allow %v\n", pattern.GetHost(), auth.repoURLs)
|
||||
return auth, nil
|
||||
}
|
||||
}
|
||||
@@ -416,7 +469,7 @@ func AuthorizeUpdateFromRepository(r *http.Request) (*Authorization, error) {
|
||||
} else if err != nil { // bad request
|
||||
return nil, err
|
||||
} else {
|
||||
log.Printf("auth: codeberg %s: allow %v branch %s\n",
|
||||
logc.Printf(r.Context(), "auth: codeberg %s: allow %v branch %s\n",
|
||||
r.Host, auth.repoURLs, auth.branch)
|
||||
return auth, nil
|
||||
}
|
||||
@@ -426,21 +479,23 @@ func AuthorizeUpdateFromRepository(r *http.Request) (*Authorization, error) {
|
||||
return nil, joinErrors(causes...)
|
||||
}
|
||||
|
||||
func checkAllowedURLPrefix(repoURL string) error {
|
||||
if config.Limits.AllowedRepositoryURLPrefixes != nil {
|
||||
allowedPrefix := false
|
||||
repoURL = strings.ToLower(repoURL)
|
||||
for _, allowedRepoURLPrefix := range config.Limits.AllowedRepositoryURLPrefixes {
|
||||
if strings.HasPrefix(repoURL, strings.ToLower(allowedRepoURLPrefix)) {
|
||||
allowedPrefix = true
|
||||
break
|
||||
func checkAllowedURLPrefixes(repoURLs ...string) error {
|
||||
if len(config.Limits.AllowedRepositoryURLPrefixes) > 0 {
|
||||
for _, repoURL := range repoURLs {
|
||||
allowedPrefix := false
|
||||
repoURL = strings.ToLower(repoURL)
|
||||
for _, allowedRepoURLPrefix := range config.Limits.AllowedRepositoryURLPrefixes {
|
||||
if strings.HasPrefix(repoURL, strings.ToLower(allowedRepoURLPrefix)) {
|
||||
allowedPrefix = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !allowedPrefix {
|
||||
return AuthError{
|
||||
http.StatusUnauthorized,
|
||||
fmt.Sprintf("clone URL not in prefix allowlist %v",
|
||||
config.Limits.AllowedRepositoryURLPrefixes),
|
||||
if !allowedPrefix {
|
||||
return AuthError{
|
||||
http.StatusUnauthorized,
|
||||
fmt.Sprintf("clone URL %v not in prefix allowlist %v",
|
||||
repoURL, config.Limits.AllowedRepositoryURLPrefixes),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -473,7 +528,7 @@ func AuthorizeRepository(repoURL string, auth *Authorization) error {
|
||||
return nil // any
|
||||
}
|
||||
|
||||
if err = checkAllowedURLPrefix(repoURL); err != nil {
|
||||
if err = checkAllowedURLPrefixes(repoURL); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -541,6 +596,11 @@ func checkGogsRepositoryPushPermission(baseURL *url.URL, authorization string) e
|
||||
http.StatusNotFound,
|
||||
fmt.Sprintf("no repository %s", ownerAndRepo),
|
||||
}
|
||||
} else if response.StatusCode == http.StatusUnauthorized {
|
||||
return AuthError{
|
||||
http.StatusUnauthorized,
|
||||
fmt.Sprintf("no access to %s or invalid token", ownerAndRepo),
|
||||
}
|
||||
} else if response.StatusCode != http.StatusOK {
|
||||
return AuthError{
|
||||
http.StatusServiceUnavailable,
|
||||
@@ -575,9 +635,89 @@ func checkGogsRepositoryPushPermission(baseURL *url.URL, authorization string) e
|
||||
return nil
|
||||
}
|
||||
|
||||
func authorizeForgeWithToken(r *http.Request) (*Authorization, error) {
|
||||
authorization := r.Header.Get("Forge-Authorization")
|
||||
if authorization == "" {
|
||||
// Gogs, Gitea, and Forgejo all support the same API here.
|
||||
func fetchGogsAuthorizedUser(baseURL *url.URL, forgeToken string) (*ForgeUser, error) {
|
||||
request, err := http.NewRequest("GET", baseURL.ResolveReference(&url.URL{
|
||||
Path: "/api/v1/user",
|
||||
}).String(), nil)
|
||||
if err != nil {
|
||||
panic(err) // misconfiguration
|
||||
}
|
||||
request.Header.Set("Accept", "application/json")
|
||||
request.Header.Set("Authorization", forgeToken)
|
||||
|
||||
httpClient := http.Client{Timeout: 5 * time.Second}
|
||||
response, err := httpClient.Do(request)
|
||||
if err != nil {
|
||||
return nil, AuthError{
|
||||
http.StatusServiceUnavailable,
|
||||
fmt.Sprintf("cannot fetch authorized forge user: %s", err),
|
||||
}
|
||||
}
|
||||
defer response.Body.Close()
|
||||
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return nil, AuthError{
|
||||
http.StatusServiceUnavailable,
|
||||
fmt.Sprintf(
|
||||
"cannot fetch authorized forge user: GET %s returned %s",
|
||||
request.URL,
|
||||
response.Status,
|
||||
),
|
||||
}
|
||||
}
|
||||
decoder := json.NewDecoder(response.Body)
|
||||
|
||||
var userInfo struct {
|
||||
ID int64
|
||||
Login string
|
||||
}
|
||||
if err = decoder.Decode(&userInfo); err != nil {
|
||||
return nil, errors.Join(AuthError{
|
||||
http.StatusServiceUnavailable,
|
||||
fmt.Sprintf(
|
||||
"cannot fetch authorized forge user: GET %s returned malformed JSON",
|
||||
request.URL,
|
||||
),
|
||||
}, err)
|
||||
}
|
||||
|
||||
origin := request.URL.Hostname()
|
||||
return &ForgeUser{
|
||||
Origin: &origin,
|
||||
Id: &userInfo.ID,
|
||||
Handle: &userInfo.Login,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Check whether a forge token has access to a repository, and if it does, which user it
|
||||
// belongs to. Precondition: `repoURL` is well-formed.
|
||||
func authorizeGogsUser(repoURL string, forgeToken string) (*Authorization, error) {
|
||||
parsedRepoURL, err := url.Parse(repoURL)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if err = checkGogsRepositoryPushPermission(parsedRepoURL, forgeToken); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
authorizedUser, err := fetchGogsAuthorizedUser(parsedRepoURL, forgeToken)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Authorization{
|
||||
repoURLs: []string{repoURL},
|
||||
forgeUser: authorizedUser,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Validates a provided forge token against a repository URL constructed by mapping the host
|
||||
// and project name via the `[[wildcard]]` section of the configuration file.
|
||||
func authorizeForgeWildcard(r *http.Request) (*Authorization, error) {
|
||||
forgeToken := r.Header.Get("Forge-Authorization")
|
||||
if forgeToken == "" {
|
||||
return nil, AuthError{http.StatusUnauthorized, "missing Forge-Authorization header"}
|
||||
}
|
||||
|
||||
@@ -593,80 +733,136 @@ func authorizeForgeWithToken(r *http.Request) (*Authorization, error) {
|
||||
|
||||
var errs []error
|
||||
for _, pattern := range wildcards {
|
||||
if !pattern.Authorization {
|
||||
continue
|
||||
}
|
||||
|
||||
if userName, found := pattern.Matches(host); found {
|
||||
repoURLs, branch := pattern.ApplyTemplate(userName, projectName)
|
||||
for _, repoURL := range repoURLs {
|
||||
parsedRepoURL, err := url.Parse(repoURL)
|
||||
if pattern.Authorization {
|
||||
if userName, found := pattern.Matches(host); found {
|
||||
repoURL, branch := pattern.ApplyTemplate(userName, projectName)
|
||||
auth, err := authorizeGogsUser(repoURL, forgeToken)
|
||||
if err != nil {
|
||||
panic(err) // misconfiguration
|
||||
}
|
||||
|
||||
if err = checkGogsRepositoryPushPermission(parsedRepoURL, authorization); err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
} else {
|
||||
auth.branch = branch
|
||||
return auth, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(errs) == 0 {
|
||||
errs = append(errs, AuthError{http.StatusUnauthorized, "no matching wildcard domain"})
|
||||
}
|
||||
|
||||
// This will actually be ignored by the caller of AuthorizeUpdateFromArchive,
|
||||
// but we return this information as it makes sense to do contextually here.
|
||||
return &Authorization{
|
||||
[]string{repoURL},
|
||||
branch,
|
||||
}, nil
|
||||
errs = append([]error{
|
||||
AuthError{http.StatusUnauthorized, "not authorized by forge (wildcard)"},
|
||||
}, errs...)
|
||||
return nil, joinErrors(errs...)
|
||||
}
|
||||
|
||||
// Validates a provided forge token against a repository URL extracted from the DNS allowlist
|
||||
// records of the target domain specified in `_git-pages-forge-authorization.*`.
|
||||
func authorizeForgeDNSAllowlist(r *http.Request) (*Authorization, error) {
|
||||
forgeToken := r.Header.Get("Forge-Authorization")
|
||||
if forgeToken == "" {
|
||||
return nil, AuthError{http.StatusUnauthorized, "missing Forge-Authorization header"}
|
||||
}
|
||||
|
||||
var errs []error
|
||||
if dnsAuth, err := authorizeDNSAllowlist(r, "git-pages-forge-allowlist"); err != nil {
|
||||
errs = append(errs, err)
|
||||
} else if dnsAuth != nil {
|
||||
// DNS allows uploads from some repositories, but we don't know yet if the forge token
|
||||
// has a push permission to any of these repositories.
|
||||
for _, repoURL := range dnsAuth.repoURLs {
|
||||
auth, err := authorizeGogsUser(repoURL, forgeToken)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
// There is both DNS authorization and forge authorization.
|
||||
return auth, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
errs = append([]error{
|
||||
AuthError{http.StatusUnauthorized, "not authorized by forge"},
|
||||
AuthError{http.StatusUnauthorized, "not authorized by forge (DNS allowlist)"},
|
||||
}, errs...)
|
||||
return nil, joinErrors(errs...)
|
||||
}
|
||||
|
||||
func AuthorizeUpdateFromArchive(r *http.Request) (*Authorization, error) {
|
||||
func authorizeDNSChallengeOrForgeWithToken(r *http.Request) (*Authorization, error) {
|
||||
causes := []error{AuthError{http.StatusUnauthorized, "unauthorized"}}
|
||||
|
||||
if err := CheckForbiddenDomain(r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
auth := authorizeInsecure()
|
||||
auth := authorizeInsecure(r)
|
||||
if auth != nil {
|
||||
return auth, nil
|
||||
}
|
||||
|
||||
// Token authorization allows updating a site on a wildcard domain from an archive.
|
||||
auth, err := authorizeForgeWithToken(r)
|
||||
// DNS challenge gives absolute authority.
|
||||
auth, err := authorizeDNSChallenge(r)
|
||||
if err != nil && IsUnauthorized(err) {
|
||||
causes = append(causes, err)
|
||||
} else if err != nil { // bad request
|
||||
return nil, err
|
||||
} else {
|
||||
log.Printf("auth: forge token: allow\n")
|
||||
logc.Println(r.Context(), "auth: DNS challenge: allow *")
|
||||
return auth, nil
|
||||
}
|
||||
|
||||
if config.Limits.AllowedRepositoryURLPrefixes != nil {
|
||||
causes = append(causes, AuthError{http.StatusUnauthorized, "DNS challenge not allowed"})
|
||||
// Token authorization allows updating a site on a wildcard domain from an archive.
|
||||
// This sub-method uses the `[[wildcard]]` configuration section to derive repository URL.
|
||||
auth, err = authorizeForgeWildcard(r)
|
||||
if err != nil && IsUnauthorized(err) {
|
||||
causes = append(causes, err)
|
||||
} else if err != nil { // bad request
|
||||
return nil, err
|
||||
} else {
|
||||
// DNS challenge gives absolute authority.
|
||||
auth, err = authorizeDNSChallenge(r)
|
||||
if err != nil && IsUnauthorized(err) {
|
||||
causes = append(causes, err)
|
||||
} else if err != nil { // bad request
|
||||
return nil, err
|
||||
} else {
|
||||
log.Println("auth: DNS challenge")
|
||||
return auth, nil
|
||||
}
|
||||
logc.Printf(r.Context(), "auth: forge (wildcard): allow\n")
|
||||
return auth, nil
|
||||
}
|
||||
|
||||
// Token authorization allows updating a site on a wildcard domain from an archive.
|
||||
// This sub-method uses the DNS allowlist authorization mechanism to derive repository URL.
|
||||
auth, err = authorizeForgeDNSAllowlist(r)
|
||||
if err != nil && IsUnauthorized(err) {
|
||||
causes = append(causes, err)
|
||||
} else if err != nil { // bad request
|
||||
return nil, err
|
||||
} else {
|
||||
logc.Printf(r.Context(), "auth: forge (DNS allowlist): allow\n")
|
||||
return auth, nil
|
||||
}
|
||||
|
||||
return nil, joinErrors(causes...)
|
||||
}
|
||||
|
||||
func AuthorizeUpdateFromArchive(r *http.Request) (*Authorization, error) {
|
||||
auth, err := authorizeDNSChallengeOrForgeWithToken(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If only uploads from specific repositories are allowed, then only forge authorization
|
||||
// is acceptable, and the repository must match the configured limits.
|
||||
if len(config.Limits.AllowedRepositoryURLPrefixes) > 0 {
|
||||
if len(auth.repoURLs) == 0 {
|
||||
logc.Println(r.Context(), "auth: DNS challenge: deny (limits)")
|
||||
return nil, AuthError{http.StatusUnauthorized, "DNS challenge not allowed"}
|
||||
}
|
||||
|
||||
if err = checkAllowedURLPrefixes(auth.repoURLs...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return auth, nil
|
||||
}
|
||||
|
||||
func AuthorizeDeletion(r *http.Request) (*Authorization, error) {
|
||||
return authorizeDNSChallengeOrForgeWithToken(r)
|
||||
}
|
||||
|
||||
func CheckForbiddenDomain(r *http.Request) error {
|
||||
host, err := GetHost(r)
|
||||
if err != nil {
|
||||
|
||||
120
src/backend.go
120
src/backend.go
@@ -5,32 +5,79 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"slices"
|
||||
"iter"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var ErrObjectNotFound = errors.New("not found")
|
||||
var ErrPreconditionFailed = errors.New("precondition failed")
|
||||
var ErrWriteConflict = errors.New("write conflict")
|
||||
var ErrDomainFrozen = errors.New("domain administratively frozen")
|
||||
|
||||
func splitBlobName(name string) []string {
|
||||
algo, hash, found := strings.Cut(name, "-")
|
||||
if found {
|
||||
return slices.Concat([]string{algo}, splitBlobName(hash))
|
||||
if algo, hash, found := strings.Cut(name, "-"); found {
|
||||
return []string{algo, hash[0:2], hash[2:4], hash[4:]}
|
||||
} else {
|
||||
return []string{name[0:2], name[2:4], name[4:]}
|
||||
panic("malformed blob name")
|
||||
}
|
||||
}
|
||||
|
||||
func joinBlobName(parts []string) string {
|
||||
return fmt.Sprintf("%s-%s", parts[0], strings.Join(parts[1:], ""))
|
||||
}
|
||||
|
||||
type BackendFeature string
|
||||
|
||||
const (
|
||||
FeatureCheckDomainMarker BackendFeature = "check-domain-marker"
|
||||
)
|
||||
|
||||
type BlobMetadata struct {
|
||||
Name string
|
||||
Size int64
|
||||
LastModified time.Time
|
||||
}
|
||||
|
||||
type GetManifestOptions struct {
|
||||
// If true and the manifest is past the cache `MaxAge`, `GetManifest` blocks and returns
|
||||
// a fresh object instead of revalidating in background and returning a stale object.
|
||||
BypassCache bool
|
||||
}
|
||||
|
||||
type ManifestMetadata struct {
|
||||
Name string
|
||||
Size int64
|
||||
LastModified time.Time
|
||||
ETag string
|
||||
}
|
||||
|
||||
type ModifyManifestOptions struct {
|
||||
// If non-zero, the request will only succeed if the manifest hasn't been changed since
|
||||
// the given time. Whether this is racy or not is can be determined via `HasAtomicCAS()`.
|
||||
IfUnmodifiedSince time.Time
|
||||
// If non-empty, the request will only succeed if the manifest hasn't changed from
|
||||
// the state corresponding to the ETag. Whether this is racy or not is can be determined
|
||||
// via `HasAtomicCAS()`.
|
||||
IfMatch string
|
||||
}
|
||||
|
||||
type SearchAuditLogOptions struct {
|
||||
// Inclusive lower bound on returned audit records, per their Snowflake ID (which may differ
|
||||
// slightly from the embedded timestamp). If zero, audit records are returned since beginning
|
||||
// of time.
|
||||
Since time.Time
|
||||
// Inclusive upper bound on returned audit records, per their Snowflake ID (which may differ
|
||||
// slightly from the embedded timestamp). If zero, audit records are returned until the end
|
||||
// of time.
|
||||
Until time.Time
|
||||
}
|
||||
|
||||
type SearchAuditLogResult struct {
|
||||
ID AuditID
|
||||
Err error
|
||||
}
|
||||
|
||||
type Backend interface {
|
||||
// Returns true if the feature has been enabled for this store, false otherwise.
|
||||
HasFeature(ctx context.Context, feature BackendFeature) bool
|
||||
@@ -40,7 +87,7 @@ type Backend interface {
|
||||
|
||||
// Retrieve a blob. Returns `reader, size, mtime, err`.
|
||||
GetBlob(ctx context.Context, name string) (
|
||||
reader io.ReadSeeker, size uint64, mtime time.Time, err error,
|
||||
reader io.ReadSeeker, metadata BlobMetadata, err error,
|
||||
)
|
||||
|
||||
// Store a blob. If a blob called `name` already exists, this function returns `nil` without
|
||||
@@ -51,9 +98,13 @@ type Backend interface {
|
||||
// Delete a blob. This is an unconditional operation that can break integrity of manifests.
|
||||
DeleteBlob(ctx context.Context, name string) error
|
||||
|
||||
// Iterate through all blobs. Whether blobs that are newly added during iteration will appear
|
||||
// in the results is unspecified.
|
||||
EnumerateBlobs(ctx context.Context) iter.Seq2[BlobMetadata, error]
|
||||
|
||||
// Retrieve a manifest.
|
||||
GetManifest(ctx context.Context, name string, opts GetManifestOptions) (
|
||||
manifest *Manifest, mtime time.Time, err error,
|
||||
manifest *Manifest, metadata ManifestMetadata, err error,
|
||||
)
|
||||
|
||||
// Stage a manifest. This operation stores a new version of a manifest, locking any blobs
|
||||
@@ -61,35 +112,74 @@ type Backend interface {
|
||||
// effects.
|
||||
StageManifest(ctx context.Context, manifest *Manifest) error
|
||||
|
||||
// Whether a compare-and-swap operation on a manifest is truly race-free, or only best-effort
|
||||
// atomic with a small but non-zero window where two requests may race where the one committing
|
||||
// first will have its update lost. (Plain swap operations are always guaranteed to be atomic.)
|
||||
HasAtomicCAS(ctx context.Context) bool
|
||||
|
||||
// Commit a manifest. This is an atomic operation; `GetManifest` calls will return either
|
||||
// the old version or the new version of the manifest, never anything else.
|
||||
CommitManifest(ctx context.Context, name string, manifest *Manifest) error
|
||||
CommitManifest(ctx context.Context, name string, manifest *Manifest, opts ModifyManifestOptions) error
|
||||
|
||||
// Delete a manifest.
|
||||
DeleteManifest(ctx context.Context, name string) error
|
||||
DeleteManifest(ctx context.Context, name string, opts ModifyManifestOptions) error
|
||||
|
||||
// List all manifests.
|
||||
ListManifests(ctx context.Context) (manifests []string, err error)
|
||||
// Iterate through metadata of all manifests. Whether manifests that are newly added during
|
||||
// iteration will appear in the results is unspecified.
|
||||
EnumerateManifests(ctx context.Context) iter.Seq2[*ManifestMetadata, error]
|
||||
|
||||
// Iterate through contents of all manifests. Same considerations apply as for
|
||||
// `EnumerateManifests`.
|
||||
GetAllManifests(ctx context.Context) iter.Seq2[tuple[*ManifestMetadata, *Manifest], error]
|
||||
|
||||
// Check whether a domain has any deployments.
|
||||
CheckDomain(ctx context.Context, domain string) (found bool, err error)
|
||||
|
||||
// Creates a domain. This allows us to start serving content for the domain.
|
||||
// Create a domain. This allows us to start serving content for the domain.
|
||||
CreateDomain(ctx context.Context, domain string) error
|
||||
|
||||
// Freeze a domain. This allows a site to be administratively locked, e.g. if it
|
||||
// is discovered serving abusive content.
|
||||
FreezeDomain(ctx context.Context, domain string) error
|
||||
|
||||
// Thaw a domain. This removes the previously placed administrative lock (if any).
|
||||
UnfreezeDomain(ctx context.Context, domain string) error
|
||||
|
||||
// Check whether the set of domains we serve has changed since the time passed to this method.
|
||||
HaveDomainsChanged(ctx context.Context, since time.Time) (changed bool, err error)
|
||||
|
||||
// Append a record to the audit log.
|
||||
AppendAuditLog(ctx context.Context, id AuditID, record *AuditRecord) error
|
||||
|
||||
// Retrieve a single record from the audit log.
|
||||
QueryAuditLog(ctx context.Context, id AuditID) (record *AuditRecord, err error)
|
||||
|
||||
// Retrieve record IDs from the audit log by time range.
|
||||
SearchAuditLog(ctx context.Context, opts SearchAuditLogOptions) iter.Seq2[AuditID, error]
|
||||
|
||||
// Retrieve audit record contents for given IDs.
|
||||
GetAuditLogRecords(ctx context.Context, ids iter.Seq2[AuditID, error]) iter.Seq2[*AuditRecord, error]
|
||||
|
||||
// Detach an audit record from its blobs.
|
||||
DetachAuditRecord(ctx context.Context, id AuditID) error
|
||||
|
||||
// Delete an audit record with a given ID.
|
||||
ExpireAuditRecord(ctx context.Context, id AuditID) error
|
||||
}
|
||||
|
||||
func CreateBackend(config *StorageConfig) (backend Backend, err error) {
|
||||
func CreateBackend(ctx context.Context, config *StorageConfig) (backend Backend, err error) {
|
||||
switch config.Type {
|
||||
case "fs":
|
||||
if backend, err = NewFSBackend(&config.FS); err != nil {
|
||||
if backend, err = NewFSBackend(ctx, &config.FS); err != nil {
|
||||
err = fmt.Errorf("fs backend: %w", err)
|
||||
}
|
||||
case "s3":
|
||||
if backend, err = NewS3Backend(context.Background(), &config.S3); err != nil {
|
||||
if backend, err = NewS3Backend(ctx, &config.S3); err != nil {
|
||||
err = fmt.Errorf("s3 backend: %w", err)
|
||||
}
|
||||
default:
|
||||
err = fmt.Errorf("unknown backend: %s", config.Type)
|
||||
}
|
||||
backend = NewAuditedBackend(backend)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -6,7 +6,8 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
iofs "io/fs"
|
||||
"iter"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -14,8 +15,10 @@ import (
|
||||
)
|
||||
|
||||
type FSBackend struct {
|
||||
blobRoot *os.Root
|
||||
siteRoot *os.Root
|
||||
blobRoot *os.Root
|
||||
siteRoot *os.Root
|
||||
auditRoot *os.Root
|
||||
hasAtomicCAS bool
|
||||
}
|
||||
|
||||
var _ Backend = (*FSBackend)(nil)
|
||||
@@ -54,7 +57,21 @@ func createTempInRoot(root *os.Root, name string, data []byte) (string, error) {
|
||||
return tempPath, nil
|
||||
}
|
||||
|
||||
func NewFSBackend(config *FSConfig) (*FSBackend, error) {
|
||||
func checkAtomicCAS(root *os.Root) bool {
|
||||
fileName := ".hasAtomicCAS"
|
||||
file, err := root.Create(fileName)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
root.Remove(fileName)
|
||||
defer file.Close()
|
||||
|
||||
flockErr := FileLock(file)
|
||||
funlockErr := FileUnlock(file)
|
||||
return (flockErr == nil && funlockErr == nil)
|
||||
}
|
||||
|
||||
func NewFSBackend(ctx context.Context, config *FSConfig) (*FSBackend, error) {
|
||||
blobRoot, err := maybeCreateOpenRoot(config.Root, "blob")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("blob: %w", err)
|
||||
@@ -63,7 +80,17 @@ func NewFSBackend(config *FSConfig) (*FSBackend, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("site: %w", err)
|
||||
}
|
||||
return &FSBackend{blobRoot, siteRoot}, nil
|
||||
auditRoot, err := maybeCreateOpenRoot(config.Root, "audit")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("audit: %w", err)
|
||||
}
|
||||
hasAtomicCAS := checkAtomicCAS(siteRoot)
|
||||
if hasAtomicCAS {
|
||||
logc.Println(ctx, "fs: has atomic CAS")
|
||||
} else {
|
||||
logc.Println(ctx, "fs: has best-effort CAS")
|
||||
}
|
||||
return &FSBackend{blobRoot, siteRoot, auditRoot, hasAtomicCAS}, nil
|
||||
}
|
||||
|
||||
func (fs *FSBackend) Backend() Backend {
|
||||
@@ -91,7 +118,7 @@ func (fs *FSBackend) EnableFeature(ctx context.Context, feature BackendFeature)
|
||||
func (fs *FSBackend) GetBlob(
|
||||
ctx context.Context, name string,
|
||||
) (
|
||||
reader io.ReadSeeker, size uint64, mtime time.Time, err error,
|
||||
reader io.ReadSeeker, metadata BlobMetadata, err error,
|
||||
) {
|
||||
blobPath := filepath.Join(splitBlobName(name)...)
|
||||
stat, err := fs.blobRoot.Stat(blobPath)
|
||||
@@ -107,20 +134,32 @@ func (fs *FSBackend) GetBlob(
|
||||
err = fmt.Errorf("open: %w", err)
|
||||
return
|
||||
}
|
||||
return file, uint64(stat.Size()), stat.ModTime(), nil
|
||||
return file, BlobMetadata{name, int64(stat.Size()), stat.ModTime()}, nil
|
||||
}
|
||||
|
||||
func (fs *FSBackend) PutBlob(ctx context.Context, name string, data []byte) error {
|
||||
blobPath := filepath.Join(splitBlobName(name)...)
|
||||
blobDir := filepath.Dir(blobPath)
|
||||
|
||||
if _, err := fs.blobRoot.Stat(blobPath); err == nil {
|
||||
// Blob already exists. While on Linux it would be benign to write and replace a blob
|
||||
// that already exists, on Windows this is liable to cause access errors.
|
||||
return nil
|
||||
}
|
||||
|
||||
tempPath, err := createTempInRoot(fs.blobRoot, name, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := fs.blobRoot.Chmod(tempPath, 0o444); err != nil {
|
||||
return fmt.Errorf("chmod: %w", err)
|
||||
if errors.Is(err, os.ErrPermission) {
|
||||
// NFSv4 configured with ACLs doesn't have a working `chmod` even though it's a Unix
|
||||
// system. This `chmod` call is done entirely for convenience (to help the system
|
||||
// administrator avoid accidentally overwriting files), so just skip it.
|
||||
} else {
|
||||
return fmt.Errorf("chmod: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
again:
|
||||
@@ -149,25 +188,36 @@ func (fs *FSBackend) DeleteBlob(ctx context.Context, name string) error {
|
||||
return fs.blobRoot.Remove(blobPath)
|
||||
}
|
||||
|
||||
func (b *FSBackend) ListManifests(ctx context.Context) (manifests []string, err error) {
|
||||
err = fs.WalkDir(b.siteRoot.FS(), ".", func(path string, d fs.DirEntry, err error) error {
|
||||
if strings.Count(path, "/") > 1 {
|
||||
return fs.SkipDir
|
||||
}
|
||||
_, project, _ := strings.Cut(path, "/")
|
||||
if project == "" || strings.HasPrefix(project, ".") && project != ".index" {
|
||||
return nil
|
||||
}
|
||||
manifests = append(manifests, path)
|
||||
return nil
|
||||
})
|
||||
return
|
||||
func (fs *FSBackend) EnumerateBlobs(ctx context.Context) iter.Seq2[BlobMetadata, error] {
|
||||
return func(yield func(BlobMetadata, error) bool) {
|
||||
iofs.WalkDir(fs.blobRoot.FS(), ".",
|
||||
func(path string, entry iofs.DirEntry, err error) error {
|
||||
var metadata BlobMetadata
|
||||
if err != nil {
|
||||
// report error
|
||||
} else if entry.IsDir() {
|
||||
// skip directory
|
||||
return nil
|
||||
} else if info, err := entry.Info(); err != nil {
|
||||
// report error
|
||||
} else {
|
||||
// report blob
|
||||
metadata.Name = joinBlobName(strings.Split(path, "/"))
|
||||
metadata.Size = info.Size()
|
||||
metadata.LastModified = info.ModTime()
|
||||
}
|
||||
if !yield(metadata, err) {
|
||||
return iofs.SkipAll
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *FSBackend) GetManifest(
|
||||
ctx context.Context, name string, opts GetManifestOptions,
|
||||
) (
|
||||
manifest *Manifest, mtime time.Time, err error,
|
||||
manifest *Manifest, metadata ManifestMetadata, err error,
|
||||
) {
|
||||
stat, err := fs.siteRoot.Stat(name)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
@@ -186,7 +236,10 @@ func (fs *FSBackend) GetManifest(
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return manifest, stat.ModTime(), nil
|
||||
return manifest, ManifestMetadata{
|
||||
LastModified: stat.ModTime(),
|
||||
ETag: fmt.Sprintf("%x", sha256.Sum256(data)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func stagedManifestName(manifestData []byte) string {
|
||||
@@ -208,7 +261,102 @@ func (fs *FSBackend) StageManifest(ctx context.Context, manifest *Manifest) erro
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *FSBackend) CommitManifest(ctx context.Context, name string, manifest *Manifest) error {
|
||||
func domainFrozenMarkerName(domain string) string {
|
||||
return filepath.Join(domain, ".frozen")
|
||||
}
|
||||
|
||||
func (fs *FSBackend) checkDomainFrozen(ctx context.Context, domain string) error {
|
||||
if _, err := fs.siteRoot.Stat(domainFrozenMarkerName(domain)); err == nil {
|
||||
return ErrDomainFrozen
|
||||
} else if !errors.Is(err, os.ErrNotExist) {
|
||||
return fmt.Errorf("stat: %w", err)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *FSBackend) HasAtomicCAS(ctx context.Context) bool {
|
||||
// On a suitable filesystem, POSIX advisory locks can be used to implement atomic CAS.
|
||||
// An implementation consists of two parts:
|
||||
// - Intra-process mutex set (one per manifest), to prevent races between goroutines;
|
||||
// - Inter-process POSIX advisory locks (one per manifest), to prevent races between
|
||||
// different git-pages instances.
|
||||
return fs.hasAtomicCAS
|
||||
}
|
||||
|
||||
type manifestLockGuard struct {
|
||||
file *os.File
|
||||
}
|
||||
|
||||
func lockManifest(fs *os.Root, name string) (*manifestLockGuard, error) {
|
||||
file, err := fs.Open(name)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return &manifestLockGuard{nil}, nil
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("open: %w", err)
|
||||
}
|
||||
if err := FileLock(file); err != nil {
|
||||
file.Close()
|
||||
return nil, fmt.Errorf("flock(LOCK_EX): %w", err)
|
||||
}
|
||||
return &manifestLockGuard{file}, nil
|
||||
}
|
||||
|
||||
func (guard *manifestLockGuard) Unlock() {
|
||||
if guard.file != nil {
|
||||
FileUnlock(guard.file)
|
||||
guard.file.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *FSBackend) checkManifestPrecondition(
|
||||
ctx context.Context, name string, opts ModifyManifestOptions,
|
||||
) error {
|
||||
if !opts.IfUnmodifiedSince.IsZero() {
|
||||
stat, err := fs.siteRoot.Stat(name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("stat: %w", err)
|
||||
}
|
||||
|
||||
if stat.ModTime().Compare(opts.IfUnmodifiedSince) > 0 {
|
||||
return fmt.Errorf("%w: If-Unmodified-Since", ErrPreconditionFailed)
|
||||
}
|
||||
}
|
||||
|
||||
if opts.IfMatch != "" {
|
||||
data, err := fs.siteRoot.ReadFile(name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("read: %w", err)
|
||||
}
|
||||
|
||||
if fmt.Sprintf("%x", sha256.Sum256(data)) != opts.IfMatch {
|
||||
return fmt.Errorf("%w: If-Match", ErrPreconditionFailed)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *FSBackend) CommitManifest(
|
||||
ctx context.Context, name string, manifest *Manifest, opts ModifyManifestOptions,
|
||||
) error {
|
||||
if fs.hasAtomicCAS {
|
||||
if guard, err := lockManifest(fs.siteRoot, name); err != nil {
|
||||
return err
|
||||
} else {
|
||||
defer guard.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
domain := filepath.Dir(name)
|
||||
if err := fs.checkDomainFrozen(ctx, domain); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := fs.checkManifestPrecondition(ctx, name, opts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
manifestData := EncodeManifest(manifest)
|
||||
manifestHashName := stagedManifestName(manifestData)
|
||||
|
||||
@@ -216,7 +364,7 @@ func (fs *FSBackend) CommitManifest(ctx context.Context, name string, manifest *
|
||||
return fmt.Errorf("manifest not staged")
|
||||
}
|
||||
|
||||
if err := fs.siteRoot.MkdirAll(filepath.Dir(name), 0o755); err != nil {
|
||||
if err := fs.siteRoot.MkdirAll(domain, 0o755); err != nil {
|
||||
return fmt.Errorf("mkdir: %w", err)
|
||||
}
|
||||
|
||||
@@ -227,7 +375,26 @@ func (fs *FSBackend) CommitManifest(ctx context.Context, name string, manifest *
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *FSBackend) DeleteManifest(ctx context.Context, name string) error {
|
||||
func (fs *FSBackend) DeleteManifest(
|
||||
ctx context.Context, name string, opts ModifyManifestOptions,
|
||||
) error {
|
||||
if fs.hasAtomicCAS {
|
||||
if guard, err := lockManifest(fs.siteRoot, name); err != nil {
|
||||
return err
|
||||
} else {
|
||||
defer guard.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
domain := filepath.Dir(name)
|
||||
if err := fs.checkDomainFrozen(ctx, domain); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := fs.checkManifestPrecondition(ctx, name, opts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := fs.siteRoot.Remove(name)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return nil
|
||||
@@ -236,6 +403,55 @@ func (fs *FSBackend) DeleteManifest(ctx context.Context, name string) error {
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *FSBackend) EnumerateManifests(ctx context.Context) iter.Seq2[*ManifestMetadata, error] {
|
||||
return func(yield func(*ManifestMetadata, error) bool) {
|
||||
iofs.WalkDir(fs.siteRoot.FS(), ".",
|
||||
func(path string, entry iofs.DirEntry, err error) error {
|
||||
_, project, _ := strings.Cut(path, "/")
|
||||
var metadata *ManifestMetadata
|
||||
if err != nil {
|
||||
// report error
|
||||
} else if entry.IsDir() {
|
||||
// skip directory
|
||||
return nil
|
||||
} else if project == "" || strings.HasPrefix(project, ".") && project != ".index" {
|
||||
// skip internal
|
||||
return nil
|
||||
} else if info, err := entry.Info(); err != nil {
|
||||
// report error
|
||||
} else {
|
||||
// report blob
|
||||
metadata = &ManifestMetadata{
|
||||
Name: path,
|
||||
Size: info.Size(),
|
||||
LastModified: info.ModTime(),
|
||||
}
|
||||
// not setting metadata.ETag since it is too costly
|
||||
}
|
||||
if !yield(metadata, err) {
|
||||
return iofs.SkipAll
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *FSBackend) GetAllManifests(ctx context.Context) iter.Seq2[tuple[*ManifestMetadata, *Manifest], error] {
|
||||
return func(yield func(tuple[*ManifestMetadata, *Manifest], error) bool) {
|
||||
for metadata, err := range fs.EnumerateManifests(ctx) {
|
||||
var item tuple[*ManifestMetadata, *Manifest]
|
||||
if err == nil {
|
||||
var manifest *Manifest
|
||||
manifest, _, err = backend.GetManifest(ctx, metadata.Name, GetManifestOptions{})
|
||||
item = tuple[*ManifestMetadata, *Manifest]{metadata, manifest}
|
||||
}
|
||||
if !yield(item, err) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *FSBackend) CheckDomain(ctx context.Context, domain string) (bool, error) {
|
||||
_, err := fs.siteRoot.Stat(domain)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
@@ -250,3 +466,101 @@ func (fs *FSBackend) CheckDomain(ctx context.Context, domain string) (bool, erro
|
||||
func (fs *FSBackend) CreateDomain(ctx context.Context, domain string) error {
|
||||
return nil // no-op
|
||||
}
|
||||
|
||||
func (fs *FSBackend) FreezeDomain(ctx context.Context, domain string) error {
|
||||
return fs.siteRoot.WriteFile(domainFrozenMarkerName(domain), []byte{}, 0o644)
|
||||
}
|
||||
|
||||
func (fs *FSBackend) UnfreezeDomain(ctx context.Context, domain string) error {
|
||||
err := fs.siteRoot.Remove(domainFrozenMarkerName(domain))
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return nil
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *FSBackend) HaveDomainsChanged(ctx context.Context, since time.Time) (bool, error) {
|
||||
return true, nil // not implemented
|
||||
}
|
||||
|
||||
func auditDetachedName(id AuditID) string {
|
||||
return fmt.Sprintf("%s.detached", id)
|
||||
}
|
||||
|
||||
func (fs *FSBackend) AppendAuditLog(ctx context.Context, id AuditID, record *AuditRecord) error {
|
||||
if _, err := fs.auditRoot.Stat(id.String()); err == nil {
|
||||
panic(fmt.Errorf("audit ID collision: %s", id))
|
||||
}
|
||||
|
||||
return fs.auditRoot.WriteFile(id.String(), EncodeAuditRecord(record), 0o444)
|
||||
}
|
||||
|
||||
func (fs *FSBackend) QueryAuditLog(ctx context.Context, id AuditID) (*AuditRecord, error) {
|
||||
if data, err := fs.auditRoot.ReadFile(id.String()); err != nil {
|
||||
return nil, fmt.Errorf("read: %w", err)
|
||||
} else if record, err := DecodeAuditRecord(data); err != nil {
|
||||
return nil, fmt.Errorf("decode: %w", err)
|
||||
} else {
|
||||
if _, err := fs.auditRoot.Stat(auditDetachedName(id)); err == nil {
|
||||
record.Manifest = nil
|
||||
} else if !errors.Is(err, os.ErrNotExist) {
|
||||
return nil, fmt.Errorf("stat detached marker: %w", err)
|
||||
}
|
||||
return record, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *FSBackend) SearchAuditLog(
|
||||
ctx context.Context, opts SearchAuditLogOptions,
|
||||
) iter.Seq2[AuditID, error] {
|
||||
return func(yield func(AuditID, error) bool) {
|
||||
iofs.WalkDir(fs.auditRoot.FS(), ".",
|
||||
func(path string, entry iofs.DirEntry, err error) error {
|
||||
if path == "." {
|
||||
return nil // skip
|
||||
}
|
||||
var id AuditID
|
||||
if err != nil {
|
||||
// report error
|
||||
} else if strings.Contains(path, ".") {
|
||||
return nil // skip
|
||||
} else if id, err = ParseAuditID(path); err != nil {
|
||||
// report error
|
||||
} else if !opts.Since.IsZero() && id.CompareTime(opts.Since) < 0 {
|
||||
return nil // skip
|
||||
} else if !opts.Until.IsZero() && id.CompareTime(opts.Until) > 0 {
|
||||
return nil // skip
|
||||
}
|
||||
if !yield(id, err) {
|
||||
return iofs.SkipAll // break
|
||||
} else {
|
||||
return nil // continue
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *FSBackend) GetAuditLogRecords(
|
||||
ctx context.Context, ids iter.Seq2[AuditID, error],
|
||||
) iter.Seq2[*AuditRecord, error] {
|
||||
return func(yield func(*AuditRecord, error) bool) {
|
||||
for id, err := range ids {
|
||||
var record *AuditRecord
|
||||
if err == nil {
|
||||
record, err = fs.QueryAuditLog(ctx, id)
|
||||
}
|
||||
if !yield(record, err) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *FSBackend) DetachAuditRecord(ctx context.Context, id AuditID) error {
|
||||
return fs.auditRoot.WriteFile(auditDetachedName(id), []byte{}, 0o644)
|
||||
}
|
||||
|
||||
func (fs *FSBackend) ExpireAuditRecord(ctx context.Context, id AuditID) error {
|
||||
return fs.auditRoot.Remove(id.String())
|
||||
}
|
||||
|
||||
@@ -6,10 +6,11 @@ import (
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"iter"
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/c2h5oh/datasize"
|
||||
@@ -36,7 +37,7 @@ var (
|
||||
manifestCacheEvictionsCount prometheus.Counter
|
||||
|
||||
s3GetObjectDurationSeconds *prometheus.HistogramVec
|
||||
s3GetObjectErrorsCount *prometheus.CounterVec
|
||||
s3GetObjectResponseCount *prometheus.CounterVec
|
||||
)
|
||||
|
||||
func initS3BackendMetrics() {
|
||||
@@ -96,10 +97,10 @@ func initS3BackendMetrics() {
|
||||
NativeHistogramMaxBucketNumber: 100,
|
||||
NativeHistogramMinResetDuration: 10 * time.Minute,
|
||||
}, []string{"kind"})
|
||||
s3GetObjectErrorsCount = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "git_pages_s3_get_object_errors_count",
|
||||
Help: "Count of s3:GetObject errors",
|
||||
}, []string{"object_kind"})
|
||||
s3GetObjectResponseCount = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "git_pages_s3_get_object_responses_count",
|
||||
Help: "Count of s3:GetObject responses",
|
||||
}, []string{"kind", "code"})
|
||||
}
|
||||
|
||||
// Blobs can be safely cached indefinitely. They only need to be evicted to preserve memory.
|
||||
@@ -117,8 +118,7 @@ func (c *CachedBlob) Weight() uint32 { return uint32(len(c.blob)) }
|
||||
type CachedManifest struct {
|
||||
manifest *Manifest
|
||||
weight uint32
|
||||
mtime time.Time
|
||||
etag string
|
||||
metadata ManifestMetadata
|
||||
err error
|
||||
}
|
||||
|
||||
@@ -144,10 +144,12 @@ func makeCacheOptions[K comparable, V any](
|
||||
options.Weigher = weigher
|
||||
}
|
||||
if config.MaxStale != 0 {
|
||||
options.RefreshCalculator = otter.RefreshWriting[K, V](time.Duration(config.MaxAge))
|
||||
options.RefreshCalculator = otter.RefreshWriting[K, V](
|
||||
time.Duration(config.MaxAge))
|
||||
}
|
||||
if config.MaxAge != 0 || config.MaxStale != 0 {
|
||||
options.ExpiryCalculator = otter.ExpiryWriting[K, V](time.Duration(config.MaxAge + config.MaxStale))
|
||||
options.ExpiryCalculator = otter.ExpiryWriting[K, V](
|
||||
time.Duration(config.MaxAge + config.MaxStale))
|
||||
}
|
||||
return options
|
||||
}
|
||||
@@ -170,13 +172,19 @@ func NewS3Backend(ctx context.Context, config *S3Config) (*S3Backend, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if !exists {
|
||||
log.Printf("s3: create bucket %s\n", bucket)
|
||||
logc.Printf(ctx, "s3: create bucket %s\n", bucket)
|
||||
|
||||
err = client.MakeBucket(ctx, bucket,
|
||||
minio.MakeBucketOptions{Region: config.Region})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = (&S3Backend{client: client, bucket: bucket}).
|
||||
EnableFeature(ctx, FeatureCheckDomainMarker)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
initS3BackendMetrics()
|
||||
@@ -236,13 +244,13 @@ func (s3 *S3Backend) HasFeature(ctx context.Context, feature BackendFeature) boo
|
||||
minio.StatObjectOptions{})
|
||||
if err != nil {
|
||||
if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" {
|
||||
log.Printf("s3 feature %q: disabled", feature)
|
||||
logc.Printf(ctx, "s3 feature %q: disabled", feature)
|
||||
return false, nil
|
||||
} else {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
log.Printf("s3 feature %q: enabled", feature)
|
||||
logc.Printf(ctx, "s3 feature %q: enabled", feature)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -250,7 +258,7 @@ func (s3 *S3Backend) HasFeature(ctx context.Context, feature BackendFeature) boo
|
||||
if err != nil {
|
||||
err = fmt.Errorf("getting s3 backend feature %q: %w", feature, err)
|
||||
ObserveError(err)
|
||||
log.Print(err)
|
||||
logc.Println(ctx, err)
|
||||
return false
|
||||
}
|
||||
return isOn
|
||||
@@ -265,10 +273,10 @@ func (s3 *S3Backend) EnableFeature(ctx context.Context, feature BackendFeature)
|
||||
func (s3 *S3Backend) GetBlob(
|
||||
ctx context.Context, name string,
|
||||
) (
|
||||
reader io.ReadSeeker, size uint64, mtime time.Time, err error,
|
||||
reader io.ReadSeeker, metadata BlobMetadata, err error,
|
||||
) {
|
||||
loader := func(ctx context.Context, name string) (*CachedBlob, error) {
|
||||
log.Printf("s3: get blob %s\n", name)
|
||||
logc.Printf(ctx, "s3: get blob %s\n", name)
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
@@ -297,23 +305,33 @@ func (s3 *S3Backend) GetBlob(
|
||||
return &CachedBlob{data, stat.LastModified}, nil
|
||||
}
|
||||
|
||||
observer := func(ctx context.Context, name string) (*CachedBlob, error) {
|
||||
cached, err := loader(ctx, name)
|
||||
var code = "OK"
|
||||
if resp, ok := err.(minio.ErrorResponse); ok {
|
||||
code = resp.Code
|
||||
}
|
||||
s3GetObjectResponseCount.With(prometheus.Labels{"kind": "blob", "code": code}).Inc()
|
||||
return cached, err
|
||||
}
|
||||
|
||||
var cached *CachedBlob
|
||||
cached, err = s3.blobCache.Get(ctx, name, otter.LoaderFunc[string, *CachedBlob](loader))
|
||||
cached, err = s3.blobCache.Get(ctx, name, otter.LoaderFunc[string, *CachedBlob](observer))
|
||||
if err != nil {
|
||||
if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" {
|
||||
s3GetObjectErrorsCount.With(prometheus.Labels{"object_kind": "blob"}).Inc()
|
||||
err = fmt.Errorf("%w: %s", ErrObjectNotFound, errResp.Key)
|
||||
}
|
||||
} else {
|
||||
reader = bytes.NewReader(cached.blob)
|
||||
size = uint64(len(cached.blob))
|
||||
mtime = cached.mtime
|
||||
metadata.Name = name
|
||||
metadata.Size = int64(len(cached.blob))
|
||||
metadata.LastModified = cached.mtime
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) PutBlob(ctx context.Context, name string, data []byte) error {
|
||||
log.Printf("s3: put blob %s (%s)\n", name, datasize.ByteSize(len(data)).HumanReadable())
|
||||
logc.Printf(ctx, "s3: put blob %s (%s)\n", name, datasize.ByteSize(len(data)).HumanReadable())
|
||||
|
||||
_, err := s3.client.StatObject(ctx, s3.bucket, blobObjectName(name),
|
||||
minio.GetObjectOptions{})
|
||||
@@ -325,7 +343,7 @@ func (s3 *S3Backend) PutBlob(ctx context.Context, name string, data []byte) erro
|
||||
return err
|
||||
} else {
|
||||
ObserveData(ctx, "blob.status", "created")
|
||||
log.Printf("s3: put blob %s (created)\n", name)
|
||||
logc.Printf(ctx, "s3: put blob %s (created)\n", name)
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
@@ -333,7 +351,7 @@ func (s3 *S3Backend) PutBlob(ctx context.Context, name string, data []byte) erro
|
||||
}
|
||||
} else {
|
||||
ObserveData(ctx, "blob.status", "exists")
|
||||
log.Printf("s3: put blob %s (exists)\n", name)
|
||||
logc.Printf(ctx, "s3: put blob %s (exists)\n", name)
|
||||
blobsDedupedCount.Inc()
|
||||
blobsDedupedBytes.Add(float64(len(data)))
|
||||
return nil
|
||||
@@ -341,12 +359,43 @@ func (s3 *S3Backend) PutBlob(ctx context.Context, name string, data []byte) erro
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) DeleteBlob(ctx context.Context, name string) error {
|
||||
log.Printf("s3: delete blob %s\n", name)
|
||||
logc.Printf(ctx, "s3: delete blob %s\n", name)
|
||||
|
||||
return s3.client.RemoveObject(ctx, s3.bucket, blobObjectName(name),
|
||||
minio.RemoveObjectOptions{})
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) EnumerateBlobs(ctx context.Context) iter.Seq2[BlobMetadata, error] {
|
||||
return func(yield func(BlobMetadata, error) bool) {
|
||||
logc.Print(ctx, "s3: enumerate blobs")
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
prefix := "blob/"
|
||||
for object := range s3.client.ListObjectsIter(ctx, s3.bucket, minio.ListObjectsOptions{
|
||||
Prefix: prefix,
|
||||
Recursive: true,
|
||||
}) {
|
||||
var metadata BlobMetadata
|
||||
var err error
|
||||
if err = object.Err; err == nil {
|
||||
key := strings.TrimPrefix(object.Key, prefix)
|
||||
if strings.HasSuffix(key, "/") {
|
||||
continue // directory; skip
|
||||
} else {
|
||||
metadata.Name = joinBlobName(strings.Split(key, "/"))
|
||||
metadata.Size = object.Size
|
||||
metadata.LastModified = object.LastModified
|
||||
}
|
||||
}
|
||||
if !yield(metadata, err) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func manifestObjectName(name string) string {
|
||||
return fmt.Sprintf("site/%s", name)
|
||||
}
|
||||
@@ -355,55 +404,37 @@ func stagedManifestObjectName(manifestData []byte) string {
|
||||
return fmt.Sprintf("dirty/%x", sha256.Sum256(manifestData))
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) ListManifests(ctx context.Context) (manifests []string, err error) {
|
||||
log.Print("s3: list manifests")
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
prefix := manifestObjectName("")
|
||||
for object := range s3.client.ListObjectsIter(ctx, s3.bucket, minio.ListObjectsOptions{
|
||||
Prefix: prefix,
|
||||
Recursive: true,
|
||||
}) {
|
||||
if object.Err != nil {
|
||||
return nil, object.Err
|
||||
}
|
||||
key := strings.TrimRight(strings.TrimPrefix(object.Key, prefix), "/")
|
||||
if strings.Count(key, "/") > 1 {
|
||||
continue
|
||||
}
|
||||
_, project, _ := strings.Cut(key, "/")
|
||||
if project == "" || strings.HasPrefix(project, ".") && project != ".index" {
|
||||
continue
|
||||
}
|
||||
manifests = append(manifests, key)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
type s3ManifestLoader struct {
|
||||
s3 *S3Backend
|
||||
}
|
||||
|
||||
func (l s3ManifestLoader) Load(ctx context.Context, key string) (*CachedManifest, error) {
|
||||
func (l s3ManifestLoader) Load(
|
||||
ctx context.Context, key string,
|
||||
) (
|
||||
*CachedManifest, error,
|
||||
) {
|
||||
return l.load(ctx, key, nil)
|
||||
}
|
||||
|
||||
func (l s3ManifestLoader) Reload(ctx context.Context, key string, oldValue *CachedManifest) (*CachedManifest, error) {
|
||||
func (l s3ManifestLoader) Reload(
|
||||
ctx context.Context, key string, oldValue *CachedManifest,
|
||||
) (
|
||||
*CachedManifest, error,
|
||||
) {
|
||||
return l.load(ctx, key, oldValue)
|
||||
}
|
||||
|
||||
func (l s3ManifestLoader) load(ctx context.Context, name string, oldManifest *CachedManifest) (*CachedManifest, error) {
|
||||
func (l s3ManifestLoader) load(
|
||||
ctx context.Context, name string, oldManifest *CachedManifest,
|
||||
) (
|
||||
*CachedManifest, error,
|
||||
) {
|
||||
logc.Printf(ctx, "s3: get manifest %s\n", name)
|
||||
|
||||
loader := func() (*CachedManifest, error) {
|
||||
log.Printf("s3: get manifest %s\n", name)
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
opts := minio.GetObjectOptions{}
|
||||
if oldManifest != nil && oldManifest.etag != "" {
|
||||
opts.SetMatchETagExcept(oldManifest.etag)
|
||||
if oldManifest != nil && oldManifest.metadata.ETag != "" {
|
||||
opts.SetMatchETagExcept(oldManifest.metadata.ETag)
|
||||
}
|
||||
object, err := l.s3.client.GetObject(ctx, l.s3.bucket, manifestObjectName(name), opts)
|
||||
// Note that many errors (e.g. NoSuchKey) will be reported only after this point.
|
||||
@@ -427,20 +458,34 @@ func (l s3ManifestLoader) load(ctx context.Context, name string, oldManifest *Ca
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s3GetObjectDurationSeconds.
|
||||
With(prometheus.Labels{"kind": "manifest"}).
|
||||
Observe(time.Since(startTime).Seconds())
|
||||
|
||||
return &CachedManifest{manifest, uint32(len(data)), stat.LastModified, stat.ETag, nil}, nil
|
||||
metadata := ManifestMetadata{
|
||||
LastModified: stat.LastModified,
|
||||
ETag: stat.ETag,
|
||||
}
|
||||
return &CachedManifest{manifest, uint32(len(data)), metadata, nil}, nil
|
||||
}
|
||||
|
||||
var cached *CachedManifest
|
||||
cached, err := loader()
|
||||
observer := func() (*CachedManifest, error) {
|
||||
cached, err := loader()
|
||||
var code = "OK"
|
||||
if resp, ok := err.(minio.ErrorResponse); ok {
|
||||
code = resp.Code
|
||||
}
|
||||
s3GetObjectResponseCount.With(prometheus.Labels{"kind": "manifest", "code": code}).Inc()
|
||||
return cached, err
|
||||
}
|
||||
|
||||
startTime := time.Now()
|
||||
cached, err := observer()
|
||||
s3GetObjectDurationSeconds.
|
||||
With(prometheus.Labels{"kind": "manifest"}).
|
||||
Observe(time.Since(startTime).Seconds())
|
||||
|
||||
if err != nil {
|
||||
if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" {
|
||||
s3GetObjectErrorsCount.With(prometheus.Labels{"object_kind": "manifest"}).Inc()
|
||||
errResp := minio.ToErrorResponse(err)
|
||||
if errResp.Code == "NoSuchKey" {
|
||||
err = fmt.Errorf("%w: %s", ErrObjectNotFound, errResp.Key)
|
||||
return &CachedManifest{nil, 1, time.Time{}, "", err}, nil
|
||||
return &CachedManifest{nil, 1, ManifestMetadata{}, err}, nil
|
||||
} else if errResp.StatusCode == http.StatusNotModified && oldManifest != nil {
|
||||
return oldManifest, nil
|
||||
} else {
|
||||
@@ -454,7 +499,7 @@ func (l s3ManifestLoader) load(ctx context.Context, name string, oldManifest *Ca
|
||||
func (s3 *S3Backend) GetManifest(
|
||||
ctx context.Context, name string, opts GetManifestOptions,
|
||||
) (
|
||||
manifest *Manifest, mtime time.Time, err error,
|
||||
manifest *Manifest, metadata ManifestMetadata, err error,
|
||||
) {
|
||||
if opts.BypassCache {
|
||||
entry, found := s3.siteCache.Cache.GetEntry(name)
|
||||
@@ -469,33 +514,112 @@ func (s3 *S3Backend) GetManifest(
|
||||
return
|
||||
} else {
|
||||
// This could be `manifest, mtime, nil` or `nil, time.Time{}, ErrObjectNotFound`.
|
||||
manifest, mtime, err = cached.manifest, cached.mtime, cached.err
|
||||
manifest, metadata, err = cached.manifest, cached.metadata, cached.err
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) StageManifest(ctx context.Context, manifest *Manifest) error {
|
||||
data := EncodeManifest(manifest)
|
||||
log.Printf("s3: stage manifest %x\n", sha256.Sum256(data))
|
||||
logc.Printf(ctx, "s3: stage manifest %x\n", sha256.Sum256(data))
|
||||
|
||||
_, err := s3.client.PutObject(ctx, s3.bucket, stagedManifestObjectName(data),
|
||||
bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{})
|
||||
return err
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) CommitManifest(ctx context.Context, name string, manifest *Manifest) error {
|
||||
func domainFrozenObjectName(domain string) string {
|
||||
return manifestObjectName(fmt.Sprintf("%s/.frozen", domain))
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) checkDomainFrozen(ctx context.Context, domain string) error {
|
||||
_, err := s3.client.StatObject(ctx, s3.bucket, domainFrozenObjectName(domain),
|
||||
minio.GetObjectOptions{})
|
||||
if err == nil {
|
||||
return ErrDomainFrozen
|
||||
} else if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" {
|
||||
return nil
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) HasAtomicCAS(ctx context.Context) bool {
|
||||
// Support for `If-Unmodified-Since:` or `If-Match:` for PutObject requests is very spotty:
|
||||
// - AWS supports only `If-Match:`:
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
|
||||
// - Minio supports `If-Match:`:
|
||||
// https://blog.min.io/leading-the-way-minios-conditional-write-feature-for-modern-data-workloads/
|
||||
// - Tigris supports `If-Unmodified-Since:` and `If-Match:`, but only with `X-Tigris-Consistent: true`;
|
||||
// https://www.tigrisdata.com/docs/objects/conditionals/
|
||||
// Note that the `X-Tigris-Consistent: true` header must be present on *every* transaction
|
||||
// touching the object, not just on the CAS transactions.
|
||||
// - Wasabi does not support either one and docs seem to suggest that the headers are ignored;
|
||||
// - Garage does not support either one and source code suggests the headers are ignored.
|
||||
// It seems that the only safe option is to not claim support for atomic CAS, and only do
|
||||
// best-effort CAS implementation using HeadObject and PutObject/DeleteObject.
|
||||
return false
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) checkManifestPrecondition(
|
||||
ctx context.Context, name string, opts ModifyManifestOptions,
|
||||
) error {
|
||||
if opts.IfUnmodifiedSince.IsZero() && opts.IfMatch == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
stat, err := s3.client.StatObject(ctx, s3.bucket, manifestObjectName(name),
|
||||
minio.GetObjectOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !opts.IfUnmodifiedSince.IsZero() && stat.LastModified.Compare(opts.IfUnmodifiedSince) > 0 {
|
||||
return fmt.Errorf("%w: If-Unmodified-Since", ErrPreconditionFailed)
|
||||
}
|
||||
if opts.IfMatch != "" && stat.ETag != opts.IfMatch {
|
||||
return fmt.Errorf("%w: If-Match", ErrPreconditionFailed)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) CommitManifest(
|
||||
ctx context.Context, name string, manifest *Manifest, opts ModifyManifestOptions,
|
||||
) error {
|
||||
data := EncodeManifest(manifest)
|
||||
log.Printf("s3: commit manifest %x -> %s", sha256.Sum256(data), name)
|
||||
logc.Printf(ctx, "s3: commit manifest %x -> %s", sha256.Sum256(data), name)
|
||||
|
||||
domain, _, _ := strings.Cut(name, "/")
|
||||
if err := s3.checkDomainFrozen(ctx, domain); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s3.checkManifestPrecondition(ctx, name, opts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove staged object unconditionally (whether commit succeeded or failed), since
|
||||
// the upper layer has to retry the complete operation anyway.
|
||||
putOptions := minio.PutObjectOptions{}
|
||||
putOptions.Header().Add("X-Tigris-Consistent", "true")
|
||||
if opts.IfMatch != "" {
|
||||
// Not guaranteed to do anything (see `HasAtomicCAS`), but let's try anyway;
|
||||
// this is a "belt and suspenders" approach, together with `checkManifestPrecondition`.
|
||||
// It does reliably work on MinIO at least.
|
||||
putOptions.SetMatchETag(opts.IfMatch)
|
||||
}
|
||||
_, putErr := s3.client.PutObject(ctx, s3.bucket, manifestObjectName(name),
|
||||
bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{})
|
||||
bytes.NewReader(data), int64(len(data)), putOptions)
|
||||
removeErr := s3.client.RemoveObject(ctx, s3.bucket, stagedManifestObjectName(data),
|
||||
minio.RemoveObjectOptions{})
|
||||
s3.siteCache.Cache.Invalidate(name)
|
||||
if putErr != nil {
|
||||
return putErr
|
||||
if errResp := minio.ToErrorResponse(putErr); errResp.Code == "PreconditionFailed" {
|
||||
return ErrPreconditionFailed
|
||||
} else {
|
||||
return putErr
|
||||
}
|
||||
} else if removeErr != nil {
|
||||
return removeErr
|
||||
} else {
|
||||
@@ -503,13 +627,106 @@ func (s3 *S3Backend) CommitManifest(ctx context.Context, name string, manifest *
|
||||
}
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) DeleteManifest(ctx context.Context, name string) error {
|
||||
log.Printf("s3: delete manifest %s\n", name)
|
||||
func (s3 *S3Backend) DeleteManifest(
|
||||
ctx context.Context, name string, opts ModifyManifestOptions,
|
||||
) error {
|
||||
logc.Printf(ctx, "s3: delete manifest %s\n", name)
|
||||
|
||||
domain, _, _ := strings.Cut(name, "/")
|
||||
if err := s3.checkDomainFrozen(ctx, domain); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s3.checkManifestPrecondition(ctx, name, opts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := s3.client.RemoveObject(ctx, s3.bucket, manifestObjectName(name),
|
||||
minio.RemoveObjectOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s3.siteCache.Cache.Invalidate(name)
|
||||
return err
|
||||
return s3.bumpLastDomainUpdateTimestamp(ctx)
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) EnumerateManifests(ctx context.Context) iter.Seq2[*ManifestMetadata, error] {
|
||||
return func(yield func(*ManifestMetadata, error) bool) {
|
||||
logc.Print(ctx, "s3: enumerate manifests")
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
prefix := "site/"
|
||||
for object := range s3.client.ListObjectsIter(ctx, s3.bucket, minio.ListObjectsOptions{
|
||||
Prefix: prefix,
|
||||
Recursive: true,
|
||||
}) {
|
||||
var metadata *ManifestMetadata
|
||||
var err error
|
||||
if err = object.Err; err == nil {
|
||||
key := strings.TrimPrefix(object.Key, prefix)
|
||||
_, project, _ := strings.Cut(key, "/")
|
||||
if strings.HasSuffix(key, "/") {
|
||||
continue // directory; skip
|
||||
} else if project == "" || strings.HasPrefix(project, ".") && project != ".index" {
|
||||
continue // internal; skip
|
||||
} else {
|
||||
metadata = &ManifestMetadata{
|
||||
Name: key,
|
||||
Size: object.Size,
|
||||
LastModified: object.LastModified,
|
||||
ETag: object.ETag,
|
||||
}
|
||||
}
|
||||
}
|
||||
if !yield(metadata, err) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Limits the number of concurrent uploads, globally across the entire git-pages process.
|
||||
// Not currently configurable as there seems to be little need.
|
||||
var getAllManifestsSemaphore = make(chan struct{}, 64)
|
||||
|
||||
func (s3 *S3Backend) GetAllManifests(ctx context.Context) iter.Seq2[tuple[*ManifestMetadata, *Manifest], error] {
|
||||
type result struct {
|
||||
metadata *ManifestMetadata
|
||||
manifest *Manifest
|
||||
err error
|
||||
}
|
||||
|
||||
resultsChan := make(chan result)
|
||||
enumeratorCtx, cancel := context.WithCancel(ctx)
|
||||
go func(ctx context.Context) {
|
||||
wg := sync.WaitGroup{}
|
||||
for metadata, err := range s3.EnumerateManifests(ctx) {
|
||||
if err != nil {
|
||||
resultsChan <- result{nil, nil, err}
|
||||
} else {
|
||||
getAllManifestsSemaphore <- struct{}{} // acquire
|
||||
wg.Go(func() {
|
||||
defer func() { <-getAllManifestsSemaphore }() // release
|
||||
manifest, _, err := backend.GetManifest(ctx, metadata.Name, GetManifestOptions{})
|
||||
resultsChan <- result{metadata, manifest, err}
|
||||
})
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
close(resultsChan)
|
||||
}(enumeratorCtx)
|
||||
|
||||
return func(yield func(tuple[*ManifestMetadata, *Manifest], error) bool) {
|
||||
for result := range resultsChan {
|
||||
item := tuple[*ManifestMetadata, *Manifest]{result.metadata, result.manifest}
|
||||
if !yield(item, result.err) {
|
||||
cancel()
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func domainCheckObjectName(domain string) string {
|
||||
@@ -517,7 +734,7 @@ func domainCheckObjectName(domain string) string {
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) CheckDomain(ctx context.Context, domain string) (exists bool, err error) {
|
||||
log.Printf("s3: check domain %s\n", domain)
|
||||
logc.Printf(ctx, "s3: check domain %s\n", domain)
|
||||
|
||||
_, err = s3.client.StatObject(ctx, s3.bucket, domainCheckObjectName(domain),
|
||||
minio.StatObjectOptions{})
|
||||
@@ -548,9 +765,200 @@ func (s3 *S3Backend) CheckDomain(ctx context.Context, domain string) (exists boo
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) CreateDomain(ctx context.Context, domain string) error {
|
||||
log.Printf("s3: create domain %s\n", domain)
|
||||
logc.Printf(ctx, "s3: create domain %s\n", domain)
|
||||
|
||||
_, err := s3.client.PutObject(ctx, s3.bucket, domainCheckObjectName(domain),
|
||||
exists, err := s3.CheckDomain(ctx, domain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = s3.client.PutObject(ctx, s3.bucket, domainCheckObjectName(domain),
|
||||
&bytes.Reader{}, 0, minio.PutObjectOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
err = s3.bumpLastDomainUpdateTimestamp(ctx)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) FreezeDomain(ctx context.Context, domain string) error {
|
||||
logc.Printf(ctx, "s3: freeze domain %s\n", domain)
|
||||
|
||||
_, err := s3.client.PutObject(ctx, s3.bucket, domainFrozenObjectName(domain),
|
||||
&bytes.Reader{}, 0, minio.PutObjectOptions{})
|
||||
return err
|
||||
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) UnfreezeDomain(ctx context.Context, domain string) error {
|
||||
logc.Printf(ctx, "s3: unfreeze domain %s\n", domain)
|
||||
|
||||
err := s3.client.RemoveObject(ctx, s3.bucket, domainFrozenObjectName(domain),
|
||||
minio.RemoveObjectOptions{})
|
||||
if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" {
|
||||
return nil
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
const lastDomainUpdateObjectName = "meta/last-domain-update"
|
||||
|
||||
func (s3 *S3Backend) HaveDomainsChanged(ctx context.Context, since time.Time) (bool, error) {
|
||||
info, err := s3.client.StatObject(ctx, s3.bucket, lastDomainUpdateObjectName,
|
||||
minio.GetObjectOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return info.LastModified.After(since), nil
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) bumpLastDomainUpdateTimestamp(ctx context.Context) error {
|
||||
logc.Print(ctx, "s3: bumping last domain update timestamp")
|
||||
_, err := s3.client.PutObject(ctx, s3.bucket, lastDomainUpdateObjectName,
|
||||
&bytes.Reader{}, 0, minio.PutObjectOptions{})
|
||||
return err
|
||||
}
|
||||
|
||||
func auditObjectName(id AuditID) string {
|
||||
return fmt.Sprintf("audit/%s", id)
|
||||
}
|
||||
|
||||
func auditDetachedObjectName(id AuditID) string {
|
||||
return fmt.Sprintf("audit/%s.detached", id)
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) AppendAuditLog(ctx context.Context, id AuditID, record *AuditRecord) error {
|
||||
logc.Printf(ctx, "s3: append audit %s\n", id)
|
||||
|
||||
name := auditObjectName(id)
|
||||
data := EncodeAuditRecord(record)
|
||||
|
||||
options := minio.PutObjectOptions{}
|
||||
options.SetMatchETagExcept("*") // may or may not be supported
|
||||
_, err := s3.client.PutObject(ctx, s3.bucket, name,
|
||||
bytes.NewReader(data), int64(len(data)), options)
|
||||
if errResp := minio.ToErrorResponse(err); errResp.StatusCode == 412 {
|
||||
panic(fmt.Errorf("audit ID collision: %s", name))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) QueryAuditLog(ctx context.Context, id AuditID) (*AuditRecord, error) {
|
||||
logc.Printf(ctx, "s3: read audit %s\n", id)
|
||||
|
||||
object, err := s3.client.GetObject(ctx, s3.bucket, auditObjectName(id),
|
||||
minio.GetObjectOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer object.Close()
|
||||
|
||||
data, err := io.ReadAll(object)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
record, err := DecodeAuditRecord(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = s3.client.StatObject(ctx, s3.bucket, auditDetachedObjectName(id),
|
||||
minio.StatObjectOptions{})
|
||||
if err == nil {
|
||||
record.Manifest = nil
|
||||
} else if errResp := minio.ToErrorResponse(err); err != nil && errResp.Code != "NoSuchKey" {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return record, nil
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) SearchAuditLog(
|
||||
ctx context.Context, opts SearchAuditLogOptions,
|
||||
) iter.Seq2[AuditID, error] {
|
||||
return func(yield func(AuditID, error) bool) {
|
||||
logc.Printf(ctx, "s3: search audit\n")
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
prefix := "audit/"
|
||||
for object := range s3.client.ListObjectsIter(ctx, s3.bucket, minio.ListObjectsOptions{
|
||||
Prefix: prefix,
|
||||
}) {
|
||||
var id AuditID
|
||||
var err error
|
||||
if object.Err != nil {
|
||||
err = object.Err
|
||||
} else if strings.Contains(object.Key, ".") {
|
||||
continue
|
||||
} else if id, err = ParseAuditID(strings.TrimPrefix(object.Key, prefix)); err != nil {
|
||||
// report error
|
||||
} else if !opts.Since.IsZero() && id.CompareTime(opts.Since) < 0 {
|
||||
continue
|
||||
} else if !opts.Until.IsZero() && id.CompareTime(opts.Until) > 0 {
|
||||
continue
|
||||
}
|
||||
if !yield(id, err) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var getAuditLogRecordsSemaphore = make(chan struct{}, 64)
|
||||
|
||||
func (s3 *S3Backend) GetAuditLogRecords(
|
||||
ctx context.Context, ids iter.Seq2[AuditID, error],
|
||||
) iter.Seq2[*AuditRecord, error] {
|
||||
return func(yield func(*AuditRecord, error) bool) {
|
||||
resultsChan := make(chan tuple[*AuditRecord, error])
|
||||
enumeratorCtx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
go func(ctx context.Context) {
|
||||
wg := sync.WaitGroup{}
|
||||
for id, err := range ids {
|
||||
if err != nil {
|
||||
resultsChan <- tuple[*AuditRecord, error]{nil, err}
|
||||
} else {
|
||||
getAuditLogRecordsSemaphore <- struct{}{} // acquire
|
||||
wg.Go(func() {
|
||||
defer func() { <-getAuditLogRecordsSemaphore }() // release
|
||||
record, err := s3.QueryAuditLog(ctx, id)
|
||||
resultsChan <- tuple[*AuditRecord, error]{record, err}
|
||||
})
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
close(resultsChan)
|
||||
}(enumeratorCtx)
|
||||
|
||||
for result := range resultsChan {
|
||||
record, err := result.Splat()
|
||||
if !yield(record, err) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) DetachAuditRecord(ctx context.Context, id AuditID) error {
|
||||
logc.Printf(ctx, "s3: detach audit record %s\n", id)
|
||||
|
||||
_, err := s3.client.PutObject(ctx, s3.bucket, auditDetachedObjectName(id),
|
||||
&bytes.Reader{}, 0, minio.PutObjectOptions{})
|
||||
return err
|
||||
}
|
||||
|
||||
func (s3 *S3Backend) ExpireAuditRecord(ctx context.Context, id AuditID) error {
|
||||
logc.Printf(ctx, "s3: expire audit record %s\n", id)
|
||||
|
||||
return s3.client.RemoveObject(ctx, s3.bucket, auditObjectName(id),
|
||||
minio.RemoveObjectOptions{})
|
||||
}
|
||||
|
||||
77
src/caddy.go
77
src/caddy.go
@@ -1,12 +1,11 @@
|
||||
package git_pages
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@@ -22,12 +21,22 @@ func ServeCaddy(w http.ResponseWriter, r *http.Request) {
|
||||
// this isn't really what git-pages is designed for, and object store accesses can cost money.
|
||||
// [^1]: https://letsencrypt.org/2025/07/01/issuing-our-first-ip-address-certificate
|
||||
if ip := net.ParseIP(domain); ip != nil {
|
||||
log.Println("caddy:", domain, 404, "(bare IP)")
|
||||
logc.Println(r.Context(), "caddy:", domain, 404, "(bare IP)")
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
found, err := backend.CheckDomain(r.Context(), strings.ToLower(domain))
|
||||
var err error
|
||||
domain = strings.ToLower(domain)
|
||||
|
||||
// Run a cheap check as to whether we might be serving the domain.
|
||||
var found = domainCache.CheckDomain(r.Context(), domain)
|
||||
|
||||
if found {
|
||||
// Run an expensive check as to whether we are actually serving the domain.
|
||||
found, err = backend.CheckDomain(r.Context(), domain)
|
||||
}
|
||||
|
||||
if !found {
|
||||
// If we don't serve the domain, but a fallback server does, then we should let our
|
||||
// Caddy instance request a TLS certificate. Otherwise, we'll never have an opportunity
|
||||
@@ -35,43 +44,47 @@ func ServeCaddy(w http.ResponseWriter, r *http.Request) {
|
||||
// Pages v2, which would under some circumstances return certificates with subjectAltName
|
||||
// not valid for the SNI. Go's TLS stack makes `tls.Dial` return an error for these,
|
||||
// thankfully making it unnecessary to examine X.509 certificates manually here.)
|
||||
for _, wildcardConfig := range config.Wildcard {
|
||||
if wildcardConfig.FallbackProxyTo == "" {
|
||||
continue
|
||||
}
|
||||
fallbackURL, err := url.Parse(wildcardConfig.FallbackProxyTo)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if fallbackURL.Scheme != "https" {
|
||||
continue
|
||||
}
|
||||
connectHost := fallbackURL.Host
|
||||
if fallbackURL.Port() != "" {
|
||||
connectHost += ":" + fallbackURL.Port()
|
||||
} else {
|
||||
connectHost += ":443"
|
||||
}
|
||||
log.Printf("caddy: check TLS %s", fallbackURL)
|
||||
connection, err := tls.Dial("tcp", connectHost, &tls.Config{ServerName: domain})
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
connection.Close()
|
||||
found = true
|
||||
break
|
||||
found, err = tryDialWithSNI(r.Context(), domain)
|
||||
if err != nil {
|
||||
logc.Printf(r.Context(), "caddy err: check SNI: %s\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
log.Println("caddy:", domain, 200)
|
||||
logc.Println(r.Context(), "caddy:", domain, 200)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
} else if err == nil {
|
||||
log.Println("caddy:", domain, 404)
|
||||
logc.Println(r.Context(), "caddy:", domain, 404)
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
} else {
|
||||
log.Println("caddy:", domain, 500)
|
||||
logc.Println(r.Context(), "caddy:", domain, 500)
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
fmt.Fprintln(w, err)
|
||||
}
|
||||
}
|
||||
|
||||
func tryDialWithSNI(ctx context.Context, domain string) (bool, error) {
|
||||
if config.Fallback.ProxyTo == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
fallbackURL := config.Fallback.ProxyTo
|
||||
if fallbackURL.Scheme != "https" {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
connectHost := fallbackURL.Host
|
||||
if fallbackURL.Port() != "" {
|
||||
connectHost += ":" + fallbackURL.Port()
|
||||
} else {
|
||||
connectHost += ":443"
|
||||
}
|
||||
|
||||
logc.Printf(ctx, "caddy: check TLS %s", fallbackURL)
|
||||
connection, err := tls.Dial("tcp", connectHost, &tls.Config{ServerName: domain})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
connection.Close()
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Flusher interface {
|
||||
@@ -14,7 +13,7 @@ type Flusher interface {
|
||||
|
||||
// Inverse of `ExtractTar`.
|
||||
func CollectTar(
|
||||
context context.Context, writer io.Writer, manifest *Manifest, manifestMtime time.Time,
|
||||
context context.Context, writer io.Writer, manifest *Manifest, metadata ManifestMetadata,
|
||||
) (
|
||||
err error,
|
||||
) {
|
||||
@@ -22,22 +21,25 @@ func CollectTar(
|
||||
|
||||
appendFile := func(header *tar.Header, data []byte, transform Transform) (err error) {
|
||||
switch transform {
|
||||
case Transform_None:
|
||||
case Transform_Zstandard:
|
||||
case Transform_Identity:
|
||||
case Transform_Zstd:
|
||||
data, err = zstdDecoder.DecodeAll(data, []byte{})
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("zstd: %s: %w", header.Name, err)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unexpected transform")
|
||||
return fmt.Errorf("%s: unexpected transform", header.Name)
|
||||
}
|
||||
header.Size = int64(len(data))
|
||||
|
||||
err = archive.WriteHeader(header)
|
||||
if err != nil {
|
||||
return
|
||||
return fmt.Errorf("tar: %w", err)
|
||||
}
|
||||
_, err = archive.Write(data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tar: %w", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -52,37 +54,45 @@ func CollectTar(
|
||||
case Type_Directory:
|
||||
header.Typeflag = tar.TypeDir
|
||||
header.Mode = 0755
|
||||
header.ModTime = manifestMtime
|
||||
err = appendFile(&header, nil, Transform_None)
|
||||
header.ModTime = metadata.LastModified
|
||||
err = appendFile(&header, nil, Transform_Identity)
|
||||
|
||||
case Type_InlineFile:
|
||||
header.Typeflag = tar.TypeReg
|
||||
header.Mode = 0644
|
||||
header.ModTime = manifestMtime
|
||||
header.ModTime = metadata.LastModified
|
||||
err = appendFile(&header, entry.GetData(), entry.GetTransform())
|
||||
|
||||
case Type_ExternalFile:
|
||||
var blobReader io.Reader
|
||||
var blobMtime time.Time
|
||||
var blobMetadata BlobMetadata
|
||||
var blobData []byte
|
||||
blobReader, _, blobMtime, err = backend.GetBlob(context, string(entry.Data))
|
||||
blobReader, blobMetadata, err = backend.GetBlob(context, string(entry.Data))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
blobData, err = io.ReadAll(blobReader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
blobData, _ = io.ReadAll(blobReader)
|
||||
header.Typeflag = tar.TypeReg
|
||||
header.Mode = 0644
|
||||
header.ModTime = blobMtime
|
||||
header.ModTime = blobMetadata.LastModified
|
||||
err = appendFile(&header, blobData, entry.GetTransform())
|
||||
|
||||
case Type_Symlink:
|
||||
header.Typeflag = tar.TypeSymlink
|
||||
header.Mode = 0644
|
||||
header.ModTime = manifestMtime
|
||||
err = appendFile(&header, entry.GetData(), Transform_None)
|
||||
header.ModTime = metadata.LastModified
|
||||
header.Linkname = string(entry.GetData())
|
||||
err = archive.WriteHeader(&header)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tar: %w", err)
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unexpected entry type")
|
||||
panic(fmt.Errorf("CollectTar encountered invalid entry: %v, %v",
|
||||
entry.GetType(), entry.GetTransform()))
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -94,8 +104,8 @@ func CollectTar(
|
||||
Name: RedirectsFileName,
|
||||
Typeflag: tar.TypeReg,
|
||||
Mode: 0644,
|
||||
ModTime: manifestMtime,
|
||||
}, []byte(redirects), Transform_None)
|
||||
ModTime: metadata.LastModified,
|
||||
}, []byte(redirects), Transform_Identity)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -106,8 +116,8 @@ func CollectTar(
|
||||
Name: HeadersFileName,
|
||||
Typeflag: tar.TypeReg,
|
||||
Mode: 0644,
|
||||
ModTime: manifestMtime,
|
||||
}, []byte(headers), Transform_None)
|
||||
ModTime: metadata.LastModified,
|
||||
}, []byte(headers), Transform_Identity)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -115,7 +125,7 @@ func CollectTar(
|
||||
|
||||
err = archive.Flush()
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("tar: %w", err)
|
||||
}
|
||||
|
||||
flusher, ok := writer.(Flusher)
|
||||
|
||||
145
src/config.go
145
src/config.go
@@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"reflect"
|
||||
"slices"
|
||||
@@ -11,12 +12,12 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/c2h5oh/datasize"
|
||||
"github.com/creasty/defaults"
|
||||
"github.com/pelletier/go-toml/v2"
|
||||
)
|
||||
|
||||
// For some reason, the standard `time.Duration` type doesn't implement the standard
|
||||
// For an unknown reason, the standard `time.Duration` type doesn't implement the standard
|
||||
// `encoding.{TextMarshaler,TextUnmarshaler}` interfaces.
|
||||
type Duration time.Duration
|
||||
|
||||
@@ -26,7 +27,9 @@ func (t Duration) String() string {
|
||||
|
||||
func (t *Duration) UnmarshalText(data []byte) (err error) {
|
||||
u, err := time.ParseDuration(string(data))
|
||||
*t = Duration(u)
|
||||
if err == nil {
|
||||
*t = Duration(u)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -34,31 +37,58 @@ func (t *Duration) MarshalText() ([]byte, error) {
|
||||
return []byte(t.String()), nil
|
||||
}
|
||||
|
||||
// For a known but upsetting reason, the standard `url.URL` type doesn't implement the standard
|
||||
// `encoding.{TextMarshaler,TextUnmarshaler}` interfaces.
|
||||
type URL struct {
|
||||
url.URL
|
||||
}
|
||||
|
||||
func (t *URL) String() string {
|
||||
return fmt.Sprint(&t.URL)
|
||||
}
|
||||
|
||||
func (t *URL) UnmarshalText(data []byte) (err error) {
|
||||
u, err := url.Parse(string(data))
|
||||
if err == nil {
|
||||
*t = URL{*u}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (t *URL) MarshalText() ([]byte, error) {
|
||||
return []byte(t.String()), nil
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
Insecure bool `toml:"-" env:"insecure"`
|
||||
Features []string `toml:"features"`
|
||||
LogFormat string `toml:"log-format" default:"text"`
|
||||
Server ServerConfig `toml:"server"`
|
||||
Wildcard []WildcardConfig `toml:"wildcard"`
|
||||
Fallback FallbackConfig `toml:"fallback"`
|
||||
Storage StorageConfig `toml:"storage"`
|
||||
Limits LimitsConfig `toml:"limits"`
|
||||
Audit AuditConfig `toml:"audit"`
|
||||
Observability ObservabilityConfig `toml:"observability"`
|
||||
}
|
||||
|
||||
type ServerConfig struct {
|
||||
Pages string `toml:"pages" default:"tcp/:3000"`
|
||||
Caddy string `toml:"caddy" default:"tcp/:3001"`
|
||||
Metrics string `toml:"metrics" default:"tcp/:3002"`
|
||||
Pages string `toml:"pages" default:"tcp/localhost:3000"`
|
||||
Caddy string `toml:"caddy" default:"tcp/localhost:3001"`
|
||||
Metrics string `toml:"metrics" default:"tcp/localhost:3002"`
|
||||
}
|
||||
|
||||
type WildcardConfig struct {
|
||||
Domain string `toml:"domain"`
|
||||
CloneURL string `toml:"clone-url"`
|
||||
IndexRepos []string `toml:"index-repos" default:"[]"`
|
||||
IndexRepoBranch string `toml:"index-repo-branch" default:"pages"`
|
||||
Authorization string `toml:"authorization"`
|
||||
FallbackProxyTo string `toml:"fallback-proxy-to"`
|
||||
FallbackInsecure bool `toml:"fallback-insecure"`
|
||||
Domain string `toml:"domain"`
|
||||
CloneURL string `toml:"clone-url"` // URL template, not an exact URL
|
||||
IndexRepo string `toml:"index-repo" default:"pages"`
|
||||
IndexRepoBranch string `toml:"index-repo-branch" default:"pages"`
|
||||
Authorization string `toml:"authorization"`
|
||||
}
|
||||
|
||||
type FallbackConfig struct {
|
||||
ProxyTo *URL `toml:"proxy-to"`
|
||||
Insecure bool `toml:"insecure"`
|
||||
}
|
||||
|
||||
type CacheConfig struct {
|
||||
@@ -104,16 +134,35 @@ type LimitsConfig struct {
|
||||
// Maximum time that an update operation (PUT or POST request) could take before being
|
||||
// interrupted.
|
||||
UpdateTimeout Duration `toml:"update-timeout" default:"60s"`
|
||||
// Maximum number of concurrent blob uploads, globally across every update request.
|
||||
ConcurrentUploads uint `toml:"concurrent-uploads" default:"1024"`
|
||||
// Soft limit on Go heap size, expressed as a fraction of total available RAM.
|
||||
MaxHeapSizeRatio float64 `toml:"max-heap-size-ratio" default:"0.5"`
|
||||
// List of domains unconditionally forbidden for uploads.
|
||||
ForbiddenDomains []string `toml:"forbidden-domains" default:"[]"`
|
||||
// List of allowed repository URL prefixes. Setting this option prohibits uploading archives.
|
||||
AllowedRepositoryURLPrefixes []string `toml:"allowed-repository-url-prefixes"`
|
||||
AllowedRepositoryURLPrefixes []string `toml:"allowed-repository-url-prefixes" default:"[]"`
|
||||
// List of allowed custom headers. Header name must be in the MIME canonical form,
|
||||
// e.g. `Foo-Bar`. Setting this option permits including this custom header in `_headers`,
|
||||
// unless it is fundamentally unsafe.
|
||||
AllowedCustomHeaders []string `toml:"allowed-custom-headers" default:"[\"X-Clacks-Overhead\"]"`
|
||||
// Whether to allow Netlify-style credentials specified in a `Basic-Auth:` pseudo-header.
|
||||
// These credentials are plaintext.
|
||||
AllowBasicAuth bool `toml:"allow-basic-auth" default:"false"`
|
||||
}
|
||||
|
||||
type AuditConfig struct {
|
||||
// Globally unique machine identifier (0 to 63 inclusive).
|
||||
NodeID int `toml:"node-id"`
|
||||
// Whether audit reports should be stored whenever an audit event occurs.
|
||||
Collect bool `toml:"collect"`
|
||||
// If not empty, includes the principal's IP address in audit reports, with the value specifying
|
||||
// the source of the IP address. If the value is "X-Forwarded-For", the last item of the
|
||||
// corresponding header field (assumed to be comma-separated) is used. If the value is
|
||||
// "RemoteAddr", the connecting host's address is used. Any other value is disallowed.
|
||||
IncludeIPs string `toml:"include-ip"`
|
||||
// Endpoint to notify with a `GET /<notify-url>?<id>` whenever an audit event occurs.
|
||||
NotifyURL *URL `toml:"notify-url"`
|
||||
}
|
||||
|
||||
type ObservabilityConfig struct {
|
||||
@@ -121,8 +170,8 @@ type ObservabilityConfig struct {
|
||||
SlowResponseThreshold Duration `toml:"slow-response-threshold" default:"500ms"`
|
||||
}
|
||||
|
||||
func (config *Config) DebugJSON() string {
|
||||
result, err := json.MarshalIndent(config, "", " ")
|
||||
func (config *Config) TOML() string {
|
||||
result, err := toml.Marshal(config)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -192,6 +241,11 @@ func setConfigValue(reflValue reflect.Value, repr string) (err error) {
|
||||
if valueCast, err = strconv.ParseBool(repr); err == nil {
|
||||
reflValue.SetBool(valueCast)
|
||||
}
|
||||
case int:
|
||||
var parsed int64
|
||||
if parsed, err = strconv.ParseInt(repr, 10, strconv.IntSize); err == nil {
|
||||
reflValue.SetInt(parsed)
|
||||
}
|
||||
case uint:
|
||||
var parsed uint64
|
||||
if parsed, err = strconv.ParseUint(repr, 10, strconv.IntSize); err == nil {
|
||||
@@ -205,15 +259,20 @@ func setConfigValue(reflValue reflect.Value, repr string) (err error) {
|
||||
if valueCast, err = datasize.ParseString(repr); err == nil {
|
||||
reflValue.Set(reflect.ValueOf(valueCast))
|
||||
}
|
||||
case time.Duration:
|
||||
if valueCast, err = time.ParseDuration(repr); err == nil {
|
||||
reflValue.Set(reflect.ValueOf(valueCast))
|
||||
}
|
||||
case Duration:
|
||||
var parsed time.Duration
|
||||
if parsed, err = time.ParseDuration(repr); err == nil {
|
||||
reflValue.Set(reflect.ValueOf(Duration(parsed)))
|
||||
}
|
||||
case *URL:
|
||||
if repr == "" {
|
||||
reflValue.Set(reflect.ValueOf(nil))
|
||||
} else {
|
||||
var parsed *url.URL
|
||||
if parsed, err = url.Parse(repr); err == nil {
|
||||
reflValue.Set(reflect.ValueOf(&URL{*parsed}))
|
||||
}
|
||||
}
|
||||
case []WildcardConfig:
|
||||
var parsed []*WildcardConfig
|
||||
decoder := json.NewDecoder(bytes.NewReader([]byte(repr)))
|
||||
@@ -250,25 +309,43 @@ func PrintConfigEnvVars() {
|
||||
})
|
||||
}
|
||||
|
||||
func Configure(tomlPath string) (config *Config, err error) {
|
||||
func PrettyTomlKey(key toml.Key) string {
|
||||
if len(key) == 1 {
|
||||
return key.String()
|
||||
} else {
|
||||
// `toml.Key.String()` adds quotes if necessary.
|
||||
return fmt.Sprintf("[%s].%s", key[:len(key)-1].String(), key[len(key)-1:].String())
|
||||
}
|
||||
}
|
||||
|
||||
func ReadConfigFile(config *Config, tomlPath string) (err error) {
|
||||
if tomlPath != "" {
|
||||
meta, err := toml.DecodeFile(tomlPath, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
unknownKeys := []string{}
|
||||
for _, key := range meta.Undecoded() {
|
||||
unknownKeys = append(unknownKeys, PrettyTomlKey(key))
|
||||
}
|
||||
if len(unknownKeys) > 0 {
|
||||
return fmt.Errorf("unknown keys: %s", strings.Join(unknownKeys, ", "))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Configure(tomlPaths ...string) (config *Config, err error) {
|
||||
// start with an all-default configuration
|
||||
config = new(Config)
|
||||
defaults.MustSet(config)
|
||||
|
||||
// inject values from `config.toml`
|
||||
if tomlPath != "" {
|
||||
var file *os.File
|
||||
file, err = os.Open(tomlPath)
|
||||
// inject values from each toml file
|
||||
for _, tomlPath := range tomlPaths {
|
||||
err := ReadConfigFile(config, tomlPath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
decoder := toml.NewDecoder(file)
|
||||
decoder.DisallowUnknownFields()
|
||||
decoder.EnableUnmarshalerInterface()
|
||||
if err = decoder.Decode(&config); err != nil {
|
||||
return
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
132
src/domain_cache.go
Normal file
132
src/domain_cache.go
Normal file
@@ -0,0 +1,132 @@
|
||||
package git_pages
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/bits-and-blooms/bloom/v3"
|
||||
)
|
||||
|
||||
type DomainCache interface {
|
||||
// Check if we might be serving the domain.
|
||||
CheckDomain(ctx context.Context, domain string) (found bool)
|
||||
|
||||
// Add the domain to the cache.
|
||||
AddDomain(ctx context.Context, domain string)
|
||||
}
|
||||
|
||||
func CreateDomainCache(ctx context.Context) (DomainCache, error) {
|
||||
if !config.Feature("domain-existence-cache") {
|
||||
return &dummyDomainCache{}, nil
|
||||
}
|
||||
return createBloomDomainCache(ctx)
|
||||
}
|
||||
|
||||
type bloomDomainCache struct {
|
||||
filter *bloom.BloomFilter
|
||||
filterMu sync.Mutex
|
||||
|
||||
accessCh chan struct{}
|
||||
refreshMu sync.Mutex
|
||||
lastRefresh time.Time
|
||||
maxAge time.Duration
|
||||
}
|
||||
|
||||
func createBloomDomainCache(ctx context.Context) (DomainCache, error) {
|
||||
cache := bloomDomainCache{
|
||||
accessCh: make(chan struct{}),
|
||||
}
|
||||
|
||||
switch config.Storage.Type {
|
||||
case "fs":
|
||||
// the FS backend has no cache
|
||||
case "s3":
|
||||
cache.maxAge = time.Duration(config.Storage.S3.SiteCache.MaxAge)
|
||||
default:
|
||||
panic(fmt.Errorf("unknown backend: %s", config.Storage.Type))
|
||||
}
|
||||
|
||||
if err := cache.refresh(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
go cache.handleFilterUpdates(ctx)
|
||||
|
||||
return &cache, nil
|
||||
}
|
||||
|
||||
func (c *bloomDomainCache) handleFilterUpdates(ctx context.Context) {
|
||||
for range c.accessCh {
|
||||
if time.Since(c.lastRefresh) > c.maxAge {
|
||||
logc.Print(ctx, "domain cache: refreshing")
|
||||
if err := c.refresh(ctx); err != nil {
|
||||
logc.Printf(ctx, "domain cache: refresh error: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *bloomDomainCache) refresh(ctx context.Context) error {
|
||||
c.refreshMu.Lock()
|
||||
defer c.refreshMu.Unlock()
|
||||
|
||||
if changed, err := backend.HaveDomainsChanged(ctx, c.lastRefresh); err != nil {
|
||||
return err
|
||||
} else if !changed {
|
||||
logc.Print(ctx, "domain cache: unchanged")
|
||||
c.lastRefresh = time.Now()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create a 256 KiB Bloom filter that will fit ~150K entries with 0.1% false positive rate.
|
||||
filter := bloom.New(256*1024, 10)
|
||||
for metadata, err := range backend.EnumerateManifests(ctx) {
|
||||
if err != nil {
|
||||
return fmt.Errorf("enum manifests: %w", err)
|
||||
}
|
||||
domain, _, _ := strings.Cut(metadata.Name, "/")
|
||||
filter.AddString(domain)
|
||||
}
|
||||
|
||||
c.filterMu.Lock()
|
||||
c.filter = filter
|
||||
c.filterMu.Unlock()
|
||||
|
||||
logc.Printf(ctx, "domain cache: refreshed with approx. %d domains", filter.ApproximatedSize())
|
||||
c.lastRefresh = time.Now()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *bloomDomainCache) CheckDomain(ctx context.Context, domain string) (found bool) {
|
||||
select {
|
||||
case c.accessCh <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
|
||||
c.filterMu.Lock()
|
||||
found = c.filter.TestString(domain)
|
||||
c.filterMu.Unlock()
|
||||
|
||||
logc.Printf(ctx, "domain cache: bloom filter returns %v for %q", found, domain)
|
||||
return
|
||||
}
|
||||
|
||||
func (c *bloomDomainCache) AddDomain(ctx context.Context, domain string) {
|
||||
c.refreshMu.Lock()
|
||||
defer c.refreshMu.Unlock()
|
||||
|
||||
c.filterMu.Lock()
|
||||
c.filter.AddString(domain)
|
||||
c.filterMu.Unlock()
|
||||
|
||||
logc.Printf(ctx, "domain cache: added %q", domain)
|
||||
}
|
||||
|
||||
// dummyDomainCache is a no-op DomainCache: every domain is reported as
// present and additions are ignored.
type dummyDomainCache struct{}

// CheckDomain always reports true (no negative caching).
func (d dummyDomainCache) CheckDomain(context.Context, string) bool { return true }

// AddDomain is a no-op.
func (d dummyDomainCache) AddDomain(context.Context, string) {}
|
||||
231
src/extract.go
231
src/extract.go
@@ -5,32 +5,89 @@ import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/c2h5oh/datasize"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var ErrArchiveTooLarge = errors.New("archive too large")
|
||||
|
||||
func ExtractTar(reader io.Reader) (*Manifest, error) {
|
||||
// If the tar stream is itself compressed, both the outer and the inner bounds checks
|
||||
// are load-bearing.
|
||||
boundedReader := ReadAtMost(reader, int64(config.Limits.MaxSiteSize.Bytes()),
|
||||
func boundArchiveStream(reader io.Reader) io.Reader {
|
||||
return ReadAtMost(reader, int64(config.Limits.MaxSiteSize.Bytes()),
|
||||
fmt.Errorf("%w: %s limit exceeded", ErrArchiveTooLarge, config.Limits.MaxSiteSize.HR()))
|
||||
}
|
||||
|
||||
archive := tar.NewReader(boundedReader)
|
||||
|
||||
manifest := Manifest{
|
||||
Contents: map[string]*Entry{
|
||||
"": {Type: Type_Directory.Enum()},
|
||||
},
|
||||
func ExtractGzip(
|
||||
ctx context.Context, reader io.Reader,
|
||||
next func(context.Context, io.Reader) (*Manifest, error),
|
||||
) (*Manifest, error) {
|
||||
stream, err := gzip.NewReader(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer stream.Close()
|
||||
|
||||
return next(ctx, boundArchiveStream(stream))
|
||||
}
|
||||
|
||||
func ExtractZstd(
|
||||
ctx context.Context, reader io.Reader,
|
||||
next func(context.Context, io.Reader) (*Manifest, error),
|
||||
) (*Manifest, error) {
|
||||
stream, err := zstd.NewReader(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer stream.Close()
|
||||
|
||||
return next(ctx, boundArchiveStream(stream))
|
||||
}
|
||||
|
||||
// normalizeArchiveMemberName canonicalizes an archive member path into a
// manifest key: no leading slash, no "." or ".." segments, and "" for the
// archive root.
//
// Rooting the path before cleaning ("/"+name) makes path.Clean resolve and
// discard leading ".." segments, so a hostile archive entry such as
// "../../etc/passwd" cannot name anything outside the manifest root
// (zip-slip). The previous path.Clean(name) alone left "../foo" intact.
func normalizeArchiveMemberName(fileName string) string {
	fileName = path.Clean("/" + fileName)
	fileName = strings.TrimPrefix(fileName, "/")
	if fileName == "." {
		// Defensive only: Clean of a rooted path never yields "." after the
		// leading slash is trimmed, but keep the root mapping explicit.
		fileName = ""
	}
	return fileName
}
|
||||
|
||||
func addSymlinkOrBlobReference(
|
||||
manifest *Manifest, fileName string, target string,
|
||||
index map[string]*Entry, missing *[]string,
|
||||
) *Entry {
|
||||
if hash, found := strings.CutPrefix(target, BlobReferencePrefix); found {
|
||||
if entry, found := index[hash]; found {
|
||||
manifest.Contents[fileName] = entry
|
||||
return entry
|
||||
} else {
|
||||
*missing = append(*missing, hash)
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
return AddSymlink(manifest, fileName, target)
|
||||
}
|
||||
}
|
||||
|
||||
func ExtractTar(ctx context.Context, reader io.Reader, oldManifest *Manifest) (*Manifest, error) {
|
||||
archive := tar.NewReader(reader)
|
||||
|
||||
var dataBytesRecycled int64
|
||||
var dataBytesTransferred int64
|
||||
|
||||
index := IndexManifestByGitHash(oldManifest)
|
||||
missing := []string{}
|
||||
manifest := NewManifest()
|
||||
hardLinks := map[string]*Entry{}
|
||||
for {
|
||||
header, err := archive.Next()
|
||||
if err == io.EOF {
|
||||
@@ -39,70 +96,67 @@ func ExtractTar(reader io.Reader) (*Manifest, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// For some reason, GNU tar includes any leading `.` path segments in archive filenames,
|
||||
// unless there is a `..` path segment anywhere in the input filenames.
|
||||
fileName := header.Name
|
||||
for {
|
||||
if strippedName, found := strings.CutPrefix(fileName, "./"); found {
|
||||
fileName = strippedName
|
||||
} else {
|
||||
break
|
||||
}
|
||||
fileName := normalizeArchiveMemberName(header.Name)
|
||||
if fileName == "" {
|
||||
// This must be the root directory. It will be filled in by EnsureLeadingDirectories.
|
||||
continue
|
||||
}
|
||||
|
||||
manifestEntry := Entry{}
|
||||
switch header.Typeflag {
|
||||
case tar.TypeReg:
|
||||
fileData, err := io.ReadAll(archive)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tar: %s: %w", fileName, err)
|
||||
}
|
||||
|
||||
manifestEntry.Type = Type_InlineFile.Enum()
|
||||
manifestEntry.Size = proto.Int64(header.Size)
|
||||
manifestEntry.Data = fileData
|
||||
|
||||
entry := AddFile(manifest, fileName, fileData)
|
||||
hardLinks[header.Name] = entry
|
||||
dataBytesTransferred += int64(len(fileData))
|
||||
case tar.TypeSymlink:
|
||||
manifestEntry.Type = Type_Symlink.Enum()
|
||||
manifestEntry.Size = proto.Int64(header.Size)
|
||||
manifestEntry.Data = []byte(header.Linkname)
|
||||
|
||||
entry := addSymlinkOrBlobReference(
|
||||
manifest, fileName, header.Linkname, index, &missing)
|
||||
hardLinks[header.Name] = entry
|
||||
switch {
|
||||
case entry == nil:
|
||||
// unresolved blob reference
|
||||
case entry.GetType() != Type_Symlink:
|
||||
dataBytesRecycled += entry.GetOriginalSize() // resolved blob reference
|
||||
default:
|
||||
dataBytesTransferred += int64(len(header.Linkname)) // actual symlink
|
||||
}
|
||||
case tar.TypeLink:
|
||||
if entry, found := hardLinks[header.Linkname]; found {
|
||||
manifest.Contents[fileName] = entry
|
||||
} else {
|
||||
AddProblem(manifest, fileName, "tar: invalid hardlink %q", header.Linkname)
|
||||
}
|
||||
case tar.TypeDir:
|
||||
manifestEntry.Type = Type_Directory.Enum()
|
||||
fileName = strings.TrimSuffix(fileName, "/")
|
||||
|
||||
AddDirectory(manifest, fileName)
|
||||
default:
|
||||
AddProblem(&manifest, fileName, "unsupported type '%c'", header.Typeflag)
|
||||
AddProblem(manifest, fileName, "tar: unsupported type '%c'", header.Typeflag)
|
||||
continue
|
||||
}
|
||||
manifest.Contents[fileName] = &manifestEntry
|
||||
}
|
||||
return &manifest, nil
|
||||
|
||||
if len(missing) > 0 {
|
||||
return nil, UnresolvedRefError{missing}
|
||||
}
|
||||
|
||||
// Ensure parent directories exist for all entries.
|
||||
EnsureLeadingDirectories(manifest)
|
||||
|
||||
logc.Printf(ctx,
|
||||
"reuse: %s recycled, %s transferred\n",
|
||||
datasize.ByteSize(dataBytesRecycled).HR(),
|
||||
datasize.ByteSize(dataBytesTransferred).HR(),
|
||||
)
|
||||
|
||||
return manifest, nil
|
||||
}
|
||||
|
||||
func ExtractTarGzip(reader io.Reader) (*Manifest, error) {
|
||||
stream, err := gzip.NewReader(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer stream.Close()
|
||||
// Used for zstd decompression inside zip files, it is recommended to share this.
|
||||
var zstdDecomp = zstd.ZipDecompressor()
|
||||
|
||||
// stream length is limited in `ExtractTar`
|
||||
return ExtractTar(stream)
|
||||
}
|
||||
|
||||
func ExtractTarZstd(reader io.Reader) (*Manifest, error) {
|
||||
stream, err := zstd.NewReader(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer stream.Close()
|
||||
|
||||
// stream length is limited in `ExtractTar`
|
||||
return ExtractTar(stream)
|
||||
}
|
||||
|
||||
func ExtractZip(reader io.Reader) (*Manifest, error) {
|
||||
func ExtractZip(ctx context.Context, reader io.Reader, oldManifest *Manifest) (*Manifest, error) {
|
||||
data, err := io.ReadAll(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -113,9 +167,18 @@ func ExtractZip(reader io.Reader) (*Manifest, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Support zstd compression inside zip files.
|
||||
archive.RegisterDecompressor(zstd.ZipMethodWinZip, zstdDecomp)
|
||||
archive.RegisterDecompressor(zstd.ZipMethodPKWare, zstdDecomp)
|
||||
|
||||
// Detect and defuse zipbombs.
|
||||
var totalSize uint64
|
||||
for _, file := range archive.File {
|
||||
if totalSize+file.UncompressedSize64 < totalSize {
|
||||
// Would overflow
|
||||
totalSize = math.MaxUint64
|
||||
break
|
||||
}
|
||||
totalSize += file.UncompressedSize64
|
||||
}
|
||||
if totalSize > config.Limits.MaxSiteSize.Bytes() {
|
||||
@@ -126,14 +189,17 @@ func ExtractZip(reader io.Reader) (*Manifest, error) {
|
||||
)
|
||||
}
|
||||
|
||||
manifest := Manifest{
|
||||
Contents: map[string]*Entry{
|
||||
"": {Type: Type_Directory.Enum()},
|
||||
},
|
||||
}
|
||||
var dataBytesRecycled int64
|
||||
var dataBytesTransferred int64
|
||||
|
||||
index := IndexManifestByGitHash(oldManifest)
|
||||
missing := []string{}
|
||||
manifest := NewManifest()
|
||||
for _, file := range archive.File {
|
||||
manifestEntry := Entry{}
|
||||
if !strings.HasSuffix(file.Name, "/") {
|
||||
normalizedName := normalizeArchiveMemberName(file.Name)
|
||||
if strings.HasSuffix(file.Name, "/") {
|
||||
AddDirectory(manifest, normalizedName)
|
||||
} else {
|
||||
fileReader, err := file.Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -146,16 +212,35 @@ func ExtractZip(reader io.Reader) (*Manifest, error) {
|
||||
}
|
||||
|
||||
if file.Mode()&os.ModeSymlink != 0 {
|
||||
manifestEntry.Type = Type_Symlink.Enum()
|
||||
entry := addSymlinkOrBlobReference(
|
||||
manifest, normalizedName, string(fileData), index, &missing)
|
||||
switch {
|
||||
case entry == nil:
|
||||
// unresolved blob reference
|
||||
case entry.GetType() != Type_Symlink:
|
||||
dataBytesRecycled += entry.GetOriginalSize() // resolved blob reference
|
||||
default:
|
||||
dataBytesTransferred += int64(len(fileData)) // actual symlink
|
||||
}
|
||||
} else {
|
||||
manifestEntry.Type = Type_InlineFile.Enum()
|
||||
AddFile(manifest, normalizedName, fileData)
|
||||
dataBytesTransferred += int64(len(fileData))
|
||||
}
|
||||
manifestEntry.Size = proto.Int64(int64(file.UncompressedSize64))
|
||||
manifestEntry.Data = fileData
|
||||
} else {
|
||||
manifestEntry.Type = Type_Directory.Enum()
|
||||
}
|
||||
manifest.Contents[strings.TrimSuffix(file.Name, "/")] = &manifestEntry
|
||||
}
|
||||
return &manifest, nil
|
||||
|
||||
if len(missing) > 0 {
|
||||
return nil, UnresolvedRefError{missing}
|
||||
}
|
||||
|
||||
// Ensure parent directories exist for all entries.
|
||||
EnsureLeadingDirectories(manifest)
|
||||
|
||||
logc.Printf(ctx,
|
||||
"reuse: %s recycled, %s transferred\n",
|
||||
datasize.ByteSize(dataBytesRecycled).HR(),
|
||||
datasize.ByteSize(dataBytesTransferred).HR(),
|
||||
)
|
||||
|
||||
return manifest, nil
|
||||
}
|
||||
|
||||
247
src/fetch.go
247
src/fetch.go
@@ -2,45 +2,79 @@ package git_pages
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"maps"
|
||||
"net/url"
|
||||
"os"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/c2h5oh/datasize"
|
||||
"github.com/go-git/go-billy/v6/osfs"
|
||||
"github.com/go-git/go-git/v6"
|
||||
"github.com/go-git/go-git/v6/plumbing"
|
||||
"github.com/go-git/go-git/v6/plumbing/cache"
|
||||
"github.com/go-git/go-git/v6/plumbing/client"
|
||||
"github.com/go-git/go-git/v6/plumbing/filemode"
|
||||
"github.com/go-git/go-git/v6/plumbing/object"
|
||||
"github.com/go-git/go-git/v6/plumbing/protocol/packp"
|
||||
"github.com/go-git/go-git/v6/plumbing/transport"
|
||||
"github.com/go-git/go-git/v6/storage/filesystem"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func FetchRepository(ctx context.Context, repoURL string, branch string) (*Manifest, error) {
|
||||
var ErrRepositoryTooLarge = errors.New("repository too large")
|
||||
|
||||
func FetchRepository(
|
||||
ctx context.Context, repoURL string, branch string, oldManifest *Manifest,
|
||||
) (
|
||||
*Manifest, error,
|
||||
) {
|
||||
span, ctx := ObserveFunction(ctx, "FetchRepository",
|
||||
"git.repository", repoURL, "git.branch", branch)
|
||||
defer span.Finish()
|
||||
|
||||
baseDir, err := os.MkdirTemp("", "fetchRepo")
|
||||
parsedRepoURL, err := url.Parse(repoURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("mkdtemp: %w", err)
|
||||
return nil, fmt.Errorf("URL parse: %w", err)
|
||||
}
|
||||
defer os.RemoveAll(baseDir)
|
||||
|
||||
fs := osfs.New(baseDir, osfs.WithBoundOS())
|
||||
cache := cache.NewObjectLRUDefault()
|
||||
storer := filesystem.NewStorageWithOptions(fs, cache, filesystem.Options{
|
||||
ExclusiveAccess: true,
|
||||
LargeObjectThreshold: int64(config.Limits.GitLargeObjectThreshold.Bytes()),
|
||||
})
|
||||
repo, err := git.CloneContext(ctx, storer, nil, &git.CloneOptions{
|
||||
Bare: true,
|
||||
URL: repoURL,
|
||||
ReferenceName: plumbing.ReferenceName(branch),
|
||||
SingleBranch: true,
|
||||
Depth: 1,
|
||||
Tags: git.NoTags,
|
||||
})
|
||||
var repo *git.Repository
|
||||
var storer *filesystem.Storage
|
||||
for _, filter := range []packp.Filter{packp.FilterBlobNone(), packp.Filter("")} {
|
||||
var tempDir string
|
||||
if tempDir, err = os.MkdirTemp("", "fetchRepo"); err != nil {
|
||||
return nil, fmt.Errorf("mkdtemp: %w", err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
storer = filesystem.NewStorageWithOptions(
|
||||
osfs.New(tempDir, osfs.WithBoundOS()),
|
||||
cache.NewObjectLRUDefault(),
|
||||
filesystem.Options{
|
||||
ExclusiveAccess: true,
|
||||
LargeObjectThreshold: int64(config.Limits.GitLargeObjectThreshold.Bytes()),
|
||||
},
|
||||
)
|
||||
repo, err = git.CloneContext(ctx, storer, nil, &git.CloneOptions{
|
||||
Bare: true,
|
||||
URL: repoURL,
|
||||
ReferenceName: plumbing.NewBranchReferenceName(branch),
|
||||
SingleBranch: true,
|
||||
Depth: 1,
|
||||
Tags: git.NoTags,
|
||||
Filter: filter,
|
||||
})
|
||||
if err != nil {
|
||||
logc.Printf(ctx, "clone err: %s %s filter=%q\n", repoURL, branch, filter)
|
||||
continue
|
||||
} else {
|
||||
logc.Printf(ctx, "clone ok: %s %s filter=%q\n", repoURL, branch, filter)
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("git clone: %w", err)
|
||||
}
|
||||
@@ -63,14 +97,13 @@ func FetchRepository(ctx context.Context, repoURL string, branch string) (*Manif
|
||||
walker := object.NewTreeWalker(tree, true, make(map[plumbing.Hash]bool))
|
||||
defer walker.Close()
|
||||
|
||||
manifest := Manifest{
|
||||
RepoUrl: proto.String(repoURL),
|
||||
Branch: proto.String(branch),
|
||||
Commit: proto.String(ref.Hash().String()),
|
||||
Contents: map[string]*Entry{
|
||||
"": {Type: Type_Directory.Enum()},
|
||||
},
|
||||
}
|
||||
// Create a manifest for the tree object corresponding to `branch`, but do not populate it
|
||||
// with data yet; instead, record all the blobs we'll need.
|
||||
manifest := NewManifest()
|
||||
manifest.RepoUrl = proto.String(repoURL)
|
||||
manifest.Branch = proto.String(branch)
|
||||
manifest.Commit = proto.String(ref.Hash().String())
|
||||
blobsNeeded := map[plumbing.Hash]*Entry{}
|
||||
for {
|
||||
name, entry, err := walker.Next()
|
||||
if err == io.EOF {
|
||||
@@ -78,39 +111,155 @@ func FetchRepository(ctx context.Context, repoURL string, branch string) (*Manif
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("git walker: %w", err)
|
||||
} else {
|
||||
manifestEntry := Entry{}
|
||||
if entry.Mode.IsFile() {
|
||||
blob, err := repo.BlobObject(entry.Hash)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("git blob %s: %w", name, err)
|
||||
}
|
||||
|
||||
reader, err := blob.Reader()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("git blob open: %w", err)
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
data, err := io.ReadAll(reader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("git blob read: %w", err)
|
||||
}
|
||||
|
||||
manifestEntry := &Entry{}
|
||||
if existingManifestEntry, found := blobsNeeded[entry.Hash]; found {
|
||||
// If the same blob is present twice, we only need to fetch it once (and both
|
||||
// instances will alias the same `Entry` structure in the manifest).
|
||||
manifestEntry = existingManifestEntry
|
||||
} else if entry.Mode.IsFile() {
|
||||
blobsNeeded[entry.Hash] = manifestEntry
|
||||
if entry.Mode == filemode.Symlink {
|
||||
manifestEntry.Type = Type_Symlink.Enum()
|
||||
} else {
|
||||
manifestEntry.Type = Type_InlineFile.Enum()
|
||||
}
|
||||
manifestEntry.Size = proto.Int64(blob.Size)
|
||||
manifestEntry.Data = data
|
||||
manifestEntry.GitHash = proto.String(entry.Hash.String())
|
||||
} else if entry.Mode == filemode.Dir {
|
||||
manifestEntry.Type = Type_Directory.Enum()
|
||||
} else {
|
||||
AddProblem(&manifest, name, "unsupported mode %#o", entry.Mode)
|
||||
AddProblem(manifest, name, "unsupported mode %#o", entry.Mode)
|
||||
continue
|
||||
}
|
||||
manifest.Contents[name] = &manifestEntry
|
||||
manifest.Contents[name] = manifestEntry
|
||||
}
|
||||
}
|
||||
|
||||
// Collect checkout statistics.
|
||||
var dataBytesRecycled int64
|
||||
var dataBytesTransferred int64
|
||||
|
||||
// First, see if we can extract the blobs from the old manifest. This is the preferred option
|
||||
// because it avoids both network transfers and recompression. Note that we do not request
|
||||
// blobs from the backend under any circumstances to avoid creating a blob existence oracle.
|
||||
for _, oldManifestEntry := range oldManifest.GetContents() {
|
||||
if hash, ok := plumbing.FromHex(oldManifestEntry.GetGitHash()); ok {
|
||||
if manifestEntry, found := blobsNeeded[hash]; found {
|
||||
manifestEntry.Reset()
|
||||
proto.Merge(manifestEntry, oldManifestEntry)
|
||||
dataBytesRecycled += oldManifestEntry.GetOriginalSize()
|
||||
delete(blobsNeeded, hash)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Second, fill the manifest entries with data from the git checkout we just made.
|
||||
// This will only succeed if a `blob:none` filter isn't supported and we got a full
|
||||
// clone despite asking for a partial clone.
|
||||
for hash, manifestEntry := range blobsNeeded {
|
||||
if err := readGitBlob(repo, hash, manifestEntry, &dataBytesTransferred); err == nil {
|
||||
delete(blobsNeeded, hash)
|
||||
} else if errors.Is(err, ErrRepositoryTooLarge) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Third, if we still don't have data for some manifest entries, re-establish a git transport
|
||||
// and request the missing blobs (only) from the server.
|
||||
if len(blobsNeeded) > 0 {
|
||||
gitClient := client.New()
|
||||
request := &transport.Request{
|
||||
URL: parsedRepoURL,
|
||||
Command: transport.UploadPackService}
|
||||
|
||||
session, err := gitClient.Handshake(ctx, request)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("git connection: %w", err)
|
||||
}
|
||||
defer session.Close()
|
||||
|
||||
if err := session.Fetch(ctx, storer, &transport.FetchRequest{
|
||||
Wants: slices.Collect(maps.Keys(blobsNeeded)),
|
||||
Depth: 1,
|
||||
// Git CLI behaves like this, even if the wants above are references to blobs.
|
||||
Filter: "blob:none",
|
||||
}); err != nil && !errors.Is(err, transport.ErrNoChange) {
|
||||
return nil, fmt.Errorf("git blob fetch request: %w", err)
|
||||
}
|
||||
|
||||
// All remaining blobs should now be available.
|
||||
for hash, manifestEntry := range blobsNeeded {
|
||||
if err := readGitBlob(repo, hash, manifestEntry, &dataBytesTransferred); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
delete(blobsNeeded, hash)
|
||||
}
|
||||
}
|
||||
|
||||
logc.Printf(ctx,
|
||||
"reuse: %s recycled, %s transferred\n",
|
||||
datasize.ByteSize(dataBytesRecycled).HR(),
|
||||
datasize.ByteSize(dataBytesTransferred).HR(),
|
||||
)
|
||||
|
||||
warnAboutGitLFS(ctx, manifest)
|
||||
|
||||
return manifest, nil
|
||||
}
|
||||
|
||||
// readGitBlob loads the blob identified by hash from the local clone and
// fills entry with its raw data, updating the running transfer counter and
// enforcing the per-site size limit.
//
// The entry must already have been typed as an inline file or a symlink by
// the tree walk; any other type indicates a bookkeeping bug, hence the panic.
func readGitBlob(
	repo *git.Repository, hash plumbing.Hash, entry *Entry, bytesTransferred *int64,
) error {
	blob, err := repo.BlobObject(hash)
	if err != nil {
		return fmt.Errorf("git blob %s: %w", hash, err)
	}

	reader, err := blob.Reader()
	if err != nil {
		return fmt.Errorf("git blob open: %w", err)
	}
	defer reader.Close()

	data, err := io.ReadAll(reader)
	if err != nil {
		return fmt.Errorf("git blob read: %w", err)
	}

	// Validate the invariant before mutating the entry.
	switch entry.GetType() {
	case Type_InlineFile, Type_Symlink:
		// okay
	default:
		panic(fmt.Errorf("readGitBlob encountered invalid entry: %v, %v",
			entry.GetType(), entry.GetTransform()))
	}

	// The data is stored untransformed, so original and compressed sizes are
	// both the blob size.
	entry.Data = data
	entry.Transform = Transform_Identity.Enum()
	entry.OriginalSize = proto.Int64(blob.Size)
	entry.CompressedSize = proto.Int64(blob.Size)

	// Enforce the cumulative transfer limit across all blobs of this fetch.
	*bytesTransferred += blob.Size
	if uint64(*bytesTransferred) > config.Limits.MaxSiteSize.Bytes() {
		return fmt.Errorf("%w: fetch exceeds %s limit",
			ErrRepositoryTooLarge,
			config.Limits.MaxSiteSize.HR(),
		)
	}

	return nil
}
|
||||
|
||||
func warnAboutGitLFS(ctx context.Context, manifest *Manifest) {
|
||||
gitattributes := ReadGitAttributes(ctx, manifest)
|
||||
for _, name := range slices.Sorted(maps.Keys(manifest.GetContents())) {
|
||||
entry := manifest.GetContents()[name]
|
||||
if !IsEntryRegularFile(entry) {
|
||||
continue
|
||||
}
|
||||
parts := strings.Split(name, "/")
|
||||
attrs, _ := gitattributes.Match(parts, nil)
|
||||
if attr, ok := attrs["filter"]; ok && attr.Value() == "lfs" {
|
||||
AddProblem(manifest, name, "git-pages does not support Git LFS; move this file into Git or use incremental uploads")
|
||||
}
|
||||
}
|
||||
return &manifest, nil
|
||||
}
|
||||
|
||||
16
src/flock_other.go
Normal file
16
src/flock_other.go
Normal file
@@ -0,0 +1,16 @@
|
||||
//go:build !unix
|
||||
|
||||
package git_pages
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
// FileLock is the fallback for non-Unix platforms, where advisory file
// locking is not implemented; it always fails.
func FileLock(file *os.File) error {
	return fmt.Errorf("unimplemented")
}
|
||||
|
||||
// FileUnlock is the fallback for non-Unix platforms, where advisory file
// locking is not implemented; it always fails.
func FileUnlock(file *os.File) error {
	return fmt.Errorf("unimplemented")
}
|
||||
16
src/flock_posix.go
Normal file
16
src/flock_posix.go
Normal file
@@ -0,0 +1,16 @@
|
||||
//go:build unix
|
||||
|
||||
package git_pages
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// FileLock takes an exclusive advisory lock on file, blocking until the lock
// becomes available. The wait is restarted when a signal interrupts it
// (EINTR), which can happen routinely under the Go runtime's asynchronous
// preemption signals; the previous single-shot call surfaced EINTR as a
// spurious failure.
func FileLock(file *os.File) error {
	for {
		err := syscall.Flock(int(file.Fd()), syscall.LOCK_EX)
		if err != syscall.EINTR {
			return err
		}
	}
}
|
||||
|
||||
// FileUnlock releases the advisory lock previously taken with FileLock,
// retrying if the call is interrupted by a signal (EINTR).
func FileUnlock(file *os.File) error {
	for {
		err := syscall.Flock(int(file.Fd()), syscall.LOCK_UN)
		if err != syscall.EINTR {
			return err
		}
	}
}
|
||||
84
src/garbage.go
Normal file
84
src/garbage.go
Normal file
@@ -0,0 +1,84 @@
|
||||
package git_pages
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/c2h5oh/datasize"
|
||||
)
|
||||
|
||||
// TraceGarbage walks every blob in the backend, marks the ones still
// referenced by a site manifest or an audit record, and logs totals for all,
// live, and dead (unreferenced) blobs. It only reports; nothing is deleted.
func TraceGarbage(ctx context.Context) error {
	// blob name -> size, for the full set and for the reachable subset.
	allBlobs := map[string]int64{}
	liveBlobs := map[string]int64{}

	// reduceBlobs folds a blob map into its entry count and total byte size.
	reduceBlobs := func(data map[string]int64) (items, total int64) {
		for _, value := range data {
			items += 1
			total += value
		}
		return
	}

	// traceManifest marks every external-file blob referenced by manifest as
	// live; references to blobs absent from allBlobs are logged as dangling.
	traceManifest := func(manifestKind string, manifestName string, manifest *Manifest) error {
		for _, entry := range manifest.GetContents() {
			if entry.GetType() == Type_ExternalFile {
				// For external files, Data holds the backend blob name.
				blobName := string(entry.Data)
				if size, ok := allBlobs[blobName]; ok {
					liveBlobs[blobName] = size
				} else {
					logc.Printf(ctx, "trace manifest: %s/%s: dangling reference %s",
						manifestKind, manifestName, blobName)
				}
			}
		}
		return nil
	}

	// Enumerate all blobs.
	logc.Printf(ctx, "trace: enumerating blobs")
	for metadata, err := range backend.EnumerateBlobs(ctx) {
		if err != nil {
			return fmt.Errorf("trace blobs err: %w", err)
		}
		allBlobs[metadata.Name] = metadata.Size
	}

	// Enumerate blobs live via site manifests.
	logc.Printf(ctx, "trace: enumerating manifests")
	for item, err := range backend.GetAllManifests(ctx) {
		metadata, manifest := item.Splat()
		if err != nil {
			return fmt.Errorf("trace sites err: %w", err)
		}
		err = traceManifest("site", metadata.Name, manifest)
		if err != nil {
			return fmt.Errorf("trace sites err: %w", err)
		}
	}

	// Enumerate blobs live via audit records (which may pin old manifests).
	logc.Printf(ctx, "trace: enumerating audit records")
	auditIDs := backend.SearchAuditLog(ctx, SearchAuditLogOptions{})
	for record, err := range backend.GetAuditLogRecords(ctx, auditIDs) {
		if err != nil {
			return fmt.Errorf("trace audit err: %w", err)
		}
		if record.Manifest != nil {
			err = traceManifest("audit", record.GetAuditID().String(), record.Manifest)
			if err != nil {
				return fmt.Errorf("trace audit err: %w", err)
			}
		}
	}

	// Summarize: dead = all - live.
	allBlobsCount, allBlobsSize := reduceBlobs(allBlobs)
	liveBlobsCount, liveBlobsSize := reduceBlobs(liveBlobs)
	logc.Printf(ctx, "trace all: %d blobs, %s",
		allBlobsCount, datasize.ByteSize(allBlobsSize).HR())
	logc.Printf(ctx, "trace live: %d blobs, %s",
		liveBlobsCount, datasize.ByteSize(liveBlobsSize).HR())
	logc.Printf(ctx, "trace dead: %d blobs, %s",
		allBlobsCount-liveBlobsCount, datasize.ByteSize(allBlobsSize-liveBlobsSize).HR())

	return nil
}
|
||||
61
src/gitattributes.go
Normal file
61
src/gitattributes.go
Normal file
@@ -0,0 +1,61 @@
|
||||
package git_pages
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"context"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/go-git/go-git/v6/plumbing/format/gitattributes"
|
||||
)
|
||||
|
||||
// ReadGitAttributes collects every .gitattributes file present in the
// manifest and compiles them into a single matcher. Files are processed
// shallowest-first and the rule list is reversed at the end because
// gitattributes.Matcher applies rules in reverse, so deeper (more specific)
// files take precedence — matching git's own semantics.
func ReadGitAttributes(ctx context.Context, manifest *Manifest) gitattributes.Matcher {
	type entryPair struct {
		parts []string // the file's manifest path, split on "/"
		entry *Entry
	}

	// Collect all .gitattributes files.
	var files []entryPair
	for name, entry := range manifest.GetContents() {
		switch entry.GetType() {
		case Type_InlineFile, Type_ExternalFile:
			parts := strings.Split(name, "/")
			if parts[len(parts)-1] == ".gitattributes" {
				files = append(files, entryPair{parts, entry})
			}
		}
	}

	// Sort the file list by depth, then by name, so the resulting rule order
	// is deterministic despite random map iteration.
	slices.SortFunc(files, func(a entryPair, b entryPair) int {
		return cmp.Or(
			cmp.Compare(len(a.parts), len(b.parts)),
			slices.Compare(a.parts, b.parts),
		)
	})

	// Gather all .gitattributes rules, sorted by depth.
	var rules []gitattributes.MatchAttribute
	for _, pair := range files {
		parts, entry := pair.parts, pair.entry
		data, err := GetEntryContents(ctx, entry)
		if err != nil {
			// An unreadable .gitattributes file contributes no rules.
			continue
		}
		dirs := parts[:len(parts)-1]
		isRoot := len(parts) == 1
		newRules, err := gitattributes.ReadAttributes(bytes.NewReader(data), dirs, isRoot)
		if err != nil {
			AddProblem(manifest, strings.Join(parts, "/"), "parsing .gitattributes: %v", err)
			continue
		}
		rules = append(rules, newRules...)
	}

	// gitattributes.Matcher applies rules in reverse.
	slices.Reverse(rules)
	matcher := gitattributes.NewMatcher(rules)
	return matcher
}
|
||||
125
src/headers.go
125
src/headers.go
@@ -1,6 +1,7 @@
|
||||
package git_pages
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
@@ -14,6 +15,7 @@ import (
|
||||
)
|
||||
|
||||
var ErrHeaderNotAllowed = errors.New("custom header not allowed")
|
||||
var ErrBasicAuthNotAllowed = errors.New("basic authorization not allowed")
|
||||
|
||||
const HeadersFileName string = "_headers"
|
||||
|
||||
@@ -74,28 +76,40 @@ func validateHeaderRule(rule headers.Rule) error {
|
||||
if slices.Contains(unsafeHeaders, header) {
|
||||
return fmt.Errorf("rule sets header %q (fundamentally unsafe)", header)
|
||||
}
|
||||
if !slices.Contains(config.Limits.AllowedCustomHeaders, header) {
|
||||
return fmt.Errorf("rule sets header %q (not allowlisted)", header)
|
||||
}
|
||||
if !IsAllowedCustomHeader(header) { // make sure we don't desync
|
||||
panic(errors.New("header check inconsistency"))
|
||||
switch header {
|
||||
case "Basic-Auth":
|
||||
if !config.Limits.AllowBasicAuth {
|
||||
return fmt.Errorf("rule sets header %q (forbidden by policy)", header)
|
||||
}
|
||||
default:
|
||||
if !slices.Contains(config.Limits.AllowedCustomHeaders, header) {
|
||||
return fmt.Errorf("rule sets header %q (not allowlisted)", header)
|
||||
}
|
||||
if !IsAllowedCustomHeader(header) { // make sure we don't desync
|
||||
panic(errors.New("header check inconsistency"))
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Parses redirects file and injects rules into the manifest.
|
||||
func ProcessHeadersFile(manifest *Manifest) error {
|
||||
func ProcessHeadersFile(ctx context.Context, manifest *Manifest) error {
|
||||
headersEntry := manifest.Contents[HeadersFileName]
|
||||
delete(manifest.Contents, HeadersFileName)
|
||||
if headersEntry == nil {
|
||||
return nil
|
||||
} else if headersEntry.GetType() != Type_InlineFile {
|
||||
return AddProblem(manifest, HeadersFileName,
|
||||
"not a regular file")
|
||||
}
|
||||
|
||||
rules, err := headers.ParseString(string(headersEntry.GetData()))
|
||||
data, err := GetEntryContents(ctx, headersEntry)
|
||||
if errors.Is(err, ErrNotRegularFile) {
|
||||
return AddProblem(manifest, HeadersFileName,
|
||||
"not a regular file")
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rules, err := headers.ParseString(string(data))
|
||||
if err != nil {
|
||||
return AddProblem(manifest, HeadersFileName,
|
||||
"syntax error: %s", err)
|
||||
@@ -108,16 +122,52 @@ func ProcessHeadersFile(manifest *Manifest) error {
|
||||
continue
|
||||
}
|
||||
headerMap := []*Header{}
|
||||
credentials := []*BasicCredential{}
|
||||
hasBasicAuth := false
|
||||
for header, values := range rule.Headers {
|
||||
headerMap = append(headerMap, &Header{
|
||||
Name: proto.String(header),
|
||||
Values: values,
|
||||
})
|
||||
switch header {
|
||||
case "Basic-Auth":
|
||||
hasBasicAuth = true
|
||||
for _, value := range values {
|
||||
for _, usernamePassword := range strings.Split(value, " ") {
|
||||
if usernamePassword == "" {
|
||||
continue
|
||||
}
|
||||
if username, password, found := strings.Cut(usernamePassword, ":"); !found {
|
||||
AddProblem(manifest, HeadersFileName,
|
||||
"rule #%d %q: malformed Basic-Auth credential", index+1, rule.Path)
|
||||
continue
|
||||
} else {
|
||||
credentials = append(credentials, &BasicCredential{
|
||||
Username: proto.String(username),
|
||||
Password: proto.String(password),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
headerMap = append(headerMap, &Header{
|
||||
Name: proto.String(header),
|
||||
Values: values,
|
||||
})
|
||||
}
|
||||
}
|
||||
// Note that we may add an empty `headerMap` here even if only credentials are defined.
|
||||
// This is intentional: in `_headers` files processing terminates at the first matching
|
||||
// clause, and Netlify mixes Basic-Auth with all the other headers.
|
||||
manifest.Headers = append(manifest.Headers, &HeaderRule{
|
||||
Path: proto.String(rule.Path),
|
||||
HeaderMap: headerMap,
|
||||
})
|
||||
// We're using `hasBasicAuth` instead of `len(credentials) > 0` so that if a `_headers`
|
||||
// file defines only malformed credentials, we still add a rule (that in effect always
|
||||
// denies access).
|
||||
if hasBasicAuth {
|
||||
manifest.BasicAuth = append(manifest.BasicAuth, &BasicAuthRule{
|
||||
Path: proto.String(rule.Path),
|
||||
Credentials: credentials,
|
||||
})
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -137,13 +187,14 @@ func CollectHeadersFile(manifest *Manifest) string {
|
||||
return headers.Must(headers.UnparseString(headersRules))
|
||||
}
|
||||
|
||||
func ApplyHeaderRules(manifest *Manifest, url *url.URL) (headers http.Header, err error) {
|
||||
headers = http.Header{}
|
||||
func matchPathRules[
|
||||
Rule interface{ GetPath() string },
|
||||
](rules []Rule, url *url.URL) (matched Rule) {
|
||||
fromSegments := pathSegments(url.Path)
|
||||
next:
|
||||
for _, rule := range manifest.Headers {
|
||||
for _, rule := range rules {
|
||||
// check if the rule matches url
|
||||
ruleURL, _ := url.Parse(*rule.Path) // pre-validated in `validateHeaderRule`
|
||||
ruleURL, _ := url.Parse(rule.GetPath()) // pre-validated in `validateHeaderRule`
|
||||
ruleSegments := pathSegments(ruleURL.Path)
|
||||
if ruleSegments[len(ruleSegments)-1] != "*" {
|
||||
if len(ruleSegments) < len(fromSegments) {
|
||||
@@ -161,8 +212,19 @@ next:
|
||||
continue next
|
||||
}
|
||||
}
|
||||
matched = rule
|
||||
break
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func ApplyHeaderRules(manifest *Manifest, url *url.URL) (
|
||||
headers http.Header, err error,
|
||||
) {
|
||||
headers = http.Header{}
|
||||
if rule := matchPathRules(manifest.Headers, url); rule != nil {
|
||||
// the rule has matched url, validate headers against up-to-date policy
|
||||
for _, header := range rule.HeaderMap {
|
||||
for _, header := range rule.GetHeaderMap() {
|
||||
name := header.GetName()
|
||||
if !IsAllowedCustomHeader(name) {
|
||||
return nil, fmt.Errorf("%w: %s", ErrHeaderNotAllowed, name)
|
||||
@@ -171,7 +233,30 @@ next:
|
||||
headers.Add(name, value)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func ApplyBasicAuthRules(manifest *Manifest, url *url.URL, r *http.Request) (bool, error) {
|
||||
if rule := matchPathRules(manifest.BasicAuth, url); rule == nil {
|
||||
// no matches, authorized by default
|
||||
return true, nil
|
||||
} else {
|
||||
// the rule has matched url, check that basic auth is allowed per up-to-date policy
|
||||
if !config.Limits.AllowBasicAuth {
|
||||
// basic auth configured in the past but not allowed any more
|
||||
return false, ErrBasicAuthNotAllowed
|
||||
}
|
||||
if username, password, ok := r.BasicAuth(); ok {
|
||||
// request has credentials, check them
|
||||
for _, credential := range rule.GetCredentials() {
|
||||
if credential.GetUsername() == username && credential.GetPassword() == password {
|
||||
// authorized!
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
// request has no credentials, unauthorized
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
35
src/histogram.go
Normal file
35
src/histogram.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package git_pages
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"maps"
|
||||
"slices"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type DomainStatistics struct {
|
||||
Domain string
|
||||
OriginalSize int64
|
||||
CompressedSize int64
|
||||
StoredSize int64
|
||||
}
|
||||
|
||||
func SizeHistogram(ctx context.Context) ([]*DomainStatistics, error) {
|
||||
statisticsMap := map[string]*DomainStatistics{}
|
||||
for item, err := range backend.GetAllManifests(ctx) {
|
||||
metadata, manifest := item.Splat()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("size histogram err: %w", err)
|
||||
}
|
||||
domain, _, _ := strings.Cut(metadata.Name, "/")
|
||||
if _, found := statisticsMap[domain]; !found {
|
||||
statisticsMap[domain] = &DomainStatistics{Domain: domain}
|
||||
}
|
||||
statistics := statisticsMap[domain]
|
||||
statistics.OriginalSize += metadata.Size + manifest.GetOriginalSize()
|
||||
statistics.CompressedSize += metadata.Size + manifest.GetCompressedSize()
|
||||
statistics.StoredSize += metadata.Size + manifest.GetStoredSize()
|
||||
}
|
||||
return slices.Collect(maps.Values(statisticsMap)), nil
|
||||
}
|
||||
144
src/http.go
144
src/http.go
@@ -2,43 +2,96 @@ package git_pages
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var httpAcceptEncodingRegexp = regexp.MustCompile(`` +
|
||||
var httpAcceptRegexp = regexp.MustCompile(`` +
|
||||
// token optionally prefixed by whitespace
|
||||
`^[ \t]*([a-zA-Z0-9$!#$%&'*+.^_\x60|~-]+)` +
|
||||
`^[ \t]*([a-zA-Z0-9$!#$%&'*+./^_\x60|~-]+)` +
|
||||
// quality value prefixed by a semicolon optionally surrounded by whitespace
|
||||
`(?:[ \t]*;[ \t]*q=(0(?:\.[0-9]{1,3})?|1(?:\.0{1,3})?))?` +
|
||||
// optional whitespace followed by comma or end of line
|
||||
`[ \t]*(?:,|$)`,
|
||||
)
|
||||
|
||||
type httpEncoding struct {
|
||||
type httpAcceptOffer struct {
|
||||
code string
|
||||
qval float64
|
||||
}
|
||||
|
||||
type httpEncodings struct {
|
||||
encodings []httpEncoding
|
||||
}
|
||||
|
||||
func parseHTTPEncodings(headerValue string) (result httpEncodings) {
|
||||
func parseGenericAcceptHeader(headerValue string) (result []httpAcceptOffer) {
|
||||
for headerValue != "" {
|
||||
matches := httpAcceptEncodingRegexp.FindStringSubmatch(headerValue)
|
||||
matches := httpAcceptRegexp.FindStringSubmatch(headerValue)
|
||||
if matches == nil {
|
||||
return httpEncodings{}
|
||||
return
|
||||
}
|
||||
enc := httpEncoding{strings.ToLower(matches[1]), 1.0}
|
||||
offer := httpAcceptOffer{strings.ToLower(matches[1]), 1.0}
|
||||
if matches[2] != "" {
|
||||
enc.qval, _ = strconv.ParseFloat(matches[2], 64)
|
||||
offer.qval, _ = strconv.ParseFloat(matches[2], 64)
|
||||
}
|
||||
result.encodings = append(result.encodings, enc)
|
||||
result = append(result, offer)
|
||||
headerValue = headerValue[len(matches[0]):]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func preferredAcceptOffer(offers []httpAcceptOffer) string {
|
||||
slices.SortStableFunc(offers, func(a, b httpAcceptOffer) int {
|
||||
return -cmp.Compare(a.qval, b.qval)
|
||||
})
|
||||
for _, offer := range offers {
|
||||
if offer.qval != 0 {
|
||||
return offer.code
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type HTTPContentTypes struct {
|
||||
contentTypes []httpAcceptOffer
|
||||
}
|
||||
|
||||
func ParseAcceptHeader(headerValue string) (result HTTPContentTypes) {
|
||||
if headerValue == "" {
|
||||
headerValue = "*/*"
|
||||
}
|
||||
result = HTTPContentTypes{parseGenericAcceptHeader(headerValue)}
|
||||
return
|
||||
}
|
||||
|
||||
func (e *HTTPContentTypes) Negotiate(offers ...string) string {
|
||||
prefs := make(map[string]float64, len(offers))
|
||||
for _, code := range offers {
|
||||
prefs[code] = 0
|
||||
}
|
||||
for _, ctyp := range e.contentTypes {
|
||||
if ctyp.code == "*/*" {
|
||||
for code := range prefs {
|
||||
prefs[code] = ctyp.qval
|
||||
}
|
||||
} else if _, ok := prefs[ctyp.code]; ok {
|
||||
prefs[ctyp.code] = ctyp.qval
|
||||
}
|
||||
}
|
||||
ctyps := make([]httpAcceptOffer, len(offers))
|
||||
for idx, code := range offers {
|
||||
ctyps[idx] = httpAcceptOffer{code, prefs[code]}
|
||||
}
|
||||
return preferredAcceptOffer(ctyps)
|
||||
}
|
||||
|
||||
type HTTPEncodings struct {
|
||||
encodings []httpAcceptOffer
|
||||
}
|
||||
|
||||
func ParseAcceptEncodingHeader(headerValue string) (result HTTPEncodings) {
|
||||
result = HTTPEncodings{parseGenericAcceptHeader(headerValue)}
|
||||
if len(result.encodings) == 0 {
|
||||
// RFC 9110 says (https://httpwg.org/specs/rfc9110.html#field.accept-encoding):
|
||||
// "If no Accept-Encoding header field is in the request, any content
|
||||
@@ -51,9 +104,9 @@ func parseHTTPEncodings(headerValue string) (result httpEncodings) {
|
||||
|
||||
// Negotiate returns the most preferred encoding that is acceptable by the
|
||||
// client, or an empty string if no encodings are acceptable.
|
||||
func (e *httpEncodings) Negotiate(codes ...string) string {
|
||||
prefs := make(map[string]float64, len(codes))
|
||||
for _, code := range codes {
|
||||
func (e *HTTPEncodings) Negotiate(offers ...string) string {
|
||||
prefs := make(map[string]float64, len(offers))
|
||||
for _, code := range offers {
|
||||
prefs[code] = 0
|
||||
}
|
||||
implicitIdentity := true
|
||||
@@ -73,17 +126,52 @@ func (e *httpEncodings) Negotiate(codes ...string) string {
|
||||
if _, ok := prefs["identity"]; ok && implicitIdentity {
|
||||
prefs["identity"] = -1 // sort last
|
||||
}
|
||||
encs := make([]httpEncoding, len(codes))
|
||||
for idx, code := range codes {
|
||||
encs[idx] = httpEncoding{code, prefs[code]}
|
||||
encs := make([]httpAcceptOffer, len(offers))
|
||||
for idx, code := range offers {
|
||||
encs[idx] = httpAcceptOffer{code, prefs[code]}
|
||||
}
|
||||
slices.SortStableFunc(encs, func(a, b httpEncoding) int {
|
||||
return -cmp.Compare(a.qval, b.qval)
|
||||
})
|
||||
for _, enc := range encs {
|
||||
if enc.qval != 0 {
|
||||
return enc.code
|
||||
}
|
||||
}
|
||||
return ""
|
||||
return preferredAcceptOffer(encs)
|
||||
}
|
||||
|
||||
func chainHTTPMiddleware(middleware ...func(http.Handler) http.Handler) func(http.Handler) http.Handler {
|
||||
return func(handler http.Handler) http.Handler {
|
||||
for idx := len(middleware) - 1; idx >= 0; idx-- {
|
||||
handler = middleware[idx](handler)
|
||||
}
|
||||
return handler
|
||||
}
|
||||
}
|
||||
|
||||
func remoteAddrMiddleware(handler http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
var readXForwardedFor bool
|
||||
switch config.Audit.IncludeIPs {
|
||||
case "X-Forwarded-For":
|
||||
readXForwardedFor = true
|
||||
case "RemoteAddr", "":
|
||||
readXForwardedFor = false
|
||||
default:
|
||||
panic(fmt.Errorf("config.Audit.IncludeIPs is set to an unknown value (%q)",
|
||||
config.Audit.IncludeIPs))
|
||||
}
|
||||
|
||||
usingOriginalRemoteAddr := true
|
||||
if readXForwardedFor {
|
||||
forwardedFor := strings.Split(r.Header.Get("X-Forwarded-For"), ",")
|
||||
if len(forwardedFor) > 0 {
|
||||
remoteAddr := strings.TrimSpace(forwardedFor[len(forwardedFor)-1])
|
||||
if remoteAddr != "" {
|
||||
r.RemoteAddr = remoteAddr
|
||||
usingOriginalRemoteAddr = false
|
||||
}
|
||||
}
|
||||
}
|
||||
if usingOriginalRemoteAddr {
|
||||
if ipAddress, _, err := net.SplitHostPort(r.RemoteAddr); err == nil {
|
||||
r.RemoteAddr = ipAddress
|
||||
}
|
||||
}
|
||||
|
||||
handler.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
54
src/log.go
Normal file
54
src/log.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package git_pages
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var logc slogWithCtx
|
||||
|
||||
type slogWithCtx struct{}
|
||||
|
||||
func (l slogWithCtx) log(ctx context.Context, level slog.Level, msg string) {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
logger := slog.Default()
|
||||
if !logger.Enabled(ctx, level) {
|
||||
return
|
||||
}
|
||||
|
||||
var pcs [1]uintptr
|
||||
// skip [runtime.Callers, this method, method calling this method]
|
||||
runtime.Callers(3, pcs[:])
|
||||
|
||||
record := slog.NewRecord(time.Now(), level, strings.TrimRight(msg, "\n"), pcs[0])
|
||||
logger.Handler().Handle(ctx, record)
|
||||
}
|
||||
|
||||
func (l slogWithCtx) Print(ctx context.Context, v ...any) {
|
||||
l.log(ctx, slog.LevelInfo, fmt.Sprint(v...))
|
||||
}
|
||||
|
||||
func (l slogWithCtx) Printf(ctx context.Context, format string, v ...any) {
|
||||
l.log(ctx, slog.LevelInfo, fmt.Sprintf(format, v...))
|
||||
}
|
||||
|
||||
func (l slogWithCtx) Println(ctx context.Context, v ...any) {
|
||||
l.log(ctx, slog.LevelInfo, fmt.Sprintln(v...))
|
||||
}
|
||||
|
||||
func (l slogWithCtx) Fatalf(ctx context.Context, format string, v ...any) {
|
||||
l.log(ctx, slog.LevelError, fmt.Sprintf(format, v...))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (l slogWithCtx) Fatalln(ctx context.Context, v ...any) {
|
||||
l.log(ctx, slog.LevelError, fmt.Sprintln(v...))
|
||||
os.Exit(1)
|
||||
}
|
||||
588
src/main.go
588
src/main.go
@@ -1,7 +1,9 @@
|
||||
package git_pages
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
@@ -10,28 +12,38 @@ import (
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"runtime/debug"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
automemlimit "github.com/KimMachineGun/automemlimit/memlimit"
|
||||
"github.com/c2h5oh/datasize"
|
||||
"github.com/fatih/color"
|
||||
"github.com/kankanreno/go-snowflake"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var config *Config
|
||||
var wildcards []*WildcardPattern
|
||||
var fallback http.Handler
|
||||
var backend Backend
|
||||
var domainCache DomainCache
|
||||
|
||||
func configureFeatures() (err error) {
|
||||
func configureFeatures(ctx context.Context) (err error) {
|
||||
if len(config.Features) > 0 {
|
||||
log.Println("features:", strings.Join(config.Features, ", "))
|
||||
logc.Println(ctx, "features:", strings.Join(config.Features, ", "))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func configureMemLimit() (err error) {
|
||||
func configureMemLimit(ctx context.Context) (err error) {
|
||||
// Avoid being OOM killed by not garbage collecting early enough.
|
||||
memlimitBefore := datasize.ByteSize(debug.SetMemoryLimit(-1))
|
||||
automemlimit.SetGoMemLimitWithOpts(
|
||||
@@ -46,14 +58,20 @@ func configureMemLimit() (err error) {
|
||||
)
|
||||
memlimitAfter := datasize.ByteSize(debug.SetMemoryLimit(-1))
|
||||
if memlimitBefore == memlimitAfter {
|
||||
log.Println("memlimit: now", memlimitBefore.HR())
|
||||
logc.Println(ctx, "memlimit: now", memlimitBefore.HR())
|
||||
} else {
|
||||
log.Println("memlimit: was", memlimitBefore.HR(), "now", memlimitAfter.HR())
|
||||
logc.Println(ctx, "memlimit: was", memlimitBefore.HR(), "now", memlimitAfter.HR())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func configureWildcards() (err error) {
|
||||
// Can only be safely called during initial configuration.
|
||||
func configureConcurrency(_ context.Context) (err error) {
|
||||
putBlobSemaphore = make(chan struct{}, config.Limits.ConcurrentUploads)
|
||||
return
|
||||
}
|
||||
|
||||
func configureWildcards(_ context.Context) (err error) {
|
||||
newWildcards, err := TranslateWildcards(config.Wildcard)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -63,19 +81,45 @@ func configureWildcards() (err error) {
|
||||
}
|
||||
}
|
||||
|
||||
func listen(name string, listen string) net.Listener {
|
||||
func configureFallback(_ context.Context) (err error) {
|
||||
if config.Fallback.ProxyTo != nil {
|
||||
fallbackURL := &config.Fallback.ProxyTo.URL
|
||||
fallback = &httputil.ReverseProxy{
|
||||
Rewrite: func(r *httputil.ProxyRequest) {
|
||||
r.SetURL(fallbackURL)
|
||||
r.Out.Host = r.In.Host
|
||||
r.Out.Header["X-Forwarded-For"] = r.In.Header["X-Forwarded-For"]
|
||||
},
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: config.Fallback.Insecure,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Thread-unsafe, must be called only during initial configuration.
|
||||
func configureAudit(_ context.Context) (err error) {
|
||||
snowflake.SetStartTime(AuditSnowflakeStartTime)
|
||||
snowflake.SetMachineID(config.Audit.NodeID)
|
||||
return
|
||||
}
|
||||
|
||||
func listen(ctx context.Context, name string, listen string) net.Listener {
|
||||
if listen == "-" {
|
||||
return nil
|
||||
}
|
||||
|
||||
protocol, address, ok := strings.Cut(listen, "/")
|
||||
if !ok {
|
||||
log.Fatalf("%s: %s: malformed endpoint", name, listen)
|
||||
logc.Fatalf(ctx, "%s: %s: malformed endpoint", name, listen)
|
||||
}
|
||||
|
||||
listener, err := net.Listen(protocol, address)
|
||||
if err != nil {
|
||||
log.Fatalf("%s: %s\n", name, err)
|
||||
logc.Fatalf(ctx, "%s: %s\n", name, err)
|
||||
}
|
||||
|
||||
return listener
|
||||
@@ -85,7 +129,10 @@ func panicHandler(handler http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
log.Printf("panic: %s %s %s: %s\n%s",
|
||||
if err, ok := err.(error); ok && errors.Is(err, http.ErrAbortHandler) {
|
||||
panic(http.ErrAbortHandler)
|
||||
}
|
||||
logc.Printf(r.Context(), "panic: %s %s %s: %s\n%s",
|
||||
r.Method, r.Host, r.URL.Path, err, string(debug.Stack()))
|
||||
http.Error(w,
|
||||
fmt.Sprintf("internal server error: %s", err),
|
||||
@@ -97,17 +144,13 @@ func panicHandler(handler http.Handler) http.Handler {
|
||||
})
|
||||
}
|
||||
|
||||
func serve(listener net.Listener, handler http.Handler) {
|
||||
func serve(ctx context.Context, listener net.Listener, handler http.Handler) {
|
||||
if listener != nil {
|
||||
handler = panicHandler(handler)
|
||||
|
||||
server := http.Server{Handler: handler}
|
||||
server.Protocols = new(http.Protocols)
|
||||
server.Protocols.SetHTTP1(true)
|
||||
if config.Feature("serve-h2c") {
|
||||
server.Protocols.SetUnencryptedHTTP2(true)
|
||||
}
|
||||
log.Fatalln(server.Serve(listener))
|
||||
server.Protocols.SetUnencryptedHTTP2(true)
|
||||
logc.Fatalln(ctx, server.Serve(listener))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -118,7 +161,8 @@ func webRootArg(arg string) string {
|
||||
case 1:
|
||||
return arg
|
||||
default:
|
||||
log.Fatalf("webroot argument must be either 'domain.tld' or 'domain.tld/dir")
|
||||
logc.Fatalln(context.Background(),
|
||||
"webroot argument must be either 'domain.tld' or 'domain.tld/dir")
|
||||
return ""
|
||||
}
|
||||
}
|
||||
@@ -130,7 +174,7 @@ func fileOutputArg() (writer io.WriteCloser) {
|
||||
} else {
|
||||
writer, err = os.Create(flag.Arg(0))
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
logc.Fatalln(context.Background(), err)
|
||||
}
|
||||
}
|
||||
return
|
||||
@@ -140,27 +184,43 @@ func usage() {
|
||||
fmt.Fprintf(os.Stderr, "Usage:\n")
|
||||
fmt.Fprintf(os.Stderr, "(server) "+
|
||||
"git-pages [-config <file>|-no-config]\n")
|
||||
fmt.Fprintf(os.Stderr, "(admin) "+
|
||||
"git-pages {-run-migration <name>}\n")
|
||||
fmt.Fprintf(os.Stderr, "(info) "+
|
||||
"git-pages {-print-config-env-vars|-print-config}\n")
|
||||
fmt.Fprintf(os.Stderr, "(cli) "+
|
||||
"git-pages {-version|-print-config-env-vars|-print-config}\n")
|
||||
fmt.Fprintf(os.Stderr, "(debug) "+
|
||||
"git-pages {-list-blobs|-list-manifests}\n")
|
||||
fmt.Fprintf(os.Stderr, "(debug) "+
|
||||
"git-pages {-get-blob|-get-manifest|-get-archive|-update-site} <ref> [file]\n")
|
||||
fmt.Fprintf(os.Stderr, "(admin) "+
|
||||
"git-pages {-freeze-domain|-unfreeze-domain} <domain>\n")
|
||||
fmt.Fprintf(os.Stderr, "(audit) "+
|
||||
"git-pages {-audit-log|-audit-read <id>|-audit-rollback <id>}\n")
|
||||
fmt.Fprintf(os.Stderr, "(audit) "+
|
||||
"git-pages {-audit-expire <days>|-audit-detach <domain>/<project>}\n")
|
||||
fmt.Fprintf(os.Stderr, "(audit) "+
|
||||
"git-pages -audit-server <endpoint> <program> [args...]\n")
|
||||
fmt.Fprintf(os.Stderr, "(maint) "+
|
||||
"git-pages {-run-migration <name>|-trace-garbage|-size-histogram {original|stored}}\n")
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
|
||||
func Main() {
|
||||
func Main(versionInfo string) {
|
||||
ctx := context.Background()
|
||||
|
||||
flag.Usage = usage
|
||||
configTomlPath := flag.String("config", "",
|
||||
"load configuration from `filename` (default: 'config.toml')")
|
||||
secretTomlPath := flag.String("secrets", "",
|
||||
"load additional configuration values from `filename` (default: '$CREDENTIALS_DIRECTORY/secrets.toml' if it exists)")
|
||||
noConfig := flag.Bool("no-config", false,
|
||||
"run without configuration file (configure via environment variables)")
|
||||
printConfigEnvVars := flag.Bool("print-config-env-vars", false,
|
||||
"print every recognized configuration environment variable and exit")
|
||||
printConfig := flag.Bool("print-config", false,
|
||||
"print configuration as JSON and exit")
|
||||
configTomlPath := flag.String("config", "",
|
||||
"load configuration from `filename` (default: 'config.toml')")
|
||||
noConfig := flag.Bool("no-config", false,
|
||||
"run without configuration file (configure via environment variables)")
|
||||
runMigration := flag.String("run-migration", "",
|
||||
"run a store `migration` (one of: create-domain-markers)")
|
||||
listBlobs := flag.Bool("list-blobs", false,
|
||||
"enumerate every blob with its metadata")
|
||||
listManifests := flag.Bool("list-manifests", false,
|
||||
"enumerate every manifest with its metadata")
|
||||
getBlob := flag.String("get-blob", "",
|
||||
"write contents of `blob` ('sha256-xxxxxxx...xxx')")
|
||||
getManifest := flag.String("get-manifest", "",
|
||||
@@ -169,24 +229,70 @@ func Main() {
|
||||
"write archive for `site` (either 'domain.tld' or 'domain.tld/dir') in tar format")
|
||||
updateSite := flag.String("update-site", "",
|
||||
"update `site` (either 'domain.tld' or 'domain.tld/dir') from archive or repository URL")
|
||||
freezeDomain := flag.String("freeze-domain", "",
|
||||
"prevent any site uploads to a given `domain`")
|
||||
unfreezeDomain := flag.String("unfreeze-domain", "",
|
||||
"allow site uploads to a `domain` again after it has been frozen")
|
||||
auditLog := flag.Bool("audit-log", false,
|
||||
"display audit log")
|
||||
auditRead := flag.String("audit-read", "",
|
||||
"extract contents of audit record `id` to files '<id>-*'")
|
||||
auditRollback := flag.String("audit-rollback", "",
|
||||
"restore site from contents of audit record `id`")
|
||||
auditExpire := flag.String("audit-expire", "",
|
||||
"expire audit records older than `days` old")
|
||||
auditDetach := flag.String("audit-detach", "",
|
||||
"detach all blobs of audit records for a single `site` (or the entire domain with 'domain.tld/*')")
|
||||
auditServer := flag.String("audit-server", "",
|
||||
"listen for notifications on `endpoint` and spawn a process for each audit event")
|
||||
runMigration := flag.String("run-migration", "",
|
||||
"run a store `migration` (one of: create-domain-markers)")
|
||||
sizeHistogram := flag.String("size-histogram", "",
|
||||
"display histogram of `size-type` (original or stored) per domain")
|
||||
traceGarbage := flag.Bool("trace-garbage", false,
|
||||
"estimate total size of unreachable blobs")
|
||||
version := flag.Bool("version", false,
|
||||
"display version")
|
||||
flag.Parse()
|
||||
|
||||
if *version {
|
||||
fmt.Printf("git-pages %s\n", versionInfo)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
var cliOperations int
|
||||
if *getBlob != "" {
|
||||
cliOperations += 1
|
||||
}
|
||||
if *getManifest != "" {
|
||||
cliOperations += 1
|
||||
}
|
||||
if *getArchive != "" {
|
||||
cliOperations += 1
|
||||
for _, selected := range []bool{
|
||||
*listBlobs,
|
||||
*listManifests,
|
||||
*getBlob != "",
|
||||
*getManifest != "",
|
||||
*getArchive != "",
|
||||
*updateSite != "",
|
||||
*freezeDomain != "",
|
||||
*unfreezeDomain != "",
|
||||
*auditLog,
|
||||
*auditRead != "",
|
||||
*auditRollback != "",
|
||||
*auditExpire != "",
|
||||
*auditDetach != "",
|
||||
*auditServer != "",
|
||||
*runMigration != "",
|
||||
*sizeHistogram != "",
|
||||
*traceGarbage,
|
||||
} {
|
||||
if selected {
|
||||
cliOperations++
|
||||
}
|
||||
}
|
||||
if cliOperations > 1 {
|
||||
log.Fatalln("-get-blob, -get-manifest, and -get-archive are mutually exclusive")
|
||||
logc.Fatalln(ctx, "-list-blobs, -list-manifests, -get-blob, -get-manifest, -get-archive, "+
|
||||
"-update-site, -freeze-domain, -unfreeze-domain, -audit-log, -audit-read, "+
|
||||
"-audit-rollback, -audit-expire, -audit-detach, -audit-server, -run-migration, "+
|
||||
"-size-histogram, and -trace-garbage are mutually exclusive")
|
||||
}
|
||||
|
||||
if *configTomlPath != "" && *noConfig {
|
||||
log.Fatalln("-no-config and -config are mutually exclusive")
|
||||
logc.Fatalln(ctx, "-no-config and -config are mutually exclusive")
|
||||
}
|
||||
|
||||
if *printConfigEnvVars {
|
||||
@@ -198,12 +304,24 @@ func Main() {
|
||||
if *configTomlPath == "" && !*noConfig {
|
||||
*configTomlPath = "config.toml"
|
||||
}
|
||||
if config, err = Configure(*configTomlPath); err != nil {
|
||||
log.Fatalln("config:", err)
|
||||
|
||||
if *secretTomlPath == "" && !*noConfig {
|
||||
// check for a second config file at $CREDENTIALS_DIRECTORY/secrets.toml, and use it
|
||||
if systemdCredentialsDir := os.Getenv("CREDENTIALS_DIRECTORY"); systemdCredentialsDir != "" {
|
||||
secretTomlTestPath := path.Join(systemdCredentialsDir, "secrets.toml")
|
||||
_, err := os.Stat(secretTomlTestPath)
|
||||
if !errors.Is(err, os.ErrNotExist) {
|
||||
*secretTomlPath = secretTomlTestPath
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if config, err = Configure(*configTomlPath, *secretTomlPath); err != nil {
|
||||
logc.Fatalln(ctx, "config:", err)
|
||||
}
|
||||
|
||||
if *printConfig {
|
||||
fmt.Println(config.DebugJSON())
|
||||
fmt.Println(config.TOML())
|
||||
return
|
||||
}
|
||||
|
||||
@@ -211,78 +329,96 @@ func Main() {
|
||||
defer FiniObservability()
|
||||
|
||||
if err = errors.Join(
|
||||
configureFeatures(),
|
||||
configureMemLimit(),
|
||||
configureWildcards(),
|
||||
configureFeatures(ctx),
|
||||
configureMemLimit(ctx),
|
||||
configureConcurrency(ctx),
|
||||
configureWildcards(ctx),
|
||||
configureFallback(ctx),
|
||||
configureAudit(ctx),
|
||||
); err != nil {
|
||||
log.Fatalln(err)
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
|
||||
// The server has its own logic for creating the backend.
|
||||
if cliOperations > 0 {
|
||||
if backend, err = CreateBackend(ctx, &config.Storage); err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
|
||||
if domainCache, err = CreateDomainCache(ctx); err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case *runMigration != "":
|
||||
if backend, err = CreateBackend(&config.Storage); err != nil {
|
||||
log.Fatalln(err)
|
||||
case *listBlobs:
|
||||
for metadata, err := range backend.EnumerateBlobs(ctx) {
|
||||
if err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
fmt.Fprintf(color.Output, "%s %s %s\n",
|
||||
metadata.Name,
|
||||
color.HiWhiteString(metadata.LastModified.UTC().Format(time.RFC3339)),
|
||||
color.HiGreenString(fmt.Sprint(metadata.Size)),
|
||||
)
|
||||
}
|
||||
|
||||
if err := RunMigration(context.Background(), *runMigration); err != nil {
|
||||
log.Fatalln(err)
|
||||
case *listManifests:
|
||||
for metadata, err := range backend.EnumerateManifests(ctx) {
|
||||
if err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
fmt.Fprintf(color.Output, "%s %s %s\n",
|
||||
metadata.Name,
|
||||
color.HiWhiteString(metadata.LastModified.UTC().Format(time.RFC3339)),
|
||||
color.HiGreenString(fmt.Sprint(metadata.Size)),
|
||||
)
|
||||
}
|
||||
|
||||
case *getBlob != "":
|
||||
if backend, err = CreateBackend(&config.Storage); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
reader, _, _, err := backend.GetBlob(context.Background(), *getBlob)
|
||||
reader, _, err := backend.GetBlob(ctx, *getBlob)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
io.Copy(fileOutputArg(), reader)
|
||||
|
||||
case *getManifest != "":
|
||||
if backend, err = CreateBackend(&config.Storage); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
webRoot := webRootArg(*getManifest)
|
||||
manifest, _, err := backend.GetManifest(context.Background(), webRoot, GetManifestOptions{})
|
||||
manifest, _, err := backend.GetManifest(ctx, webRoot, GetManifestOptions{})
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
fmt.Fprintln(fileOutputArg(), ManifestDebugJSON(manifest))
|
||||
fmt.Fprintln(fileOutputArg(), string(ManifestJSON(manifest)))
|
||||
|
||||
case *getArchive != "":
|
||||
if backend, err = CreateBackend(&config.Storage); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
webRoot := webRootArg(*getArchive)
|
||||
manifest, manifestMtime, err :=
|
||||
backend.GetManifest(context.Background(), webRoot, GetManifestOptions{})
|
||||
manifest, metadata, err :=
|
||||
backend.GetManifest(ctx, webRoot, GetManifestOptions{})
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
if err = CollectTar(ctx, fileOutputArg(), manifest, metadata); err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
CollectTar(context.Background(), fileOutputArg(), manifest, manifestMtime)
|
||||
|
||||
case *updateSite != "":
|
||||
if backend, err = CreateBackend(&config.Storage); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
ctx = WithPrincipal(ctx)
|
||||
GetPrincipal(ctx).CliAdmin = proto.Bool(true)
|
||||
|
||||
if flag.NArg() != 1 {
|
||||
log.Fatalln("update source must be provided as the argument")
|
||||
logc.Fatalln(ctx, "update source must be provided as the argument")
|
||||
}
|
||||
|
||||
sourceURL, err := url.Parse(flag.Arg(0))
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
|
||||
var result UpdateResult
|
||||
if sourceURL.Scheme == "" {
|
||||
file, err := os.Open(sourceURL.Path)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
@@ -301,7 +437,7 @@ func Main() {
|
||||
}
|
||||
|
||||
webRoot := webRootArg(*updateSite)
|
||||
result = UpdateFromArchive(context.Background(), webRoot, contentType, file)
|
||||
result = UpdateFromArchive(ctx, webRoot, "", contentType, file)
|
||||
} else {
|
||||
branch := "pages"
|
||||
if sourceURL.Fragment != "" {
|
||||
@@ -309,24 +445,254 @@ func Main() {
|
||||
}
|
||||
|
||||
webRoot := webRootArg(*updateSite)
|
||||
result = UpdateFromRepository(context.Background(), webRoot, sourceURL.String(), branch)
|
||||
result = UpdateFromRepository(ctx, webRoot, sourceURL.String(), branch)
|
||||
}
|
||||
|
||||
switch result.outcome {
|
||||
case UpdateError:
|
||||
log.Printf("error: %s\n", result.err)
|
||||
logc.Printf(ctx, "error: %s\n", result.err)
|
||||
os.Exit(2)
|
||||
case UpdateTimeout:
|
||||
log.Println("timeout")
|
||||
logc.Println(ctx, "timeout")
|
||||
os.Exit(1)
|
||||
case UpdateCreated:
|
||||
log.Println("created")
|
||||
logc.Println(ctx, "created")
|
||||
case UpdateReplaced:
|
||||
log.Println("replaced")
|
||||
logc.Println(ctx, "replaced")
|
||||
case UpdateDeleted:
|
||||
log.Println("deleted")
|
||||
logc.Println(ctx, "deleted")
|
||||
case UpdateNoChange:
|
||||
log.Println("no-change")
|
||||
logc.Println(ctx, "no-change")
|
||||
}
|
||||
|
||||
case *freezeDomain != "" || *unfreezeDomain != "":
|
||||
ctx = WithPrincipal(ctx)
|
||||
GetPrincipal(ctx).CliAdmin = proto.Bool(true)
|
||||
|
||||
var domain string
|
||||
var freeze bool
|
||||
if *freezeDomain != "" {
|
||||
domain = *freezeDomain
|
||||
freeze = true
|
||||
} else {
|
||||
domain = *unfreezeDomain
|
||||
freeze = false
|
||||
}
|
||||
|
||||
if freeze {
|
||||
if err = backend.FreezeDomain(ctx, domain); err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
logc.Println(ctx, "frozen")
|
||||
} else {
|
||||
if err = backend.UnfreezeDomain(ctx, domain); err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
logc.Println(ctx, "thawed")
|
||||
}
|
||||
|
||||
case *auditLog:
|
||||
records := []*AuditRecord{}
|
||||
ids := backend.SearchAuditLog(ctx, SearchAuditLogOptions{})
|
||||
for record, err := range backend.GetAuditLogRecords(ctx, ids) {
|
||||
if err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
records = append(records, record)
|
||||
}
|
||||
|
||||
slices.SortFunc(records, func(a, b *AuditRecord) int {
|
||||
return cmp.Compare(a.GetAuditID(), b.GetAuditID())
|
||||
})
|
||||
|
||||
for _, record := range records {
|
||||
parts := []string{
|
||||
record.GetAuditID().String(),
|
||||
color.HiWhiteString(record.GetTimestamp().AsTime().UTC().Format(time.RFC3339)),
|
||||
fmt.Sprint(record.GetEvent()),
|
||||
color.HiGreenString(record.DescribeResource()),
|
||||
color.HiMagentaString(record.DescribePrincipal()),
|
||||
}
|
||||
if record.IsDetached() {
|
||||
parts = append(parts,
|
||||
color.HiYellowString("(detached)"),
|
||||
)
|
||||
}
|
||||
fmt.Fprintln(color.Output, strings.Join(parts, " "))
|
||||
}
|
||||
|
||||
case *auditRead != "":
|
||||
id, err := ParseAuditID(*auditRead)
|
||||
if err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
|
||||
record, err := backend.QueryAuditLog(ctx, id)
|
||||
if err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
|
||||
if err = ExtractAuditRecord(ctx, id, record, "."); err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
|
||||
case *auditRollback != "":
|
||||
ctx = WithPrincipal(ctx)
|
||||
GetPrincipal(ctx).CliAdmin = proto.Bool(true)
|
||||
|
||||
id, err := ParseAuditID(*auditRollback)
|
||||
if err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
|
||||
record, err := backend.QueryAuditLog(ctx, id)
|
||||
if err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
|
||||
if record.GetManifest() == nil || record.GetDomain() == "" || record.GetProject() == "" {
|
||||
logc.Fatalln(ctx, "no manifest in audit record")
|
||||
}
|
||||
|
||||
webRoot := path.Join(record.GetDomain(), record.GetProject())
|
||||
err = backend.StageManifest(ctx, record.GetManifest())
|
||||
if err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
err = backend.CommitManifest(ctx, webRoot, record.GetManifest(), ModifyManifestOptions{})
|
||||
if err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
|
||||
case *auditDetach != "":
|
||||
domain, project, found := strings.Cut(*auditDetach, "/")
|
||||
if !found || domain == "" || project == "" {
|
||||
logc.Fatalln(ctx, "argument to -audit-detach must be in the form of "+
|
||||
"'domain.tld/project' or 'domain.tld/*'")
|
||||
}
|
||||
|
||||
if project != "*" && project != ".index" {
|
||||
if err := ValidateProjectName(project); err != nil {
|
||||
logc.Fatalf(ctx, "audit detach: project name: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
count := 0
|
||||
ids := backend.SearchAuditLog(ctx, SearchAuditLogOptions{})
|
||||
for record, err := range backend.GetAuditLogRecords(ctx, ids) {
|
||||
if err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
if record.GetDomain() == domain && (project == "*" || record.GetProject() == project) {
|
||||
if !record.IsDetachable() {
|
||||
continue
|
||||
} else if !record.IsDetached() {
|
||||
logc.Printf(ctx, "detaching audit record %s\n", record.GetAuditID())
|
||||
err = backend.DetachAuditRecord(ctx, record.GetAuditID())
|
||||
if err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
count++
|
||||
} else {
|
||||
logc.Printf(ctx, "audit record %s already detached\n", record.GetAuditID())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
logc.Printf(ctx, "no detachable audit records found for %s/%s", domain, project)
|
||||
}
|
||||
|
||||
case *auditServer != "":
|
||||
if flag.NArg() < 1 {
|
||||
logc.Fatalln(ctx, "handler path not provided")
|
||||
}
|
||||
|
||||
processor, err := AuditEventProcessor(flag.Arg(0), flag.Args()[1:])
|
||||
if err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
|
||||
serve(ctx, listen(ctx, "audit", *auditServer), ObserveHTTPHandler(processor))
|
||||
|
||||
case *auditExpire != "":
|
||||
days, err := strconv.ParseInt(*auditExpire, 10, 0)
|
||||
if err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
|
||||
ids := backend.SearchAuditLog(ctx, SearchAuditLogOptions{
|
||||
Until: time.Now().AddDate(0, 0, int(-days)),
|
||||
})
|
||||
|
||||
count := 0
|
||||
for id, err := range ids {
|
||||
if err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
continue
|
||||
}
|
||||
|
||||
err = backend.ExpireAuditRecord(ctx, id)
|
||||
if err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
} else {
|
||||
logc.Printf(ctx, "audit: expired record %s\n", id)
|
||||
count += 1
|
||||
}
|
||||
}
|
||||
|
||||
logc.Printf(ctx, "audit: expired %d records\n", count)
|
||||
|
||||
case *runMigration != "":
|
||||
if err = RunMigration(ctx, *runMigration); err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
|
||||
case *sizeHistogram != "":
|
||||
extractSize := func(s *DomainStatistics) int64 { return 0 }
|
||||
switch *sizeHistogram {
|
||||
case "original":
|
||||
// Displays a size histogram using the `manifest.OriginalSize`, which is useful to see
|
||||
// which site is the closest to hitting the size limit (checked against apparent size).
|
||||
// This apparent size does not have any direct relationship with used storage.
|
||||
extractSize = func(s *DomainStatistics) int64 { return s.OriginalSize }
|
||||
case "stored":
|
||||
// Displays a size histogram using the `manifest.StoredSize`, which is useful to see
|
||||
// which site consumes the most resources. The site is keeping at least this many
|
||||
// bytes worth of blobs alive, but removing it may not free any space because
|
||||
// deduplication is global.
|
||||
extractSize = func(s *DomainStatistics) int64 { return s.StoredSize }
|
||||
default:
|
||||
logc.Fatalln(ctx, "unknown histogram type")
|
||||
}
|
||||
|
||||
histogram, err := SizeHistogram(ctx)
|
||||
if err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
slices.SortFunc(histogram, func(a *DomainStatistics, b *DomainStatistics) int {
|
||||
return cmp.Compare(extractSize(a), extractSize(b))
|
||||
})
|
||||
|
||||
if len(histogram) > 0 {
|
||||
fullScaleSize := max(extractSize(histogram[len(histogram)-1]), 1)
|
||||
fullScaleWidth := int64(40)
|
||||
for _, statistics := range histogram {
|
||||
size := extractSize(statistics)
|
||||
barWidth := size * fullScaleWidth / fullScaleSize
|
||||
spaceWidth := fullScaleWidth - barWidth
|
||||
bar := strings.Repeat("*", int(barWidth)) + strings.Repeat(" ", int(spaceWidth))
|
||||
fmt.Fprintf(color.Output, "%s %s %s\n",
|
||||
color.HiBlackString(fmt.Sprint("|", bar, "|")),
|
||||
statistics.Domain,
|
||||
color.HiGreenString(datasize.ByteSize(extractSize(statistics)).HR()),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
case *traceGarbage:
|
||||
if err = TraceGarbage(ctx); err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
|
||||
default:
|
||||
@@ -338,8 +704,8 @@ func Main() {
|
||||
// Note that not all of the configuration is updated on reload. Listeners are kept as-is.
|
||||
// The backend is not recreated (this is intentional as it allows preserving the cache).
|
||||
OnReload(func() {
|
||||
if newConfig, err := Configure(*configTomlPath); err != nil {
|
||||
log.Println("config: reload err:", err)
|
||||
if newConfig, err := Configure(*configTomlPath, *secretTomlPath); err != nil {
|
||||
logc.Println(ctx, "config: reload err:", err)
|
||||
} else {
|
||||
// From https://go.dev/ref/mem:
|
||||
// > A read r of a memory location x holding a value that is not larger than
|
||||
@@ -349,15 +715,16 @@ func Main() {
|
||||
// > concurrent write.
|
||||
config = newConfig
|
||||
if err = errors.Join(
|
||||
configureFeatures(),
|
||||
configureMemLimit(),
|
||||
configureWildcards(),
|
||||
configureFeatures(ctx),
|
||||
configureMemLimit(ctx),
|
||||
configureWildcards(ctx),
|
||||
configureFallback(ctx),
|
||||
); err != nil {
|
||||
// At this point the configuration is in an in-between, corrupted state, so
|
||||
// the only reasonable choice is to crash.
|
||||
log.Fatalln("config: reload fail:", err)
|
||||
logc.Fatalln(ctx, "config: reload fail:", err)
|
||||
} else {
|
||||
log.Println("config: reload ok")
|
||||
logc.Println(ctx, "config: reload ok")
|
||||
}
|
||||
}
|
||||
})
|
||||
@@ -366,24 +733,35 @@ func Main() {
|
||||
// spends some time initializing (which the S3 backend does) a proxy like Caddy can race
|
||||
// with git-pages on startup and return errors for requests that would have been served
|
||||
// just 0.5s later.
|
||||
pagesListener := listen("pages", config.Server.Pages)
|
||||
caddyListener := listen("caddy", config.Server.Caddy)
|
||||
metricsListener := listen("metrics", config.Server.Metrics)
|
||||
pagesListener := listen(ctx, "pages", config.Server.Pages)
|
||||
caddyListener := listen(ctx, "caddy", config.Server.Caddy)
|
||||
metricsListener := listen(ctx, "metrics", config.Server.Metrics)
|
||||
|
||||
if backend, err = CreateBackend(&config.Storage); err != nil {
|
||||
log.Fatalln(err)
|
||||
if backend, err = CreateBackend(ctx, &config.Storage); err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
backend = NewObservedBackend(backend)
|
||||
|
||||
go serve(pagesListener, ObserveHTTPHandler(http.HandlerFunc(ServePages)))
|
||||
go serve(caddyListener, ObserveHTTPHandler(http.HandlerFunc(ServeCaddy)))
|
||||
go serve(metricsListener, promhttp.Handler())
|
||||
if domainCache, err = CreateDomainCache(ctx); err != nil {
|
||||
logc.Fatalln(ctx, err)
|
||||
}
|
||||
|
||||
middleware := chainHTTPMiddleware(
|
||||
panicHandler,
|
||||
remoteAddrMiddleware,
|
||||
ObserveHTTPHandler,
|
||||
)
|
||||
go serve(ctx, pagesListener, middleware(http.HandlerFunc(ServePages)))
|
||||
go serve(ctx, caddyListener, middleware(http.HandlerFunc(ServeCaddy)))
|
||||
go serve(ctx, metricsListener, promhttp.Handler())
|
||||
|
||||
if config.Insecure {
|
||||
log.Println("serve: ready (INSECURE)")
|
||||
logc.Println(ctx, "serve: ready (INSECURE)")
|
||||
} else {
|
||||
log.Println("serve: ready")
|
||||
logc.Println(ctx, "serve: ready")
|
||||
}
|
||||
select {}
|
||||
|
||||
WaitForInterrupt()
|
||||
logc.Println(ctx, "serve: exiting")
|
||||
}
|
||||
}
|
||||
|
||||
299
src/manifest.go
299
src/manifest.go
@@ -8,7 +8,7 @@ import (
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"io"
|
||||
"mime"
|
||||
"net/http"
|
||||
"path"
|
||||
@@ -18,6 +18,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/c2h5oh/datasize"
|
||||
"github.com/go-git/go-git/v6/plumbing"
|
||||
format "github.com/go-git/go-git/v6/plumbing/format/config"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
@@ -37,6 +39,14 @@ var (
|
||||
})
|
||||
)
|
||||
|
||||
func NewManifest() *Manifest {
|
||||
return &Manifest{
|
||||
Contents: map[string]*Entry{
|
||||
"": {Type: Type_Directory.Enum()},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func IsManifestEmpty(manifest *Manifest) bool {
|
||||
if len(manifest.Contents) > 1 {
|
||||
return false
|
||||
@@ -69,47 +79,164 @@ func CompareManifest(left *Manifest, right *Manifest) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func EncodeManifest(manifest *Manifest) []byte {
|
||||
result, err := proto.MarshalOptions{Deterministic: true}.Marshal(manifest)
|
||||
func EncodeManifest(manifest *Manifest) (data []byte) {
|
||||
data, err := proto.MarshalOptions{Deterministic: true}.Marshal(manifest)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return result
|
||||
return
|
||||
}
|
||||
|
||||
func DecodeManifest(data []byte) (*Manifest, error) {
|
||||
manifest := Manifest{}
|
||||
err := proto.Unmarshal(data, &manifest)
|
||||
return &manifest, err
|
||||
func DecodeManifest(data []byte) (manifest *Manifest, err error) {
|
||||
manifest = &Manifest{}
|
||||
err = proto.Unmarshal(data, manifest)
|
||||
return
|
||||
}
|
||||
|
||||
func AddProblem(manifest *Manifest, path, format string, args ...any) error {
|
||||
func NewManifestEntry(type_ Type, data []byte) *Entry {
|
||||
entry := &Entry{}
|
||||
entry.Type = type_.Enum()
|
||||
if data != nil {
|
||||
entry.Data = data
|
||||
entry.Transform = Transform_Identity.Enum()
|
||||
entry.OriginalSize = proto.Int64(int64(len(data)))
|
||||
entry.CompressedSize = proto.Int64(int64(len(data)))
|
||||
}
|
||||
return entry
|
||||
}
|
||||
|
||||
func AddFile(manifest *Manifest, fileName string, data []byte) *Entry {
|
||||
// Fill in `git_hash` even for files not originating from git using the SHA256 algorithm;
|
||||
// we use this primarily for incremental archive uploads, but when support for git SHA256
|
||||
// repositories is complete, archive uploads and git checkouts will have cross-support for
|
||||
// incremental updates.
|
||||
hasher := plumbing.NewHasher(format.SHA256, plumbing.BlobObject, int64(len(data)))
|
||||
hasher.Write(data)
|
||||
entry := NewManifestEntry(Type_InlineFile, data)
|
||||
entry.GitHash = proto.String(hasher.Sum().String())
|
||||
manifest.Contents[fileName] = entry
|
||||
return entry
|
||||
}
|
||||
|
||||
func AddSymlink(manifest *Manifest, fileName string, target string) *Entry {
|
||||
if path.IsAbs(target) {
|
||||
AddProblem(manifest, fileName, "absolute symlink: %s", target)
|
||||
return nil
|
||||
} else {
|
||||
entry := NewManifestEntry(Type_Symlink, []byte(target))
|
||||
manifest.Contents[fileName] = entry
|
||||
return entry
|
||||
}
|
||||
}
|
||||
|
||||
func AddDirectory(manifest *Manifest, dirName string) *Entry {
|
||||
dirName = strings.TrimSuffix(dirName, "/")
|
||||
entry := NewManifestEntry(Type_Directory, nil)
|
||||
manifest.Contents[dirName] = entry
|
||||
return entry
|
||||
}
|
||||
|
||||
func AddProblem(manifest *Manifest, pathName, format string, args ...any) error {
|
||||
cause := fmt.Sprintf(format, args...)
|
||||
manifest.Problems = append(manifest.Problems, &Problem{
|
||||
Path: proto.String(path),
|
||||
Path: proto.String(pathName),
|
||||
Cause: proto.String(cause),
|
||||
})
|
||||
return fmt.Errorf("%s: %s", path, cause)
|
||||
return fmt.Errorf("%s: %s", pathName, cause)
|
||||
}
|
||||
|
||||
// Returns a map of git hash to entry. If `manifest` is nil, returns an empty map.
|
||||
func IndexManifestByGitHash(manifest *Manifest) map[string]*Entry {
|
||||
index := map[string]*Entry{}
|
||||
for _, entry := range manifest.GetContents() {
|
||||
if hash := entry.GetGitHash(); hash != "" {
|
||||
if _, ok := plumbing.FromHex(hash); ok {
|
||||
index[hash] = entry
|
||||
} else {
|
||||
panic(fmt.Errorf("index: malformed hash: %s", hash))
|
||||
}
|
||||
}
|
||||
}
|
||||
return index
|
||||
}
|
||||
|
||||
func ManifestHasBasicAuth(manifest *Manifest) bool {
|
||||
return len(manifest.GetBasicAuth()) > 0
|
||||
}
|
||||
|
||||
func IsEntryRegularFile(entry *Entry) bool {
|
||||
return entry.GetType() == Type_InlineFile ||
|
||||
entry.GetType() == Type_ExternalFile
|
||||
}
|
||||
|
||||
var ErrNotRegularFile = errors.New("not a regular file")
|
||||
|
||||
func GetEntryContents(ctx context.Context, entry *Entry) (data []byte, err error) {
|
||||
switch entry.GetType() {
|
||||
case Type_InlineFile:
|
||||
data = entry.GetData()
|
||||
case Type_ExternalFile:
|
||||
reader, _, err := backend.GetBlob(ctx, string(entry.GetData()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
data, err = io.ReadAll(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, ErrNotRegularFile
|
||||
}
|
||||
|
||||
switch entry.GetTransform() {
|
||||
case Transform_Identity:
|
||||
case Transform_Zstd:
|
||||
data, err = zstdDecoder.DecodeAll(data, []byte{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected transform")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// EnsureLeadingDirectories adds directory entries for any parent directories
|
||||
// that are implicitly referenced by files in the manifest but don't have
|
||||
// explicit directory entries. (This can be the case if an archive is created
|
||||
// via globs rather than including a whole directory.)
|
||||
func EnsureLeadingDirectories(manifest *Manifest) {
|
||||
for name := range manifest.Contents {
|
||||
for dir := path.Dir(name); dir != "." && dir != ""; dir = path.Dir(dir) {
|
||||
if dir == "/" {
|
||||
panic("malformed manifest (paths must not be rooted in /)")
|
||||
}
|
||||
if _, exists := manifest.Contents[dir]; !exists {
|
||||
AddDirectory(manifest, dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func GetProblemReport(manifest *Manifest) []string {
|
||||
var report []string
|
||||
for _, problem := range manifest.Problems {
|
||||
report = append(report,
|
||||
fmt.Sprintf("%s: %s", problem.GetPath(), problem.GetCause()))
|
||||
fmt.Sprintf("/%s: %s", problem.GetPath(), problem.GetCause()))
|
||||
}
|
||||
return report
|
||||
}
|
||||
|
||||
func ManifestDebugJSON(manifest *Manifest) string {
|
||||
result, err := protojson.MarshalOptions{
|
||||
func ManifestJSON(manifest *Manifest) []byte {
|
||||
json, err := protojson.MarshalOptions{
|
||||
Multiline: true,
|
||||
EmitDefaultValues: true,
|
||||
}.Marshal(manifest)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return string(result)
|
||||
return json
|
||||
}
|
||||
|
||||
var ErrSymlinkLoop = errors.New("symbolic link loop")
|
||||
@@ -145,20 +272,20 @@ func DetectContentType(manifest *Manifest) {
|
||||
for path, entry := range manifest.Contents {
|
||||
if entry.GetType() == Type_Directory || entry.GetType() == Type_Symlink {
|
||||
// no Content-Type
|
||||
} else if entry.GetType() == Type_InlineFile && entry.GetTransform() == Transform_None {
|
||||
} else if entry.GetType() == Type_InlineFile && entry.GetTransform() == Transform_Identity {
|
||||
contentType := mime.TypeByExtension(filepath.Ext(path))
|
||||
if contentType == "" {
|
||||
contentType = http.DetectContentType(entry.Data[:512])
|
||||
contentType = http.DetectContentType(entry.Data[:min(512, len(entry.Data))])
|
||||
}
|
||||
entry.ContentType = proto.String(contentType)
|
||||
} else {
|
||||
} else if entry.GetContentType() == "" {
|
||||
panic(fmt.Errorf("DetectContentType encountered invalid entry: %v, %v",
|
||||
entry.GetType(), entry.GetTransform()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The `clauspost/compress/zstd` package recommends reusing a compressor to avoid repeated
|
||||
// The `klauspost/compress/zstd` package recommends reusing a compressor to avoid repeated
|
||||
// allocations of internal buffers.
|
||||
var zstdEncoder, _ = zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBetterCompression))
|
||||
|
||||
@@ -167,29 +294,31 @@ func CompressFiles(ctx context.Context, manifest *Manifest) {
|
||||
span, _ := ObserveFunction(ctx, "CompressFiles")
|
||||
defer span.Finish()
|
||||
|
||||
var originalSize, compressedSize int64
|
||||
var originalSize int64
|
||||
var compressedSize int64
|
||||
for _, entry := range manifest.Contents {
|
||||
if entry.GetType() == Type_InlineFile && entry.GetTransform() == Transform_None {
|
||||
mtype := getMediaType(entry.GetContentType())
|
||||
if strings.HasPrefix(mtype, "video/") || strings.HasPrefix(mtype, "audio/") {
|
||||
if entry.GetType() == Type_InlineFile && entry.GetTransform() == Transform_Identity {
|
||||
mediaType := getMediaType(entry.GetContentType())
|
||||
if strings.HasPrefix(mediaType, "video/") || strings.HasPrefix(mediaType, "audio/") {
|
||||
continue
|
||||
}
|
||||
originalSize += entry.GetSize()
|
||||
compressedData := zstdEncoder.EncodeAll(entry.GetData(), make([]byte, 0, entry.GetSize()))
|
||||
if len(compressedData) < int(*entry.Size) {
|
||||
compressedData := zstdEncoder.EncodeAll(entry.GetData(),
|
||||
make([]byte, 0, entry.GetOriginalSize()))
|
||||
if int64(len(compressedData)) < entry.GetOriginalSize() {
|
||||
entry.Data = compressedData
|
||||
entry.Size = proto.Int64(int64(len(entry.Data)))
|
||||
entry.Transform = Transform_Zstandard.Enum()
|
||||
entry.Transform = Transform_Zstd.Enum()
|
||||
entry.CompressedSize = proto.Int64(int64(len(entry.Data)))
|
||||
}
|
||||
compressedSize += entry.GetSize()
|
||||
}
|
||||
originalSize += entry.GetOriginalSize()
|
||||
compressedSize += entry.GetCompressedSize()
|
||||
}
|
||||
manifest.OriginalSize = proto.Int64(originalSize)
|
||||
manifest.CompressedSize = proto.Int64(compressedSize)
|
||||
|
||||
if originalSize != 0 {
|
||||
spaceSaving := (float64(originalSize) - float64(compressedSize)) / float64(originalSize)
|
||||
log.Printf("compress: saved %.2f percent (%s to %s)",
|
||||
logc.Printf(ctx, "compress: saved %.2f percent (%s to %s)",
|
||||
spaceSaving*100.0,
|
||||
datasize.ByteSize(originalSize).HR(),
|
||||
datasize.ByteSize(compressedSize).HR(),
|
||||
@@ -203,75 +332,96 @@ func CompressFiles(ctx context.Context, manifest *Manifest) {
|
||||
// At the moment, there isn't a good way to report errors except to log them on the terminal.
|
||||
// (Perhaps in the future they could be exposed at `.git-pages/status.txt`?)
|
||||
func PrepareManifest(ctx context.Context, manifest *Manifest) error {
|
||||
// Parse Netlify-style `_redirects`
|
||||
if err := ProcessRedirectsFile(manifest); err != nil {
|
||||
log.Printf("redirects err: %s\n", err)
|
||||
// Parse Netlify-style `_redirects`.
|
||||
if err := ProcessRedirectsFile(ctx, manifest); err != nil {
|
||||
logc.Printf(ctx, "redirects err: %s\n", err)
|
||||
} else if len(manifest.Redirects) > 0 {
|
||||
log.Printf("redirects ok: %d rules\n", len(manifest.Redirects))
|
||||
logc.Printf(ctx, "redirects ok: %d rules\n", len(manifest.Redirects))
|
||||
}
|
||||
|
||||
// Parse Netlify-style `_headers`
|
||||
if err := ProcessHeadersFile(manifest); err != nil {
|
||||
log.Printf("headers err: %s\n", err)
|
||||
// Check if any redirects are unreachable.
|
||||
LintRedirects(manifest)
|
||||
|
||||
// Parse Netlify-style `_headers`.
|
||||
if err := ProcessHeadersFile(ctx, manifest); err != nil {
|
||||
logc.Printf(ctx, "headers err: %s\n", err)
|
||||
} else if len(manifest.Headers) > 0 {
|
||||
log.Printf("headers ok: %d rules\n", len(manifest.Headers))
|
||||
logc.Printf(ctx, "headers ok: %d rules\n", len(manifest.Headers))
|
||||
}
|
||||
|
||||
// Sniff content type like `http.ServeContent`
|
||||
// Sniff content type like `http.ServeContent`.
|
||||
DetectContentType(manifest)
|
||||
|
||||
// Opportunistically compress blobs (must be done last)
|
||||
// Opportunistically compress blobs (must be done last).
|
||||
CompressFiles(ctx, manifest)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var ErrSiteTooLarge = errors.New("site too large")
|
||||
var ErrManifestTooLarge = errors.New("manifest too large")
|
||||
|
||||
// Limits the number of concurrent uploads, globally across the entire git-pages process.
|
||||
// As created, there is no limit, but reinitializing the semaphore with a bounded channel
|
||||
// limits the concurrency to the channel size. Note that the default *configuration* does
|
||||
// limit the number of uploads.
|
||||
var putBlobSemaphore = make(chan struct{})
|
||||
|
||||
// Uploads inline file data over certain size to the storage backend. Returns a copy of
|
||||
// the manifest updated to refer to an external content-addressable store.
|
||||
func StoreManifest(ctx context.Context, name string, manifest *Manifest) (*Manifest, error) {
|
||||
func StoreManifest(
|
||||
ctx context.Context, name string, manifest *Manifest, opts ModifyManifestOptions,
|
||||
) (*Manifest, error) {
|
||||
span, ctx := ObserveFunction(ctx, "StoreManifest", "manifest.name", name)
|
||||
defer span.Finish()
|
||||
|
||||
extManifest := &Manifest{}
|
||||
proto.Merge(extManifest, manifest)
|
||||
|
||||
// Replace inline files over certain size with references to external data.
|
||||
extManifest := Manifest{
|
||||
RepoUrl: manifest.RepoUrl,
|
||||
Branch: manifest.Branch,
|
||||
Commit: manifest.Commit,
|
||||
Contents: make(map[string]*Entry),
|
||||
Redirects: manifest.Redirects,
|
||||
Headers: manifest.Headers,
|
||||
Problems: manifest.Problems,
|
||||
OriginalSize: manifest.OriginalSize,
|
||||
CompressedSize: manifest.CompressedSize,
|
||||
StoredSize: proto.Int64(0),
|
||||
}
|
||||
extObjectSizes := make(map[string]int64)
|
||||
extManifest.Contents = make(map[string]*Entry)
|
||||
for name, entry := range manifest.Contents {
|
||||
cannotBeInlined := entry.GetType() == Type_InlineFile &&
|
||||
entry.GetSize() > int64(config.Limits.MaxInlineFileSize.Bytes())
|
||||
entry.GetCompressedSize() > int64(config.Limits.MaxInlineFileSize.Bytes())
|
||||
if cannotBeInlined {
|
||||
dataHash := sha256.Sum256(entry.Data)
|
||||
extManifest.Contents[name] = &Entry{
|
||||
Type: Type_ExternalFile.Enum(),
|
||||
Size: entry.Size,
|
||||
Data: fmt.Appendf(nil, "sha256-%x", dataHash),
|
||||
Transform: entry.Transform,
|
||||
ContentType: entry.ContentType,
|
||||
Type: Type_ExternalFile.Enum(),
|
||||
OriginalSize: entry.OriginalSize,
|
||||
CompressedSize: entry.CompressedSize,
|
||||
Data: fmt.Appendf(nil, "sha256-%x", dataHash),
|
||||
Transform: entry.Transform,
|
||||
ContentType: entry.ContentType,
|
||||
GitHash: entry.GitHash,
|
||||
}
|
||||
extObjectSizes[string(dataHash[:])] = entry.GetSize()
|
||||
} else {
|
||||
extManifest.Contents[name] = entry
|
||||
}
|
||||
}
|
||||
// `extObjectMap` stores size once per object, deduplicating it
|
||||
for _, storedSize := range extObjectSizes {
|
||||
*extManifest.StoredSize += storedSize
|
||||
|
||||
// Compute the total and deduplicated storage size.
|
||||
totalSize := int64(0)
|
||||
blobSizes := map[string]int64{}
|
||||
for _, entry := range extManifest.Contents {
|
||||
totalSize += entry.GetOriginalSize()
|
||||
if entry.GetType() == Type_ExternalFile {
|
||||
blobSizes[string(entry.Data)] = entry.GetCompressedSize()
|
||||
}
|
||||
}
|
||||
if uint64(totalSize) > config.Limits.MaxSiteSize.Bytes() {
|
||||
return nil, fmt.Errorf("%w: contents size %s exceeds %s limit",
|
||||
ErrSiteTooLarge,
|
||||
datasize.ByteSize(totalSize).HR(),
|
||||
config.Limits.MaxSiteSize.HR(),
|
||||
)
|
||||
}
|
||||
extManifest.StoredSize = proto.Int64(0)
|
||||
for _, blobSize := range blobSizes {
|
||||
*extManifest.StoredSize += blobSize
|
||||
}
|
||||
|
||||
// Upload the resulting manifest and the blob it references.
|
||||
extManifestData := EncodeManifest(&extManifest)
|
||||
extManifestData := EncodeManifest(extManifest)
|
||||
if uint64(len(extManifestData)) > config.Limits.MaxManifestSize.Bytes() {
|
||||
return nil, fmt.Errorf("%w: manifest size %s exceeds %s limit",
|
||||
ErrManifestTooLarge,
|
||||
@@ -280,15 +430,20 @@ func StoreManifest(ctx context.Context, name string, manifest *Manifest) (*Manif
|
||||
)
|
||||
}
|
||||
|
||||
if err := backend.StageManifest(ctx, &extManifest); err != nil {
|
||||
if err := backend.StageManifest(ctx, extManifest); err != nil {
|
||||
return nil, fmt.Errorf("stage manifest: %w", err)
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
ch := make(chan error, len(extManifest.Contents))
|
||||
for name, entry := range extManifest.Contents {
|
||||
if entry.GetType() == Type_ExternalFile {
|
||||
// Upload external entries (those that were decided as ineligible for being stored inline).
|
||||
// If the entry in the original manifest is already an external reference, there's no need
|
||||
// to externalize it (and no way for us to do so, since the entry only contains the blob name).
|
||||
if entry.GetType() == Type_ExternalFile && manifest.Contents[name].GetType() == Type_InlineFile {
|
||||
putBlobSemaphore <- struct{}{} // acquire (and maybe block)
|
||||
wg.Go(func() {
|
||||
defer func() { <-putBlobSemaphore }() // release
|
||||
err := backend.PutBlob(ctx, string(entry.Data), manifest.Contents[name].Data)
|
||||
if err != nil {
|
||||
ch <- fmt.Errorf("put blob %s: %w", name, err)
|
||||
@@ -302,9 +457,13 @@ func StoreManifest(ctx context.Context, name string, manifest *Manifest) (*Manif
|
||||
return nil, err // currently ignores all but 1st error
|
||||
}
|
||||
|
||||
if err := backend.CommitManifest(ctx, name, &extManifest); err != nil {
|
||||
return nil, fmt.Errorf("commit manifest: %w", err)
|
||||
if err := backend.CommitManifest(ctx, name, extManifest, opts); err != nil {
|
||||
if errors.Is(err, ErrDomainFrozen) {
|
||||
return nil, err
|
||||
} else {
|
||||
return nil, fmt.Errorf("commit manifest: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return &extManifest, nil
|
||||
return extManifest, nil
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package git_pages
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"slices"
|
||||
"strings"
|
||||
)
|
||||
@@ -19,16 +18,19 @@ func RunMigration(ctx context.Context, name string) error {
|
||||
|
||||
func createDomainMarkers(ctx context.Context) error {
|
||||
if backend.HasFeature(ctx, FeatureCheckDomainMarker) {
|
||||
log.Print("store already has domain markers")
|
||||
logc.Print(ctx, "store already has domain markers")
|
||||
return nil
|
||||
}
|
||||
|
||||
var manifests, domains []string
|
||||
manifests, err := backend.ListManifests(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("list manifests: %w", err)
|
||||
var manifests []string
|
||||
for metadata, err := range backend.EnumerateManifests(ctx) {
|
||||
if err != nil {
|
||||
return fmt.Errorf("enum manifests: %w", err)
|
||||
}
|
||||
manifests = append(manifests, metadata.Name)
|
||||
}
|
||||
slices.Sort(manifests)
|
||||
var domains []string
|
||||
for _, manifest := range manifests {
|
||||
domain, _, _ := strings.Cut(manifest, "/")
|
||||
if len(domains) == 0 || domains[len(domains)-1] != domain {
|
||||
@@ -36,7 +38,7 @@ func createDomainMarkers(ctx context.Context) error {
|
||||
}
|
||||
}
|
||||
for idx, domain := range domains {
|
||||
log.Printf("(%d / %d) creating domain %s", idx+1, len(domains), domain)
|
||||
logc.Printf(ctx, "(%d / %d) creating domain %s", idx+1, len(domains), domain)
|
||||
if err := backend.CreateDomain(ctx, domain); err != nil {
|
||||
return fmt.Errorf("creating domain %s: %w", domain, err)
|
||||
}
|
||||
@@ -44,6 +46,6 @@ func createDomainMarkers(ctx context.Context) error {
|
||||
if err := backend.EnableFeature(ctx, FeatureCheckDomainMarker); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Printf("created markers for %d domains", len(domains))
|
||||
logc.Printf(ctx, "created markers for %d domains", len(domains))
|
||||
return nil
|
||||
}
|
||||
|
||||
265
src/observe.go
265
src/observe.go
@@ -5,23 +5,21 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"iter"
|
||||
"log"
|
||||
"log/slog"
|
||||
"math/rand/v2"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime/debug"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
slogmulti "github.com/samber/slog-multi"
|
||||
|
||||
syslog "codeberg.org/git-pages/go-slog-syslog"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
|
||||
"github.com/getsentry/sentry-go"
|
||||
sentryhttp "github.com/getsentry/sentry-go/http"
|
||||
sentryslog "github.com/getsentry/sentry-go/slog"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -41,18 +39,11 @@ var (
|
||||
}, []string{"method"})
|
||||
)
|
||||
|
||||
func hasSentry() bool {
|
||||
return os.Getenv("SENTRY_DSN") != ""
|
||||
}
|
||||
var syslogHandler syslog.Handler
|
||||
|
||||
func InitObservability() {
|
||||
debug.SetPanicOnFault(true)
|
||||
|
||||
environment := "development"
|
||||
if value, ok := os.LookupEnv("ENVIRONMENT"); ok {
|
||||
environment = value
|
||||
}
|
||||
|
||||
logHandlers := []slog.Handler{}
|
||||
|
||||
switch config.LogFormat {
|
||||
@@ -68,67 +59,29 @@ func InitObservability() {
|
||||
log.Println("unknown log format", config.LogFormat)
|
||||
}
|
||||
|
||||
if hasSentry() {
|
||||
enableLogs := false
|
||||
if value, err := strconv.ParseBool(os.Getenv("SENTRY_LOGS")); err == nil {
|
||||
enableLogs = value
|
||||
}
|
||||
|
||||
enableTracing := false
|
||||
if value, err := strconv.ParseBool(os.Getenv("SENTRY_TRACING")); err == nil {
|
||||
enableTracing = value
|
||||
}
|
||||
|
||||
options := sentry.ClientOptions{}
|
||||
options.Environment = environment
|
||||
options.EnableLogs = enableLogs
|
||||
options.EnableTracing = enableTracing
|
||||
options.TracesSampleRate = 1
|
||||
switch environment {
|
||||
case "development", "staging":
|
||||
default:
|
||||
options.BeforeSendTransaction = func(event *sentry.Event, hint *sentry.EventHint) *sentry.Event {
|
||||
sampleRate := 0.05
|
||||
if trace, ok := event.Contexts["trace"]; ok {
|
||||
if data, ok := trace["data"].(map[string]any); ok {
|
||||
if method, ok := data["http.request.method"].(string); ok {
|
||||
switch method {
|
||||
case "PUT", "DELETE", "POST":
|
||||
sampleRate = 1
|
||||
default:
|
||||
duration := event.Timestamp.Sub(event.StartTime)
|
||||
threshold := time.Duration(config.Observability.SlowResponseThreshold)
|
||||
if duration >= threshold {
|
||||
sampleRate = 1
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if rand.Float64() < sampleRate {
|
||||
return event
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if err := sentry.Init(options); err != nil {
|
||||
log.Fatalf("sentry: %s\n", err)
|
||||
}
|
||||
|
||||
if enableLogs {
|
||||
logHandlers = append(logHandlers, sentryslog.Option{
|
||||
AddSource: true,
|
||||
}.NewSentryHandler(context.Background()))
|
||||
if syslogAddr := os.Getenv("SYSLOG_ADDR"); syslogAddr != "" {
|
||||
var err error
|
||||
syslogHandler, err = syslog.NewHandler(&syslog.HandlerOptions{
|
||||
Address: syslogAddr,
|
||||
AppName: "git-pages",
|
||||
StructuredDataID: "git-pages",
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("syslog: %v", err)
|
||||
}
|
||||
logHandlers = append(logHandlers, syslogHandler)
|
||||
}
|
||||
|
||||
slog.SetDefault(slog.New(slogmulti.Fanout(logHandlers...)))
|
||||
}
|
||||
|
||||
func FiniObservability() {
|
||||
if hasSentry() {
|
||||
sentry.Flush(2 * time.Second)
|
||||
var wg sync.WaitGroup
|
||||
timeout := 2 * time.Second
|
||||
if syslogHandler != nil {
|
||||
wg.Go(func() { syslogHandler.Flush(timeout) })
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func ObserveError(err error) {
|
||||
@@ -137,10 +90,6 @@ func ObserveError(err error) {
|
||||
// Timeout results in a different error.
|
||||
return
|
||||
}
|
||||
|
||||
if hasSentry() {
|
||||
sentry.CaptureException(err)
|
||||
}
|
||||
}
|
||||
|
||||
type observedResponseWriter struct {
|
||||
@@ -173,22 +122,6 @@ func (w *observedResponseWriter) WriteHeader(statusCode int) {
|
||||
}
|
||||
|
||||
func ObserveHTTPHandler(handler http.Handler) http.Handler {
|
||||
if hasSentry() {
|
||||
handler = func(next http.Handler) http.Handler {
|
||||
next = sentryhttp.New(sentryhttp.Options{
|
||||
Repanic: true,
|
||||
}).Handle(handler)
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Prevent the Sentry SDK from continuing traces as we don't use this feature.
|
||||
r.Header.Del(sentry.SentryTraceHeader)
|
||||
r.Header.Del(sentry.SentryBaggageHeader)
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}(handler)
|
||||
}
|
||||
|
||||
handler = func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ow := newObservedResponseWriter(w)
|
||||
@@ -219,23 +152,12 @@ func ObserveFunction(
|
||||
interface{ Finish() }, context.Context,
|
||||
) {
|
||||
switch {
|
||||
case hasSentry():
|
||||
span := sentry.StartSpan(ctx, "function")
|
||||
span.Description = funcName
|
||||
ObserveData(span.Context(), data...)
|
||||
return span, span.Context()
|
||||
default:
|
||||
return noopSpan{}, ctx
|
||||
}
|
||||
}
|
||||
|
||||
func ObserveData(ctx context.Context, data ...any) {
|
||||
if span := sentry.SpanFromContext(ctx); span != nil {
|
||||
for i := 0; i < len(data); i += 2 {
|
||||
name, value := data[i], data[i+1]
|
||||
span.SetData(name.(string), value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -290,13 +212,13 @@ func (backend *observedBackend) EnableFeature(ctx context.Context, feature Backe
|
||||
func (backend *observedBackend) GetBlob(
|
||||
ctx context.Context, name string,
|
||||
) (
|
||||
reader io.ReadSeeker, size uint64, mtime time.Time, err error,
|
||||
reader io.ReadSeeker, metadata BlobMetadata, err error,
|
||||
) {
|
||||
span, ctx := ObserveFunction(ctx, "GetBlob", "blob.name", name)
|
||||
if reader, size, mtime, err = backend.inner.GetBlob(ctx, name); err == nil {
|
||||
ObserveData(ctx, "blob.size", size)
|
||||
if reader, metadata, err = backend.inner.GetBlob(ctx, name); err == nil {
|
||||
ObserveData(ctx, "blob.size", metadata.Size)
|
||||
blobsRetrievedCount.Inc()
|
||||
blobsRetrievedBytes.Add(float64(size))
|
||||
blobsRetrievedBytes.Add(float64(metadata.Size))
|
||||
}
|
||||
span.Finish()
|
||||
return
|
||||
@@ -319,23 +241,28 @@ func (backend *observedBackend) DeleteBlob(ctx context.Context, name string) (er
|
||||
return
|
||||
}
|
||||
|
||||
func (backend *observedBackend) ListManifests(ctx context.Context) (manifests []string, err error) {
|
||||
span, ctx := ObserveFunction(ctx, "ListManifests")
|
||||
manifests, err = backend.inner.ListManifests(ctx)
|
||||
span.Finish()
|
||||
return
|
||||
func (backend *observedBackend) EnumerateBlobs(ctx context.Context) iter.Seq2[BlobMetadata, error] {
|
||||
return func(yield func(BlobMetadata, error) bool) {
|
||||
span, ctx := ObserveFunction(ctx, "EnumerateBlobs")
|
||||
for metadata, err := range backend.inner.EnumerateBlobs(ctx) {
|
||||
if !yield(metadata, err) {
|
||||
break
|
||||
}
|
||||
}
|
||||
span.Finish()
|
||||
}
|
||||
}
|
||||
|
||||
func (backend *observedBackend) GetManifest(
|
||||
ctx context.Context, name string, opts GetManifestOptions,
|
||||
) (
|
||||
manifest *Manifest, mtime time.Time, err error,
|
||||
manifest *Manifest, metadata ManifestMetadata, err error,
|
||||
) {
|
||||
span, ctx := ObserveFunction(ctx, "GetManifest",
|
||||
"manifest.name", name,
|
||||
"manifest.bypass_cache", opts.BypassCache,
|
||||
)
|
||||
if manifest, mtime, err = backend.inner.GetManifest(ctx, name, opts); err == nil {
|
||||
if manifest, metadata, err = backend.inner.GetManifest(ctx, name, opts); err == nil {
|
||||
manifestsRetrievedCount.Inc()
|
||||
}
|
||||
span.Finish()
|
||||
@@ -349,30 +276,138 @@ func (backend *observedBackend) StageManifest(ctx context.Context, manifest *Man
|
||||
return
|
||||
}
|
||||
|
||||
func (backend *observedBackend) CommitManifest(ctx context.Context, name string, manifest *Manifest) (err error) {
|
||||
func (backend *observedBackend) HasAtomicCAS(ctx context.Context) bool {
|
||||
return backend.inner.HasAtomicCAS(ctx)
|
||||
}
|
||||
|
||||
func (backend *observedBackend) CommitManifest(ctx context.Context, name string, manifest *Manifest, opts ModifyManifestOptions) (err error) {
|
||||
span, ctx := ObserveFunction(ctx, "CommitManifest", "manifest.name", name)
|
||||
err = backend.inner.CommitManifest(ctx, name, manifest)
|
||||
err = backend.inner.CommitManifest(ctx, name, manifest, opts)
|
||||
span.Finish()
|
||||
return
|
||||
}
|
||||
|
||||
func (backend *observedBackend) DeleteManifest(ctx context.Context, name string) (err error) {
|
||||
func (backend *observedBackend) DeleteManifest(ctx context.Context, name string, opts ModifyManifestOptions) (err error) {
|
||||
span, ctx := ObserveFunction(ctx, "DeleteManifest", "manifest.name", name)
|
||||
err = backend.inner.DeleteManifest(ctx, name)
|
||||
err = backend.inner.DeleteManifest(ctx, name, opts)
|
||||
span.Finish()
|
||||
return
|
||||
}
|
||||
|
||||
func (backend *observedBackend) EnumerateManifests(ctx context.Context) iter.Seq2[*ManifestMetadata, error] {
|
||||
return func(yield func(*ManifestMetadata, error) bool) {
|
||||
span, ctx := ObserveFunction(ctx, "EnumerateManifests")
|
||||
for metadata, err := range backend.inner.EnumerateManifests(ctx) {
|
||||
if !yield(metadata, err) {
|
||||
break
|
||||
}
|
||||
}
|
||||
span.Finish()
|
||||
}
|
||||
}
|
||||
|
||||
func (backend *observedBackend) GetAllManifests(ctx context.Context) iter.Seq2[tuple[*ManifestMetadata, *Manifest], error] {
|
||||
return func(yield func(tuple[*ManifestMetadata, *Manifest], error) bool) {
|
||||
span, ctx := ObserveFunction(ctx, "GetAllManifests")
|
||||
for item, err := range backend.inner.GetAllManifests(ctx) {
|
||||
if !yield(item, err) {
|
||||
break
|
||||
}
|
||||
}
|
||||
span.Finish()
|
||||
}
|
||||
}
|
||||
|
||||
func (backend *observedBackend) CheckDomain(ctx context.Context, domain string) (found bool, err error) {
|
||||
span, ctx := ObserveFunction(ctx, "CheckDomain", "manifest.domain", domain)
|
||||
span, ctx := ObserveFunction(ctx, "CheckDomain", "domain.name", domain)
|
||||
found, err = backend.inner.CheckDomain(ctx, domain)
|
||||
span.Finish()
|
||||
return
|
||||
}
|
||||
|
||||
func (backend *observedBackend) CreateDomain(ctx context.Context, domain string) (err error) {
|
||||
span, ctx := ObserveFunction(ctx, "CreateDomain", "manifest.domain", domain)
|
||||
span, ctx := ObserveFunction(ctx, "CreateDomain", "domain.name", domain)
|
||||
err = backend.inner.CreateDomain(ctx, domain)
|
||||
span.Finish()
|
||||
return
|
||||
}
|
||||
|
||||
func (backend *observedBackend) FreezeDomain(ctx context.Context, domain string) (err error) {
|
||||
span, ctx := ObserveFunction(ctx, "FreezeDomain", "domain.name", domain)
|
||||
err = backend.inner.FreezeDomain(ctx, domain)
|
||||
span.Finish()
|
||||
return
|
||||
}
|
||||
|
||||
func (backend *observedBackend) UnfreezeDomain(ctx context.Context, domain string) (err error) {
|
||||
span, ctx := ObserveFunction(ctx, "UnfreezeDomain", "domain.name", domain)
|
||||
err = backend.inner.UnfreezeDomain(ctx, domain)
|
||||
span.Finish()
|
||||
return
|
||||
}
|
||||
|
||||
func (backend *observedBackend) HaveDomainsChanged(ctx context.Context, since time.Time) (changed bool, err error) {
|
||||
span, ctx := ObserveFunction(ctx, "HaveDomainsChanged", "since", since)
|
||||
changed, err = backend.inner.HaveDomainsChanged(ctx, since)
|
||||
span.Finish()
|
||||
return
|
||||
}
|
||||
|
||||
func (backend *observedBackend) AppendAuditLog(ctx context.Context, id AuditID, record *AuditRecord) (err error) {
|
||||
span, ctx := ObserveFunction(ctx, "AppendAuditLog", "audit.id", id)
|
||||
err = backend.inner.AppendAuditLog(ctx, id, record)
|
||||
span.Finish()
|
||||
return
|
||||
}
|
||||
|
||||
func (backend *observedBackend) QueryAuditLog(ctx context.Context, id AuditID) (record *AuditRecord, err error) {
|
||||
span, ctx := ObserveFunction(ctx, "QueryAuditLog", "audit.id", id)
|
||||
record, err = backend.inner.QueryAuditLog(ctx, id)
|
||||
span.Finish()
|
||||
return
|
||||
}
|
||||
|
||||
func (backend *observedBackend) SearchAuditLog(
|
||||
ctx context.Context, opts SearchAuditLogOptions,
|
||||
) iter.Seq2[AuditID, error] {
|
||||
return func(yield func(AuditID, error) bool) {
|
||||
span, ctx := ObserveFunction(ctx, "SearchAuditLog",
|
||||
"audit.search.since", opts.Since,
|
||||
"audit.search.until", opts.Until,
|
||||
)
|
||||
for id, err := range backend.inner.SearchAuditLog(ctx, opts) {
|
||||
if !yield(id, err) {
|
||||
break
|
||||
}
|
||||
}
|
||||
span.Finish()
|
||||
}
|
||||
}
|
||||
|
||||
func (backend *observedBackend) GetAuditLogRecords(
|
||||
ctx context.Context, ids iter.Seq2[AuditID, error],
|
||||
) iter.Seq2[*AuditRecord, error] {
|
||||
return func(yield func(*AuditRecord, error) bool) {
|
||||
span, ctx := ObserveFunction(ctx, "GetAuditLogRecords")
|
||||
for item, err := range backend.inner.GetAuditLogRecords(ctx, ids) {
|
||||
if !yield(item, err) {
|
||||
break
|
||||
}
|
||||
}
|
||||
span.Finish()
|
||||
}
|
||||
}
|
||||
|
||||
func (backend *observedBackend) DetachAuditRecord(ctx context.Context, id AuditID) (err error) {
|
||||
span, ctx := ObserveFunction(ctx, "DetachAuditRecord", "audit.id", id)
|
||||
err = backend.inner.DetachAuditRecord(ctx, id)
|
||||
span.Finish()
|
||||
return
|
||||
}
|
||||
|
||||
func (backend *observedBackend) ExpireAuditRecord(ctx context.Context, id AuditID) (err error) {
|
||||
span, ctx := ObserveFunction(ctx, "ExpireAuditRecord", "audit.id", id)
|
||||
err = backend.inner.ExpireAuditRecord(ctx, id)
|
||||
span.Finish()
|
||||
return
|
||||
}
|
||||
|
||||
473
src/pages.go
473
src/pages.go
@@ -8,12 +8,12 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"maps"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -22,11 +22,17 @@ import (
|
||||
"github.com/pquerna/cachecontrol/cacheobject"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
const notFoundPage = "404.html"
|
||||
|
||||
var (
|
||||
serveEncodingCount = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "git_pages_serve_encoding_count",
|
||||
Help: "Count of blob transform vs negotiated encoding",
|
||||
}, []string{"transform", "negotiated"})
|
||||
|
||||
siteUpdatesCount = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "git_pages_site_updates",
|
||||
Help: "Count of site updates in total",
|
||||
@@ -41,9 +47,8 @@ var (
|
||||
}, []string{"cause"})
|
||||
)
|
||||
|
||||
func reportSiteUpdate(via string, result *UpdateResult) {
|
||||
func observeSiteUpdate(via string, result *UpdateResult) {
|
||||
siteUpdatesCount.With(prometheus.Labels{"via": via}).Inc()
|
||||
|
||||
switch result.outcome {
|
||||
case UpdateError:
|
||||
siteUpdateErrorCount.With(prometheus.Labels{"cause": "other"}).Inc()
|
||||
@@ -60,8 +65,37 @@ func reportSiteUpdate(via string, result *UpdateResult) {
|
||||
}
|
||||
}
|
||||
|
||||
func copyForgeAuthToPrincipal(principal *Principal, auth *Authorization) {
|
||||
if auth.forgeUser != nil {
|
||||
principal.ForgeUser = auth.forgeUser
|
||||
}
|
||||
|
||||
repoURL := auth.ForgeRepoURL()
|
||||
if repoURL != "" {
|
||||
principal.RepoUrl = &repoURL
|
||||
}
|
||||
}
|
||||
|
||||
func normalizeHost(host string) string {
|
||||
return strings.ToLower(host)
|
||||
}
|
||||
|
||||
func makeWebRoot(host string, projectName string) string {
|
||||
return fmt.Sprintf("%s/%s", strings.ToLower(host), projectName)
|
||||
return path.Join(normalizeHost(host), projectName)
|
||||
}
|
||||
|
||||
func getWebRoot(r *http.Request) (string, error) {
|
||||
host, err := GetHost(r)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
projectName, err := GetProjectName(r)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return makeWebRoot(host, projectName), nil
|
||||
}
|
||||
|
||||
func writeRedirect(w http.ResponseWriter, code int, path string) {
|
||||
@@ -78,7 +112,7 @@ func getPage(w http.ResponseWriter, r *http.Request) error {
|
||||
var err error
|
||||
var sitePath string
|
||||
var manifest *Manifest
|
||||
var manifestMtime time.Time
|
||||
var metadata ManifestMetadata
|
||||
|
||||
cacheControl, err := cacheobject.ParseRequestCacheControl(r.Header.Get("Cache-Control"))
|
||||
if err != nil {
|
||||
@@ -96,43 +130,54 @@ func getPage(w http.ResponseWriter, r *http.Request) error {
|
||||
return err
|
||||
}
|
||||
|
||||
host = normalizeHost(host)
|
||||
if !domainCache.CheckDomain(r.Context(), host) {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
fmt.Fprintf(w, "site not found\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
type indexManifestResult struct {
|
||||
manifest *Manifest
|
||||
manifestMtime time.Time
|
||||
err error
|
||||
manifest *Manifest
|
||||
metadata ManifestMetadata
|
||||
err error
|
||||
}
|
||||
indexManifestCh := make(chan indexManifestResult, 1)
|
||||
go func() {
|
||||
manifest, mtime, err := backend.GetManifest(
|
||||
manifest, metadata, err := backend.GetManifest(
|
||||
r.Context(), makeWebRoot(host, ".index"),
|
||||
GetManifestOptions{BypassCache: bypassCache},
|
||||
)
|
||||
indexManifestCh <- (indexManifestResult{manifest, mtime, err})
|
||||
indexManifestCh <- (indexManifestResult{manifest, metadata, err})
|
||||
}()
|
||||
|
||||
err = nil
|
||||
sitePath = strings.TrimPrefix(r.URL.Path, "/")
|
||||
if projectName, projectPath, hasProjectSlash := strings.Cut(sitePath, "/"); projectName != "" {
|
||||
var projectManifest *Manifest
|
||||
var projectManifestMtime time.Time
|
||||
projectManifest, projectManifestMtime, err = backend.GetManifest(
|
||||
r.Context(), makeWebRoot(host, projectName),
|
||||
GetManifestOptions{BypassCache: bypassCache},
|
||||
)
|
||||
if err == nil {
|
||||
if !hasProjectSlash {
|
||||
writeRedirect(w, http.StatusFound, r.URL.Path+"/")
|
||||
return nil
|
||||
if ValidateProjectName(projectName) == nil {
|
||||
var projectManifest *Manifest
|
||||
var projectMetadata ManifestMetadata
|
||||
projectManifest, projectMetadata, err = backend.GetManifest(
|
||||
r.Context(), makeWebRoot(host, projectName),
|
||||
GetManifestOptions{BypassCache: bypassCache},
|
||||
)
|
||||
if err == nil {
|
||||
if !hasProjectSlash {
|
||||
writeRedirect(w, http.StatusFound, r.URL.Path+"/")
|
||||
return nil
|
||||
}
|
||||
sitePath, manifest, metadata = projectPath, projectManifest, projectMetadata
|
||||
}
|
||||
sitePath, manifest, manifestMtime = projectPath, projectManifest, projectManifestMtime
|
||||
}
|
||||
}
|
||||
if manifest == nil && (err == nil || errors.Is(err, ErrObjectNotFound)) {
|
||||
result := <-indexManifestCh
|
||||
manifest, manifestMtime, err = result.manifest, result.manifestMtime, result.err
|
||||
manifest, metadata, err = result.manifest, result.metadata, result.err
|
||||
if manifest == nil && errors.Is(err, ErrObjectNotFound) {
|
||||
if found, fallbackErr := HandleWildcardFallback(w, r); found {
|
||||
return fallbackErr
|
||||
if fallback != nil {
|
||||
logc.Printf(r.Context(), "fallback: %s via %s", host, config.Fallback.ProxyTo)
|
||||
fallback.ServeHTTP(w, r)
|
||||
return nil
|
||||
} else {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
fmt.Fprintf(w, "site not found\n")
|
||||
@@ -159,44 +204,48 @@ func getPage(w http.ResponseWriter, r *http.Request) error {
|
||||
return nil
|
||||
}
|
||||
if metadataPath, found := strings.CutPrefix(sitePath, ".git-pages/"); found {
|
||||
lastModified := manifestMtime.UTC().Format(http.TimeFormat)
|
||||
lastModified := metadata.LastModified.UTC().Format(http.TimeFormat)
|
||||
switch {
|
||||
case metadataPath == "health":
|
||||
w.Header().Add("Last-Modified", lastModified)
|
||||
w.Header().Add("ETag", fmt.Sprintf("\"%s\"", metadata.ETag))
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintf(w, "ok\n")
|
||||
return nil
|
||||
|
||||
case metadataPath == "manifest.json":
|
||||
// metadata requests require authorization to avoid making pushes from private
|
||||
// repositories enumerable
|
||||
_, err := AuthorizeMetadataRetrieval(r)
|
||||
// repositories enumerable or exposing basic-auth protected sections
|
||||
_, err := AuthorizeMetadataRetrieval(r, ManifestHasBasicAuth(manifest))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.Header().Add("Content-Type", "application/json; charset=utf-8")
|
||||
w.Header().Add("Last-Modified", lastModified)
|
||||
w.Header().Add("ETag", fmt.Sprintf("\"%s-manifest\"", metadata.ETag))
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(ManifestDebugJSON(manifest)))
|
||||
w.Write(ManifestJSON(manifest))
|
||||
return nil
|
||||
|
||||
case metadataPath == "archive.tar" && config.Feature("archive-site"):
|
||||
case metadataPath == "archive.tar":
|
||||
// same as above
|
||||
_, err := AuthorizeMetadataRetrieval(r)
|
||||
_, err := AuthorizeMetadataRetrieval(r, ManifestHasBasicAuth(manifest))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// we only offer `/.git-pages/archive.tar` and not the `.tar.gz`/`.tar.zst` variants
|
||||
// because HTTP can already request compression using the `Content-Encoding` mechanism
|
||||
acceptedEncodings := parseHTTPEncodings(r.Header.Get("Accept-Encoding"))
|
||||
acceptedEncodings := ParseAcceptEncodingHeader(r.Header.Get("Accept-Encoding"))
|
||||
w.Header().Add("Vary", "Accept-Encoding")
|
||||
negotiated := acceptedEncodings.Negotiate("zstd", "gzip", "identity")
|
||||
if negotiated != "" {
|
||||
w.Header().Set("Content-Encoding", negotiated)
|
||||
}
|
||||
w.Header().Add("Content-Type", "application/x-tar")
|
||||
w.Header().Add("Last-Modified", lastModified)
|
||||
w.Header().Add("ETag", fmt.Sprintf("\"%s-archive\"", metadata.ETag))
|
||||
w.Header().Add("Transfer-Encoding", "chunked")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
var iow io.Writer
|
||||
@@ -208,7 +257,7 @@ func getPage(w http.ResponseWriter, r *http.Request) error {
|
||||
case "zstd":
|
||||
iow, _ = zstd.NewWriter(w)
|
||||
}
|
||||
return CollectTar(r.Context(), iow, manifest, manifestMtime)
|
||||
return CollectTar(r.Context(), iow, manifest, metadata)
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
@@ -217,10 +266,23 @@ func getPage(w http.ResponseWriter, r *http.Request) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Apply basic-auth rules before checking existence of a path to avoid leaking the latter.
|
||||
authorized, err := ApplyBasicAuthRules(manifest, &url.URL{Path: sitePath}, r)
|
||||
if err != nil {
|
||||
// See comment below for the error case under `ApplyHeaderRules`.
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
fmt.Fprintf(w, "%s\n", err)
|
||||
return err
|
||||
} else if !authorized {
|
||||
w.Header().Set("WWW-Authenticate", `Basic charset="UTF-8"`)
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
return nil
|
||||
}
|
||||
|
||||
entryPath := sitePath
|
||||
entry := (*Entry)(nil)
|
||||
appliedRedirect := false
|
||||
status := 200
|
||||
status := http.StatusOK
|
||||
reader := io.ReadSeeker(nil)
|
||||
mtime := time.Time{}
|
||||
for {
|
||||
@@ -234,11 +296,11 @@ func getPage(w http.ResponseWriter, r *http.Request) error {
|
||||
entry = manifest.Contents[entryPath]
|
||||
if !appliedRedirect {
|
||||
redirectKind := RedirectAny
|
||||
if entry != nil && entry.GetType() != Type_Invalid {
|
||||
if entry != nil && entry.GetType() != Type_InvalidEntry {
|
||||
redirectKind = RedirectForce
|
||||
}
|
||||
originalURL := (&url.URL{Host: r.Host}).ResolveReference(r.URL)
|
||||
redirectURL, redirectStatus := ApplyRedirectRules(manifest, originalURL, redirectKind)
|
||||
_, redirectURL, redirectStatus := ApplyRedirectRules(manifest, originalURL, redirectKind)
|
||||
if Is3xxHTTPStatus(redirectStatus) {
|
||||
writeRedirect(w, redirectStatus, redirectURL.String())
|
||||
return nil
|
||||
@@ -251,8 +313,8 @@ func getPage(w http.ResponseWriter, r *http.Request) error {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if entry == nil || entry.GetType() == Type_Invalid {
|
||||
status = 404
|
||||
if entry == nil || entry.GetType() == Type_InvalidEntry {
|
||||
status = http.StatusNotFound
|
||||
if entryPath != notFoundPage {
|
||||
entryPath = notFoundPage
|
||||
continue
|
||||
@@ -268,13 +330,15 @@ func getPage(w http.ResponseWriter, r *http.Request) error {
|
||||
w.WriteHeader(http.StatusNotModified)
|
||||
return nil
|
||||
} else {
|
||||
reader, _, mtime, err = backend.GetBlob(r.Context(), string(entry.Data))
|
||||
var metadata BlobMetadata
|
||||
reader, metadata, err = backend.GetBlob(r.Context(), string(entry.Data))
|
||||
if err != nil {
|
||||
ObserveError(err) // all storage errors must be reported
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
fmt.Fprintf(w, "internal server error: %s\n", err)
|
||||
return err
|
||||
}
|
||||
mtime = metadata.LastModified
|
||||
w.Header().Set("ETag", etag)
|
||||
}
|
||||
} else if entry.GetType() == Type_Directory {
|
||||
@@ -297,26 +361,40 @@ func getPage(w http.ResponseWriter, r *http.Request) error {
|
||||
defer closer.Close()
|
||||
}
|
||||
|
||||
acceptedEncodings := parseHTTPEncodings(r.Header.Get("Accept-Encoding"))
|
||||
var offeredEncodings []string
|
||||
acceptedEncodings := ParseAcceptEncodingHeader(r.Header.Get("Accept-Encoding"))
|
||||
w.Header().Add("Vary", "Accept-Encoding")
|
||||
negotiatedEncoding := true
|
||||
switch entry.GetTransform() {
|
||||
case Transform_None:
|
||||
if acceptedEncodings.Negotiate("identity") != "identity" {
|
||||
case Transform_Identity:
|
||||
offeredEncodings = []string{"identity"}
|
||||
switch acceptedEncodings.Negotiate(offeredEncodings...) {
|
||||
case "identity":
|
||||
serveEncodingCount.
|
||||
With(prometheus.Labels{"transform": "identity", "negotiated": "identity"}).
|
||||
Inc()
|
||||
default:
|
||||
negotiatedEncoding = false
|
||||
serveEncodingCount.
|
||||
With(prometheus.Labels{"transform": "identity", "negotiated": "failure"}).
|
||||
Inc()
|
||||
}
|
||||
case Transform_Zstandard:
|
||||
supported := []string{"zstd", "identity"}
|
||||
case Transform_Zstd:
|
||||
offeredEncodings = []string{"zstd", "identity"}
|
||||
if entry.ContentType == nil {
|
||||
// If Content-Type is unset, `http.ServeContent` will try to sniff
|
||||
// the file contents. That won't work if it's compressed.
|
||||
supported = []string{"identity"}
|
||||
offeredEncodings = []string{"identity"}
|
||||
}
|
||||
switch acceptedEncodings.Negotiate(supported...) {
|
||||
switch acceptedEncodings.Negotiate(offeredEncodings...) {
|
||||
case "zstd":
|
||||
// Set Content-Length ourselves since `http.ServeContent` only sets
|
||||
// it if Content-Encoding is unset or if it's a range request.
|
||||
w.Header().Set("Content-Length", strconv.FormatInt(*entry.Size, 10))
|
||||
w.Header().Set("Content-Length", strconv.FormatInt(entry.GetCompressedSize(), 10))
|
||||
w.Header().Set("Content-Encoding", "zstd")
|
||||
serveEncodingCount.
|
||||
With(prometheus.Labels{"transform": "zstd", "negotiated": "zstd"}).
|
||||
Inc()
|
||||
case "identity":
|
||||
compressedData, _ := io.ReadAll(reader)
|
||||
decompressedData, err := zstdDecoder.DecodeAll(compressedData, []byte{})
|
||||
@@ -326,15 +404,22 @@ func getPage(w http.ResponseWriter, r *http.Request) error {
|
||||
return err
|
||||
}
|
||||
reader = bytes.NewReader(decompressedData)
|
||||
serveEncodingCount.
|
||||
With(prometheus.Labels{"transform": "zstd", "negotiated": "identity"}).
|
||||
Inc()
|
||||
default:
|
||||
negotiatedEncoding = false
|
||||
serveEncodingCount.
|
||||
With(prometheus.Labels{"transform": "zstd", "negotiated": "failure"}).
|
||||
Inc()
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unexpected transform")
|
||||
}
|
||||
if !negotiatedEncoding {
|
||||
w.Header().Set("Accept-Encoding", strings.Join(offeredEncodings, ", "))
|
||||
w.WriteHeader(http.StatusNotAcceptable)
|
||||
return fmt.Errorf("no supported content encodings (accept-encoding: %q)",
|
||||
return fmt.Errorf("no supported content encodings (Accept-Encoding: %s)",
|
||||
r.Header.Get("Accept-Encoding"))
|
||||
}
|
||||
|
||||
@@ -369,13 +454,15 @@ func getPage(w http.ResponseWriter, r *http.Request) error {
|
||||
io.Copy(w, reader)
|
||||
}
|
||||
} else {
|
||||
// consider content fresh for 60 seconds (the same as the freshness interval of
|
||||
// manifests in the S3 backend), and use stale content anyway as long as it's not
|
||||
// older than a hour; while it is cheap to handle If-Modified-Since queries
|
||||
// server-side, on the client `max-age=0, must-revalidate` causes every resource
|
||||
// to block the page load every time
|
||||
w.Header().Set("Cache-Control", "max-age=60, stale-while-revalidate=3600")
|
||||
// see https://web.dev/articles/stale-while-revalidate for details
|
||||
if _, hasCacheControl := w.Header()["Cache-Control"]; !hasCacheControl {
|
||||
// consider content fresh for 60 seconds (the same as the freshness interval of
|
||||
// manifests in the S3 backend), and use stale content anyway as long as it's not
|
||||
// older than a hour; while it is cheap to handle If-Modified-Since queries
|
||||
// server-side, on the client `max-age=0, must-revalidate` causes every resource
|
||||
// to block the page load every time
|
||||
w.Header().Set("Cache-Control", "max-age=60, stale-while-revalidate=3600")
|
||||
// see https://web.dev/articles/stale-while-revalidate for details
|
||||
}
|
||||
|
||||
// http.ServeContent handles conditional requests and range requests
|
||||
http.ServeContent(w, r, entryPath, mtime, reader)
|
||||
@@ -383,27 +470,39 @@ func getPage(w http.ResponseWriter, r *http.Request) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkDryRun(w http.ResponseWriter, r *http.Request) bool {
|
||||
// "Dry run" requests are used to non-destructively check if the request would have
|
||||
// successfully been authorized.
|
||||
if r.Header.Get("Dry-Run") != "" {
|
||||
fmt.Fprintln(w, "dry-run ok")
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func putPage(w http.ResponseWriter, r *http.Request) error {
|
||||
var result UpdateResult
|
||||
|
||||
host, err := GetHost(r)
|
||||
for _, header := range []string{
|
||||
"If-Modified-Since", "If-Unmodified-Since", "If-Match", "If-None-Match",
|
||||
} {
|
||||
if r.Header.Get(header) != "" {
|
||||
http.Error(w, fmt.Sprintf("unsupported precondition %s", header), http.StatusBadRequest)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
webRoot, err := getWebRoot(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
projectName, err := GetProjectName(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
webRoot := makeWebRoot(host, projectName)
|
||||
|
||||
updateCtx, cancel := context.WithTimeout(r.Context(), time.Duration(config.Limits.UpdateTimeout))
|
||||
ctx, cancel := context.WithTimeout(r.Context(), time.Duration(config.Limits.UpdateTimeout))
|
||||
defer cancel()
|
||||
|
||||
contentType := getMediaType(r.Header.Get("Content-Type"))
|
||||
|
||||
if contentType == "application/x-www-form-urlencoded" {
|
||||
switch contentType {
|
||||
case "application/x-www-form-urlencoded":
|
||||
auth, err := AuthorizeUpdateFromRepository(r)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -428,26 +527,146 @@ func putPage(w http.ResponseWriter, r *http.Request) error {
|
||||
return err
|
||||
}
|
||||
|
||||
result = UpdateFromRepository(updateCtx, webRoot, repoURL, branch)
|
||||
} else {
|
||||
_, err := AuthorizeUpdateFromArchive(r)
|
||||
if checkDryRun(w, r) {
|
||||
return nil
|
||||
}
|
||||
|
||||
result = UpdateFromRepository(ctx, webRoot, repoURL, branch)
|
||||
|
||||
default:
|
||||
auth, err := AuthorizeUpdateFromArchive(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
principal := GetPrincipal(r.Context())
|
||||
copyForgeAuthToPrincipal(principal, auth)
|
||||
|
||||
repoURL := auth.ForgeRepoURL()
|
||||
|
||||
if checkDryRun(w, r) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// request body contains archive
|
||||
reader := http.MaxBytesReader(w, r.Body, int64(config.Limits.MaxSiteSize.Bytes()))
|
||||
result = UpdateFromArchive(updateCtx, webRoot, contentType, reader)
|
||||
result = UpdateFromArchive(ctx, webRoot, repoURL, contentType, reader)
|
||||
}
|
||||
|
||||
return reportUpdateResult(w, r, result)
|
||||
}
|
||||
|
||||
func patchPage(w http.ResponseWriter, r *http.Request) error {
|
||||
for _, header := range []string{
|
||||
"If-Modified-Since", "If-Unmodified-Since", "If-Match", "If-None-Match",
|
||||
} {
|
||||
if r.Header.Get(header) != "" {
|
||||
http.Error(w, fmt.Sprintf("unsupported precondition %s", header), http.StatusBadRequest)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
webRoot, err := getWebRoot(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
auth, err := AuthorizeUpdateFromArchive(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
principal := GetPrincipal(r.Context())
|
||||
copyForgeAuthToPrincipal(principal, auth)
|
||||
|
||||
if checkDryRun(w, r) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Providing atomic compare-and-swap operations might be difficult or impossible depending
|
||||
// on the backend in use and its configuration, but for applications where a mostly-atomic
|
||||
// compare-and-swap operation is good enough (e.g. generating page previews) we don't want
|
||||
// to prevent the use of partial updates.
|
||||
wantAtomicCAS := r.Header.Get("Atomic")
|
||||
hasAtomicCAS := backend.HasAtomicCAS(r.Context())
|
||||
switch {
|
||||
case wantAtomicCAS == "yes" && hasAtomicCAS || wantAtomicCAS == "no":
|
||||
// all good
|
||||
case wantAtomicCAS == "yes":
|
||||
http.Error(w, "atomic partial updates unsupported", http.StatusPreconditionFailed)
|
||||
return nil
|
||||
case wantAtomicCAS == "":
|
||||
http.Error(w, "must provide \"Atomic: yes|no\" header", http.StatusPreconditionRequired)
|
||||
return nil
|
||||
default:
|
||||
http.Error(w, "malformed Atomic: header", http.StatusBadRequest)
|
||||
return nil
|
||||
}
|
||||
|
||||
var parents CreateParentsMode
|
||||
switch r.Header.Get("Create-Parents") {
|
||||
case "", "no":
|
||||
parents = RequireParents
|
||||
case "yes":
|
||||
parents = CreateParents
|
||||
default:
|
||||
http.Error(w, "malformed Create-Parents: header", http.StatusBadRequest)
|
||||
return nil
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(r.Context(), time.Duration(config.Limits.UpdateTimeout))
|
||||
defer cancel()
|
||||
|
||||
contentType := getMediaType(r.Header.Get("Content-Type"))
|
||||
reader := http.MaxBytesReader(w, r.Body, int64(config.Limits.MaxSiteSize.Bytes()))
|
||||
result := PartialUpdateFromArchive(ctx, webRoot, contentType, reader, parents)
|
||||
return reportUpdateResult(w, r, result)
|
||||
}
|
||||
|
||||
func reportUpdateResult(w http.ResponseWriter, r *http.Request, result UpdateResult) error {
|
||||
var unresolvedRefErr UnresolvedRefError
|
||||
if result.outcome == UpdateError && errors.As(result.err, &unresolvedRefErr) {
|
||||
offeredContentTypes := []string{"text/plain", "application/vnd.git-pages.unresolved"}
|
||||
acceptedContentTypes := ParseAcceptHeader(r.Header.Get("Accept"))
|
||||
switch acceptedContentTypes.Negotiate(offeredContentTypes...) {
|
||||
default:
|
||||
w.Header().Set("Accept", strings.Join(offeredContentTypes, ", "))
|
||||
w.WriteHeader(http.StatusNotAcceptable)
|
||||
return fmt.Errorf("no supported content types (Accept: %s)", r.Header.Get("Accept"))
|
||||
case "application/vnd.git-pages.unresolved":
|
||||
w.Header().Set("Content-Type", "application/vnd.git-pages.unresolved")
|
||||
w.WriteHeader(http.StatusUnprocessableEntity)
|
||||
for _, missingRef := range unresolvedRefErr.missing {
|
||||
fmt.Fprintln(w, missingRef)
|
||||
}
|
||||
return nil
|
||||
case "text/plain":
|
||||
// handled below
|
||||
}
|
||||
}
|
||||
|
||||
switch result.outcome {
|
||||
case UpdateError:
|
||||
if errors.Is(result.err, ErrManifestTooLarge) {
|
||||
w.WriteHeader(http.StatusRequestEntityTooLarge)
|
||||
if errors.Is(result.err, ErrSiteTooLarge) {
|
||||
w.WriteHeader(http.StatusUnprocessableEntity)
|
||||
} else if errors.Is(result.err, ErrManifestTooLarge) {
|
||||
w.WriteHeader(http.StatusUnprocessableEntity)
|
||||
} else if errors.Is(result.err, errArchiveFormat) {
|
||||
w.WriteHeader(http.StatusUnsupportedMediaType)
|
||||
} else if errors.Is(result.err, ErrArchiveTooLarge) {
|
||||
w.WriteHeader(http.StatusRequestEntityTooLarge)
|
||||
} else if errors.Is(result.err, ErrRepositoryTooLarge) {
|
||||
w.WriteHeader(http.StatusUnprocessableEntity)
|
||||
} else if errors.Is(result.err, ErrMalformedPatch) {
|
||||
w.WriteHeader(http.StatusUnprocessableEntity)
|
||||
} else if errors.Is(result.err, ErrPreconditionFailed) {
|
||||
w.WriteHeader(http.StatusPreconditionFailed)
|
||||
} else if errors.Is(result.err, ErrWriteConflict) {
|
||||
w.WriteHeader(http.StatusConflict)
|
||||
} else if errors.Is(result.err, ErrDomainFrozen) {
|
||||
w.WriteHeader(http.StatusForbidden)
|
||||
} else if errors.As(result.err, &unresolvedRefErr) {
|
||||
w.WriteHeader(http.StatusUnprocessableEntity)
|
||||
} else {
|
||||
w.WriteHeader(http.StatusServiceUnavailable)
|
||||
}
|
||||
@@ -476,61 +695,55 @@ func putPage(w http.ResponseWriter, r *http.Request) error {
|
||||
} else {
|
||||
fmt.Fprintln(w, "internal error")
|
||||
}
|
||||
reportSiteUpdate("rest", &result)
|
||||
observeSiteUpdate("rest", &result)
|
||||
return nil
|
||||
}
|
||||
|
||||
func deletePage(w http.ResponseWriter, r *http.Request) error {
|
||||
_, err := AuthorizeUpdateFromRepository(r)
|
||||
webRoot, err := getWebRoot(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
host, err := GetHost(r)
|
||||
auth, err := AuthorizeDeletion(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
projectName, err := GetProjectName(r)
|
||||
if err != nil {
|
||||
return err
|
||||
principal := GetPrincipal(r.Context())
|
||||
copyForgeAuthToPrincipal(principal, auth)
|
||||
|
||||
if checkDryRun(w, r) {
|
||||
return nil
|
||||
}
|
||||
|
||||
err = backend.DeleteManifest(r.Context(), makeWebRoot(host, projectName))
|
||||
if err != nil {
|
||||
if err = backend.DeleteManifest(r.Context(), webRoot, ModifyManifestOptions{}); err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
fmt.Fprintln(w, err)
|
||||
} else {
|
||||
w.Header().Add("Update-Result", "deleted")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Fprintln(w, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func postPage(w http.ResponseWriter, r *http.Request) error {
|
||||
// Start a timer for the request timeout immediately.
|
||||
// The HTTP requests for webhook delivery usually have a short timeout. We start the timer
|
||||
// before doing any time-consuming work so that it's closely aligned to the client's timeout and
|
||||
// we can respond before the webhook delivery is considered failed.
|
||||
requestTimeout := 3 * time.Second
|
||||
requestTimer := time.NewTimer(requestTimeout)
|
||||
|
||||
webRoot, err := getWebRoot(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
auth, err := AuthorizeUpdateFromRepository(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
host, err := GetHost(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
projectName, err := GetProjectName(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
webRoot := makeWebRoot(host, projectName)
|
||||
|
||||
eventName := ""
|
||||
for _, header := range []string{
|
||||
"X-Forgejo-Event",
|
||||
@@ -578,7 +791,7 @@ func postPage(w http.ResponseWriter, r *http.Request) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if event.Ref != fmt.Sprintf("refs/heads/%s", auth.branch) {
|
||||
if event.Ref != path.Join("refs", "heads", auth.branch) {
|
||||
code := http.StatusUnauthorized
|
||||
if strings.Contains(r.Header.Get("User-Agent"), "GitHub-Hookshot") {
|
||||
// GitHub has no way to restrict branches for a webhook, and responding with 401
|
||||
@@ -596,6 +809,10 @@ func postPage(w http.ResponseWriter, r *http.Request) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if checkDryRun(w, r) {
|
||||
return nil
|
||||
}
|
||||
|
||||
resultChan := make(chan UpdateResult)
|
||||
go func(ctx context.Context) {
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Duration(config.Limits.UpdateTimeout))
|
||||
@@ -603,8 +820,8 @@ func postPage(w http.ResponseWriter, r *http.Request) error {
|
||||
|
||||
result := UpdateFromRepository(ctx, webRoot, repoURL, auth.branch)
|
||||
resultChan <- result
|
||||
reportSiteUpdate("webhook", &result)
|
||||
}(context.Background())
|
||||
observeSiteUpdate("webhook", &result)
|
||||
}(context.WithoutCancel(r.Context()))
|
||||
|
||||
var result UpdateResult
|
||||
select {
|
||||
@@ -623,16 +840,12 @@ func postPage(w http.ResponseWriter, r *http.Request) error {
|
||||
w.WriteHeader(http.StatusGatewayTimeout)
|
||||
fmt.Fprintln(w, "update timeout")
|
||||
case UpdateNoChange:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintln(w, "unchanged")
|
||||
case UpdateCreated:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintln(w, "created")
|
||||
case UpdateReplaced:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintln(w, "replaced")
|
||||
case UpdateDeleted:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintln(w, "deleted")
|
||||
}
|
||||
if result.manifest != nil {
|
||||
@@ -648,39 +861,49 @@ func postPage(w http.ResponseWriter, r *http.Request) error {
|
||||
}
|
||||
|
||||
func ServePages(w http.ResponseWriter, r *http.Request) {
|
||||
// We want upstream health checks to be done as closely to the normal flow as possible;
|
||||
// any intentional deviation is an opportunity to miss an issue that will affect our
|
||||
// visitors but not our health checks.
|
||||
if r.Header.Get("Health-Check") == "" {
|
||||
log.Println("pages:", r.Method, r.Host, r.URL, r.Header.Get("Content-Type"))
|
||||
if region := os.Getenv("FLY_REGION"); region != "" {
|
||||
machine_id := os.Getenv("FLY_MACHINE_ID")
|
||||
w.Header().Add("Server", fmt.Sprintf("git-pages (fly.io; %s; %s)", region, machine_id))
|
||||
ObserveData(r.Context(), "server.name", machine_id, "server.region", region)
|
||||
} else if hostname, err := os.Hostname(); err == nil {
|
||||
if region := os.Getenv("PAGES_REGION"); region != "" {
|
||||
w.Header().Add("Server", fmt.Sprintf("git-pages (%s; %s)", region, hostname))
|
||||
ObserveData(r.Context(), "server.name", hostname, "server.region", region)
|
||||
} else {
|
||||
w.Header().Add("Server", fmt.Sprintf("git-pages (%s)", hostname))
|
||||
ObserveData(r.Context(), "server.name", hostname)
|
||||
}
|
||||
r = r.WithContext(WithPrincipal(r.Context()))
|
||||
if config.Audit.IncludeIPs != "" {
|
||||
GetPrincipal(r.Context()).IpAddress = proto.String(r.RemoteAddr)
|
||||
}
|
||||
switch r.Method {
|
||||
case "PUT", "PATCH", "POST":
|
||||
mediaType := r.Header.Get("Content-Type")
|
||||
logc.Println(r.Context(), "pages:", r.Method, r.Host, r.URL, mediaType)
|
||||
default:
|
||||
logc.Println(r.Context(), "pages:", r.Method, r.Host, r.URL)
|
||||
}
|
||||
if hostname, err := os.Hostname(); err == nil {
|
||||
if region := os.Getenv("PAGES_REGION"); region != "" {
|
||||
w.Header().Add("Server", fmt.Sprintf("git-pages (%s; %s)", region, hostname))
|
||||
ObserveData(r.Context(), "server.name", hostname, "server.region", region)
|
||||
} else {
|
||||
w.Header().Add("Server", fmt.Sprintf("git-pages (%s)", hostname))
|
||||
ObserveData(r.Context(), "server.name", hostname)
|
||||
}
|
||||
} else {
|
||||
w.Header().Add("Server", "git-pages")
|
||||
}
|
||||
allowedMethods := []string{"OPTIONS", "HEAD", "GET", "PUT", "PATCH", "DELETE", "POST"}
|
||||
if r.Method == "OPTIONS" || !slices.Contains(allowedMethods, r.Method) {
|
||||
w.Header().Add("Allow", strings.Join(allowedMethods, ", "))
|
||||
}
|
||||
err := error(nil)
|
||||
switch r.Method {
|
||||
// REST API
|
||||
case http.MethodHead, http.MethodGet:
|
||||
case "OPTIONS":
|
||||
// no preflight options
|
||||
case "HEAD", "GET":
|
||||
err = getPage(w, r)
|
||||
case http.MethodPut:
|
||||
case "PUT":
|
||||
err = putPage(w, r)
|
||||
case http.MethodDelete:
|
||||
case "PATCH":
|
||||
err = patchPage(w, r)
|
||||
case "DELETE":
|
||||
err = deletePage(w, r)
|
||||
// webhook API
|
||||
case http.MethodPost:
|
||||
case "POST":
|
||||
err = postPage(w, r)
|
||||
default:
|
||||
w.Header().Add("Allow", "HEAD, GET, PUT, DELETE, POST")
|
||||
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
|
||||
err = fmt.Errorf("method %s not allowed", r.Method)
|
||||
}
|
||||
@@ -695,6 +918,6 @@ func ServePages(w http.ResponseWriter, r *http.Request) {
|
||||
http.Error(w, message, http.StatusRequestEntityTooLarge)
|
||||
err = errors.New(message)
|
||||
}
|
||||
log.Println("pages err:", err)
|
||||
logc.Println(r.Context(), "pages err:", err)
|
||||
}
|
||||
}
|
||||
|
||||
55
src/pages_test.go
Normal file
55
src/pages_test.go
Normal file
@@ -0,0 +1,55 @@
|
||||
package git_pages
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func checkHost(t *testing.T, host string, expectOk string, expectErr string) {
|
||||
host, err := GetHost(&http.Request{Host: host})
|
||||
if expectErr != "" {
|
||||
if err == nil || !strings.HasPrefix(err.Error(), expectErr) {
|
||||
t.Errorf("%s: expect err %s, got err %s", host, expectErr, err)
|
||||
}
|
||||
}
|
||||
if expectOk != "" {
|
||||
if err != nil {
|
||||
t.Errorf("%s: expect ok %s, got err %s", host, expectOk, err)
|
||||
} else if host != expectOk {
|
||||
t.Errorf("%s: expect ok %s, got ok %s", host, expectOk, host)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHelloName(t *testing.T) {
|
||||
config = &Config{Features: []string{}}
|
||||
|
||||
checkHost(t, "foo.bar", "foo.bar", "")
|
||||
checkHost(t, "foo-baz.bar", "foo-baz.bar", "")
|
||||
checkHost(t, "foo--baz.bar", "foo--baz.bar", "")
|
||||
checkHost(t, "foo.bar.", "foo.bar", "")
|
||||
checkHost(t, ".foo.bar", "", "reserved host name")
|
||||
checkHost(t, "..foo.bar", "", "reserved host name")
|
||||
|
||||
checkHost(t, "ß.bar", "xn--zca.bar", "")
|
||||
checkHost(t, "xn--zca.bar", "xn--zca.bar", "")
|
||||
|
||||
checkHost(t, "foo-.bar", "", "malformed host name")
|
||||
checkHost(t, "-foo.bar", "", "malformed host name")
|
||||
checkHost(t, "foo_.bar", "", "malformed host name")
|
||||
checkHost(t, "_foo.bar", "", "malformed host name")
|
||||
checkHost(t, "foo_baz.bar", "", "malformed host name")
|
||||
checkHost(t, "foo__baz.bar", "", "malformed host name")
|
||||
checkHost(t, "*.foo.bar", "", "malformed host name")
|
||||
|
||||
config = &Config{Features: []string{"relaxed-idna"}}
|
||||
|
||||
checkHost(t, "foo-.bar", "", "malformed host name")
|
||||
checkHost(t, "-foo.bar", "", "malformed host name")
|
||||
checkHost(t, "foo_.bar", "foo_.bar", "")
|
||||
checkHost(t, "_foo.bar", "", "reserved host name")
|
||||
checkHost(t, "foo_baz.bar", "foo_baz.bar", "")
|
||||
checkHost(t, "foo__baz.bar", "foo__baz.bar", "")
|
||||
checkHost(t, "*.foo.bar", "", "malformed host name")
|
||||
}
|
||||
158
src/patch.go
Normal file
158
src/patch.go
Normal file
@@ -0,0 +1,158 @@
|
||||
package git_pages
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"maps"
|
||||
"slices"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var ErrMalformedPatch = errors.New("malformed patch")
|
||||
|
||||
type CreateParentsMode int
|
||||
|
||||
const (
|
||||
RequireParents CreateParentsMode = iota
|
||||
CreateParents
|
||||
)
|
||||
|
||||
// Mutates `manifest` according to a tar stream and the following rules:
|
||||
// - A character device with major 0 and minor 0 is a "whiteout marker". When placed
|
||||
// at a given path, this path and its entire subtree (if any) are removed from the manifest.
|
||||
// - When a directory is placed at a given path, this path and its entire subtree (if any) are
|
||||
// removed from the manifest and replaced with the contents of the directory.
|
||||
func ApplyTarPatch(manifest *Manifest, reader io.Reader, parents CreateParentsMode) error {
|
||||
type Node struct {
|
||||
entry *Entry
|
||||
children map[string]*Node
|
||||
}
|
||||
|
||||
// Index the manifest for incremental update operations.
|
||||
index := IndexManifestByGitHash(manifest)
|
||||
missing := []string{}
|
||||
|
||||
// Extract the manifest contents (which is using a flat hash map) into a directory tree
|
||||
// so that recursive delete operations have O(1) complexity.
|
||||
var root *Node
|
||||
sortedNames := slices.Sorted(maps.Keys(manifest.GetContents()))
|
||||
for _, name := range sortedNames {
|
||||
entry := manifest.Contents[name]
|
||||
node := &Node{entry: entry}
|
||||
if entry.GetType() == Type_Directory {
|
||||
node.children = map[string]*Node{}
|
||||
}
|
||||
if name == "" {
|
||||
root = node
|
||||
} else {
|
||||
segments := strings.Split(name, "/")
|
||||
fileName := segments[len(segments)-1]
|
||||
iter := root
|
||||
for _, segment := range segments[:len(segments)-1] {
|
||||
if iter.children == nil {
|
||||
panic("malformed manifest (not a directory)")
|
||||
} else if _, exists := iter.children[segment]; !exists {
|
||||
panic("malformed manifest (node does not exist)")
|
||||
} else {
|
||||
iter = iter.children[segment]
|
||||
}
|
||||
}
|
||||
iter.children[fileName] = node
|
||||
}
|
||||
}
|
||||
manifest.Contents = map[string]*Entry{}
|
||||
|
||||
// Process the archive as a patch operation.
|
||||
archive := tar.NewReader(reader)
|
||||
for {
|
||||
header, err := archive.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
segments := strings.Split(normalizeArchiveMemberName(header.Name), "/")
|
||||
fileName := segments[len(segments)-1]
|
||||
node := root
|
||||
for index, segment := range segments[:len(segments)-1] {
|
||||
if node.children == nil {
|
||||
dirName := strings.Join(segments[:index], "/")
|
||||
return fmt.Errorf("%w: %s: not a directory", ErrMalformedPatch, dirName)
|
||||
}
|
||||
if _, exists := node.children[segment]; !exists {
|
||||
switch parents {
|
||||
case RequireParents:
|
||||
nodeName := strings.Join(segments[:index+1], "/")
|
||||
return fmt.Errorf("%w: %s: path not found", ErrMalformedPatch, nodeName)
|
||||
case CreateParents:
|
||||
node.children[segment] = &Node{
|
||||
entry: NewManifestEntry(Type_Directory, nil),
|
||||
children: map[string]*Node{},
|
||||
}
|
||||
}
|
||||
}
|
||||
node = node.children[segment]
|
||||
}
|
||||
if node.children == nil {
|
||||
dirName := strings.Join(segments[:len(segments)-1], "/")
|
||||
return fmt.Errorf("%w: %s: not a directory", ErrMalformedPatch, dirName)
|
||||
}
|
||||
|
||||
switch header.Typeflag {
|
||||
case tar.TypeReg:
|
||||
fileData, err := io.ReadAll(archive)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tar: %s: %w", header.Name, err)
|
||||
}
|
||||
node.children[fileName] = &Node{
|
||||
entry: NewManifestEntry(Type_InlineFile, fileData),
|
||||
}
|
||||
case tar.TypeSymlink:
|
||||
if hash, found := strings.CutPrefix(header.Linkname, BlobReferencePrefix); found {
|
||||
if entry, found := index[hash]; found {
|
||||
node.children[fileName] = &Node{entry: entry}
|
||||
} else {
|
||||
missing = append(missing, hash)
|
||||
}
|
||||
} else {
|
||||
node.children[fileName] = &Node{
|
||||
entry: NewManifestEntry(Type_Symlink, []byte(header.Linkname)),
|
||||
}
|
||||
}
|
||||
case tar.TypeDir:
|
||||
node.children[fileName] = &Node{
|
||||
entry: NewManifestEntry(Type_Directory, nil),
|
||||
children: map[string]*Node{},
|
||||
}
|
||||
case tar.TypeChar:
|
||||
if header.Devmajor == 0 && header.Devminor == 0 {
|
||||
delete(node.children, fileName)
|
||||
} else {
|
||||
AddProblem(manifest, header.Name,
|
||||
"tar: unsupported chardev %d,%d", header.Devmajor, header.Devminor)
|
||||
}
|
||||
default:
|
||||
AddProblem(manifest, header.Name,
|
||||
"tar: unsupported type '%c'", header.Typeflag)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if len(missing) > 0 {
|
||||
return UnresolvedRefError{missing}
|
||||
}
|
||||
|
||||
// Repopulate manifest contents with the updated directory tree.
|
||||
var traverse func([]string, *Node)
|
||||
traverse = func(segments []string, node *Node) {
|
||||
manifest.Contents[strings.Join(segments, "/")] = node.entry
|
||||
for fileName, childNode := range node.children {
|
||||
traverse(append(segments, fileName), childNode)
|
||||
}
|
||||
}
|
||||
traverse([]string{}, root)
|
||||
return nil
|
||||
}
|
||||
@@ -1,6 +1,8 @@
|
||||
package git_pages
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -13,7 +15,17 @@ import (
|
||||
|
||||
const RedirectsFileName string = "_redirects"
|
||||
|
||||
func unparseRule(rule redirects.Rule) string {
|
||||
// Converts our Protobuf representation to tj/go-redirects.
|
||||
func exportRedirectRule(rule *RedirectRule) *redirects.Rule {
|
||||
return &redirects.Rule{
|
||||
From: rule.GetFrom(),
|
||||
To: rule.GetTo(),
|
||||
Status: int(rule.GetStatus()),
|
||||
Force: rule.GetForce(),
|
||||
}
|
||||
}
|
||||
|
||||
func unparseRedirectRule(rule *redirects.Rule) string {
|
||||
var statusPart string
|
||||
if rule.Force {
|
||||
statusPart = fmt.Sprintf("%d!", rule.Status)
|
||||
@@ -49,7 +61,7 @@ func Is3xxHTTPStatus(status int) bool {
|
||||
return status >= 300 && status <= 399
|
||||
}
|
||||
|
||||
func validateRedirectRule(rule redirects.Rule) error {
|
||||
func validateRedirectRule(rule *redirects.Rule) error {
|
||||
if len(rule.Params) > 0 {
|
||||
return fmt.Errorf("rules with parameters are not supported")
|
||||
}
|
||||
@@ -86,26 +98,31 @@ func validateRedirectRule(rule redirects.Rule) error {
|
||||
}
|
||||
|
||||
// Parses redirects file and injects rules into the manifest.
|
||||
func ProcessRedirectsFile(manifest *Manifest) error {
|
||||
func ProcessRedirectsFile(ctx context.Context, manifest *Manifest) error {
|
||||
redirectsEntry := manifest.Contents[RedirectsFileName]
|
||||
delete(manifest.Contents, RedirectsFileName)
|
||||
if redirectsEntry == nil {
|
||||
return nil
|
||||
} else if redirectsEntry.GetType() != Type_InlineFile {
|
||||
return AddProblem(manifest, RedirectsFileName,
|
||||
"not a regular file")
|
||||
}
|
||||
|
||||
rules, err := redirects.ParseString(string(redirectsEntry.GetData()))
|
||||
data, err := GetEntryContents(ctx, redirectsEntry)
|
||||
if errors.Is(err, ErrNotRegularFile) {
|
||||
return AddProblem(manifest, RedirectsFileName,
|
||||
"not a regular file")
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rules, err := redirects.ParseString(string(data))
|
||||
if err != nil {
|
||||
return AddProblem(manifest, RedirectsFileName,
|
||||
"syntax error: %s", err)
|
||||
}
|
||||
|
||||
for index, rule := range rules {
|
||||
if err := validateRedirectRule(rule); err != nil {
|
||||
if err := validateRedirectRule(&rule); err != nil {
|
||||
AddProblem(manifest, RedirectsFileName,
|
||||
"rule #%d %q: %s", index+1, unparseRule(rule), err)
|
||||
"rule #%d %q: %s", index+1, unparseRedirectRule(&rule), err)
|
||||
continue
|
||||
}
|
||||
manifest.Redirects = append(manifest.Redirects, &RedirectRule{
|
||||
@@ -121,12 +138,7 @@ func ProcessRedirectsFile(manifest *Manifest) error {
|
||||
func CollectRedirectsFile(manifest *Manifest) string {
|
||||
var rules []string
|
||||
for _, rule := range manifest.GetRedirects() {
|
||||
rules = append(rules, unparseRule(redirects.Rule{
|
||||
From: rule.GetFrom(),
|
||||
To: rule.GetTo(),
|
||||
Status: int(rule.GetStatus()),
|
||||
Force: rule.GetForce(),
|
||||
})+"\n")
|
||||
rules = append(rules, unparseRedirectRule(exportRedirectRule(rule))+"\n")
|
||||
}
|
||||
return strings.Join(rules, "")
|
||||
}
|
||||
@@ -147,18 +159,22 @@ type RedirectKind int
|
||||
|
||||
const (
|
||||
RedirectAny RedirectKind = iota
|
||||
RedirectNormal
|
||||
RedirectForce
|
||||
)
|
||||
|
||||
func ApplyRedirectRules(
|
||||
manifest *Manifest, fromURL *url.URL, kind RedirectKind,
|
||||
) (
|
||||
toURL *url.URL, status int,
|
||||
rule *RedirectRule, toURL *url.URL, status int,
|
||||
) {
|
||||
fromSegments := pathSegments(fromURL.Path)
|
||||
next:
|
||||
for _, rule := range manifest.Redirects {
|
||||
if kind == RedirectForce && !*rule.Force {
|
||||
for _, rule = range manifest.Redirects {
|
||||
switch {
|
||||
case kind == RedirectNormal && *rule.Force:
|
||||
continue
|
||||
case kind == RedirectForce && !*rule.Force:
|
||||
continue
|
||||
}
|
||||
// check if the rule matches fromURL
|
||||
@@ -205,8 +221,43 @@ next:
|
||||
RawQuery: fromURL.RawQuery,
|
||||
}
|
||||
status = int(*rule.Status)
|
||||
break
|
||||
return
|
||||
}
|
||||
// no redirect found
|
||||
rule = nil
|
||||
return
|
||||
}
|
||||
|
||||
func redirectHasSplat(rule *RedirectRule) bool {
|
||||
ruleFromURL, _ := url.Parse(*rule.From) // pre-validated in `validateRedirectRule`
|
||||
ruleFromSegments := pathSegments(ruleFromURL.Path)
|
||||
return slices.Contains(ruleFromSegments, "*")
|
||||
}
|
||||
|
||||
func LintRedirects(manifest *Manifest) {
|
||||
for name, entry := range manifest.GetContents() {
|
||||
nameURL, err := url.Parse("/" + name)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if the entry URL would trigger a non-forced redirect if the entry didn't exist.
|
||||
// If the redirect matches exactly one URL (i.e. has no splat) then it will never be
|
||||
// triggered and an issue is reported; if the rule has a splat, it will always be possible
|
||||
// to trigger it, as it matches an infinite number of URLs.
|
||||
rule, _, _ := ApplyRedirectRules(manifest, nameURL, RedirectNormal)
|
||||
if rule != nil && !redirectHasSplat(rule) {
|
||||
entryDesc := "file"
|
||||
if entry.GetType() == Type_Directory {
|
||||
entryDesc = "directory"
|
||||
}
|
||||
AddProblem(manifest, name,
|
||||
"%s shadows redirect %q; remove the %s or use a %d! forced redirect instead",
|
||||
entryDesc,
|
||||
unparseRedirectRule(exportRedirectRule(rule)),
|
||||
entryDesc,
|
||||
rule.GetStatus(),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
619
src/schema.pb.go
619
src/schema.pb.go
@@ -9,6 +9,7 @@ package git_pages
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
unsafe "unsafe"
|
||||
@@ -25,7 +26,7 @@ type Type int32
|
||||
|
||||
const (
|
||||
// Invalid entry.
|
||||
Type_Invalid Type = 0
|
||||
Type_InvalidEntry Type = 0
|
||||
// Directory.
|
||||
Type_Directory Type = 1
|
||||
// Inline file. `Blob.Data` contains file contents.
|
||||
@@ -39,14 +40,14 @@ const (
|
||||
// Enum value maps for Type.
|
||||
var (
|
||||
Type_name = map[int32]string{
|
||||
0: "Invalid",
|
||||
0: "InvalidEntry",
|
||||
1: "Directory",
|
||||
2: "InlineFile",
|
||||
3: "ExternalFile",
|
||||
4: "Symlink",
|
||||
}
|
||||
Type_value = map[string]int32{
|
||||
"Invalid": 0,
|
||||
"InvalidEntry": 0,
|
||||
"Directory": 1,
|
||||
"InlineFile": 2,
|
||||
"ExternalFile": 3,
|
||||
@@ -81,24 +82,25 @@ func (Type) EnumDescriptor() ([]byte, []int) {
|
||||
return file_schema_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
// Transformation names should match HTTP `Accept-Encoding:` header.
|
||||
type Transform int32
|
||||
|
||||
const (
|
||||
// No transformation.
|
||||
Transform_None Transform = 0
|
||||
Transform_Identity Transform = 0
|
||||
// Zstandard compression.
|
||||
Transform_Zstandard Transform = 1
|
||||
Transform_Zstd Transform = 1
|
||||
)
|
||||
|
||||
// Enum value maps for Transform.
|
||||
var (
|
||||
Transform_name = map[int32]string{
|
||||
0: "None",
|
||||
1: "Zstandard",
|
||||
0: "Identity",
|
||||
1: "Zstd",
|
||||
}
|
||||
Transform_value = map[string]int32{
|
||||
"None": 0,
|
||||
"Zstandard": 1,
|
||||
"Identity": 0,
|
||||
"Zstd": 1,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -129,12 +131,77 @@ func (Transform) EnumDescriptor() ([]byte, []int) {
|
||||
return file_schema_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
type AuditEvent int32
|
||||
|
||||
const (
|
||||
// Invalid event.
|
||||
AuditEvent_InvalidEvent AuditEvent = 0
|
||||
// A manifest was committed (a site was created or updated).
|
||||
AuditEvent_CommitManifest AuditEvent = 1
|
||||
// A manifest was deleted (a site was deleted).
|
||||
AuditEvent_DeleteManifest AuditEvent = 2
|
||||
// A domain was frozen.
|
||||
AuditEvent_FreezeDomain AuditEvent = 3
|
||||
// A domain was thawed.
|
||||
AuditEvent_UnfreezeDomain AuditEvent = 4
|
||||
)
|
||||
|
||||
// Enum value maps for AuditEvent.
|
||||
var (
|
||||
AuditEvent_name = map[int32]string{
|
||||
0: "InvalidEvent",
|
||||
1: "CommitManifest",
|
||||
2: "DeleteManifest",
|
||||
3: "FreezeDomain",
|
||||
4: "UnfreezeDomain",
|
||||
}
|
||||
AuditEvent_value = map[string]int32{
|
||||
"InvalidEvent": 0,
|
||||
"CommitManifest": 1,
|
||||
"DeleteManifest": 2,
|
||||
"FreezeDomain": 3,
|
||||
"UnfreezeDomain": 4,
|
||||
}
|
||||
)
|
||||
|
||||
func (x AuditEvent) Enum() *AuditEvent {
|
||||
p := new(AuditEvent)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x AuditEvent) String() string {
|
||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
||||
}
|
||||
|
||||
func (AuditEvent) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_schema_proto_enumTypes[2].Descriptor()
|
||||
}
|
||||
|
||||
func (AuditEvent) Type() protoreflect.EnumType {
|
||||
return &file_schema_proto_enumTypes[2]
|
||||
}
|
||||
|
||||
func (x AuditEvent) Number() protoreflect.EnumNumber {
|
||||
return protoreflect.EnumNumber(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use AuditEvent.Descriptor instead.
|
||||
func (AuditEvent) EnumDescriptor() ([]byte, []int) {
|
||||
return file_schema_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
type Entry struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Type *Type `protobuf:"varint,1,opt,name=type,enum=Type" json:"type,omitempty"`
|
||||
// Only present for `type == InlineFile` and `type == ExternalFile`.
|
||||
// For transformed entries, refers to the post-transformation (compressed) size.
|
||||
Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
|
||||
// For transformed entries, refers to the pre-transformation (decompressed) size; otherwise
|
||||
// equal to `compressed_size`.
|
||||
OriginalSize *int64 `protobuf:"varint,7,opt,name=original_size,json=originalSize" json:"original_size,omitempty"`
|
||||
// Only present for `type == InlineFile` and `type == ExternalFile`.
|
||||
// For transformed entries, refers to the post-transformation (compressed) size; otherwise
|
||||
// equal to `original_size`.
|
||||
CompressedSize *int64 `protobuf:"varint,2,opt,name=compressed_size,json=compressedSize" json:"compressed_size,omitempty"`
|
||||
// Meaning depends on `type`:
|
||||
// - If `type == InlineFile`, contains file data.
|
||||
// - If `type == ExternalFile`, contains blob name (an otherwise unspecified
|
||||
@@ -147,7 +214,13 @@ type Entry struct {
|
||||
Transform *Transform `protobuf:"varint,4,opt,name=transform,enum=Transform" json:"transform,omitempty"`
|
||||
// Only present for `type == InlineFile` and `type == ExternalFile`.
|
||||
// Currently, optional (not present on certain legacy manifests).
|
||||
ContentType *string `protobuf:"bytes,5,opt,name=content_type,json=contentType" json:"content_type,omitempty"`
|
||||
ContentType *string `protobuf:"bytes,5,opt,name=content_type,json=contentType" json:"content_type,omitempty"`
|
||||
// May be present for `type == InlineFile` and `type == ExternalFile`.
|
||||
// Used to reduce the amount of work being done during git checkouts.
|
||||
// The type of hash used is determined by the length:
|
||||
// - 40 bytes: SHA1DC (as hex)
|
||||
// - 64 bytes: SHA256 (as hex)
|
||||
GitHash *string `protobuf:"bytes,6,opt,name=git_hash,json=gitHash" json:"git_hash,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
@@ -186,12 +259,19 @@ func (x *Entry) GetType() Type {
|
||||
if x != nil && x.Type != nil {
|
||||
return *x.Type
|
||||
}
|
||||
return Type_Invalid
|
||||
return Type_InvalidEntry
|
||||
}
|
||||
|
||||
func (x *Entry) GetSize() int64 {
|
||||
if x != nil && x.Size != nil {
|
||||
return *x.Size
|
||||
func (x *Entry) GetOriginalSize() int64 {
|
||||
if x != nil && x.OriginalSize != nil {
|
||||
return *x.OriginalSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Entry) GetCompressedSize() int64 {
|
||||
if x != nil && x.CompressedSize != nil {
|
||||
return *x.CompressedSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
@@ -207,7 +287,7 @@ func (x *Entry) GetTransform() Transform {
|
||||
if x != nil && x.Transform != nil {
|
||||
return *x.Transform
|
||||
}
|
||||
return Transform_None
|
||||
return Transform_Identity
|
||||
}
|
||||
|
||||
func (x *Entry) GetContentType() string {
|
||||
@@ -217,6 +297,13 @@ func (x *Entry) GetContentType() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Entry) GetGitHash() string {
|
||||
if x != nil && x.GitHash != nil {
|
||||
return *x.GitHash
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// See https://docs.netlify.com/manage/routing/redirects/overview/ for details.
|
||||
// Only a subset of the Netlify specification is representable here.
|
||||
type RedirectRule struct {
|
||||
@@ -392,6 +479,110 @@ func (x *HeaderRule) GetHeaderMap() []*Header {
|
||||
return nil
|
||||
}
|
||||
|
||||
type BasicCredential struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Username *string `protobuf:"bytes,1,opt,name=username" json:"username,omitempty"`
|
||||
Password *string `protobuf:"bytes,2,opt,name=password" json:"password,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *BasicCredential) Reset() {
|
||||
*x = BasicCredential{}
|
||||
mi := &file_schema_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *BasicCredential) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*BasicCredential) ProtoMessage() {}
|
||||
|
||||
func (x *BasicCredential) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_schema_proto_msgTypes[4]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use BasicCredential.ProtoReflect.Descriptor instead.
|
||||
func (*BasicCredential) Descriptor() ([]byte, []int) {
|
||||
return file_schema_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *BasicCredential) GetUsername() string {
|
||||
if x != nil && x.Username != nil {
|
||||
return *x.Username
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *BasicCredential) GetPassword() string {
|
||||
if x != nil && x.Password != nil {
|
||||
return *x.Password
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type BasicAuthRule struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Path *string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"`
|
||||
Credentials []*BasicCredential `protobuf:"bytes,2,rep,name=credentials" json:"credentials,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *BasicAuthRule) Reset() {
|
||||
*x = BasicAuthRule{}
|
||||
mi := &file_schema_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *BasicAuthRule) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*BasicAuthRule) ProtoMessage() {}
|
||||
|
||||
func (x *BasicAuthRule) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_schema_proto_msgTypes[5]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use BasicAuthRule.ProtoReflect.Descriptor instead.
|
||||
func (*BasicAuthRule) Descriptor() ([]byte, []int) {
|
||||
return file_schema_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *BasicAuthRule) GetPath() string {
|
||||
if x != nil && x.Path != nil {
|
||||
return *x.Path
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *BasicAuthRule) GetCredentials() []*BasicCredential {
|
||||
if x != nil {
|
||||
return x.Credentials
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Problem struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Path *string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"`
|
||||
@@ -402,7 +593,7 @@ type Problem struct {
|
||||
|
||||
func (x *Problem) Reset() {
|
||||
*x = Problem{}
|
||||
mi := &file_schema_proto_msgTypes[4]
|
||||
mi := &file_schema_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -414,7 +605,7 @@ func (x *Problem) String() string {
|
||||
func (*Problem) ProtoMessage() {}
|
||||
|
||||
func (x *Problem) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_schema_proto_msgTypes[4]
|
||||
mi := &file_schema_proto_msgTypes[6]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -427,7 +618,7 @@ func (x *Problem) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use Problem.ProtoReflect.Descriptor instead.
|
||||
func (*Problem) Descriptor() ([]byte, []int) {
|
||||
return file_schema_proto_rawDescGZIP(), []int{4}
|
||||
return file_schema_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
func (x *Problem) GetPath() string {
|
||||
@@ -446,19 +637,20 @@ func (x *Problem) GetCause() string {
|
||||
|
||||
type Manifest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
// Source metadata
|
||||
// Source metadata.
|
||||
RepoUrl *string `protobuf:"bytes,1,opt,name=repo_url,json=repoUrl" json:"repo_url,omitempty"`
|
||||
Branch *string `protobuf:"bytes,2,opt,name=branch" json:"branch,omitempty"`
|
||||
Commit *string `protobuf:"bytes,3,opt,name=commit" json:"commit,omitempty"`
|
||||
// Contents
|
||||
// Site contents.
|
||||
Contents map[string]*Entry `protobuf:"bytes,4,rep,name=contents" json:"contents,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
OriginalSize *int64 `protobuf:"varint,10,opt,name=original_size,json=originalSize" json:"original_size,omitempty"` // total size of entries before compression
|
||||
CompressedSize *int64 `protobuf:"varint,5,opt,name=compressed_size,json=compressedSize" json:"compressed_size,omitempty"` // simple sum of each `entry.size`
|
||||
StoredSize *int64 `protobuf:"varint,8,opt,name=stored_size,json=storedSize" json:"stored_size,omitempty"` // total size of (deduplicated) external objects
|
||||
// Netlify-style `_redirects` and `_headers`
|
||||
Redirects []*RedirectRule `protobuf:"bytes,6,rep,name=redirects" json:"redirects,omitempty"`
|
||||
Headers []*HeaderRule `protobuf:"bytes,9,rep,name=headers" json:"headers,omitempty"`
|
||||
// Diagnostics for non-fatal errors
|
||||
OriginalSize *int64 `protobuf:"varint,10,opt,name=original_size,json=originalSize" json:"original_size,omitempty"` // sum of each `entry.original_size`
|
||||
CompressedSize *int64 `protobuf:"varint,5,opt,name=compressed_size,json=compressedSize" json:"compressed_size,omitempty"` // sum of each `entry.compressed_size`
|
||||
StoredSize *int64 `protobuf:"varint,8,opt,name=stored_size,json=storedSize" json:"stored_size,omitempty"` // sum of deduplicated `entry.compressed_size` for external files only
|
||||
// Netlify-style `_redirects` and `_headers` rules.
|
||||
Redirects []*RedirectRule `protobuf:"bytes,6,rep,name=redirects" json:"redirects,omitempty"`
|
||||
Headers []*HeaderRule `protobuf:"bytes,9,rep,name=headers" json:"headers,omitempty"`
|
||||
BasicAuth []*BasicAuthRule `protobuf:"bytes,11,rep,name=basic_auth,json=basicAuth" json:"basic_auth,omitempty"`
|
||||
// Diagnostics for non-fatal errors.
|
||||
Problems []*Problem `protobuf:"bytes,7,rep,name=problems" json:"problems,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
@@ -466,7 +658,7 @@ type Manifest struct {
|
||||
|
||||
func (x *Manifest) Reset() {
|
||||
*x = Manifest{}
|
||||
mi := &file_schema_proto_msgTypes[5]
|
||||
mi := &file_schema_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -478,7 +670,7 @@ func (x *Manifest) String() string {
|
||||
func (*Manifest) ProtoMessage() {}
|
||||
|
||||
func (x *Manifest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_schema_proto_msgTypes[5]
|
||||
mi := &file_schema_proto_msgTypes[7]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -491,7 +683,7 @@ func (x *Manifest) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use Manifest.ProtoReflect.Descriptor instead.
|
||||
func (*Manifest) Descriptor() ([]byte, []int) {
|
||||
return file_schema_proto_rawDescGZIP(), []int{5}
|
||||
return file_schema_proto_rawDescGZIP(), []int{7}
|
||||
}
|
||||
|
||||
func (x *Manifest) GetRepoUrl() string {
|
||||
@@ -557,6 +749,13 @@ func (x *Manifest) GetHeaders() []*HeaderRule {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Manifest) GetBasicAuth() []*BasicAuthRule {
|
||||
if x != nil {
|
||||
return x.BasicAuth
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Manifest) GetProblems() []*Problem {
|
||||
if x != nil {
|
||||
return x.Problems
|
||||
@@ -564,18 +763,243 @@ func (x *Manifest) GetProblems() []*Problem {
|
||||
return nil
|
||||
}
|
||||
|
||||
type AuditRecord struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
// Audit event metadata.
|
||||
Id *int64 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
|
||||
Timestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=timestamp" json:"timestamp,omitempty"`
|
||||
Event *AuditEvent `protobuf:"varint,3,opt,name=event,enum=AuditEvent" json:"event,omitempty"`
|
||||
Principal *Principal `protobuf:"bytes,4,opt,name=principal" json:"principal,omitempty"`
|
||||
// Affected resource.
|
||||
Domain *string `protobuf:"bytes,10,opt,name=domain" json:"domain,omitempty"`
|
||||
Project *string `protobuf:"bytes,11,opt,name=project" json:"project,omitempty"` // only for `*Manifest` events
|
||||
// Snapshot of site manifest.
|
||||
Manifest *Manifest `protobuf:"bytes,12,opt,name=manifest" json:"manifest,omitempty"` // only for `*Manifest` events
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *AuditRecord) Reset() {
|
||||
*x = AuditRecord{}
|
||||
mi := &file_schema_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *AuditRecord) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*AuditRecord) ProtoMessage() {}
|
||||
|
||||
func (x *AuditRecord) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_schema_proto_msgTypes[8]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use AuditRecord.ProtoReflect.Descriptor instead.
|
||||
func (*AuditRecord) Descriptor() ([]byte, []int) {
|
||||
return file_schema_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
func (x *AuditRecord) GetId() int64 {
|
||||
if x != nil && x.Id != nil {
|
||||
return *x.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *AuditRecord) GetTimestamp() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.Timestamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *AuditRecord) GetEvent() AuditEvent {
|
||||
if x != nil && x.Event != nil {
|
||||
return *x.Event
|
||||
}
|
||||
return AuditEvent_InvalidEvent
|
||||
}
|
||||
|
||||
func (x *AuditRecord) GetPrincipal() *Principal {
|
||||
if x != nil {
|
||||
return x.Principal
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *AuditRecord) GetDomain() string {
|
||||
if x != nil && x.Domain != nil {
|
||||
return *x.Domain
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *AuditRecord) GetProject() string {
|
||||
if x != nil && x.Project != nil {
|
||||
return *x.Project
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *AuditRecord) GetManifest() *Manifest {
|
||||
if x != nil {
|
||||
return x.Manifest
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Principal struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
IpAddress *string `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress" json:"ip_address,omitempty"`
|
||||
CliAdmin *bool `protobuf:"varint,2,opt,name=cli_admin,json=cliAdmin" json:"cli_admin,omitempty"`
|
||||
ForgeUser *ForgeUser `protobuf:"bytes,3,opt,name=forge_user,json=forgeUser" json:"forge_user,omitempty"`
|
||||
RepoUrl *string `protobuf:"bytes,4,opt,name=repo_url,json=repoUrl" json:"repo_url,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *Principal) Reset() {
|
||||
*x = Principal{}
|
||||
mi := &file_schema_proto_msgTypes[9]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *Principal) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Principal) ProtoMessage() {}
|
||||
|
||||
func (x *Principal) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_schema_proto_msgTypes[9]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Principal.ProtoReflect.Descriptor instead.
|
||||
func (*Principal) Descriptor() ([]byte, []int) {
|
||||
return file_schema_proto_rawDescGZIP(), []int{9}
|
||||
}
|
||||
|
||||
func (x *Principal) GetIpAddress() string {
|
||||
if x != nil && x.IpAddress != nil {
|
||||
return *x.IpAddress
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Principal) GetCliAdmin() bool {
|
||||
if x != nil && x.CliAdmin != nil {
|
||||
return *x.CliAdmin
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *Principal) GetForgeUser() *ForgeUser {
|
||||
if x != nil {
|
||||
return x.ForgeUser
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Principal) GetRepoUrl() string {
|
||||
if x != nil && x.RepoUrl != nil {
|
||||
return *x.RepoUrl
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type ForgeUser struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Origin *string `protobuf:"bytes,1,opt,name=origin" json:"origin,omitempty"`
|
||||
Id *int64 `protobuf:"varint,2,opt,name=id" json:"id,omitempty"`
|
||||
Handle *string `protobuf:"bytes,3,opt,name=handle" json:"handle,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ForgeUser) Reset() {
|
||||
*x = ForgeUser{}
|
||||
mi := &file_schema_proto_msgTypes[10]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *ForgeUser) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ForgeUser) ProtoMessage() {}
|
||||
|
||||
func (x *ForgeUser) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_schema_proto_msgTypes[10]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ForgeUser.ProtoReflect.Descriptor instead.
|
||||
func (*ForgeUser) Descriptor() ([]byte, []int) {
|
||||
return file_schema_proto_rawDescGZIP(), []int{10}
|
||||
}
|
||||
|
||||
func (x *ForgeUser) GetOrigin() string {
|
||||
if x != nil && x.Origin != nil {
|
||||
return *x.Origin
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ForgeUser) GetId() int64 {
|
||||
if x != nil && x.Id != nil {
|
||||
return *x.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ForgeUser) GetHandle() string {
|
||||
if x != nil && x.Handle != nil {
|
||||
return *x.Handle
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var File_schema_proto protoreflect.FileDescriptor
|
||||
|
||||
const file_schema_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\fschema.proto\"\x97\x01\n" +
|
||||
"\fschema.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xec\x01\n" +
|
||||
"\x05Entry\x12\x19\n" +
|
||||
"\x04type\x18\x01 \x01(\x0e2\x05.TypeR\x04type\x12\x12\n" +
|
||||
"\x04size\x18\x02 \x01(\x03R\x04size\x12\x12\n" +
|
||||
"\x04type\x18\x01 \x01(\x0e2\x05.TypeR\x04type\x12#\n" +
|
||||
"\roriginal_size\x18\a \x01(\x03R\foriginalSize\x12'\n" +
|
||||
"\x0fcompressed_size\x18\x02 \x01(\x03R\x0ecompressedSize\x12\x12\n" +
|
||||
"\x04data\x18\x03 \x01(\fR\x04data\x12(\n" +
|
||||
"\ttransform\x18\x04 \x01(\x0e2\n" +
|
||||
".TransformR\ttransform\x12!\n" +
|
||||
"\fcontent_type\x18\x05 \x01(\tR\vcontentType\"`\n" +
|
||||
"\fcontent_type\x18\x05 \x01(\tR\vcontentType\x12\x19\n" +
|
||||
"\bgit_hash\x18\x06 \x01(\tR\agitHash\"`\n" +
|
||||
"\fRedirectRule\x12\x12\n" +
|
||||
"\x04from\x18\x01 \x01(\tR\x04from\x12\x0e\n" +
|
||||
"\x02to\x18\x02 \x01(\tR\x02to\x12\x16\n" +
|
||||
@@ -588,10 +1012,16 @@ const file_schema_proto_rawDesc = "" +
|
||||
"HeaderRule\x12\x12\n" +
|
||||
"\x04path\x18\x01 \x01(\tR\x04path\x12&\n" +
|
||||
"\n" +
|
||||
"header_map\x18\x02 \x03(\v2\a.HeaderR\theaderMap\"3\n" +
|
||||
"header_map\x18\x02 \x03(\v2\a.HeaderR\theaderMap\"I\n" +
|
||||
"\x0fBasicCredential\x12\x1a\n" +
|
||||
"\busername\x18\x01 \x01(\tR\busername\x12\x1a\n" +
|
||||
"\bpassword\x18\x02 \x01(\tR\bpassword\"W\n" +
|
||||
"\rBasicAuthRule\x12\x12\n" +
|
||||
"\x04path\x18\x01 \x01(\tR\x04path\x122\n" +
|
||||
"\vcredentials\x18\x02 \x03(\v2\x10.BasicCredentialR\vcredentials\"3\n" +
|
||||
"\aProblem\x12\x12\n" +
|
||||
"\x04path\x18\x01 \x01(\tR\x04path\x12\x14\n" +
|
||||
"\x05cause\x18\x02 \x01(\tR\x05cause\"\xb8\x03\n" +
|
||||
"\x05cause\x18\x02 \x01(\tR\x05cause\"\xe7\x03\n" +
|
||||
"\bManifest\x12\x19\n" +
|
||||
"\brepo_url\x18\x01 \x01(\tR\arepoUrl\x12\x16\n" +
|
||||
"\x06branch\x18\x02 \x01(\tR\x06branch\x12\x16\n" +
|
||||
@@ -603,21 +1033,52 @@ const file_schema_proto_rawDesc = "" +
|
||||
"\vstored_size\x18\b \x01(\x03R\n" +
|
||||
"storedSize\x12+\n" +
|
||||
"\tredirects\x18\x06 \x03(\v2\r.RedirectRuleR\tredirects\x12%\n" +
|
||||
"\aheaders\x18\t \x03(\v2\v.HeaderRuleR\aheaders\x12$\n" +
|
||||
"\aheaders\x18\t \x03(\v2\v.HeaderRuleR\aheaders\x12-\n" +
|
||||
"\n" +
|
||||
"basic_auth\x18\v \x03(\v2\x0e.BasicAuthRuleR\tbasicAuth\x12$\n" +
|
||||
"\bproblems\x18\a \x03(\v2\b.ProblemR\bproblems\x1aC\n" +
|
||||
"\rContentsEntry\x12\x10\n" +
|
||||
"\x03key\x18\x01 \x01(\tR\x03key\x12\x1c\n" +
|
||||
"\x05value\x18\x02 \x01(\v2\x06.EntryR\x05value:\x028\x01*Q\n" +
|
||||
"\x04Type\x12\v\n" +
|
||||
"\aInvalid\x10\x00\x12\r\n" +
|
||||
"\x05value\x18\x02 \x01(\v2\x06.EntryR\x05value:\x028\x01\"\xfd\x01\n" +
|
||||
"\vAuditRecord\x12\x0e\n" +
|
||||
"\x02id\x18\x01 \x01(\x03R\x02id\x128\n" +
|
||||
"\ttimestamp\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12!\n" +
|
||||
"\x05event\x18\x03 \x01(\x0e2\v.AuditEventR\x05event\x12(\n" +
|
||||
"\tprincipal\x18\x04 \x01(\v2\n" +
|
||||
".PrincipalR\tprincipal\x12\x16\n" +
|
||||
"\x06domain\x18\n" +
|
||||
" \x01(\tR\x06domain\x12\x18\n" +
|
||||
"\aproject\x18\v \x01(\tR\aproject\x12%\n" +
|
||||
"\bmanifest\x18\f \x01(\v2\t.ManifestR\bmanifest\"\x8d\x01\n" +
|
||||
"\tPrincipal\x12\x1d\n" +
|
||||
"\n" +
|
||||
"ip_address\x18\x01 \x01(\tR\tipAddress\x12\x1b\n" +
|
||||
"\tcli_admin\x18\x02 \x01(\bR\bcliAdmin\x12)\n" +
|
||||
"\n" +
|
||||
"forge_user\x18\x03 \x01(\v2\n" +
|
||||
".ForgeUserR\tforgeUser\x12\x19\n" +
|
||||
"\brepo_url\x18\x04 \x01(\tR\arepoUrl\"K\n" +
|
||||
"\tForgeUser\x12\x16\n" +
|
||||
"\x06origin\x18\x01 \x01(\tR\x06origin\x12\x0e\n" +
|
||||
"\x02id\x18\x02 \x01(\x03R\x02id\x12\x16\n" +
|
||||
"\x06handle\x18\x03 \x01(\tR\x06handle*V\n" +
|
||||
"\x04Type\x12\x10\n" +
|
||||
"\fInvalidEntry\x10\x00\x12\r\n" +
|
||||
"\tDirectory\x10\x01\x12\x0e\n" +
|
||||
"\n" +
|
||||
"InlineFile\x10\x02\x12\x10\n" +
|
||||
"\fExternalFile\x10\x03\x12\v\n" +
|
||||
"\aSymlink\x10\x04*$\n" +
|
||||
"\tTransform\x12\b\n" +
|
||||
"\x04None\x10\x00\x12\r\n" +
|
||||
"\tZstandard\x10\x01B,Z*codeberg.org/git-pages/git-pages/git_pagesb\beditionsp\xe8\a"
|
||||
"\aSymlink\x10\x04*#\n" +
|
||||
"\tTransform\x12\f\n" +
|
||||
"\bIdentity\x10\x00\x12\b\n" +
|
||||
"\x04Zstd\x10\x01*l\n" +
|
||||
"\n" +
|
||||
"AuditEvent\x12\x10\n" +
|
||||
"\fInvalidEvent\x10\x00\x12\x12\n" +
|
||||
"\x0eCommitManifest\x10\x01\x12\x12\n" +
|
||||
"\x0eDeleteManifest\x10\x02\x12\x10\n" +
|
||||
"\fFreezeDomain\x10\x03\x12\x12\n" +
|
||||
"\x0eUnfreezeDomain\x10\x04B,Z*codeberg.org/git-pages/git-pages/git_pagesb\beditionsp\xe8\a"
|
||||
|
||||
var (
|
||||
file_schema_proto_rawDescOnce sync.Once
|
||||
@@ -631,33 +1092,47 @@ func file_schema_proto_rawDescGZIP() []byte {
|
||||
return file_schema_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_schema_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
|
||||
var file_schema_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
|
||||
var file_schema_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
|
||||
var file_schema_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
|
||||
var file_schema_proto_goTypes = []any{
|
||||
(Type)(0), // 0: Type
|
||||
(Transform)(0), // 1: Transform
|
||||
(*Entry)(nil), // 2: Entry
|
||||
(*RedirectRule)(nil), // 3: RedirectRule
|
||||
(*Header)(nil), // 4: Header
|
||||
(*HeaderRule)(nil), // 5: HeaderRule
|
||||
(*Problem)(nil), // 6: Problem
|
||||
(*Manifest)(nil), // 7: Manifest
|
||||
nil, // 8: Manifest.ContentsEntry
|
||||
(Type)(0), // 0: Type
|
||||
(Transform)(0), // 1: Transform
|
||||
(AuditEvent)(0), // 2: AuditEvent
|
||||
(*Entry)(nil), // 3: Entry
|
||||
(*RedirectRule)(nil), // 4: RedirectRule
|
||||
(*Header)(nil), // 5: Header
|
||||
(*HeaderRule)(nil), // 6: HeaderRule
|
||||
(*BasicCredential)(nil), // 7: BasicCredential
|
||||
(*BasicAuthRule)(nil), // 8: BasicAuthRule
|
||||
(*Problem)(nil), // 9: Problem
|
||||
(*Manifest)(nil), // 10: Manifest
|
||||
(*AuditRecord)(nil), // 11: AuditRecord
|
||||
(*Principal)(nil), // 12: Principal
|
||||
(*ForgeUser)(nil), // 13: ForgeUser
|
||||
nil, // 14: Manifest.ContentsEntry
|
||||
(*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp
|
||||
}
|
||||
var file_schema_proto_depIdxs = []int32{
|
||||
0, // 0: Entry.type:type_name -> Type
|
||||
1, // 1: Entry.transform:type_name -> Transform
|
||||
4, // 2: HeaderRule.header_map:type_name -> Header
|
||||
8, // 3: Manifest.contents:type_name -> Manifest.ContentsEntry
|
||||
3, // 4: Manifest.redirects:type_name -> RedirectRule
|
||||
5, // 5: Manifest.headers:type_name -> HeaderRule
|
||||
6, // 6: Manifest.problems:type_name -> Problem
|
||||
2, // 7: Manifest.ContentsEntry.value:type_name -> Entry
|
||||
8, // [8:8] is the sub-list for method output_type
|
||||
8, // [8:8] is the sub-list for method input_type
|
||||
8, // [8:8] is the sub-list for extension type_name
|
||||
8, // [8:8] is the sub-list for extension extendee
|
||||
0, // [0:8] is the sub-list for field type_name
|
||||
0, // 0: Entry.type:type_name -> Type
|
||||
1, // 1: Entry.transform:type_name -> Transform
|
||||
5, // 2: HeaderRule.header_map:type_name -> Header
|
||||
7, // 3: BasicAuthRule.credentials:type_name -> BasicCredential
|
||||
14, // 4: Manifest.contents:type_name -> Manifest.ContentsEntry
|
||||
4, // 5: Manifest.redirects:type_name -> RedirectRule
|
||||
6, // 6: Manifest.headers:type_name -> HeaderRule
|
||||
8, // 7: Manifest.basic_auth:type_name -> BasicAuthRule
|
||||
9, // 8: Manifest.problems:type_name -> Problem
|
||||
15, // 9: AuditRecord.timestamp:type_name -> google.protobuf.Timestamp
|
||||
2, // 10: AuditRecord.event:type_name -> AuditEvent
|
||||
12, // 11: AuditRecord.principal:type_name -> Principal
|
||||
10, // 12: AuditRecord.manifest:type_name -> Manifest
|
||||
13, // 13: Principal.forge_user:type_name -> ForgeUser
|
||||
3, // 14: Manifest.ContentsEntry.value:type_name -> Entry
|
||||
15, // [15:15] is the sub-list for method output_type
|
||||
15, // [15:15] is the sub-list for method input_type
|
||||
15, // [15:15] is the sub-list for extension type_name
|
||||
15, // [15:15] is the sub-list for extension extendee
|
||||
0, // [0:15] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_schema_proto_init() }
|
||||
@@ -670,8 +1145,8 @@ func file_schema_proto_init() {
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_schema_proto_rawDesc), len(file_schema_proto_rawDesc)),
|
||||
NumEnums: 2,
|
||||
NumMessages: 7,
|
||||
NumEnums: 3,
|
||||
NumMessages: 12,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
|
||||
@@ -2,9 +2,11 @@ edition = "2023";
|
||||
|
||||
option go_package = "codeberg.org/git-pages/git-pages/git_pages";
|
||||
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
enum Type {
|
||||
// Invalid entry.
|
||||
Invalid = 0;
|
||||
InvalidEntry = 0;
|
||||
// Directory.
|
||||
Directory = 1;
|
||||
// Inline file. `Blob.Data` contains file contents.
|
||||
@@ -15,18 +17,24 @@ enum Type {
|
||||
Symlink = 4;
|
||||
}
|
||||
|
||||
// Transformation names should match HTTP `Accept-Encoding:` header.
|
||||
enum Transform {
|
||||
// No transformation.
|
||||
None = 0;
|
||||
Identity = 0;
|
||||
// Zstandard compression.
|
||||
Zstandard = 1;
|
||||
Zstd = 1;
|
||||
}
|
||||
|
||||
message Entry {
|
||||
Type type = 1;
|
||||
// Only present for `type == InlineFile` and `type == ExternalFile`.
|
||||
// For transformed entries, refers to the post-transformation (compressed) size.
|
||||
int64 size = 2;
|
||||
// For transformed entries, refers to the pre-transformation (decompressed) size; otherwise
|
||||
// equal to `compressed_size`.
|
||||
int64 original_size = 7;
|
||||
// Only present for `type == InlineFile` and `type == ExternalFile`.
|
||||
// For transformed entries, refers to the post-transformation (compressed) size; otherwise
|
||||
// equal to `original_size`.
|
||||
int64 compressed_size = 2;
|
||||
// Meaning depends on `type`:
|
||||
// * If `type == InlineFile`, contains file data.
|
||||
// * If `type == ExternalFile`, contains blob name (an otherwise unspecified
|
||||
@@ -40,6 +48,12 @@ message Entry {
|
||||
// Only present for `type == InlineFile` and `type == ExternalFile`.
|
||||
// Currently, optional (not present on certain legacy manifests).
|
||||
string content_type = 5;
|
||||
// May be present for `type == InlineFile` and `type == ExternalFile`.
|
||||
// Used to reduce the amount of work being done during git checkouts.
|
||||
// The type of hash used is determined by the length:
|
||||
// * 40 bytes: SHA1DC (as hex)
|
||||
// * 64 bytes: SHA256 (as hex)
|
||||
string git_hash = 6;
|
||||
}
|
||||
|
||||
// See https://docs.netlify.com/manage/routing/redirects/overview/ for details.
|
||||
@@ -62,27 +76,79 @@ message HeaderRule {
|
||||
repeated Header header_map = 2;
|
||||
}
|
||||
|
||||
message BasicCredential {
|
||||
string username = 1;
|
||||
string password = 2;
|
||||
}
|
||||
|
||||
message BasicAuthRule {
|
||||
string path = 1;
|
||||
repeated BasicCredential credentials = 2;
|
||||
}
|
||||
|
||||
message Problem {
|
||||
string path = 1;
|
||||
string cause = 2;
|
||||
}
|
||||
|
||||
message Manifest {
|
||||
// Source metadata
|
||||
// Source metadata.
|
||||
string repo_url = 1;
|
||||
string branch = 2;
|
||||
string commit = 3;
|
||||
|
||||
// Contents
|
||||
// Site contents.
|
||||
map<string, Entry> contents = 4;
|
||||
int64 original_size = 10; // total size of entries before compression
|
||||
int64 compressed_size = 5; // simple sum of each `entry.size`
|
||||
int64 stored_size = 8; // total size of (deduplicated) external objects
|
||||
int64 original_size = 10; // sum of each `entry.original_size`
|
||||
int64 compressed_size = 5; // sum of each `entry.compressed_size`
|
||||
int64 stored_size = 8; // sum of deduplicated `entry.compressed_size` for external files only
|
||||
|
||||
// Netlify-style `_redirects` and `_headers`
|
||||
// Netlify-style `_redirects` and `_headers` rules.
|
||||
repeated RedirectRule redirects = 6;
|
||||
repeated HeaderRule headers = 9;
|
||||
repeated BasicAuthRule basic_auth = 11;
|
||||
|
||||
// Diagnostics for non-fatal errors
|
||||
// Diagnostics for non-fatal errors.
|
||||
repeated Problem problems = 7;
|
||||
}
|
||||
|
||||
enum AuditEvent {
|
||||
// Invalid event.
|
||||
InvalidEvent = 0;
|
||||
// A manifest was committed (a site was created or updated).
|
||||
CommitManifest = 1;
|
||||
// A manifest was deleted (a site was deleted).
|
||||
DeleteManifest = 2;
|
||||
// A domain was frozen.
|
||||
FreezeDomain = 3;
|
||||
// A domain was thawed.
|
||||
UnfreezeDomain = 4;
|
||||
}
|
||||
|
||||
message AuditRecord {
|
||||
// Audit event metadata.
|
||||
int64 id = 1;
|
||||
google.protobuf.Timestamp timestamp = 2;
|
||||
AuditEvent event = 3;
|
||||
Principal principal = 4;
|
||||
|
||||
// Affected resource.
|
||||
string domain = 10;
|
||||
string project = 11; // only for `*Manifest` events
|
||||
|
||||
// Snapshot of site manifest.
|
||||
Manifest manifest = 12; // only for `*Manifest` events
|
||||
}
|
||||
|
||||
message Principal {
|
||||
string ip_address = 1;
|
||||
bool cli_admin = 2;
|
||||
ForgeUser forge_user = 3;
|
||||
string repo_url = 4;
|
||||
}
|
||||
|
||||
message ForgeUser {
|
||||
string origin = 1;
|
||||
int64 id = 2;
|
||||
string handle = 3;
|
||||
}
|
||||
|
||||
29
src/signal.go
Normal file
29
src/signal.go
Normal file
@@ -0,0 +1,29 @@
|
||||
// See https://pkg.go.dev/os/signal#hdr-Windows for a description of what this module
|
||||
// will do on Windows (tl;dr nothing calls the reload handler, the interrupt handler works
|
||||
// more or less how you'd expect).
|
||||
|
||||
package git_pages
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func OnReload(handler func()) {
|
||||
sighup := make(chan os.Signal, 1)
|
||||
signal.Notify(sighup, syscall.SIGHUP)
|
||||
go func() {
|
||||
for {
|
||||
<-sighup
|
||||
handler()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func WaitForInterrupt() {
|
||||
sigint := make(chan os.Signal, 1)
|
||||
signal.Notify(sigint, syscall.SIGINT, syscall.SIGTERM)
|
||||
<-sigint
|
||||
signal.Stop(sigint)
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
//go:build !unix
|
||||
|
||||
package git_pages
|
||||
|
||||
func OnReload(handler func()) {
|
||||
// not implemented
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
//go:build unix
|
||||
|
||||
package git_pages
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// OnReload arranges for handler to be invoked every time the process
// receives SIGHUP. The goroutine it spawns runs for the remainder of the
// process lifetime.
func OnReload(handler func()) {
	// Buffer of one: a SIGHUP delivered while handler is running is kept
	// (further signals in that window coalesce into it).
	sighup := make(chan os.Signal, 1)
	signal.Notify(sighup, syscall.SIGHUP)
	go func() {
		for {
			<-sighup
			handler()
		}
	}()
}
|
||||
162
src/update.go
162
src/update.go
@@ -5,10 +5,21 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
const BlobReferencePrefix = "/git/blobs/"
|
||||
|
||||
type UnresolvedRefError struct {
|
||||
missing []string
|
||||
}
|
||||
|
||||
func (err UnresolvedRefError) Error() string {
|
||||
return fmt.Sprintf("%d unresolved blob references", len(err.missing))
|
||||
}
|
||||
|
||||
type UpdateOutcome int
|
||||
|
||||
const (
|
||||
@@ -26,14 +37,16 @@ type UpdateResult struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func Update(ctx context.Context, webRoot string, manifest *Manifest) UpdateResult {
|
||||
var oldManifest, newManifest *Manifest
|
||||
func Update(
|
||||
ctx context.Context, webRoot string, oldManifest, newManifest *Manifest,
|
||||
opts ModifyManifestOptions,
|
||||
) UpdateResult {
|
||||
var err error
|
||||
var storedManifest *Manifest
|
||||
|
||||
outcome := UpdateError
|
||||
oldManifest, _, _ = backend.GetManifest(ctx, webRoot, GetManifestOptions{})
|
||||
if IsManifestEmpty(manifest) {
|
||||
newManifest, err = manifest, backend.DeleteManifest(ctx, webRoot)
|
||||
if IsManifestEmpty(newManifest) {
|
||||
storedManifest, err = newManifest, backend.DeleteManifest(ctx, webRoot, opts)
|
||||
if err == nil {
|
||||
if oldManifest == nil {
|
||||
outcome = UpdateNoChange
|
||||
@@ -41,16 +54,17 @@ func Update(ctx context.Context, webRoot string, manifest *Manifest) UpdateResul
|
||||
outcome = UpdateDeleted
|
||||
}
|
||||
}
|
||||
} else if err = PrepareManifest(ctx, manifest); err == nil {
|
||||
newManifest, err = StoreManifest(ctx, webRoot, manifest)
|
||||
} else if err = PrepareManifest(ctx, newManifest); err == nil {
|
||||
storedManifest, err = StoreManifest(ctx, webRoot, newManifest, opts)
|
||||
if err == nil {
|
||||
domain, _, _ := strings.Cut(webRoot, "/")
|
||||
err = backend.CreateDomain(ctx, domain)
|
||||
domainCache.AddDomain(ctx, domain)
|
||||
}
|
||||
if err == nil {
|
||||
if oldManifest == nil {
|
||||
outcome = UpdateCreated
|
||||
} else if CompareManifest(oldManifest, newManifest) {
|
||||
} else if CompareManifest(oldManifest, storedManifest) {
|
||||
outcome = UpdateNoChange
|
||||
} else {
|
||||
outcome = UpdateReplaced
|
||||
@@ -70,16 +84,16 @@ func Update(ctx context.Context, webRoot string, manifest *Manifest) UpdateResul
|
||||
case UpdateNoChange:
|
||||
status = "unchanged"
|
||||
}
|
||||
if newManifest.Commit != nil {
|
||||
log.Printf("update %s ok: %s %s", webRoot, status, *newManifest.Commit)
|
||||
if storedManifest.Commit != nil {
|
||||
logc.Printf(ctx, "update %s ok: %s %s", webRoot, *storedManifest.Commit, status)
|
||||
} else {
|
||||
log.Printf("update %s ok: %s", webRoot, status)
|
||||
logc.Printf(ctx, "update %s ok: %s", webRoot, status)
|
||||
}
|
||||
} else {
|
||||
log.Printf("update %s err: %s", webRoot, err)
|
||||
logc.Printf(ctx, "update %s err: %s", webRoot, err)
|
||||
}
|
||||
|
||||
return UpdateResult{outcome, newManifest, err}
|
||||
return UpdateResult{outcome, storedManifest, err}
|
||||
}
|
||||
|
||||
func UpdateFromRepository(
|
||||
@@ -91,15 +105,18 @@ func UpdateFromRepository(
|
||||
span, ctx := ObserveFunction(ctx, "UpdateFromRepository", "repo.url", repoURL)
|
||||
defer span.Finish()
|
||||
|
||||
log.Printf("update %s: %s %s\n", webRoot, repoURL, branch)
|
||||
logc.Printf(ctx, "update %s: %s %s\n", webRoot, repoURL, branch)
|
||||
|
||||
manifest, err := FetchRepository(ctx, repoURL, branch)
|
||||
// Ignore errors; worst case we have to re-fetch all of the blobs.
|
||||
oldManifest, _, _ := backend.GetManifest(ctx, webRoot, GetManifestOptions{})
|
||||
|
||||
newManifest, err := FetchRepository(ctx, repoURL, branch, oldManifest)
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
result = UpdateResult{UpdateTimeout, nil, fmt.Errorf("update timeout")}
|
||||
} else if err != nil {
|
||||
result = UpdateResult{UpdateError, nil, err}
|
||||
} else {
|
||||
result = Update(ctx, webRoot, manifest)
|
||||
result = Update(ctx, webRoot, oldManifest, newManifest, ModifyManifestOptions{})
|
||||
}
|
||||
|
||||
observeUpdateResult(result)
|
||||
@@ -111,34 +128,116 @@ var errArchiveFormat = errors.New("unsupported archive format")
|
||||
func UpdateFromArchive(
|
||||
ctx context.Context,
|
||||
webRoot string,
|
||||
repoURL string,
|
||||
contentType string,
|
||||
reader io.Reader,
|
||||
) (result UpdateResult) {
|
||||
var manifest *Manifest
|
||||
var err error
|
||||
|
||||
// Ignore errors; worst case we have to re-fetch all of the blobs.
|
||||
oldManifest, _, _ := backend.GetManifest(ctx, webRoot, GetManifestOptions{})
|
||||
|
||||
extractTar := func(ctx context.Context, reader io.Reader) (*Manifest, error) {
|
||||
return ExtractTar(ctx, reader, oldManifest)
|
||||
}
|
||||
|
||||
var newManifest *Manifest
|
||||
switch contentType {
|
||||
case "application/x-tar":
|
||||
log.Printf("update %s: (tar)", webRoot)
|
||||
manifest, err = ExtractTar(reader) // yellow?
|
||||
logc.Printf(ctx, "update %s: (tar)", webRoot)
|
||||
newManifest, err = extractTar(ctx, reader) // yellow?
|
||||
case "application/x-tar+gzip":
|
||||
log.Printf("update %s: (tar.gz)", webRoot)
|
||||
manifest, err = ExtractTarGzip(reader) // definitely yellow.
|
||||
logc.Printf(ctx, "update %s: (tar.gz)", webRoot)
|
||||
newManifest, err = ExtractGzip(ctx, reader, extractTar) // definitely yellow.
|
||||
case "application/x-tar+zstd":
|
||||
log.Printf("update %s: (tar.zst)", webRoot)
|
||||
manifest, err = ExtractTarZstd(reader)
|
||||
logc.Printf(ctx, "update %s: (tar.zst)", webRoot)
|
||||
newManifest, err = ExtractZstd(ctx, reader, extractTar)
|
||||
case "application/zip":
|
||||
log.Printf("update %s: (zip)", webRoot)
|
||||
manifest, err = ExtractZip(reader)
|
||||
logc.Printf(ctx, "update %s: (zip)", webRoot)
|
||||
newManifest, err = ExtractZip(ctx, reader, oldManifest)
|
||||
default:
|
||||
err = errArchiveFormat
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.Printf("update %s err: %s", webRoot, err)
|
||||
logc.Printf(ctx, "update %s err: %s", webRoot, err)
|
||||
result = UpdateResult{UpdateError, nil, err}
|
||||
} else {
|
||||
result = Update(ctx, webRoot, manifest)
|
||||
if repoURL != "" {
|
||||
newManifest.RepoUrl = &repoURL
|
||||
}
|
||||
|
||||
result = Update(ctx, webRoot, oldManifest, newManifest, ModifyManifestOptions{})
|
||||
}
|
||||
|
||||
observeUpdateResult(result)
|
||||
return
|
||||
}
|
||||
|
||||
func PartialUpdateFromArchive(
|
||||
ctx context.Context,
|
||||
webRoot string,
|
||||
contentType string,
|
||||
reader io.Reader,
|
||||
parents CreateParentsMode,
|
||||
) (result UpdateResult) {
|
||||
var err error
|
||||
|
||||
// Here the old manifest is used both as a substrate to which a patch is applied, as well
|
||||
// as a "load linked" operation for a future "store conditional" update which, taken together,
|
||||
// create an atomic compare-and-swap operation.
|
||||
oldManifest, oldMetadata, err := backend.GetManifest(ctx, webRoot,
|
||||
GetManifestOptions{BypassCache: true})
|
||||
if err != nil {
|
||||
logc.Printf(ctx, "patch %s err: %s", webRoot, err)
|
||||
return UpdateResult{UpdateError, nil, err}
|
||||
}
|
||||
|
||||
applyTarPatch := func(ctx context.Context, reader io.Reader) (*Manifest, error) {
|
||||
// Clone the manifest before starting to mutate it. `GetManifest` may return cached
|
||||
// `*Manifest` objects, which should never be mutated.
|
||||
newManifest := &Manifest{}
|
||||
proto.Merge(newManifest, oldManifest)
|
||||
newManifest.RepoUrl = nil
|
||||
newManifest.Branch = nil
|
||||
newManifest.Commit = nil
|
||||
if err := ApplyTarPatch(newManifest, reader, parents); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return newManifest, nil
|
||||
}
|
||||
}
|
||||
|
||||
var newManifest *Manifest
|
||||
switch contentType {
|
||||
case "application/x-tar":
|
||||
logc.Printf(ctx, "patch %s: (tar)", webRoot)
|
||||
newManifest, err = applyTarPatch(ctx, reader)
|
||||
case "application/x-tar+gzip":
|
||||
logc.Printf(ctx, "patch %s: (tar.gz)", webRoot)
|
||||
newManifest, err = ExtractGzip(ctx, reader, applyTarPatch)
|
||||
case "application/x-tar+zstd":
|
||||
logc.Printf(ctx, "patch %s: (tar.zst)", webRoot)
|
||||
newManifest, err = ExtractZstd(ctx, reader, applyTarPatch)
|
||||
default:
|
||||
err = errArchiveFormat
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
logc.Printf(ctx, "patch %s err: %s", webRoot, err)
|
||||
result = UpdateResult{UpdateError, nil, err}
|
||||
} else {
|
||||
result = Update(ctx, webRoot, oldManifest, newManifest,
|
||||
ModifyManifestOptions{
|
||||
IfUnmodifiedSince: oldMetadata.LastModified,
|
||||
IfMatch: oldMetadata.ETag,
|
||||
})
|
||||
// The `If-Unmodified-Since` precondition is internally generated here, which means its
|
||||
// failure shouldn't be surfaced as-is in the HTTP response. If we also accepted options
|
||||
// from the client, then that precondition failure should surface in the response.
|
||||
if errors.Is(result.err, ErrPreconditionFailed) {
|
||||
result.err = ErrWriteConflict
|
||||
}
|
||||
}
|
||||
|
||||
observeUpdateResult(result)
|
||||
@@ -146,7 +245,12 @@ func UpdateFromArchive(
|
||||
}
|
||||
|
||||
func observeUpdateResult(result UpdateResult) {
|
||||
if result.err != nil {
|
||||
var unresolvedRefErr UnresolvedRefError
|
||||
if errors.As(result.err, &unresolvedRefErr) {
|
||||
// This error is an expected outcome of an incremental update's probe phase.
|
||||
} else if errors.Is(result.err, ErrWriteConflict) {
|
||||
// This error is an expected outcome of an incremental update losing a race.
|
||||
} else if result.err != nil {
|
||||
ObserveError(result.err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,6 +6,15 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
type tuple[A, B any] struct {
|
||||
A A
|
||||
B B
|
||||
}
|
||||
|
||||
func (t tuple[A, B]) Splat() (A, B) {
|
||||
return t.A, t.B
|
||||
}
|
||||
|
||||
type BoundedReader struct {
|
||||
inner io.Reader
|
||||
fuel int64
|
||||
|
||||
@@ -1,12 +1,7 @@
|
||||
package git_pages
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
@@ -16,11 +11,9 @@ import (
|
||||
type WildcardPattern struct {
|
||||
Domain []string
|
||||
CloneURL *fasttemplate.Template
|
||||
IndexRepos []*fasttemplate.Template
|
||||
IndexRepo *fasttemplate.Template
|
||||
IndexBranch string
|
||||
Authorization bool
|
||||
FallbackURL *url.URL
|
||||
Fallback http.Handler
|
||||
}
|
||||
|
||||
func (pattern *WildcardPattern) GetHost() string {
|
||||
@@ -56,51 +49,24 @@ func (pattern *WildcardPattern) Matches(host string) (string, bool) {
|
||||
return subdomain, true
|
||||
}
|
||||
|
||||
func (pattern *WildcardPattern) ApplyTemplate(userName string, projectName string) ([]string, string) {
|
||||
var repoURLs []string
|
||||
func (pattern *WildcardPattern) ApplyTemplate(userName string, projectName string) (string, string) {
|
||||
var repoURL string
|
||||
var branch string
|
||||
repoURLTemplate := pattern.CloneURL
|
||||
if projectName == ".index" {
|
||||
for _, indexRepoTemplate := range pattern.IndexRepos {
|
||||
indexRepo := indexRepoTemplate.ExecuteString(map[string]any{"user": userName})
|
||||
repoURLs = append(repoURLs, repoURLTemplate.ExecuteString(map[string]any{
|
||||
"user": userName,
|
||||
"project": indexRepo,
|
||||
}))
|
||||
}
|
||||
repoURL = repoURLTemplate.ExecuteString(map[string]any{
|
||||
"user": userName,
|
||||
"project": pattern.IndexRepo.ExecuteString(map[string]any{"user": userName}),
|
||||
})
|
||||
branch = pattern.IndexBranch
|
||||
} else {
|
||||
repoURLs = append(repoURLs, repoURLTemplate.ExecuteString(map[string]any{
|
||||
repoURL = repoURLTemplate.ExecuteString(map[string]any{
|
||||
"user": userName,
|
||||
"project": projectName,
|
||||
}))
|
||||
})
|
||||
branch = "pages"
|
||||
}
|
||||
return repoURLs, branch
|
||||
}
|
||||
|
||||
func (pattern *WildcardPattern) IsFallbackFor(host string) bool {
|
||||
if pattern.Fallback == nil {
|
||||
return false
|
||||
}
|
||||
_, found := pattern.Matches(host)
|
||||
return found
|
||||
}
|
||||
|
||||
func HandleWildcardFallback(w http.ResponseWriter, r *http.Request) (bool, error) {
|
||||
host, err := GetHost(r)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, pattern := range wildcards {
|
||||
if pattern.IsFallbackFor(host) {
|
||||
log.Printf("proxy: %s via %s", pattern.GetHost(), pattern.FallbackURL)
|
||||
pattern.Fallback.ServeHTTP(w, r)
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
return repoURL, branch
|
||||
}
|
||||
|
||||
func TranslateWildcards(configs []WildcardConfig) ([]*WildcardPattern, error) {
|
||||
@@ -111,14 +77,10 @@ func TranslateWildcards(configs []WildcardConfig) ([]*WildcardPattern, error) {
|
||||
return nil, fmt.Errorf("wildcard pattern: clone URL: %w", err)
|
||||
}
|
||||
|
||||
var indexRepoTemplates []*fasttemplate.Template
|
||||
var indexRepoBranch string = config.IndexRepoBranch
|
||||
for _, indexRepo := range config.IndexRepos {
|
||||
indexRepoTemplate, err := fasttemplate.NewTemplate(indexRepo, "<", ">")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("wildcard pattern: index repo: %w", err)
|
||||
}
|
||||
indexRepoTemplates = append(indexRepoTemplates, indexRepoTemplate)
|
||||
indexRepoTemplate, err := fasttemplate.NewTemplate(config.IndexRepo, "<", ">")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("wildcard pattern: index repo: %w", err)
|
||||
}
|
||||
|
||||
authorization := false
|
||||
@@ -135,36 +97,12 @@ func TranslateWildcards(configs []WildcardConfig) ([]*WildcardPattern, error) {
|
||||
}
|
||||
}
|
||||
|
||||
var fallbackURL *url.URL
|
||||
var fallback http.Handler
|
||||
if config.FallbackProxyTo != "" {
|
||||
fallbackURL, err = url.Parse(config.FallbackProxyTo)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("wildcard pattern: fallback URL: %w", err)
|
||||
}
|
||||
|
||||
fallback = &httputil.ReverseProxy{
|
||||
Rewrite: func(r *httputil.ProxyRequest) {
|
||||
r.SetURL(fallbackURL)
|
||||
r.Out.Host = r.In.Host
|
||||
r.Out.Header["X-Forwarded-For"] = r.In.Header["X-Forwarded-For"]
|
||||
},
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: config.FallbackInsecure,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
wildcardPatterns = append(wildcardPatterns, &WildcardPattern{
|
||||
Domain: strings.Split(config.Domain, "."),
|
||||
CloneURL: cloneURLTemplate,
|
||||
IndexRepos: indexRepoTemplates,
|
||||
IndexRepo: indexRepoTemplate,
|
||||
IndexBranch: indexRepoBranch,
|
||||
Authorization: authorization,
|
||||
FallbackURL: fallbackURL,
|
||||
Fallback: fallback,
|
||||
})
|
||||
}
|
||||
return wildcardPatterns, nil
|
||||
|
||||
112
test/stresspatch/main.go
Normal file
112
test/stresspatch/main.go
Normal file
@@ -0,0 +1,112 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
func makeInit() []byte {
|
||||
writer := bytes.NewBuffer(nil)
|
||||
archive := tar.NewWriter(writer)
|
||||
archive.WriteHeader(&tar.Header{
|
||||
Typeflag: tar.TypeReg,
|
||||
Name: "index.html",
|
||||
})
|
||||
archive.Write([]byte{})
|
||||
archive.Flush()
|
||||
return writer.Bytes()
|
||||
}
|
||||
|
||||
func initSite() {
|
||||
req, err := http.NewRequest(http.MethodPut, "http://localhost:3000",
|
||||
bytes.NewReader(makeInit()))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
req.Header.Add("Content-Type", "application/x-tar")
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
|
||||
func makePatch(n int) []byte {
|
||||
writer := bytes.NewBuffer(nil)
|
||||
archive := tar.NewWriter(writer)
|
||||
archive.WriteHeader(&tar.Header{
|
||||
Typeflag: tar.TypeReg,
|
||||
Name: fmt.Sprintf("%d.txt", n),
|
||||
})
|
||||
archive.Write([]byte{})
|
||||
archive.Flush()
|
||||
return writer.Bytes()
|
||||
}
|
||||
|
||||
func patchRequest(n int) int {
|
||||
req, err := http.NewRequest(http.MethodPatch, "http://localhost:3000",
|
||||
bytes.NewReader(makePatch(n)))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
req.Header.Add("Atomic", "no")
|
||||
req.Header.Add("Content-Type", "application/x-tar")
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
data, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fmt.Printf("%d: %s %q\n", n, resp.Status, string(data))
|
||||
return resp.StatusCode
|
||||
}
|
||||
|
||||
func concurrentWriter(wg *sync.WaitGroup, n int) {
|
||||
for {
|
||||
if patchRequest(n) == 200 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
var count = flag.Int("count", 10, "request count")
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
initSite()
|
||||
time.Sleep(time.Second)
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
for n := range *count {
|
||||
wg.Add(1)
|
||||
go concurrentWriter(wg, n)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
success := 0
|
||||
for n := range *count {
|
||||
resp, err := http.Get(fmt.Sprintf("http://localhost:3000/%d.txt", n))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if resp.StatusCode == 200 {
|
||||
success++
|
||||
}
|
||||
}
|
||||
fmt.Printf("written: %d of %d\n", success, *count)
|
||||
}
|
||||
Reference in New Issue
Block a user