Mirror of https://github.com/versity/versitygw.git
Synced 2026-01-27 13:32:02 +00:00

Compare commits: 354 commits, ben/chunk-... v1.0.16
25 .github/SECURITY.md vendored Normal file

@@ -0,0 +1,25 @@
# Security Policy

## Reporting a Vulnerability

If you discover a security vulnerability in `versitygw`, we strongly encourage you to report it privately and responsibly.

Please do **not** create public issues or pull requests that contain details about the vulnerability.

Instead, report the issue using GitHub's private **Security Advisories** feature:

- Go to [versitygw's Security Advisories page](https://github.com/versity/versitygw/security/advisories)
- Click on **"Report a vulnerability"**

We aim to respond within **2 business days** and work with you to quickly resolve the issue.

## Supported Versions

| Version         | Supported |
| --------------- | --------- |
| Latest (v1.x.x) | ✅        |
| Older versions  | ❌        |

## Responsible Disclosure

We appreciate responsible disclosures and are committed to fixing vulnerabilities in a timely manner. Thank you for helping keep `versitygw` secure.
2 .github/workflows/azurite.yml vendored

@@ -1,5 +1,5 @@
name: azurite functional tests

permissions: {}
on: pull_request

jobs:
2 .github/workflows/docker-bats.yml vendored

@@ -1,5 +1,5 @@
name: docker bats tests

permissions: {}
on: pull_request

jobs:
1 .github/workflows/docker.yml vendored

@@ -1,5 +1,4 @@
name: Publish Docker image

on:
  release:
    types: [published]
2 .github/workflows/functional.yml vendored

@@ -1,5 +1,5 @@
name: functional tests

permissions: {}
on: pull_request

jobs:
30 .github/workflows/go.yml vendored

@@ -1,9 +1,10 @@
name: general
permissions: {}
on: pull_request
jobs:

  build:
    name: Build
    name: Go Basic Checks
    runs-on: ubuntu-latest
    steps:

@@ -23,9 +24,6 @@ jobs:
      run: |
        go get -v -t -d ./...

    - name: Build
      run: make

    - name: Test
      run: go test -coverprofile profile.txt -race -v -timeout 30s -tags=github ./...

@@ -35,4 +33,26 @@

    - name: Run govulncheck
      run: govulncheck ./...
      shell: bash

  verify-build:
    name: Verify Build Targets
    needs: build
    runs-on: ubuntu-latest
    strategy:
      matrix:
        os: [darwin, freebsd, linux]
        arch: [amd64, arm64]
    steps:

    - name: Check out code
      uses: actions/checkout@v4

    - name: Set up Go
      uses: actions/setup-go@v5
      with:
        go-version: 'stable'

    - name: Build for ${{ matrix.os }}/${{ matrix.arch }}
      run: |
        GOOS=${{ matrix.os }} GOARCH=${{ matrix.arch }} go build -o versitygw-${{ matrix.os }}-${{ matrix.arch }} cmd/versitygw/*.go
12 .github/workflows/goreleaser.yml vendored

@@ -1,16 +1,12 @@
name: goreleaser

permissions:
  contents: write
on:
  push:
    # run only against tags
    tags:
      - '*'

permissions:
  contents: write
  # packages: write
  # issues: write

jobs:
  goreleaser:
    runs-on: ubuntu-latest

@@ -29,10 +25,10 @@ jobs:
        go-version: stable

    - name: Run Releaser
      uses: goreleaser/goreleaser-action@v5
      uses: goreleaser/goreleaser-action@v6
      with:
        distribution: goreleaser
        version: latest
        version: '~> v2'
        args: release --clean
      env:
        GITHUB_TOKEN: ${{ secrets.TOKEN }}
13 .github/workflows/host-style-tests.yml vendored Normal file

@@ -0,0 +1,13 @@
name: host style tests
permissions: {}
on: pull_request

jobs:
  build-and-run:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: run host-style tests
        run: make test-host-style
1 .github/workflows/shellcheck.yml vendored

@@ -1,4 +1,5 @@
name: shellcheck
permissions: {}
on: pull_request
jobs:
1 .github/workflows/static.yml vendored

@@ -1,4 +1,5 @@
name: staticcheck
permissions: {}
on: pull_request
jobs:
39 .github/workflows/system.yml vendored

@@ -1,4 +1,5 @@
name: system tests
permissions: {}
on: pull_request
jobs:
  build:

@@ -12,66 +13,85 @@
          IAM_TYPE: folder
          RUN_SET: "mc-non-file-count"
          RECREATE_BUCKETS: "true"
          DELETE_BUCKETS_AFTER_TEST: "true"
          BACKEND: "posix"
        - set: "mc, posix, file count, non-static, folder IAM"
          IAM_TYPE: folder
          RUN_SET: "mc-file-count"
          RECREATE_BUCKETS: "true"
          DELETE_BUCKETS_AFTER_TEST: "true"
          BACKEND: "posix"
        - set: "REST, posix, non-static, all, folder IAM"
        - set: "REST, posix, non-static, base|acl|multipart, folder IAM"
          IAM_TYPE: folder
          RUN_SET: "rest"
          RUN_SET: "rest-base,rest-acl,rest-multipart"
          RECREATE_BUCKETS: "true"
          DELETE_BUCKETS_AFTER_TEST: "true"
          BACKEND: "posix"
        - set: "REST, posix, non-static, chunked|checksum|versioning|bucket, folder IAM"
          IAM_TYPE: folder
          RUN_SET: "rest-chunked,rest-checksum,rest-versioning,rest-bucket"
          RECREATE_BUCKETS: "true"
          DELETE_BUCKETS_AFTER_TEST: "true"
          BACKEND: "posix"
        - set: "s3, posix, non-file count, non-static, folder IAM"
          IAM_TYPE: folder
          RUN_SET: "s3-non-file-count"
          RECREATE_BUCKETS: "true"
          DELETE_BUCKETS_AFTER_TEST: "true"
          BACKEND: "posix"
        - set: "s3, posix, file count, non-static, folder IAM"
          IAM_TYPE: folder
          RUN_SET: "s3-file-count"
          RECREATE_BUCKETS: "true"
          DELETE_BUCKETS_AFTER_TEST: "true"
          BACKEND: "posix"
        - set: "s3api, posix, bucket|object|multipart, non-static, folder IAM"
          IAM_TYPE: folder
          RUN_SET: "s3api-bucket,s3api-object,s3api-multipart"
          RECREATE_BUCKETS: "true"
          DELETE_BUCKETS_AFTER_TEST: "true"
          BACKEND: "posix"
        - set: "s3api, posix, policy, non-static, folder IAM"
          IAM_TYPE: folder
          RUN_SET: "s3api-policy"
          RECREATE_BUCKETS: "true"
          DELETE_BUCKETS_AFTER_TEST: "true"
          BACKEND: "posix"
        - set: "s3api, posix, user, non-static, s3 IAM"
          IAM_TYPE: s3
          RUN_SET: "s3api-user"
          RECREATE_BUCKETS: "true"
          DELETE_BUCKETS_AFTER_TEST: "true"
          BACKEND: "posix"
        - set: "s3api, posix, bucket, static, folder IAM"
          IAM_TYPE: folder
          RUN_SET: "s3api-bucket"
          RECREATE_BUCKETS: "false"
          DELETE_BUCKETS_AFTER_TEST: "false"
          BACKEND: "posix"
        - set: "s3api, posix, multipart, static, folder IAM"
          IAM_TYPE: folder
          RUN_SET: "s3api-multipart"
          RECREATE_BUCKETS: "false"
          DELETE_BUCKETS_AFTER_TEST: "false"
          BACKEND: "posix"
        - set: "s3api, posix, object, static, folder IAM"
          IAM_TYPE: folder
          RUN_SET: "s3api-object"
          RECREATE_BUCKETS: "false"
          DELETE_BUCKETS_AFTER_TEST: "false"
          BACKEND: "posix"
        - set: "s3api, posix, policy, static, folder IAM"
          IAM_TYPE: folder
          RUN_SET: "s3api-policy"
          RECREATE_BUCKETS: "false"
          DELETE_BUCKETS_AFTER_TEST: "false"
          BACKEND: "posix"
        - set: "s3api, posix, user, static, folder IAM"
          IAM_TYPE: folder
          RUN_SET: "s3api-user"
          RECREATE_BUCKETS: "false"
          DELETE_BUCKETS_AFTER_TEST: "false"
          BACKEND: "posix"
        # TODO fix/debug s3 gateway
        #- set: "s3api, s3, multipart|object, non-static, folder IAM"

@@ -88,16 +108,19 @@
          IAM_TYPE: folder
          RUN_SET: "s3cmd-file-count"
          RECREATE_BUCKETS: "true"
          DELETE_BUCKETS_AFTER_TEST: "true"
          BACKEND: "posix"
        - set: "s3cmd, posix, non-user, non-static, folder IAM"
          IAM_TYPE: folder
          RUN_SET: "s3cmd-non-user"
          RECREATE_BUCKETS: "true"
          DELETE_BUCKETS_AFTER_TEST: "true"
          BACKEND: "posix"
        - set: "s3cmd, posix, user, non-static, folder IAM"
          IAM_TYPE: folder
          RUN_SET: "s3cmd-user"
          RECREATE_BUCKETS: "true"
          DELETE_BUCKETS_AFTER_TEST: "true"
          BACKEND: "posix"
    steps:
      - name: Check out code into the Go module directory

@@ -106,7 +129,7 @@
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: 'stable'
          go-version: "stable"
        id: go

      - name: Get Dependencies

@@ -122,6 +145,7 @@

      - name: Install s3cmd
        run: |
          sudo apt-get update
          sudo apt-get install s3cmd

      - name: Install mc

@@ -129,9 +153,10 @@
          curl https://dl.min.io/client/mc/release/linux-amd64/mc --create-dirs -o /usr/local/bin/mc
          chmod 755 /usr/local/bin/mc

      - name: Install xmllint (for rest)
      - name: Install xml libraries (for rest)
        run: |
          sudo apt-get install libxml2-utils
          sudo apt-get update
          sudo apt-get install libxml2-utils xmlstarlet

      # see https://github.com/versity/versitygw/issues/1034
      - name: Install AWS cli

@@ -150,6 +175,7 @@
          RUN_VERSITYGW: true
          BACKEND: ${{ matrix.BACKEND }}
          RECREATE_BUCKETS: ${{ matrix.RECREATE_BUCKETS }}
          DELETE_BUCKETS_AFTER_TEST: ${{ matrix.DELETE_BUCKETS_AFTER_TEST }}
          CERT: ${{ github.workspace }}/cert.pem
          KEY: ${{ github.workspace }}/versitygw.pem
          LOCAL_FOLDER: /tmp/gw

@@ -172,6 +198,9 @@
          VERSIONING_DIR: ${{ github.workspace }}/versioning
          COMMAND_LOG: command.log
          TIME_LOG: time.log
          PYTHON_ENV_FOLDER: ${{ github.workspace }}/env
          AUTOGENERATE_USERS: true
          USER_AUTOGENERATION_PREFIX: github-actions-test-
        run: |
          make testbin
          export AWS_ACCESS_KEY_ID=ABCDEFGHIJKLMNOPQRST
@@ -1,3 +1,5 @@
version: 2

before:
  hooks:
    - go mod tidy

@@ -23,7 +25,7 @@ builds:
      - -X=main.Build={{.Commit}} -X=main.BuildTime={{.Date}} -X=main.Version={{.Version}}

archives:
  - format: tar.gz
  - formats: [ 'tar.gz' ]
    # this name template makes the OS and Arch compatible with the results of uname.
    name_template: >-
      {{ .ProjectName }}_v{{ .Version }}_

@@ -43,7 +45,7 @@ archives:
    # use zip for windows archives
    format_overrides:
      - goos: windows
        format: zip
        formats: [ 'zip' ]

    # Additional files/globs you want to add to the archive.
    #

@@ -58,7 +60,7 @@ checksum:
  name_template: 'checksums.txt'

snapshot:
  name_template: "{{ incpatch .Version }}-next"
  version_template: "{{ incpatch .Version }}-{{.ShortCommit}}"

changelog:
  sort: asc

@@ -86,7 +88,7 @@ nfpms:

    license: Apache 2.0

    builds:
    ids:
      - versitygw

    formats:
11 Makefile

@@ -72,6 +72,11 @@ dist:
	rm -f VERSION
	gzip -f $(TARFILE)

.PHONY: snapshot
snapshot:
	# brew install goreleaser/tap/goreleaser
	goreleaser release --snapshot --skip publish --clean

# Creates and runs S3 gateway instance in a docker container
.PHONY: up-posix
up-posix:

@@ -91,3 +96,9 @@ up-azurite:
.PHONY: up-app
up-app:
	$(DOCKERCOMPOSE) up

# Run the host-style tests in docker containers
.PHONY: test-host-style
test-host-style:
	docker compose -f tests/host-style-tests/docker-compose.yml up --build --abort-on-container-exit --exit-code-from test
@@ -40,7 +40,7 @@ Versity Gateway, a simple to use tool for seamless inline translation between AW

The server translates incoming S3 API requests and transforms them into equivalent operations to the backend service. By leveraging this gateway server, applications can interact with the S3-compatible API on top of already existing storage systems. This project enables leveraging existing infrastructure investments while seamlessly integrating with S3-compatible systems, offering increased flexibility and compatibility in managing data storage.

The Versity Gateway is focused on performance, simplicity, and expandability. The Versity Gateway is designed with modularity in mind, enabling future extensions to support additional backend storage systems. At present, the Versity Gateway supports any generic POSIX file backend storage and Versity’s open source ScoutFS filesystem.
The Versity Gateway is focused on performance, simplicity, and expandability. The Versity Gateway is designed with modularity in mind, enabling future extensions to support additional backend storage systems. At present, the Versity Gateway supports any generic POSIX file backend storage, Versity’s open source ScoutFS filesystem, Azure Blob Storage, and other S3 servers.

The gateway is completely stateless. Multiple Versity Gateway instances may be deployed in a cluster to increase aggregate throughput. The Versity Gateway’s stateless architecture allows any request to be serviced by any gateway thereby distributing workloads and enhancing performance. Load balancers may be used to evenly distribute requests across the cluster of gateways for optimal performance.
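Because any gateway instance can serve any request, a standard S3 client only needs the address of one gateway (or of a load balancer fronting several). A minimal sketch using aws-sdk-go-v2; the endpoint URL, port, and region here are placeholders, not values taken from this repository:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	// Credentials come from the usual AWS environment variables.
	cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}

	// Point the client at the gateway (or a load balancer in front of the
	// cluster); path-style addressing avoids per-bucket DNS requirements.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.BaseEndpoint = aws.String("http://gateway.example.com:7070") // placeholder endpoint
		o.UsePathStyle = true
	})

	out, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range out.Buckets {
		log.Println(*b.Name)
	}
}
```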
201 auth/access-control.go Normal file

@@ -0,0 +1,201 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
	"context"
	"encoding/json"
	"errors"
	"strings"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/s3err"
)

func VerifyObjectCopyAccess(ctx context.Context, be backend.Backend, copySource string, opts AccessOptions) error {
	if opts.IsRoot {
		return nil
	}
	if opts.Acc.Role == RoleAdmin {
		return nil
	}

	// Verify destination bucket access
	if err := VerifyAccess(ctx, be, opts); err != nil {
		return err
	}
	// Verify source bucket access
	srcBucket, srcObject, found := strings.Cut(copySource, "/")
	if !found {
		return s3err.GetAPIError(s3err.ErrInvalidCopySource)
	}

	// Get source bucket ACL
	srcBucketACLBytes, err := be.GetBucketAcl(ctx, &s3.GetBucketAclInput{Bucket: &srcBucket})
	if err != nil {
		return err
	}

	var srcBucketAcl ACL
	if err := json.Unmarshal(srcBucketACLBytes, &srcBucketAcl); err != nil {
		return err
	}

	if err := VerifyAccess(ctx, be, AccessOptions{
		Acl:           srcBucketAcl,
		AclPermission: PermissionRead,
		IsRoot:        opts.IsRoot,
		Acc:           opts.Acc,
		Bucket:        srcBucket,
		Object:        srcObject,
		Action:        GetObjectAction,
	}); err != nil {
		return err
	}

	return nil
}

type AccessOptions struct {
	Acl            ACL
	AclPermission  Permission
	IsRoot         bool
	Acc            Account
	Bucket         string
	Object         string
	Action         Action
	Readonly       bool
	IsBucketPublic bool
}

func VerifyAccess(ctx context.Context, be backend.Backend, opts AccessOptions) error {
	// Skip the access check for public buckets
	if opts.IsBucketPublic {
		return nil
	}
	if opts.Readonly {
		if opts.AclPermission == PermissionWrite || opts.AclPermission == PermissionWriteAcp {
			return s3err.GetAPIError(s3err.ErrAccessDenied)
		}
	}
	if opts.IsRoot {
		return nil
	}
	if opts.Acc.Role == RoleAdmin {
		return nil
	}

	policy, policyErr := be.GetBucketPolicy(ctx, opts.Bucket)
	if policyErr != nil {
		if !errors.Is(policyErr, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
			return policyErr
		}
	} else {
		return VerifyBucketPolicy(policy, opts.Acc.Access, opts.Bucket, opts.Object, opts.Action)
	}

	if err := verifyACL(opts.Acl, opts.Acc.Access, opts.AclPermission); err != nil {
		return err
	}

	return nil
}

// Detects if the action is policy related
// e.g.
// 'GetBucketPolicy', 'PutBucketPolicy'
func isPolicyAction(action Action) bool {
	return action == GetBucketPolicyAction || action == PutBucketPolicyAction
}

// VerifyPublicAccess checks if the bucket is publicly accessible by ACL or Policy
func VerifyPublicAccess(ctx context.Context, be backend.Backend, action Action, permission Permission, bucket, object string) error {
	// ACL disabled
	policy, err := be.GetBucketPolicy(ctx, bucket)
	if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
		return err
	}
	if err == nil {
		err = VerifyPublicBucketPolicy(policy, bucket, object, action)
		if err == nil {
			// if ACLs are disabled, and the bucket grants public access,
			// policy actions should return 'MethodNotAllowed'
			if isPolicyAction(action) {
				return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
			}

			return nil
		}
	}

	// if the action is not in the ACL whitelist the access is denied
	_, ok := publicACLAllowedActions[action]
	if !ok {
		return s3err.GetAPIError(s3err.ErrAccessDenied)
	}

	err = VerifyPublicBucketACL(ctx, be, bucket, action, permission)
	if err != nil {
		return s3err.GetAPIError(s3err.ErrAccessDenied)
	}

	return nil
}

func MayCreateBucket(acct Account, isRoot bool) error {
	if isRoot {
		return nil
	}

	if acct.Role == RoleUser {
		return s3err.GetAPIError(s3err.ErrAccessDenied)
	}

	return nil
}

func IsAdminOrOwner(acct Account, isRoot bool, acl ACL) error {
	// Owner check
	if acct.Access == acl.Owner {
		return nil
	}

	// Root user has access over almost everything
	if isRoot {
		return nil
	}

	// Admin user case
	if acct.Role == RoleAdmin {
		return nil
	}

	// Return access denied in all other cases
	return s3err.GetAPIError(s3err.ErrAccessDenied)
}

type PublicACLAllowedActions map[Action]struct{}

var publicACLAllowedActions PublicACLAllowedActions = PublicACLAllowedActions{
	ListBucketAction:                 struct{}{},
	PutObjectAction:                  struct{}{},
	ListBucketMultipartUploadsAction: struct{}{},
	DeleteObjectAction:               struct{}{},
	ListBucketVersionsAction:         struct{}{},
	GetObjectAction:                  struct{}{},
	GetObjectAttributesAction:        struct{}{},
	GetObjectAclAction:               struct{}{},
}
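For orientation, here is a sketch of how a request handler might call VerifyAccess. The `auth` types and constants are the ones defined in the file above; the surrounding function, bucket, and object names are illustrative only:

```go
package handlers

import (
	"context"

	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/backend"
)

// mayReadObject is a hypothetical call site: it asks whether acct may GET
// an object, letting VerifyAccess consult the bucket policy first and fall
// back to the ACL, exactly as the function above does.
func mayReadObject(ctx context.Context, be backend.Backend, acct auth.Account, bucketACL auth.ACL) error {
	return auth.VerifyAccess(ctx, be, auth.AccessOptions{
		Acl:           bucketACL,           // parsed ACL of the target bucket
		AclPermission: auth.PermissionRead, // ACL permission the request needs
		Acc:           acct,                // the authenticated account
		Bucket:        "my-bucket",         // illustrative names
		Object:        "photos/cat.png",
		Action:        auth.GetObjectAction,
	})
}
```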
168 auth/acl.go

@@ -33,6 +33,17 @@ type ACL struct {
	Grantees []Grantee
}

// IsPublic specifies if the acl grants public read access
func (acl *ACL) IsPublic(permission Permission) bool {
	for _, grt := range acl.Grantees {
		if grt.Permission == permission && grt.Type == types.TypeGroup && grt.Access == "all-users" {
			return true
		}
	}

	return false
}

type Grantee struct {
	Permission Permission
	Access     string

@@ -193,14 +204,25 @@ func ParseACL(data []byte) (ACL, error) {
	return acl, nil
}

func ParseACLOutput(data []byte) (GetBucketAclOutput, error) {
func ParseACLOutput(data []byte, owner string) (GetBucketAclOutput, error) {
	grants := []Grant{}

	if len(data) == 0 {
		return GetBucketAclOutput{
			Owner: &types.Owner{
				ID: &owner,
			},
			AccessControlList: AccessControlList{
				Grants: grants,
			},
		}, nil
	}

	var acl ACL
	if err := json.Unmarshal(data, &acl); err != nil {
		return GetBucketAclOutput{}, fmt.Errorf("parse acl: %w", err)
	}

	grants := []Grant{}

	for _, elem := range acl.Grantees {
		acs := elem.Access
		grants = append(grants, Grant{

@@ -424,118 +446,50 @@ func verifyACL(acl ACL, access string, permission Permission) error {
	return s3err.GetAPIError(s3err.ErrAccessDenied)
}

func MayCreateBucket(acct Account, isRoot bool) error {
	if isRoot {
		return nil
	}

	if acct.Role == RoleUser {
		return s3err.GetAPIError(s3err.ErrAccessDenied)
	}

	return nil
}

func IsAdminOrOwner(acct Account, isRoot bool, acl ACL) error {
	// Owner check
	if acct.Access == acl.Owner {
		return nil
	}

	// Root user has access over almost everything
	if isRoot {
		return nil
	}

	// Admin user case
	if acct.Role == RoleAdmin {
		return nil
	}

	// Return access denied in all other cases
	return s3err.GetAPIError(s3err.ErrAccessDenied)
}

type AccessOptions struct {
	Acl           ACL
	AclPermission Permission
	IsRoot        bool
	Acc           Account
	Bucket        string
	Object        string
	Action        Action
	Readonly      bool
}

func VerifyAccess(ctx context.Context, be backend.Backend, opts AccessOptions) error {
	if opts.Readonly {
		if opts.AclPermission == PermissionWrite || opts.AclPermission == PermissionWriteAcp {
			return s3err.GetAPIError(s3err.ErrAccessDenied)
		}
	}
	if opts.IsRoot {
		return nil
	}
	if opts.Acc.Role == RoleAdmin {
		return nil
	}

	policy, policyErr := be.GetBucketPolicy(ctx, opts.Bucket)
	if policyErr != nil {
		if !errors.Is(policyErr, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
			return policyErr
		}
	} else {
		return VerifyBucketPolicy(policy, opts.Acc.Access, opts.Bucket, opts.Object, opts.Action)
	}

	if err := verifyACL(opts.Acl, opts.Acc.Access, opts.AclPermission); err != nil {
		return err
	}

	return nil
}

func VerifyObjectCopyAccess(ctx context.Context, be backend.Backend, copySource string, opts AccessOptions) error {
	if opts.IsRoot {
		return nil
	}
	if opts.Acc.Role == RoleAdmin {
		return nil
	}

	// Verify destination bucket access
	if err := VerifyAccess(ctx, be, opts); err != nil {
		return err
	}
	// Verify source bucket access
	srcBucket, srcObject, found := strings.Cut(copySource, "/")
	if !found {
		return s3err.GetAPIError(s3err.ErrInvalidCopySource)
	}

	// Get source bucket ACL
	srcBucketACLBytes, err := be.GetBucketAcl(ctx, &s3.GetBucketAclInput{Bucket: &srcBucket})
// Verifies if the bucket acl grants public access
func VerifyPublicBucketACL(ctx context.Context, be backend.Backend, bucket string, action Action, permission Permission) error {
	aclBytes, err := be.GetBucketAcl(ctx, &s3.GetBucketAclInput{
		Bucket: &bucket,
	})
	if err != nil {
		return err
	}

	var srcBucketAcl ACL
	if err := json.Unmarshal(srcBucketACLBytes, &srcBucketAcl); err != nil {
	acl, err := ParseACL(aclBytes)
	if err != nil {
		return err
	}

	if err := VerifyAccess(ctx, be, AccessOptions{
		Acl:           srcBucketAcl,
		AclPermission: PermissionRead,
		IsRoot:        opts.IsRoot,
		Acc:           opts.Acc,
		Bucket:        srcBucket,
		Object:        srcObject,
		Action:        GetObjectAction,
	}); err != nil {
		return err
	if !acl.IsPublic(permission) {
		return ErrAccessDenied
	}

	return nil
}

// UpdateBucketACLOwner sets default ACL with new owner and removes
// any previous bucket policy that was in place
func UpdateBucketACLOwner(ctx context.Context, be backend.Backend, bucket, newOwner string) error {
	acl := ACL{
		Owner: newOwner,
		Grantees: []Grantee{
			{
				Permission: PermissionFullControl,
				Access:     newOwner,
				Type:       types.TypeCanonicalUser,
			},
		},
	}

	result, err := json.Marshal(acl)
	if err != nil {
		return fmt.Errorf("marshal ACL: %w", err)
	}

	err = be.PutBucketAcl(ctx, bucket, result)
	if err != nil {
		return err
	}

	return be.DeleteBucketPolicy(ctx, bucket)
}
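`IsPublic` above treats a Group grantee whose access is "all-users" as a public grant. A small sketch using the types from this file; the `types` package is the AWS SDK's S3 types package, as imported in the diff:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/versity/versitygw/auth"
)

func main() {
	acl := auth.ACL{
		Owner: "admin",
		Grantees: []auth.Grantee{
			// a READ grant to the "all-users" group makes the bucket publicly readable
			{Permission: auth.PermissionRead, Access: "all-users", Type: types.TypeGroup},
		},
	}
	fmt.Println(acl.IsPublic(auth.PermissionRead))  // true
	fmt.Println(acl.IsPublic(auth.PermissionWrite)) // false: no public WRITE grant
}
```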
@@ -22,20 +22,48 @@ import (
	"github.com/versity/versitygw/s3err"
)

var (
	errResourceMismatch = errors.New("Action does not apply to any resource(s) in statement")
	//lint:ignore ST1005 Reason: This error message is intended for end-user clarity and follows their expectations
	errInvalidResource = errors.New("Policy has invalid resource")
	//lint:ignore ST1005 Reason: This error message is intended for end-user clarity and follows their expectations
	errInvalidPrincipal = errors.New("Invalid principal in policy")
	//lint:ignore ST1005 Reason: This error message is intended for end-user clarity and follows their expectations
	errInvalidAction = errors.New("Policy has invalid action")
var ErrAccessDenied = errors.New("access denied")

type policyErr string

func (p policyErr) Error() string {
	return string(p)
}

const (
	policyErrResourceMismatch     = policyErr("Action does not apply to any resource(s) in statement")
	policyErrInvalidResource      = policyErr("Policy has invalid resource")
	policyErrInvalidPrincipal     = policyErr("Invalid principal in policy")
	policyErrInvalidAction        = policyErr("Policy has invalid action")
	policyErrInvalidPolicy        = policyErr("This policy contains invalid Json")
	policyErrInvalidFirstChar     = policyErr("Policies must be valid JSON and the first byte must be '{'")
	policyErrEmptyStatement       = policyErr("Could not parse the policy: Statement is empty!")
	policyErrMissingStatmentField = policyErr("Missing required field Statement")
)

type BucketPolicy struct {
	Statement []BucketPolicyItem `json:"Statement"`
}

func (bp *BucketPolicy) UnmarshalJSON(data []byte) error {
	var tmp struct {
		Statement *[]BucketPolicyItem `json:"Statement"`
	}

	if err := json.Unmarshal(data, &tmp); err != nil {
		return err
	}

	// If Statement is nil (not present in JSON), return an error
	if tmp.Statement == nil {
		return policyErrMissingStatmentField
	}

	// Assign the parsed value to the actual struct
	bp.Statement = *tmp.Statement
	return nil
}

func (bp *BucketPolicy) Validate(bucket string, iam IAMService) error {
	for _, statement := range bp.Statement {
		err := statement.Validate(bucket, iam)

@@ -48,18 +76,37 @@ func (bp *BucketPolicy) Validate(bucket string, iam IAMService) error {
}

func (bp *BucketPolicy) isAllowed(principal string, action Action, resource string) bool {
	var isAllowed bool
	for _, statement := range bp.Statement {
		if statement.findMatch(principal, action, resource) {
			switch statement.Effect {
			case BucketPolicyAccessTypeAllow:
				return true
				isAllowed = true
			case BucketPolicyAccessTypeDeny:
				return false
			}
		}
	}

	return false
	return isAllowed
}

// isPublic checks if the bucket policy statements contain
// an entity granting public access
func (bp *BucketPolicy) isPublic(resource string, action Action) bool {
	var isAllowed bool
	for _, statement := range bp.Statement {
		if statement.isPublic(resource, action) {
			switch statement.Effect {
			case BucketPolicyAccessTypeAllow:
				isAllowed = true
			case BucketPolicyAccessTypeDeny:
				return false
			}
		}
	}

	return isAllowed
}
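The rewritten `isAllowed` no longer returns on the first matching Allow; it keeps scanning so that a matching Deny anywhere in the statement list overrides a prior Allow. A self-contained sketch of that evaluation order, with illustrative types rather than the package's own:

```go
package main

import "fmt"

type effect int

const (
	allow effect = iota
	deny
)

type statement struct {
	effect  effect
	matches func(principal, action, resource string) bool
}

// evaluate implements deny-overrides: any matching Deny statement wins no
// matter where it appears; otherwise access is granted only if some
// matching Allow statement exists. This mirrors the revised isAllowed
// logic in the diff above.
func evaluate(stmts []statement, principal, action, resource string) bool {
	allowed := false
	for _, s := range stmts {
		if !s.matches(principal, action, resource) {
			continue
		}
		switch s.effect {
		case deny:
			return false // Deny overrides any Allow, earlier or later
		case allow:
			allowed = true // remember the Allow, but keep scanning
		}
	}
	return allowed
}

func main() {
	stmts := []statement{
		{allow, func(p, a, r string) bool { return true }},
		{deny, func(p, a, r string) bool { return r == "my-bucket/secret.txt" }},
	}
	fmt.Println(evaluate(stmts, "user1", "s3:GetObject", "my-bucket/public.txt")) // true
	fmt.Println(evaluate(stmts, "user1", "s3:GetObject", "my-bucket/secret.txt")) // false
}
```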
type BucketPolicyItem struct {

@@ -89,10 +136,10 @@ func (bpi *BucketPolicyItem) Validate(bucket string, iam IAMService) error {
			break
		}
		if *isObjectAction && !containsObjectAction {
			return errResourceMismatch
			return policyErrResourceMismatch
		}
		if !*isObjectAction && !containsBucketAction {
			return errResourceMismatch
			return policyErrResourceMismatch
		}
	}

@@ -107,6 +154,11 @@ func (bpi *BucketPolicyItem) findMatch(principal string, action Action, resource
	return false
}

// isPublic checks if the bucket policy statement grants public access
func (bpi *BucketPolicyItem) isPublic(resource string, action Action) bool {
	return bpi.Principals.IsPublic() && bpi.Actions.FindMatch(action) && bpi.Resources.FindMatch(resource)
}

func getMalformedPolicyError(err error) error {
	return s3err.APIError{
		Code: "MalformedPolicy",

@@ -116,14 +168,20 @@ func getMalformedPolicyError(err error) error {
}

func ValidatePolicyDocument(policyBin []byte, bucket string, iam IAMService) error {
	if len(policyBin) == 0 || policyBin[0] != '{' {
		return getMalformedPolicyError(policyErrInvalidFirstChar)
	}
	var policy BucketPolicy
	if err := json.Unmarshal(policyBin, &policy); err != nil {
		return getMalformedPolicyError(err)
		var pe policyErr
		if errors.As(err, &pe) {
			return getMalformedPolicyError(err)
		}
		return getMalformedPolicyError(policyErrInvalidPolicy)
	}

	if len(policy.Statement) == 0 {
		//lint:ignore ST1005 Reason: This error message is intended for end-user clarity and follows their expectations
		return getMalformedPolicyError(errors.New("Could not parse the policy: Statement is empty!"))
		return getMalformedPolicyError(policyErrEmptyStatement)
	}

	if err := policy.Validate(bucket, iam); err != nil {

@@ -150,3 +208,22 @@ func VerifyBucketPolicy(policy []byte, access, bucket, object string, action Act

	return nil
}

// Checks if the bucket policy grants public access
func VerifyPublicBucketPolicy(policy []byte, bucket, object string, action Action) error {
	var bucketPolicy BucketPolicy
	if err := json.Unmarshal(policy, &bucketPolicy); err != nil {
		return err
	}

	resource := bucket
	if object != "" {
		resource += "/" + object
	}

	if !bucketPolicy.isPublic(resource, action) {
		return ErrAccessDenied
	}

	return nil
}
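ValidatePolicyDocument now distinguishes its own typed policy errors from generic JSON errors via errors.As. The pattern in miniature, with standalone names mirroring `policyErr` above:

```go
package main

import (
	"errors"
	"fmt"
)

// strErr mirrors the policyErr pattern: a string-based error type whose
// values can be declared as constants and matched with errors.As/Is.
type strErr string

func (e strErr) Error() string { return string(e) }

const errEmptyStatement = strErr("Could not parse the policy: Statement is empty!")

func parse(ok bool) error {
	if !ok {
		// wrapping preserves the typed error for errors.As to find
		return fmt.Errorf("validate policy: %w", errEmptyStatement)
	}
	return nil
}

func main() {
	err := parse(false)
	var pe strErr
	if errors.As(err, &pe) {
		// a known policy error: surface its exact message to the client
		fmt.Println("policy error:", pe)
	} else {
		// anything else (e.g. a JSON syntax error) gets a generic message
		fmt.Println("invalid policy JSON")
	}
}
```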
@@ -58,6 +58,8 @@ (
	BypassGovernanceRetentionAction  Action = "s3:BypassGovernanceRetention"
	PutBucketOwnershipControlsAction Action = "s3:PutBucketOwnershipControls"
	GetBucketOwnershipControlsAction Action = "s3:GetBucketOwnershipControls"
	PutBucketCorsAction              Action = "s3:PutBucketCORS"
	GetBucketCorsAction              Action = "s3:GetBucketCORS"
	AllActions                       Action = "s3:*"
)

@@ -89,6 +91,7 @@ var supportedActionList = map[Action]struct{}{
	DeleteObjectTaggingAction:              {},
	ListBucketVersionsAction:               {},
	ListBucketAction:                       {},
	GetBucketObjectLockConfigurationAction: {},
	PutBucketObjectLockConfigurationAction: {},
	GetObjectLegalHoldAction:               {},
	PutObjectLegalHoldAction:               {},

@@ -97,6 +100,8 @@ var supportedActionList = map[Action]struct{}{
	BypassGovernanceRetentionAction:  {},
	PutBucketOwnershipControlsAction: {},
	GetBucketOwnershipControlsAction: {},
	PutBucketCorsAction:              {},
	GetBucketCorsAction:              {},
	AllActions:                       {},
}

@@ -125,7 +130,7 @@
// Validates Action: it should either wildcard match with supported actions list or be in it
func (a Action) IsValid() error {
	if !strings.HasPrefix(string(a), "s3:") {
		return errInvalidAction
		return policyErrInvalidAction
	}

	if a == AllActions {

@@ -140,12 +145,12 @@ func (a Action) IsValid() error {
		}
	}

		return errInvalidAction
		return policyErrInvalidAction
	}

	_, found := supportedActionList[a]
	if !found {
		return errInvalidAction
		return policyErrInvalidAction
	}
	return nil
}

@@ -191,7 +196,7 @@ func (a *Actions) UnmarshalJSON(data []byte) error {
	var err error
	if err = json.Unmarshal(data, &ss); err == nil {
		if len(ss) == 0 {
			return errInvalidAction
			return policyErrInvalidAction
		}
		*a = make(Actions)
		for _, s := range ss {

@@ -204,7 +209,7 @@ func (a *Actions) UnmarshalJSON(data []byte) error {
	var s string
	if err = json.Unmarshal(data, &s); err == nil {
		if s == "" {
			return errInvalidAction
			return policyErrInvalidAction
		}
		*a = make(Actions)
		err = a.Add(s)
@@ -36,7 +36,7 @@ func (p *Principals) UnmarshalJSON(data []byte) error {

	if err = json.Unmarshal(data, &ss); err == nil {
		if len(ss) == 0 {
			return errInvalidPrincipal
			return policyErrInvalidPrincipal
		}
		*p = make(Principals)
		for _, s := range ss {

@@ -45,7 +45,7 @@ func (p *Principals) UnmarshalJSON(data []byte) error {
		return nil
	} else if err = json.Unmarshal(data, &s); err == nil {
		if s == "" {
			return errInvalidPrincipal
			return policyErrInvalidPrincipal
		}
		*p = make(Principals)
		p.Add(s)

@@ -53,7 +53,7 @@ func (p *Principals) UnmarshalJSON(data []byte) error {
		return nil
	} else if err = json.Unmarshal(data, &k); err == nil {
		if k.AWS == "" {
			return errInvalidPrincipal
			return policyErrInvalidPrincipal
		}
		*p = make(Principals)
		p.Add(k.AWS)

@@ -65,7 +65,7 @@ func (p *Principals) UnmarshalJSON(data []byte) error {
	}
	if err = json.Unmarshal(data, &sk); err == nil {
		if len(sk.AWS) == 0 {
			return errInvalidPrincipal
			return policyErrInvalidPrincipal
		}
		*p = make(Principals)
		for _, s := range sk.AWS {

@@ -97,7 +97,7 @@ func (p Principals) Validate(iam IAMService) error {
		if len(p) == 1 {
			return nil
		}
		return errInvalidPrincipal
		return policyErrInvalidPrincipal
	}

	accs, err := CheckIfAccountsExist(p.ToSlice(), iam)

@@ -105,7 +105,7 @@ func (p Principals) Validate(iam IAMService) error {
		return err
	}
	if len(accs) > 0 {
		return errInvalidPrincipal
		return policyErrInvalidPrincipal
	}

	return nil

@@ -121,3 +121,10 @@ func (p Principals) Contains(userAccess string) bool {
	_, found := p[userAccess]
	return found
}

// Bucket policy grants public access, if it contains
// a wildcard match to all the users
func (p Principals) IsPublic() bool {
	_, ok := p["*"]
	return ok
}
@@ -29,7 +29,7 @@ func (r *Resources) UnmarshalJSON(data []byte) error {
	var err error
	if err = json.Unmarshal(data, &ss); err == nil {
		if len(ss) == 0 {
			return errInvalidResource
			return policyErrInvalidResource
		}
		*r = make(Resources)
		for _, s := range ss {

@@ -42,7 +42,7 @@ func (r *Resources) UnmarshalJSON(data []byte) error {
	var s string
	if err = json.Unmarshal(data, &s); err == nil {
		if s == "" {
			return errInvalidResource
			return policyErrInvalidResource
		}
		*r = make(Resources)
		err = r.Add(s)

@@ -59,7 +59,7 @@ func (r *Resources) UnmarshalJSON(data []byte) error {
func (r Resources) Add(rc string) error {
	ok, pattern := isValidResource(rc)
	if !ok {
		return errInvalidResource
		return policyErrInvalidResource
	}

	r[pattern] = struct{}{}

@@ -93,7 +93,7 @@ func (r Resources) ContainsBucketPattern() bool {
func (r Resources) Validate(bucket string) error {
	for resource := range r {
		if !strings.HasPrefix(resource, bucket) {
			return errInvalidResource
			return policyErrInvalidResource
		}
	}

@@ -102,21 +102,45 @@ func (r Resources) Validate(bucket string) error {

func (r Resources) FindMatch(resource string) bool {
	for res := range r {
		if strings.HasSuffix(res, "*") {
			pattern := strings.TrimSuffix(res, "*")
			if strings.HasPrefix(resource, pattern) {
				return true
			}
		} else {
			if res == resource {
				return true
			}
		if r.Match(res, resource) {
			return true
		}
	}

	return false
}

// Match checks if the input string matches the given pattern with wildcards (`*`, `?`).
// - `?` matches exactly one occurrence of any character.
// - `*` matches arbitrarily many (including zero) occurrences of any character.
func (r Resources) Match(pattern, input string) bool {
	pIdx, sIdx := 0, 0
	starIdx, matchIdx := -1, 0

	for sIdx < len(input) {
		if pIdx < len(pattern) && (pattern[pIdx] == '?' || pattern[pIdx] == input[sIdx]) {
			sIdx++
			pIdx++
		} else if pIdx < len(pattern) && pattern[pIdx] == '*' {
			starIdx = pIdx
			matchIdx = sIdx
			pIdx++
		} else if starIdx != -1 {
			pIdx = starIdx + 1
			matchIdx++
			sIdx = matchIdx
		} else {
			return false
		}
	}

	for pIdx < len(pattern) && pattern[pIdx] == '*' {
		pIdx++
	}

	return pIdx == len(pattern)
}

// Checks the resource to have arn prefix and not starting with /
func isValidResource(rc string) (isValid bool, pattern string) {
	if !strings.HasPrefix(rc, ResourceArnPrefix) {
182 auth/bucket_policy_resources_test.go Normal file

@@ -0,0 +1,182 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
	"encoding/json"
	"testing"
)

func TestUnmarshalJSON(t *testing.T) {
	var r Resources

	cases := []struct {
		input    string
		expected int
		wantErr  bool
	}{
		{`"arn:aws:s3:::my-bucket/*"`, 1, false},
		{`["arn:aws:s3:::my-bucket/*", "arn:aws:s3:::other-bucket"]`, 2, false},
		{`""`, 0, true},
		{`[]`, 0, true},
		{`["invalid-bucket"]`, 0, true},
	}

	for _, tc := range cases {
		r = Resources{}
		err := json.Unmarshal([]byte(tc.input), &r)
		if (err != nil) != tc.wantErr {
			t.Errorf("Unexpected error status for input %s: %v", tc.input, err)
		}
		if len(r) != tc.expected {
			t.Errorf("Expected %d resources, got %d", tc.expected, len(r))
		}
	}
}

func TestAdd(t *testing.T) {
	r := Resources{}

	cases := []struct {
		input   string
		wantErr bool
	}{
		{"arn:aws:s3:::valid-bucket/*", false},
		{"arn:aws:s3:::valid-bucket/object", false},
		{"invalid-bucket/*", true},
		{"/invalid-start", true},
	}

	for _, tc := range cases {
		err := r.Add(tc.input)
		if (err != nil) != tc.wantErr {
			t.Errorf("Unexpected error status for input %s: %v", tc.input, err)
		}
	}
}

func TestContainsObjectPattern(t *testing.T) {
	cases := []struct {
		resources []string
		expected  bool
	}{
		{[]string{"arn:aws:s3:::my-bucket/my-object"}, true},
		{[]string{"arn:aws:s3:::my-bucket/*"}, true},
		{[]string{"arn:aws:s3:::my-bucket"}, false},
	}

	for _, tc := range cases {
		r := Resources{}
		for _, res := range tc.resources {
			r.Add(res)
		}
		if r.ContainsObjectPattern() != tc.expected {
			t.Errorf("Expected object pattern to be %v for %v", tc.expected, tc.resources)
		}
	}
}

func TestContainsBucketPattern(t *testing.T) {
	cases := []struct {
		resources []string
		expected  bool
	}{
		{[]string{"arn:aws:s3:::my-bucket"}, true},
		{[]string{"arn:aws:s3:::my-bucket/*"}, false},
		{[]string{"arn:aws:s3:::my-bucket/object"}, false},
	}

	for _, tc := range cases {
		r := Resources{}
		for _, res := range tc.resources {
			r.Add(res)
		}
		if r.ContainsBucketPattern() != tc.expected {
			t.Errorf("Expected bucket pattern to be %v for %v", tc.expected, tc.resources)
		}
	}
}

func TestValidate(t *testing.T) {
	cases := []struct {
		resources []string
		bucket    string
		expected  bool
	}{
		{[]string{"arn:aws:s3:::valid-bucket/*"}, "valid-bucket", true},
		{[]string{"arn:aws:s3:::wrong-bucket/*"}, "valid-bucket", false},
		{[]string{"arn:aws:s3:::valid-bucket/*", "arn:aws:s3:::valid-bucket/object/*"}, "valid-bucket", true},
	}

	for _, tc := range cases {
		r := Resources{}
		for _, res := range tc.resources {
			r.Add(res)
		}
		if (r.Validate(tc.bucket) == nil) != tc.expected {
			t.Errorf("Expected validation to be %v for bucket %s", tc.expected, tc.bucket)
		}
	}
}

func TestFindMatch(t *testing.T) {
	cases := []struct {
		resources []string
		input     string
		expected  bool
	}{
		{[]string{"arn:aws:s3:::my-bucket/*"}, "my-bucket/my-object", true},
		{[]string{"arn:aws:s3:::my-bucket/object"}, "other-bucket/my-object", false},
		{[]string{"arn:aws:s3:::my-bucket/object"}, "my-bucket/object", true},
		{[]string{"arn:aws:s3:::my-bucket/*", "arn:aws:s3:::other-bucket/*"}, "other-bucket/something", true},
	}

	for _, tc := range cases {
		r := Resources{}
		for _, res := range tc.resources {
			r.Add(res)
		}
		if r.FindMatch(tc.input) != tc.expected {
			t.Errorf("Expected FindMatch to be %v for input %s", tc.expected, tc.input)
		}
	}
}

func TestMatch(t *testing.T) {
	r := Resources{}
	cases := []struct {
		pattern  string
		input    string
		expected bool
	}{
		{"my-bucket/*", "my-bucket/object", true},
		{"my-bucket/?bject", "my-bucket/object", true},
		{"my-bucket/*", "other-bucket/object", false},
		{"*", "any-bucket/object", true},
		{"my-bucket/*", "my-bucket/subdir/object", true},
		{"my-bucket/*", "other-bucket", false},
		{"my-bucket/*/*", "my-bucket/hello", false},
		{"my-bucket/*/*", "my-bucket/hello/world", true},
		{"foo/???/bar", "foo/qux/bar", true},
		{"foo/???/bar", "foo/quxx/bar", false},
		{"foo/???/bar/*/?", "foo/qux/bar/hello/g", true},
		{"foo/???/bar/*/?", "foo/qux/bar/hello/smth", false},
	}
	for _, tc := range cases {
		if r.Match(tc.pattern, tc.input) != tc.expected {
			t.Errorf("Match(%s, %s) failed, expected %v", tc.pattern, tc.input, tc.expected)
		}
	}
}
19 auth/iam.go

@@ -18,6 +18,8 @@ import (
	"errors"
	"fmt"
	"time"

	"github.com/versity/versitygw/s3err"
)

type Role string

@@ -57,10 +59,19 @@ type ListUserAccountsResult struct {
// Mutable props, which could be changed when updating an IAM account
type MutableProps struct {
	Secret  *string `json:"secret"`
	Role    Role    `json:"role"`
	UserID  *int    `json:"userID"`
	GroupID *int    `json:"groupID"`
}

func (m MutableProps) Validate() error {
	if m.Role != "" && !m.Role.IsValid() {
		return s3err.GetAPIError(s3err.ErrAdminInvalidUserRole)
	}

	return nil
}

func updateAcc(acc *Account, props MutableProps) {
	if props.Secret != nil {
		acc.Secret = *props.Secret

@@ -71,6 +82,9 @@ func updateAcc(acc *Account, props MutableProps) {
	if props.UserID != nil {
		acc.UserID = *props.UserID
	}
	if props.Role != "" {
		acc.Role = props.Role
	}
}

// IAMService is the interface for all IAM service implementations

@@ -107,6 +121,7 @@ type Opts struct {
	LDAPGroupIdAtr         string
	VaultEndpointURL       string
	VaultSecretStoragePath string
	VaultAuthMethod        string
	VaultMountPath         string
	VaultRootToken         string
	VaultRoleId            string

@@ -152,7 +167,7 @@ func New(o *Opts) (IAMService, error) {
			o.S3Endpoint, o.S3Bucket)
	case o.VaultEndpointURL != "":
		svc, err = NewVaultIAMService(o.RootAccount, o.VaultEndpointURL, o.VaultSecretStoragePath,
			o.VaultMountPath, o.VaultRootToken, o.VaultRoleId, o.VaultRoleSecret,
			o.VaultAuthMethod, o.VaultMountPath, o.VaultRootToken, o.VaultRoleId, o.VaultRoleSecret,
			o.VaultServerCert, o.VaultClientCert, o.VaultClientCertKey)
		fmt.Printf("initializing Vault IAM with %q\n", o.VaultEndpointURL)
	case o.IpaHost != "":

@@ -161,7 +176,7 @@ func New(o *Opts) (IAMService, error) {
	default:
		// if no iam options selected, default to the single user mode
		fmt.Println("No IAM service configured, enabling single account mode")
		return IAMServiceSingle{}, nil
		return NewIAMServiceSingle(o.RootAccount), nil
	}

	if err != nil {
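MutableProps above uses pointer fields so an update request can distinguish "leave this field unchanged" (nil) from "set it to the zero value". The pattern in isolation, with standalone types mirroring updateAcc:

```go
package main

import "fmt"

type account struct {
	secret string
	userID int
}

// mutableProps mirrors MutableProps above: pointer fields distinguish
// "not provided" (nil) from an explicit zero value.
type mutableProps struct {
	secret *string
	userID *int
}

func update(acc *account, p mutableProps) {
	if p.secret != nil {
		acc.secret = *p.secret
	}
	if p.userID != nil {
		acc.userID = *p.userID
	}
}

func main() {
	acc := account{secret: "old", userID: 1000}
	zero := 0
	update(&acc, mutableProps{userID: &zero}) // set userID to 0, keep secret
	fmt.Printf("%+v\n", acc)                  // {secret:old userID:0}
}
```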
@@ -290,93 +290,49 @@ func (s *IAMServiceInternal) readIAMData() ([]byte, error) {

func (s *IAMServiceInternal) storeIAM(update UpdateAcctFunc) error {
	// We are going to be racing with other running gateways without any
	// coordination. So the strategy here is to read the current file data.
	// If the file doesn't exist, then we assume someone else is currently
	// updating the file. So we just need to keep retrying. We also need
	// to make sure the data is consistent within a single update. So racing
	// writes to a file would possibly leave this in some invalid state.
	// We can get atomic updates with rename. If we read the data, update
	// the data, write to a temp file, then rename the tempfile back to the
	// data file. This should always result in a complete data image.
	// coordination. So the strategy here is to read the current file data,
	// update the data, write back out to a temp file, then rename the
	// temp file to the original file. This rename will replace the
	// original file with the new file. This is atomic and should always
	// allow for a consistent view of the data. There is a small
	// window where the file could be read and then updated by
	// another process. In this case any updates the other process did
	// will be lost. This is a limitation of the internal IAM service.
	// This should be rare, and even when it does happen should result
	// in a valid IAM file, just without the other process's updates.

	// There is at least one unsolved failure mode here.
	// If a gateway removes the data file and then crashes, all other
	// gateways will retry forever thinking that the original will eventually
	// write the file.
	iamFname := filepath.Join(s.dir, iamFile)
	backupFname := filepath.Join(s.dir, iamBackupFile)

	retries := 0
	fname := filepath.Join(s.dir, iamFile)
	b, err := os.ReadFile(iamFname)
	if err != nil && !errors.Is(err, fs.ErrNotExist) {
		return fmt.Errorf("read iam file: %w", err)
	}

	for {
		b, err := os.ReadFile(fname)
		if errors.Is(err, fs.ErrNotExist) {
			// racing with someone else updating
			// keep retrying after backoff
			retries++
			if retries < maxretry {
				time.Sleep(backoff)
				continue
			}
	// save copy of data
	datacopy := make([]byte, len(b))
	copy(datacopy, b)

			// we have been unsuccessful trying to read the iam file
			// so this must be the case where something happened and
			// the file did not get updated successfully, and probably
			// isn't going to be. The recovery procedure would be to
			// copy the backup file into place of the original.
			return fmt.Errorf("no iam file, needs backup recovery")
		}
		if err != nil && !errors.Is(err, fs.ErrNotExist) {
			return fmt.Errorf("read iam file: %w", err)
		}
	// make a backup copy in case something happens
	err = s.writeUsingTempFile(b, backupFname)
	if err != nil {
		return fmt.Errorf("write backup iam file: %w", err)
	}

		// reset retries on successful read
		retries = 0
	b, err = update(b)
	if err != nil {
		return fmt.Errorf("update iam data: %w", err)
	}

		err = os.Remove(fname)
		if errors.Is(err, fs.ErrNotExist) {
			// racing with someone else updating
			// keep retrying after backoff
			time.Sleep(backoff)
			continue
		}
		if err != nil && !errors.Is(err, fs.ErrNotExist) {
			return fmt.Errorf("remove old iam file: %w", err)
		}

		// save copy of data
		datacopy := make([]byte, len(b))
		copy(datacopy, b)

		// make a backup copy in case we crash before update
		// this is after remove, so there is a small window something
		// can go wrong, but the remove should barrier other gateways
		// from trying to write backup at the same time. Only one
		// gateway will successfully remove the file.
		os.WriteFile(filepath.Join(s.dir, iamBackupFile), b, iamMode)

		b, err = update(b)
		if err != nil {
			// update failed, try to write old data back out
			os.WriteFile(fname, datacopy, iamMode)
|
||||
return fmt.Errorf("update iam data: %w", err)
|
||||
}
|
||||
|
||||
err = s.writeTempFile(b)
|
||||
if err != nil {
|
||||
// update failed, try to write old data back out
|
||||
os.WriteFile(fname, datacopy, iamMode)
|
||||
return err
|
||||
}
|
||||
|
||||
break
|
||||
err = s.writeUsingTempFile(b, iamFname)
|
||||
if err != nil {
|
||||
return fmt.Errorf("write iam file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *IAMServiceInternal) writeTempFile(b []byte) error {
|
||||
fname := filepath.Join(s.dir, iamFile)
|
||||
|
||||
func (s *IAMServiceInternal) writeUsingTempFile(b []byte, fname string) error {
|
||||
f, err := os.CreateTemp(s.dir, iamFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create temp file: %w", err)
|
||||
@@ -384,6 +340,7 @@ func (s *IAMServiceInternal) writeTempFile(b []byte) error {
|
||||
defer os.Remove(f.Name())
|
||||
|
||||
_, err = f.Write(b)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("write temp file: %w", err)
|
||||
}
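The write-to-temp-then-rename pattern used by writeUsingTempFile, condensed into a standalone sketch (assuming os, fmt, and path/filepath are imported; the rename step is what makes the update atomic on POSIX filesystems):

func atomicWrite(dir, name string, data []byte) error {
	f, err := os.CreateTemp(dir, name)
	if err != nil {
		return fmt.Errorf("create temp file: %w", err)
	}
	// the temp file is cleaned up on any failure; after a
	// successful rename this remove is a harmless no-op
	defer os.Remove(f.Name())

	if _, err := f.Write(data); err != nil {
		f.Close()
		return fmt.Errorf("write temp file: %w", err)
	}
	if err := f.Close(); err != nil {
		return fmt.Errorf("close temp file: %w", err)
	}
	// rename atomically replaces the old file, so readers see
	// either the previous image or the new one, never a partial write
	return os.Rename(f.Name(), filepath.Join(dir, name))
}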

@@ -27,11 +27,15 @@ import (
"fmt"
"io"
"log"
"net"
"net/http"
"net/http/cookiejar"
"net/url"
"slices"
"strconv"
"strings"
"syscall"
"time"
)

const IpaVersion = "2.254"
@@ -52,7 +56,6 @@ type IpaIAMService struct {
var _ IAMService = &IpaIAMService{}

func NewIpaIAMService(rootAcc Account, host, vaultName, username, password string, isInsecure, debug bool) (*IpaIAMService, error) {

ipa := IpaIAMService{
id: 0,
version: IpaVersion,
@@ -72,6 +75,7 @@ func NewIpaIAMService(rootAcc Account, host, vaultName, username, password strin
mTLSConfig := &tls.Config{InsecureSkipVerify: isInsecure}
tr := &http.Transport{
TLSClientConfig: mTLSConfig,
Proxy: http.ProxyFromEnvironment,
}
ipa.client = http.Client{Jar: jar, Transport: tr}

@@ -102,13 +106,7 @@ func NewIpaIAMService(rootAcc Account, host, vaultName, username, password strin

ipa.kraTransportKey = cert.PublicKey.(*rsa.PublicKey)

isSupported := false
for _, algo := range vaultConfig.Wrapping_supported_algorithms {
if algo == "aes-128-cbc" {
isSupported = true
break
}
}
isSupported := slices.Contains(vaultConfig.Wrapping_supported_algorithms, "aes-128-cbc")

if !isSupported {
return nil,
@@ -226,6 +224,8 @@ func (ipa *IpaIAMService) Shutdown() error {

// Implementation

const requestRetries = 3

func (ipa *IpaIAMService) login() error {
form := url.Values{}
form.Set("user", ipa.username)
@@ -242,17 +242,33 @@ func (ipa *IpaIAMService) login() error {
req.Header.Set("referer", fmt.Sprintf("%s/ipa", ipa.host))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

resp, err := ipa.client.Do(req)
if err != nil {
return err
var resp *http.Response
for i := range requestRetries {
resp, err = ipa.client.Do(req)
if err == nil {
break
}
// Check for transient network errors
if isRetryable(err) {
time.Sleep(time.Second * time.Duration(i+1))
continue
}
return fmt.Errorf("login POST to %s failed: %w", req.URL, err)
}
if err != nil {
return fmt.Errorf("login POST to %s failed after retries: %w",
req.URL, err)
}

defer resp.Body.Close()

if resp.StatusCode == 401 {
return errors.New("cannot login to FreeIPA: invalid credentials")
}

if resp.StatusCode != 200 {
return fmt.Errorf("cannot login to FreeIPA: status code %d", resp.StatusCode)
return fmt.Errorf("cannot login to FreeIPA: status code %d",
resp.StatusCode)
}

return nil
@@ -299,10 +315,27 @@ func (ipa *IpaIAMService) rpcInternal(req rpcRequest) (rpcResponse, error) {
httpReq.Header.Set("referer", fmt.Sprintf("%s/ipa", ipa.host))
httpReq.Header.Set("Content-Type", "application/json")

httpResp, err := ipa.client.Do(httpReq)
if err != nil {
return rpcResponse{}, err
var httpResp *http.Response
for i := range requestRetries {
httpResp, err = ipa.client.Do(httpReq)
if err == nil {
break
}
// Check for transient network errors
if isRetryable(err) {
time.Sleep(time.Second * time.Duration(i+1))
continue
}
return rpcResponse{}, fmt.Errorf("ipa request to %s failed: %w",
httpReq.URL, err)
}
if err != nil {
return rpcResponse{},
fmt.Errorf("ipa request to %s failed after retries: %w",
httpReq.URL, err)
}

defer httpResp.Body.Close()

bytes, err := io.ReadAll(httpResp.Body)
ipa.log(string(bytes))
@@ -338,6 +371,30 @@ func (ipa *IpaIAMService) rpcInternal(req rpcRequest) (rpcResponse, error) {
}, nil
}

func isRetryable(err error) bool {
if err == nil {
return false
}

if errors.Is(err, io.EOF) {
return true
}

if err, ok := err.(net.Error); ok && err.Timeout() {
return true
}

if opErr, ok := err.(*net.OpError); ok {
if sysErr, ok := opErr.Err.(*syscall.Errno); ok {
if *sysErr == syscall.ECONNRESET {
return true
}
}
}

return false
}
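The two retry loops above share the same shape. A hypothetical helper capturing it (not present in the diff; note that resending a request whose body has been consumed is only safe when req.GetBody is set, as it is for the form and JSON requests built here):

func doWithRetry(client *http.Client, req *http.Request) (*http.Response, error) {
	var resp *http.Response
	var err error
	for i := range requestRetries {
		resp, err = client.Do(req)
		if err == nil {
			return resp, nil
		}
		if !isRetryable(err) {
			return nil, err
		}
		// linear backoff: 1s, 2s, 3s
		time.Sleep(time.Second * time.Duration(i+1))
	}
	return nil, fmt.Errorf("request to %s failed after retries: %w", req.URL, err)
}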

func (ipa *IpaIAMService) newRequest(method string, args []string, dict map[string]any) (rpcRequest, error) {

id := ipa.id

@@ -111,11 +111,13 @@ func (ld *LdapIAMService) GetUserAccount(access string) (Account, error) {
entry := result.Entries[0]
groupId, err := strconv.Atoi(entry.GetAttributeValue(ld.groupIdAtr))
if err != nil {
return Account{}, fmt.Errorf("invalid entry value for group-id: %v", entry.GetAttributeValue(ld.groupIdAtr))
return Account{}, fmt.Errorf("invalid entry value for group-id %q: %w",
entry.GetAttributeValue(ld.groupIdAtr), err)
}
userId, err := strconv.Atoi(entry.GetAttributeValue(ld.userIdAtr))
if err != nil {
return Account{}, fmt.Errorf("invalid entry value for group-id: %v", entry.GetAttributeValue(ld.userIdAtr))
return Account{}, fmt.Errorf("invalid entry value for user-id %q: %w",
entry.GetAttributeValue(ld.userIdAtr), err)
}
return Account{
Access: entry.GetAttributeValue(ld.accessAtr),
@@ -137,6 +139,9 @@ func (ld *LdapIAMService) UpdateUserAccount(access string, props MutableProps) e
if props.UserID != nil {
req.Replace(ld.userIdAtr, []string{fmt.Sprint(*props.UserID)})
}
if props.Role != "" {
req.Replace(ld.roleAtr, []string{string(props.Role)})
}

err := ld.conn.Modify(req)
//TODO: Handle non existing user case
@@ -183,11 +188,13 @@ func (ld *LdapIAMService) ListUserAccounts() ([]Account, error) {
for _, el := range resp.Entries {
groupId, err := strconv.Atoi(el.GetAttributeValue(ld.groupIdAtr))
if err != nil {
return nil, fmt.Errorf("invalid entry value for group-id: %v", el.GetAttributeValue(ld.groupIdAtr))
return nil, fmt.Errorf("invalid entry value for group-id %q: %w",
el.GetAttributeValue(ld.groupIdAtr), err)
}
userId, err := strconv.Atoi(el.GetAttributeValue(ld.userIdAtr))
if err != nil {
return nil, fmt.Errorf("invalid entry value for group-id: %v", el.GetAttributeValue(ld.userIdAtr))
return nil, fmt.Errorf("invalid entry value for user-id %q: %w",
el.GetAttributeValue(ld.userIdAtr), err)
}
result = append(result, Account{
Access: el.GetAttributeValue(ld.accessAtr),

@@ -19,18 +19,30 @@ import (
)

// IAMServiceSingle manages the single tenant (root-only) IAM service
type IAMServiceSingle struct{}
type IAMServiceSingle struct {
root Account
}

var _ IAMService = &IAMServiceSingle{}

func NewIAMServiceSingle(r Account) IAMService {
return &IAMServiceSingle{
root: r,
}
}

// CreateAccount not valid in single tenant mode
func (IAMServiceSingle) CreateAccount(account Account) error {
return s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
}

// GetUserAccount no accounts in single tenant mode
func (IAMServiceSingle) GetUserAccount(access string) (Account, error) {
return Account{}, s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
// GetUserAccount returns the root account if the root access key
// is provided, and "ErrAdminUserNotFound" otherwise
func (s IAMServiceSingle) GetUserAccount(access string) (Account, error) {
if access == s.root.Access {
return s.root, nil
}
return Account{}, s3err.GetAPIError(s3err.ErrAdminUserNotFound)
}

// UpdateUserAccount no accounts in single tenant mode

@@ -19,6 +19,7 @@ import (
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"time"

@@ -26,20 +27,25 @@ import (
"github.com/hashicorp/vault-client-go/schema"
)

const requestTimeout = 10 * time.Second

type VaultIAMService struct {
client *vault.Client
reqOpts []vault.RequestOption
authReqOpts []vault.RequestOption
kvReqOpts []vault.RequestOption
secretStoragePath string
rootAcc Account
creds schema.AppRoleLoginRequest
}

var _ IAMService = &VaultIAMService{}

func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath, mountPath, rootToken, roleID, roleSecret, serverCert, clientCert, clientCertKey string) (IAMService, error) {
func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath,
authMethod, mountPath, rootToken, roleID, roleSecret, serverCert,
clientCert, clientCertKey string) (IAMService, error) {
opts := []vault.ClientOption{
vault.WithAddress(endpoint),
// set request timeout to 10 secs
vault.WithRequestTimeout(10 * time.Second),
vault.WithRequestTimeout(requestTimeout),
}
if serverCert != "" {
tls := vault.TLSConfiguration{}
@@ -62,10 +68,21 @@ func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath, mountPath,
return nil, fmt.Errorf("init vault client: %w", err)
}

reqOpts := []vault.RequestOption{}
// if mount path is not specified, it defaults to "approle"
authReqOpts := []vault.RequestOption{}
// if auth method path is not specified, it defaults to "approle"
if authMethod != "" {
authReqOpts = append(authReqOpts, vault.WithMountPath(authMethod))
}

kvReqOpts := []vault.RequestOption{}
// if mount path is not specified, it defaults to "kv-v2"
if mountPath != "" {
reqOpts = append(reqOpts, vault.WithMountPath(mountPath))
kvReqOpts = append(kvReqOpts, vault.WithMountPath(mountPath))
}

creds := schema.AppRoleLoginRequest{
RoleId: roleID,
SecretId: roleSecret,
}

// Authentication
@@ -80,12 +97,8 @@ func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath, mountPath,
return nil, fmt.Errorf("role id and role secret must both be specified")
}

ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
resp, err := client.Auth.AppRoleLogin(ctx, schema.AppRoleLoginRequest{
RoleId: roleID,
SecretId: roleSecret,
}, reqOpts...)
cancel()
resp, err := client.Auth.AppRoleLogin(context.Background(),
creds, authReqOpts...)
if err != nil {
return nil, fmt.Errorf("approle authentication failure: %w", err)
}
@@ -99,33 +112,77 @@ func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath, mountPath,

return &VaultIAMService{
client: client,
reqOpts: reqOpts,
authReqOpts: authReqOpts,
kvReqOpts: kvReqOpts,
secretStoragePath: secretStoragePath,
rootAcc: rootAcc,
creds: creds,
}, nil
}
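Hypothetical wiring for the widened constructor signature; every value below is a placeholder chosen for illustration, not a default from this codebase:

svc, err := NewVaultIAMService(rootAcc,
	"https://vault.example.com:8200", // endpoint
	"versitygw-users",                // secretStoragePath
	"approle",                        // authMethod (auth mount path)
	"kv-v2",                          // mountPath (kv secrets mount)
	"",                               // rootToken, unused with AppRole creds
	roleID, roleSecret,               // AppRole credentials
	"", "", "")                       // serverCert, clientCert, clientCertKey
if err != nil {
	log.Fatalf("init vault IAM: %v", err)
}
_ = svc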

func (vt *VaultIAMService) reAuthIfNeeded(err error) error {
if err == nil {
return nil
}

// Vault returns 403 for expired/revoked tokens
// pass all other errors back unchanged
if !vault.IsErrorStatus(err, http.StatusForbidden) {
return err
}

resp, authErr := vt.client.Auth.AppRoleLogin(context.Background(),
vt.creds, vt.authReqOpts...)
if authErr != nil {
return fmt.Errorf("vault re-authentication failure: %w", authErr)
}
if err := vt.client.SetToken(resp.Auth.ClientToken); err != nil {
return fmt.Errorf("vault re-authentication set token failure: %w", err)
}

return nil
}
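The call / reAuthIfNeeded / retry-once shape below repeats in CreateAccount, GetUserAccount, DeleteUserAccount, and ListUserAccounts. A hypothetical generic wrapper illustrating the pattern (not part of the diff):

func withReauth[T any](vt *VaultIAMService, op func() (T, error)) (T, error) {
	res, err := op()
	if err == nil {
		return res, nil
	}
	// re-authenticate only on Vault's 403 token-expiry signal
	if rerr := vt.reAuthIfNeeded(err); rerr != nil {
		return res, rerr
	}
	// retry exactly once with the fresh token
	return op()
}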

func (vt *VaultIAMService) CreateAccount(account Account) error {
if vt.rootAcc.Access == account.Access {
return ErrUserExists
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
_, err := vt.client.Secrets.KvV2Write(ctx, vt.secretStoragePath+"/"+account.Access, schema.KvV2WriteRequest{
Data: map[string]any{
account.Access: account,
},
Options: map[string]interface{}{
"cas": 0,
},
}, vt.reqOpts...)
cancel()
_, err := vt.client.Secrets.KvV2Write(context.Background(),
vt.secretStoragePath+"/"+account.Access, schema.KvV2WriteRequest{
Data: map[string]any{
account.Access: account,
},
Options: map[string]any{
"cas": 0,
},
}, vt.kvReqOpts...)
if err != nil {
if strings.Contains(err.Error(), "check-and-set") {
return ErrUserExists
}
return err
}

reauthErr := vt.reAuthIfNeeded(err)
if reauthErr != nil {
return reauthErr
}
// retry once after re-auth
_, err = vt.client.Secrets.KvV2Write(context.Background(),
vt.secretStoragePath+"/"+account.Access, schema.KvV2WriteRequest{
Data: map[string]any{
account.Access: account,
},
Options: map[string]any{
"cas": 0,
},
}, vt.kvReqOpts...)
if err != nil {
if strings.Contains(err.Error(), "check-and-set") {
return ErrUserExists
}
return err
}
return nil
}
return nil
}

@@ -133,66 +190,84 @@ func (vt *VaultIAMService) GetUserAccount(access string) (Account, error) {
if vt.rootAcc.Access == access {
return vt.rootAcc, nil
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
resp, err := vt.client.Secrets.KvV2Read(ctx, vt.secretStoragePath+"/"+access, vt.reqOpts...)
cancel()
resp, err := vt.client.Secrets.KvV2Read(context.Background(),
vt.secretStoragePath+"/"+access, vt.kvReqOpts...)
if err != nil {
return Account{}, err
reauthErr := vt.reAuthIfNeeded(err)
if reauthErr != nil {
return Account{}, reauthErr
}
// retry once after re-auth
resp, err = vt.client.Secrets.KvV2Read(context.Background(),
vt.secretStoragePath+"/"+access, vt.kvReqOpts...)
if err != nil {
return Account{}, err
}
}

acc, err := parseVaultUserAccount(resp.Data.Data, access)
if err != nil {
return Account{}, err
}

return acc, nil
}

func (vt *VaultIAMService) UpdateUserAccount(access string, props MutableProps) error {
//TODO: We need something like a transaction here ?
acc, err := vt.GetUserAccount(access)
if err != nil {
return err
}

updateAcc(&acc, props)

err = vt.DeleteUserAccount(access)
if err != nil {
return err
}

err = vt.CreateAccount(acc)
if err != nil {
return err
}

return nil
}

func (vt *VaultIAMService) DeleteUserAccount(access string) error {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
_, err := vt.client.Secrets.KvV2DeleteMetadataAndAllVersions(ctx, vt.secretStoragePath+"/"+access, vt.reqOpts...)
cancel()
_, err := vt.client.Secrets.KvV2DeleteMetadataAndAllVersions(context.Background(),
vt.secretStoragePath+"/"+access, vt.kvReqOpts...)
if err != nil {
return err
reauthErr := vt.reAuthIfNeeded(err)
if reauthErr != nil {
return reauthErr
}
// retry once after re-auth
_, err = vt.client.Secrets.KvV2DeleteMetadataAndAllVersions(context.Background(),
vt.secretStoragePath+"/"+access, vt.kvReqOpts...)
if err != nil {
return err
}
}
return nil
}

func (vt *VaultIAMService) ListUserAccounts() ([]Account, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
resp, err := vt.client.Secrets.KvV2List(ctx, vt.secretStoragePath, vt.reqOpts...)
cancel()
resp, err := vt.client.Secrets.KvV2List(context.Background(),
vt.secretStoragePath, vt.kvReqOpts...)
if err != nil {
if vault.IsErrorStatus(err, 404) {
return []Account{}, nil
reauthErr := vt.reAuthIfNeeded(err)
if reauthErr != nil {
if vault.IsErrorStatus(err, http.StatusNotFound) {
return []Account{}, nil
}
return nil, reauthErr
}
// retry once after re-auth
resp, err = vt.client.Secrets.KvV2List(context.Background(),
vt.secretStoragePath, vt.kvReqOpts...)
if err != nil {
if vault.IsErrorStatus(err, http.StatusNotFound) {
return []Account{}, nil
}
return nil, err
}
return nil, err
}

accs := []Account{}

for _, acss := range resp.Data.Keys {
acc, err := vt.GetUserAccount(acss)
if err != nil {
@@ -200,7 +275,6 @@ func (vt *VaultIAMService) ListUserAccounts() ([]Account, error) {
}
accs = append(accs, acc)
}

return accs, nil
}

@@ -211,8 +285,8 @@ func (vt *VaultIAMService) Shutdown() error {

var errInvalidUser error = errors.New("invalid user account entry in secrets engine")

func parseVaultUserAccount(data map[string]interface{}, access string) (acc Account, err error) {
usrAcc, ok := data[access].(map[string]interface{})
func parseVaultUserAccount(data map[string]any, access string) (acc Account, err error) {
usrAcc, ok := data[access].(map[string]any)
if !ok {
return acc, errInvalidUser
}

@@ -95,7 +95,7 @@ func ParseBucketLockConfigurationOutput(input []byte) (*types.ObjectLockConfigur
func ParseObjectLockRetentionInput(input []byte) ([]byte, error) {
var retention s3response.PutObjectRetentionInput
if err := xml.Unmarshal(input, &retention); err != nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
}

if retention.RetainUntilDate.Before(time.Now()) {
@@ -120,23 +120,23 @@ func ParseObjectLockRetentionOutput(input []byte) (*types.ObjectLockRetention, e
return &retention, nil
}

func ParseObjectLegalHoldOutput(status *bool) *types.ObjectLockLegalHold {
func ParseObjectLegalHoldOutput(status *bool) *s3response.GetObjectLegalHoldResult {
if status == nil {
return nil
}

if *status {
return &types.ObjectLockLegalHold{
return &s3response.GetObjectLegalHoldResult{
Status: types.ObjectLockLegalHoldStatusOn,
}
}

return &types.ObjectLockLegalHold{
return &s3response.GetObjectLegalHoldResult{
Status: types.ObjectLockLegalHoldStatusOff,
}
}

func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects []types.ObjectIdentifier, bypass bool, be backend.Backend) error {
func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects []types.ObjectIdentifier, bypass, isBucketPublic bool, be backend.Backend) error {
data, err := be.GetObjectLockConfiguration(ctx, bucket)
if err != nil {
if errors.Is(err, s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound)) {
@@ -211,7 +211,11 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
if err != nil {
return err
}
err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
if isBucketPublic {
err = VerifyPublicBucketPolicy(policy, bucket, key, BypassGovernanceRetentionAction)
} else {
err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
}
if err != nil {
return s3err.GetAPIError(s3err.ErrObjectLocked)
}
@@ -254,7 +258,11 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
if err != nil {
return err
}
err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
if isBucketPublic {
err = VerifyPublicBucketPolicy(policy, bucket, key, BypassGovernanceRetentionAction)
} else {
err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
}
if err != nil {
return s3err.GetAPIError(s3err.ErrObjectLocked)
}
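The public/authenticated branch appears twice in CheckObjectAccess. A hypothetical helper that factors it out (not part of the diff):

func verifyBypassPolicy(policy []byte, userAccess, bucket, key string, isBucketPublic bool) error {
	if isBucketPublic {
		// anonymous access is checked against the public policy path
		return VerifyPublicBucketPolicy(policy, bucket, key, BypassGovernanceRetentionAction)
	}
	return VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
}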

@@ -27,7 +27,6 @@ import (
"math"
"os"
"path/filepath"
"slices"
"sort"
"strconv"
"strings"
@@ -64,6 +63,7 @@ const (
keyBucketLock key = "Bucketlock"
keyObjRetention key = "Objectretention"
keyObjLegalHold key = "Objectlegalhold"
keyExpires key = "Vgwexpires"
onameAttr key = "Objname"
onameAttrLower key = "objname"
metaTmpMultipartPrefix key = ".sgwtmp" + "/multipart"
@@ -77,6 +77,7 @@ func (key) Table() map[string]struct{} {
"policy": {},
"bucketlock": {},
"objectretention": {},
"vgwexpires": {},
"objectlegalhold": {},
"objname": {},
".sgwtmp/multipart": {},
@@ -180,11 +181,9 @@ func (az *Azure) CreateBucket(ctx context.Context, input *s3.CreateBucketInput,
return err
}

var acl auth.ACL
if len(aclBytes) > 0 {
if err := json.Unmarshal(aclBytes, &acl); err != nil {
return fmt.Errorf("unmarshal acl: %w", err)
}
acl, err := auth.ParseACL(aclBytes)
if err != nil {
return err
}

if acl.Owner == acct.Access {
@@ -293,14 +292,27 @@ func (az *Azure) DeleteBucketOwnershipControls(ctx context.Context, bucket strin
return az.deleteContainerMetaData(ctx, bucket, string(keyOwnership))
}

func (az *Azure) PutObject(ctx context.Context, po *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
tags, err := parseTags(po.Tagging)
func (az *Azure) PutObject(ctx context.Context, po s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
tags, err := backend.ParseObjectTags(getString(po.Tagging))
if err != nil {
return s3response.PutObjectOutput{}, err
}

metadata := parseMetadata(po.Metadata)

// Store the "Expires" property in the object metadata
if getString(po.Expires) != "" {
if metadata == nil {
metadata = map[string]*string{
string(keyExpires): po.Expires,
}
} else {
metadata[string(keyExpires)] = po.Expires
}
}

opts := &blockblob.UploadStreamOptions{
Metadata: parseMetadata(po.Metadata),
Metadata: metadata,
Tags: tags,
}

@@ -308,6 +320,8 @@ func (az *Azure) PutObject(ctx context.Context, po *s3.PutObjectInput) (s3respon
opts.HTTPHeaders.BlobContentEncoding = po.ContentEncoding
opts.HTTPHeaders.BlobContentLanguage = po.ContentLanguage
opts.HTTPHeaders.BlobContentDisposition = po.ContentDisposition
opts.HTTPHeaders.BlobContentLanguage = po.ContentLanguage
opts.HTTPHeaders.BlobCacheControl = po.CacheControl
if strings.HasSuffix(*po.Key, "/") {
// Hardcode "application/x-directory" for directory objects
opts.HTTPHeaders.BlobContentType = backend.GetPtrFromString(backend.DirContentType)
@@ -390,17 +404,29 @@ func (az *Azure) DeleteBucketTagging(ctx context.Context, bucket string) error {
}

func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
client, err := az.getBlobClient(*input.Bucket, *input.Key)
if err != nil {
return nil, err
}

resp, err := client.GetProperties(ctx, nil)
if err != nil {
return nil, azureErrToS3Err(err)
}

var opts *azblob.DownloadStreamOptions
if *input.Range != "" {
offset, count, err := backend.ParseRange(0, *input.Range)
offset, count, isValid, err := backend.ParseObjectRange(*resp.ContentLength, *input.Range)
if err != nil {
return nil, err
}
opts = &azblob.DownloadStreamOptions{
Range: blob.HTTPRange{
Count: count,
Offset: offset,
},
if isValid {
opts = &azblob.DownloadStreamOptions{
Range: blob.HTTPRange{
Count: count,
Offset: offset,
},
}
}
}
blobDownloadResponse, err := az.client.DownloadStream(ctx, *input.Bucket, *input.Key, opts)
@@ -419,17 +445,21 @@ func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.G
}

return &s3.GetObjectOutput{
AcceptRanges: input.Range,
ContentLength: blobDownloadResponse.ContentLength,
ContentEncoding: blobDownloadResponse.ContentEncoding,
ContentType: contentType,
ETag: (*string)(blobDownloadResponse.ETag),
LastModified: blobDownloadResponse.LastModified,
Metadata: parseAzMetadata(blobDownloadResponse.Metadata),
TagCount: &tagcount,
ContentRange: blobDownloadResponse.ContentRange,
Body: blobDownloadResponse.Body,
StorageClass: types.StorageClassStandard,
AcceptRanges: backend.GetPtrFromString("bytes"),
ContentLength: blobDownloadResponse.ContentLength,
ContentEncoding: blobDownloadResponse.ContentEncoding,
ContentType: contentType,
ContentDisposition: blobDownloadResponse.ContentDisposition,
ContentLanguage: blobDownloadResponse.ContentLanguage,
CacheControl: blobDownloadResponse.CacheControl,
ExpiresString: blobDownloadResponse.Metadata[string(keyExpires)],
ETag: (*string)(blobDownloadResponse.ETag),
LastModified: blobDownloadResponse.LastModified,
Metadata: parseAndFilterAzMetadata(blobDownloadResponse.Metadata),
TagCount: &tagcount,
ContentRange: blobDownloadResponse.ContentRange,
Body: blobDownloadResponse.Body,
StorageClass: types.StorageClassStandard,
}, nil
}

@@ -475,18 +505,35 @@ func (az *Azure) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3
if err != nil {
return nil, azureErrToS3Err(err)
}
var size int64
if resp.ContentLength != nil {
size = *resp.ContentLength
}

startOffset, length, isValid, err := backend.ParseObjectRange(size, getString(input.Range))
if err != nil {
return nil, err
}

var contentRange string
if isValid {
contentRange = fmt.Sprintf("bytes %v-%v/%v",
startOffset, startOffset+length-1, size)
}

result := &s3.HeadObjectOutput{
AcceptRanges: resp.AcceptRanges,
ContentLength: resp.ContentLength,
ContentRange: &contentRange,
AcceptRanges: backend.GetPtrFromString("bytes"),
ContentLength: &length,
ContentType: resp.ContentType,
ContentEncoding: resp.ContentEncoding,
ContentLanguage: resp.ContentLanguage,
ContentDisposition: resp.ContentDisposition,
CacheControl: resp.CacheControl,
ExpiresString: resp.Metadata[string(keyExpires)],
ETag: (*string)(resp.ETag),
LastModified: resp.LastModified,
Metadata: parseAzMetadata(resp.Metadata),
Expires: resp.ExpiresOn,
Metadata: parseAndFilterAzMetadata(resp.Metadata),
StorageClass: types.StorageClassStandard,
}
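backend.ParseObjectRange itself is not shown in this diff. A simplified sketch consistent with how it is called in GetObject and HeadObject (offset, count, and an isValid flag that is false for an empty header; the real helper lives in the backend package and may differ, and suffix ranges such as "bytes=-N" are omitted here for brevity):

func parseObjectRange(size int64, rng string) (offset, count int64, isValid bool, err error) {
	if rng == "" {
		// no Range header: the whole object, but not a range response
		return 0, size, false, nil
	}
	spec, found := strings.CutPrefix(rng, "bytes=")
	if !found {
		return 0, 0, false, s3err.GetAPIError(s3err.ErrInvalidRange)
	}
	startStr, endStr, found := strings.Cut(spec, "-")
	if !found {
		return 0, 0, false, s3err.GetAPIError(s3err.ErrInvalidRange)
	}
	start, err := strconv.ParseInt(startStr, 10, 64)
	if err != nil || start >= size {
		return 0, 0, false, s3err.GetAPIError(s3err.ErrInvalidRange)
	}
	end := size - 1
	if endStr != "" {
		if end, err = strconv.ParseInt(endStr, 10, 64); err != nil {
			return 0, 0, false, s3err.GetAPIError(s3err.ErrInvalidRange)
		}
		// clamp an end past EOF to the last byte, as HTTP allows
		if end > size-1 {
			end = size - 1
		}
	}
	return start, end - start + 1, true, nil
}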

@@ -521,7 +568,7 @@ func (az *Azure) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAtt
}

return s3response.GetObjectAttributesResponse{
ETag: data.ETag,
ETag: backend.TrimEtag(data.ETag),
ObjectSize: data.ContentLength,
StorageClass: data.StorageClass,
LastModified: data.LastModified,
@@ -551,6 +598,18 @@ func (az *Azure) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s
maxKeys = *input.MaxKeys
}

// Retrieve the bucket acl to get the bucket owner
// All the objects in the bucket are owned by the bucket owner
aclBytes, err := az.getContainerMetaData(ctx, *input.Bucket, string(keyAclCapital))
if err != nil {
return s3response.ListObjectsResult{}, azureErrToS3Err(err)
}

acl, err := auth.ParseACL(aclBytes)
if err != nil {
return s3response.ListObjectsResult{}, err
}

Pager:
for pager.More() {
resp, err := pager.NextPage(ctx)
@@ -564,11 +623,14 @@ Pager:
break Pager
}
objects = append(objects, s3response.Object{
ETag: (*string)(v.Properties.ETag),
ETag: backend.GetPtrFromString(fmt.Sprintf("%q", *v.Properties.ETag)),
Key: v.Name,
LastModified: v.Properties.LastModified,
Size: v.Properties.ContentLength,
StorageClass: types.ObjectStorageClassStandard,
Owner: &types.Owner{
ID: &acl.Owner,
},
})
}
for _, v := range resp.Segment.BlobPrefixes {
@@ -628,10 +690,29 @@ func (az *Azure) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input
var nextMarker *string
var isTruncated bool
var maxKeys int32 = math.MaxInt32
var fetchOwner bool

if input.MaxKeys != nil {
maxKeys = *input.MaxKeys
}
if input.FetchOwner != nil {
fetchOwner = *input.FetchOwner
}

// Retrieve the bucket acl to get the bucket owner, if "fetchOwner" is true
// All the objects in the bucket are owned by the bucket owner
var acl auth.ACL
if fetchOwner {
aclBytes, err := az.getContainerMetaData(ctx, *input.Bucket, string(keyAclCapital))
if err != nil {
return s3response.ListObjectsV2Result{}, azureErrToS3Err(err)
}

acl, err = auth.ParseACL(aclBytes)
if err != nil {
return s3response.ListObjectsV2Result{}, err
}
}

Pager:
for pager.More() {
@@ -645,13 +726,20 @@ Pager:
isTruncated = true
break Pager
}
objects = append(objects, s3response.Object{
ETag: (*string)(v.Properties.ETag),

obj := s3response.Object{
ETag: backend.GetPtrFromString(fmt.Sprintf("%q", *v.Properties.ETag)),
Key: v.Name,
LastModified: v.Properties.LastModified,
Size: v.Properties.ContentLength,
StorageClass: types.ObjectStorageClassStandard,
})
}
if fetchOwner {
obj.Owner = &types.Owner{
ID: &acl.Owner,
}
}
objects = append(objects, obj)
}
for _, v := range resp.Segment.BlobPrefixes {
if *v.Name <= marker {
@@ -734,41 +822,158 @@ func (az *Azure) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput
}, nil
}

func (az *Azure) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
bclient, err := az.getBlobClient(*input.Bucket, *input.Key)
func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
dstClient, err := az.getBlobClient(*input.Bucket, *input.Key)
if err != nil {
return nil, err
return s3response.CopyObjectOutput{}, err
}

if strings.Join([]string{*input.Bucket, *input.Key}, "/") == *input.CopySource {
props, err := bclient.GetProperties(ctx, nil)
if input.MetadataDirective != types.MetadataDirectiveReplace {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
}

// Set object meta http headers
res, err := dstClient.SetHTTPHeaders(ctx, blob.HTTPHeaders{
BlobCacheControl: input.CacheControl,
BlobContentDisposition: input.ContentDisposition,
BlobContentEncoding: input.ContentEncoding,
BlobContentLanguage: input.ContentLanguage,
BlobContentType: input.ContentType,
}, nil)
if err != nil {
return nil, azureErrToS3Err(err)
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
}

mdmap := props.Metadata
if isMetaSame(mdmap, input.Metadata) {
return nil, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
meta := input.Metadata
if meta == nil {
meta = make(map[string]string)
}

// Embed "Expires" in object metadata
if getString(input.Expires) != "" {
meta[string(keyExpires)] = *input.Expires
}
// Set object metadata
_, err = dstClient.SetMetadata(ctx, parseMetadata(meta), nil)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
}

// Set object legal hold
if input.ObjectLockLegalHoldStatus != "" {
err = az.PutObjectLegalHold(ctx, *input.Bucket, *input.Key, "", input.ObjectLockLegalHoldStatus == types.ObjectLockLegalHoldStatusOn)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
}
}
// Set object retention
if input.ObjectLockMode != "" && input.ObjectLockRetainUntilDate != nil {
retention := s3response.PutObjectRetentionInput{
Mode: types.ObjectLockRetentionMode(input.ObjectLockMode),
RetainUntilDate: s3response.AmzDate{
Time: *input.ObjectLockRetainUntilDate,
},
}

retParsed, err := json.Marshal(retention)
if err != nil {
return s3response.CopyObjectOutput{}, fmt.Errorf("parse object retention: %w", err)
}
err = az.PutObjectRetention(ctx, *input.Bucket, *input.Key, "", true, retParsed)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
}
}

// Set object Tagging, if tagging directive is "REPLACE"
if input.TaggingDirective == types.TaggingDirectiveReplace {
tags, err := backend.ParseObjectTags(getString(input.Tagging))
if err != nil {
return s3response.CopyObjectOutput{}, err
}
_, err = dstClient.SetTags(ctx, tags, nil)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
}
}

return s3response.CopyObjectOutput{
CopyObjectResult: &s3response.CopyObjectResult{
LastModified: res.LastModified,
ETag: (*string)(res.ETag),
},
}, nil
}

srcBucket, srcObj, _, err := backend.ParseCopySource(*input.CopySource)
if err != nil {
return s3response.CopyObjectOutput{}, err
}

// Get the source object
downloadResp, err := az.client.DownloadStream(ctx, srcBucket, srcObj, nil)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
}

pInput := s3response.PutObjectInput{
Body: downloadResp.Body,
Bucket: input.Bucket,
Key: input.Key,
ContentLength: downloadResp.ContentLength,
ContentType: input.ContentType,
ContentEncoding: input.ContentEncoding,
ContentDisposition: input.ContentDisposition,
ContentLanguage: input.ContentLanguage,
CacheControl: input.CacheControl,
Expires: input.Expires,
Metadata: input.Metadata,
ObjectLockRetainUntilDate: input.ObjectLockRetainUntilDate,
ObjectLockMode: input.ObjectLockMode,
ObjectLockLegalHoldStatus: input.ObjectLockLegalHoldStatus,
}

if input.MetadataDirective == types.MetadataDirectiveCopy {
// Expires is in downloadResp.Metadata
pInput.Expires = nil
pInput.CacheControl = downloadResp.CacheControl
pInput.ContentDisposition = downloadResp.ContentDisposition
pInput.ContentEncoding = downloadResp.ContentEncoding
pInput.ContentLanguage = downloadResp.ContentLanguage
pInput.ContentType = downloadResp.ContentType
pInput.Metadata = parseAzMetadata(downloadResp.Metadata)
}

if input.TaggingDirective == types.TaggingDirectiveReplace {
pInput.Tagging = input.Tagging
}

// Create the destination object
resp, err := az.PutObject(ctx, pInput)
if err != nil {
return s3response.CopyObjectOutput{}, err
}

// Copy the object tagging, if tagging directive is "COPY"
if input.TaggingDirective == types.TaggingDirectiveCopy {
srcClient, err := az.getBlobClient(srcBucket, srcObj)
if err != nil {
return s3response.CopyObjectOutput{}, err
}
res, err := srcClient.GetTags(ctx, nil)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
}

_, err = dstClient.SetTags(ctx, parseAzTags(res.BlobTagSet), nil)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
}
}

tags, err := parseTags(input.Tagging)
if err != nil {
return nil, err
}

resp, err := bclient.CopyFromURL(ctx, az.serviceURL+"/"+*input.CopySource, &blob.CopyFromURLOptions{
BlobTags: tags,
Metadata: parseMetadata(input.Metadata),
})
if err != nil {
return nil, azureErrToS3Err(err)
}

return &s3.CopyObjectOutput{
CopyObjectResult: &types.CopyObjectResult{
ETag: (*string)(resp.ETag),
LastModified: resp.LastModified,
return s3response.CopyObjectOutput{
CopyObjectResult: &s3response.CopyObjectResult{
ETag: &resp.ETag,
},
}, nil
}
@@ -815,7 +1020,7 @@ func (az *Azure) DeleteObjectTagging(ctx context.Context, bucket, object string)
return nil
}

func (az *Azure) CreateMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
func (az *Azure) CreateMultipartUpload(ctx context.Context, input s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
if input.ObjectLockLegalHoldStatus != "" || input.ObjectLockMode != "" {
bucketLock, err := az.getContainerMetaData(ctx, *input.Bucket, string(keyBucketLock))
if err != nil {
@@ -839,21 +1044,14 @@ func (az *Azure) CreateMultipartUpload(ctx context.Context, input *s3.CreateMult
meta := parseMetadata(input.Metadata)
meta[string(onameAttr)] = input.Key

if getString(input.Expires) != "" {
meta[string(keyExpires)] = input.Expires
}

// parse object tags
tagsStr := getString(input.Tagging)
tags := map[string]string{}
if tagsStr != "" {
tagParts := strings.Split(tagsStr, "&")
for _, prt := range tagParts {
p := strings.Split(prt, "=")
if len(p) != 2 {
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidTag)
}
if len(p[0]) > 128 || len(p[1]) > 256 {
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidTag)
}
tags[p[0]] = p[1]
}
tags, err := backend.ParseObjectTags(getString(input.Tagging))
if err != nil {
return s3response.InitiateMultipartUploadResult{}, err
}

// set blob legal hold status in metadata
@@ -881,18 +1079,19 @@ func (az *Azure) CreateMultipartUpload(ctx context.Context, input *s3.CreateMult
opts := &blockblob.UploadBufferOptions{
Metadata: meta,
Tags: tags,
}
if getString(input.ContentType) != "" {
opts.HTTPHeaders = &blob.HTTPHeaders{
BlobContentType: input.ContentType,
BlobContentEncoding: input.ContentEncoding,
}
HTTPHeaders: &blob.HTTPHeaders{
BlobContentType: input.ContentType,
BlobContentEncoding: input.ContentEncoding,
BlobCacheControl: input.CacheControl,
BlobContentDisposition: input.ContentDisposition,
BlobContentLanguage: input.ContentLanguage,
},
}

// Create an empty blob in .sgwtmp/multipart/<uploadId>/<object hash>
// The blob indicates multipart upload initialization and holds the mp metadata
// e.g. tagging, content-type, metadata, object lock status ...
_, err := az.client.UploadBuffer(ctx, *input.Bucket, tmpPath, []byte{}, opts)
_, err = az.client.UploadBuffer(ctx, *input.Bucket, tmpPath, []byte{}, opts)
if err != nil {
return s3response.InitiateMultipartUploadResult{}, azureErrToS3Err(err)
}
@@ -905,9 +1104,9 @@ func (az *Azure) CreateMultipartUpload(ctx context.Context, input *s3.CreateMult
}

// Each part is translated into an uncommitted block in a newly created blob in staging area
func (az *Azure) UploadPart(ctx context.Context, input *s3.UploadPartInput) (etag string, err error) {
func (az *Azure) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
if err := az.checkIfMpExists(ctx, *input.Bucket, *input.Key, *input.UploadId); err != nil {
return "", err
return nil, err
}

// TODO: request streamable version of StageBlock()
@@ -916,32 +1115,34 @@ func (az *Azure) UploadPart(ctx context.Context, input *s3.UploadPartInput) (eta
// the body in memory to create an io.ReadSeekCloser
rdr, err := getReadSeekCloser(input.Body)
if err != nil {
return "", err
return nil, err
}

client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
if err != nil {
return "", err
return nil, err
}

// block id serves as etag here
etag = blockIDInt32ToBase64(*input.PartNumber)
etag := blockIDInt32ToBase64(*input.PartNumber)
_, err = client.StageBlock(ctx, etag, rdr, nil)
if err != nil {
return "", parseMpError(err)
return nil, parseMpError(err)
}

return etag, nil
return &s3.UploadPartOutput{
ETag: &etag,
}, nil
}

func (az *Azure) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
func (az *Azure) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
if err != nil {
return s3response.CopyObjectResult{}, nil
return s3response.CopyPartResult{}, nil
}

if err := az.checkIfMpExists(ctx, *input.Bucket, *input.Key, *input.UploadId); err != nil {
return s3response.CopyObjectResult{}, err
return s3response.CopyPartResult{}, err
}

eTag := blockIDInt32ToBase64(*input.PartNumber)
@@ -949,10 +1150,10 @@ func (az *Azure) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInp
//TODO: the action returns not implemented on azurite, maybe in production this will work?
_, err = client.StageBlockFromURL(ctx, eTag, *input.CopySource, nil)
if err != nil {
return s3response.CopyObjectResult{}, parseMpError(err)
return s3response.CopyPartResult{}, parseMpError(err)
}

return s3response.CopyObjectResult{}, nil
return s3response.CopyPartResult{}, nil
}
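blockIDInt32ToBase64 and decodeBlockId are referenced throughout but defined elsewhere. A sketch consistent with their use here (a part number round-tripped through a fixed-width base64 block ID; Azure requires block IDs within a blob to be equal length), assuming encoding/base64, encoding/binary, and fmt are imported:

func blockIDInt32ToBase64(partNumber int32) string {
	// fixed 4-byte encoding keeps every block ID the same length
	buf := make([]byte, 4)
	binary.LittleEndian.PutUint32(buf, uint32(partNumber))
	return base64.StdEncoding.EncodeToString(buf)
}

func decodeBlockId(blockID string) (int, error) {
	buf, err := base64.StdEncoding.DecodeString(blockID)
	if err != nil || len(buf) != 4 {
		return 0, fmt.Errorf("invalid block id %q", blockID)
	}
	return int(binary.LittleEndian.Uint32(buf)), nil
}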

// Lists all uncommitted parts from the blob
@@ -1164,89 +1365,114 @@ func (az *Azure) AbortMultipartUpload(ctx context.Context, input *s3.AbortMultip
// Copies the multipart metadata from .sgwtmp namespace into the newly created blob
// Deletes the multipart upload 'blob' from .sgwtmp namespace
// It indicates the end of the multipart upload
func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
var res s3response.CompleteMultipartUploadResult

tmpPath := createMetaTmpPath(*input.Key, *input.UploadId)
blobClient, err := az.getBlobClient(*input.Bucket, tmpPath)
if err != nil {
return nil, err
return res, "", err
}

props, err := blobClient.GetProperties(ctx, nil)
if err != nil {
return nil, parseMpError(err)
return res, "", parseMpError(err)
}
tags, err := blobClient.GetTags(ctx, nil)
if err != nil {
return nil, parseMpError(err)
return res, "", parseMpError(err)
}

client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
if err != nil {
return nil, err
return res, "", err
}
blockIds := []string{}

blockList, err := client.GetBlockList(ctx, blockblob.BlockListTypeUncommitted, nil)
if err != nil {
return nil, azureErrToS3Err(err)
return res, "", azureErrToS3Err(err)
}

if len(blockList.UncommittedBlocks) != len(input.MultipartUpload.Parts) {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
}

slices.SortFunc(blockList.UncommittedBlocks, func(a *blockblob.Block, b *blockblob.Block) int {
ptNumber, _ := decodeBlockId(*a.Name)
nextPtNumber, _ := decodeBlockId(*b.Name)
return ptNumber - nextPtNumber
})

last := len(blockList.UncommittedBlocks) - 1
for i, block := range blockList.UncommittedBlocks {
ptNumber, err := decodeBlockId(*block.Name)
uncommittedBlocks := map[int32]*blockblob.Block{}
for _, el := range blockList.UncommittedBlocks {
ptNumber, err := decodeBlockId(backend.GetStringFromPtr(el.Name))
if err != nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
return res, "", fmt.Errorf("invalid block name: %w", err)
}

if *input.MultipartUpload.Parts[i].ETag != *block.Name {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
uncommittedBlocks[int32(ptNumber)] = el
}

// The initial value is the lower limit of partNumber: 0
var totalSize int64
var partNumber int32
last := len(blockList.UncommittedBlocks) - 1
for i, part := range input.MultipartUpload.Parts {
if part.PartNumber == nil {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
}
if *input.MultipartUpload.Parts[i].PartNumber != int32(ptNumber) {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
if *part.PartNumber < 1 {
return res, "", s3err.GetAPIError(s3err.ErrInvalidCompleteMpPartNumber)
}
// part numbers must appear in strictly ascending order
if *part.PartNumber <= partNumber {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPartOrder)
}
partNumber = *part.PartNumber

block, ok := uncommittedBlocks[*part.PartNumber]
if !ok {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
}

if *part.ETag != *block.Name {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
}
// all parts except the last need to be greater than
// the minimum allowed size (5 MiB)
if i < last && *block.Size < backend.MinPartSize {
return nil, s3err.GetAPIError(s3err.ErrEntityTooSmall)
return res, "", s3err.GetAPIError(s3err.ErrEntityTooSmall)
}
totalSize += *block.Size
blockIds = append(blockIds, *block.Name)
}

if input.MpuObjectSize != nil && totalSize != *input.MpuObjectSize {
return res, "", s3err.GetIncorrectMpObjectSizeErr(totalSize, *input.MpuObjectSize)
}

opts := &blockblob.CommitBlockListOptions{
Metadata: props.Metadata,
Tags: parseAzTags(tags.BlobTagSet),
}
opts.HTTPHeaders = &blob.HTTPHeaders{
BlobContentType: props.ContentType,
BlobContentEncoding: props.ContentEncoding,
BlobContentType: props.ContentType,
BlobContentEncoding: props.ContentEncoding,
BlobContentDisposition: props.ContentDisposition,
BlobContentLanguage: props.ContentLanguage,
BlobCacheControl: props.CacheControl,
}

resp, err := client.CommitBlockList(ctx, blockIds, opts)
if err != nil {
return nil, parseMpError(err)
return res, "", parseMpError(err)
}

// cleanup the multipart upload
_, err = blobClient.Delete(ctx, nil)
if err != nil {
return nil, parseMpError(err)
return res, "", parseMpError(err)
}

return &s3.CompleteMultipartUploadOutput{
return s3response.CompleteMultipartUploadResult{
Bucket: input.Bucket,
Key: input.Key,
ETag: (*string)(resp.ETag),
}, nil
}, "", nil
}
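The part validation above, condensed into a standalone sketch using the same error values (assuming the aws-sdk-go-v2 types shown in the diff and that backend.MinPartSize is the 5 MiB S3 minimum; the helper name is hypothetical):

func validateParts(parts []types.CompletedPart, blocks map[int32]*blockblob.Block) error {
	var prev int32 // part numbers start above this lower limit
	last := len(parts) - 1
	for i, p := range parts {
		if p.PartNumber == nil {
			return s3err.GetAPIError(s3err.ErrInvalidPart)
		}
		if *p.PartNumber < 1 {
			return s3err.GetAPIError(s3err.ErrInvalidCompleteMpPartNumber)
		}
		// part numbers must be strictly ascending
		if *p.PartNumber <= prev {
			return s3err.GetAPIError(s3err.ErrInvalidPartOrder)
		}
		prev = *p.PartNumber
		// every part must match a staged block by number and etag
		block, ok := blocks[*p.PartNumber]
		if !ok || p.ETag == nil || *p.ETag != *block.Name {
			return s3err.GetAPIError(s3err.ErrInvalidPart)
		}
		// every part but the last must meet the minimum size
		if i < last && *block.Size < backend.MinPartSize {
			return s3err.GetAPIError(s3err.ErrEntityTooSmall)
		}
	}
	return nil
}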

func (az *Azure) PutBucketAcl(ctx context.Context, bucket string, data []byte) error {
@@ -1457,8 +1683,8 @@ func (az *Azure) GetObjectLegalHold(ctx context.Context, bucket, object, version
return &status, nil
}

func (az *Azure) ChangeBucketOwner(ctx context.Context, bucket string, acl []byte) error {
return az.PutBucketAcl(ctx, bucket, acl)
func (az *Azure) ChangeBucketOwner(ctx context.Context, bucket, owner string) error {
return auth.UpdateBucketACLOwner(ctx, az, bucket, owner)
}

// The action actually returns the containers owned by the user, who initialized the gateway
@@ -1566,7 +1792,7 @@ func parseMetadata(m map[string]string) map[string]*string {
return meta
}

func parseAzMetadata(m map[string]*string) map[string]string {
func parseAndFilterAzMetadata(m map[string]*string) map[string]string {
if m == nil {
return nil
}
@@ -1585,22 +1811,17 @@ func parseAzMetadata(m map[string]*string) map[string]string {
return meta
}

func parseTags(tagstr *string) (map[string]string, error) {
tagsStr := getString(tagstr)
tags := make(map[string]string)

if tagsStr != "" {
tagParts := strings.Split(tagsStr, "&")
for _, prt := range tagParts {
p := strings.Split(prt, "=")
if len(p) != 2 {
return nil, s3err.GetAPIError(s3err.ErrInvalidTag)
}
tags[p[0]] = p[1]
}
func parseAzMetadata(m map[string]*string) map[string]string {
if m == nil {
return nil
}

return tags, nil
meta := make(map[string]string)

for k, v := range m {
meta[k] = *v
}
return meta
}
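backend.ParseObjectTags, which replaces the removed parseTags helper, is defined elsewhere. A sketch consistent with the validation visible in the old inline CreateMultipartUpload code (URL-query-style pairs, keys up to 128 bytes, values up to 256 bytes); the standalone name is hypothetical:

func parseObjectTags(tagging string) (map[string]string, error) {
	if tagging == "" {
		return nil, nil
	}
	tags := make(map[string]string)
	for _, part := range strings.Split(tagging, "&") {
		kv := strings.Split(part, "=")
		if len(kv) != 2 {
			return nil, s3err.GetAPIError(s3err.ErrInvalidTag)
		}
		// S3 limits: 128-byte keys, 256-byte values
		if len(kv[0]) > 128 || len(kv[1]) > 256 {
			return nil, s3err.GetAPIError(s3err.ErrInvalidTag)
		}
		tags[kv[0]] = kv[1]
	}
	return tags, nil
}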
|
||||
|
||||
func parseAzTags(tagSet []*blob.Tags) map[string]string {
|
||||
@@ -1743,11 +1964,9 @@ func (az *Azure) deleteContainerMetaData(ctx context.Context, bucket, key string
}

func getAclFromMetadata(meta map[string]*string, key key) (*auth.ACL, error) {
-	var acl auth.ACL
-
	data, ok := meta[string(key)]
	if !ok {
-		return &acl, nil
+		return &auth.ACL{}, nil
	}

	value, err := decodeString(*data)
@@ -1755,36 +1974,14 @@ func getAclFromMetadata(meta map[string]*string, key key) (*auth.ACL, error) {
		return nil, err
	}

-	if len(value) == 0 {
-		return &acl, nil
-	}
-
-	err = json.Unmarshal(value, &acl)
+	acl, err := auth.ParseACL(value)
	if err != nil {
-		return nil, fmt.Errorf("unmarshal acl: %w", err)
+		return nil, err
	}

	return &acl, nil
}

func isMetaSame(azMeta map[string]*string, awsMeta map[string]string) bool {
	if len(azMeta) != len(awsMeta) {
		return false
	}

	for key, val := range azMeta {
		if key == string(keyAclCapital) || key == string(keyAclLower) {
			continue
		}
		awsVal, ok := awsMeta[key]
		if !ok || awsVal != *val {
			return false
		}
	}

	return true
}

func createMetaTmpPath(obj, uploadId string) string {
	objNameSum := sha256.Sum256([]byte(obj))
	return filepath.Join(string(metaTmpMultipartPrefix), uploadId, fmt.Sprintf("%x", objNameSum))
@@ -40,7 +40,7 @@ func azErrToS3err(azErr *azcore.ResponseError) s3err.APIError {
	case "BlobNotFound":
		return s3err.GetAPIError(s3err.ErrNoSuchKey)
	case "TagsTooLarge":
-		return s3err.GetAPIError(s3err.ErrInvalidTag)
+		return s3err.GetAPIError(s3err.ErrInvalidTagValue)
	case "Requested Range Not Satisfiable":
		return s3err.GetAPIError(s3err.ErrInvalidRange)
	}
@@ -46,23 +46,26 @@ type Backend interface {
	PutBucketOwnershipControls(_ context.Context, bucket string, ownership types.ObjectOwnership) error
	GetBucketOwnershipControls(_ context.Context, bucket string) (types.ObjectOwnership, error)
	DeleteBucketOwnershipControls(_ context.Context, bucket string) error
+	PutBucketCors(context.Context, []byte) error
+	GetBucketCors(_ context.Context, bucket string) ([]byte, error)
+	DeleteBucketCors(_ context.Context, bucket string) error

	// multipart operations
-	CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error)
-	CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
+	CreateMultipartUpload(context.Context, s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error)
+	CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (_ s3response.CompleteMultipartUploadResult, versionid string, _ error)
	AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput) error
	ListMultipartUploads(context.Context, *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error)
	ListParts(context.Context, *s3.ListPartsInput) (s3response.ListPartsResult, error)
-	UploadPart(context.Context, *s3.UploadPartInput) (etag string, err error)
-	UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error)
+	UploadPart(context.Context, *s3.UploadPartInput) (*s3.UploadPartOutput, error)
+	UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyPartResult, error)

	// standard object operations
-	PutObject(context.Context, *s3.PutObjectInput) (s3response.PutObjectOutput, error)
+	PutObject(context.Context, s3response.PutObjectInput) (s3response.PutObjectOutput, error)
	HeadObject(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
	GetObject(context.Context, *s3.GetObjectInput) (*s3.GetObjectOutput, error)
	GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
	GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error)
-	CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
+	CopyObject(context.Context, s3response.CopyObjectInput) (s3response.CopyObjectOutput, error)
	ListObjects(context.Context, *s3.ListObjectsInput) (s3response.ListObjectsResult, error)
	ListObjectsV2(context.Context, *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error)
	DeleteObject(context.Context, *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
@@ -93,7 +96,7 @@ type Backend interface {
	GetObjectLegalHold(_ context.Context, bucket, object, versionId string) (*bool, error)

	// non AWS actions
-	ChangeBucketOwner(_ context.Context, bucket string, acl []byte) error
+	ChangeBucketOwner(_ context.Context, bucket, owner string) error
	ListBucketsAndOwners(context.Context) ([]s3response.Bucket, error)
}
@@ -150,12 +153,21 @@ func (BackendUnsupported) GetBucketOwnershipControls(_ context.Context, bucket s
func (BackendUnsupported) DeleteBucketOwnershipControls(_ context.Context, bucket string) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
+func (BackendUnsupported) PutBucketCors(context.Context, []byte) error {
+	return s3err.GetAPIError(s3err.ErrNotImplemented)
+}
+func (BackendUnsupported) GetBucketCors(_ context.Context, bucket string) ([]byte, error) {
+	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
+}
+func (BackendUnsupported) DeleteBucketCors(_ context.Context, bucket string) error {
+	return s3err.GetAPIError(s3err.ErrNotImplemented)
+}

-func (BackendUnsupported) CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
+func (BackendUnsupported) CreateMultipartUpload(context.Context, s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
	return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
-func (BackendUnsupported) CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
-	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
+func (BackendUnsupported) CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
+	return s3response.CompleteMultipartUploadResult{}, "", s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
@@ -166,14 +178,14 @@ func (BackendUnsupported) ListMultipartUploads(context.Context, *s3.ListMultipar
func (BackendUnsupported) ListParts(context.Context, *s3.ListPartsInput) (s3response.ListPartsResult, error) {
	return s3response.ListPartsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
-func (BackendUnsupported) UploadPart(context.Context, *s3.UploadPartInput) (etag string, err error) {
-	return "", s3err.GetAPIError(s3err.ErrNotImplemented)
+func (BackendUnsupported) UploadPart(context.Context, *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
+	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
-func (BackendUnsupported) UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
-	return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
+func (BackendUnsupported) UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
+	return s3response.CopyPartResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}

-func (BackendUnsupported) PutObject(context.Context, *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
+func (BackendUnsupported) PutObject(context.Context, s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
	return s3response.PutObjectOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) HeadObject(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
@@ -188,8 +200,8 @@ func (BackendUnsupported) GetObjectAcl(context.Context, *s3.GetObjectAclInput) (
func (BackendUnsupported) GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
	return s3response.GetObjectAttributesResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
-func (BackendUnsupported) CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
-	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
+func (BackendUnsupported) CopyObject(context.Context, s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
+	return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjects(context.Context, *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
	return s3response.ListObjectsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
@@ -268,7 +280,7 @@ func (BackendUnsupported) GetObjectLegalHold(_ context.Context, bucket, object,
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}

-func (BackendUnsupported) ChangeBucketOwner(_ context.Context, bucket string, acl []byte) error {
+func (BackendUnsupported) ChangeBucketOwner(_ context.Context, bucket, owner string) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListBucketsAndOwners(context.Context) ([]s3response.Bucket, error) {
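BackendUnsupported doubles as a scaffold: a custom backend can embed it and override only the actions it actually supports, with every other interface method answering ErrNotImplemented. A minimal sketch under that assumption (the myBackend type and package name are hypothetical, not part of the repository):

package mybackend

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/versity/versitygw/backend"
)

// myBackend satisfies backend.Backend by embedding BackendUnsupported,
// so every action defaults to ErrNotImplemented until overridden.
type myBackend struct {
	backend.BackendUnsupported
}

// HeadObject is the only action this sketch overrides; all other
// interface methods fall through to the embedded stubs.
func (myBackend) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
	// a real implementation would stat the object in its store
	return &s3.HeadObjectOutput{}, nil
}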
@@ -17,12 +17,17 @@ package backend

import (
	"crypto/md5"
	"encoding/hex"
	"errors"
	"fmt"
+	"hash"
	"io"
-	"net/http"
+	"io/fs"
	"net/url"
+	"os"
+	"regexp"
	"strconv"
	"strings"
+	"syscall"
	"time"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
@@ -71,45 +76,130 @@ func GetTimePtr(t time.Time) *time.Time {
	return &t
}

+func TrimEtag(etag *string) *string {
+	if etag == nil {
+		return nil
+	}
+
+	return GetPtrFromString(strings.Trim(*etag, "\""))
+}
+
var (
-	errInvalidRange = s3err.GetAPIError(s3err.ErrInvalidRange)
+	errInvalidRange           = s3err.GetAPIError(s3err.ErrInvalidRange)
+	errInvalidCopySourceRange = s3err.GetAPIError(s3err.ErrInvalidCopySourceRange)
)

-// ParseRange parses input range header and returns startoffset, length, and
-// error. If no endoffset specified, then length is set to -1.
-func ParseRange(size int64, acceptRange string) (int64, int64, error) {
+// ParseObjectRange parses the input range header and returns startoffset,
+// length, isValid, and error. If no endoffset is specified, then length is
+// set to the object size. For invalid inputs it returns no error, but
+// isValid=false. An `InvalidRange` error is returned only if startoffset is
+// greater than the object size.
+func ParseObjectRange(size int64, acceptRange string) (int64, int64, bool, error) {
	if acceptRange == "" {
		return 0, size, false, nil
	}

	rangeKv := strings.Split(acceptRange, "=")

	if len(rangeKv) != 2 {
		return 0, size, false, nil
	}

	if rangeKv[0] != "bytes" {
		return 0, size, false, nil
	}

	bRange := strings.Split(rangeKv[1], "-")
	if len(bRange) != 2 {
		return 0, size, false, nil
	}

	startOffset, err := strconv.ParseInt(bRange[0], 10, 64)
	if err != nil && bRange[0] != "" {
		return 0, size, false, nil
	}

	if bRange[1] == "" {
		if bRange[0] == "" {
			return 0, size, false, nil
		}
		if startOffset >= size {
			return 0, 0, false, errInvalidRange
		}
		return startOffset, size - startOffset, true, nil
	}

	endOffset, err := strconv.ParseInt(bRange[1], 10, 64)
	if err != nil {
		return 0, size, false, nil
	}

	if startOffset > endOffset {
		return 0, size, false, nil
	}

	// for ranges like 'bytes=-100' return the last bytes specified with 'endOffset'
	if bRange[0] == "" {
		endOffset = min(endOffset, size)
		return size - endOffset, endOffset, true, nil
	}

	if startOffset >= size {
		return 0, 0, false, errInvalidRange
	}

	if endOffset >= size {
		endOffset = size - 1
	}

	return startOffset, endOffset - startOffset + 1, true, nil
}
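A quick sanity check of the semantics above: malformed range syntax silently falls back to a full-object read (isValid=false, no error), a suffix range counts from the end, and a start offset past the end of the object is the one case that raises InvalidRange. A minimal usage sketch, assuming the versitygw backend package is importable:

package main

import (
	"fmt"

	"github.com/versity/versitygw/backend"
)

func main() {
	const size = 100

	// valid range: bytes 10 through 19 -> offset 10, length 10
	off, length, ok, err := backend.ParseObjectRange(size, "bytes=10-19")
	fmt.Println(off, length, ok, err) // 10 10 true <nil>

	// suffix range: last 25 bytes -> offset 75, length 25
	off, length, ok, _ = backend.ParseObjectRange(size, "bytes=-25")
	fmt.Println(off, length, ok) // 75 25 true

	// malformed header: treated as "whole object", not an error
	off, length, ok, _ = backend.ParseObjectRange(size, "bits=0-5")
	fmt.Println(off, length, ok) // 0 100 false

	// start past EOF is the only hard failure
	_, _, _, err = backend.ParseObjectRange(size, "bytes=200-")
	fmt.Println(err != nil) // true
}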
// ParseCopySourceRange parses input range header and returns startoffset, length
// and error. If no endoffset specified, then length is set to the object size
func ParseCopySourceRange(size int64, acceptRange string) (int64, int64, error) {
	if acceptRange == "" {
		return 0, size, nil
	}

	rangeKv := strings.Split(acceptRange, "=")

-	if len(rangeKv) < 2 {
-		return 0, 0, errInvalidRange
+	if len(rangeKv) != 2 {
+		return 0, 0, errInvalidCopySourceRange
	}

	if rangeKv[0] != "bytes" {
		return 0, 0, errInvalidCopySourceRange
	}

	bRange := strings.Split(rangeKv[1], "-")
-	if len(bRange) < 1 || len(bRange) > 2 {
-		return 0, 0, errInvalidRange
+	if len(bRange) != 2 {
+		return 0, 0, errInvalidCopySourceRange
	}

	startOffset, err := strconv.ParseInt(bRange[0], 10, 64)
	if err != nil {
-		return 0, 0, errInvalidRange
+		return 0, 0, errInvalidCopySourceRange
	}

-	endOffset := int64(-1)
-	if len(bRange) == 1 || bRange[1] == "" {
-		return startOffset, endOffset, nil
+	if startOffset >= size {
+		return 0, 0, s3err.CreateExceedingRangeErr(size)
	}

-	endOffset, err = strconv.ParseInt(bRange[1], 10, 64)
+	if bRange[1] == "" {
+		return startOffset, size - startOffset + 1, nil
+	}
+
+	endOffset, err := strconv.ParseInt(bRange[1], 10, 64)
	if err != nil {
-		return 0, 0, errInvalidRange
+		return 0, 0, errInvalidCopySourceRange
	}

	if endOffset < startOffset {
-		return 0, 0, errInvalidRange
+		return 0, 0, errInvalidCopySourceRange
	}

	if endOffset >= size {
		return 0, 0, s3err.CreateExceedingRangeErr(size)
	}

	return startOffset, endOffset - startOffset + 1, nil
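Unlike the object-range parser, the copy-source variant is strict: malformed syntax becomes an InvalidCopySourceRange error rather than a silent full-object fallback, and a range running past the source object is reported with the exceeding-range error carrying the object size. A minimal usage sketch under the same import assumption:

package main

import (
	"fmt"

	"github.com/versity/versitygw/backend"
)

func main() {
	const srcSize = 1000

	// copy bytes 0-499 of the source object
	off, length, err := backend.ParseCopySourceRange(srcSize, "bytes=0-499")
	fmt.Println(off, length, err) // 0 500 <nil>

	// malformed: strict parsing rejects instead of falling back
	_, _, err = backend.ParseCopySourceRange(srcSize, "bytes=abc-5")
	fmt.Println(err != nil) // true

	// end offset beyond the source size is an exceeding-range error
	_, _, err = backend.ParseCopySourceRange(srcSize, "bytes=0-1000")
	fmt.Println(err != nil) // true
}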
@@ -139,12 +229,82 @@ func ParseCopySource(copySourceHeader string) (string, string, string, error) {
	return srcBucket, srcObject, versionId, nil
}

-func CreateExceedingRangeErr(objSize int64) s3err.APIError {
-	return s3err.APIError{
-		Code:           "InvalidArgument",
-		Description:    fmt.Sprintf("Range specified is not valid for source object of size: %d", objSize),
-		HTTPStatusCode: http.StatusBadRequest,
-	}
-}
+// ParseObjectTags parses the url encoded input string into
+// map[string]string with unescaped key/value pairs
+func ParseObjectTags(tagging string) (map[string]string, error) {
+	if tagging == "" {
+		return nil, nil
+	}
+
+	tagSet := make(map[string]string)
+
+	for tagging != "" {
+		var tag string
+		tag, tagging, _ = strings.Cut(tagging, "&")
+		// if 'tag' before the first appearance of '&' is empty, continue
+		if tag == "" {
+			continue
+		}
+
+		key, value, found := strings.Cut(tag, "=")
+		// if key is empty, but "=" is present, return invalid url encoding err
+		if found && key == "" {
+			return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
+		}
+
+		// return invalid tag key, if the key is longer than 128
+		if len(key) > 128 {
+			return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
+		}
+
+		// return invalid tag value, if tag value is longer than 256
+		if len(value) > 256 {
+			return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
+		}
+
+		// query unescape tag key
+		key, err := url.QueryUnescape(key)
+		if err != nil {
+			return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
+		}
+
+		// query unescape tag value
+		value, err = url.QueryUnescape(value)
+		if err != nil {
+			return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
+		}
+
+		// check tag key to be valid
+		if !isValidTagComponent(key) {
+			return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
+		}
+
+		// check tag value to be valid
+		if !isValidTagComponent(value) {
+			return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
+		}
+
+		// duplicate keys are not allowed: return invalid url encoding err
+		_, ok := tagSet[key]
+		if ok {
+			return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
+		}
+
+		tagSet[key] = value
+	}
+
+	return tagSet, nil
+}
+
+var validTagComponent = regexp.MustCompile(`^[a-zA-Z0-9:/_.\-+ ]+$`)
+
+// isValidTagComponent matches strings which contain letters, decimal digits,
+// and special chars: '/', '_', '-', '+', '.', ' ' (space)
+func isValidTagComponent(str string) bool {
+	if str == "" {
+		return true
+	}
+	return validTagComponent.Match([]byte(str))
+}
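In practice the tagging string arrives url-encoded (from the x-amz-tagging header or the ?tagging query), so keys and values are unescaped before the character-set validation runs. A small sketch of both the accepting and rejecting paths, same import assumption as above:

package main

import (
	"fmt"

	"github.com/versity/versitygw/backend"
)

func main() {
	// two url-encoded pairs; "%20" decodes to a space, which is allowed
	tags, err := backend.ParseObjectTags("env=prod&team=storage%20eng")
	fmt.Println(tags, err) // map[env:prod team:storage eng] <nil>

	// duplicate keys are rejected as invalid url-encoded tagging
	_, err = backend.ParseObjectTags("k=1&k=2")
	fmt.Println(err != nil) // true

	// characters outside the allowed set fail key validation
	_, err = backend.ParseObjectTags("bad*key=v")
	fmt.Println(err != nil) // true
}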
func GetMultipartMD5(parts []types.CompletedPart) string {
@@ -152,8 +312,8 @@ func GetMultipartMD5(parts []types.CompletedPart) string {
	for _, part := range parts {
		partsEtagBytes = append(partsEtagBytes, getEtagBytes(*part.ETag)...)
	}
-	s3MD5 := fmt.Sprintf("%s-%d", md5String(partsEtagBytes), len(parts))
-	return s3MD5
+
+	return fmt.Sprintf("\"%s-%d\"", md5String(partsEtagBytes), len(parts))
}
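The quoted "<md5>-<N>" form above is the usual S3 multipart ETag convention: the binary MD5 digests of each part's ETag are concatenated, hashed again with MD5, and suffixed with the part count. A self-contained sketch of that computation (the helper name here is illustrative, not the package's unexported ones):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"strings"
)

// multipartETag derives the S3-style multipart ETag from per-part ETags.
func multipartETag(partETags []string) string {
	var concat []byte
	for _, e := range partETags {
		// part ETags are hex MD5 strings, possibly quoted
		raw, err := hex.DecodeString(strings.Trim(e, `"`))
		if err != nil {
			panic(err) // illustrative only
		}
		concat = append(concat, raw...)
	}
	sum := md5.Sum(concat)
	return fmt.Sprintf("\"%s-%d\"", hex.EncodeToString(sum[:]), len(partETags))
}

func main() {
	p1 := md5.Sum([]byte("part one data"))
	p2 := md5.Sum([]byte("part two data"))
	fmt.Println(multipartETag([]string{
		hex.EncodeToString(p1[:]),
		hex.EncodeToString(p2[:]),
	}))
}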

func getEtagBytes(etag string) []byte {
@@ -181,3 +341,65 @@ func (f *FileSectionReadCloser) Read(p []byte) (int, error) {
func (f *FileSectionReadCloser) Close() error {
	return f.F.Close()
}

// MoveFile moves a file from source to destination.
func MoveFile(source, destination string, perm os.FileMode) error {
	// We use Rename as the atomic operation for object puts. The upload is
	// written to a temp file to not conflict with any other simultaneous
	// uploads. The final operation is to move the temp file into place for
	// the object. This ensures the object semantics of last upload completed
	// wins and is not some combination of writes from simultaneous uploads.
	err := os.Rename(source, destination)
	if err == nil || !errors.Is(err, syscall.EXDEV) {
		return err
	}

	// Rename can fail if the source and destination are not on the same
	// filesystem. The fallback is to copy the file and then remove the source.
	// We need to be careful that the destination does not exist before copying
	// to prevent any other simultaneous writes to the file.
	sourceFile, err := os.Open(source)
	if err != nil {
		return fmt.Errorf("open source: %w", err)
	}
	defer sourceFile.Close()

	var destFile *os.File
	for {
		destFile, err = os.OpenFile(destination, os.O_CREATE|os.O_EXCL|os.O_WRONLY, perm)
		if err != nil {
			if errors.Is(err, fs.ErrExist) {
				if removeErr := os.Remove(destination); removeErr != nil {
					return fmt.Errorf("remove existing destination: %w", removeErr)
				}
				continue
			}
			return fmt.Errorf("create destination: %w", err)
		}
		break
	}
	defer destFile.Close()

	_, err = io.Copy(destFile, sourceFile)
	if err != nil {
		return fmt.Errorf("copy data: %w", err)
	}

	err = os.Remove(source)
	if err != nil {
		return fmt.Errorf("remove source: %w", err)
	}

	return nil
}
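MoveFile gives callers rename-or-copy semantics: the cheap atomic rename when source and destination share a filesystem, and a remove-then-exclusive-create copy loop when rename fails with EXDEV. A minimal usage sketch, assuming the versitygw backend package is importable (paths are illustrative):

package main

import (
	"log"
	"os"

	"github.com/versity/versitygw/backend"
)

func main() {
	// stage the upload in a temp file, then move it into place
	tmp, err := os.CreateTemp("", "upload-*")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := tmp.WriteString("object data"); err != nil {
		log.Fatal(err)
	}
	tmp.Close()

	// rename if possible, copy+remove if the target is on another filesystem
	if err := backend.MoveFile(tmp.Name(), "/tmp/bucket-obj", 0644); err != nil {
		log.Fatal(err)
	}
}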
// GenerateEtag generates a new quoted etag from the provided hash.Hash
func GenerateEtag(h hash.Hash) string {
	dataSum := h.Sum(nil)
	return fmt.Sprintf("\"%s\"", hex.EncodeToString(dataSum[:]))
}

// AreEtagsSame compares 2 etags by ignoring quotes
func AreEtagsSame(e1, e2 string) bool {
	return strings.Trim(e1, `"`) == strings.Trim(e2, `"`)
}
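These two helpers standardize how ETags are produced and compared, so the rest of the gateway never has to care whether a stored or client-supplied ETag carries quotes. A quick sketch under the same import assumption:

package main

import (
	"crypto/md5"
	"fmt"
	"strings"

	"github.com/versity/versitygw/backend"
)

func main() {
	h := md5.New()
	h.Write([]byte("object data"))

	etag := backend.GenerateEtag(h)
	fmt.Println(etag) // quoted hex digest

	// quoted and unquoted forms of the same digest compare equal
	unquoted := strings.Trim(etag, `"`)
	fmt.Println(backend.AreEtagsSame(etag, unquoted)) // true
}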
@@ -23,6 +23,7 @@ import (
	"syscall"

	"github.com/pkg/xattr"
+	"github.com/versity/versitygw/s3err"
)

const (
@@ -56,10 +57,18 @@ func (x XattrMeta) RetrieveAttribute(f *os.File, bucket, object, attribute strin
// StoreAttribute stores the value of a specific attribute for an object in a bucket.
func (x XattrMeta) StoreAttribute(f *os.File, bucket, object, attribute string, value []byte) error {
	if f != nil {
-		return xattr.FSet(f, xattrPrefix+attribute, value)
+		err := xattr.FSet(f, xattrPrefix+attribute, value)
+		if errors.Is(err, syscall.EROFS) {
+			return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
+		}
+		return err
	}

-	return xattr.Set(filepath.Join(bucket, object), xattrPrefix+attribute, value)
+	err := xattr.Set(filepath.Join(bucket, object), xattrPrefix+attribute, value)
+	if errors.Is(err, syscall.EROFS) {
+		return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
+	}
+	return err
}

// DeleteAttribute removes the value of a specific attribute for an object in a bucket.
@@ -68,6 +77,9 @@ func (x XattrMeta) DeleteAttribute(bucket, object, attribute string) error {
	if errors.Is(err, xattr.ENOATTR) {
		return ErrNoSuchKey
	}
+	if errors.Is(err, syscall.EROFS) {
+		return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
+	}
	return err
}
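The pattern in these hunks repeats across the metadata and posix code: a write against a read-only filesystem surfaces as EROFS from the kernel, and the gateway translates that into the S3 MethodNotAllowed error instead of leaking a raw errno. A generic sketch of the same translation (the helper name and sentinel wrapping are illustrative, not the repository's API):

package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

// mapReadOnlyErr converts EROFS into a caller-friendly error, mirroring
// how the gateway maps it to s3err.ErrMethodNotAllowed.
func mapReadOnlyErr(err error) error {
	if errors.Is(err, syscall.EROFS) {
		return fmt.Errorf("method not allowed: %w", err)
	}
	return err
}

func main() {
	// synthetic error standing in for a write on a read-only mount
	err := &os.PathError{Op: "open", Path: "/mnt/ro/obj", Err: syscall.EROFS}
	fmt.Println(mapReadOnlyErr(err))
}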
File diff suppressed because it is too large
@@ -29,6 +29,7 @@ import (

	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/backend"
+	"github.com/versity/versitygw/s3err"
	"golang.org/x/sys/unix"
)

@@ -51,9 +52,13 @@ var (
	defaultFilePerm uint32 = 0644
)

-func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account, dofalloc bool) (*tmpfile, error) {
+func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account, dofalloc bool, forceNoTmpFile bool) (*tmpfile, error) {
	uid, gid, doChown := p.getChownIDs(acct)

+	if forceNoTmpFile {
+		return p.openMkTemp(dir, bucket, obj, size, dofalloc, uid, gid, doChown)
+	}
+
	// O_TMPFILE allows for a file handle to an unnamed file in the filesystem.
	// This can help reduce contention within the namespace (parent directories),
	// etc. And will auto cleanup the inode on close if we never link this
@@ -62,38 +67,12 @@ func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Accou
	// this is not supported.
	fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, defaultFilePerm)
	if err != nil {
+		if errors.Is(err, syscall.EROFS) {
+			return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
+		}
+
		// O_TMPFILE not supported, try fallback
-		err = backend.MkdirAll(dir, uid, gid, doChown, p.newDirPerm)
-		if err != nil {
-			return nil, fmt.Errorf("make temp dir: %w", err)
-		}
-		f, err := os.CreateTemp(dir,
-			fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
-		if err != nil {
-			return nil, err
-		}
-		tmp := &tmpfile{
-			f:          f,
-			bucket:     bucket,
-			objname:    obj,
-			size:       size,
-			needsChown: doChown,
-			uid:        uid,
-			gid:        gid,
-		}
-		// falloc is best effort, its fine if this fails
-		if size > 0 && dofalloc {
-			tmp.falloc()
-		}
-
-		if doChown {
-			err := f.Chown(uid, gid)
-			if err != nil {
-				return nil, fmt.Errorf("set temp file ownership: %w", err)
-			}
-		}
-
-		return tmp, nil
+		return p.openMkTemp(dir, bucket, obj, size, dofalloc, uid, gid, doChown)
	}

// for O_TMPFILE, filename is /proc/self/fd/<fd> to be used
@@ -127,6 +106,46 @@ func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Accou
	return tmp, nil
}

+func (p *Posix) openMkTemp(dir, bucket, obj string, size int64, dofalloc bool, uid, gid int, doChown bool) (*tmpfile, error) {
+	err := backend.MkdirAll(dir, uid, gid, doChown, p.newDirPerm)
+	if err != nil {
+		if errors.Is(err, syscall.EROFS) {
+			return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
+		}
+		return nil, fmt.Errorf("make temp dir: %w", err)
+	}
+	f, err := os.CreateTemp(dir,
+		fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
+	if err != nil {
+		if errors.Is(err, syscall.EROFS) {
+			return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
+		}
+		return nil, err
+	}
+	tmp := &tmpfile{
+		f:          f,
+		bucket:     bucket,
+		objname:    obj,
+		size:       size,
+		needsChown: doChown,
+		uid:        uid,
+		gid:        gid,
+	}
+	// falloc is best effort, its fine if this fails
+	if size > 0 && dofalloc {
+		tmp.falloc()
+	}
+
+	if doChown {
+		err := f.Chown(uid, gid)
+		if err != nil {
+			return nil, fmt.Errorf("set temp file ownership: %w", err)
+		}
+	}
+
+	return tmp, nil
+}
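The split above keeps the fast path and the portable path separate: O_TMPFILE creates an anonymous inode that only becomes visible once linked into place, while openMkTemp is the named-temp-file fallback (and now the forced path under the new disableotmp flag). A standalone sketch of the O_TMPFILE-plus-linkat technique the fast path relies on (Linux-only; paths are illustrative):

//go:build linux

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	dir := os.TempDir()

	// anonymous file: no name in the directory until we link it
	fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, 0644)
	if err != nil {
		log.Fatal(err) // EOPNOTSUPP on filesystems without O_TMPFILE
	}
	f := os.NewFile(uintptr(fd), "tmpfile")
	defer f.Close()

	if _, err := f.WriteString("object data"); err != nil {
		log.Fatal(err)
	}

	// publish atomically by linking /proc/self/fd/<fd> to the final name
	procPath := fmt.Sprintf("/proc/self/fd/%d", fd)
	if err := unix.Linkat(unix.AT_FDCWD, procPath, unix.AT_FDCWD,
		dir+"/published-object", unix.AT_SYMLINK_FOLLOW); err != nil {
		log.Fatal(err)
	}
}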

func (tmp *tmpfile) falloc() error {
	err := syscall.Fallocate(int(tmp.f.Fd()), 0, 0, tmp.size)
	if err != nil {
@@ -217,7 +236,9 @@ func (tmp *tmpfile) fallbackLink() error {
	objPath := filepath.Join(tmp.bucket, tmp.objname)
	err = os.Rename(tempname, objPath)
	if err != nil {
-		return fmt.Errorf("rename tmpfile: %w", err)
+		// rename only works for files within the same filesystem
+		// if this fails fallback to copy
+		return backend.MoveFile(tempname, objPath, fs.FileMode(defaultFilePerm))
	}

	return nil
@@ -24,9 +24,11 @@ import (
	"io/fs"
	"os"
	"path/filepath"
+	"syscall"

	"github.com/versity/versitygw/auth"
+	"github.com/versity/versitygw/backend"
+	"github.com/versity/versitygw/s3err"
)

type tmpfile struct {
@@ -36,18 +38,24 @@ type tmpfile struct {
	size int64
}

-func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account, _ bool) (*tmpfile, error) {
+func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account, _ bool, _ bool) (*tmpfile, error) {
	uid, gid, doChown := p.getChownIDs(acct)

	// Create a temp file for upload while in progress (see link comments below).
	var err error
	err = backend.MkdirAll(dir, uid, gid, doChown, p.newDirPerm)
	if err != nil {
+		if errors.Is(err, syscall.EROFS) {
+			return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
+		}
		return nil, fmt.Errorf("make temp dir: %w", err)
	}
	f, err := os.CreateTemp(dir,
		fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
	if err != nil {
+		if errors.Is(err, syscall.EROFS) {
+			return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
+		}
		return nil, fmt.Errorf("create temp file: %w", err)
	}

@@ -72,31 +80,17 @@ func (tmp *tmpfile) link() error {
	// this will no longer exist
	defer os.Remove(tempname)

-	// We use Rename as the atomic operation for object puts. The upload is
-	// written to a temp file to not conflict with any other simultaneous
-	// uploads. The final operation is to move the temp file into place for
-	// the object. This ensures the object semantics of last upload completed
-	// wins and is not some combination of writes from simultaneous uploads.
	objPath := filepath.Join(tmp.bucket, tmp.objname)
-	err := os.Remove(objPath)
-	if err != nil && !errors.Is(err, fs.ErrNotExist) {
-		return fmt.Errorf("remove stale path: %w", err)
-	}
-
	// reset default file mode because CreateTemp uses 0600
	tmp.f.Chmod(defaultFilePerm)

-	err = tmp.f.Close()
+	err := tmp.f.Close()
	if err != nil {
		return fmt.Errorf("close tmpfile: %w", err)
	}

-	err = os.Rename(tempname, objPath)
-	if err != nil {
-		return fmt.Errorf("rename tmpfile: %w", err)
-	}
-
-	return nil
+	return backend.MoveFile(tempname, objPath, defaultFilePerm)
}

func (tmp *tmpfile) Write(b []byte) (int, error) {
@@ -36,6 +36,11 @@ func (s *S3Proxy) getClientWithCtx(ctx context.Context) (*s3.Client, error) {
	if s.endpoint != "" {
		return s3.NewFromConfig(cfg, func(o *s3.Options) {
			o.BaseEndpoint = &s.endpoint
+			o.UsePathStyle = s.usePathStyle
+			// The http body stream is not seekable, so most operations cannot
+			// be retried. The error returned to the original client may be
+			// retried by the client.
+			o.Retryer = aws.NopRetryer{}
		}), nil
	}
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -23,6 +23,7 @@ import (
	"os"
	"path/filepath"
	"strconv"
+	"syscall"

	"golang.org/x/sys/unix"

@@ -31,6 +32,7 @@ import (
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/backend/meta"
	"github.com/versity/versitygw/backend/posix"
+	"github.com/versity/versitygw/s3err"
)

func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
@@ -52,14 +54,15 @@ func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
	}

	return &ScoutFS{
-		Posix:       p,
-		rootfd:      f,
-		rootdir:     rootdir,
-		meta:        metastore,
-		chownuid:    opts.ChownUID,
-		chowngid:    opts.ChownGID,
-		glaciermode: opts.GlacierMode,
-		newDirPerm:  opts.NewDirPerm,
+		Posix:            p,
+		rootfd:           f,
+		rootdir:          rootdir,
+		meta:             metastore,
+		chownuid:         opts.ChownUID,
+		chowngid:         opts.ChownGID,
+		glaciermode:      opts.GlacierMode,
+		newDirPerm:       opts.NewDirPerm,
+		disableNoArchive: opts.DisableNoArchive,
	}, nil
}

@@ -89,6 +92,9 @@ func (s *ScoutFS) openTmpFile(dir, bucket, obj string, size int64, acct auth.Acc
	// file descriptor into the namespace.
	fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, defaultFilePerm)
	if err != nil {
+		if errors.Is(err, syscall.EROFS) {
+			return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
+		}
		return nil, err
	}

@@ -149,10 +155,20 @@ func (tmp *tmpfile) link() error {
	}
	defer dirf.Close()

-	err = unix.Linkat(int(procdir.Fd()), filepath.Base(tmp.f.Name()),
-		int(dirf.Fd()), filepath.Base(objPath), unix.AT_SYMLINK_FOLLOW)
-	if err != nil {
-		return fmt.Errorf("link tmpfile: %w", err)
+	for {
+		err = unix.Linkat(int(procdir.Fd()), filepath.Base(tmp.f.Name()),
+			int(dirf.Fd()), filepath.Base(objPath), unix.AT_SYMLINK_FOLLOW)
+		if errors.Is(err, fs.ErrExist) {
+			err := os.Remove(objPath)
+			if err != nil && !errors.Is(err, fs.ErrNotExist) {
+				return fmt.Errorf("remove stale path: %w", err)
+			}
+			continue
+		}
+		if err != nil {
+			return fmt.Errorf("link tmpfile: %w", err)
+		}
+		break
	}

	err = tmp.f.Close()
145 backend/walk.go
@@ -19,7 +19,6 @@ import (
	"errors"
	"fmt"
	"io/fs"
-	"sort"
	"strings"
	"syscall"

@@ -38,18 +37,53 @@ type GetObjFunc func(path string, d fs.DirEntry) (s3response.Object, error)

var ErrSkipObj = errors.New("skip this object")

+// map to store object common prefixes
+type cpMap map[string]int
+
+func (c cpMap) Add(key string) {
+	_, ok := c[key]
+	if !ok {
+		c[key] = len(c)
+	}
+}
+
+// Len returns the length of the map
+func (c cpMap) Len() int {
+	return len(c)
+}
+
+// CpArray converts the map into a sorted []types.CommonPrefix array
+func (c cpMap) CpArray() []types.CommonPrefix {
+	commonPrefixes := make([]types.CommonPrefix, c.Len())
+	for cp, i := range c {
+		pfx := cp
+		commonPrefixes[i] = types.CommonPrefix{
+			Prefix: &pfx,
+		}
+	}
+
+	return commonPrefixes
+}
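cpMap replaces the earlier map[string]struct{} plus sort-at-the-end approach: each new common prefix records its insertion index, and since fs.WalkDir visits entries in lexical order, CpArray can place prefixes directly into their final sorted slots without a separate sort pass. A small sketch of that behavior (cpMap is unexported, so this reimplements it for illustration):

package main

import "fmt"

// insertion-ordered prefix set, mirroring the unexported cpMap above
type cpMap map[string]int

func (c cpMap) Add(key string) {
	if _, ok := c[key]; !ok {
		c[key] = len(c)
	}
}

func (c cpMap) Array() []string {
	out := make([]string, len(c))
	for prefix, i := range c {
		out[i] = prefix
	}
	return out
}

func main() {
	c := cpMap{}
	// walk order is lexical, so insertion order is already sorted
	for _, p := range []string{"a/", "b/", "b/", "c/"} {
		c.Add(p)
	}
	fmt.Println(c.Array()) // [a/ b/ c/] — duplicates collapsed, order kept
}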

// Walk walks the supplied fs.FS and returns results compatible with list
// objects responses
func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker string, max int32, getObj GetObjFunc, skipdirs []string) (WalkResults, error) {
-	cpmap := make(map[string]struct{})
+	cpmap := cpMap{}
	var objects []s3response.Object

+	// if max is 0, it should return empty non-truncated result
+	if max == 0 {
+		return WalkResults{
+			Truncated: false,
+		}, nil
+	}
+
	var pastMarker bool
	if marker == "" {
		pastMarker = true
	}

-	pastMax := max == 0
+	var pastMax bool
	var newMarker string
	var truncated bool

@@ -76,14 +110,6 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
			return fs.SkipDir
		}

-		if pastMax {
-			if len(objects) != 0 {
-				newMarker = *objects[len(objects)-1].Key
-				truncated = true
-			}
-			return fs.SkipAll
-		}
-
		// After this point, return skipflag instead of nil
		// so we can skip a directory without an early return
		var skipflag error
@@ -108,13 +134,7 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
			strings.Contains(strings.TrimPrefix(path+"/", prefix), delimiter) {
			skipflag = fs.SkipDir
		} else {
-			// TODO: can we do better here rather than a second readdir
-			// per directory?
-			ents, err := fs.ReadDir(fileSystem, path)
-			if err != nil {
-				return fmt.Errorf("readdir %q: %w", path, err)
-			}
-			if len(ents) == 0 && delimiter == "" {
+			if delimiter == "" {
				dirobj, err := getObj(path+"/", d)
				if err == ErrSkipObj {
					return skipflag
@@ -122,11 +142,26 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
				if err != nil {
					return fmt.Errorf("directory to object %q: %w", path, err)
				}
+				if pastMax {
+					truncated = true
+					return fs.SkipAll
+				}
				objects = append(objects, dirobj)
+				if (len(objects) + cpmap.Len()) == int(max) {
+					newMarker = path
+					pastMax = true
+				}

				return skipflag
			}

+			// TODO: can we do better here rather than a second readdir
+			// per directory?
+			ents, err := fs.ReadDir(fileSystem, path)
+			if err != nil {
+				return fmt.Errorf("readdir %q: %w", path, err)
+			}
+
+			if len(ents) != 0 {
+				return skipflag
+			}
@@ -159,9 +194,15 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
			if err != nil {
				return fmt.Errorf("file to object %q: %w", path, err)
			}
+			if pastMax {
+				truncated = true
+				return fs.SkipAll
+			}

			objects = append(objects, obj)

-			if max > 0 && (len(objects)+len(cpmap)) == int(max) {
+			if (len(objects) + cpmap.Len()) == int(max) {
				newMarker = path
				pastMax = true
			}

@@ -199,8 +240,13 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
			if err != nil {
				return fmt.Errorf("file to object %q: %w", path, err)
			}
+			if pastMax {
+				truncated = true
+				return fs.SkipAll
+			}
			objects = append(objects, obj)
-			if (len(objects) + len(cpmap)) == int(max) {
+			if (len(objects) + cpmap.Len()) == int(max) {
				newMarker = path
				pastMax = true
			}
			return skipflag
@@ -221,12 +267,15 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
			return skipflag
		}

-		cpmap[cpref] = struct{}{}
-		if (len(objects) + len(cpmap)) == int(max) {
-			newMarker = cpref
+		if pastMax {
+			truncated = true
+			return fs.SkipAll
+		}
+		cpmap.Add(cpref)
+		if (len(objects) + cpmap.Len()) == int(max) {
+			newMarker = cpref
			pastMax = true
		}

		return skipflag
	})
@@ -238,21 +287,12 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
		return WalkResults{}, err
	}

-	var commonPrefixStrings []string
-	for k := range cpmap {
-		commonPrefixStrings = append(commonPrefixStrings, k)
-	}
-	sort.Strings(commonPrefixStrings)
-	commonPrefixes := make([]types.CommonPrefix, 0, len(commonPrefixStrings))
-	for _, cp := range commonPrefixStrings {
-		pfx := cp
-		commonPrefixes = append(commonPrefixes, types.CommonPrefix{
-			Prefix: &pfx,
-		})
+	if !truncated {
+		newMarker = ""
	}

	return WalkResults{
-		CommonPrefixes: commonPrefixes,
+		CommonPrefixes: cpmap.CpArray(),
		Objects:        objects,
		Truncated:      truncated,
		NextMarker:     newMarker,
@@ -270,7 +310,7 @@ func contains(a string, strs []string) bool {

type WalkVersioningResults struct {
	CommonPrefixes []types.CommonPrefix
-	ObjectVersions []types.ObjectVersion
+	ObjectVersions []s3response.ObjectVersion
	DelMarkers     []types.DeleteMarkerEntry
	Truncated      bool
	NextMarker     string
@@ -278,7 +318,7 @@ type WalkVersioningResults struct {
}

type ObjVersionFuncResult struct {
-	ObjectVersions []types.ObjectVersion
+	ObjectVersions      []s3response.ObjectVersion
	DelMarkers          []types.DeleteMarkerEntry
	NextVersionIdMarker string
	Truncated           bool
@@ -289,8 +329,8 @@ type GetVersionsFunc func(path, versionIdMarker string, pastVersionIdMarker *boo

// WalkVersions walks the supplied fs.FS and returns results compatible with
// ListObjectVersions action response
func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyMarker, versionIdMarker string, max int, getObj GetVersionsFunc, skipdirs []string) (WalkVersioningResults, error) {
-	cpmap := make(map[string]struct{})
-	var objects []types.ObjectVersion
+	cpmap := cpMap{}
+	var objects []s3response.ObjectVersion
	var delMarkers []types.DeleteMarkerEntry

	var pastMarker bool
@@ -345,11 +385,11 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
		if delimiter == "/" &&
			prefix != path+"/" &&
			strings.HasPrefix(path+"/", prefix) {
-			cpmap[path+"/"] = struct{}{}
+			cpmap.Add(path + "/")
			return fs.SkipDir
		}

-		res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-len(cpmap), d)
+		res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-cpmap.Len(), d)
		if err == ErrSkipObj {
			return nil
		}
@@ -376,7 +416,7 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
		if delimiter == "" {
			// If no delimiter specified, then all files with matching
			// prefix are included in results
-			res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-len(cpmap), d)
+			res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-cpmap.Len(), d)
			if err == ErrSkipObj {
				return nil
			}
@@ -419,7 +459,7 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
		suffix := strings.TrimPrefix(path, prefix)
		before, _, found := strings.Cut(suffix, delimiter)
		if !found {
-			res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-len(cpmap), d)
+			res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-cpmap.Len(), d)
			if err == ErrSkipObj {
				return nil
			}
@@ -441,8 +481,8 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
		// Common prefixes are a set, so should not have duplicates.
		// These are abstractly a "directory", so need to include the
		// delimiter at the end.
-		cpmap[prefix+before+delimiter] = struct{}{}
-		if (len(objects) + len(cpmap)) == int(max) {
+		cpmap.Add(prefix + before + delimiter)
+		if (len(objects) + cpmap.Len()) == int(max) {
			nextMarker = path
			truncated = true

@@ -455,21 +495,8 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
		return WalkVersioningResults{}, err
	}

-	var commonPrefixStrings []string
-	for k := range cpmap {
-		commonPrefixStrings = append(commonPrefixStrings, k)
-	}
-	sort.Strings(commonPrefixStrings)
-	commonPrefixes := make([]types.CommonPrefix, 0, len(commonPrefixStrings))
-	for _, cp := range commonPrefixStrings {
-		pfx := cp
-		commonPrefixes = append(commonPrefixes, types.CommonPrefix{
-			Prefix: &pfx,
-		})
-	}
-
	return WalkVersioningResults{
-		CommonPrefixes: commonPrefixes,
+		CommonPrefixes: cpmap.CpArray(),
		ObjectVersions: objects,
		DelMarkers:     delMarkers,
		Truncated:      truncated,
@@ -100,6 +100,11 @@ func adminCommand() *cli.Command {
|
||||
Usage: "secret access key for the new user",
|
||||
Aliases: []string{"s"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "role",
|
||||
Usage: "the new user role",
|
||||
Aliases: []string{"r"},
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "user-id",
|
||||
Usage: "userID for the new user",
|
||||
@@ -311,8 +316,14 @@ func deleteUser(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
func updateUser(ctx *cli.Context) error {
|
||||
access, secret, userId, groupId := ctx.String("access"), ctx.String("secret"), ctx.Int("user-id"), ctx.Int("group-id")
|
||||
access, secret, userId, groupId, role := ctx.String("access"), ctx.String("secret"), ctx.Int("user-id"), ctx.Int("group-id"), auth.Role(ctx.String("role"))
|
||||
props := auth.MutableProps{}
|
||||
if ctx.IsSet("role") {
|
||||
if !role.IsValid() {
|
||||
return fmt.Errorf("invalid user role: %v", role)
|
||||
}
|
||||
props.Role = role
|
||||
}
|
||||
if ctx.IsSet("secret") {
|
||||
props.Secret = &secret
|
||||
}
|
||||
|
||||
@@ -50,6 +50,7 @@ var (
	logWebhookURL, accessLog string
	adminLogFile             string
	healthPath               string
+	virtualDomain            string
	debug                    bool
	pprof                    string
	quiet                    bool
@@ -60,10 +61,10 @@ var (
	ldapAccessAtr, ldapSecAtr, ldapRoleAtr   string
	ldapUserIdAtr, ldapGroupIdAtr            string
	vaultEndpointURL, vaultSecretStoragePath string
-	vaultMountPath, vaultRootToken           string
-	vaultRoleId, vaultRoleSecret             string
-	vaultServerCert, vaultClientCert         string
-	vaultClientCertKey                       string
+	vaultAuthMethod, vaultMountPath          string
+	vaultRootToken, vaultRoleId              string
+	vaultRoleSecret, vaultServerCert         string
+	vaultClientCert, vaultClientCertKey      string
	s3IamAccess, s3IamSecret                 string
	s3IamRegion, s3IamBucket                 string
	s3IamEndpoint                            string
@@ -98,6 +99,7 @@ func main() {
		scoutfsCommand(),
		s3Command(),
		azureCommand(),
+		pluginCommand(),
		adminCommand(),
		testCommand(),
		utilsCommand(),
@@ -226,6 +228,13 @@ func initFlags() []cli.Flag {
			Destination: &quiet,
			Aliases:     []string{"q"},
		},
+		&cli.StringFlag{
+			Name:        "virtual-domain",
+			Usage:       "enables the virtual host style bucket addressing with the specified arg as the base domain",
+			EnvVars:     []string{"VGW_VIRTUAL_DOMAIN"},
+			Destination: &virtualDomain,
+			Aliases:     []string{"vd"},
+		},
		&cli.StringFlag{
			Name:  "access-log",
			Usage: "enable server access logging to specified file",
@@ -371,6 +380,12 @@ func initFlags() []cli.Flag {
			EnvVars:     []string{"VGW_IAM_VAULT_SECRET_STORAGE_PATH"},
			Destination: &vaultSecretStoragePath,
		},
+		&cli.StringFlag{
+			Name:        "iam-vault-auth-method",
+			Usage:       "vault server auth method",
+			EnvVars:     []string{"VGW_IAM_VAULT_AUTH_METHOD"},
+			Destination: &vaultAuthMethod,
+		},
		&cli.StringFlag{
			Name:  "iam-vault-mount-path",
			Usage: "vault server mount path",
@@ -524,19 +539,19 @@ func initFlags() []cli.Flag {
		},
		&cli.StringFlag{
			Name:        "ipa-user",
-			Usage:       "Username used to connect to FreeIPA. Needs permissions to read user vault contents",
+			Usage:       "Username used to connect to FreeIPA (requires permissions to read user vault contents)",
			EnvVars:     []string{"VGW_IPA_USER"},
			Destination: &ipaUser,
		},
		&cli.StringFlag{
			Name:        "ipa-password",
-			Usage:       "Password of the user used to connect to FreeIPA.",
+			Usage:       "Password of the user used to connect to FreeIPA",
			EnvVars:     []string{"VGW_IPA_PASSWORD"},
			Destination: &ipaPassword,
		},
		&cli.BoolFlag{
			Name:        "ipa-insecure",
-			Usage:       "Verify TLS certificate of FreeIPA server. Default is 'true'.",
+			Usage:       "Disable verify TLS certificate of FreeIPA server",
			EnvVars:     []string{"VGW_IPA_INSECURE"},
			Destination: &ipaInsecure,
		},
@@ -602,6 +617,9 @@ func runGateway(ctx context.Context, be backend.Backend) error {
	if readonly {
		opts = append(opts, s3api.WithReadOnly())
	}
+	if virtualDomain != "" {
+		opts = append(opts, s3api.WithHostStyle(virtualDomain))
+	}

	admApp := fiber.New(fiber.Config{
		AppName: "versitygw",
@@ -646,6 +664,7 @@ func runGateway(ctx context.Context, be backend.Backend) error {
		LDAPGroupIdAtr:         ldapGroupIdAtr,
		VaultEndpointURL:       vaultEndpointURL,
		VaultSecretStoragePath: vaultSecretStoragePath,
+		VaultAuthMethod:        vaultAuthMethod,
		VaultMountPath:         vaultMountPath,
		VaultRootToken:         vaultRootToken,
		VaultRoleId:            vaultRoleId,
74 cmd/versitygw/plugin.go (new file)
@@ -0,0 +1,74 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package main

import (
	"errors"
	"fmt"
	"plugin"

	"github.com/urfave/cli/v2"
	"github.com/versity/versitygw/plugins"
)

func pluginCommand() *cli.Command {
	return &cli.Command{
		Name:        "plugin",
		Usage:       "load a backend from a plugin",
		Description: "Runs a s3 gateway and redirects the requests to the backend defined in the plugin",
		Action:      runPluginBackend,
		Flags: []cli.Flag{
			&cli.StringFlag{
				Name:    "config",
				Usage:   "location of the config file",
				Aliases: []string{"c"},
			},
		},
	}
}

func runPluginBackend(ctx *cli.Context) error {
	if ctx.NArg() == 0 {
		return fmt.Errorf("no plugin file provided to be loaded")
	}

	pluginPath := ctx.Args().Get(0)
	config := ctx.String("config")

	p, err := plugin.Open(pluginPath)
	if err != nil {
		return err
	}

	backendSymbol, err := p.Lookup("Backend")
	if err != nil {
		return err
	}
	backendPluginPtr, ok := backendSymbol.(*plugins.BackendPlugin)
	if !ok {
		return errors.New("plugin is not of type *plugins.BackendPlugin")
	}

	if backendPluginPtr == nil {
		return errors.New("variable Backend is nil")
	}

	be, err := (*backendPluginPtr).New(config)
	if err != nil {
		return err
	}

	return runGateway(ctx.Context, be)
}
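The loader above expects the shared object to export a package-level variable named Backend of type plugins.BackendPlugin, whose New(config) constructor (signature inferred from the loader code) returns the backend to serve. A sketch of what such a plugin's main package could look like (memBackend and memPlugin are hypothetical; build with go build -buildmode=plugin):

package main

import (
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/plugins"
)

// memBackend is a stand-in backend; embedding BackendUnsupported satisfies
// the full backend.Backend interface with ErrNotImplemented stubs.
type memBackend struct {
	backend.BackendUnsupported
}

// memPlugin implements plugins.BackendPlugin by constructing the backend
// from the config file path passed on the gateway command line.
type memPlugin struct{}

func (memPlugin) New(config string) (backend.Backend, error) {
	// a real plugin would parse the config file here
	return &memBackend{}, nil
}

// Backend is the symbol the gateway looks up with p.Lookup("Backend").
var Backend plugins.BackendPlugin = memPlugin{}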
@@ -31,6 +31,7 @@ var (
	dirPerms       uint
	sidecar        string
	nometa         bool
+	forceNoTmpFile bool
)

func posixCommand() *cli.Command {
@@ -93,6 +94,12 @@ will be translated into the file /mnt/fs/gwroot/mybucket/a/b/c/myobject`,
			EnvVars:     []string{"VGW_META_NONE"},
			Destination: &nometa,
		},
+		&cli.BoolFlag{
+			Name:        "disableotmp",
+			Usage:       "disable O_TMPFILE support for new objects",
+			EnvVars:     []string{"VGW_DISABLE_OTMP"},
+			Destination: &forceNoTmpFile,
+		},
		},
	}
}
@@ -113,11 +120,12 @@ func runPosix(ctx *cli.Context) error {
	}

	opts := posix.PosixOpts{
-		ChownUID:      chownuid,
-		ChownGID:      chowngid,
-		BucketLinks:   bucketlinks,
-		VersioningDir: versioningDir,
-		NewDirPerm:    fs.FileMode(dirPerms),
+		ChownUID:       chownuid,
+		ChownGID:       chowngid,
+		BucketLinks:    bucketlinks,
+		VersioningDir:  versioningDir,
+		NewDirPerm:     fs.FileMode(dirPerms),
+		ForceNoTmpFile: forceNoTmpFile,
	}

	var ms meta.MetadataStorer
@@ -26,8 +26,10 @@ var (
	s3proxySecret          string
	s3proxyEndpoint        string
	s3proxyRegion          string
+	s3proxyMetaBucket      string
	s3proxyDisableChecksum bool
	s3proxySslSkipVerify   bool
+	s3proxyUsePathStyle    bool
	s3proxyDebug           bool
)

@@ -71,6 +73,12 @@ to an s3 storage backend service.`,
			EnvVars:     []string{"VGW_S3_REGION"},
			Destination: &s3proxyRegion,
		},
+		&cli.StringFlag{
+			Name:        "meta-bucket",
+			Usage:       "s3 service meta bucket to store buckets acl/policy",
+			EnvVars:     []string{"VGW_S3_META_BUCKET"},
+			Destination: &s3proxyMetaBucket,
+		},
		&cli.BoolFlag{
			Name:  "disable-checksum",
			Usage: "disable gateway to server object checksums",
@@ -85,6 +93,13 @@ to an s3 storage backend service.`,
			Value:       false,
			Destination: &s3proxySslSkipVerify,
		},
+		&cli.BoolFlag{
+			Name:        "use-path-style",
+			Usage:       "use path style addressing for s3 proxy",
+			EnvVars:     []string{"VGW_S3_USE_PATH_STYLE"},
+			Value:       false,
+			Destination: &s3proxyUsePathStyle,
+		},
		&cli.BoolFlag{
			Name:  "debug",
			Usage: "output extra debug tracing",
@@ -97,8 +112,8 @@ to an s3 storage backend service.`,
}

func runS3(ctx *cli.Context) error {
-	be, err := s3proxy.New(s3proxyAccess, s3proxySecret, s3proxyEndpoint, s3proxyRegion,
-		s3proxyDisableChecksum, s3proxySslSkipVerify, s3proxyDebug)
+	be, err := s3proxy.New(ctx.Context, s3proxyAccess, s3proxySecret, s3proxyEndpoint, s3proxyRegion,
+		s3proxyMetaBucket, s3proxyDisableChecksum, s3proxySslSkipVerify, s3proxyUsePathStyle, s3proxyDebug)
	if err != nil {
		return fmt.Errorf("init s3 backend: %w", err)
	}
@@ -24,7 +24,8 @@ import (
)

var (
-	glacier bool
+	glacier          bool
+	disableNoArchive bool
)

func scoutfsCommand() *cli.Command {
@@ -79,6 +80,12 @@ move interfaces as well as support for tiered filesystems.`,
			DefaultText: "0755",
			Value:       0755,
		},
+		&cli.BoolFlag{
+			Name:        "disable-noarchive",
+			Usage:       "disable setting noarchive for multipart part uploads",
+			EnvVars:     []string{"VGW_DISABLE_NOARCHIVE"},
+			Destination: &disableNoArchive,
+		},
		},
	}
}
@@ -98,6 +105,7 @@ func runScoutfs(ctx *cli.Context) error {
	opts.ChownGID = chowngid
	opts.BucketLinks = bucketlinks
	opts.NewDirPerm = fs.FileMode(dirPerms)
+	opts.DisableNoArchive = disableNoArchive

	be, err := scoutfs.New(ctx.Args().Get(0), opts)
	if err != nil {
@@ -34,7 +34,7 @@ var (
	totalReqs         int
	upload            bool
	download          bool
-	pathStyle         bool
+	hostStyle         bool
	checksumDisable   bool
	versioningEnabled bool
	azureTests        bool
@@ -74,6 +74,12 @@ func initTestFlags() []cli.Flag {
			Destination: &endpoint,
			Aliases:     []string{"e"},
		},
+		&cli.BoolFlag{
+			Name:        "host-style",
+			Usage:       "Use host-style bucket addressing",
+			Value:       false,
+			Destination: &hostStyle,
+		},
		&cli.BoolFlag{
			Name:  "debug",
			Usage: "enable debug mode",
@@ -124,6 +130,11 @@ func initTestCommands() []*cli.Command {
				},
			},
		},
+		{
+			Name:   "scoutfs",
+			Usage:  "Tests scoutfs full flow",
+			Action: getAction(integration.TestScoutfs),
+		},
		{
			Name:  "iam",
			Usage: "Tests iam service",
@@ -186,12 +197,6 @@ func initTestCommands() []*cli.Command {
					Value:       1,
					Destination: &concurrency,
				},
-				&cli.BoolFlag{
-					Name:        "pathStyle",
-					Usage:       "Use Pathstyle bucket addressing",
-					Value:       false,
-					Destination: &pathStyle,
-				},
				&cli.BoolFlag{
					Name:  "checksumDis",
					Usage: "Disable server checksum",
@@ -223,8 +228,8 @@ func initTestCommands() []*cli.Command {
				if debug {
					opts = append(opts, integration.WithDebug())
				}
-				if pathStyle {
-					opts = append(opts, integration.WithPathStyle())
+				if hostStyle {
+					opts = append(opts, integration.WithHostStyle())
				}
				if checksumDisable {
					opts = append(opts, integration.WithDisableChecksum())
@@ -287,6 +292,9 @@ func initTestCommands() []*cli.Command {
				if checksumDisable {
					opts = append(opts, integration.WithDisableChecksum())
				}
+				if hostStyle {
+					opts = append(opts, integration.WithHostStyle())
+				}

				s3conf := integration.NewS3Conf(opts...)

@@ -316,6 +324,9 @@ func getAction(tf testFunc) func(*cli.Context) error {
		if azureTests {
			opts = append(opts, integration.WithAzureMode())
		}
+		if hostStyle {
+			opts = append(opts, integration.WithHostStyle())
+		}

		s := integration.NewS3Conf(opts...)
		tf(s)
@@ -351,6 +362,9 @@ func extractIntTests() (commands []*cli.Command) {
		if versioningEnabled {
			opts = append(opts, integration.WithVersioningEnabled())
		}
+		if hostStyle {
+			opts = append(opts, integration.WithHostStyle())
+		}

		s := integration.NewS3Conf(opts...)
		err := testFunc(s)
@@ -99,6 +99,26 @@ ROOT_SECRET_ACCESS_KEY=
# endpoint is unauthenticated, and returns a 200 status for GET.
#VGW_HEALTH=

# Enable VGW_READ_ONLY to only allow read operations to the S3 server. No write
# operations will be allowed.
#VGW_READ_ONLY=false

# The VGW_VIRTUAL_DOMAIN option enables virtual host style bucket
# addressing. Path style addressing is the default, and remains enabled
# even when virtual host style is enabled. The VGW_VIRTUAL_DOMAIN option
# specifies the domain name that will be used for the virtual host style
# addressing. For virtual addressing, access to a bucket is in the request
# form:
# https://<bucket>.<VGW_VIRTUAL_DOMAIN>/
# for example: https://mybucket.example.com/ where
# VGW_VIRTUAL_DOMAIN=example.com
# and all subdomains of VGW_VIRTUAL_DOMAIN should be reserved for buckets.
# This means that virtual host addressing will generally require a DNS
# entry for each bucket that needs to be accessed.
# The default path style request is of the form:
# https://<VGW_ENDPOINT>/<bucket>
#VGW_VIRTUAL_DOMAIN=
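For reference, a minimal aws-sdk-go-v2 client sketch contrasting the two addressing styles against the gateway; the endpoints gw.example.com and example.com are placeholders, not values taken from this change.

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }

    // Path style (the gateway default): requests take the form
    // https://gw.example.com/mybucket/key
    pathClient := s3.NewFromConfig(cfg, func(o *s3.Options) {
        o.BaseEndpoint = aws.String("https://gw.example.com")
        o.UsePathStyle = true
    })

    // Virtual-host style (VGW_VIRTUAL_DOMAIN=example.com): requests take
    // the form https://mybucket.example.com/key, so DNS must resolve
    // each bucket subdomain to the gateway.
    hostClient := s3.NewFromConfig(cfg, func(o *s3.Options) {
        o.BaseEndpoint = aws.String("https://example.com")
    })

    _, _ = pathClient, hostClient
}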

###############
# Access Logs #
###############
@@ -240,6 +260,24 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_IAM_LDAP_USER_ID_ATR=
#VGW_IAM_LDAP_GROUP_ID_ATR=

# The FreeIPA options will enable the FreeIPA IAM service with accounts stored
# in an external FreeIPA service. Currently the FreeIPA IAM service only
# supports account retrieval. Creating and modifying accounts must be done
# outside of the versitygw service.
# FreeIPA server url e.g. https://ipa.example.test
#VGW_IPA_HOST=
# The name of the user vault containing the user's secret
#VGW_IPA_VAULT_NAME=
# Username used to connect to FreeIPA (requires permissions to read user vault
# contents)
#VGW_IPA_USER=
# Password of the user used to connect to FreeIPA
#VGW_IPA_PASSWORD=
# Disable TLS certificate verification for the FreeIPA server
#VGW_IPA_INSECURE=false
# FreeIPA IAM debug output
#VGW_IPA_DEBUG=false

###############
# IAM caching #
###############
@@ -317,6 +355,40 @@ ROOT_SECRET_ACCESS_KEY=
# as any parent directories automatically created with object uploads.
#VGW_DIR_PERMS=0755

# To enable object versions, the VGW_VERSIONING_DIR option must be set to the
# directory that will be used to store the object versions. The version
# directory must NOT be a subdirectory of the VGW_BACKEND_ARG directory.
#VGW_VERSIONING_DIR=

# The gateway uses xattrs to store metadata for objects by default. For systems
# that do not support xattrs, the VGW_META_SIDECAR option can be set to a
# directory that will be used to store the metadata for objects. This is
# currently experimental, and may have issues for some edge cases.
#VGW_META_SIDECAR=

# The VGW_META_NONE option will disable the metadata functionality for the
# gateway. This will cause the gateway to not store any metadata for objects
# or buckets. This includes bucket ACLs and policies. This may be useful for
# read only access to pre-existing data where the gateway should not modify
# the data. It is recommended to enable VGW_READ_ONLY (Global Options) along
# with this.
#VGW_META_NONE=false

# The gateway will use O_TMPFILE for writing objects while uploading and
# link the file to the final object name when the upload is complete if the
# filesystem supports O_TMPFILE. This makes object creation atomic: the
# object is not visible to other clients or racing uploads until the upload
# is complete. This will not work if there is a different filesystem mounted
# below the bucket level than where the bucket resides. The VGW_DISABLE_OTMP
# option can be set to true to disable this functionality and force the
# fallback mode that is otherwise used when O_TMPFILE is not available. This
# fallback will create a temporary file in the bucket directory and rename it
# to the final object name when the upload is complete if the final location
# is in the same filesystem, or copy the file to the final location if it is
# in a different filesystem. This fallback mode is still atomic, but may be
# less efficient than O_TMPFILE when the data needs to be copied into the
# final location.
#VGW_DISABLE_OTMP=false
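A rough sketch of the O_TMPFILE pattern described above, assuming Linux and golang.org/x/sys/unix (already a dependency of this project); the helper name is illustrative and error handling is abbreviated.

package objectwrite

import (
    "fmt"

    "golang.org/x/sys/unix"
)

func writeAtomically(dir, dst string, data []byte) error {
    // Open an unnamed file in dir: it has no directory entry, so it is
    // invisible to other clients until linked into place below.
    fd, err := unix.Open(dir, unix.O_TMPFILE|unix.O_WRONLY, 0644)
    if err != nil {
        return err // caller would use the named temp file fallback here
    }
    defer unix.Close(fd)

    if _, err := unix.Write(fd, data); err != nil {
        return err
    }

    // Give the finished file its final name in one atomic step.
    return unix.Linkat(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd),
        unix.AT_FDCWD, dst, unix.AT_SYMLINK_FOLLOW)
}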

###########
# scoutfs #
###########
@@ -358,6 +430,13 @@ ROOT_SECRET_ACCESS_KEY=
# as any parent directories automatically created with object uploads.
#VGW_DIR_PERMS=0755

# The default behavior of the gateway is to automatically set the noarchive
# flag on the multipart upload parts while the multipart upload is in progress.
# This is to prevent the parts from being archived since they are temporary
# and will be deleted after the multipart upload is completed or aborted. The
# VGW_DISABLE_NOARCHIVE option can be set to true to disable this behavior.
#VGW_DISABLE_NOARCHIVE=false

######
# s3 #
######
97 go.mod
@@ -1,82 +1,83 @@
module github.com/versity/versitygw

go 1.21.0
go 1.23.0

toolchain go1.24.1

require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1
github.com/DataDog/datadog-go/v5 v5.6.0
github.com/aws/aws-sdk-go-v2 v1.36.0
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.2
github.com/aws/smithy-go v1.22.2
github.com/go-ldap/ldap/v3 v3.4.10
github.com/gofiber/fiber/v2 v2.52.6
github.com/google/go-cmp v0.6.0
github.com/aws/aws-sdk-go-v2 v1.36.5
github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0
github.com/aws/smithy-go v1.22.4
github.com/go-ldap/ldap/v3 v3.4.11
github.com/gofiber/fiber/v2 v2.52.8
github.com/google/go-cmp v0.7.0
github.com/google/uuid v1.6.0
github.com/hashicorp/vault-client-go v0.4.3
github.com/nats-io/nats.go v1.38.0
github.com/oklog/ulid/v2 v2.1.0
github.com/pkg/xattr v0.4.10
github.com/segmentio/kafka-go v0.4.47
github.com/nats-io/nats.go v1.43.0
github.com/oklog/ulid/v2 v2.1.1
github.com/pkg/xattr v0.4.12
github.com/segmentio/kafka-go v0.4.48
github.com/smira/go-statsd v1.3.4
github.com/urfave/cli/v2 v2.27.5
github.com/valyala/fasthttp v1.58.0
github.com/urfave/cli/v2 v2.27.7
github.com/valyala/fasthttp v1.63.0
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44
golang.org/x/sync v0.10.0
golang.org/x/sys v0.29.0
golang.org/x/sync v0.16.0
golang.org/x/sys v0.34.0
)

require (
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.12 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/nats-io/nkeys v0.4.9 // indirect
github.com/nats-io/nkeys v0.4.11 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
golang.org/x/crypto v0.32.0 // indirect
golang.org/x/net v0.34.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/crypto v0.40.0 // indirect
golang.org/x/net v0.42.0 // indirect
golang.org/x/text v0.27.0 // indirect
golang.org/x/time v0.12.0 // indirect
)

require (
github.com/andybalholm/brotli v1.1.1 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect
github.com/aws/aws-sdk-go-v2/config v1.29.4
github.com/aws/aws-sdk-go-v2/credentials v1.17.57
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.57
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/andybalholm/brotli v1.2.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 // indirect
github.com/aws/aws-sdk-go-v2/config v1.29.17
github.com/aws/aws-sdk-go-v2/credentials v1.17.70
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.83
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/tcplisten v1.0.0 // indirect
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
)
237 go.sum
@@ -1,21 +1,21 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 h1:1mvYtZfWQAnwNah/C+Z+Jb9rQH95LPE2vlmMuWAHJk8=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1/go.mod h1:75I/mXtme1JyWFtz8GocPHVFyH421IBoZErnO16dd0k=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.1 h1:Bk5uOhSAenHyR5P61D/NzeQCv+4fEVV8mOkJ82NqpWw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.1/go.mod h1:QZ4pw3or1WPmRBxf0cHd1tknzrT54WPBOQoGutCPvSU=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 h1:Wc1ml6QlJs2BHQ/9Bqu1jiyggbsSjramq2oUmp5WeIo=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0 h1:LR0kAX9ykz8G4YgLCaRDVJ3+n43R8MneB5dTy2konZo=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0/go.mod h1:DWAciXemNf++PQJLeXUB4HHH5OpsAh12HZnu2wXE1jA=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 h1:lhZdRq7TIx0GJQvSyX2Si406vrYsov2FXGp/RnSEtcs=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1/go.mod h1:8cl44BDmi+effbARHMQjgOKA2AYvcohNm7KEt42mSV8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3 h1:H5xDQaE3XowWfhZRUpnfC+rGZMEVoSiji+b+/HFAPU4=
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEUqFvRDw=
github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
@@ -23,50 +23,50 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk=
github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg=
github.com/aws/aws-sdk-go-v2/config v1.29.4 h1:ObNqKsDYFGr2WxnoXKOhCvTlf3HhwtoGgc+KmZ4H5yg=
github.com/aws/aws-sdk-go-v2/config v1.29.4/go.mod h1:j2/AF7j/qxVmsNIChw1tWfsVKOayJoGRDjg1Tgq7NPk=
github.com/aws/aws-sdk-go-v2/credentials v1.17.57 h1:kFQDsbdBAR3GZsB8xA+51ptEnq9TIj3tS4MuP5b+TcQ=
github.com/aws/aws-sdk-go-v2/credentials v1.17.57/go.mod h1:2kerxPUUbTagAr/kkaHiqvj/bcYHzi2qiJS/ZinllU0=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.57 h1:4hFrvTb32jty/LpKdIwWhMgqITPxNo9l1X1hjUyVCZ4=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.57/go.mod h1:n6n8rfggAVPgDVldL1zk9QUzIWImRb6OWI8t9CfDImM=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31 h1:8IwBjuLdqIO1dGB+dZ9zJEl8wzY3bVYxcs0Xyu/Lsc0=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31/go.mod h1:8tMBcuVjL4kP/ECEIWTCWtwV2kj6+ouEKl4cqR4iWLw=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5 h1:siiQ+jummya9OLPDEyHVb2dLW4aOMe22FGDd0sAfuSw=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5/go.mod h1:iHVx2J9pWzITdP5MJY6qWfG34TfD9EA+Qi3eV6qQCXw=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 h1:tkVNm99nkJnFo1H9IIQb5QkCiPcvCDn3Pos+IeTbGRA=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12/go.mod h1:dIVlquSPUMqEJtx2/W17SM2SuESRaVEhEV9alcMqxjw=
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.2 h1:dyC+iA2+Yc7iDMDh0R4eT6fi8TgBduc+BOWCy6Br0/o=
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.2/go.mod h1:FHSHmyEUkzRbaFFqqm6bkLAOQHgqhsLmfCahvCBMiyA=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.12 h1:fqg6c1KVrc3SYWma/egWue5rKI4G2+M4wMQN2JosNAA=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.12/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w=
github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
github.com/aws/aws-sdk-go-v2 v1.36.5 h1:0OF9RiEMEdDdZEMqF9MRjevyxAQcf6gY+E7vwBILFj0=
github.com/aws/aws-sdk-go-v2 v1.36.5/go.mod h1:EYrzvCCN9CMUTa5+6lf6MM4tq3Zjp8UhSGR/cBsjai0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 h1:12SpdwU8Djs+YGklkinSSlcrPyj3H4VifVsKf78KbwA=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11/go.mod h1:dd+Lkp6YmMryke+qxW/VnKyhMBDTYP41Q2Bb+6gNZgY=
github.com/aws/aws-sdk-go-v2/config v1.29.17 h1:jSuiQ5jEe4SAMH6lLRMY9OVC+TqJLP5655pBGjmnjr0=
github.com/aws/aws-sdk-go-v2/config v1.29.17/go.mod h1:9P4wwACpbeXs9Pm9w1QTh6BwWwJjwYvJ1iCt5QbCXh8=
github.com/aws/aws-sdk-go-v2/credentials v1.17.70 h1:ONnH5CM16RTXRkS8Z1qg7/s2eDOhHhaXVd72mmyv4/0=
github.com/aws/aws-sdk-go-v2/credentials v1.17.70/go.mod h1:M+lWhhmomVGgtuPOhO85u4pEa3SmssPTdcYpP/5J/xc=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 h1:KAXP9JSHO1vKGCr5f4O6WmlVKLFFXgWYAGoJosorxzU=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32/go.mod h1:h4Sg6FQdexC1yYG9RDnOvLbW1a/P986++/Y/a+GyEM8=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.83 h1:08otkOELsIi0toRRGMytlJhOctcN8xfKfKFR2NXz3kE=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.83/go.mod h1:dGsGb2wI8JDWeMAhjVPP+z+dqvYjL6k6o+EujcRNk5c=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 h1:SsytQyTMHMDPspp+spo7XwXTP44aJZZAC7fBV2C5+5s=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36/go.mod h1:Q1lnJArKRXkenyog6+Y+zr7WDpk4e6XlR6gs20bbeNo=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 h1:i2vNHQiXUvKhs3quBR6aqlgJaiaexz/aNvdCktW/kAM=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36/go.mod h1:UdyGa7Q91id/sdyHPwth+043HhmP6yP9MBHgbZM0xo8=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 h1:GMYy2EOWfzdP3wfVAGXBNKY5vK4K8vMET4sYOYltmqs=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36/go.mod h1:gDhdAV6wL3PmPqBhiPbnlS447GoWs8HTTOYef9/9Inw=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 h1:CXV68E2dNqhuynZJPB80bhPQwAKqBWVer887figW6Jc=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4/go.mod h1:/xFi9KtvBXP97ppCz1TAEvU1Uf66qvid89rbem3wCzQ=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 h1:nAP2GYbfh8dd2zGZqFRSMlq+/F6cMPBUuCsGAMkN074=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4/go.mod h1:LT10DsiGjLWh4GbjInf9LQejkYEhBgBCjLG5+lvk4EE=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 h1:t0E6FzREdtCsiLIoLCWsYliNsRBgyGD/MCK571qk4MI=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17/go.mod h1:ygpklyoaypuyDvOM5ujWGrYWpAK3h7ugnmKCU/76Ys4=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 h1:qcLWgdhq45sDM9na4cvXax9dyLitn8EYBRl8Ak4XtG4=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17/go.mod h1:M+jkjBFZ2J6DJrjMv2+vkBbuht6kxJYtJiwoVgX4p4U=
github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0 h1:5Y75q0RPQoAbieyOuGLhjV9P3txvYgXv2lg0UwJOfmE=
github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0/go.mod h1:kUklwasNoCn5YpyAqC/97r6dzTA1SRKJfKq16SXeoDU=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 h1:AIRJ3lfb2w/1/8wOOSqYb9fUKGwQbtysJ2H1MofRUPg=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.5/go.mod h1:b7SiVprpU+iGazDUqvRSLf5XmCdn+JtT1on7uNL6Ipc=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 h1:BpOxT3yhLwSJ77qIY3DoHAQjZsc4HEGfMCE4NGy3uFg=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3/go.mod h1:vq/GQR1gOFLquZMSrxUK/cpvKCNVYibNyJ1m7JrU88E=
github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 h1:NFOJ/NXEGV4Rq//71Hs1jC/NvPs1ezajK+yQmkwnPV0=
github.com/aws/aws-sdk-go-v2/service/sts v1.34.0/go.mod h1:7ph2tGpfQvwzgistp2+zga9f+bCjlQJPkPUmMgDSD7w=
github.com/aws/smithy-go v1.22.4 h1:uqXzVZNuNexwc/xrh6Tb56u89WDlJY6HS+KC0S4QSjw=
github.com/aws/smithy-go v1.22.4/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -74,32 +74,29 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk=
github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap/v3 v3.4.10 h1:ot/iwPOhfpNVgB1o+AVXljizWZ9JTp7YF5oeyONmcJU=
github.com/go-ldap/ldap/v3 v3.4.10/go.mod h1:JXh4Uxgi40P6E9rdsYqpUtbW46D9UTjJ9QSwGRznplY=
github.com/gofiber/fiber/v2 v2.52.6 h1:Rfp+ILPiYSvvVuIPvxrBns+HJp8qGLDnLJawAu27XVI=
github.com/gofiber/fiber/v2 v2.52.6/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo=
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap/v3 v3.4.11 h1:4k0Yxweg+a3OyBLjdYn5OKglv18JNvfDykSoI8bW0gU=
github.com/go-ldap/ldap/v3 v3.4.11/go.mod h1:bY7t0FLK8OAVpp/vV6sSlpz3EQDGcQwc8pF0ujLgKvM=
github.com/gofiber/fiber/v2 v2.52.8 h1:xl4jJQ0BV5EJTA2aWiKw/VddRpHrKeZLF0QPUxqn0x4=
github.com/gofiber/fiber/v2 v2.52.8/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/vault-client-go v0.4.3 h1:zG7STGVgn/VK6rnZc0k8PGbfv2x/sJExRKHSUg3ljWc=
@@ -116,11 +113,11 @@ github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh6
github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw=
github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
@@ -131,14 +128,14 @@ github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6T
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/nats-io/nats.go v1.38.0 h1:A7P+g7Wjp4/NWqDOOP/K6hfhr54DvdDQUznt5JFg9XA=
github.com/nats-io/nats.go v1.38.0/go.mod h1:IGUM++TwokGnXPs82/wCuiHS02/aKrdYUQkU8If6yjw=
github.com/nats-io/nkeys v0.4.9 h1:qe9Faq2Gxwi6RZnZMXfmGMZkg3afLLOtrU+gDZJ35b0=
github.com/nats-io/nkeys v0.4.9/go.mod h1:jcMqs+FLG+W5YO36OX6wFIFcmpdAns+w1Wm6D3I/evE=
github.com/nats-io/nats.go v1.43.0 h1:uRFZ2FEoRvP64+UUhaTokyS18XBCR/xM2vQZKO4i8ug=
github.com/nats-io/nats.go v1.43.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0=
github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU=
github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
@@ -146,12 +143,12 @@ github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFu
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA=
github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pkg/xattr v0.4.12 h1:rRTkSyFNTRElv6pkA3zpjHpQ90p/OdHQC1GmGh1aTjM=
github.com/pkg/xattr v0.4.12/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -159,8 +156,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0=
github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
github.com/segmentio/kafka-go v0.4.48 h1:9jyu9CWK4W5W+SroCe8EffbrRZVqAOkuaLd/ApID4Vs=
github.com/segmentio/kafka-go v0.4.48/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smira/go-statsd v1.3.4 h1:kBYWcLSGT+qC6JVbvfz48kX7mQys32fjDOPrfmsSx2c=
github.com/smira/go-statsd v1.3.4/go.mod h1:RjdsESPgDODtg1VpVVf9MJrEW2Hw0wtRNbmB1CAhu6A=
@@ -169,20 +166,17 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w=
github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU=
github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.58.0 h1:GGB2dWxSbEprU9j0iMJHgdKYJVDyjrOwF9RE59PbRuE=
github.com/valyala/fasthttp v1.58.0/go.mod h1:SYXvHHaFp7QZHGKSHmoMipInhrI5StHrhDTYVEjK/Kw=
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/valyala/fasthttp v1.63.0 h1:DisIL8OjB7ul2d7cBaMRcKTQDYnrGy56R4FCiuDP0Ns=
github.com/valyala/fasthttp v1.63.0/go.mod h1:REc4IeW+cAEyLrRPa5A81MIjvz0QE1laoTX2EaPHKJM=
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44 h1:Wx1o3pNrCzsHIIDyZ2MLRr6tF/1FhAr7HNDn80QqDWE=
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44/go.mod h1:gJsq73k+4685y+rbDIpPY8i/5GbsiwP6JFoFyUDB1fQ=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
@@ -200,45 +194,28 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -254,23 +231,14 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@@ -278,24 +246,19 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -72,6 +72,9 @@ var (
ActionPutBucketOwnershipControls = "s3_PutBucketOwnershipControls"
ActionGetBucketOwnershipControls = "s3_GetBucketOwnershipControls"
ActionDeleteBucketOwnershipControls = "s3_DeleteBucketOwnershipControls"
ActionPutBucketCors = "s3_PutBucketCors"
ActionGetBucketCors = "s3_GetBucketCors"
ActionDeleteBucketCors = "s3_DeleteBucketCors"

// Admin actions
ActionAdminCreateUser = "admin_CreateUser"
@@ -266,4 +269,16 @@ func init() {
Name: "UploadPartCopy",
Service: "s3",
}
ActionMap[ActionPutBucketCors] = Action{
Name: "PutBucketCors",
Service: "s3",
}
ActionMap[ActionGetBucketCors] = Action{
Name: "GetBucketCors",
Service: "s3",
}
ActionMap[ActionDeleteBucketCors] = Action{
Name: "DeleteBucketCors",
Service: "s3",
}
}
35 plugins/plugins.go Normal file
@@ -0,0 +1,35 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package plugins

import "github.com/versity/versitygw/backend"

// BackendPlugin defines an interface for creating backend
// implementation instances.
// Plugins implementing this interface can be built as shared
// libraries using Go's plugin system (to build use `go build -buildmode=plugin`).
// The shared library should export an instance of
// this interface in a variable named `Backend`.
type BackendPlugin interface {
// New creates and initializes a new backend.Backend instance.
// The config parameter specifies the path of the file containing
// the configuration for the backend.
//
// Implementations of this method should perform the necessary steps to
// establish a connection to the underlying storage system or service
// (e.g., network storage system, distributed storage system, cloud storage)
// and configure it according to the provided configuration.
New(config string) (backend.Backend, error)
}
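A hypothetical out-of-tree plugin satisfying this contract could look like the sketch below; build it with go build -buildmode=plugin, per the interface comment. The loading side is not shown in this diff, but presumably goes through the standard library plugin package (plugin.Open on the shared object, then Lookup("Backend")).

// Hypothetical plugin, not part of this change.
package main

import (
    "github.com/versity/versitygw/backend"
    "github.com/versity/versitygw/plugins"
)

type myPlugin struct{}

func (myPlugin) New(config string) (backend.Backend, error) {
    // Parse the file at config, connect to the storage system, and
    // return the initialized backend here (placeholder body).
    return nil, nil
}

// Backend is the exported symbol the gateway looks up in the shared
// library, per the interface comment above.
var Backend plugins.BackendPlugin = myPlugin{}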
@@ -15,13 +15,10 @@
package controllers

import (
"encoding/json"
"encoding/xml"
"fmt"
"net/http"
"strings"

"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
@@ -100,7 +97,16 @@ func (c AdminController) UpdateUser(ctx *fiber.Ctx) error {
})
}

err := c.iam.UpdateUserAccount(access, props)
err := props.Validate()
if err != nil {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrAdminInvalidUserRole),
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminUpdateUser,
})
}

err = c.iam.UpdateUserAccount(access, props)
if err != nil {
if strings.Contains(err.Error(), "user not found") {
err = s3err.GetAPIError(s3err.ErrAdminUserNotFound)
@@ -163,27 +169,7 @@ func (c AdminController) ChangeBucketOwner(ctx *fiber.Ctx) error {
})
}

acl := auth.ACL{
Owner: owner,
Grantees: []auth.Grantee{
{
Permission: auth.PermissionFullControl,
Access: owner,
Type: types.TypeCanonicalUser,
},
},
}

aclParsed, err := json.Marshal(acl)
if err != nil {
return SendResponse(ctx, fmt.Errorf("failed to marshal the bucket acl: %w", err),
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminChangeBucketOwner,
})
}

err = c.be.ChangeBucketOwner(ctx.Context(), bucket, aclParsed)
err = c.be.ChangeBucketOwner(ctx.Context(), bucket, owner)
return SendResponse(ctx, err,
&MetaOpts{
Logger: c.l,

@@ -324,7 +324,7 @@ func TestAdminController_ChangeBucketOwner(t *testing.T) {
}
adminController := AdminController{
be: &BackendMock{
ChangeBucketOwnerFunc: func(contextMoqParam context.Context, bucket string, acl []byte) error {
ChangeBucketOwnerFunc: func(contextMoqParam context.Context, bucket, owner string) error {
return nil
},
},
@@ -26,24 +26,27 @@ var _ backend.Backend = &BackendMock{}
//			AbortMultipartUploadFunc: func(contextMoqParam context.Context, abortMultipartUploadInput *s3.AbortMultipartUploadInput) error {
//				panic("mock out the AbortMultipartUpload method")
//			},
-//			ChangeBucketOwnerFunc: func(contextMoqParam context.Context, bucket string, acl []byte) error {
+//			ChangeBucketOwnerFunc: func(contextMoqParam context.Context, bucket string, owner string) error {
//				panic("mock out the ChangeBucketOwner method")
//			},
-//			CompleteMultipartUploadFunc: func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
+//			CompleteMultipartUploadFunc: func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
//				panic("mock out the CompleteMultipartUpload method")
//			},
-//			CopyObjectFunc: func(contextMoqParam context.Context, copyObjectInput *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
+//			CopyObjectFunc: func(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
//				panic("mock out the CopyObject method")
//			},
//			CreateBucketFunc: func(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput, defaultACL []byte) error {
//				panic("mock out the CreateBucket method")
//			},
-//			CreateMultipartUploadFunc: func(contextMoqParam context.Context, createMultipartUploadInput *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
+//			CreateMultipartUploadFunc: func(contextMoqParam context.Context, createMultipartUploadInput s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
//				panic("mock out the CreateMultipartUpload method")
//			},
//			DeleteBucketFunc: func(contextMoqParam context.Context, bucket string) error {
//				panic("mock out the DeleteBucket method")
//			},
+//			DeleteBucketCorsFunc: func(contextMoqParam context.Context, bucket string) error {
+//				panic("mock out the DeleteBucketCors method")
+//			},
//			DeleteBucketOwnershipControlsFunc: func(contextMoqParam context.Context, bucket string) error {
//				panic("mock out the DeleteBucketOwnershipControls method")
//			},
@@ -65,6 +68,9 @@ var _ backend.Backend = &BackendMock{}
//			GetBucketAclFunc: func(contextMoqParam context.Context, getBucketAclInput *s3.GetBucketAclInput) ([]byte, error) {
//				panic("mock out the GetBucketAcl method")
//			},
+//			GetBucketCorsFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
+//				panic("mock out the GetBucketCors method")
+//			},
//			GetBucketOwnershipControlsFunc: func(contextMoqParam context.Context, bucket string) (types.ObjectOwnership, error) {
//				panic("mock out the GetBucketOwnershipControls method")
//			},
@@ -128,6 +134,9 @@ var _ backend.Backend = &BackendMock{}
//			PutBucketAclFunc: func(contextMoqParam context.Context, bucket string, data []byte) error {
//				panic("mock out the PutBucketAcl method")
//			},
+//			PutBucketCorsFunc: func(contextMoqParam context.Context, bytes []byte) error {
+//				panic("mock out the PutBucketCors method")
+//			},
//			PutBucketOwnershipControlsFunc: func(contextMoqParam context.Context, bucket string, ownership types.ObjectOwnership) error {
//				panic("mock out the PutBucketOwnershipControls method")
//			},
@@ -140,7 +149,7 @@ var _ backend.Backend = &BackendMock{}
//			PutBucketVersioningFunc: func(contextMoqParam context.Context, bucket string, status types.BucketVersioningStatus) error {
//				panic("mock out the PutBucketVersioning method")
//			},
-//			PutObjectFunc: func(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
+//			PutObjectFunc: func(contextMoqParam context.Context, putObjectInput s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
//				panic("mock out the PutObject method")
//			},
//			PutObjectAclFunc: func(contextMoqParam context.Context, putObjectAclInput *s3.PutObjectAclInput) error {
@@ -170,10 +179,10 @@ var _ backend.Backend = &BackendMock{}
//			StringFunc: func() string {
//				panic("mock out the String method")
//			},
-//			UploadPartFunc: func(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (string, error) {
+//			UploadPartFunc: func(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
//				panic("mock out the UploadPart method")
//			},
-//			UploadPartCopyFunc: func(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
+//			UploadPartCopyFunc: func(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
//				panic("mock out the UploadPartCopy method")
//			},
//		}
@@ -187,23 +196,26 @@ type BackendMock struct {
	AbortMultipartUploadFunc func(contextMoqParam context.Context, abortMultipartUploadInput *s3.AbortMultipartUploadInput) error

	// ChangeBucketOwnerFunc mocks the ChangeBucketOwner method.
-	ChangeBucketOwnerFunc func(contextMoqParam context.Context, bucket string, acl []byte) error
+	ChangeBucketOwnerFunc func(contextMoqParam context.Context, bucket string, owner string) error

	// CompleteMultipartUploadFunc mocks the CompleteMultipartUpload method.
-	CompleteMultipartUploadFunc func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
+	CompleteMultipartUploadFunc func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error)

	// CopyObjectFunc mocks the CopyObject method.
-	CopyObjectFunc func(contextMoqParam context.Context, copyObjectInput *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
+	CopyObjectFunc func(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (s3response.CopyObjectOutput, error)

	// CreateBucketFunc mocks the CreateBucket method.
	CreateBucketFunc func(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput, defaultACL []byte) error

	// CreateMultipartUploadFunc mocks the CreateMultipartUpload method.
-	CreateMultipartUploadFunc func(contextMoqParam context.Context, createMultipartUploadInput *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error)
+	CreateMultipartUploadFunc func(contextMoqParam context.Context, createMultipartUploadInput s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error)

	// DeleteBucketFunc mocks the DeleteBucket method.
	DeleteBucketFunc func(contextMoqParam context.Context, bucket string) error

+	// DeleteBucketCorsFunc mocks the DeleteBucketCors method.
+	DeleteBucketCorsFunc func(contextMoqParam context.Context, bucket string) error
+
	// DeleteBucketOwnershipControlsFunc mocks the DeleteBucketOwnershipControls method.
	DeleteBucketOwnershipControlsFunc func(contextMoqParam context.Context, bucket string) error
@@ -225,6 +237,9 @@ type BackendMock struct {
	// GetBucketAclFunc mocks the GetBucketAcl method.
	GetBucketAclFunc func(contextMoqParam context.Context, getBucketAclInput *s3.GetBucketAclInput) ([]byte, error)

+	// GetBucketCorsFunc mocks the GetBucketCors method.
+	GetBucketCorsFunc func(contextMoqParam context.Context, bucket string) ([]byte, error)
+
	// GetBucketOwnershipControlsFunc mocks the GetBucketOwnershipControls method.
	GetBucketOwnershipControlsFunc func(contextMoqParam context.Context, bucket string) (types.ObjectOwnership, error)
@@ -288,6 +303,9 @@ type BackendMock struct {
	// PutBucketAclFunc mocks the PutBucketAcl method.
	PutBucketAclFunc func(contextMoqParam context.Context, bucket string, data []byte) error

+	// PutBucketCorsFunc mocks the PutBucketCors method.
+	PutBucketCorsFunc func(contextMoqParam context.Context, bytes []byte) error
+
	// PutBucketOwnershipControlsFunc mocks the PutBucketOwnershipControls method.
	PutBucketOwnershipControlsFunc func(contextMoqParam context.Context, bucket string, ownership types.ObjectOwnership) error
@@ -301,7 +319,7 @@ type BackendMock struct {
	PutBucketVersioningFunc func(contextMoqParam context.Context, bucket string, status types.BucketVersioningStatus) error

	// PutObjectFunc mocks the PutObject method.
-	PutObjectFunc func(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (s3response.PutObjectOutput, error)
+	PutObjectFunc func(contextMoqParam context.Context, putObjectInput s3response.PutObjectInput) (s3response.PutObjectOutput, error)

	// PutObjectAclFunc mocks the PutObjectAcl method.
	PutObjectAclFunc func(contextMoqParam context.Context, putObjectAclInput *s3.PutObjectAclInput) error
@@ -331,10 +349,10 @@ type BackendMock struct {
	StringFunc func() string

	// UploadPartFunc mocks the UploadPart method.
-	UploadPartFunc func(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (string, error)
+	UploadPartFunc func(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (*s3.UploadPartOutput, error)

	// UploadPartCopyFunc mocks the UploadPartCopy method.
-	UploadPartCopyFunc func(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error)
+	UploadPartCopyFunc func(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyPartResult, error)

	// calls tracks calls to the methods.
	calls struct {
@@ -351,8 +369,8 @@ type BackendMock struct {
			ContextMoqParam context.Context
			// Bucket is the bucket argument value.
			Bucket string
-			// ACL is the acl argument value.
-			ACL []byte
+			// Owner is the owner argument value.
+			Owner string
		}
		// CompleteMultipartUpload holds details about calls to the CompleteMultipartUpload method.
		CompleteMultipartUpload []struct {
@@ -366,7 +384,7 @@ type BackendMock struct {
			// ContextMoqParam is the contextMoqParam argument value.
			ContextMoqParam context.Context
			// CopyObjectInput is the copyObjectInput argument value.
-			CopyObjectInput *s3.CopyObjectInput
+			CopyObjectInput s3response.CopyObjectInput
		}
		// CreateBucket holds details about calls to the CreateBucket method.
		CreateBucket []struct {
@@ -382,7 +400,7 @@ type BackendMock struct {
			// ContextMoqParam is the contextMoqParam argument value.
			ContextMoqParam context.Context
			// CreateMultipartUploadInput is the createMultipartUploadInput argument value.
-			CreateMultipartUploadInput *s3.CreateMultipartUploadInput
+			CreateMultipartUploadInput s3response.CreateMultipartUploadInput
		}
		// DeleteBucket holds details about calls to the DeleteBucket method.
		DeleteBucket []struct {
@@ -391,6 +409,13 @@ type BackendMock struct {
			// Bucket is the bucket argument value.
			Bucket string
		}
+		// DeleteBucketCors holds details about calls to the DeleteBucketCors method.
+		DeleteBucketCors []struct {
+			// ContextMoqParam is the contextMoqParam argument value.
+			ContextMoqParam context.Context
+			// Bucket is the bucket argument value.
+			Bucket string
+		}
		// DeleteBucketOwnershipControls holds details about calls to the DeleteBucketOwnershipControls method.
		DeleteBucketOwnershipControls []struct {
			// ContextMoqParam is the contextMoqParam argument value.
@@ -442,6 +467,13 @@ type BackendMock struct {
			// GetBucketAclInput is the getBucketAclInput argument value.
			GetBucketAclInput *s3.GetBucketAclInput
		}
+		// GetBucketCors holds details about calls to the GetBucketCors method.
+		GetBucketCors []struct {
+			// ContextMoqParam is the contextMoqParam argument value.
+			ContextMoqParam context.Context
+			// Bucket is the bucket argument value.
+			Bucket string
+		}
		// GetBucketOwnershipControls holds details about calls to the GetBucketOwnershipControls method.
		GetBucketOwnershipControls []struct {
			// ContextMoqParam is the contextMoqParam argument value.
@@ -599,6 +631,13 @@ type BackendMock struct {
			// Data is the data argument value.
			Data []byte
		}
+		// PutBucketCors holds details about calls to the PutBucketCors method.
+		PutBucketCors []struct {
+			// ContextMoqParam is the contextMoqParam argument value.
+			ContextMoqParam context.Context
+			// Bytes is the bytes argument value.
+			Bytes []byte
+		}
		// PutBucketOwnershipControls holds details about calls to the PutBucketOwnershipControls method.
		PutBucketOwnershipControls []struct {
			// ContextMoqParam is the contextMoqParam argument value.
@@ -640,7 +679,7 @@ type BackendMock struct {
			// ContextMoqParam is the contextMoqParam argument value.
			ContextMoqParam context.Context
			// PutObjectInput is the putObjectInput argument value.
-			PutObjectInput *s3.PutObjectInput
+			PutObjectInput s3response.PutObjectInput
		}
		// PutObjectAcl holds details about calls to the PutObjectAcl method.
		PutObjectAcl []struct {
@@ -739,6 +778,7 @@ type BackendMock struct {
	lockCreateBucket                  sync.RWMutex
	lockCreateMultipartUpload         sync.RWMutex
	lockDeleteBucket                  sync.RWMutex
+	lockDeleteBucketCors              sync.RWMutex
	lockDeleteBucketOwnershipControls sync.RWMutex
	lockDeleteBucketPolicy            sync.RWMutex
	lockDeleteBucketTagging           sync.RWMutex
@@ -746,6 +786,7 @@ type BackendMock struct {
	lockDeleteObjectTagging           sync.RWMutex
	lockDeleteObjects                 sync.RWMutex
	lockGetBucketAcl                  sync.RWMutex
+	lockGetBucketCors                 sync.RWMutex
	lockGetBucketOwnershipControls    sync.RWMutex
	lockGetBucketPolicy               sync.RWMutex
	lockGetBucketTagging              sync.RWMutex
@@ -767,6 +808,7 @@ type BackendMock struct {
	lockListObjectsV2                 sync.RWMutex
	lockListParts                     sync.RWMutex
	lockPutBucketAcl                  sync.RWMutex
+	lockPutBucketCors                 sync.RWMutex
	lockPutBucketOwnershipControls    sync.RWMutex
	lockPutBucketPolicy               sync.RWMutex
	lockPutBucketTagging              sync.RWMutex
@@ -822,23 +864,23 @@ func (mock *BackendMock) AbortMultipartUploadCalls() []struct {
}

// ChangeBucketOwner calls ChangeBucketOwnerFunc.
-func (mock *BackendMock) ChangeBucketOwner(contextMoqParam context.Context, bucket string, acl []byte) error {
+func (mock *BackendMock) ChangeBucketOwner(contextMoqParam context.Context, bucket string, owner string) error {
	if mock.ChangeBucketOwnerFunc == nil {
		panic("BackendMock.ChangeBucketOwnerFunc: method is nil but Backend.ChangeBucketOwner was just called")
	}
	callInfo := struct {
		ContextMoqParam context.Context
		Bucket          string
-		ACL             []byte
+		Owner           string
	}{
		ContextMoqParam: contextMoqParam,
		Bucket:          bucket,
-		ACL:             acl,
+		Owner:           owner,
	}
	mock.lockChangeBucketOwner.Lock()
	mock.calls.ChangeBucketOwner = append(mock.calls.ChangeBucketOwner, callInfo)
	mock.lockChangeBucketOwner.Unlock()
-	return mock.ChangeBucketOwnerFunc(contextMoqParam, bucket, acl)
+	return mock.ChangeBucketOwnerFunc(contextMoqParam, bucket, owner)
}

// ChangeBucketOwnerCalls gets all the calls that were made to ChangeBucketOwner.
@@ -848,12 +890,12 @@ func (mock *BackendMock) ChangeBucketOwner(contextMoqParam context.Context, buck
func (mock *BackendMock) ChangeBucketOwnerCalls() []struct {
	ContextMoqParam context.Context
	Bucket          string
-	ACL             []byte
+	Owner           string
} {
	var calls []struct {
		ContextMoqParam context.Context
		Bucket          string
-		ACL             []byte
+		Owner           string
	}
	mock.lockChangeBucketOwner.RLock()
	calls = mock.calls.ChangeBucketOwner
@@ -862,7 +904,7 @@ func (mock *BackendMock) ChangeBucketOwnerCalls() []struct {
}

// CompleteMultipartUpload calls CompleteMultipartUploadFunc.
-func (mock *BackendMock) CompleteMultipartUpload(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
+func (mock *BackendMock) CompleteMultipartUpload(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
	if mock.CompleteMultipartUploadFunc == nil {
		panic("BackendMock.CompleteMultipartUploadFunc: method is nil but Backend.CompleteMultipartUpload was just called")
	}
@@ -898,13 +940,13 @@ func (mock *BackendMock) CompleteMultipartUploadCalls() []struct {
}

// CopyObject calls CopyObjectFunc.
-func (mock *BackendMock) CopyObject(contextMoqParam context.Context, copyObjectInput *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
+func (mock *BackendMock) CopyObject(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
	if mock.CopyObjectFunc == nil {
		panic("BackendMock.CopyObjectFunc: method is nil but Backend.CopyObject was just called")
	}
	callInfo := struct {
		ContextMoqParam context.Context
-		CopyObjectInput *s3.CopyObjectInput
+		CopyObjectInput s3response.CopyObjectInput
	}{
		ContextMoqParam: contextMoqParam,
		CopyObjectInput: copyObjectInput,
@@ -921,11 +963,11 @@ func (mock *BackendMock) CopyObject(contextMoqParam context.Context, copyObjectI
//	len(mockedBackend.CopyObjectCalls())
func (mock *BackendMock) CopyObjectCalls() []struct {
	ContextMoqParam context.Context
-	CopyObjectInput *s3.CopyObjectInput
+	CopyObjectInput s3response.CopyObjectInput
} {
	var calls []struct {
		ContextMoqParam context.Context
-		CopyObjectInput *s3.CopyObjectInput
+		CopyObjectInput s3response.CopyObjectInput
	}
	mock.lockCopyObject.RLock()
	calls = mock.calls.CopyObject
@@ -974,13 +1016,13 @@ func (mock *BackendMock) CreateBucketCalls() []struct {
}

// CreateMultipartUpload calls CreateMultipartUploadFunc.
-func (mock *BackendMock) CreateMultipartUpload(contextMoqParam context.Context, createMultipartUploadInput *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
+func (mock *BackendMock) CreateMultipartUpload(contextMoqParam context.Context, createMultipartUploadInput s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
	if mock.CreateMultipartUploadFunc == nil {
		panic("BackendMock.CreateMultipartUploadFunc: method is nil but Backend.CreateMultipartUpload was just called")
	}
	callInfo := struct {
		ContextMoqParam            context.Context
-		CreateMultipartUploadInput *s3.CreateMultipartUploadInput
+		CreateMultipartUploadInput s3response.CreateMultipartUploadInput
	}{
		ContextMoqParam:            contextMoqParam,
		CreateMultipartUploadInput: createMultipartUploadInput,
@@ -997,11 +1039,11 @@ func (mock *BackendMock) CreateMultipartUpload(contextMoqParam context.Context,
//	len(mockedBackend.CreateMultipartUploadCalls())
func (mock *BackendMock) CreateMultipartUploadCalls() []struct {
	ContextMoqParam            context.Context
-	CreateMultipartUploadInput *s3.CreateMultipartUploadInput
+	CreateMultipartUploadInput s3response.CreateMultipartUploadInput
} {
	var calls []struct {
		ContextMoqParam            context.Context
-		CreateMultipartUploadInput *s3.CreateMultipartUploadInput
+		CreateMultipartUploadInput s3response.CreateMultipartUploadInput
	}
	mock.lockCreateMultipartUpload.RLock()
	calls = mock.calls.CreateMultipartUpload
@@ -1045,6 +1087,42 @@ func (mock *BackendMock) DeleteBucketCalls() []struct {
	return calls
}

+// DeleteBucketCors calls DeleteBucketCorsFunc.
+func (mock *BackendMock) DeleteBucketCors(contextMoqParam context.Context, bucket string) error {
+	if mock.DeleteBucketCorsFunc == nil {
+		panic("BackendMock.DeleteBucketCorsFunc: method is nil but Backend.DeleteBucketCors was just called")
+	}
+	callInfo := struct {
+		ContextMoqParam context.Context
+		Bucket          string
+	}{
+		ContextMoqParam: contextMoqParam,
+		Bucket:          bucket,
+	}
+	mock.lockDeleteBucketCors.Lock()
+	mock.calls.DeleteBucketCors = append(mock.calls.DeleteBucketCors, callInfo)
+	mock.lockDeleteBucketCors.Unlock()
+	return mock.DeleteBucketCorsFunc(contextMoqParam, bucket)
+}
+
+// DeleteBucketCorsCalls gets all the calls that were made to DeleteBucketCors.
+// Check the length with:
+//
+//	len(mockedBackend.DeleteBucketCorsCalls())
+func (mock *BackendMock) DeleteBucketCorsCalls() []struct {
+	ContextMoqParam context.Context
+	Bucket          string
+} {
+	var calls []struct {
+		ContextMoqParam context.Context
+		Bucket          string
+	}
+	mock.lockDeleteBucketCors.RLock()
+	calls = mock.calls.DeleteBucketCors
+	mock.lockDeleteBucketCors.RUnlock()
+	return calls
+}
+
// DeleteBucketOwnershipControls calls DeleteBucketOwnershipControlsFunc.
func (mock *BackendMock) DeleteBucketOwnershipControls(contextMoqParam context.Context, bucket string) error {
	if mock.DeleteBucketOwnershipControlsFunc == nil {
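Note the generated contract above: if a test leaves a Func field nil, the first call panics with an attributable message instead of silently succeeding. A small sketch of that failure mode (hypothetical test, assuming the mock as generated):

func TestUnconfiguredMockPanics(t *testing.T) {
	mock := &BackendMock{} // DeleteBucketCorsFunc deliberately left nil

	defer func() {
		if recover() == nil {
			t.Fatal("expected BackendMock.DeleteBucketCors to panic when unconfigured")
		}
	}()
	_ = mock.DeleteBucketCors(context.Background(), "my-bucket")
}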
@@ -1301,6 +1379,42 @@ func (mock *BackendMock) GetBucketAclCalls() []struct {
	return calls
}

+// GetBucketCors calls GetBucketCorsFunc.
+func (mock *BackendMock) GetBucketCors(contextMoqParam context.Context, bucket string) ([]byte, error) {
+	if mock.GetBucketCorsFunc == nil {
+		panic("BackendMock.GetBucketCorsFunc: method is nil but Backend.GetBucketCors was just called")
+	}
+	callInfo := struct {
+		ContextMoqParam context.Context
+		Bucket          string
+	}{
+		ContextMoqParam: contextMoqParam,
+		Bucket:          bucket,
+	}
+	mock.lockGetBucketCors.Lock()
+	mock.calls.GetBucketCors = append(mock.calls.GetBucketCors, callInfo)
+	mock.lockGetBucketCors.Unlock()
+	return mock.GetBucketCorsFunc(contextMoqParam, bucket)
+}
+
+// GetBucketCorsCalls gets all the calls that were made to GetBucketCors.
+// Check the length with:
+//
+//	len(mockedBackend.GetBucketCorsCalls())
+func (mock *BackendMock) GetBucketCorsCalls() []struct {
+	ContextMoqParam context.Context
+	Bucket          string
+} {
+	var calls []struct {
+		ContextMoqParam context.Context
+		Bucket          string
+	}
+	mock.lockGetBucketCors.RLock()
+	calls = mock.calls.GetBucketCors
+	mock.lockGetBucketCors.RUnlock()
+	return calls
+}
+
// GetBucketOwnershipControls calls GetBucketOwnershipControlsFunc.
func (mock *BackendMock) GetBucketOwnershipControls(contextMoqParam context.Context, bucket string) (types.ObjectOwnership, error) {
	if mock.GetBucketOwnershipControlsFunc == nil {
@@ -2077,6 +2191,42 @@ func (mock *BackendMock) PutBucketAclCalls() []struct {
	return calls
}

+// PutBucketCors calls PutBucketCorsFunc.
+func (mock *BackendMock) PutBucketCors(contextMoqParam context.Context, bytes []byte) error {
+	if mock.PutBucketCorsFunc == nil {
+		panic("BackendMock.PutBucketCorsFunc: method is nil but Backend.PutBucketCors was just called")
+	}
+	callInfo := struct {
+		ContextMoqParam context.Context
+		Bytes           []byte
+	}{
+		ContextMoqParam: contextMoqParam,
+		Bytes:           bytes,
+	}
+	mock.lockPutBucketCors.Lock()
+	mock.calls.PutBucketCors = append(mock.calls.PutBucketCors, callInfo)
+	mock.lockPutBucketCors.Unlock()
+	return mock.PutBucketCorsFunc(contextMoqParam, bytes)
+}
+
+// PutBucketCorsCalls gets all the calls that were made to PutBucketCors.
+// Check the length with:
+//
+//	len(mockedBackend.PutBucketCorsCalls())
+func (mock *BackendMock) PutBucketCorsCalls() []struct {
+	ContextMoqParam context.Context
+	Bytes           []byte
+} {
+	var calls []struct {
+		ContextMoqParam context.Context
+		Bytes           []byte
+	}
+	mock.lockPutBucketCors.RLock()
+	calls = mock.calls.PutBucketCors
+	mock.lockPutBucketCors.RUnlock()
+	return calls
+}
+
// PutBucketOwnershipControls calls PutBucketOwnershipControlsFunc.
func (mock *BackendMock) PutBucketOwnershipControls(contextMoqParam context.Context, bucket string, ownership types.ObjectOwnership) error {
	if mock.PutBucketOwnershipControlsFunc == nil {
@@ -2238,13 +2388,13 @@ func (mock *BackendMock) PutBucketVersioningCalls() []struct {
}

// PutObject calls PutObjectFunc.
-func (mock *BackendMock) PutObject(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
+func (mock *BackendMock) PutObject(contextMoqParam context.Context, putObjectInput s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
	if mock.PutObjectFunc == nil {
		panic("BackendMock.PutObjectFunc: method is nil but Backend.PutObject was just called")
	}
	callInfo := struct {
		ContextMoqParam context.Context
-		PutObjectInput  *s3.PutObjectInput
+		PutObjectInput  s3response.PutObjectInput
	}{
		ContextMoqParam: contextMoqParam,
		PutObjectInput:  putObjectInput,
@@ -2261,11 +2411,11 @@ func (mock *BackendMock) PutObject(contextMoqParam context.Context, putObjectInp
//	len(mockedBackend.PutObjectCalls())
func (mock *BackendMock) PutObjectCalls() []struct {
	ContextMoqParam context.Context
-	PutObjectInput  *s3.PutObjectInput
+	PutObjectInput  s3response.PutObjectInput
} {
	var calls []struct {
		ContextMoqParam context.Context
-		PutObjectInput  *s3.PutObjectInput
+		PutObjectInput  s3response.PutObjectInput
	}
	mock.lockPutObject.RLock()
	calls = mock.calls.PutObject
@@ -2620,7 +2770,7 @@ func (mock *BackendMock) StringCalls() []struct {
}

// UploadPart calls UploadPartFunc.
-func (mock *BackendMock) UploadPart(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (string, error) {
+func (mock *BackendMock) UploadPart(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
	if mock.UploadPartFunc == nil {
		panic("BackendMock.UploadPartFunc: method is nil but Backend.UploadPart was just called")
	}
@@ -2656,7 +2806,7 @@ func (mock *BackendMock) UploadPartCalls() []struct {
}

// UploadPartCopy calls UploadPartCopyFunc.
-func (mock *BackendMock) UploadPartCopy(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
+func (mock *BackendMock) UploadPartCopy(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
	if mock.UploadPartCopyFunc == nil {
		panic("BackendMock.UploadPartCopyFunc: method is nil but Backend.UploadPartCopy was just called")
	}

File diff suppressed because it is too large
@@ -32,6 +32,7 @@ import (
	"github.com/valyala/fasthttp"
	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/backend"
+	"github.com/versity/versitygw/s3api/utils"
	"github.com/versity/versitygw/s3err"
	"github.com/versity/versitygw/s3response"
)
@@ -99,8 +100,7 @@ func TestS3ApiController_ListBuckets(t *testing.T) {
	}

	app.Use(func(ctx *fiber.Ctx) error {
-		ctx.Locals("account", auth.Account{Access: "valid access", Role: "admin:"})
-		ctx.Locals("isDebug", false)
+		utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access", Role: "admin:"})
		return ctx.Next()
	})
	app.Get("/", s3ApiController.ListBuckets)
@@ -116,8 +116,7 @@ func TestS3ApiController_ListBuckets(t *testing.T) {
	}

	appErr.Use(func(ctx *fiber.Ctx) error {
-		ctx.Locals("account", auth.Account{Access: "valid access", Role: "admin:"})
-		ctx.Locals("isDebug", false)
+		utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access", Role: "admin:"})
		return ctx.Next()
	})
	appErr.Get("/", s3ApiControllerErr.ListBuckets)
@@ -220,10 +219,9 @@ func TestS3ApiController_GetActions(t *testing.T) {
		},
	}
	app.Use(func(ctx *fiber.Ctx) error {
-		ctx.Locals("account", auth.Account{Access: "valid access"})
-		ctx.Locals("isRoot", true)
-		ctx.Locals("isDebug", false)
-		ctx.Locals("parsedAcl", auth.ACL{})
+		utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
+		utils.ContextKeyIsRoot.Set(ctx, true)
+		utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
		return ctx.Next()
	})
	app.Get("/:bucket/:key/*", s3ApiController.GetActions)
@@ -232,6 +230,9 @@ func TestS3ApiController_GetActions(t *testing.T) {
	getObjAttrs := httptest.NewRequest(http.MethodGet, "/my-bucket/key", nil)
	getObjAttrs.Header.Set("X-Amz-Object-Attributes", "hello")

+	invalidChecksumMode := httptest.NewRequest(http.MethodGet, "/my-bucket/key", nil)
+	invalidChecksumMode.Header.Set("x-amz-checksum-mode", "invalid_checksum_mode")
+
	tests := []struct {
		name       string
		app        *fiber.App
@@ -329,6 +330,15 @@ func TestS3ApiController_GetActions(t *testing.T) {
			wantErr:    false,
			statusCode: 200,
		},
+		{
+			name: "Get-actions-get-object-invalid-checksum-mode",
+			app:  app,
+			args: args{
+				req: invalidChecksumMode,
+			},
+			wantErr:    false,
+			statusCode: 400,
+		},
		{
			name: "Get-actions-get-object-success",
			app:  app,
@@ -401,10 +411,9 @@ func TestS3ApiController_ListActions(t *testing.T) {
	}

	app.Use(func(ctx *fiber.Ctx) error {
-		ctx.Locals("account", auth.Account{Access: "valid access"})
-		ctx.Locals("isRoot", true)
-		ctx.Locals("isDebug", false)
-		ctx.Locals("parsedAcl", auth.ACL{})
+		utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
+		utils.ContextKeyIsRoot.Set(ctx, true)
+		utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
		return ctx.Next()
	})

@@ -426,10 +435,9 @@ func TestS3ApiController_ListActions(t *testing.T) {
	}
	appError := fiber.New()
	appError.Use(func(ctx *fiber.Ctx) error {
-		ctx.Locals("account", auth.Account{Access: "valid access"})
-		ctx.Locals("isRoot", true)
-		ctx.Locals("isDebug", false)
-		ctx.Locals("parsedAcl", auth.ACL{})
+		utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
+		utils.ContextKeyIsRoot.Set(ctx, true)
+		utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
		return ctx.Next()
	})
	appError.Get("/:bucket", s3ApiControllerError.ListActions)
@@ -622,8 +630,7 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
	</VersioningConfiguration>
	`

-	policyBody := `
-	{
+	policyBody := `{
		"Statement": [
			{
				"Effect": "Allow",
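The rewritten literal keeps the opening brace on the same line as the backtick, so the policy body no longer begins with a newline and indentation before the JSON object. Compare, in a small sketch:

old := `
	{ "Statement": [] }` // starts with "\n\t{ ..."
new := `{ "Statement": [] }` // starts with "{ ..."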
@@ -696,10 +703,9 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
	}
	// Mock ctx.Locals
	app.Use(func(ctx *fiber.Ctx) error {
-		ctx.Locals("account", auth.Account{Access: "valid access"})
-		ctx.Locals("isRoot", true)
-		ctx.Locals("isDebug", false)
-		ctx.Locals("parsedAcl", auth.ACL{Owner: "valid access"})
+		utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
+		utils.ContextKeyIsRoot.Set(ctx, true)
+		utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{Owner: "valid access"})
		return ctx.Next()
	})
	app.Put("/:bucket", s3ApiController.PutBucketActions)
@@ -878,15 +884,6 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
			wantErr:    false,
			statusCode: 400,
		},
-		{
-			name: "Create-bucket-invalid-bucket-name",
-			app:  app,
-			args: args{
-				req: httptest.NewRequest(http.MethodPut, "/aa", nil),
-			},
-			wantErr:    false,
-			statusCode: 400,
-		},
		{
			name: "Create-bucket-success",
			app:  app,
@@ -963,22 +960,22 @@ func TestS3ApiController_PutActions(t *testing.T) {
		PutObjectAclFunc: func(context.Context, *s3.PutObjectAclInput) error {
			return nil
		},
-		CopyObjectFunc: func(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
-			return &s3.CopyObjectOutput{
-				CopyObjectResult: &types.CopyObjectResult{},
+		CopyObjectFunc: func(context.Context, s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
+			return s3response.CopyObjectOutput{
+				CopyObjectResult: &s3response.CopyObjectResult{},
			}, nil
		},
-		PutObjectFunc: func(context.Context, *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
+		PutObjectFunc: func(context.Context, s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
			return s3response.PutObjectOutput{}, nil
		},
-		UploadPartFunc: func(context.Context, *s3.UploadPartInput) (string, error) {
-			return "hello", nil
+		UploadPartFunc: func(context.Context, *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
+			return &s3.UploadPartOutput{}, nil
		},
		PutObjectTaggingFunc: func(_ context.Context, bucket, object string, tags map[string]string) error {
			return nil
		},
-		UploadPartCopyFunc: func(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
-			return s3response.CopyObjectResult{}, nil
+		UploadPartCopyFunc: func(context.Context, *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
+			return s3response.CopyPartResult{}, nil
		},
		PutObjectLegalHoldFunc: func(contextMoqParam context.Context, bucket, object, versionId string, status bool) error {
			return nil
@@ -992,10 +989,9 @@ func TestS3ApiController_PutActions(t *testing.T) {
		},
	}
	app.Use(func(ctx *fiber.Ctx) error {
-		ctx.Locals("account", auth.Account{Access: "valid access"})
-		ctx.Locals("isRoot", true)
-		ctx.Locals("isDebug", false)
-		ctx.Locals("parsedAcl", auth.ACL{})
+		utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
+		utils.ContextKeyIsRoot.Set(ctx, true)
+		utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
		return ctx.Next()
	})
	app.Put("/:bucket/:key/*", s3ApiController.PutActions)
@@ -1012,6 +1008,11 @@ func TestS3ApiController_PutActions(t *testing.T) {
	cpySrcReq := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
	cpySrcReq.Header.Set("X-Amz-Copy-Source", "srcBucket/srcObject")

+	// CopyObject invalid checksum algorithm
+	cpyInvChecksumAlgo := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
+	cpyInvChecksumAlgo.Header.Set("X-Amz-Copy-Source", "srcBucket/srcObject")
+	cpyInvChecksumAlgo.Header.Set("X-Amz-Checksum-Algorithm", "invalid_checksum_algorithm")
+
	// PutObjectAcl success
	aclReq := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
	aclReq.Header.Set("X-Amz-Acl", "private")
@@ -1033,6 +1034,40 @@ func TestS3ApiController_PutActions(t *testing.T) {
	invAclBodyGrtReq := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key?acl", strings.NewReader(body))
	invAclBodyGrtReq.Header.Set("X-Amz-Grant-Read", "hello")

+	// PutObject invalid checksum algorithm
+	invChecksumAlgo := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
+	invChecksumAlgo.Header.Set("X-Amz-Checksum-Algorithm", "invalid_checksum_algorithm")
+
+	// PutObject invalid base64 checksum
+	invBase64Checksum := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
+	invBase64Checksum.Header.Set("X-Amz-Checksum-Crc32", "invalid_base64")
+
+	// PutObject invalid crc32
+	invCrc32 := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
+	invCrc32.Header.Set("X-Amz-Checksum-Crc32", "YXNkZmFkc2Zhc2Rm")
+
+	// PutObject invalid crc32c
+	invCrc32c := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
+	invCrc32c.Header.Set("X-Amz-Checksum-Crc32c", "YXNkZmFkc2Zhc2RmYXNkZg==")
+
+	// PutObject invalid sha1
+	invSha1 := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
+	invSha1.Header.Set("X-Amz-Checksum-Sha1", "YXNkZmFkc2Zhc2RmYXNkZnNkYWZkYXNmZGFzZg==")
+
+	// PutObject invalid sha256
+	invSha256 := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
+	invSha256.Header.Set("X-Amz-Checksum-Sha256", "YXNkZmFkc2Zhc2RmYXNkZnNkYWZkYXNmZGFzZmFkc2Zhc2Rm")
+
+	// PutObject multiple checksum headers
+	mulChecksumHdrs := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
+	mulChecksumHdrs.Header.Set("X-Amz-Checksum-Sha256", "d1SPCd/kZ2rAzbbLUC0n/bEaOSx70FNbXbIqoIxKuPY=")
+	mulChecksumHdrs.Header.Set("X-Amz-Checksum-Crc32c", "ww2FVQ==")
+
+	// PutObject checksum algorithm and header mismatch
+	checksumHdrMismatch := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
+	checksumHdrMismatch.Header.Set("X-Amz-Checksum-Algorithm", "SHA1")
+	checksumHdrMismatch.Header.Set("X-Amz-Checksum-Crc32c", "ww2FVQ==")
+
	tests := []struct {
		name       string
		app        *fiber.App
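The fixtures above are well-formed base64 that decodes to the wrong length or value for the named algorithm (a CRC32 or CRC32C digest must decode to exactly 4 bytes, SHA1 to 20, SHA256 to 32). If a test needs a genuinely valid header value, it can be derived with only the standard library along these lines (payload and request are hypothetical; imports are hash/crc32, encoding/binary, encoding/base64):

payload := []byte("hello world")

// CRC32C uses the Castagnoli polynomial; S3 checksum headers carry
// the big-endian digest, base64-encoded.
sum := crc32.Checksum(payload, crc32.MakeTable(crc32.Castagnoli))
buf := make([]byte, 4)
binary.BigEndian.PutUint32(buf, sum)
req.Header.Set("X-Amz-Checksum-Crc32c", base64.StdEncoding.EncodeToString(buf))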
@@ -1175,6 +1210,15 @@ func TestS3ApiController_PutActions(t *testing.T) {
			wantErr:    false,
			statusCode: 200,
		},
+		{
+			name: "Copy-object-invalid-checksum-algorithm",
+			app:  app,
+			args: args{
+				req: cpyInvChecksumAlgo,
+			},
+			wantErr:    false,
+			statusCode: 400,
+		},
		{
			name: "Copy-object-success",
			app:  app,
@@ -1233,10 +1277,9 @@ func TestS3ApiController_DeleteBucket(t *testing.T) {
	}

	app.Use(func(ctx *fiber.Ctx) error {
-		ctx.Locals("account", auth.Account{Access: "valid access"})
-		ctx.Locals("isRoot", true)
-		ctx.Locals("isDebug", false)
-		ctx.Locals("parsedAcl", auth.ACL{})
+		utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
+		utils.ContextKeyIsRoot.Set(ctx, true)
+		utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
		return ctx.Next()
	})

@@ -1319,10 +1362,9 @@ func TestS3ApiController_DeleteObjects(t *testing.T) {
	}

	app.Use(func(ctx *fiber.Ctx) error {
-		ctx.Locals("account", auth.Account{Access: "valid access"})
-		ctx.Locals("isRoot", true)
-		ctx.Locals("isDebug", false)
-		ctx.Locals("parsedAcl", auth.ACL{})
+		utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
+		utils.ContextKeyIsRoot.Set(ctx, true)
+		utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
		return ctx.Next()
	})
	app.Post("/:bucket", s3ApiController.DeleteObjects)
@@ -1399,10 +1441,9 @@ func TestS3ApiController_DeleteActions(t *testing.T) {
	}

	app.Use(func(ctx *fiber.Ctx) error {
-		ctx.Locals("account", auth.Account{Access: "valid access"})
-		ctx.Locals("isRoot", true)
-		ctx.Locals("isDebug", false)
-		ctx.Locals("parsedAcl", auth.ACL{})
+		utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
+		utils.ContextKeyIsRoot.Set(ctx, true)
+		utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
		return ctx.Next()
	})
	app.Delete("/:bucket/:key/*", s3ApiController.DeleteActions)
@@ -1423,10 +1464,9 @@ func TestS3ApiController_DeleteActions(t *testing.T) {
	}}

	appErr.Use(func(ctx *fiber.Ctx) error {
-		ctx.Locals("account", auth.Account{Access: "valid access"})
-		ctx.Locals("isRoot", true)
-		ctx.Locals("isDebug", false)
-		ctx.Locals("parsedAcl", auth.ACL{})
+		utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
+		utils.ContextKeyIsRoot.Set(ctx, true)
+		utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
		return ctx.Next()
	})
	appErr.Delete("/:bucket/:key/*", s3ApiControllerErr.DeleteActions)
@@ -1506,11 +1546,10 @@ func TestS3ApiController_HeadBucket(t *testing.T) {
	}

	app.Use(func(ctx *fiber.Ctx) error {
-		ctx.Locals("account", auth.Account{Access: "valid access"})
-		ctx.Locals("isRoot", true)
-		ctx.Locals("isDebug", false)
-		ctx.Locals("parsedAcl", auth.ACL{})
-		ctx.Locals("region", "us-east-1")
+		utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
+		utils.ContextKeyIsRoot.Set(ctx, true)
+		utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
+		utils.ContextKeyRegion.Set(ctx, "us-east-1")
		return ctx.Next()
	})

@@ -1524,17 +1563,16 @@ func TestS3ApiController_HeadBucket(t *testing.T) {
				return acldata, nil
			},
			HeadBucketFunc: func(context.Context, *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
-				return nil, s3err.GetAPIError(3)
+				return nil, s3err.GetAPIError(s3err.ErrBucketNotEmpty)
			},
		},
	}

	appErr.Use(func(ctx *fiber.Ctx) error {
-		ctx.Locals("account", auth.Account{Access: "valid access"})
-		ctx.Locals("isRoot", true)
-		ctx.Locals("isDebug", false)
-		ctx.Locals("parsedAcl", auth.ACL{})
-		ctx.Locals("region", "us-east-1")
+		utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
+		utils.ContextKeyIsRoot.Set(ctx, true)
+		utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
+		utils.ContextKeyRegion.Set(ctx, "us-east-1")
		return ctx.Next()
	})
@@ -1611,10 +1649,9 @@ func TestS3ApiController_HeadObject(t *testing.T) {
	}

	app.Use(func(ctx *fiber.Ctx) error {
-		ctx.Locals("account", auth.Account{Access: "valid access"})
-		ctx.Locals("isRoot", true)
-		ctx.Locals("isDebug", false)
-		ctx.Locals("parsedAcl", auth.ACL{})
+		utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
+		utils.ContextKeyIsRoot.Set(ctx, true)
+		utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
		return ctx.Next()
	})
	app.Head("/:bucket/:key/*", s3ApiController.HeadObject)
@@ -1634,14 +1671,16 @@ func TestS3ApiController_HeadObject(t *testing.T) {
	}

	appErr.Use(func(ctx *fiber.Ctx) error {
-		ctx.Locals("account", auth.Account{Access: "valid access"})
-		ctx.Locals("isRoot", true)
-		ctx.Locals("isDebug", false)
-		ctx.Locals("parsedAcl", auth.ACL{})
+		utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
+		utils.ContextKeyIsRoot.Set(ctx, true)
+		utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
		return ctx.Next()
	})
	appErr.Head("/:bucket/:key/*", s3ApiControllerErr.HeadObject)

+	invChecksumMode := httptest.NewRequest(http.MethodHead, "/my-bucket/my-key", nil)
+	invChecksumMode.Header.Set("X-Amz-Checksum-Mode", "invalid_checksum_mode")
+
	tests := []struct {
		name       string
		app        *fiber.App
@@ -1658,6 +1697,15 @@ func TestS3ApiController_HeadObject(t *testing.T) {
			wantErr:    false,
			statusCode: 200,
		},
+		{
+			name: "Head-object-invalid-checksum-mode",
+			app:  app,
+			args: args{
+				req: invChecksumMode,
+			},
+			wantErr:    false,
+			statusCode: 400,
+		},
		{
			name: "Head-object-error",
			app:  appErr,
@@ -1694,10 +1742,10 @@ func TestS3ApiController_CreateActions(t *testing.T) {
		RestoreObjectFunc: func(context.Context, *s3.RestoreObjectInput) error {
			return nil
		},
-		CompleteMultipartUploadFunc: func(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
-			return &s3.CompleteMultipartUploadOutput{}, nil
+		CompleteMultipartUploadFunc: func(context.Context, *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
+			return s3response.CompleteMultipartUploadResult{}, "", nil
		},
-		CreateMultipartUploadFunc: func(context.Context, *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
+		CreateMultipartUploadFunc: func(context.Context, s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
			return s3response.InitiateMultipartUploadResult{}, nil
		},
		SelectObjectContentFunc: func(context.Context, *s3.SelectObjectContentInput) func(w *bufio.Writer) {
@@ -1727,14 +1775,16 @@ func TestS3ApiController_CreateActions(t *testing.T) {
	`

	app.Use(func(ctx *fiber.Ctx) error {
-		ctx.Locals("account", auth.Account{Access: "valid access"})
-		ctx.Locals("isRoot", true)
-		ctx.Locals("isDebug", false)
-		ctx.Locals("parsedAcl", auth.ACL{})
+		utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
+		utils.ContextKeyIsRoot.Set(ctx, true)
+		utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
		return ctx.Next()
	})
	app.Post("/:bucket/:key/*", s3ApiController.CreateActions)

+	invChecksumAlgo := httptest.NewRequest(http.MethodPost, "/my-bucket/my-key", nil)
+	invChecksumAlgo.Header.Set("X-Amz-Checksum-Algorithm", "invalid_checksum_algorithm")
+
	tests := []struct {
		name       string
		app        *fiber.App
@@ -1796,6 +1846,15 @@ func TestS3ApiController_CreateActions(t *testing.T) {
			wantErr:    false,
			statusCode: 200,
		},
+		{
+			name: "Create-multipart-upload-invalid-checksum-algorithm",
+			app:  app,
+			args: args{
+				req: invChecksumAlgo,
+			},
+			wantErr:    false,
+			statusCode: 400,
+		},
		{
			name: "Create-multipart-upload-success",
			app:  app,
226	s3api/debuglogger/logger.go	Normal file
@@ -0,0 +1,226 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package debuglogger

import (
	"fmt"
	"log"
	"net/http"
	"strings"
	"sync/atomic"

	"github.com/gofiber/fiber/v2"
)

type Color string

const (
	green  Color = "\033[32m"
	yellow Color = "\033[33m"
	blue   Color = "\033[34m"
	Purple Color = "\033[0;35m"

	reset      = "\033[0m"
	borderChar = "─"
	boxWidth   = 120
)

// Logs http request details: headers, body, params, query args
func LogFiberRequestDetails(ctx *fiber.Ctx) {
	// Log the full request url
	fullURL := ctx.Protocol() + "://" + ctx.Hostname() + ctx.OriginalURL()
	fmt.Printf("%s[URL]: %s%s\n", green, fullURL, reset)

	// log request headers
	wrapInBox(green, "REQUEST HEADERS", boxWidth, func() {
		for key, value := range ctx.Request().Header.All() {
			printWrappedLine(yellow, string(key), string(value))
		}
	})
	// skip request body log for PutObject and UploadPart
	skipBodyLog := isLargeDataAction(ctx)
	if !skipBodyLog {
		body := ctx.Request().Body()
		if len(body) != 0 {
			printBoxTitleLine(blue, "REQUEST BODY", boxWidth, false)
			fmt.Printf("%s%s%s\n", blue, body, reset)
			printHorizontalBorder(blue, boxWidth, false)
		}
	}

	if ctx.Request().URI().QueryArgs().Len() != 0 {
		for key, value := range ctx.Request().URI().QueryArgs().All() {
			log.Printf("%s: %s", key, value)
		}
	}
}

// Logs http response details: body, headers
func LogFiberResponseDetails(ctx *fiber.Ctx) {
	wrapInBox(green, "RESPONSE HEADERS", boxWidth, func() {
		for key, value := range ctx.Response().Header.All() {
			printWrappedLine(yellow, string(key), string(value))
		}
	})

	_, ok := ctx.Locals("skip-res-body-log").(bool)
	if !ok {
		body := ctx.Response().Body()
		if len(body) != 0 {
			PrintInsideHorizontalBorders(blue, "RESPONSE BODY", string(body), boxWidth)
		}
	}
}

var debugEnabled atomic.Bool

// SetDebugEnabled sets the debug mode
func SetDebugEnabled() {
	debugEnabled.Store(true)
}

// Logf is the same as 'fmt.Printf' with debug prefix,
// a color added and '\n' at the end
func Logf(format string, v ...any) {
	if !debugEnabled.Load() {
		return
	}
	debugPrefix := "[DEBUG]: "
	fmt.Printf(string(yellow)+debugPrefix+format+reset+"\n", v...)
}

// Infof prints out green info block with [INFO]: prefix
func Infof(format string, v ...any) {
	if !debugEnabled.Load() {
		return
	}
	debugPrefix := "[INFO]: "
	fmt.Printf(string(green)+debugPrefix+format+reset+"\n", v...)
}

// PrintInsideHorizontalBorders prints the text inside horizontal
// border and title in the center of upper border
func PrintInsideHorizontalBorders(color Color, title, text string, width int) {
	if !debugEnabled.Load() {
		return
	}
	printBoxTitleLine(color, title, width, false)
	fmt.Printf("%s%s%s\n", color, text, reset)
	printHorizontalBorder(color, width, false)
}

// Prints out box title either with closing characters or not: "┌", "┐"
// e.g. ┌────────────────[ RESPONSE HEADERS ]────────────────┐
func printBoxTitleLine(color Color, title string, length int, closing bool) {
	leftCorner, rightCorner := "┌", "┐"

	if !closing {
		leftCorner, rightCorner = borderChar, borderChar
	}

	// Calculate how many border characters are needed
	titleFormatted := fmt.Sprintf("[ %s ]", title)
	borderSpace := length - len(titleFormatted) - 2 // 2 for corners
	leftLen := borderSpace / 2
	rightLen := borderSpace - leftLen

	// Build the line
	line := leftCorner +
		strings.Repeat(borderChar, leftLen) +
		titleFormatted +
		strings.Repeat(borderChar, rightLen) +
		rightCorner

	fmt.Println(string(color) + line + reset)
}

// Prints out a horizontal line either with closing characters or not: "└", "┘"
func printHorizontalBorder(color Color, length int, closing bool) {
	leftCorner, rightCorner := "└", "┘"
	if !closing {
		leftCorner, rightCorner = borderChar, borderChar
	}

	line := leftCorner + strings.Repeat(borderChar, length-2) + rightCorner + reset
	fmt.Println(string(color) + line)
}

// wrapInBox wraps the output of a function call (fn) inside a styled box with a title.
func wrapInBox(color Color, title string, length int, fn func()) {
	printBoxTitleLine(color, title, length, true)
	fn()
	printHorizontalBorder(color, length, true)
}

// returns the provided string length,
// defaulting to 13 for shorter strings
func getLen(str string) int {
	if len(str) < 13 {
		return 13
	}

	return len(str)
}

// prints a formatted key-value pair within a box layout,
// wrapping the value text if it exceeds the allowed width.
func printWrappedLine(keyColor Color, key, value string) {
	prefix := fmt.Sprintf("%s│%s %s%-13s%s : ", green, reset, keyColor, key, reset)
	prefixLen := len(prefix) - len(green) - len(reset) - len(keyColor) - len(reset)
	// the actual prefix size without colors
	actualPrefixLen := getLen(key) + 5

	lineWidth := boxWidth - prefixLen
	valueLines := wrapText(value, lineWidth)

	for i, line := range valueLines {
		if i == 0 {
			if len(line) < lineWidth {
				line += strings.Repeat(" ", lineWidth-len(line))
			}
			fmt.Printf("%s%s%s %s│%s\n", prefix, reset, line, green, reset)
		} else {
			line = strings.Repeat(" ", actualPrefixLen-2) + line
			if len(line) < boxWidth-4 {
				line += strings.Repeat(" ", boxWidth-len(line)-4)
			}
			fmt.Printf("%s│ %s%s %s│%s\n", green, reset, line, green, reset)
		}
	}
}

// wrapText splits the input text into lines of at most `width` characters each.
func wrapText(text string, width int) []string {
	var lines []string
	for len(text) > width {
		lines = append(lines, text[:width])
		text = text[width:]
	}
	if text != "" {
		lines = append(lines, text)
	}
	return lines
}

// TODO: remove this and use utils.IsBigDataAction after refactoring
// and creating 'internal' package
func isLargeDataAction(ctx *fiber.Ctx) bool {
	if ctx.Method() == http.MethodPut && len(strings.Split(ctx.Path(), "/")) >= 3 {
		if !ctx.Request().URI().QueryArgs().Has("tagging") && ctx.Get("X-Amz-Copy-Source") == "" && !ctx.Request().URI().QueryArgs().Has("acl") {
			return true
		}
	}
	return false
}
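All of the exported entry points gate on the process-wide atomic debugEnabled flag, so the instrumentation can stay compiled in and cost almost nothing when debug mode is off. A usage sketch of the API above (the wiring to a command-line flag is an assumption):

debuglogger.SetDebugEnabled() // typically wired to a --debug flag

debuglogger.Logf("computed signature for access key %q", "my-access-key")
debuglogger.Infof("bucket %q created", "my-bucket")
debuglogger.PrintInsideHorizontalBorders(debuglogger.Purple, "PARSED ACL", `{"owner":"root"}`, 120)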
@@ -24,6 +24,7 @@ import (
	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/s3api/controllers"
+	"github.com/versity/versitygw/s3api/utils"
	"github.com/versity/versitygw/s3err"
	"github.com/versity/versitygw/s3log"
)
@@ -34,7 +35,6 @@ var (

func AclParser(be backend.Backend, logger s3log.AuditLogger, readonly bool) fiber.Handler {
	return func(ctx *fiber.Ctx) error {
-		isRoot, acct := ctx.Locals("isRoot").(bool), ctx.Locals("account").(auth.Account)
		path := ctx.Path()
		pathParts := strings.Split(path, "/")
		bucket := pathParts[1]
@@ -51,7 +51,9 @@ func AclParser(be backend.Backend, logger s3log.AuditLogger, readonly bool) fibe
			!ctx.Request().URI().QueryArgs().Has("versioning") &&
			!ctx.Request().URI().QueryArgs().Has("policy") &&
			!ctx.Request().URI().QueryArgs().Has("object-lock") &&
-			!ctx.Request().URI().QueryArgs().Has("ownershipControls") {
+			!ctx.Request().URI().QueryArgs().Has("ownershipControls") &&
+			!ctx.Request().URI().QueryArgs().Has("cors") {
+			isRoot, acct := utils.ContextKeyIsRoot.Get(ctx).(bool), utils.ContextKeyAccount.Get(ctx).(auth.Account)
			if err := auth.MayCreateBucket(acct, isRoot); err != nil {
				return controllers.SendXMLResponse(ctx, nil, err, &controllers.MetaOpts{Logger: logger, Action: "CreateBucket"})
			}
@@ -74,7 +76,12 @@ func AclParser(be backend.Backend, logger s3log.AuditLogger, readonly bool) fibe
			return controllers.SendResponse(ctx, err, &controllers.MetaOpts{Logger: logger})
		}

-		ctx.Locals("parsedAcl", parsedAcl)
+		// if owner is not set, set default owner to root account
+		if parsedAcl.Owner == "" {
+			parsedAcl.Owner = utils.ContextKeyRootAccessKey.Get(ctx).(string)
+		}
+
+		utils.ContextKeyParsedAcl.Set(ctx, parsedAcl)
		return ctx.Next()
	}
}
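The utils context-key helpers themselves are not part of this diff; judging from the call sites (Set, Get, IsSet), they plausibly look like the following sketch, where the key type and the string values are assumptions, not the repository's actual definitions:

type ContextKey string

const (
	ContextKeyAccount   ContextKey = "account"   // assumed key string
	ContextKeyIsRoot    ContextKey = "isRoot"    // assumed key string
	ContextKeyParsedAcl ContextKey = "parsedAcl" // assumed key string
)

// Thin wrappers over fiber's Locals that give each key a single typed home.
func (k ContextKey) Set(ctx *fiber.Ctx, v any) { ctx.Locals(string(k), v) }
func (k ContextKey) Get(ctx *fiber.Ctx) any    { return ctx.Locals(string(k)) }
func (k ContextKey) IsSet(ctx *fiber.Ctx) bool { return ctx.Locals(string(k)) != nil }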
@@ -21,13 +21,14 @@ import (
	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/metrics"
	"github.com/versity/versitygw/s3api/controllers"
+	"github.com/versity/versitygw/s3api/utils"
	"github.com/versity/versitygw/s3err"
	"github.com/versity/versitygw/s3log"
)

func IsAdmin(logger s3log.AuditLogger) fiber.Handler {
	return func(ctx *fiber.Ctx) error {
-		acct := ctx.Locals("account").(auth.Account)
+		acct := utils.ContextKeyAccount.Get(ctx).(auth.Account)
		if acct.Role != auth.RoleAdmin {
			path := ctx.Path()
			return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrAdminAccessDenied),
@@ -33,7 +33,8 @@ import (
)

const (
-	iso8601Format = "20060102T150405Z"
+	iso8601Format   = "20060102T150405Z"
+	maxObjSizeLimit = 5 * 1024 * 1024 * 1024 // 5gb
)

type RootUserConfig struct {
@@ -45,14 +46,15 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
	acct := accounts{root: root, iam: iam}

	return func(ctx *fiber.Ctx) error {
-		// If account is set in context locals, it means it was presigned url case
-		_, ok := ctx.Locals("account").(auth.Account)
-		if ok {
+		// The bucket is public, no need to check this signature
+		if utils.ContextKeyPublicBucket.IsSet(ctx) {
			return ctx.Next()
		}
+		// If ContextKeyAuthenticated is set in context locals, it means it was presigned url case
+		if utils.ContextKeyAuthenticated.IsSet(ctx) {
+			return ctx.Next()
+		}

		ctx.Locals("region", region)
		ctx.Locals("startTime", time.Now())
		authorization := ctx.Get("Authorization")
		if authorization == "" {
			return sendResponse(ctx, s3err.GetAPIError(s3err.ErrAuthHeaderEmpty), logger, mm)
@@ -63,10 +65,6 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
|
||||
return sendResponse(ctx, err, logger, mm)
|
||||
}
|
||||
|
||||
if authData.Algorithm != "AWS4-HMAC-SHA256" {
|
||||
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureVersionNotSupported), logger, mm)
|
||||
}
|
||||
|
||||
if authData.Region != region {
|
||||
return sendResponse(ctx, s3err.APIError{
|
||||
Code: "SignatureDoesNotMatch",
|
||||
@@ -75,7 +73,7 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
|
||||
}, logger, mm)
|
||||
}
|
||||
|
||||
ctx.Locals("isRoot", authData.Access == root.Access)
|
||||
utils.ContextKeyIsRoot.Set(ctx, authData.Access == root.Access)
|
||||
|
||||
account, err := acct.getAccount(authData.Access)
|
||||
if err == auth.ErrNoSuchUser {
|
||||
@@ -84,7 +82,8 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
|
||||
if err != nil {
|
||||
return sendResponse(ctx, err, logger, mm)
|
||||
}
|
||||
ctx.Locals("account", account)
|
||||
|
||||
utils.ContextKeyAccount.Set(ctx, account)
|
||||
|
||||
// Check X-Amz-Date header
|
||||
date := ctx.Get("X-Amz-Date")
|
||||
@@ -108,6 +107,17 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
|
||||
return sendResponse(ctx, err, logger, mm)
|
||||
}
|
||||
|
||||
var contentLength int64
|
||||
contentLengthStr := ctx.Get("Content-Length")
|
||||
if contentLengthStr != "" {
|
||||
contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
|
||||
//TODO: not sure if InvalidRequest should be returned in this case
|
||||
if err != nil {
|
||||
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest), logger, mm)
|
||||
}
|
||||
}
|
||||
|
||||
hashPayload := ctx.Get("X-Amz-Content-Sha256")
|
||||
if utils.IsBigDataAction(ctx) {
|
||||
// for streaming PUT actions, authorization is deferred
|
||||
// until end of stream due to need to get length and
|
||||
@@ -115,10 +125,36 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
|
||||
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
|
||||
return utils.NewAuthReader(ctx, r, authData, account.Secret, debug)
|
||||
})
|
||||
|
||||
// wrap the io.Reader with ChunkReader if x-amz-content-sha256
|
||||
// provide chunk encoding value
|
||||
if utils.IsStreamingPayload(hashPayload) {
|
||||
var err error
|
||||
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
|
||||
var cr io.Reader
|
||||
cr, err = utils.NewChunkReader(ctx, r, authData, region, account.Secret, tdate)
|
||||
return cr
|
||||
})
|
||||
if err != nil {
|
||||
return sendResponse(ctx, err, logger, mm)
|
||||
}
|
||||
|
||||
return ctx.Next()
|
||||
}
|
||||
|
||||
// Content-Length has to be set for data uploads: PutObject, UploadPart
|
||||
if contentLengthStr == "" {
|
||||
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingContentLength), logger, mm)
|
||||
}
|
||||
// the upload limit for big data actions: PutObject, UploadPart
|
||||
// is 5gb. If the size exceeds the limit, return 'EntityTooLarge' err
|
||||
if contentLength > maxObjSizeLimit {
|
||||
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrEntityTooLarge), logger, mm)
|
||||
}
|
||||
|
||||
return ctx.Next()
|
||||
}
|
||||
|
||||
hashPayload := ctx.Get("X-Amz-Content-Sha256")
|
||||
if !utils.IsSpecialPayload(hashPayload) {
|
||||
// Calculate the hash of the request payload
|
||||
hashedPayload := sha256.Sum256(ctx.Body())
|
||||
@@ -130,15 +166,6 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
|
||||
}
|
||||
}
|
||||
|
||||
var contentLength int64
|
||||
contentLengthStr := ctx.Get("Content-Length")
|
||||
if contentLengthStr != "" {
|
||||
contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
|
||||
if err != nil {
|
||||
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest), logger, mm)
|
||||
}
|
||||
}
|
||||
|
||||
err = utils.CheckValidSignature(ctx, authData, account.Secret, hashPayload, tdate, contentLength, debug)
|
||||
if err != nil {
|
||||
return sendResponse(ctx, err, logger, mm)
|
||||
|
||||
@@ -18,14 +18,15 @@ import (
	"io"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/s3api/utils"
)

func wrapBodyReader(ctx *fiber.Ctx, wr func(io.Reader) io.Reader) {
	r, ok := ctx.Locals("body-reader").(io.Reader)
	r, ok := utils.ContextKeyBodyReader.Get(ctx).(io.Reader)
	if !ok {
		r = ctx.Request().BodyStream()
	}

	r = wr(r)
	ctx.Locals("body-reader", r)
	utils.ContextKeyBodyReader.Set(ctx, r)
}

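The point of stashing the wrapped reader back in the context is that successive middlewares can layer decorators over the same body stream. A standalone sketch of that chaining pattern, using plain io.Reader composition outside fiber (countingReader is a made-up stand-in for decorators like AuthReader or ChunkReader):

package main

import (
	"fmt"
	"io"
	"strings"
)

// countingReader wraps an inner reader and observes the bytes
// flowing through it, like the gateway's auth/chunk readers do.
type countingReader struct {
	inner io.Reader
	n     int
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.inner.Read(p)
	c.n += n
	return n, err
}

func main() {
	var body io.Reader = strings.NewReader("object payload")

	// Each wrap mirrors one wrapBodyReader call: the previous
	// reader becomes the inner reader of the next decorator.
	auth := &countingReader{inner: body}
	chunk := &countingReader{inner: auth}

	data, _ := io.ReadAll(chunk)
	fmt.Printf("read %q, auth saw %d bytes, chunk saw %d bytes\n", data, auth.n, chunk.n)
}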
s3api/middlewares/bucket-object-name-validator.go (new file, 58 lines)
@@ -0,0 +1,58 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
	"net/http"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/metrics"
	"github.com/versity/versitygw/s3api/utils"
	"github.com/versity/versitygw/s3err"
	"github.com/versity/versitygw/s3log"
)

// BucketObjectNameValidator extracts and validates
// the bucket and object names from the request URI.
func BucketObjectNameValidator(l s3log.AuditLogger, mm *metrics.Manager) fiber.Handler {
	return func(ctx *fiber.Ctx) error {
		// skip the check for admin apis
		if ctx.Method() == http.MethodPatch {
			return ctx.Next()
		}

		path := ctx.Path()
		// skip the check if the operation isn't bucket/object scoped,
		// e.g. ListBuckets
		if path == "/" {
			return ctx.Next()
		}

		bucket, object := parsePath(path)

		// check if the provided bucket name is valid
		if !utils.IsValidBucketName(bucket) {
			return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidBucketName), l, mm)
		}

		// check if the provided object name is valid
		// skip for empty objects, e.g. bucket operations: HeadBucket...
		if object != "" && !utils.IsObjectNameValid(object) {
			return sendResponse(ctx, s3err.GetAPIError(s3err.ErrBadRequest), l, mm)
		}

		return ctx.Next()
	}
}
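utils.IsValidBucketName itself is not shown in this diff. As a rough illustration of the kind of check it performs, S3-style bucket naming can be approximated with a pattern like the following; this is a sketch, not the gateway's actual rule set:

package main

import (
	"fmt"
	"regexp"
)

// bucketNameRe approximates the S3 naming rules: 3-63 characters,
// lowercase letters, digits, dots, and hyphens, starting and ending
// with a letter or digit. The real IsValidBucketName may check more
// (IP-address form, consecutive dots, etc.).
var bucketNameRe = regexp.MustCompile(`^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$`)

func main() {
	for _, name := range []string{"my-bucket", "My_Bucket", "ab"} {
		fmt.Printf("%-10s valid=%v\n", name, bucketNameRe.MatchString(name))
	}
}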
@@ -1,62 +0,0 @@
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
	"io"
	"time"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/metrics"
	"github.com/versity/versitygw/s3api/utils"
	"github.com/versity/versitygw/s3log"
)

// ProcessChunkedBody initializes the chunked upload stream if the
// request appears to be a chunked upload
func ProcessChunkedBody(root RootUserConfig, iam auth.IAMService, logger s3log.AuditLogger, mm *metrics.Manager, region string) fiber.Handler {
	return func(ctx *fiber.Ctx) error {
		decodedLength := ctx.Get("X-Amz-Decoded-Content-Length")
		if decodedLength == "" {
			return ctx.Next()
		}
		// TODO: validate content length

		authData, err := utils.ParseAuthorization(ctx.Get("Authorization"))
		if err != nil {
			return sendResponse(ctx, err, logger, mm)
		}

		acct := ctx.Locals("account").(auth.Account)
		amzdate := ctx.Get("X-Amz-Date")
		date, _ := time.Parse(iso8601Format, amzdate)

		if utils.IsBigDataAction(ctx) {
			var err error
			wrapBodyReader(ctx, func(r io.Reader) io.Reader {
				var cr io.Reader
				cr, err = utils.NewChunkReader(ctx, r, authData, region, acct.Secret, date)
				return cr
			})
			if err != nil {
				return sendResponse(ctx, err, logger, mm)
			}
			return ctx.Next()
		}

		return ctx.Next()
	}
}
s3api/middlewares/host-style-parser.go (new file, 40 lines)
@@ -0,0 +1,40 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
	"fmt"
	"strings"

	"github.com/gofiber/fiber/v2"
)

// HostStyleParser is a middleware which parses the bucket name
// from the 'Host' header and prepends it to the request URL path
func HostStyleParser(virtualDomain string) fiber.Handler {
	return func(ctx *fiber.Ctx) error {
		host := string(ctx.Request().Host())
		// the host should match this pattern: '<bucket_name>.<virtual_domain>'
		bucket, _, found := strings.Cut(host, "."+virtualDomain)
		if !found || bucket == "" {
			return ctx.Next()
		}
		path := ctx.Path()
		pathStyleUrl := fmt.Sprintf("/%v%v", bucket, path)
		ctx.Path(pathStyleUrl)

		return ctx.Next()
	}
}
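For example, with virtualDomain set to a hypothetical s3.example.com, a request with Host my-bucket.s3.example.com and path /photo.jpg is rewritten to the path-style /my-bucket/photo.jpg. A minimal demonstration of the rewrite logic in isolation:

package main

import (
	"fmt"
	"strings"
)

// rewriteHostStyle mirrors the middleware's core logic: strip the
// virtual domain from the host and prepend the bucket to the path.
// "s3.example.com" below is an illustrative domain, not a default.
func rewriteHostStyle(host, path, virtualDomain string) string {
	bucket, _, found := strings.Cut(host, "."+virtualDomain)
	if !found || bucket == "" {
		return path // already path-style
	}
	return fmt.Sprintf("/%v%v", bucket, path)
}

func main() {
	fmt.Println(rewriteHostStyle("my-bucket.s3.example.com", "/photo.jpg", "s3.example.com"))
	// /my-bucket/photo.jpg
	fmt.Println(rewriteHostStyle("s3.example.com", "/my-bucket/photo.jpg", "s3.example.com"))
	// /my-bucket/photo.jpg (unchanged)
}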
@@ -15,30 +15,15 @@
package middlewares

import (
	"fmt"
	"log"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/s3api/debuglogger"
)

func RequestLogger(isDebug bool) fiber.Handler {
func DebugLogger() fiber.Handler {
	return func(ctx *fiber.Ctx) error {
		ctx.Locals("isDebug", isDebug)
		if isDebug {
			log.Println("Request headers: ")
			ctx.Request().Header.VisitAll(func(key, val []byte) {
				log.Printf("%s: %s", key, val)
			})

			if ctx.Request().URI().QueryArgs().Len() != 0 {
				fmt.Println()
				log.Println("Request query arguments: ")
				ctx.Request().URI().QueryArgs().VisitAll(func(key, val []byte) {
					log.Printf("%s: %s", key, val)
				})
			}
		}

		return ctx.Next()
		debuglogger.LogFiberRequestDetails(ctx)
		err := ctx.Next()
		debuglogger.LogFiberResponseDetails(ctx)
		return err
	}
}

@@ -45,7 +45,7 @@ func VerifyMD5Body(logger s3log.AuditLogger) fiber.Handler {
	}

	sum := md5.Sum(ctx.Body())
	calculatedSum := utils.Md5SumString(sum[:])
	calculatedSum := utils.Base64SumString(sum[:])

	if incomingSum != calculatedSum {
		return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidDigest), &controllers.MetaOpts{Logger: logger})

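The Content-MD5 header carries the base64-encoded (not hex) MD5 digest of the body, which is what the rename to Base64SumString reflects. Computing the expected header value for a known body:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	body := []byte("hello world")
	sum := md5.Sum(body)
	// Content-MD5 is the base64 of the raw 16 digest bytes.
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
	// XrY7u+Ae7tCTyyK7j1rNww==
}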
@@ -16,7 +16,7 @@ package middlewares

import (
	"io"
	"time"
	"strconv"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/auth"
@@ -30,19 +30,25 @@ func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, logger
	acct := accounts{root: root, iam: iam}

	return func(ctx *fiber.Ctx) error {
		// The bucket is public, no need to check this signature
		if utils.ContextKeyPublicBucket.IsSet(ctx) {
			return ctx.Next()
		}
		if ctx.Query("X-Amz-Signature") == "" {
			return ctx.Next()
		}

		ctx.Locals("region", region)
		ctx.Locals("startTime", time.Now())
		// Set the "authenticated" key in the context in case the authentication succeeds;
		// otherwise the middleware will return the caught error
		utils.ContextKeyAuthenticated.Set(ctx, true)

		authData, err := utils.ParsePresignedURIParts(ctx)
		if err != nil {
			return sendResponse(ctx, err, logger, mm)
		}

		ctx.Locals("isRoot", authData.Access == root.Access)
		utils.ContextKeyIsRoot.Set(ctx, authData.Access == root.Access)

		account, err := acct.getAccount(authData.Access)
		if err == auth.ErrNoSuchUser {
			return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidAccessKeyID), logger, mm)
@@ -50,9 +56,28 @@ func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, logger
		if err != nil {
			return sendResponse(ctx, err, logger, mm)
		}
		ctx.Locals("account", account)
		utils.ContextKeyAccount.Set(ctx, account)

		var contentLength int64
		contentLengthStr := ctx.Get("Content-Length")
		if contentLengthStr != "" {
			contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
			//TODO: not sure if InvalidRequest should be returned in this case
			if err != nil {
				return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest), logger, mm)
			}
		}

		if utils.IsBigDataAction(ctx) {
			// Content-Length has to be set for data uploads: PutObject, UploadPart
			if contentLengthStr == "" {
				return sendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingContentLength), logger, mm)
			}
			// the upload limit for big data actions: PutObject, UploadPart
			// is 5gb. If the size exceeds the limit, return 'EntityTooLarge' err
			if contentLength > maxObjSizeLimit {
				return sendResponse(ctx, s3err.GetAPIError(s3err.ErrEntityTooLarge), logger, mm)
			}
			wrapBodyReader(ctx, func(r io.Reader) io.Reader {
				return utils.NewPresignedAuthReader(ctx, r, authData, account.Secret, debug)
			})

s3api/middlewares/public-bucket.go (new file, 298 lines)
@@ -0,0 +1,298 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
	"io"
	"strings"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/metrics"
	"github.com/versity/versitygw/s3api/utils"
	"github.com/versity/versitygw/s3err"
	"github.com/versity/versitygw/s3log"
)

func AuthorizePublicBucketAccess(be backend.Backend, l s3log.AuditLogger, mm *metrics.Manager) fiber.Handler {
	return func(ctx *fiber.Ctx) error {
		// skip for authenticated requests
		if ctx.Query("X-Amz-Algorithm") != "" || ctx.Get("Authorization") != "" {
			return ctx.Next()
		}

		bucket, object := parsePath(ctx.Path())

		action, permission, err := detectS3Action(ctx, object == "")
		if err != nil {
			return sendResponse(ctx, err, l, mm)
		}

		err = auth.VerifyPublicAccess(ctx.Context(), be, action, permission, bucket, object)
		if err != nil {
			return sendResponse(ctx, err, l, mm)
		}

		if utils.IsBigDataAction(ctx) {
			payloadType := ctx.Get("X-Amz-Content-Sha256")
			if utils.IsUnsignedStreamingPayload(payloadType) {
				checksumType, err := utils.ExtractChecksumType(ctx)
				if err != nil {
					return sendResponse(ctx, err, l, mm)
				}

				wrapBodyReader(ctx, func(r io.Reader) io.Reader {
					var cr io.Reader
					cr, err = utils.NewUnsignedChunkReader(r, checksumType)
					return cr
				})
				if err != nil {
					return sendResponse(ctx, err, l, mm)
				}
			}
		}

		utils.ContextKeyPublicBucket.Set(ctx, true)

		return ctx.Next()
	}
}

func detectS3Action(ctx *fiber.Ctx, isBucketAction bool) (auth.Action, auth.Permission, error) {
	path := ctx.Path()
	// ListBuckets is not publicly available
	if path == "/" {
		//TODO: Still not clear what kind of error should be returned in this case (ListBuckets)
		return "", auth.PermissionRead, s3err.GetAPIError(s3err.ErrAccessDenied)
	}

	queryArgs := ctx.Context().QueryArgs()

	switch ctx.Method() {
	case fiber.MethodPatch:
		// Admin apis should always be protected
		return "", "", s3err.GetAPIError(s3err.ErrAccessDenied)
	case fiber.MethodHead:
		// HeadBucket
		if isBucketAction {
			return auth.ListBucketAction, auth.PermissionRead, nil
		}

		// HeadObject
		return auth.GetObjectAction, auth.PermissionRead, nil
	case fiber.MethodGet:
		if isBucketAction {
			if queryArgs.Has("tagging") {
				// GetBucketTagging
				return auth.GetBucketTaggingAction, auth.PermissionRead, nil
			} else if queryArgs.Has("ownershipControls") {
				// GetBucketOwnershipControls
				return auth.GetBucketOwnershipControlsAction, auth.PermissionRead, s3err.GetAPIError(s3err.ErrAnonymousGetBucketOwnership)
			} else if queryArgs.Has("versioning") {
				// GetBucketVersioning
				return auth.GetBucketVersioningAction, auth.PermissionRead, nil
			} else if queryArgs.Has("policy") {
				// GetBucketPolicy
				return auth.GetBucketPolicyAction, auth.PermissionRead, nil
			} else if queryArgs.Has("cors") {
				// GetBucketCors
				return auth.GetBucketCorsAction, auth.PermissionRead, nil
			} else if queryArgs.Has("versions") {
				// ListObjectVersions
				return auth.ListBucketVersionsAction, auth.PermissionRead, nil
			} else if queryArgs.Has("object-lock") {
				// GetObjectLockConfiguration
				return auth.GetBucketObjectLockConfigurationAction, auth.PermissionReadAcp, nil
			} else if queryArgs.Has("acl") {
				// GetBucketAcl
				return auth.GetBucketAclAction, auth.PermissionRead, nil
			} else if queryArgs.Has("uploads") {
				// ListMultipartUploads
				return auth.ListBucketMultipartUploadsAction, auth.PermissionRead, nil
			} else if queryArgs.GetUintOrZero("list-type") == 2 {
				// ListObjectsV2
				return auth.ListBucketAction, auth.PermissionRead, nil
			}
			// All the other requests are considered as ListObjects in the router
			// no matter what kind of query arguments are provided apart from the ones above

			return auth.ListBucketAction, auth.PermissionRead, nil
		}

		if queryArgs.Has("tagging") {
			// GetObjectTagging
			return auth.GetObjectTaggingAction, auth.PermissionRead, nil
		} else if queryArgs.Has("retention") {
			// GetObjectRetention
			return auth.GetObjectRetentionAction, auth.PermissionRead, nil
		} else if queryArgs.Has("legal-hold") {
			// GetObjectLegalHold
			return auth.GetObjectLegalHoldAction, auth.PermissionReadAcp, nil
		} else if queryArgs.Has("acl") {
			// GetObjectAcl
			return auth.GetObjectAclAction, auth.PermissionRead, nil
		} else if queryArgs.Has("attributes") {
			// GetObjectAttributes
			return auth.GetObjectAttributesAction, auth.PermissionRead, nil
		} else if queryArgs.Has("uploadId") {
			// ListParts
			return auth.ListMultipartUploadPartsAction, auth.PermissionRead, nil
		}

		// All the other requests are considered as GetObject in the router
		// no matter what kind of query arguments are provided apart from the ones above
		if queryArgs.Has("versionId") {
			return auth.GetObjectVersionAction, auth.PermissionRead, nil
		}
		return auth.GetObjectAction, auth.PermissionRead, nil
	case fiber.MethodPut:
		if isBucketAction {
			if queryArgs.Has("tagging") {
				// PutBucketTagging
				return auth.PutBucketTaggingAction, auth.PermissionWrite, nil
			}
			if queryArgs.Has("ownershipControls") {
				// PutBucketOwnershipControls
				return auth.PutBucketOwnershipControlsAction, auth.PermissionWrite, s3err.GetAPIError(s3err.ErrAnonymousPutBucketOwnership)
			}
			if queryArgs.Has("versioning") {
				// PutBucketVersioning
				return auth.PutBucketVersioningAction, auth.PermissionWrite, nil
			}
			if queryArgs.Has("object-lock") {
				// PutObjectLockConfiguration
				return auth.PutBucketObjectLockConfigurationAction, auth.PermissionWrite, nil
			}
			if queryArgs.Has("cors") {
				// PutBucketCors
				return auth.PutBucketCorsAction, auth.PermissionWrite, nil
			}
			if queryArgs.Has("policy") {
				// PutBucketPolicy
				return auth.PutBucketPolicyAction, auth.PermissionWrite, nil
			}
			if queryArgs.Has("acl") {
				// PutBucketAcl
				return auth.PutBucketAclAction, auth.PermissionWrite, s3err.GetAPIError(s3err.ErrAnonymousRequest)
			}

			// All the other requests are considered as 'CreateBucket' in the router
			return "", "", s3err.GetAPIError(s3err.ErrAnonymousRequest)
		}

		if queryArgs.Has("tagging") {
			// PutObjectTagging
			return auth.PutObjectTaggingAction, auth.PermissionWrite, nil
		}
		if queryArgs.Has("retention") {
			// PutObjectRetention
			return auth.PutObjectRetentionAction, auth.PermissionWrite, nil
		}
		if queryArgs.Has("legal-hold") {
			// PutObjectLegalHold
			return auth.PutObjectLegalHoldAction, auth.PermissionWrite, nil
		}
		if queryArgs.Has("acl") {
			// PutObjectAcl
			return auth.PutObjectAclAction, auth.PermissionWriteAcp, s3err.GetAPIError(s3err.ErrAnonymousRequest)
		}
		if queryArgs.Has("uploadId") && queryArgs.Has("partNumber") {
			if ctx.Get("X-Amz-Copy-Source") != "" {
				// UploadPartCopy
				//TODO: Add public access check for copy-source
				// Return AccessDenied for now
				return auth.PutObjectAction, auth.PermissionWrite, s3err.GetAPIError(s3err.ErrAccessDenied)
			}

			utils.ContextKeyBodyReader.Set(ctx, ctx.Request().BodyStream())
			// UploadPart
			return auth.PutObjectAction, auth.PermissionWrite, nil
		}
		if ctx.Get("X-Amz-Copy-Source") != "" {
			return auth.PutObjectAction, auth.PermissionWrite, s3err.GetAPIError(s3err.ErrAnonymousCopyObject)
		}

		utils.ContextKeyBodyReader.Set(ctx, ctx.Request().BodyStream())
		// All the other requests are considered as 'PutObject' in the router
		return auth.PutObjectAction, auth.PermissionWrite, nil
	case fiber.MethodPost:
		if isBucketAction {
			// DeleteObjects
			// FIXME: should be fixed with https://github.com/versity/versitygw/issues/1327
			// Return AccessDenied for now
			return auth.DeleteObjectAction, auth.PermissionWrite, s3err.GetAPIError(s3err.ErrAccessDenied)
		}

		if queryArgs.Has("restore") {
			return auth.RestoreObjectAction, auth.PermissionWrite, nil
		}
		if queryArgs.Has("select") && ctx.Query("select-type") == "2" {
			// SelectObjectContent
			return auth.GetObjectAction, auth.PermissionRead, s3err.GetAPIError(s3err.ErrAnonymousRequest)
		}
		if queryArgs.Has("uploadId") {
			// CompleteMultipartUpload
			return auth.PutObjectAction, auth.PermissionWrite, nil
		}

		// All the other requests are considered as 'CreateMultipartUpload' in the router
		return "", "", s3err.GetAPIError(s3err.ErrAnonymousCreateMp)
	case fiber.MethodDelete:
		if isBucketAction {
			if queryArgs.Has("tagging") {
				// DeleteBucketTagging
				return auth.PutBucketTaggingAction, auth.PermissionWrite, nil
			}
			if queryArgs.Has("ownershipControls") {
				// DeleteBucketOwnershipControls
				return auth.PutBucketOwnershipControlsAction, auth.PermissionWrite, s3err.GetAPIError(s3err.ErrAnonymousPutBucketOwnership)
			}
			if queryArgs.Has("policy") {
				// DeleteBucketPolicy
				return auth.PutBucketPolicyAction, auth.PermissionWrite, nil
			}
			if queryArgs.Has("cors") {
				// DeleteBucketCors
				return auth.PutBucketCorsAction, auth.PermissionWrite, nil
			}

			// All the other requests are considered as 'DeleteBucket' in the router
			return auth.DeleteBucketAction, auth.PermissionWrite, nil
		}

		if queryArgs.Has("tagging") {
			// DeleteObjectTagging
			return auth.PutObjectTaggingAction, auth.PermissionWrite, nil
		}
		if queryArgs.Has("uploadId") {
			// AbortMultipartUpload
			return auth.AbortMultipartUploadAction, auth.PermissionWrite, nil
		}
		// All the other requests are considered as 'DeleteObject' in the router
		return auth.DeleteObjectAction, auth.PermissionWrite, nil
	default:
		// If no action is detected, return AccessDenied
		return "", "", s3err.GetAPIError(s3err.ErrAccessDenied)
	}
}

// parsePath extracts the bucket and object names from the path
func parsePath(path string) (string, string) {
	p := strings.TrimPrefix(path, "/")
	bucket, object, _ := strings.Cut(p, "/")

	return bucket, object
}
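parsePath splits on the first slash only, so object keys containing slashes survive intact. A quick check of that behavior:

package main

import (
	"fmt"
	"strings"
)

func parsePath(path string) (string, string) {
	p := strings.TrimPrefix(path, "/")
	bucket, object, _ := strings.Cut(p, "/")
	return bucket, object
}

func main() {
	b, o := parsePath("/my-bucket/dir/sub/key.txt")
	fmt.Printf("bucket=%q object=%q\n", b, o)
	// bucket="my-bucket" object="dir/sub/key.txt"

	b, o = parsePath("/my-bucket")
	fmt.Printf("bucket=%q object=%q\n", b, o)
	// bucket="my-bucket" object="" -> a bucket-level operation
}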
s3api/middlewares/set-default-keys.go (new file, 37 lines)
@@ -0,0 +1,37 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
	"time"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/s3api/utils"
)

func SetDefaultValues(root RootUserConfig, region string) fiber.Handler {
	return func(ctx *fiber.Ctx) error {
		// These are necessary for the server access logs
		utils.ContextKeyRegion.Set(ctx, region)
		utils.ContextKeyStartTime.Set(ctx, time.Now())
		utils.ContextKeyRootAccessKey.Set(ctx, root.Access)
		// Set the account and isRoot to some default values, to avoid panics
		// in case of public buckets
		utils.ContextKeyAccount.Set(ctx, auth.Account{})
		utils.ContextKeyIsRoot.Set(ctx, false)
		return ctx.Next()
	}
}
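The defaults matter because an unchecked type assertion on a missing context local panics. A minimal sketch of the failure mode the defaults prevent, using a plain map as a stand-in for fiber's Locals store:

package main

import "fmt"

type account struct{ Access string }

func main() {
	locals := map[string]any{} // stand-in for ctx.Locals storage

	// Comma-ok form is safe, but every call site must remember it.
	if acct, ok := locals["account"].(account); ok {
		fmt.Println(acct.Access)
	}

	// Pre-seeding a zero value lets later code assert unconditionally.
	locals["account"] = account{}
	acct := locals["account"].(account) // would panic without the default
	fmt.Printf("account=%+v\n", acct)
}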
@@ -26,7 +26,7 @@ import (

func DecodeURL(logger s3log.AuditLogger, mm *metrics.Manager) fiber.Handler {
	return func(ctx *fiber.Ctx) error {
		unescp, err := url.QueryUnescape(string(ctx.Request().URI().PathOriginal()))
		unescp, err := url.PathUnescape(string(ctx.Request().URI().PathOriginal()))
		if err != nil {
			return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidURI), &controllers.MetaOpts{Logger: logger, MetricsMng: mm})
		}

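The switch from QueryUnescape to PathUnescape matters for object keys containing '+': query unescaping turns '+' into a space, while path unescaping leaves it alone. Demonstrated:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	raw := "/bucket/a+b%20c"

	q, _ := url.QueryUnescape(raw)
	p, _ := url.PathUnescape(raw)

	fmt.Println(q) // /bucket/a b c   ('+' wrongly becomes a space)
	fmt.Println(p) // /bucket/a+b c   ('+' preserved, %20 decoded)
}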
@@ -42,7 +42,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
	app.Patch("/delete-user", middlewares.IsAdmin(logger), adminController.DeleteUser)

	// UpdateUser admin api
	app.Patch("update-user", middlewares.IsAdmin(logger), adminController.UpdateUser)
	app.Patch("/update-user", middlewares.IsAdmin(logger), adminController.UpdateUser)

	// ListUsers admin api
	app.Patch("/list-users", middlewares.IsAdmin(logger), adminController.ListUsers)

@@ -29,15 +29,16 @@ import (
)

type S3ApiServer struct {
	app      *fiber.App
	backend  backend.Backend
	router   *S3ApiRouter
	port     string
	cert     *tls.Certificate
	quiet    bool
	debug    bool
	readonly bool
	health   string
	app           *fiber.App
	backend       backend.Backend
	router        *S3ApiRouter
	port          string
	cert          *tls.Certificate
	quiet         bool
	debug         bool
	readonly      bool
	health        string
	virtualDomain string
}

func New(
@@ -76,12 +77,29 @@ func New(
		})
	}
	app.Use(middlewares.DecodeURL(l, mm))
	app.Use(middlewares.RequestLogger(server.debug))

	// initialize the host-style parser if a virtual domain is specified
	if server.virtualDomain != "" {
		app.Use(middlewares.HostStyleParser(server.virtualDomain))
	}

	// initialize the default value setter middleware
	app.Use(middlewares.SetDefaultValues(root, region))

	// initialize the debug logger in debug mode
	if server.debug {
		app.Use(middlewares.DebugLogger())
	}

	// initialize the bucket/object name validator
	app.Use(middlewares.BucketObjectNameValidator(l, mm))

	// Public buckets access checker
	app.Use(middlewares.AuthorizePublicBucketAccess(be, l, mm))

	// Authentication middlewares
	app.Use(middlewares.VerifyPresignedV4Signature(root, iam, l, mm, region, server.debug))
	app.Use(middlewares.VerifyV4Signature(root, iam, l, mm, region, server.debug))
	app.Use(middlewares.ProcessChunkedBody(root, iam, l, mm, region))
	app.Use(middlewares.VerifyMD5Body(l))
	app.Use(middlewares.AclParser(be, l, server.readonly))

@@ -122,6 +140,11 @@ func WithReadOnly() Option {
	return func(s *S3ApiServer) { s.readonly = true }
}

// WithHostStyle enables host-style bucket addressing on the server
func WithHostStyle(virtualDomain string) Option {
	return func(s *S3ApiServer) { s.virtualDomain = virtualDomain }
}

func (sa *S3ApiServer) Serve() (err error) {
	if sa.cert != nil {
		return sa.app.ListenTLSWithCertificate(sa.port, *sa.cert)

@@ -56,7 +56,7 @@ func NewAuthReader(ctx *fiber.Ctx, r io.Reader, auth AuthData, secret string, de
	var hr *HashReader
	hashPayload := ctx.Get("X-Amz-Content-Sha256")
	if !IsSpecialPayload(hashPayload) {
		hr, _ = NewHashReader(r, "", HashTypeSha256)
		hr, _ = NewHashReader(r, "", HashTypeSha256Hex)
	} else {
		hr, _ = NewHashReader(r, "", HashTypeNone)
	}
@@ -190,6 +190,10 @@ func ParseAuthorization(authorization string) (AuthData, error) {

	algo := authParts[0]

	if algo != "AWS4-HMAC-SHA256" {
		return a, s3err.GetAPIError(s3err.ErrSignatureVersionNotSupported)
	}

	kvData := authParts[1]
	kvPairs := strings.Split(kvData, ",")
	// we are expecting at least Credential, SignedHeaders, and Signature

@@ -17,9 +17,18 @@ package utils
import (
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/s3api/debuglogger"
	"github.com/versity/versitygw/s3err"
)

const (
	maxObjSizeLimit = 5 * 1024 * 1024 * 1024 // 5gb
)

type payloadType string
@@ -33,6 +42,14 @@ const (
	payloadTypeStreamingEcdsaTrailer payloadType = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER"
)

func getPayloadTypeNotSupportedErr(p payloadType) error {
	return s3err.APIError{
		HTTPStatusCode: http.StatusNotImplemented,
		Code:           "NotImplemented",
		Description:    fmt.Sprintf("The chunk encoding algorithm %v is not supported.", p),
	}
}

var (
	specialValues = map[payloadType]bool{
		payloadTypeUnsigned: true,
@@ -71,29 +88,82 @@ func (c checksumType) isValid() bool {
		c == checksumTypeCrc64nvme
}

// IsSpecialPayload checks for streaming/unsigned authorization types
// ExtractChecksumType extracts and validates the checksum type from the 'X-Amz-Trailer' header
func ExtractChecksumType(ctx *fiber.Ctx) (checksumType, error) {
	trailer := ctx.Get("X-Amz-Trailer")
	chType := checksumType(strings.ToLower(trailer))
	if chType != "" && !chType.isValid() {
		debuglogger.Logf("invalid value for 'X-Amz-Trailer': %v", chType)
		return "", s3err.GetAPIError(s3err.ErrTrailerHeaderNotSupported)
	}

	return chType, nil
}

// IsSpecialPayload checks for special authorization types
func IsSpecialPayload(str string) bool {
	return specialValues[payloadType(str)]
}

// IsUnsignedStreamingPayload checks if the provided string is the unsigned payload trailer type
func IsUnsignedStreamingPayload(str string) bool {
	return payloadType(str) == payloadTypeStreamingUnsignedTrailer
}

// IsStreamingPayload checks for streaming (chunk-encoded) payload types
func IsStreamingPayload(str string) bool {
	pt := payloadType(str)
	return pt == payloadTypeStreamingUnsignedTrailer ||
		pt == payloadTypeStreamingSigned ||
		pt == payloadTypeStreamingSignedTrailer
}

func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time) (io.Reader, error) {
	decContLengthStr := ctx.Get("X-Amz-Decoded-Content-Length")
	if decContLengthStr == "" {
		debuglogger.Logf("missing required header 'X-Amz-Decoded-Content-Length'")
		return nil, s3err.GetAPIError(s3err.ErrMissingContentLength)
	}
	decContLength, err := strconv.ParseInt(decContLengthStr, 10, 64)
	//TODO: not sure if InvalidRequest should be returned in this case
	if err != nil {
		debuglogger.Logf("invalid value for 'X-Amz-Decoded-Content-Length': %v", decContLengthStr)
		return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
	}

	if decContLength > maxObjSizeLimit {
		debuglogger.Logf("the object size exceeds the allowed limit: (size): %v, (limit): %v", decContLength, maxObjSizeLimit)
		return nil, s3err.GetAPIError(s3err.ErrEntityTooLarge)
	}

	contentSha256 := payloadType(ctx.Get("X-Amz-Content-Sha256"))
	if !contentSha256.isValid() {
		//TODO: Add proper APIError
		debuglogger.Logf("invalid value for 'X-Amz-Content-Sha256': %v", contentSha256)
		return nil, fmt.Errorf("invalid x-amz-content-sha256: %v", string(contentSha256))
	}

	checksumType := checksumType(ctx.Get("X-Amz-Trailer"))
	if checksumType != "" && !checksumType.isValid() {
		//TODO: Add proper APIError
		return nil, fmt.Errorf("invalid X-Amz-Trailer: %v", checksumType)
	checksumType, err := ExtractChecksumType(ctx)
	if err != nil {
		return nil, err
	}
	if contentSha256 != payloadTypeStreamingSigned && checksumType == "" {
		debuglogger.Logf("empty value for required trailer header 'X-Amz-Trailer': %v", checksumType)
		return nil, s3err.GetAPIError(s3err.ErrTrailerHeaderNotSupported)
	}

	switch contentSha256 {
	case payloadTypeStreamingUnsignedTrailer:
		return NewUnsignedChunkReader(r, checksumType)
	//TODO: Add other chunk readers
	case payloadTypeStreamingSignedTrailer:
		return NewSignedChunkReader(r, authdata, region, secret, date, checksumType)
	case payloadTypeStreamingSigned:
		return NewSignedChunkReader(r, authdata, region, secret, date, "")
	// return not supported for:
	// - STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD
	// - STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER
	default:
		debuglogger.Logf("unsupported chunk reader algorithm: %v", contentSha256)
		return nil, getPayloadTypeNotSupportedErr(contentSha256)
	}

	return NewSignedChunkReader(r, authdata, region, secret, date)
}

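For reference, the streaming payload types correspond to S3's aws-chunked body format, where each chunk is length-prefixed and, for the signed variants, carries a per-chunk signature. A hedged sketch of decoding the unsigned framing only; the real NewUnsignedChunkReader also parses and verifies the trailing checksum header, which this sketch skips:

package main

import (
	"bufio"
	"fmt"
	"io"
	"strconv"
	"strings"
)

// decodeChunked reads simplified aws-chunked framing:
// "<hex-size>\r\n<data>\r\n" repeated, ending with a zero-size
// chunk followed by trailers (x-amz-checksum-*).
func decodeChunked(r io.Reader) (string, error) {
	br := bufio.NewReader(r)
	var out strings.Builder
	for {
		header, err := br.ReadString('\n')
		if err != nil {
			return "", err
		}
		size, err := strconv.ParseInt(strings.TrimSpace(header), 16, 64)
		if err != nil {
			return "", err
		}
		if size == 0 {
			return out.String(), nil // trailers follow; not verified here
		}
		if _, err := io.CopyN(&out, br, size); err != nil {
			return "", err
		}
		br.Discard(2) // skip the \r\n after the chunk data
	}
}

func main() {
	body := "5\r\nhello\r\n6\r\n world\r\n0\r\nx-amz-checksum-crc32:AAAAAA==\r\n\r\n"
	data, err := decodeChunked(strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	fmt.Println(data) // hello world
}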
s3api/utils/context-keys.go (new file, 65 lines)
@@ -0,0 +1,65 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package utils

import (
	"github.com/gofiber/fiber/v2"
)

// Region, StartTime, IsRoot, Account, AccessKey context locals
// are set to default values in middlewares.SetDefaultValues
// to avoid the nil interface conversions
type ContextKey string

const (
	ContextKeyRegion         ContextKey = "region"
	ContextKeyStartTime      ContextKey = "start-time"
	ContextKeyIsRoot         ContextKey = "is-root"
	ContextKeyRootAccessKey  ContextKey = "root-access-key"
	ContextKeyAccount        ContextKey = "account"
	ContextKeyAuthenticated  ContextKey = "authenticated"
	ContextKeyPublicBucket   ContextKey = "public-bucket"
	ContextKeyParsedAcl      ContextKey = "parsed-acl"
	ContextKeySkipResBodyLog ContextKey = "skip-res-body-log"
	ContextKeyBodyReader     ContextKey = "body-reader"
)

func (ck ContextKey) Values() []ContextKey {
	return []ContextKey{
		ContextKeyRegion,
		ContextKeyStartTime,
		ContextKeyIsRoot,
		ContextKeyRootAccessKey,
		ContextKeyAccount,
		ContextKeyAuthenticated,
		ContextKeyPublicBucket,
		ContextKeyParsedAcl,
		ContextKeySkipResBodyLog,
		ContextKeyBodyReader,
	}
}

func (ck ContextKey) Set(ctx *fiber.Ctx, val any) {
	ctx.Locals(string(ck), val)
}

func (ck ContextKey) IsSet(ctx *fiber.Ctx) bool {
	val := ctx.Locals(string(ck))
	return val != nil
}

func (ck ContextKey) Get(ctx *fiber.Ctx) any {
	return ctx.Locals(string(ck))
}
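These typed key helpers replace raw string keys scattered across the middlewares. Usage inside any handler looks roughly like this; the route is illustrative and assumes the handler runs after SetDefaultValues:

package main

import (
	"fmt"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/s3api/utils"
)

func main() {
	app := fiber.New()
	app.Get("/", func(ctx *fiber.Ctx) error {
		// Typed setters/getters replace raw ctx.Locals("is-root") strings.
		utils.ContextKeyIsRoot.Set(ctx, true)

		if utils.ContextKeyIsRoot.IsSet(ctx) {
			isRoot := utils.ContextKeyIsRoot.Get(ctx).(bool)
			return ctx.SendString(fmt.Sprintf("isRoot=%v", isRoot))
		}
		return ctx.Next()
	})
	// wiring only; no listener started in this sketch
}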
s3api/utils/crc.go (new file, 180 lines)
@@ -0,0 +1,180 @@
// Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
//    claim that you wrote the original software. If you use this software
//    in a product, an acknowledgment in the product documentation would be
//    appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
//    misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
//
// Jean-loup Gailly        Mark Adler
// jloup@gzip.org          madler@alumni.caltech.edu

// Original implementation is from
// https://github.com/vimeo/go-util/blob/8cd4c737f091d9317f72b25df78ce6cf869f7d30/crc32combine/crc32combine.go
// extended for crc64 support.

// Following is ported from C to Go in 2016 by Justin Ruggles, with minimal alteration.
// Used uint for unsigned long. Used uint32 for input arguments in order to match
// the Go hash/crc32 package. zlib CRC32 combine (https://github.com/madler/zlib)

package utils

import (
	"hash/crc64"
)

const crc64NVME = 0x9a6c_9329_ac4b_c9b5

var crc64NVMETable = crc64.MakeTable(crc64NVME)

func gf2MatrixTimes(mat []uint64, vec uint64) uint64 {
	var sum uint64

	for vec != 0 {
		if vec&1 != 0 {
			sum ^= mat[0]
		}
		vec >>= 1
		mat = mat[1:]
	}
	return sum
}

func gf2MatrixSquare(square, mat []uint64) {
	if len(square) != len(mat) {
		panic("square matrix size mismatch")
	}
	for n := range mat {
		square[n] = gf2MatrixTimes(mat, mat[n])
	}
}

// crc32Combine returns the combined CRC-32 hash value of the two passed CRC-32
// hash values crc1 and crc2. poly represents the generator polynomial
// and len2 specifies the byte length that the crc2 hash covers.
func crc32Combine(poly uint32, crc1, crc2 uint32, len2 int64) uint32 {
	// degenerate case (also disallow negative lengths)
	if len2 <= 0 {
		return crc1
	}

	even := make([]uint64, 32) // even-power-of-two zeros operator
	odd := make([]uint64, 32)  // odd-power-of-two zeros operator

	// put operator for one zero bit in odd
	odd[0] = uint64(poly) // CRC-32 polynomial
	row := uint64(1)
	for n := 1; n < 32; n++ {
		odd[n] = row
		row <<= 1
	}

	// put operator for two zero bits in even
	gf2MatrixSquare(even, odd)

	// put operator for four zero bits in odd
	gf2MatrixSquare(odd, even)

	// apply len2 zeros to crc1 (first square will put the operator for one
	// zero byte, eight zero bits, in even)
	crc1n := uint64(crc1)
	for {
		// apply zeros operator for this bit of len2
		gf2MatrixSquare(even, odd)
		if len2&1 != 0 {
			crc1n = gf2MatrixTimes(even, crc1n)
		}
		len2 >>= 1

		// if no more bits set, then done
		if len2 == 0 {
			break
		}

		// another iteration of the loop with odd and even swapped
		gf2MatrixSquare(odd, even)
		if len2&1 != 0 {
			crc1n = gf2MatrixTimes(odd, crc1n)
		}
		len2 >>= 1

		// if no more bits set, then done
		if len2 == 0 {
			break
		}
	}

	// return combined crc
	crc1n ^= uint64(crc2)
	return uint32(crc1n)
}

// crc64Combine returns the combined CRC-64 hash value of the two passed CRC-64
// hash values crc1 and crc2. poly represents the generator polynomial
// and len2 specifies the byte length that the crc2 hash covers.
func crc64Combine(poly uint64, crc1, crc2 uint64, len2 int64) uint64 {
	// degenerate case (also disallow negative lengths)
	if len2 <= 0 {
		return crc1
	}

	even := make([]uint64, 64) // even-power-of-two zeros operator
	odd := make([]uint64, 64)  // odd-power-of-two zeros operator

	// put operator for one zero bit in odd
	odd[0] = poly // CRC-64 polynomial
	row := uint64(1)
	for n := 1; n < 64; n++ {
		odd[n] = row
		row <<= 1
	}

	// put operator for two zero bits in even
	gf2MatrixSquare(even, odd)

	// put operator for four zero bits in odd
	gf2MatrixSquare(odd, even)

	// apply len2 zeros to crc1 (first square will put the operator for one
	// zero byte, eight zero bits, in even)
	crc1n := crc1
	for {
		// apply zeros operator for this bit of len2
		gf2MatrixSquare(even, odd)
		if len2&1 != 0 {
			crc1n = gf2MatrixTimes(even, crc1n)
		}
		len2 >>= 1

		// if no more bits set, then done
		if len2 == 0 {
			break
		}

		// another iteration of the loop with odd and even swapped
		gf2MatrixSquare(odd, even)
		if len2&1 != 0 {
			crc1n = gf2MatrixTimes(odd, crc1n)
		}
		len2 >>= 1

		// if no more bits set, then done
		if len2 == 0 {
			break
		}
	}

	// return combined crc
	crc1n ^= crc2
	return crc1n
}
s3api/utils/crc_test.go (new file, 57 lines)
@@ -0,0 +1,57 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package utils

import (
	"hash/crc32"
	"hash/crc64"
	"testing"
)

func TestCRC32Combine(t *testing.T) {
	data := []byte("The quick brown fox jumps over the lazy dog")
	mid := len(data) / 2
	part1 := data[:mid]
	part2 := data[mid:]

	var poly uint32 = crc32.IEEE
	tab := crc32.MakeTable(poly)
	crc1 := crc32.Checksum(part1, tab)
	crc2 := crc32.Checksum(part2, tab)
	combined := crc32Combine(poly, crc1, crc2, int64(len(part2)))
	full := crc32.Checksum(data, tab)

	if combined != full {
		t.Errorf("crc32Combine failed: got %08x, want %08x", combined, full)
	}
}

func TestCRC64Combine(t *testing.T) {
	data := []byte("The quick brown fox jumps over the lazy dog")
	mid := len(data) / 2
	part1 := data[:mid]
	part2 := data[mid:]

	var poly uint64 = crc64NVME
	tab := crc64NVMETable
	crc1 := crc64.Checksum(part1, tab)
	crc2 := crc64.Checksum(part2, tab)
	combined := crc64Combine(poly, crc1, crc2, int64(len(part2)))
	full := crc64.Checksum(data, tab)

	if combined != full {
		t.Errorf("crc64Combine failed: got %016x, want %016x", combined, full)
	}
}
@@ -16,13 +16,18 @@ package utils
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"hash/crc64"
|
||||
"io"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
)
|
||||
|
||||
@@ -31,11 +36,21 @@ type HashType string
|
||||
|
||||
const (
|
||||
// HashTypeMd5 generates MD5 checksum for the data stream
|
||||
HashTypeMd5 = "md5"
|
||||
// HashTypeSha256 generates SHA256 checksum for the data stream
|
||||
HashTypeSha256 = "sha256"
|
||||
HashTypeMd5 HashType = "md5"
|
||||
// HashTypeSha256 generates SHA256 Base64-Encoded checksum for the data stream
|
||||
HashTypeSha256 HashType = "sha256"
|
||||
// HashTypeSha256Hex generates SHA256 hex encoded checksum for the data stream
|
||||
HashTypeSha256Hex HashType = "sha256-hex"
|
||||
// HashTypeSha1 generates SHA1 Base64-Encoded checksum for the data stream
|
||||
HashTypeSha1 HashType = "sha1"
|
||||
// HashTypeCRC32 generates CRC32 Base64-Encoded checksum for the data stream
|
||||
HashTypeCRC32 HashType = "crc32"
|
||||
// HashTypeCRC32C generates CRC32C Base64-Encoded checksum for the data stream
|
||||
HashTypeCRC32C HashType = "crc32c"
|
||||
// HashTypeCRC64NVME generates CRC64NVME Base64-Encoded checksum for the data stream
|
||||
HashTypeCRC64NVME HashType = "crc64nvme"
|
||||
// HashTypeNone is a no-op checksum for the data stream
|
||||
HashTypeNone = "none"
|
||||
HashTypeNone HashType = "none"
|
||||
)
|
||||
|
||||
// HashReader is an io.Reader that calculates the checksum
|
||||
@@ -62,8 +77,18 @@ func NewHashReader(r io.Reader, expectedSum string, ht HashType) (*HashReader, e
|
||||
switch ht {
|
||||
case HashTypeMd5:
|
||||
hash = md5.New()
|
||||
case HashTypeSha256Hex:
|
||||
hash = sha256.New()
|
||||
case HashTypeSha256:
|
||||
hash = sha256.New()
|
||||
case HashTypeSha1:
|
||||
hash = sha1.New()
|
||||
case HashTypeCRC32:
|
||||
hash = crc32.NewIEEE()
|
||||
case HashTypeCRC32C:
|
||||
hash = crc32.New(crc32.MakeTable(crc32.Castagnoli))
|
||||
case HashTypeCRC64NVME:
|
||||
hash = crc64.New(crc64NVMETable)
|
||||
case HashTypeNone:
|
||||
hash = noop{}
|
||||
default:
|
||||
@@ -88,15 +113,40 @@ func (hr *HashReader) Read(p []byte) (int, error) {
|
||||
if errors.Is(readerr, io.EOF) && hr.sum != "" {
|
||||
switch hr.hashType {
|
||||
case HashTypeMd5:
|
||||
sum := base64.StdEncoding.EncodeToString(hr.hash.Sum(nil))
|
||||
sum := hr.Sum()
|
||||
if sum != hr.sum {
|
||||
return n, s3err.GetAPIError(s3err.ErrInvalidDigest)
|
||||
}
|
||||
case HashTypeSha256:
|
||||
sum := hex.EncodeToString(hr.hash.Sum(nil))
|
||||
case HashTypeSha256Hex:
|
||||
sum := hr.Sum()
|
||||
if sum != hr.sum {
|
||||
return n, s3err.GetAPIError(s3err.ErrContentSHA256Mismatch)
|
||||
}
|
||||
case HashTypeCRC32:
|
||||
sum := hr.Sum()
|
||||
if sum != hr.sum {
|
||||
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmCrc32)
|
||||
}
|
||||
case HashTypeCRC32C:
|
||||
sum := hr.Sum()
|
||||
if sum != hr.sum {
|
||||
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmCrc32c)
|
||||
}
|
||||
case HashTypeSha1:
|
||||
sum := hr.Sum()
|
||||
if sum != hr.sum {
|
||||
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmSha1)
|
||||
}
|
||||
case HashTypeSha256:
|
||||
sum := hr.Sum()
|
||||
if sum != hr.sum {
|
||||
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmSha256)
|
||||
}
|
||||
case HashTypeCRC64NVME:
|
||||
sum := hr.Sum()
|
||||
if sum != hr.sum {
|
||||
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmCrc64nvme)
|
||||
}
|
||||
default:
|
||||
return n, errInvalidHashType
|
||||
}
|
||||
@@ -104,20 +154,38 @@ func (hr *HashReader) Read(p []byte) (int, error) {
|
||||
return n, readerr
|
||||
}
|
||||
|
||||
func (hr *HashReader) SetReader(r io.Reader) {
|
||||
hr.r = r
|
||||
}
|
||||
|
||||
// Sum returns the checksum hash of the data read so far
|
||||
func (hr *HashReader) Sum() string {
|
||||
switch hr.hashType {
|
||||
case HashTypeMd5:
|
||||
return Md5SumString(hr.hash.Sum(nil))
|
||||
case HashTypeSha256:
|
||||
return Base64SumString(hr.hash.Sum(nil))
|
||||
case HashTypeSha256Hex:
|
||||
return hex.EncodeToString(hr.hash.Sum(nil))
|
||||
case HashTypeCRC32:
|
||||
return Base64SumString(hr.hash.Sum(nil))
|
||||
case HashTypeCRC32C:
|
||||
return Base64SumString(hr.hash.Sum(nil))
|
||||
case HashTypeSha1:
|
||||
return Base64SumString(hr.hash.Sum(nil))
|
||||
case HashTypeSha256:
|
||||
return Base64SumString(hr.hash.Sum(nil))
|
||||
case HashTypeCRC64NVME:
|
||||
return Base64SumString(hr.hash.Sum(nil))
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// Md5SumString converts the hash bytes to the string checksum value
|
||||
func Md5SumString(b []byte) string {
|
||||
func (hr *HashReader) Type() HashType {
|
||||
return hr.hashType
|
||||
}
|
||||
|
||||
// Base64SumString converts the hash bytes to the b64 encoded string checksum value
|
||||
func Base64SumString(b []byte) string {
|
||||
return base64.StdEncoding.EncodeToString(b)
|
||||
}
|
||||
|
||||
@@ -128,3 +196,161 @@ func (n noop) Sum(b []byte) []byte { return []byte{} }
|
||||
func (n noop) Reset() {}
|
||||
func (n noop) Size() int { return 0 }
|
||||
func (n noop) BlockSize() int { return 1 }
|
||||
|
||||
// IsChecksumComposable tests if the final foll object crc can be calculated
|
||||
// based on the part crc values.
|
||||
func IsChecksumComposable(algo types.ChecksumAlgorithm) bool {
|
||||
switch algo {
|
||||
case types.ChecksumAlgorithmCrc32, types.ChecksumAlgorithmCrc32c, types.ChecksumAlgorithmCrc64nvme:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// AddCRCChecksum calculates the composite CRC checksum after adding the part crc.
// Only CRC32, CRC32C, and CRC64NVME are supported. The input checksums must be base64-encoded strings.
func AddCRCChecksum(algo types.ChecksumAlgorithm, crc, partCrc string, partLen int64) (string, error) {
	switch algo {
	case types.ChecksumAlgorithmCrc32:
		data, err := base64.StdEncoding.DecodeString(partCrc)
		if err != nil {
			return "", fmt.Errorf("base64 decode partCrc: %w", err)
		}
		if len(data) != 4 {
			return "", fmt.Errorf("invalid crc32 part checksum length: %d", len(data))
		}
		currentCRC, err := base64.StdEncoding.DecodeString(crc)
		if err != nil {
			return "", fmt.Errorf("base64 decode crc: %w", err)
		}
		if len(currentCRC) != 4 {
			return "", fmt.Errorf("invalid crc32 checksum length: %d", len(currentCRC))
		}

		currentVal := uint32(currentCRC[0])<<24 | uint32(currentCRC[1])<<16 | uint32(currentCRC[2])<<8 | uint32(currentCRC[3])
		val := uint32(data[0])<<24 | uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3])
		composite := crc32Combine(crc32.IEEE, currentVal, val, partLen)

		out := []byte{
			byte(composite >> 24),
			byte(composite >> 16),
			byte(composite >> 8),
			byte(composite),
		}
		return base64.StdEncoding.EncodeToString(out), nil
	case types.ChecksumAlgorithmCrc32c:
		data, err := base64.StdEncoding.DecodeString(partCrc)
		if err != nil {
			return "", fmt.Errorf("base64 decode partCrc: %w", err)
		}
		if len(data) != 4 {
			return "", fmt.Errorf("invalid crc32 part checksum length: %d", len(data))
		}
		currentCRC, err := base64.StdEncoding.DecodeString(crc)
		if err != nil {
			return "", fmt.Errorf("base64 decode crc: %w", err)
		}
		if len(currentCRC) != 4 {
			return "", fmt.Errorf("invalid crc32 checksum length: %d", len(currentCRC))
		}

		currentVal := uint32(currentCRC[0])<<24 | uint32(currentCRC[1])<<16 | uint32(currentCRC[2])<<8 | uint32(currentCRC[3])
		val := uint32(data[0])<<24 | uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3])
		composite := crc32Combine(crc32.Castagnoli, currentVal, val, partLen)

		// Convert composite to big-endian bytes
		out := []byte{
			byte(composite >> 24),
			byte(composite >> 16),
			byte(composite >> 8),
			byte(composite),
		}
		return base64.StdEncoding.EncodeToString(out), nil
	case types.ChecksumAlgorithmCrc64nvme:
		data, err := base64.StdEncoding.DecodeString(partCrc)
		if err != nil {
			return "", fmt.Errorf("base64 decode partCrc: %w", err)
		}
		if len(data) != 8 {
			return "", fmt.Errorf("invalid crc64 part checksum length: %d", len(data))
		}
		currentCRC, err := base64.StdEncoding.DecodeString(crc)
		if err != nil {
			return "", fmt.Errorf("base64 decode crc: %w", err)
		}
		if len(currentCRC) != 8 {
			return "", fmt.Errorf("invalid crc64 checksum length: %d", len(currentCRC))
		}

		currentVal := uint64(currentCRC[0])<<56 | uint64(currentCRC[1])<<48 | uint64(currentCRC[2])<<40 | uint64(currentCRC[3])<<32 |
			uint64(currentCRC[4])<<24 | uint64(currentCRC[5])<<16 | uint64(currentCRC[6])<<8 | uint64(currentCRC[7])
		val := uint64(data[0])<<56 | uint64(data[1])<<48 | uint64(data[2])<<40 | uint64(data[3])<<32 |
			uint64(data[4])<<24 | uint64(data[5])<<16 | uint64(data[6])<<8 | uint64(data[7])
		composite := crc64Combine(crc64NVME, currentVal, val, partLen)

		out := []byte{
			byte(composite >> 56), byte(composite >> 48), byte(composite >> 40), byte(composite >> 32),
			byte(composite >> 24), byte(composite >> 16), byte(composite >> 8), byte(composite),
		}
		return base64.StdEncoding.EncodeToString(out), nil
	default:
		return "", fmt.Errorf("composite checksum not supported for algorithm: %v", algo)
	}
}
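
// A minimal usage sketch (illustrative, not part of the change): growing a
// full-object CRC as parts complete, the way a multipart completion can for
// the composable CRC family above. "partChecksums" and "partSizes" are
// hypothetical inputs holding each part's base64 CRC and byte length.
func exampleGrowFullObjectCRC(partChecksums []string, partSizes []int64) (string, error) {
	algo := types.ChecksumAlgorithmCrc32
	if len(partChecksums) == 0 || len(partChecksums) != len(partSizes) {
		return "", fmt.Errorf("mismatched part inputs")
	}
	if !IsChecksumComposable(algo) {
		return "", fmt.Errorf("algorithm %v is not composable", algo)
	}
	full := partChecksums[0]
	for i := 1; i < len(partChecksums); i++ {
		var err error
		full, err = AddCRCChecksum(algo, full, partChecksums[i], partSizes[i])
		if err != nil {
			return "", err
		}
	}
	return full, nil
}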

// NewCompositeChecksumReader initializes a composite checksum
// processor, which decodes and validates the provided
// checksums and returns the final checksum based on
// the previously processed part checksums.
//
// The supported checksum types are:
// - CRC32
// - CRC32C
// - SHA1
// - SHA256
func NewCompositeChecksumReader(ht HashType) (*CompositeChecksumReader, error) {
	var hasher hash.Hash
	switch ht {
	case HashTypeSha256:
		hasher = sha256.New()
	case HashTypeSha1:
		hasher = sha1.New()
	case HashTypeCRC32:
		hasher = crc32.NewIEEE()
	case HashTypeCRC32C:
		hasher = crc32.New(crc32.MakeTable(crc32.Castagnoli))
	case HashTypeNone:
		hasher = noop{}
	default:
		return nil, errInvalidHashType
	}

	return &CompositeChecksumReader{
		hasher: hasher,
	}, nil
}

type CompositeChecksumReader struct {
	hasher hash.Hash
}

// Process decodes the base64 checksum and writes it into the hasher
func (ccr *CompositeChecksumReader) Process(checksum string) error {
	data, err := base64.StdEncoding.DecodeString(checksum)
	if err != nil {
		return fmt.Errorf("base64 decode: %w", err)
	}

	_, err = ccr.hasher.Write(data)
	if err != nil {
		return fmt.Errorf("hash write: %w", err)
	}

	return nil
}

// Sum returns the base64 encoded composite checksum
func (ccr *CompositeChecksumReader) Sum() string {
	return Base64SumString(ccr.hasher.Sum(nil))
}

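// A short sketch (illustrative only) of the composite "checksum of checksums"
// flow for multipart uploads: each part's base64 checksum is decoded and fed
// into the hasher, and Sum yields the digest of the concatenated digests. The
// "-N" part-count suffix shown here mirrors the S3 composite format.
func exampleCompositeSha256(partChecksums []string) (string, error) {
	ccr, err := NewCompositeChecksumReader(HashTypeSha256)
	if err != nil {
		return "", err
	}
	for _, cs := range partChecksums {
		if err := ccr.Process(cs); err != nil {
			return "", err
		}
	}
	return fmt.Sprintf("%s-%d", ccr.Sum(), len(partChecksums)), nil
}
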
120 s3api/utils/csum-reader_test.go Normal file
@@ -0,0 +1,120 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package utils

import (
	"encoding/base64"
	"hash/crc32"
	"hash/crc64"
	"testing"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func TestAddCRCChecksum_CRC32(t *testing.T) {
	data := []byte("this is a test buffer for crc32")
	mid := len(data) / 2
	part1 := data[:mid]
	part2 := data[mid:]

	crc1 := crc32.Checksum(part1, crc32.IEEETable)
	crc2 := crc32.Checksum(part2, crc32.IEEETable)
	crcFull := crc32.Checksum(data, crc32.IEEETable)

	crc1b := []byte{byte(crc1 >> 24), byte(crc1 >> 16), byte(crc1 >> 8), byte(crc1)}
	crc2b := []byte{byte(crc2 >> 24), byte(crc2 >> 16), byte(crc2 >> 8), byte(crc2)}
	crc1b64 := base64.StdEncoding.EncodeToString(crc1b)
	crc2b64 := base64.StdEncoding.EncodeToString(crc2b)

	combined, err := AddCRCChecksum(types.ChecksumAlgorithmCrc32, crc1b64, crc2b64, int64(len(part2)))
	if err != nil {
		t.Fatalf("AddCRCChecksum failed: %v", err)
	}
	combinedBytes, err := base64.StdEncoding.DecodeString(combined)
	if err != nil {
		t.Fatalf("base64 decode failed: %v", err)
	}
	combinedVal := uint32(combinedBytes[0])<<24 | uint32(combinedBytes[1])<<16 | uint32(combinedBytes[2])<<8 | uint32(combinedBytes[3])
	if combinedVal != crcFull {
		t.Errorf("CRC32 combine mismatch: got %x, want %x", combinedVal, crcFull)
	}
}

func TestAddCRCChecksum_CRC32c(t *testing.T) {
	data := []byte("this is a test buffer for crc32c")
	mid := len(data) / 2
	part1 := data[:mid]
	part2 := data[mid:]

	castagnoli := crc32.MakeTable(crc32.Castagnoli)
	crc1 := crc32.Checksum(part1, castagnoli)
	crc2 := crc32.Checksum(part2, castagnoli)
	crcFull := crc32.Checksum(data, castagnoli)

	crc1b := []byte{byte(crc1 >> 24), byte(crc1 >> 16), byte(crc1 >> 8), byte(crc1)}
	crc2b := []byte{byte(crc2 >> 24), byte(crc2 >> 16), byte(crc2 >> 8), byte(crc2)}
	crc1b64 := base64.StdEncoding.EncodeToString(crc1b)
	crc2b64 := base64.StdEncoding.EncodeToString(crc2b)

	combined, err := AddCRCChecksum(types.ChecksumAlgorithmCrc32c, crc1b64, crc2b64, int64(len(part2)))
	if err != nil {
		t.Fatalf("AddCRCChecksum failed: %v", err)
	}
	combinedBytes, err := base64.StdEncoding.DecodeString(combined)
	if err != nil {
		t.Fatalf("base64 decode failed: %v", err)
	}
	combinedVal := uint32(combinedBytes[0])<<24 | uint32(combinedBytes[1])<<16 | uint32(combinedBytes[2])<<8 | uint32(combinedBytes[3])
	if combinedVal != crcFull {
		t.Errorf("CRC32c combine mismatch: got %x, want %x", combinedVal, crcFull)
	}
}

func TestAddCRCChecksum_CRC64NVME(t *testing.T) {
	data := []byte("this is a test buffer for crc64nvme")
	mid := len(data) / 2
	part1 := data[:mid]
	part2 := data[mid:]

	table := crc64NVMETable
	crc1 := crc64.Checksum(part1, table)
	crc2 := crc64.Checksum(part2, table)
	crcFull := crc64.Checksum(data, table)

	crc1b := []byte{
		byte(crc1 >> 56), byte(crc1 >> 48), byte(crc1 >> 40), byte(crc1 >> 32),
		byte(crc1 >> 24), byte(crc1 >> 16), byte(crc1 >> 8), byte(crc1),
	}
	crc2b := []byte{
		byte(crc2 >> 56), byte(crc2 >> 48), byte(crc2 >> 40), byte(crc2 >> 32),
		byte(crc2 >> 24), byte(crc2 >> 16), byte(crc2 >> 8), byte(crc2),
	}
	crc1b64 := base64.StdEncoding.EncodeToString(crc1b)
	crc2b64 := base64.StdEncoding.EncodeToString(crc2b)

	combined, err := AddCRCChecksum(types.ChecksumAlgorithmCrc64nvme, crc1b64, crc2b64, int64(len(part2)))
	if err != nil {
		t.Fatalf("AddCRCChecksum failed: %v", err)
	}
	combinedBytes, err := base64.StdEncoding.DecodeString(combined)
	if err != nil {
		t.Fatalf("base64 decode failed: %v", err)
	}
	combinedVal := uint64(combinedBytes[0])<<56 | uint64(combinedBytes[1])<<48 | uint64(combinedBytes[2])<<40 | uint64(combinedBytes[3])<<32 |
		uint64(combinedBytes[4])<<24 | uint64(combinedBytes[5])<<16 | uint64(combinedBytes[6])<<8 | uint64(combinedBytes[7])
	if combinedVal != crcFull {
		t.Errorf("CRC64NVME combine mismatch: got %x, want %x", combinedVal, crcFull)
	}
}
@@ -1,55 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package utils

import (
	"fmt"
	"log"

	"github.com/gofiber/fiber/v2"
)

func LogCtxDetails(ctx *fiber.Ctx, respBody []byte) {
	isDebug, ok := ctx.Locals("isDebug").(bool)
	_, notLogReqBody := ctx.Locals("logReqBody").(bool)
	_, notLogResBody := ctx.Locals("logResBody").(bool)
	if isDebug && ok {
		// Log request body
		if !notLogReqBody {
			fmt.Println()
			log.Printf("Request Body: %s", ctx.Request().Body())
		}

		// Log path parameters
		fmt.Println()
		log.Println("Path parameters: ")
		for key, val := range ctx.AllParams() {
			log.Printf("%s: %s", key, val)
		}

		// Log response headers
		fmt.Println()
		log.Println("Response Headers: ")
		ctx.Response().Header.VisitAll(func(key, val []byte) {
			log.Printf("%s: %s", key, val)
		})

		// Log response body
		if !notLogResBody && len(respBody) > 0 {
			fmt.Println()
			log.Printf("Response body %s", ctx.Response().Body())
		}
	}
}
24 s3api/utils/name_validate.go Normal file
@@ -0,0 +1,24 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package utils

func IsObjectNameValid(name string) bool {
	switch clean(name) {
	case "", ".", "..", "/":
		return false
	}

	return isObjectLocal(name)
}
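
// For illustration (not part of the file): keys that clean to the bucket
// root or climb out of it are rejected, everything else is accepted.
//
//	IsObjectNameValid("dir/file.txt")     // true
//	IsObjectNameValid("dir/../file.txt")  // true, stays inside the bucket
//	IsObjectNameValid("../file.txt")      // false, escapes the bucket
//	IsObjectNameValid("./../.")           // false, cleans to ".."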
171 s3api/utils/path.go Normal file
@@ -0,0 +1,171 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// code modified from golang std library src/internal/filepathlite/path.go
// to support path separator '/' for all platforms.
package utils

import (
	"strings"
)

const separator = '/'

// isObjectLocal checks if the given path would result in an object
// that is local to the bucket.
func isObjectLocal(path string) bool {
	if path == "" || path == "." {
		return true
	}

	path = strings.Join([]string{".", path}, string(separator))

	hasDots := false
	for p := path; p != ""; {
		var part string
		part, p, _ = strings.Cut(p, "/")
		if part == "." || part == ".." {
			hasDots = true
			break
		}
	}
	if hasDots {
		path = clean(path)
	}
	if path == ".." || strings.HasPrefix(path, "../") {
		return false
	}
	return true
}

func clean(path string) string {
	originalPath := path
	if path == "" {
		return originalPath + "."
	}
	rooted := isPathSeparator(path[0])

	// Invariants:
	//	reading from path; r is index of next byte to process.
	//	writing to buf; w is index of next byte to write.
	//	dotdot is index in buf where .. must stop, either because
	//	it is the leading slash or it is a leading ../../.. prefix.
	n := len(path)
	out := lazybuf{path: path, volAndPath: originalPath, volLen: 0}
	r, dotdot := 0, 0
	if rooted {
		out.append(separator)
		r, dotdot = 1, 1
	}

	for r < n {
		switch {
		case isPathSeparator(path[r]):
			// empty path element
			r++
		case path[r] == '.' && (r+1 == n || isPathSeparator(path[r+1])):
			// . element
			r++
		case path[r] == '.' && path[r+1] == '.' && (r+2 == n || isPathSeparator(path[r+2])):
			// .. element: remove to last separator
			r += 2
			switch {
			case out.w > dotdot:
				// can backtrack
				out.w--
				for out.w > dotdot && !isPathSeparator(out.index(out.w)) {
					out.w--
				}
			case !rooted:
				// cannot backtrack, but not rooted, so append .. element.
				if out.w > 0 {
					out.append(separator)
				}
				out.append('.')
				out.append('.')
				dotdot = out.w
			}
		default:
			// real path element.
			// add slash if needed
			if rooted && out.w != 1 || !rooted && out.w != 0 {
				out.append(separator)
			}
			// copy element
			for ; r < n && !isPathSeparator(path[r]); r++ {
				out.append(path[r])
			}
		}
	}

	// Turn empty string into "."
	if out.w == 0 {
		out.append('.')
	}

	return FromSlash(out.string())
}
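
// Behavior reference (illustrative): clean collapses dot segments lexically,
// always using '/' regardless of platform.
//
//	clean("a/b/../c")   // "a/c"
//	clean("a//b/./c/")  // "a/b/c"
//	clean("./../.")     // ".."
//	clean("")           // "."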

func isPathSeparator(c uint8) bool {
	return c == '/'
}

func FromSlash(path string) string {
	if separator == '/' {
		return path
	}
	return replaceStringByte(path, '/', separator)
}

func replaceStringByte(s string, old, new byte) string {
	if strings.IndexByte(s, old) == -1 {
		return s
	}
	n := []byte(s)
	for i := range n {
		if n[i] == old {
			n[i] = new
		}
	}
	return string(n)
}

// A lazybuf is a lazily constructed path buffer.
// It supports append, reading previously appended bytes,
// and retrieving the final string. It does not allocate a buffer
// to hold the output until that output diverges from s.
type lazybuf struct {
	path       string
	buf        []byte
	w          int
	volAndPath string
	volLen     int
}

func (b *lazybuf) index(i int) byte {
	if b.buf != nil {
		return b.buf[i]
	}
	return b.path[i]
}

func (b *lazybuf) append(c byte) {
	if b.buf == nil {
		if b.w < len(b.path) && b.path[b.w] == c {
			b.w++
			return
		}
		b.buf = make([]byte, len(b.path))
		copy(b.buf, b.path[:b.w])
	}
	b.buf[b.w] = c
	b.w++
}

func (b *lazybuf) string() string {
	if b.buf == nil {
		return b.volAndPath[:b.volLen+b.w]
	}
	return b.volAndPath[:b.volLen] + string(b.buf[:b.w])
}
64 s3api/utils/path_test.go Normal file
@@ -0,0 +1,64 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package utils_test

import (
	"testing"

	"github.com/versity/versitygw/s3api/utils"
)

func TestIsObjectNameValid(t *testing.T) {
	tests := []struct {
		name  string
		input string
		want  bool
	}{
		// valid names
		{"simple file", "file.txt", true},
		{"nested file", "dir/file.txt", true},
		{"absolute nested file", "/dir/file.txt", true},
		{"trailing slash", "dir/", true},
		{"slash prefix", "/file.txt", true}, // treated as local after joined with bucket
		{"dot slash prefix", "./file.txt", true},

		// invalid names
		{"dot dot only", "..", false},
		{"dot only", ".", false},
		{"dot slash", "./", false},
		{"dot slash dot dot", "./..", false},
		{"cleans to dot", "./../.", false},
		{"empty", "", false},
		{"file escapes 1", "../file.txt", false},
		{"file escapes 2", "dir/../../file.txt", false},
		{"file escapes 3", "../../../file.txt", false},
		{"dir escapes 1", "../dir/", false},
		{"dir escapes 2", "dir/../../dir/", false},
		{"dir escapes 3", "../../../dir/", false},
		{"dot escapes 1", "../.", false},
		{"dot escapes 2", "dir/../../.", false},
		{"dot escapes 3", "../../../.", false},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := utils.IsObjectNameValid(tt.input)
			if got != tt.want {
				t.Errorf("%v: IsObjectNameValid(%q) = %v, want %v",
					tt.name, tt.input, got, tt.want)
			}
		})
	}
}
@@ -180,7 +180,7 @@ func ParsePresignedURIParts(ctx *fiber.Ctx) (AuthData, error) {
		return a, s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch)
	}

	if ctx.Locals("region") != creds[2] {
	if ContextKeyRegion.Get(ctx) != creds[2] {
		return a, s3err.APIError{
			Code:        "SignatureDoesNotMatch",
			Description: fmt.Sprintf("Credential should be scoped to a valid Region, not %v", creds[2]),

@@ -15,9 +15,11 @@
package utils

import (
	"bufio"
	"bytes"
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"errors"
	"fmt"
@@ -25,8 +27,11 @@ import (
	"io"
	"math"
	"strconv"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/versity/versitygw/s3api/debuglogger"
	"github.com/versity/versitygw/s3err"
)

@@ -34,58 +39,87 @@ import (
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html

const (
	chunkHdrStr = ";chunk-signature="
	chunkHdrDelim = "\r\n"
	zeroLenSig = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	awsV4 = "AWS4"
	awsS3Service = "s3"
	awsV4Request = "aws4_request"
	streamPayloadAlgo = "AWS4-HMAC-SHA256-PAYLOAD"
	chunkHdrDelim            = "\r\n"
	zeroLenSig               = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	awsV4                    = "AWS4"
	awsS3Service             = "s3"
	awsV4Request             = "aws4_request"
	trailerSignatureHeader   = "x-amz-trailer-signature"
	streamPayloadAlgo        = "AWS4-HMAC-SHA256-PAYLOAD"
	streamPayloadTrailerAlgo = "AWS4-HMAC-SHA256-TRAILER"
)

// ChunkReader reads from chunked upload request body, and returns
// object data stream
type ChunkReader struct {
	r io.Reader
	signingKey []byte
	prevSig string
	parsedSig string
	currentChunkSize int64
	chunkDataLeft int64
	trailerExpected int
	stash []byte
	chunkHash hash.Hash
	strToSignPrefix string
	skipcheck bool
	r              io.Reader
	signingKey     []byte
	prevSig        string
	parsedSig      string
	chunkDataLeft  int64
	trailer        checksumType
	trailerSig     string
	parsedChecksum string
	stash          []byte
	chunkHash      hash.Hash
	checksumHash   hash.Hash
	isEOF          bool
	isFirstHeader  bool
	region         string
	date           time.Time
}

// NewSignedChunkReader reads from request body io.Reader and parses out the
// chunk metadata in stream. The headers are validated for proper signatures.
// Reading from the chunk reader will read only the object data stream
// without the chunk headers/trailers.
func NewSignedChunkReader(r io.Reader, authdata AuthData, region, secret string, date time.Time) (io.Reader, error) {
	return &ChunkReader{
func NewSignedChunkReader(r io.Reader, authdata AuthData, region, secret string, date time.Time, chType checksumType) (io.Reader, error) {
	chRdr := &ChunkReader{
		r:          r,
		signingKey: getSigningKey(secret, region, date),
		// the authdata.Signature is validated in the auth-reader,
		// so we can use that here without any other checks
		prevSig: authdata.Signature,
		chunkHash: sha256.New(),
		strToSignPrefix: getStringToSignPrefix(date, region),
	}, nil
		prevSig:       authdata.Signature,
		chunkHash:     sha256.New(),
		isFirstHeader: true,
		date:          date,
		region:        region,
		trailer:       chType,
	}

	if chType != "" {
		checksumHasher, err := getHasher(chType)
		if err != nil {
			debuglogger.Logf("failed to initialize hash calculator: %v", err)
			return nil, err
		}

		chRdr.checksumHash = checksumHasher
	}
	if chType == "" {
		debuglogger.Infof("initializing signed chunk reader")
	} else {
		debuglogger.Infof("initializing signed chunk reader with '%v' trailing checksum", chType)
	}
	return chRdr, nil
}

// Read satisfies the io.Reader for this type
func (cr *ChunkReader) Read(p []byte) (int, error) {
	n, err := cr.r.Read(p)
	if err != nil && err != io.EOF {
		return n, err
		return 0, err
	}

	cr.isEOF = err == io.EOF

	if cr.chunkDataLeft < int64(n) {
		chunkSize := cr.chunkDataLeft
		if chunkSize > 0 {
			cr.chunkHash.Write(p[:chunkSize])
			if cr.checksumHash != nil {
				cr.checksumHash.Write(p[:chunkSize])
			}
		}
		n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
		n += int(chunkSize)
@@ -94,22 +128,25 @@ func (cr *ChunkReader) Read(p []byte) (int, error) {

	cr.chunkDataLeft -= int64(n)
	cr.chunkHash.Write(p[:n])
	if cr.checksumHash != nil {
		cr.checksumHash.Write(p[:n])
	}
	return n, err
}

// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
// This part is the same for all chunks,
// only the previous signature and hash of current chunk changes
func getStringToSignPrefix(date time.Time, region string) string {
func (cr *ChunkReader) getStringToSignPrefix(algo string) string {
	credentialScope := fmt.Sprintf("%s/%s/%s/%s",
		date.Format("20060102"),
		region,
		cr.date.Format("20060102"),
		cr.region,
		awsS3Service,
		awsV4Request)

	return fmt.Sprintf("%s\n%s\n%s",
		streamPayloadAlgo,
		date.Format("20060102T150405Z"),
		algo,
		cr.date.Format("20060102T150405Z"),
		credentialScope)
}

@@ -117,12 +154,77 @@ func getStringToSignPrefix(date time.Time, region string) string {
// signature For each chunk, you calculate the signature using the following
// string to sign. For the first chunk, you use the seed-signature as the
// previous signature.
func getChunkStringToSign(prefix, prevSig string, chunkHash []byte) string {
	return fmt.Sprintf("%s\n%s\n%s\n%s",
func (cr *ChunkReader) getChunkStringToSign() string {
	prefix := cr.getStringToSignPrefix(streamPayloadAlgo)
	chunkHash := cr.chunkHash.Sum(nil)
	strToSign := fmt.Sprintf("%s\n%s\n%s\n%s",
		prefix,
		prevSig,
		cr.prevSig,
		zeroLenSig,
		hex.EncodeToString(chunkHash))
	debuglogger.PrintInsideHorizontalBorders(debuglogger.Purple, "STRING TO SIGN", strToSign, 64)
	return strToSign
}
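
// For reference, a chunk string to sign assembled by the two helpers above
// has this shape (values illustrative, taken from the AWS SigV4 streaming
// docs linked earlier):
//
//	AWS4-HMAC-SHA256-PAYLOAD
//	20130524T000000Z
//	20130524/us-east-1/s3/aws4_request
//	<previous chunk signature>
//	e3b0c442...b855                     (zeroLenSig, sha256 of the empty string)
//	<hex sha256 of this chunk's data>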

// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming-trailers.html#example-signature-calculations-trailing-header
// Builds the final chunk trailing signature string to sign
func (cr *ChunkReader) getTrailerChunkStringToSign() string {
	trailer := fmt.Sprintf("%v:%v\n", cr.trailer, cr.parsedChecksum)
	hsh := sha256.Sum256([]byte(trailer))
	sig := hex.EncodeToString(hsh[:])

	prefix := cr.getStringToSignPrefix(streamPayloadTrailerAlgo)

	strToSign := fmt.Sprintf("%s\n%s\n%s",
		prefix,
		cr.prevSig,
		sig,
	)

	debuglogger.PrintInsideHorizontalBorders(debuglogger.Purple, "TRAILER STRING TO SIGN", strToSign, 64)

	return strToSign
}

// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming-trailers.html#example-signature-calculations-trailing-header
// Calculates and validates the final chunk trailer signature
func (cr *ChunkReader) verifyTrailerSignature() error {
	strToSign := cr.getTrailerChunkStringToSign()
	sig := hex.EncodeToString(hmac256(cr.signingKey, []byte(strToSign)))

	if sig != cr.trailerSig {
		debuglogger.Logf("incorrect trailing signature: (calculated): %v, (got): %v", sig, cr.trailerSig)
		return s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
	}

	return nil
}

// Verifies the object checksum
func (cr *ChunkReader) verifyChecksum() error {
	checksumHash := cr.checksumHash.Sum(nil)
	checksum := base64.StdEncoding.EncodeToString(checksumHash)
	if checksum != cr.parsedChecksum {
		algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(string(cr.trailer), "x-amz-checksum-")))
		debuglogger.Logf("incorrect trailing checksum: (calculated): %v, (got): %v", checksum, cr.parsedChecksum)
		return s3err.GetChecksumBadDigestErr(algo)
	}

	return nil
}

// Calculates and verifies the chunk signature
func (cr *ChunkReader) checkSignature() error {
	sigstr := cr.getChunkStringToSign()
	cr.chunkHash.Reset()
	cr.prevSig = hex.EncodeToString(hmac256(cr.signingKey, []byte(sigstr)))

	if cr.prevSig != cr.parsedSig {
		debuglogger.Logf("incorrect signature: (calculated): %v, (got) %v", cr.prevSig, cr.parsedSig)
		return s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
	}
	cr.parsedSig = ""
	return nil
}

// The provided p should have all of the previous chunk data and trailer
@@ -137,50 +239,48 @@ func getChunkStringToSign(prefix, prevSig string, chunkHash []byte) string {
func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
	n := len(p)

	if !cr.skipcheck && cr.parsedSig != "" {
		chunkhash := cr.chunkHash.Sum(nil)
		cr.chunkHash.Reset()

		sigstr := getChunkStringToSign(cr.strToSignPrefix, cr.prevSig, chunkhash)
		cr.prevSig = hex.EncodeToString(hmac256(cr.signingKey, []byte(sigstr)))

		if cr.currentChunkSize != 0 && cr.prevSig != cr.parsedSig {
			return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
	if cr.parsedSig != "" {
		err := cr.checkSignature()
		if err != nil {
			return 0, err
		}
	}

	if cr.trailerExpected != 0 {
		if len(p) < len(chunkHdrDelim) {
			// This is the special case where we need to consume the
			// trailer, but instead hit the end of the buffer. The
			// subsequent call will finish consuming the trailer.
			cr.chunkDataLeft = 0
			cr.trailerExpected -= len(p)
			cr.skipcheck = true
			return 0, nil
		}
		// move data up to remove trailer
		copy(p, p[cr.trailerExpected:])
		n -= cr.trailerExpected
	}

	cr.skipcheck = false

	chunkSize, sig, bufOffset, err := cr.parseChunkHeaderBytes(p[:n])
	cr.currentChunkSize = chunkSize
	cr.parsedSig = sig
	if err == errskipHeader {
		cr.chunkDataLeft = 0
		return 0, nil
	}
	if err != nil {
		debuglogger.Logf("failed to parse chunk headers: %v", err)
		return 0, err
	}
	cr.parsedSig = sig
	// If we hit the final chunk, calculate and validate the final
	// chunk signature and finish reading
	if chunkSize == 0 {
		debuglogger.Infof("final chunk parsed:\nchunk size: %v\nsignature: %v\nbuffer offset: %v", chunkSize, sig, bufOffset)
		cr.chunkHash.Reset()
		err := cr.checkSignature()
		if err != nil {
			return 0, err
		}

		if cr.trailer != "" {
			debuglogger.Infof("final chunk trailers parsed:\nchecksum: %v\ntrailing signature: %v", cr.parsedChecksum, cr.trailerSig)
			err := cr.verifyChecksum()
			if err != nil {
				return 0, err
			}
			err = cr.verifyTrailerSignature()
			if err != nil {
				return 0, err
			}
		}

		return 0, io.EOF
	}

	cr.trailerExpected = len(chunkHdrDelim)
	debuglogger.Infof("chunk headers parsed:\nchunk size: %v\nsignature: %v\nbuffer offset: %v", chunkSize, sig, bufOffset)

	// move data up to remove chunk header
	copy(p, p[bufOffset:n])
@@ -191,8 +291,12 @@ func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
	if int64(n) > chunkSize {
		cr.chunkDataLeft = 0
		cr.chunkHash.Write(p[:chunkSize])
		if cr.checksumHash != nil {
			cr.checksumHash.Write(p[:chunkSize])
		}
		n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
		if (chunkSize + int64(n)) > math.MaxInt {
			debuglogger.Logf("exceeding the limit of maximum integer allowed: (value): %v, (limit): %v", chunkSize+int64(n), math.MaxInt)
			return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
		}
		return n + int(chunkSize), err
@@ -200,6 +304,9 @@ func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {

	cr.chunkDataLeft = chunkSize - int64(n)
	cr.chunkHash.Write(p[:n])
	if cr.checksumHash != nil {
		cr.checksumHash.Write(p[:n])
	}

	return n, nil
}
@@ -212,6 +319,7 @@ func getSigningKey(secret, region string, date time.Time) []byte {
	dateRegionKey := hmac256(dateKey, []byte(region))
	dateRegionServiceKey := hmac256(dateRegionKey, []byte(awsS3Service))
	signingKey := hmac256(dateRegionServiceKey, []byte(awsV4Request))
	debuglogger.Infof("signing key: %s", hex.EncodeToString(signingKey))
	return signingKey
}
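
// For reference, getSigningKey above is the standard SigV4 key derivation
// chain (per the AWS docs; shown here only as a summary):
//
//	dateKey              = HMAC-SHA256("AWS4"+secret, yyyymmdd)
//	dateRegionKey        = HMAC-SHA256(dateKey, region)
//	dateRegionServiceKey = HMAC-SHA256(dateRegionKey, "s3")
//	signingKey           = HMAC-SHA256(dateRegionServiceKey, "aws4_request")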

@@ -236,41 +344,190 @@
func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, error) {
	stashLen := len(cr.stash)
	if stashLen > maxHeaderSize {
		debuglogger.Logf("the stash length exceeds the maximum allowed chunk header size: (stash len): %v, (header limit): %v", stashLen, maxHeaderSize)
		return 0, "", 0, errInvalidChunkFormat
	}
	if cr.stash != nil {
		tmp := make([]byte, maxHeaderSize)
		debuglogger.Logf("recovering the stash: (stash len): %v", stashLen)
		tmp := make([]byte, stashLen+len(header))
		copy(tmp, cr.stash)
		copy(tmp[len(cr.stash):], header)
		header = tmp
		cr.stash = nil
	}

	semicolonIndex := bytes.Index(header, []byte(chunkHdrStr))
	if semicolonIndex == -1 {
		cr.stash = make([]byte, len(header))
		copy(cr.stash, header)
		cr.trailerExpected = 0
		return 0, "", 0, errskipHeader
	rdr := bufio.NewReader(bytes.NewReader(header))

	// After the first chunk each chunk header should start
	// with "\n\r\n"
	if !cr.isFirstHeader {
		err := readAndSkip(rdr, '\r', '\n')
		if err != nil {
			debuglogger.Logf("failed to read chunk header first 2 bytes: (should be): \\r\\n, (got): %q", header[:2])
			return cr.handleRdrErr(err, header)
		}
	}

	sigIndex := semicolonIndex + len(chunkHdrStr)
	sigEndIndex := bytes.Index(header[sigIndex:], []byte(chunkHdrDelim))
	if sigEndIndex == -1 {
		cr.stash = make([]byte, len(header))
		copy(cr.stash, header)
		cr.trailerExpected = 0
		return 0, "", 0, errskipHeader
	}

	chunkSizeBytes := header[:semicolonIndex]
	chunkSize, err := strconv.ParseInt(string(chunkSizeBytes), 16, 64)
	// read and parse the chunk size
	chunkSizeStr, err := readAndTrim(rdr, ';')
	if err != nil {
		debuglogger.Logf("failed to read chunk size: %v", err)
		return cr.handleRdrErr(err, header)
	}
	chunkSize, err := strconv.ParseInt(chunkSizeStr, 16, 64)
	if err != nil {
		debuglogger.Logf("failed to parse chunk size: (size): %v, (err): %v", chunkSizeStr, err)
		return 0, "", 0, errInvalidChunkFormat
	}

	signature := string(header[sigIndex:(sigIndex + sigEndIndex)])
	dataStartOffset := sigIndex + sigEndIndex + len(chunkHdrDelim)
	// read the chunk signature
	err = readAndSkip(rdr, 'c', 'h', 'u', 'n', 'k', '-', 's', 'i', 'g', 'n', 'a', 't', 'u', 'r', 'e', '=')
	if err != nil {
		debuglogger.Logf("failed to read 'chunk-signature=': %v", err)
		return cr.handleRdrErr(err, header)
	}
	sig, err := readAndTrim(rdr, '\r')
	if err != nil {
		debuglogger.Logf("failed to read '\\r', after chunk signature: %v", err)
		return cr.handleRdrErr(err, header)
	}

	return chunkSize, signature, dataStartOffset - stashLen, nil
	// read and parse the final chunk trailer and checksum
	if chunkSize == 0 {
		if cr.trailer != "" {
			err = readAndSkip(rdr, '\n')
			if err != nil {
				debuglogger.Logf("failed to read \\n before the trailer: %v", err)
				return cr.handleRdrErr(err, header)
			}
			// parse and validate the trailing header
			trailer, err := readAndTrim(rdr, ':')
			if err != nil {
				debuglogger.Logf("failed to read trailer prefix: %v", err)
				return cr.handleRdrErr(err, header)
			}
			if trailer != string(cr.trailer) {
				debuglogger.Logf("incorrect trailer prefix: (expected): %v, (got): %v", cr.trailer, trailer)
				return 0, "", 0, errInvalidChunkFormat
			}

			algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(trailer, "x-amz-checksum-")))

			// parse the checksum
			checksum, err := readAndTrim(rdr, '\r')
			if err != nil {
				debuglogger.Logf("failed to read checksum value: %v", err)
				return cr.handleRdrErr(err, header)
			}

			if !IsValidChecksum(checksum, algo) {
				debuglogger.Logf("invalid checksum value: %v", checksum)
				return 0, "", 0, s3err.GetInvalidTrailingChecksumHeaderErr(trailer)
			}

			err = readAndSkip(rdr, '\n')
			if err != nil {
				debuglogger.Logf("failed to read \\n after checksum: %v", err)
				return cr.handleRdrErr(err, header)
			}

			// parse the trailing signature
			trailerSigPrefix, err := readAndTrim(rdr, ':')
			if err != nil {
				debuglogger.Logf("failed to read trailing signature prefix: %v", err)
				return cr.handleRdrErr(err, header)
			}

			if trailerSigPrefix != trailerSignatureHeader {
				debuglogger.Logf("invalid trailing signature prefix: (expected): %v, (got): %v", trailerSignatureHeader, trailerSigPrefix)
				return 0, "", 0, errInvalidChunkFormat
			}

			trailerSig, err := readAndTrim(rdr, '\r')
			if err != nil {
				debuglogger.Logf("failed to read trailing signature: %v", err)
				return cr.handleRdrErr(err, header)
			}

			cr.trailerSig = trailerSig
			cr.parsedChecksum = checksum
		}

		// "\r\n\r\n" follows the last chunk
		err = readAndSkip(rdr, '\n', '\r', '\n')
		if err != nil {
			debuglogger.Logf("failed to read \\n\\r\\n at the end of chunk header: %v", err)
			return cr.handleRdrErr(err, header)
		}

		return 0, sig, 0, nil
	}

	err = readAndSkip(rdr, '\n')
	if err != nil {
		debuglogger.Logf("failed to read \\n at the end of chunk header: %v", err)
		return cr.handleRdrErr(err, header)
	}

	// find the index of chunk ending: '\r\n'
	// skip the first 2 bytes as it is the starting '\r\n'
	// the first chunk doesn't contain the starting '\r\n', but
	// trimming the first 2 bytes doesn't affect the logic either way.
	ind := bytes.Index(header[2:], []byte{'\r', '\n'})
	cr.isFirstHeader = false

	// the offset is the found index + 4 - the stash length
	// where:
	// ind is the index of '\r\n'
	// 4 specifies the trimmed 2 bytes plus 2 to shift the index at the end of '\r\n'
	offset := ind + 4 - stashLen
	return chunkSize, sig, offset, nil
}

// Stashes the header in cr.stash and returns "errskipHeader"
func (cr *ChunkReader) stashAndSkipHeader(header []byte) (int64, string, int, error) {
	cr.stash = make([]byte, len(header))
	copy(cr.stash, header)
	debuglogger.Logf("stashing the header: (header length): %v", len(header))
	return 0, "", 0, errskipHeader
}

// Returns "errInvalidChunkFormat" if the passed err is "io.EOF" and the underlying reader has hit EOF (cr.isEOF is true),
// calls "cr.stashAndSkipHeader" if the passed err is "io.EOF" and cr.isEOF is false,
// returns the error otherwise
func (cr *ChunkReader) handleRdrErr(err error, header []byte) (int64, string, int, error) {
	if err == io.EOF {
		if cr.isEOF {
			debuglogger.Logf("incomplete chunk encoding, EOF reached")
			return 0, "", 0, errInvalidChunkFormat
		}
		return cr.stashAndSkipHeader(header)
	}
	return 0, "", 0, err
}

// reads data from the "rdr" and validates the passed data bytes
func readAndSkip(rdr *bufio.Reader, data ...byte) error {
	for _, d := range data {
		b, err := rdr.ReadByte()
		if err != nil {
			return err
		}

		if b != d {
			return errMalformedEncoding
		}
	}

	return nil
}

// reads string by "delim" and trims the delimiter at the end
func readAndTrim(r *bufio.Reader, delim byte) (string, error) {
	str, err := r.ReadString(delim)
	if err != nil {
		return "", err
	}

	return strings.TrimSuffix(str, string(delim)), nil
}

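// A small sketch (illustrative) of the helpers above against one header line
// of the form "400;chunk-signature=<sig>\r\n":
//
//	sizeStr, _ := readAndTrim(rdr, ';')                  // "400"
//	_ = readAndSkip(rdr, []byte("chunk-signature=")...)  // consume the literal
//	sig, _ := readAndTrim(rdr, '\r')                     // "<sig>"
//	_ = readAndSkip(rdr, '\n')                           // end of the header line
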
@@ -29,6 +29,8 @@ import (
	"math/bits"
	"strconv"
	"strings"

	"github.com/versity/versitygw/s3api/debuglogger"
)

var (
@@ -42,27 +44,28 @@ type UnsignedChunkReader struct {
	expectedChecksum string
	hasher           hash.Hash
	stash            []byte
	chunkCounter     int
	offset           int
}

func NewUnsignedChunkReader(r io.Reader, ct checksumType) (*UnsignedChunkReader, error) {
	hasher, err := getHasher(ct)
	if err != nil {
		debuglogger.Logf("failed to initialize hash calculator: %v", err)
		return nil, err
	}
	debuglogger.Infof("initializing unsigned chunk reader")
	return &UnsignedChunkReader{
		reader:       bufio.NewReader(r),
		checksumType: ct,
		stash:        make([]byte, 0),
		hasher:       hasher,
		chunkCounter: 1,
	}, nil
}

func (ucr *UnsignedChunkReader) Read(p []byte) (int, error) {
	// First read any stashed data
	if len(ucr.stash) != 0 {
		debuglogger.Infof("recovering the stash: (stash length): %v", len(ucr.stash))
		n := copy(p, ucr.stash)
		ucr.offset += n

@@ -89,22 +92,24 @@ func (ucr *UnsignedChunkReader) Read(p []byte) (int, error) {
	// Read and cache the payload
	_, err = io.ReadFull(rdr, payload)
	if err != nil {
		debuglogger.Logf("failed to read chunk data: %v", err)
		return 0, err
	}

	// Skip the trailing "\r\n"
	if err := ucr.readAndSkip('\r', '\n'); err != nil {
		debuglogger.Logf("failed to read trailing \\r\\n after chunk data: %v", err)
		return 0, err
	}

	// Copy the payload into the io.Reader buffer
	n := copy(p[ucr.offset:], payload)
	ucr.offset += n
	ucr.chunkCounter++

	if int64(n) < chunkSize {
		// stash the remaining data
		ucr.stash = payload[n:]
		debuglogger.Infof("stashing the remaining data: (stash length): %v", len(ucr.stash))
		dataRead := ucr.offset
		ucr.offset = 0
		return dataRead, nil
@@ -113,6 +118,7 @@ func (ucr *UnsignedChunkReader) Read(p []byte) (int, error) {

	// Read and validate trailers
	if err := ucr.readTrailer(); err != nil {
		debuglogger.Logf("failed to read trailer: %v", err)
		return 0, err
	}

@@ -142,15 +148,19 @@ func (ucr *UnsignedChunkReader) readAndSkip(data ...byte) error {
func (ucr *UnsignedChunkReader) extractChunkSize() (int64, error) {
	line, err := ucr.reader.ReadString('\n')
	if err != nil {
		debuglogger.Logf("failed to parse chunk size: %v", err)
		return 0, errMalformedEncoding
	}
	line = strings.TrimSpace(line)

	chunkSize, err := strconv.ParseInt(line, 16, 64)
	if err != nil {
		debuglogger.Logf("failed to convert chunk size: %v", err)
		return 0, errMalformedEncoding
	}

	debuglogger.Infof("chunk size extracted: %v", chunkSize)

	return chunkSize, nil
}

@@ -161,6 +171,7 @@ func (ucr *UnsignedChunkReader) readTrailer() error {
	for {
		v, err := ucr.reader.ReadByte()
		if err != nil {
			debuglogger.Logf("failed to read byte: %v", err)
			if err == io.EOF {
				return io.ErrUnexpectedEOF
			}
@@ -173,12 +184,14 @@ func (ucr *UnsignedChunkReader) readTrailer() error {
			var tmp [3]byte
			_, err = io.ReadFull(ucr.reader, tmp[:])
			if err != nil {
				debuglogger.Logf("failed to read chunk ending: \\n\\r\\n: %v", err)
				if err == io.EOF {
					return io.ErrUnexpectedEOF
				}
				return err
			}
			if !bytes.Equal(tmp[:], trailerDelim) {
				debuglogger.Logf("incorrect trailer delimiter: (expected): \\n\\r\\n, (got): %q", tmp[:])
				return errMalformedEncoding
			}
			break
@@ -189,15 +202,18 @@ func (ucr *UnsignedChunkReader) readTrailer() error {
	trailerHeader = strings.TrimSpace(trailerHeader)
	trailerHeaderParts := strings.Split(trailerHeader, ":")
	if len(trailerHeaderParts) != 2 {
		debuglogger.Logf("invalid trailer header parts: %v", trailerHeaderParts)
		return errMalformedEncoding
	}

	if trailerHeaderParts[0] != string(ucr.checksumType) {
		debuglogger.Logf("invalid checksum type: %v", trailerHeaderParts[0])
		//TODO: handle the error
		return errMalformedEncoding
	}

	ucr.expectedChecksum = trailerHeaderParts[1]
	debuglogger.Infof("parsed the trailing header:\n%v:%v", trailerHeaderParts[0], trailerHeaderParts[1])

	// Validate checksum
	return ucr.validateChecksum()
@@ -209,6 +225,7 @@ func (ucr *UnsignedChunkReader) validateChecksum() error {
	checksum := base64.StdEncoding.EncodeToString(csum)

	if checksum != ucr.expectedChecksum {
		debuglogger.Logf("incorrect checksum: (expected): %v, (got): %v", ucr.expectedChecksum, checksum)
		return fmt.Errorf("actual checksum: %v, expected checksum: %v", checksum, ucr.expectedChecksum)
	}

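// For reference, the unsigned chunked body parsed above looks roughly like
// this on the wire (sizes in hex; values illustrative, assembled from the
// parsing logic rather than quoted from a spec):
//
//	400\r\n
//	<1024 bytes of object data>\r\n
//	0\r\n
//	x-amz-checksum-crc32c:<base64 value>
//	\n\r\n   (trailerDelim)
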
@@ -16,6 +16,8 @@ package utils

import (
	"bytes"
	"encoding/base64"
	"encoding/xml"
	"errors"
	"fmt"
	"io"
@@ -27,9 +29,9 @@ import (
	"time"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/aws/smithy-go/encoding/httpbinding"
	"github.com/gofiber/fiber/v2"
	"github.com/valyala/fasthttp"
	"github.com/versity/versitygw/s3api/debuglogger"
	"github.com/versity/versitygw/s3err"
	"github.com/versity/versitygw/s3response"
)
@@ -39,21 +41,17 @@ var (
	bucketNameIpRegexp = regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`)
)

const (
	upperhex = "0123456789ABCDEF"
)

func GetUserMetaData(headers *fasthttp.RequestHeader) (metadata map[string]string) {
	metadata = make(map[string]string)
	headers.DisableNormalizing()
	headers.VisitAllInOrder(func(key, value []byte) {
	for key, value := range headers.AllInOrder() {
		hKey := string(key)
		if strings.HasPrefix(strings.ToLower(hKey), "x-amz-meta-") {
			trimmedKey := hKey[11:]
			headerValue := string(value)
			metadata[trimmedKey] = headerValue
		}
	})
	}
	headers.EnableNormalizing()

	return
@@ -68,20 +66,20 @@ func createHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, contentLength
		body = bytes.NewReader(req.Body())
	}

	escapedURI := escapeOriginalURI(ctx)
	uri := ctx.OriginalURL()

	httpReq, err := http.NewRequest(string(req.Header.Method()), escapedURI, body)
	httpReq, err := http.NewRequest(string(req.Header.Method()), uri, body)
	if err != nil {
		return nil, errors.New("error in creating an http request")
	}

	// Set the request headers
	req.Header.VisitAll(func(key, value []byte) {
	for key, value := range req.Header.All() {
		keyStr := string(key)
		if includeHeader(keyStr, signedHdrs) {
			httpReq.Header.Add(keyStr, string(value))
		}
	})
	}

	// make sure all headers in the signed headers are present
	for _, header := range signedHdrs {
@@ -123,11 +121,10 @@ func createPresignedHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, cont
		body = bytes.NewReader(req.Body())
	}

	uri := string(ctx.Request().URI().Path())
	uri = httpbinding.EscapePath(uri, false)
	uri, _, _ := strings.Cut(ctx.OriginalURL(), "?")
	isFirst := true

	ctx.Request().URI().QueryArgs().VisitAll(func(key, value []byte) {
	for key, value := range ctx.Request().URI().QueryArgs().All() {
		_, ok := signedQueryArgs[string(key)]
		if !ok {
			escapeValue := url.QueryEscape(string(value))
@@ -138,19 +135,19 @@ func createPresignedHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, cont
				uri += fmt.Sprintf("&%s=%s", key, escapeValue)
			}
		}
	})
	}

	httpReq, err := http.NewRequest(string(req.Header.Method()), uri, body)
	if err != nil {
		return nil, errors.New("error in creating an http request")
	}
	// Set the request headers
	req.Header.VisitAll(func(key, value []byte) {
	for key, value := range req.Header.All() {
		keyStr := string(key)
		if includeHeader(keyStr, signedHdrs) {
			httpReq.Header.Add(keyStr, string(value))
		}
	})
	}

	// Check if Content-Length in signed headers
	// If content length is non 0, then the header will be included
@@ -178,9 +175,17 @@ func ParseUint(str string) (int32, error) {
	if str == "" {
		return 1000, nil
	}
	num, err := strconv.ParseUint(str, 10, 16)
	num, err := strconv.ParseInt(str, 10, 32)
	if err != nil {
		return 1000, fmt.Errorf("invalid uint: %w", err)
		debuglogger.Logf("invalid integer provided: %v\n", err)
		return 1000, fmt.Errorf("invalid int: %w", err)
	}
	if num < 0 {
		debuglogger.Logf("negative integer provided: %v\n", num)
		return 1000, fmt.Errorf("negative uint: %v", num)
	}
	if num > 1000 {
		num = 1000
	}
	return int32(num), nil
}
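
// Behavior sketch (illustrative): ParseUint backs max-keys style query
// params, so the empty string and anything above 1000 resolve to 1000.
//
//	ParseUint("")     // 1000, nil
//	ParseUint("250")  // 250, nil
//	ParseUint("5000") // 1000, nil
//	ParseUint("-3")   // 1000, "negative uint" error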
@@ -197,40 +202,26 @@ func SetResponseHeaders(ctx *fiber.Ctx, headers []CustomHeader) {
}

// Streams the response body by chunks
func StreamResponseBody(ctx *fiber.Ctx, rdr io.ReadCloser) error {
	buf := make([]byte, 4096) // 4KB chunks
	defer rdr.Close()
	for {
		n, err := rdr.Read(buf)
		if n > 0 {
			_, writeErr := ctx.Write(buf[:n])
			if writeErr != nil {
				return fmt.Errorf("write chunk: %w", writeErr)
			}
		}
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}

			return fmt.Errorf("read chunk: %w", err)
		}
	}

	return nil
func StreamResponseBody(ctx *fiber.Ctx, rdr io.ReadCloser, bodysize int) {
	// SetBodyStream will call Close() on the reader when the stream is done
	// since rdr is a ReadCloser
	ctx.Context().SetBodyStream(rdr, bodysize)
}

func IsValidBucketName(bucket string) bool {
	if len(bucket) < 3 || len(bucket) > 63 {
		debuglogger.Logf("bucket name length should be in 3-63 range, got: %v\n", len(bucket))
		return false
	}
	// Checks to contain only digits, lowercase letters, dot, hyphen.
	// Checks to start and end with only digits and lowercase letters.
	if !bucketNameRegexp.MatchString(bucket) {
		debuglogger.Logf("invalid bucket name: %v\n", bucket)
		return false
	}
	// Checks not to be a valid IP address
	if bucketNameIpRegexp.MatchString(bucket) {
		debuglogger.Logf("bucket name is an ip address: %v\n", bucket)
		return false
	}
	return true
@@ -296,7 +287,9 @@ func FilterObjectAttributes(attrs map[s3response.ObjectAttributes]struct{}, outp
	if _, ok := attrs[s3response.ObjectAttributesStorageClass]; !ok {
		output.StorageClass = ""
	}
	fmt.Printf("%+v\n", output)
	if _, ok := attrs[s3response.ObjectAttributesChecksum]; !ok {
		output.Checksum = nil
	}

	return output
}
@@ -304,25 +297,34 @@
func ParseObjectAttributes(ctx *fiber.Ctx) (map[s3response.ObjectAttributes]struct{}, error) {
	attrs := map[s3response.ObjectAttributes]struct{}{}
	var err error
	ctx.Request().Header.VisitAll(func(key, value []byte) {
	for key, value := range ctx.Request().Header.All() {
		if string(key) == "X-Amz-Object-Attributes" {
			if len(value) == 0 {
				break
			}
			oattrs := strings.Split(string(value), ",")
			for _, a := range oattrs {
				attr := s3response.ObjectAttributes(a)
				if !attr.IsValid() {
					debuglogger.Logf("invalid object attribute: %v\n", attr)
					err = s3err.GetAPIError(s3err.ErrInvalidObjectAttributes)
					break
				}
				attrs[attr] = struct{}{}
			}
		}
	})
	}

	if err != nil {
		return nil, err
	}

	if len(attrs) == 0 {
		debuglogger.Logf("empty get object attributes")
		return nil, s3err.GetAPIError(s3err.ErrObjectAttributesInvalidHeader)
	}

	return attrs, err
	return attrs, nil
}

type objLockCfg struct {
@@ -337,6 +339,7 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx) (*objLockCfg, error) {
	objLockDate := ctx.Get("X-Amz-Object-Lock-Retain-Until-Date")

	if (objLockDate != "" && objLockModeHdr == "") || (objLockDate == "" && objLockModeHdr != "") {
		debuglogger.Logf("one of 2 required params is missing: (lock date): %v, (lock mode): %v\n", objLockDate, objLockModeHdr)
		return nil, s3err.GetAPIError(s3err.ErrObjectLockInvalidHeaders)
	}

@@ -344,9 +347,11 @@
	if objLockDate != "" {
		rDate, err := time.Parse(time.RFC3339, objLockDate)
		if err != nil {
			debuglogger.Logf("failed to parse retain until date: %v\n", err)
			return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
		}
		if rDate.Before(time.Now()) {
			debuglogger.Logf("expired retain until date: %v\n", rDate.Format(time.RFC3339))
			return nil, s3err.GetAPIError(s3err.ErrPastObjectLockRetainDate)
		}
		retainUntilDate = rDate
@@ -357,13 +362,15 @@
	if objLockMode != "" &&
		objLockMode != types.ObjectLockModeCompliance &&
		objLockMode != types.ObjectLockModeGovernance {
		return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
		debuglogger.Logf("invalid object lock mode: %v\n", objLockMode)
		return nil, s3err.GetAPIError(s3err.ErrInvalidObjectLockMode)
	}

	legalHold := types.ObjectLockLegalHoldStatus(legalHoldHdr)

	if legalHold != "" && legalHold != types.ObjectLockLegalHoldStatusOff && legalHold != types.ObjectLockLegalHoldStatusOn {
		return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
		debuglogger.Logf("invalid object lock legal hold status: %v\n", legalHold)
		return nil, s3err.GetAPIError(s3err.ErrInvalidLegalHoldStatus)
	}

	return &objLockCfg{
@@ -382,77 +389,278 @@ func IsValidOwnership(val types.ObjectOwnership) bool {
	case types.ObjectOwnershipObjectWriter:
		return true
	default:
		debuglogger.Logf("invalid object ownership: %v\n", val)
		return false
	}
}

func escapeOriginalURI(ctx *fiber.Ctx) string {
	path := ctx.Path()
type ChecksumValues map[types.ChecksumAlgorithm]string

	// Escape the URI original path
	escapedURI := escapePath(path)
// Headers concatenates the checksum algorithms, prefixing each
// with 'x-amz-checksum-'
// e.g.
// "x-amz-checksum-crc64nvme, x-amz-checksum-sha1"
func (cv ChecksumValues) Headers() string {
	result := ""
	isFirst := true

	// Add the URI query params
	query := string(ctx.Request().URI().QueryArgs().QueryString())
	if query != "" {
		escapedURI = escapedURI + "?" + query
	for key := range cv {
		if !isFirst {
			result += ", "
		}
		result += fmt.Sprintf("x-amz-checksum-%v", strings.ToLower(string(key)))
		isFirst = false
	}

	return escapedURI
	return result
}
|
||||
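A minimal sketch of what Headers() renders (sample digests only; Go map iteration order is unspecified, so the two names may come out in either order):

	cv := ChecksumValues{
		types.ChecksumAlgorithmSha1:      "L4q6V59Zcwn12wyLIytoE2c1ugk=",
		types.ChecksumAlgorithmCrc64nvme: "m1D0LSvAXvU=",
	}
	fmt.Println(cv.Headers())
	// e.g. "x-amz-checksum-crc64nvme, x-amz-checksum-sha1"
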
-// Escapes the path string
-// Most of the parts copied from std url
-func escapePath(s string) string {
-	hexCount := 0
-	for i := 0; i < len(s); i++ {
-		c := s[i]
-		if shouldEscape(c) {
-			hexCount++
-		}
-	}
-
-	if hexCount == 0 {
-		return s
-	}
-
-	var buf [64]byte
-	var t []byte
-
-	required := len(s) + 2*hexCount
-	if required <= len(buf) {
-		t = buf[:required]
-	} else {
-		t = make([]byte, required)
-	}
-
-	j := 0
-	for i := 0; i < len(s); i++ {
-		switch c := s[i]; {
-		case shouldEscape(c):
-			t[j] = '%'
-			t[j+1] = upperhex[c>>4]
-			t[j+2] = upperhex[c&15]
-			j += 3
-		default:
-			t[j] = s[i]
-			j++
-		}
-	}
-
-	return string(t)
-}
-
-// Checks if the character needs to be escaped
-func shouldEscape(c byte) bool {
-	if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
-		return false
-	}
-
-	switch c {
-	case '-', '_', '.', '~', '/':
-		return false
-	}
-
-	return true
-}

func ParseChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, ChecksumValues, error) {
	sdkAlgorithm := types.ChecksumAlgorithm(strings.ToUpper(ctx.Get("X-Amz-Sdk-Checksum-Algorithm")))

	err := IsChecksumAlgorithmValid(sdkAlgorithm)
	if err != nil {
		debuglogger.Logf("invalid checksum algorithm: %v\n", sdkAlgorithm)
		return "", nil, err
	}

	checksums := ChecksumValues{}

	var hdrErr error
	// Parse and validate checksum headers
	for key, value := range ctx.Request().Header.All() {
		// Skip `X-Amz-Checksum-Type` as it's a special header
		if !strings.HasPrefix(string(key), "X-Amz-Checksum-") || string(key) == "X-Amz-Checksum-Type" {
			continue
		}

		algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(string(key), "X-Amz-Checksum-")))
		err := IsChecksumAlgorithmValid(algo)
		if err != nil {
			debuglogger.Logf("invalid checksum header: %s\n", key)
			hdrErr = s3err.GetAPIError(s3err.ErrInvalidChecksumHeader)
			break
		}

		checksums[algo] = string(value)
	}

	if hdrErr != nil {
		return sdkAlgorithm, nil, hdrErr
	}

	if len(checksums) > 1 {
		debuglogger.Logf("multiple checksum headers provided: %v\n", checksums.Headers())
		return sdkAlgorithm, checksums, s3err.GetAPIError(s3err.ErrMultipleChecksumHeaders)
	}

	for al, val := range checksums {
		if !IsValidChecksum(val, al) {
			return sdkAlgorithm, checksums, s3err.GetInvalidChecksumHeaderErr(fmt.Sprintf("x-amz-checksum-%v", strings.ToLower(string(al))))
		}
		// If the checksum value is provided for an algorithm other
		// than the one named in x-amz-sdk-checksum-algorithm
		if sdkAlgorithm != "" && sdkAlgorithm != al {
			return sdkAlgorithm, checksums, s3err.GetAPIError(s3err.ErrMultipleChecksumHeaders)
		}
		sdkAlgorithm = al
	}

	return sdkAlgorithm, checksums, nil
}

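A usage sketch in the test style used further down (the fiber/fasthttp context setup and the sample digest are illustrative, not part of the change):

	app := fiber.New()
	rctx := &fasthttp.RequestCtx{}
	rctx.Request.Header.Set("X-Amz-Sdk-Checksum-Algorithm", "CRC32")
	rctx.Request.Header.Set("X-Amz-Checksum-Crc32", "ww2FVQ==")

	ctx := app.AcquireCtx(rctx)
	defer app.ReleaseCtx(ctx)

	algo, vals, err := ParseChecksumHeaders(ctx)
	// algo == types.ChecksumAlgorithmCrc32, len(vals) == 1, err == nil;
	// a second x-amz-checksum-* header would yield ErrMultipleChecksumHeaders.
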
var checksumLengths = map[types.ChecksumAlgorithm]int{
	types.ChecksumAlgorithmCrc32:     4,
	types.ChecksumAlgorithmCrc32c:    4,
	types.ChecksumAlgorithmCrc64nvme: 8,
	types.ChecksumAlgorithmSha1:      20,
	types.ChecksumAlgorithmSha256:    32,
}

func IsValidChecksum(checksum string, algorithm types.ChecksumAlgorithm) bool {
	decoded, err := base64.StdEncoding.DecodeString(checksum)
	if err != nil {
		debuglogger.Logf("failed to parse checksum base64: %v\n", err)
		return false
	}

	expectedLength, exists := checksumLengths[algorithm]
	if !exists {
		debuglogger.Logf("unknown checksum algorithm: %v\n", algorithm)
		return false
	}

	isValid := len(decoded) == expectedLength
	if !isValid {
		debuglogger.Logf("decoded checksum length: (expected): %v, (got): %v\n", expectedLength, len(decoded))
	}

	return isValid
}

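For reference, a value that passes the length check can be produced like this (a sketch; assumes the standard hash/crc32, encoding/binary, and encoding/base64 imports):

	sum := crc32.ChecksumIEEE([]byte("hello world")) // uint32
	raw := make([]byte, 4)
	binary.BigEndian.PutUint32(raw, sum)
	b64 := base64.StdEncoding.EncodeToString(raw) // decodes back to 4 bytes

	fmt.Println(IsValidChecksum(b64, types.ChecksumAlgorithmCrc32))         // true
	fmt.Println(IsValidChecksum("not-base64!", types.ChecksumAlgorithmCrc32)) // false
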
func IsChecksumAlgorithmValid(alg types.ChecksumAlgorithm) error {
	alg = types.ChecksumAlgorithm(strings.ToUpper(string(alg)))
	if alg != "" &&
		alg != types.ChecksumAlgorithmCrc32 &&
		alg != types.ChecksumAlgorithmCrc32c &&
		alg != types.ChecksumAlgorithmSha1 &&
		alg != types.ChecksumAlgorithmSha256 &&
		alg != types.ChecksumAlgorithmCrc64nvme {
		debuglogger.Logf("invalid checksum algorithm: %v\n", alg)
		return s3err.GetAPIError(s3err.ErrInvalidChecksumAlgorithm)
	}

	return nil
}

// Validates the provided checksum type
func IsChecksumTypeValid(t types.ChecksumType) error {
	if t != "" &&
		t != types.ChecksumTypeComposite &&
		t != types.ChecksumTypeFullObject {
		debuglogger.Logf("invalid checksum type: %v\n", t)
		return s3err.GetInvalidChecksumHeaderErr("x-amz-checksum-type")
	}
	return nil
}

type checksumTypeSchema map[types.ChecksumType]struct{}
type checksumSchema map[types.ChecksumAlgorithm]checksumTypeSchema

// A table defining the checksum algorithm/type support
var checksumMap checksumSchema = checksumSchema{
	types.ChecksumAlgorithmCrc32: checksumTypeSchema{
		types.ChecksumTypeComposite:  struct{}{},
		types.ChecksumTypeFullObject: struct{}{},
		"":                           struct{}{},
	},
	types.ChecksumAlgorithmCrc32c: checksumTypeSchema{
		types.ChecksumTypeComposite:  struct{}{},
		types.ChecksumTypeFullObject: struct{}{},
		"":                           struct{}{},
	},
	types.ChecksumAlgorithmSha1: checksumTypeSchema{
		types.ChecksumTypeComposite: struct{}{},
		"":                          struct{}{},
	},
	types.ChecksumAlgorithmSha256: checksumTypeSchema{
		types.ChecksumTypeComposite: struct{}{},
		"":                          struct{}{},
	},
	types.ChecksumAlgorithmCrc64nvme: checksumTypeSchema{
		types.ChecksumTypeFullObject: struct{}{},
		"":                           struct{}{},
	},
	// Both could be empty
	"": checksumTypeSchema{
		"": struct{}{},
	},
}

// Checks if checksum type and algorithm are supported together
func checkChecksumTypeAndAlgo(algo types.ChecksumAlgorithm, t types.ChecksumType) error {
	typeSchema := checksumMap[algo]
	_, ok := typeSchema[t]
	if !ok {
		debuglogger.Logf("checksum type and algorithm mismatch: (type): %v, (algorithm): %v\n", t, algo)
		return s3err.GetChecksumSchemaMismatchErr(algo, t)
	}

	return nil
}

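The same support table the map encodes, flattened for reference:

	Algorithm    COMPOSITE  FULL_OBJECT  (unset)
	CRC32        yes        yes          yes
	CRC32C       yes        yes          yes
	SHA1         yes        no           yes
	SHA256       yes        no           yes
	CRC64NVME    no         yes          yes
	(unset)      no         no           yes
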
// Parses and validates the x-amz-checksum-algorithm and x-amz-checksum-type headers
func ParseCreateMpChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, types.ChecksumType, error) {
	algo := types.ChecksumAlgorithm(ctx.Get("x-amz-checksum-algorithm"))
	if err := IsChecksumAlgorithmValid(algo); err != nil {
		return "", "", err
	}

	chType := types.ChecksumType(ctx.Get("x-amz-checksum-type"))
	if err := IsChecksumTypeValid(chType); err != nil {
		return "", "", err
	}

	// Verify that a checksum algorithm is provided
	// whenever a checksum type is specified
	if chType != "" && algo == "" {
		debuglogger.Logf("checksum type can only be used with checksum algorithm: (type): %v\n", chType)
		return algo, chType, s3err.GetAPIError(s3err.ErrChecksumTypeWithAlgo)
	}

	// Verify that the checksum type is supported for
	// the provided checksum algorithm
	if err := checkChecksumTypeAndAlgo(algo, chType); err != nil {
		return algo, chType, err
	}

	// x-amz-checksum-type defaults to COMPOSITE
	// if x-amz-checksum-algorithm is set, except
	// for the CRC64NVME algorithm: it defaults to FULL_OBJECT
	if algo != "" && chType == "" {
		if algo == types.ChecksumAlgorithmCrc64nvme {
			chType = types.ChecksumTypeFullObject
		} else {
			chType = types.ChecksumTypeComposite
		}
	}

	return algo, chType, nil
}

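A sketch of the defaulting behavior (test-style fiber context, mirroring the unit tests below; the setup lines are illustrative):

	rctx := &fasthttp.RequestCtx{}
	rctx.Request.Header.Set("x-amz-checksum-algorithm", "CRC64NVME")

	ctx := fiber.New().AcquireCtx(rctx)
	algo, chType, err := ParseCreateMpChecksumHeaders(ctx)
	// algo == CRC64NVME, chType == FULL_OBJECT, err == nil;
	// any other algorithm without an explicit type defaults to COMPOSITE.
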
// TagLimit specifies the allowed tag count in a tag set
type TagLimit int

const (
	// Tag limit for bucket tagging
	TagLimitBucket TagLimit = 50
	// Tag limit for object tagging
	TagLimitObject TagLimit = 10
)

// Parses and validates tagging
func ParseTagging(data []byte, limit TagLimit) (map[string]string, error) {
	var tagging s3response.TaggingInput
	err := xml.Unmarshal(data, &tagging)
	if err != nil {
		debuglogger.Logf("invalid tagging: %s", data)
		return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
	}

	tLen := len(tagging.TagSet.Tags)
	if tLen > int(limit) {
		switch limit {
		case TagLimitObject:
			debuglogger.Logf("object tagging length exceeds %v: %v", limit, tLen)
			return nil, s3err.GetAPIError(s3err.ErrObjectTaggingLimited)
		case TagLimitBucket:
			debuglogger.Logf("bucket tagging length exceeds %v: %v", limit, tLen)
			return nil, s3err.GetAPIError(s3err.ErrBucketTaggingLimited)
		}
	}

	tagSet := make(map[string]string, tLen)

	for _, tag := range tagging.TagSet.Tags {
		// validate tag key: 0 < len(key) <= 128
		if len(tag.Key) == 0 || len(tag.Key) > 128 {
			debuglogger.Logf("tag key length must satisfy 0 < len(key) <= 128, key: %v", tag.Key)
			return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
		}

		// validate tag value
		if len(tag.Value) > 256 {
			debuglogger.Logf("invalid long tag value: (length): %v, (value): %v", len(tag.Value), tag.Value)
			return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
		}

		// make sure there are no duplicate keys
		_, ok := tagSet[tag.Key]
		if ok {
			debuglogger.Logf("duplicate tag key: %v", tag.Key)
			return nil, s3err.GetAPIError(s3err.ErrDuplicateTagKey)
		}

		tagSet[tag.Key] = tag.Value
	}

	return tagSet, nil
}

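Usage sketch, assuming the standard S3 Tagging XML shape that s3response.TaggingInput unmarshals:

	data := []byte(`<Tagging><TagSet>` +
		`<Tag><Key>env</Key><Value>dev</Value></Tag>` +
		`<Tag><Key>team</Key><Value>storage</Value></Tag>` +
		`</TagSet></Tagging>`)

	tags, err := ParseTagging(data, TagLimitObject)
	// tags == map[string]string{"env": "dev", "team": "storage"}, err == nil
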
@@ -16,6 +16,9 @@ package utils

import (
	"bytes"
	"encoding/xml"
+	"errors"
+	"math/rand"
	"net/http"
+	"reflect"
	"testing"

@@ -25,6 +28,7 @@ import (
	"github.com/gofiber/fiber/v2"
	"github.com/valyala/fasthttp"
	"github.com/versity/versitygw/backend"
+	"github.com/versity/versitygw/s3err"
	"github.com/versity/versitygw/s3response"
)

@@ -268,6 +272,14 @@ func TestParseUint(t *testing.T) {
			want:    23,
			wantErr: false,
		},
+		{
+			name: "Parse-uint-greater-than-1000",
+			args: args{
+				str: "25000000",
+			},
+			want:    1000,
+			wantErr: false,
+		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
@@ -406,9 +418,78 @@ func TestIsValidOwnership(t *testing.T) {
	}
}

-func Test_shouldEscape(t *testing.T) {
+func TestIsChecksumAlgorithmValid(t *testing.T) {
	type args struct {
-		c byte
+		alg types.ChecksumAlgorithm
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "empty",
			args: args{
				alg: "",
			},
			wantErr: false,
		},
		{
			name: "crc32",
			args: args{
				alg: types.ChecksumAlgorithmCrc32,
			},
			wantErr: false,
		},
		{
			name: "crc32c",
			args: args{
				alg: types.ChecksumAlgorithmCrc32c,
			},
			wantErr: false,
		},
		{
			name: "sha1",
			args: args{
				alg: types.ChecksumAlgorithmSha1,
			},
			wantErr: false,
		},
		{
			name: "sha256",
			args: args{
				alg: types.ChecksumAlgorithmSha256,
			},
			wantErr: false,
		},
		{
			name: "crc64nvme",
			args: args{
				alg: types.ChecksumAlgorithmCrc64nvme,
			},
			wantErr: false,
		},
		{
			name: "invalid",
			args: args{
				alg: types.ChecksumAlgorithm("invalid_checksum_algorithm"),
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := IsChecksumAlgorithmValid(tt.args.alg); (err != nil) != tt.wantErr {
				t.Errorf("IsChecksumAlgorithmValid() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

func TestIsValidChecksum(t *testing.T) {
	type args struct {
		checksum  string
		algorithm types.ChecksumAlgorithm
	}
	tests := []struct {
		name string

@@ -416,113 +497,403 @@ func Test_shouldEscape(t *testing.T) {
		want bool
	}{
		{
-			name: "shouldn't-escape-alphanum",
+			name: "invalid-base64",
			args: args{
-				c: 'h',
+				checksum:  "invalid_base64_string",
+				algorithm: types.ChecksumAlgorithmCrc32,
			},
			want: false,
		},
		{
-			name: "shouldn't-escape-unreserved-char",
+			name: "invalid-crc32",
			args: args{
-				c: '_',
+				checksum:  "YXNkZmFzZGZhc2Rm",
+				algorithm: types.ChecksumAlgorithmCrc32,
			},
			want: false,
		},
		{
-			name: "shouldn't-escape-unreserved-number",
+			name: "valid-crc32",
			args: args{
-				c: '0',
-			},
-			want: false,
-		},
-		{
-			name: "shouldn't-escape-path-separator",
-			args: args{
-				c: '/',
-			},
-			want: false,
-		},
-		{
-			name: "should-escape-special-char-1",
-			args: args{
-				c: '&',
+				checksum:  "ww2FVQ==",
+				algorithm: types.ChecksumAlgorithmCrc32,
			},
			want: true,
		},
		{
-			name: "should-escape-special-char-2",
+			name: "invalid-crc32c",
			args: args{
-				c: '*',
+				checksum:  "Zmdoa2doZmtnZmhr",
+				algorithm: types.ChecksumAlgorithmCrc32c,
			},
			want: false,
		},
		{
			name: "valid-crc32c",
			args: args{
				checksum:  "DOsb4w==",
				algorithm: types.ChecksumAlgorithmCrc32c,
			},
			want: true,
		},
		{
-			name: "should-escape-special-char-3",
+			name: "invalid-sha1",
			args: args{
-				c: '(',
+				checksum:  "YXNkZmFzZGZhc2RmYXNkZnNhZGZzYWRm",
+				algorithm: types.ChecksumAlgorithmSha1,
			},
			want: false,
		},
		{
			name: "valid-sha1",
			args: args{
				checksum:  "L4q6V59Zcwn12wyLIytoE2c1ugk=",
				algorithm: types.ChecksumAlgorithmSha1,
			},
			want: true,
		},
		{
			name: "invalid-sha256",
			args: args{
				checksum:  "Zmdoa2doZmtnZmhrYXNkZmFzZGZhc2RmZHNmYXNkZg==",
				algorithm: types.ChecksumAlgorithmSha256,
			},
			want: false,
		},
		{
			name: "valid-sha256",
			args: args{
				checksum:  "d1SPCd/kZ2rAzbbLUC0n/bEaOSx70FNbXbIqoIxKuPY=",
				algorithm: types.ChecksumAlgorithmSha256,
			},
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
-			if got := shouldEscape(tt.args.c); got != tt.want {
-				t.Errorf("shouldEscape() = %v, want %v", got, tt.want)
+			if got := IsValidChecksum(tt.args.checksum, tt.args.algorithm); got != tt.want {
+				t.Errorf("IsValidChecksum() = %v, want %v", got, tt.want)
			}
		})
	}
}

-func Test_escapePath(t *testing.T) {
+func TestIsChecksumTypeValid(t *testing.T) {
	type args struct {
-		s string
+		t types.ChecksumType
	}
	tests := []struct {
-		name string
-		args args
-		want string
+		name    string
+		args    args
+		wantErr bool
	}{
		{
-			name: "empty-string",
+			name: "valid_FULL_OBJECT",
			args: args{
-				s: "",
+				t: types.ChecksumTypeFullObject,
			},
-			want: "",
+			wantErr: false,
		},
		{
-			name: "alphanum-path",
+			name: "valid_COMPOSITE",
			args: args{
-				s: "/test-bucket/test-key",
+				t: types.ChecksumTypeComposite,
			},
-			want: "/test-bucket/test-key",
+			wantErr: false,
		},
		{
-			name: "path-with-unescapable-chars",
+			name: "invalid",
			args: args{
-				s: "/test~bucket/test.key",
+				t: types.ChecksumType("invalid_checksum_type"),
			},
-			want: "/test~bucket/test.key",
-		},
-		{
-			name: "path-with-escapable-chars",
-			args: args{
-				s: "/bucket-*(/test=key&",
-			},
-			want: "/bucket-%2A%28/test%3Dkey%26",
-		},
-		{
-			name: "path-with-space",
-			args: args{
-				s: "/test-bucket/my key",
-			},
-			want: "/test-bucket/my%20key",
+			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
-			if got := escapePath(tt.args.s); got != tt.want {
-				t.Errorf("escapePath() = %v, want %v", got, tt.want)
+			if err := IsChecksumTypeValid(tt.args.t); (err != nil) != tt.wantErr {
+				t.Errorf("IsChecksumTypeValid() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

func Test_checkChecksumTypeAndAlgo(t *testing.T) {
	type args struct {
		algo types.ChecksumAlgorithm
		t    types.ChecksumType
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "full_object-crc32",
			args: args{
				algo: types.ChecksumAlgorithmCrc32,
				t:    types.ChecksumTypeFullObject,
			},
			wantErr: false,
		},
		{
			name: "full_object-crc32c",
			args: args{
				algo: types.ChecksumAlgorithmCrc32c,
				t:    types.ChecksumTypeFullObject,
			},
			wantErr: false,
		},
		{
			name: "full_object-sha1",
			args: args{
				algo: types.ChecksumAlgorithmSha1,
				t:    types.ChecksumTypeFullObject,
			},
			wantErr: true,
		},
		{
			name: "full_object-sha256",
			args: args{
				algo: types.ChecksumAlgorithmSha256,
				t:    types.ChecksumTypeFullObject,
			},
			wantErr: true,
		},
		{
			name: "full_object-crc64nvme",
			args: args{
				algo: types.ChecksumAlgorithmCrc64nvme,
				t:    types.ChecksumTypeFullObject,
			},
			wantErr: false,
		},
		{
			name: "composite-crc32",
			args: args{
				algo: types.ChecksumAlgorithmCrc32,
				t:    types.ChecksumTypeComposite,
			},
			wantErr: false,
		},
		{
			name: "composite-crc32c",
			args: args{
				algo: types.ChecksumAlgorithmCrc32c,
				t:    types.ChecksumTypeComposite,
			},
			wantErr: false,
		},
		{
			name: "composite-sha1",
			args: args{
				algo: types.ChecksumAlgorithmSha1,
				t:    types.ChecksumTypeComposite,
			},
			wantErr: false,
		},
		{
			name: "composite-sha256",
			args: args{
				algo: types.ChecksumAlgorithmSha256,
				t:    types.ChecksumTypeComposite,
			},
			wantErr: false,
		},
		{
			name: "composite-crc64nvme",
			args: args{
				algo: types.ChecksumAlgorithmCrc64nvme,
				t:    types.ChecksumTypeComposite,
			},
			wantErr: true,
		},
		{
			name: "composite-empty",
			args: args{
				t: types.ChecksumTypeComposite,
			},
			wantErr: true,
		},
		{
			name: "full_object-empty",
			args: args{
				t: types.ChecksumTypeFullObject,
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := checkChecksumTypeAndAlgo(tt.args.algo, tt.args.t); (err != nil) != tt.wantErr {
				t.Errorf("checkChecksumTypeAndAlgo() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

func TestParseTagging(t *testing.T) {
	genRandStr := func(lgth int) string {
		b := make([]byte, lgth)
		for i := range b {
			b[i] = byte(rand.Intn(95) + 32) // 126 - 32 + 1 = 95 printable characters
		}
		return string(b)
	}
	getTagSet := func(lgth int) s3response.TaggingInput {
		res := s3response.TaggingInput{
			TagSet: s3response.TagSet{
				Tags: []s3response.Tag{},
			},
		}

		for i := 0; i < lgth; i++ {
			res.TagSet.Tags = append(res.TagSet.Tags, s3response.Tag{
				Key:   genRandStr(10),
				Value: genRandStr(20),
			})
		}

		return res
	}
	type args struct {
		data        s3response.TaggingInput
		overrideXML []byte
		limit       TagLimit
	}
	tests := []struct {
		name    string
		args    args
		want    map[string]string
		wantErr error
	}{
		{
			name: "valid tags within limit",
			args: args{
				data: s3response.TaggingInput{
					TagSet: s3response.TagSet{
						Tags: []s3response.Tag{
							{Key: "key1", Value: "value1"},
							{Key: "key2", Value: "value2"},
						},
					},
				},
				limit: TagLimitObject,
			},
			want:    map[string]string{"key1": "value1", "key2": "value2"},
			wantErr: nil,
		},
		{
			name: "malformed XML",
			args: args{
				overrideXML: []byte("invalid xml"),
				limit:       TagLimitObject,
			},
			want:    nil,
			wantErr: s3err.GetAPIError(s3err.ErrMalformedXML),
		},
		{
			name: "exceeds bucket tag limit",
			args: args{
				data:  getTagSet(51),
				limit: TagLimitBucket,
			},
			want:    nil,
			wantErr: s3err.GetAPIError(s3err.ErrBucketTaggingLimited),
		},
		{
			name: "exceeds object tag limit",
			args: args{
				data:  getTagSet(11),
				limit: TagLimitObject,
			},
			want:    nil,
			wantErr: s3err.GetAPIError(s3err.ErrObjectTaggingLimited),
		},
		{
			name: "invalid 0 length tag key",
			args: args{
				data: s3response.TaggingInput{
					TagSet: s3response.TagSet{
						Tags: []s3response.Tag{{Key: "", Value: "value1"}},
					},
				},
				limit: TagLimitObject,
			},
			want:    nil,
			wantErr: s3err.GetAPIError(s3err.ErrInvalidTagKey),
		},
		{
			name: "invalid long tag key",
			args: args{
				data: s3response.TaggingInput{
					TagSet: s3response.TagSet{
						Tags: []s3response.Tag{{Key: genRandStr(130), Value: "value1"}},
					},
				},
				limit: TagLimitObject,
			},
			want:    nil,
			wantErr: s3err.GetAPIError(s3err.ErrInvalidTagKey),
		},
		{
			name: "invalid long tag value",
			args: args{
				data: s3response.TaggingInput{
					TagSet: s3response.TagSet{
						Tags: []s3response.Tag{{Key: "key", Value: genRandStr(257)}},
					},
				},
				limit: TagLimitBucket,
			},
			want:    nil,
			wantErr: s3err.GetAPIError(s3err.ErrInvalidTagValue),
		},
		{
			name: "duplicate tag key",
			args: args{
				data: s3response.TaggingInput{
					TagSet: s3response.TagSet{
						Tags: []s3response.Tag{
							{Key: "key", Value: "value1"},
							{Key: "key", Value: "value2"},
						},
					},
				},
				limit: TagLimitObject,
			},
			want:    nil,
			wantErr: s3err.GetAPIError(s3err.ErrDuplicateTagKey),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var data []byte
			if tt.args.overrideXML != nil {
				data = tt.args.overrideXML
			} else {
				var err error
				data, err = xml.Marshal(tt.args.data)
				if err != nil {
					t.Fatalf("error marshalling input: %v", err)
				}
			}
			got, err := ParseTagging(data, tt.args.limit)

			if !errors.Is(err, tt.wantErr) {
				t.Errorf("expected error %v, got %v", tt.wantErr, err)
			}
			if err == nil && !reflect.DeepEqual(got, tt.want) {
				t.Errorf("expected result %v, got %v", tt.want, got)
			}
		})
	}
}

s3err/s3err.go

@@ -17,7 +17,11 @@ package s3err

import (
	"bytes"
	"encoding/xml"
+	"fmt"
	"net/http"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// APIError structure

@@ -55,6 +59,11 @@ type ErrorCode int

const (
	ErrNone ErrorCode = iota
	ErrAccessDenied
+	ErrAnonymousRequest
+	ErrAnonymousCreateMp
+	ErrAnonymousCopyObject
+	ErrAnonymousPutBucketOwnership
+	ErrAnonymousGetBucketOwnership
	ErrMethodNotAllowed
	ErrBucketNotEmpty
	ErrVersionedBucketNotEmpty

@@ -74,10 +83,18 @@ const (
	ErrInvalidPart
	ErrEmptyParts
	ErrInvalidPartNumber
+	ErrInvalidPartOrder
+	ErrInvalidCompleteMpPartNumber
	ErrInternalError
	ErrInvalidCopyDest
	ErrInvalidCopySource
-	ErrInvalidTag
+	ErrInvalidCopySourceRange
+	ErrInvalidTagKey
+	ErrInvalidTagValue
+	ErrDuplicateTagKey
+	ErrBucketTaggingLimited
+	ErrObjectTaggingLimited
+	ErrInvalidURLEncodedTagging
	ErrAuthHeaderEmpty
	ErrSignatureVersionNotSupported
	ErrMalformedPOSTRequest

@@ -106,6 +123,7 @@ const (
	ErrSignatureTerminationStr
	ErrSignatureIncorrService
	ErrContentSHA256Mismatch
+	ErrMissingContentLength
	ErrInvalidAccessKeyID
	ErrRequestNotReadyYet
	ErrMissingDateHeader

@@ -123,6 +141,8 @@ const (
	ErrObjectLocked
	ErrPastObjectLockRetainDate
	ErrObjectLockInvalidRetentionPeriod
+	ErrInvalidLegalHoldStatus
+	ErrInvalidObjectLockMode
	ErrNoSuchBucketPolicy
	ErrBucketTaggingNotFound
	ErrObjectLockInvalidHeaders

@@ -136,10 +156,18 @@ const (
	ErrUnexpectedContent
	ErrMissingSecurityHeader
	ErrInvalidMetadataDirective
+	ErrInvalidTaggingDirective
	ErrKeyTooLong
	ErrInvalidVersionId
	ErrNoSuchVersion
	ErrSuspendedVersioningNotAllowed
+	ErrMultipleChecksumHeaders
+	ErrInvalidChecksumAlgorithm
+	ErrInvalidChecksumPart
+	ErrChecksumTypeWithAlgo
+	ErrInvalidChecksumHeader
+	ErrTrailerHeaderNotSupported
+	ErrBadRequest

	// Non-AWS errors
	ErrExistingObjectIsDirectory

@@ -164,6 +192,31 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "Access Denied.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrAnonymousRequest: {
		Code:           "AccessDenied",
		Description:    "Anonymous users cannot invoke this API. Please authenticate.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrAnonymousCreateMp: {
		Code:           "AccessDenied",
		Description:    "Anonymous users cannot initiate multipart uploads. Please authenticate.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrAnonymousCopyObject: {
		Code:           "AccessDenied",
		Description:    "Anonymous users cannot copy objects. Please authenticate.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrAnonymousPutBucketOwnership: {
		Code:           "AccessDenied",
		Description:    "s3:PutBucketOwnershipControls does not support Anonymous requests!",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrAnonymousGetBucketOwnership: {
		Code:           "AccessDenied",
		Description:    "s3:GetBucketOwnershipControls does not support Anonymous requests!",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrMethodNotAllowed: {
		Code:           "MethodNotAllowed",
		Description:    "The specified method is not allowed against this resource.",

@@ -264,6 +317,16 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "Part number must be an integer between 1 and 10000, inclusive.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidPartOrder: {
		Code:           "InvalidPartOrder",
		Description:    "The list of parts was not in ascending order. Parts must be ordered by part number.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidCompleteMpPartNumber: {
		Code:           "InvalidArgument",
		Description:    "PartNumber must be >= 1",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidCopyDest: {
		Code:        "InvalidRequest",
		Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.",

@@ -274,9 +337,39 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.",
		HTTPStatusCode: http.StatusBadRequest,
	},
-	ErrInvalidTag: {
+	ErrInvalidCopySourceRange: {
		Code:           "InvalidArgument",
-		Description:    "The Tag value you have provided is invalid",
+		Description:    "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidTagKey: {
		Code:           "InvalidTag",
		Description:    "The TagKey you have provided is invalid",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidTagValue: {
		Code:           "InvalidTag",
		Description:    "The TagValue you have provided is invalid",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrDuplicateTagKey: {
		Code:           "InvalidTag",
		Description:    "Cannot provide multiple Tags with the same key",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBucketTaggingLimited: {
		Code:           "BadRequest",
		Description:    "Bucket tag count cannot be greater than 50",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrObjectTaggingLimited: {
		Code:           "BadRequest",
		Description:    "Object tags cannot be greater than 10",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidURLEncodedTagging: {
		Code:           "InvalidArgument",
		Description:    "The header 'x-amz-tagging' shall be encoded as UTF-8 then URLEncoded URL query parameters without tag name duplicates.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrMalformedXML: {

@@ -424,6 +517,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "The provided 'x-amz-content-sha256' header does not match what was computed.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrMissingContentLength: {
		Code:           "MissingContentLength",
		Description:    "You must provide the Content-Length HTTP header.",
		HTTPStatusCode: http.StatusLengthRequired,
	},
	ErrMissingDateHeader: {
		Code:        "AccessDenied",
		Description: "AWS authentication requires a valid Date or x-amz-date header.",

@@ -456,7 +554,7 @@ var errorCodeResponse = map[ErrorCode]APIError{
	},
	ErrInvalidRange: {
		Code:           "InvalidRange",
-		Description:    "The requested range is not valid for the request. Try another range.",
+		Description:    "The requested range is not satisfiable",
		HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
	},
	ErrInvalidURI: {

@@ -499,6 +597,16 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "the retention days/years must be positive integer.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidLegalHoldStatus: {
		Code:           "InvalidArgument",
		Description:    "Legal Hold must be either of 'ON' or 'OFF'",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidObjectLockMode: {
		Code:           "InvalidArgument",
		Description:    "Unknown wormMode directive.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrNoSuchBucketPolicy: {
		Code:        "NoSuchBucketPolicy",
		Description: "The bucket policy does not exist.",

@@ -564,6 +672,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "Unknown metadata directive.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidTaggingDirective: {
		Code:           "InvalidArgument",
		Description:    "Unknown tagging directive.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidVersionId: {
		Code:        "InvalidArgument",
		Description: "Invalid version id specified",

@@ -584,6 +697,41 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "An Object Lock configuration is present on this bucket, so the versioning state cannot be changed.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrMultipleChecksumHeaders: {
		Code:           "InvalidRequest",
		Description:    "Expecting a single x-amz-checksum- header. Multiple checksum Types are not allowed.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidChecksumAlgorithm: {
		Code:           "InvalidRequest",
		Description:    "Checksum algorithm provided is unsupported. Please try again with any of the valid types: [CRC32, CRC32C, SHA1, SHA256]",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidChecksumPart: {
		Code:           "InvalidArgument",
		Description:    "Invalid Base64 or multiple checksums present in request",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrChecksumTypeWithAlgo: {
		Code:           "InvalidRequest",
		Description:    "The x-amz-checksum-type header can only be used with the x-amz-checksum-algorithm header.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidChecksumHeader: {
		Code:           "InvalidRequest",
		Description:    "The algorithm type you specified in x-amz-checksum- header is invalid.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrTrailerHeaderNotSupported: {
		Code:           "InvalidRequest",
		Description:    "The value specified in the x-amz-trailer header is not supported",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBadRequest: {
		Code:           "400",
		Description:    "Bad Request",
		HTTPStatusCode: http.StatusBadRequest,
	},

	// non aws errors
	ErrExistingObjectIsDirectory: {

@@ -678,3 +826,80 @@ func encodeResponse(response interface{}) []byte {
	e.Encode(response)
	return bytesBuffer.Bytes()
}

// Returns invalid checksum error with the provided header in the error description
func GetInvalidChecksumHeaderErr(header string) APIError {
	return APIError{
		Code:           "InvalidRequest",
		Description:    fmt.Sprintf("Value for %v header is invalid.", header),
		HTTPStatusCode: http.StatusBadRequest,
	}
}

func GetInvalidTrailingChecksumHeaderErr(header string) APIError {
	return APIError{
		Code:           "InvalidRequest",
		Description:    fmt.Sprintf("Value for %v trailing header is invalid.", header),
		HTTPStatusCode: http.StatusBadRequest,
	}
}

// Returns checksum type mismatch APIError
func GetChecksumTypeMismatchErr(expected, actual types.ChecksumAlgorithm) APIError {
	return APIError{
		Code:           "InvalidRequest",
		Description:    fmt.Sprintf("Checksum Type mismatch occurred, expected checksum Type: %v, actual checksum Type: %v", expected, actual),
		HTTPStatusCode: http.StatusBadRequest,
	}
}

// Returns incorrect checksum APIError
func GetChecksumBadDigestErr(algo types.ChecksumAlgorithm) APIError {
	return APIError{
		Code:           "BadDigest",
		Description:    fmt.Sprintf("The %v you specified did not match the calculated checksum.", algo),
		HTTPStatusCode: http.StatusBadRequest,
	}
}

// Returns checksum type/algorithm mismatch APIError
func GetChecksumSchemaMismatchErr(algo types.ChecksumAlgorithm, t types.ChecksumType) APIError {
	return APIError{
		Code:           "InvalidRequest",
		Description:    fmt.Sprintf("The %v checksum type cannot be used with the %v checksum algorithm.", t, strings.ToLower(string(algo))),
		HTTPStatusCode: http.StatusBadRequest,
	}
}

// Returns checksum type mismatch error for multipart uploads
func GetChecksumTypeMismatchOnMpErr(t types.ChecksumType) APIError {
	return APIError{
		Code:           "InvalidRequest",
		Description:    fmt.Sprintf("The upload was created using the %v checksum mode. The complete request must use the same checksum mode.", t),
		HTTPStatusCode: http.StatusBadRequest,
	}
}

func GetIncorrectMpObjectSizeErr(expected, actual int64) APIError {
	return APIError{
		Code:           "InvalidRequest",
		Description:    fmt.Sprintf("The provided 'x-amz-mp-object-size' header value %v does not match what was computed: %v", expected, actual),
		HTTPStatusCode: http.StatusBadRequest,
	}
}

func GetInvalidMpObjectSizeErr(val int64) APIError {
	return APIError{
		Code:           "InvalidRequest",
		Description:    fmt.Sprintf("Value for x-amz-mp-object-size header is less than zero: '%v'", val),
		HTTPStatusCode: http.StatusBadRequest,
	}
}

func CreateExceedingRangeErr(objSize int64) APIError {
	return APIError{
		Code:           "InvalidArgument",
		Description:    fmt.Sprintf("Range specified is not valid for source object of size: %d", objSize),
		HTTPStatusCode: http.StatusBadRequest,
	}
}

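A sketch of how a handler surfaces one of these helpers (hypothetical call site; `computed` and `provided` stand in for the actual digest values):

	if computed != provided {
		return s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmSha256)
		// -> 400 BadDigest: "The SHA256 you specified did not match the calculated checksum."
	}
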
@@ -22,6 +22,7 @@ import (

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/auth"
+	"github.com/versity/versitygw/s3api/utils"
)

type S3EventSender interface {

@@ -141,15 +142,20 @@ func InitEventSender(cfg *EventConfig) (S3EventSender, error) {

func createEventSchema(ctx *fiber.Ctx, meta EventMeta, configId ConfigurationId) EventSchema {
	path := strings.Split(ctx.Path(), "/")
-	bucket, object := path[1], strings.Join(path[2:], "/")
-	acc := ctx.Locals("account").(auth.Account)
+
+	var bucket, object string
+	if len(path) > 1 {
+		bucket, object = path[1], strings.Join(path[2:], "/")
+	}
+
+	acc := utils.ContextKeyAccount.Get(ctx).(auth.Account)

	return EventSchema{
		Records: []EventRecord{
			{
				EventVersion: "2.2",
				EventSource:  "aws:s3",
-				AwsRegion:    ctx.Locals("region").(string),
+				AwsRegion:    utils.ContextKeyRegion.Get(ctx).(string),
				EventTime:    time.Now().Format(time.RFC3339),
				EventName:    meta.EventName,
				UserIdentity: EventUserIdentity{

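Why the new length guard matters, in two lines of plain strings.Split behavior:

	strings.Split("/bucket/key", "/") // ["", "bucket", "key"]  -> path[1] is safe
	strings.Split("", "/")            // [""]                   -> path[1] would panic
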
@@ -24,6 +24,7 @@ import (

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/auth"
+	"github.com/versity/versitygw/s3api/utils"
	"github.com/versity/versitygw/s3err"
)

@@ -68,10 +69,16 @@ func (f *FileLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMeta) {
	access := "-"
	reqURI := ctx.OriginalURL()
	path := strings.Split(ctx.Path(), "/")
-	bucket, object := path[1], strings.Join(path[2:], "/")
+	var bucket, object string
+	if len(path) > 1 {
+		bucket, object = path[1], strings.Join(path[2:], "/")
+	}
	errorCode := ""
	httpStatus := 200
-	startTime := ctx.Locals("startTime").(time.Time)
+	startTime, ok := utils.ContextKeyStartTime.Get(ctx).(time.Time)
+	if !ok {
+		startTime = time.Now()
+	}
	tlsConnState := ctx.Context().TLSConnectionState()
	if tlsConnState != nil {
		lf.CipherSuite = tls.CipherSuiteName(tlsConnState.CipherSuite)

@@ -89,9 +96,9 @@ func (f *FileLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMeta) {
		}
	}

-	switch ctx.Locals("account").(type) {
-	case auth.Account:
-		access = ctx.Locals("account").(auth.Account).Access
+	acct, ok := utils.ContextKeyAccount.Get(ctx).(auth.Account)
+	if ok {
+		access = acct.Access
	}

	lf.BucketOwner = meta.BucketOwner

@@ -115,7 +122,7 @@ func (f *FileLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMeta) {
	lf.HostID = ctx.Get("X-Amz-Id-2")
	lf.SignatureVersion = "SigV4"
	lf.AuthenticationType = "AuthHeader"
-	lf.HostHeader = fmt.Sprintf("s3.%v.amazonaws.com", ctx.Locals("region").(string))
+	lf.HostHeader = fmt.Sprintf("s3.%v.amazonaws.com", utils.ContextKeyRegion.Get(ctx).(string))
	lf.AccessPointARN = fmt.Sprintf("arn:aws:s3:::%v", strings.Join(path, "/"))
	lf.AclRequired = "Yes"

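The typed-locals helper these call sites migrate to is roughly shaped like the sketch below (hypothetical; the real definitions live in s3api/utils and may differ):

	type ContextKey string

	const (
		ContextKeyAccount   ContextKey = "account"
		ContextKeyRegion    ContextKey = "region"
		ContextKeyStartTime ContextKey = "startTime"
	)

	// Get wraps ctx.Locals so call sites share one set of key constants
	// instead of scattering raw strings.
	func (k ContextKey) Get(ctx *fiber.Ctx) any {
		return ctx.Locals(string(k))
	}
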
@@ -22,6 +22,7 @@ import (

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/auth"
+	"github.com/versity/versitygw/s3api/utils"
)

// FileLogger is a local file audit log

@@ -57,7 +58,10 @@ func (f *AdminFileLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMe
	access := "-"
	reqURI := ctx.OriginalURL()
	errorCode := ""
-	startTime := ctx.Locals("startTime").(time.Time)
+	startTime, ok := utils.ContextKeyStartTime.Get(ctx).(time.Time)
+	if !ok {
+		startTime = time.Now()
+	}
	tlsConnState := ctx.Context().TLSConnectionState()
	if tlsConnState != nil {
		lf.CipherSuite = tls.CipherSuiteName(tlsConnState.CipherSuite)

@@ -68,9 +72,9 @@ func (f *AdminFileLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMe
		errorCode = err.Error()
	}

-	switch ctx.Locals("account").(type) {
+	switch utils.ContextKeyAccount.Get(ctx).(type) {
	case auth.Account:
-		access = ctx.Locals("account").(auth.Account).Access
+		access = utils.ContextKeyAccount.Get(ctx).(auth.Account).Access
	}

	lf.Time = time.Now()

@@ -28,6 +28,7 @@ import (

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/auth"
+	"github.com/versity/versitygw/s3api/utils"
	"github.com/versity/versitygw/s3err"
)

@@ -65,10 +66,16 @@ func (wl *WebhookLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMet
	access := "-"
	reqURI := ctx.OriginalURL()
	path := strings.Split(ctx.Path(), "/")
-	bucket, object := path[1], strings.Join(path[2:], "/")
+	var bucket, object string
+	if len(path) > 1 {
+		bucket, object = path[1], strings.Join(path[2:], "/")
+	}
	errorCode := ""
	httpStatus := 200
-	startTime := ctx.Locals("startTime").(time.Time)
+	startTime, ok := utils.ContextKeyStartTime.Get(ctx).(time.Time)
+	if !ok {
+		startTime = time.Now()
+	}
	tlsConnState := ctx.Context().TLSConnectionState()
	if tlsConnState != nil {
		lf.CipherSuite = tls.CipherSuiteName(tlsConnState.CipherSuite)

@@ -86,9 +93,9 @@ func (wl *WebhookLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMet
		}
	}

-	switch ctx.Locals("account").(type) {
-	case auth.Account:
-		access = ctx.Locals("account").(auth.Account).Access
+	acct, ok := utils.ContextKeyAccount.Get(ctx).(auth.Account)
+	if ok {
+		access = acct.Access
	}

	lf.BucketOwner = meta.BucketOwner

@@ -112,7 +119,7 @@ func (wl *WebhookLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMet
	lf.HostID = ctx.Get("X-Amz-Id-2")
	lf.SignatureVersion = "SigV4"
	lf.AuthenticationType = "AuthHeader"
-	lf.HostHeader = fmt.Sprintf("s3.%v.amazonaws.com", ctx.Locals("region").(string))
+	lf.HostHeader = fmt.Sprintf("s3.%v.amazonaws.com", utils.ContextKeyRegion.Get(ctx).(string))
	lf.AccessPointARN = fmt.Sprintf("arn:aws:s3:::%v", strings.Join(path, "/"))
	lf.AclRequired = "Yes"

@@ -16,6 +16,7 @@ package s3response

import (
	"encoding/xml"
+	"io"
	"time"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"

@@ -29,16 +30,27 @@ const (
)

type PutObjectOutput struct {
	ETag              string
	VersionID         string
+	ChecksumCRC32     *string
+	ChecksumCRC32C    *string
+	ChecksumSHA1      *string
+	ChecksumSHA256    *string
+	ChecksumCRC64NVME *string
+	ChecksumType      types.ChecksumType
}

// Part describes part metadata.
type Part struct {
	PartNumber        int
	LastModified      time.Time
	ETag              string
	Size              int64
+	ChecksumCRC32     *string
+	ChecksumCRC32C    *string
+	ChecksumSHA1      *string
+	ChecksumSHA256    *string
+	ChecksumCRC64NVME *string
}

func (p Part) MarshalXML(e *xml.Encoder, start xml.StartElement) error {

@@ -50,7 +62,7 @@ func (p Part) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
		Alias: (*Alias)(&p),
	}

-	aux.LastModified = p.LastModified.UTC().Format(iso8601TimeFormat)
+	aux.LastModified = p.LastModified.UTC().Format(time.RFC3339)

	return e.EncodeElement(aux, start)
}

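For orientation: the marshalers in this file all use the same alias trick. `type Alias Part` copies Part's fields but drops its method set, so EncodeElement on the aux struct cannot recurse back into Part.MarshalXML, while the shadowing string field overrides LastModified with the RFC 3339 rendering. A sketch of the resulting XML (element names inferred from the field names; sample values):

	<Part>
	  <PartNumber>1</PartNumber>
	  <LastModified>2026-01-27T13:32:02Z</LastModified>
	  <ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>
	  <Size>5242880</Size>
	</Part>
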
@@ -59,9 +71,11 @@ func (p Part) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type ListPartsResult struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult" json:"-"`

	Bucket            string
	Key               string
	UploadID          string `xml:"UploadId"`
+	ChecksumAlgorithm types.ChecksumAlgorithm
+	ChecksumType      types.ChecksumType

	Initiator Initiator
	Owner     Owner

@@ -101,6 +115,7 @@ type GetObjectAttributesResponse struct {
	ObjectSize   *int64
	StorageClass types.StorageClass `xml:",omitempty"`
	ObjectParts  *ObjectParts
+	Checksum     *types.Checksum

	// Not included in the response body
	VersionId *string

@@ -157,7 +172,7 @@ type ListObjectsV2Result struct {
	Name                  *string
	Prefix                *string
	StartAfter            *string
-	ContinuationToken     *string
+	ContinuationToken     *string `xml:"ContinuationToken,omitempty"`
	NextContinuationToken *string
	KeyCount              *int32
	MaxKeys               *int32

@@ -169,27 +184,28 @@ type ListObjectsV2Result struct {
}

type Object struct {
+	ChecksumAlgorithm []types.ChecksumAlgorithm
+	ChecksumType      types.ChecksumType
	ETag              *string
	Key               *string
	LastModified      *time.Time
	Owner             *types.Owner
	RestoreStatus     *types.RestoreStatus
	Size              *int64
	StorageClass      types.ObjectStorageClass
}

func (o Object) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	type Alias Object
	aux := &struct {
-		LastModified *string `xml:"LastModified,omitempty"`
+		LastModified string `xml:"LastModified,omitempty"`
		*Alias
	}{
		Alias: (*Alias)(&o),
	}

	if o.LastModified != nil {
-		formattedTime := o.LastModified.UTC().Format(iso8601TimeFormat)
-		aux.LastModified = &formattedTime
+		aux.LastModified = o.LastModified.UTC().Format(time.RFC3339)
	}

	return e.EncodeElement(aux, start)

@@ -197,12 +213,14 @@ func (o Object) MarshalXML(e *xml.Encoder, start xml.StartElement) error {

// Upload describes in progress multipart upload
type Upload struct {
	Key               string
	UploadID          string `xml:"UploadId"`
	Initiator         Initiator
	Owner             Owner
	StorageClass      types.StorageClass
	Initiated         time.Time
+	ChecksumAlgorithm types.ChecksumAlgorithm
+	ChecksumType      types.ChecksumType
}

func (u Upload) MarshalXML(e *xml.Encoder, start xml.StartElement) error {

@@ -214,7 +232,7 @@ func (u Upload) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
		Alias: (*Alias)(&u),
	}

-	aux.Initiated = u.Initiated.UTC().Format(iso8601TimeFormat)
+	aux.Initiated = u.Initiated.UTC().Format(time.RFC3339)

	return e.EncodeElement(aux, start)
}

@@ -311,7 +329,7 @@ func (r ListAllMyBucketsEntry) MarshalXML(e *xml.Encoder, start xml.StartElement
		Alias: (*Alias)(&r),
	}

-	aux.CreationDate = r.CreationDate.UTC().Format(iso8601TimeFormat)
+	aux.CreationDate = r.CreationDate.UTC().Format(time.RFC3339)

	return e.EncodeElement(aux, start)
}

@@ -325,27 +343,89 @@ type CanonicalUser struct {
	DisplayName string
}

type CopyObjectOutput struct {
	BucketKeyEnabled        *bool
	CopyObjectResult        *CopyObjectResult
	CopySourceVersionId     *string
	Expiration              *string
	SSECustomerAlgorithm    *string
	SSECustomerKeyMD5       *string
	SSEKMSEncryptionContext *string
	SSEKMSKeyId             *string
	ServerSideEncryption    types.ServerSideEncryption
	VersionId               *string
}

type CopyObjectResult struct {
-	XMLName             xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult" json:"-"`
-	LastModified        time.Time
-	ETag                string
-	CopySourceVersionId string `xml:"-"`
+	XMLName           xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult" json:"-"`
+	ChecksumCRC32     *string
+	ChecksumCRC32C    *string
+	ChecksumCRC64NVME *string
+	ChecksumSHA1      *string
+	ChecksumSHA256    *string
+	ChecksumType      types.ChecksumType
+	ETag              *string
+	LastModified      *time.Time
}

func (r CopyObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	type Alias CopyObjectResult
	aux := &struct {
-		LastModified string `xml:"LastModified"`
+		LastModified string `xml:"LastModified,omitempty"`
		*Alias
	}{
		Alias: (*Alias)(&r),
	}

-	aux.LastModified = r.LastModified.UTC().Format(iso8601TimeFormat)
+	if r.LastModified != nil {
+		aux.LastModified = r.LastModified.UTC().Format(time.RFC3339)
+	}

	return e.EncodeElement(aux, start)
}

type CopyPartResult struct {
	XMLName           xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyPartResult" json:"-"`
	LastModified      time.Time
	ETag              *string
	ChecksumCRC32     *string
	ChecksumCRC32C    *string
	ChecksumSHA1      *string
	ChecksumSHA256    *string
	ChecksumCRC64NVME *string

	// not included in the body
	CopySourceVersionId string `xml:"-"`
}

func (r CopyPartResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	type Alias CopyPartResult
	aux := &struct {
		LastModified string `xml:"LastModified,omitempty"`
		*Alias
	}{
		Alias: (*Alias)(&r),
	}
	if !r.LastModified.IsZero() {
		aux.LastModified = r.LastModified.UTC().Format(time.RFC3339)
	}

	return e.EncodeElement(aux, start)
}

type CompleteMultipartUploadResult struct {
	XMLName           xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult" json:"-"`
	Location          *string
	Bucket            *string
	Key               *string
	ETag              *string
+	ChecksumCRC32     *string
+	ChecksumCRC32C    *string
+	ChecksumSHA1      *string
+	ChecksumSHA256    *string
+	ChecksumCRC64NVME *string
+	ChecksumType      *types.ChecksumType
}

type AccessControlPolicy struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlPolicy" json:"-"`
	Owner   CanonicalUser

@@ -400,7 +480,37 @@ type ListVersionsResult struct {
	NextVersionIdMarker *string
	Prefix              *string
	VersionIdMarker     *string
-	Versions            []types.ObjectVersion `xml:"Version"`
+	Versions            []ObjectVersion `xml:"Version"`
}

type ObjectVersion struct {
	ChecksumAlgorithm []types.ChecksumAlgorithm
	ChecksumType      types.ChecksumType
	ETag              *string
	IsLatest          *bool
	Key               *string
	LastModified      *time.Time
	Owner             *types.Owner
	RestoreStatus     *types.RestoreStatus
	Size              *int64
	StorageClass      types.ObjectVersionStorageClass
	VersionId         *string
}

func (o ObjectVersion) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	type Alias ObjectVersion
	aux := &struct {
		LastModified string `xml:"LastModified"`
		*Alias
	}{
		Alias: (*Alias)(&o),
	}

	if o.LastModified != nil {
		aux.LastModified = o.LastModified.UTC().Format(time.RFC3339)
	}

	return e.EncodeElement(aux, start)
}

type GetBucketVersioningOutput struct {

@@ -415,6 +525,134 @@ type PutObjectRetentionInput struct {
	RetainUntilDate AmzDate
}

type PutObjectInput struct {
	ContentLength             *int64
	ObjectLockRetainUntilDate *time.Time

	Bucket                  *string
	Key                     *string
	ContentType             *string
	ContentEncoding         *string
	ContentDisposition      *string
	ContentLanguage         *string
	CacheControl            *string
	Expires                 *string
	Tagging                 *string
	ChecksumCRC32           *string
	ChecksumCRC32C          *string
	ChecksumSHA1            *string
	ChecksumSHA256          *string
	ChecksumCRC64NVME       *string
	ContentMD5              *string
	ExpectedBucketOwner     *string
	GrantFullControl        *string
	GrantRead               *string
	GrantReadACP            *string
	GrantWriteACP           *string
	IfMatch                 *string
	IfNoneMatch             *string
	SSECustomerAlgorithm    *string
	SSECustomerKey          *string
	SSECustomerKeyMD5       *string
	SSEKMSEncryptionContext *string
	SSEKMSKeyId             *string
	WebsiteRedirectLocation *string

	ObjectLockMode            types.ObjectLockMode
	ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
	ChecksumAlgorithm         types.ChecksumAlgorithm

	Metadata map[string]string
	Body     io.Reader
}

type CreateMultipartUploadInput struct {
	Bucket                    *string
	Key                       *string
	ExpectedBucketOwner       *string
	CacheControl              *string
	ContentDisposition        *string
	ContentEncoding           *string
	ContentLanguage           *string
	ContentType               *string
	Expires                   *string
	SSECustomerAlgorithm      *string
	SSECustomerKey            *string
	SSECustomerKeyMD5         *string
	SSEKMSEncryptionContext   *string
	SSEKMSKeyId               *string
	GrantFullControl          *string
	GrantRead                 *string
	GrantReadACP              *string
	GrantWriteACP             *string
	Tagging                   *string
	WebsiteRedirectLocation   *string
	BucketKeyEnabled          *bool
	ObjectLockRetainUntilDate *time.Time
	Metadata                  map[string]string

	ACL                       types.ObjectCannedACL
	ChecksumAlgorithm         types.ChecksumAlgorithm
	ChecksumType              types.ChecksumType
	ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
	ObjectLockMode            types.ObjectLockMode
	RequestPayer              types.RequestPayer
	ServerSideEncryption      types.ServerSideEncryption
	StorageClass              types.StorageClass
}

type CopyObjectInput struct {
	Metadata                       map[string]string
	Bucket                         *string
	CopySource                     *string
	Key                            *string
	CacheControl                   *string
	ContentDisposition             *string
	ContentEncoding                *string
	ContentLanguage                *string
	ContentType                    *string
	CopySourceIfMatch              *string
	CopySourceIfNoneMatch          *string
	CopySourceSSECustomerAlgorithm *string
	CopySourceSSECustomerKey       *string
	CopySourceSSECustomerKeyMD5    *string
	ExpectedBucketOwner            *string
	ExpectedSourceBucketOwner      *string
	Expires                        *string
	GrantFullControl               *string
	GrantRead                      *string
	GrantReadACP                   *string
	GrantWriteACP                  *string
	SSECustomerAlgorithm           *string
	SSECustomerKey                 *string
	SSECustomerKeyMD5              *string
	SSEKMSEncryptionContext        *string
	SSEKMSKeyId                    *string
	Tagging                        *string
	WebsiteRedirectLocation        *string

	CopySourceIfModifiedSince   *time.Time
	CopySourceIfUnmodifiedSince *time.Time
	ObjectLockRetainUntilDate   *time.Time

	BucketKeyEnabled *bool

	ACL                       types.ObjectCannedACL
	ChecksumAlgorithm         types.ChecksumAlgorithm
	MetadataDirective         types.MetadataDirective
	ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
	ObjectLockMode            types.ObjectLockMode
	RequestPayer              types.RequestPayer
	ServerSideEncryption      types.ServerSideEncryption
	StorageClass              types.StorageClass
	TaggingDirective          types.TaggingDirective
}

type GetObjectLegalHoldResult struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LegalHold"`
	Status  types.ObjectLockLegalHoldStatus
}

type AmzDate struct {
	time.Time
}

@@ -467,3 +705,14 @@ func (AmzDate) ISO8601Parse(date string) (t time.Time, err error) {
type ListBucketsResult struct {
	Buckets []Bucket
}

type Checksum struct {
	Algorithm types.ChecksumAlgorithm
	Type      types.ChecksumType

	CRC32     *string
	CRC32C    *string
	SHA1      *string
	SHA256    *string
	CRC64NVME *string
}
@@ -25,5 +25,8 @@ USERNAME_TWO=HIJKLMN
PASSWORD_TWO=OPQRSTU
TEST_FILE_FOLDER=$PWD/versity-gwtest-files
RECREATE_BUCKETS=true
DELETE_BUCKETS_AFTER_TEST=true
REMOVE_TEST_FILE_FOLDER=true
AUTOGENERATE_USERS=true
USER_AUTOGENERATION_PREFIX=versitygw-docker-
VERSIONING_DIR=/tmp/versioning
@@ -21,6 +21,7 @@ RUN apt-get update && \
    jq \
    bc \
    libxml2-utils \
    xmlstarlet \
    ca-certificates && \
    update-ca-certificates && \
    rm -rf /var/lib/apt/lists/*
@@ -110,6 +110,11 @@ A single instance can be run with `docker-compose -f docker-compose-bats.yml up

**ACL_AWS_ACCESS_KEY_ID**, **ACL_AWS_ACCESS_SECRET_KEY**: for direct mode, the ID and key for the S3 user in the **ACL_AWS_CANONICAL_ID** account.

**USER_ID_{role}_{id}**, **USERNAME_{role}_{id}**, **PASSWORD_{role}_{id}**: for setup_user_v2 non-autocreated users, the ID, username, and password entries for each pre-created user (see the sketch below).
* example: USER_ID_USER_1={name}: the user ID corresponding to the first user with **user** permissions in the test.

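A minimal sketch of what these entries might look like in the secrets file when **AUTOGENERATE_USERS** is **false**; all names and values here are illustrative, not taken from the project:

    # hypothetical pre-created setup_user_v2 account with "user" permissions
    USER_ID_USER_1=static-test-user
    USERNAME_USER_1=static-test-user
    PASSWORD_USER_1=static-test-password
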
####

### Non-Secret

**VERSITY_EXE**: location of the versity executable, relative to the test folder.

@@ -146,11 +151,13 @@ A single instance can be run with `docker-compose -f docker-compose-bats.yml up

**DIRECT**: if **true**, bypass versitygw and run directly against S3 (for comparison and validity-checking purposes).

**DIRECT_DISPLAY_NAME**: username if **DIRECT** is set to **true**.
**DIRECT_DISPLAY_NAME**: the AWS ACL main user's display name if **DIRECT** is set to **true**.

**DIRECT_AWS_USER_ID**: the 12-digit AWS user ID used in policies if **DIRECT** is set to **true**.

**COVERAGE_DB**: database in which to store client command coverage info and usage counts, if used.

**USERNAME_ONE**, **PASSWORD_ONE**, **USERNAME_TWO**, **PASSWORD_TWO**: credentials for users created and tested for non-root user **versitygw** operations.
**USERNAME_ONE**, **PASSWORD_ONE**, **USERNAME_TWO**, **PASSWORD_TWO**: setup_user (v1) only; credentials for the users created and tested in non-root **versitygw** operations (not used by setup_user_v2).

**TEST_FILE_FOLDER**: where to put temporary test files.

@@ -162,10 +169,22 @@ A single instance can be run with `docker-compose -f docker-compose-bats.yml up

**TIME_LOG**: optional log showing the duration of individual tests.

**DIRECT_S3_ROOT_ACCOUNT_NAME**: for direct mode, S3 username
**DIRECT_S3_ROOT_ACCOUNT_NAME**: for direct mode, the S3 username of the user with root permissions.

**DELETE_BUCKETS_AFTER_TEST**: whether or not to delete buckets after individual tests; setting this to **false** is useful for debugging when the post-test bucket state needs to be checked.

**AUTOGENERATE_USERS**: setup_user_v2, whether or not to autocreate users for tests. If set to **false**, users must be pre-created (see the `Secret` section above).

**USER_AUTOGENERATION_PREFIX**: setup_user_v2, if **AUTOGENERATE_USERS** is set to **true**, the prefix for autocreated usernames.

**CREATE_STATIC_USERS_IF_NONEXISTENT**: setup_user_v2, if **AUTOGENERATE_USERS** is set to **false**, create the statically configured users when they don't already exist, but don't delete them afterward the way autogenerated users are deleted.

**DIRECT_POST_COMMAND_DELAY**: in direct mode, the time to wait before sending new commands, to avoid propagation-delay issues.

**SKIP_ACL_TESTING**: skip ACL tests on systems that do not use ACLs.

**MAX_FILE_DOWNLOAD_CHUNK_SIZE**: when set, divides the download of large files with GetObject into chunks of the given size. Useful for direct testing with slower connections (see the sketch below).

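A minimal sketch of the chunking idea, assuming the `aws` CLI is available; the variable names and the use of `aws s3api` here are illustrative, not the test suite's actual implementation:

    # download "$key" from "$bucket" in chunk-sized ranged GetObject calls,
    # appending each chunk to "$outfile"
    chunk_size="${MAX_FILE_DOWNLOAD_CHUNK_SIZE:-1048576}"
    size=$(aws s3api head-object --bucket "$bucket" --key "$key" \
      --query ContentLength --output text)
    : > "$outfile"
    start=0
    while [ "$start" -lt "$size" ]; do
      end=$((start + chunk_size - 1))
      [ "$end" -ge "$size" ] && end=$((size - 1))
      aws s3api get-object --bucket "$bucket" --key "$key" \
        --range "bytes=$start-$end" "$outfile.part" > /dev/null
      cat "$outfile.part" >> "$outfile"
      start=$((end + 1))
    done
    rm -f "$outfile.part"
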
## REST Scripts

REST scripts are included for calls to S3's REST API in the `./tests/rest_scripts/` folder. To call a script, the following parameters are needed:

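A minimal sketch of one such invocation, mirroring the call made by the abort_multipart_upload_rest helper shown below; the bucket, key, and upload ID values are illustrative:

    COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="my-bucket" OBJECT_KEY="my-key" \
      UPLOAD_ID="$upload_id" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" \
      ./tests/rest_scripts/abort_multipart_upload.sh
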
@@ -27,6 +27,21 @@ abort_multipart_upload() {
  return 0
}

abort_multipart_upload_rest() {
  if ! check_param_count "abort_multipart_upload_rest" "bucket, key, upload ID" 3 $#; then
    return 1
  fi
  # delegate to the REST script, which echoes the HTTP response code
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" UPLOAD_ID="$3" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/abort_multipart_upload.sh); then
    log 2 "error aborting multipart upload: $result"
    return 1
  fi
  if [ "$result" != "204" ]; then
    log 2 "expected '204' response, actual was '$result' (error: $(cat "$TEST_FILE_FOLDER/result.txt"))"
    return 1
  fi
  return 0
}
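
A usage sketch for the helper above; the bucket, key, and upload ID variables are illustrative:

    if ! abort_multipart_upload_rest "$bucket_name" "$key" "$upload_id"; then
      log 2 "error aborting multipart upload"
      return 1
    fi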

abort_multipart_upload_with_user() {
  if [ $# -ne 5 ]; then
    log 2 "'abort multipart upload' command requires bucket, key, upload ID, username, password"