Mirror of https://github.com/versity/versitygw.git (synced 2026-01-25 20:42:02 +00:00)

Compare commits
268 Commits

| SHA1 |
|---|
| 01fc142c1e |
| ca2dd9b4b3 |
| 10152cefbc |
| 948b424ed2 |
| d2996e1131 |
| 2489d876c9 |
| a69f5a4db7 |
| df31eb031a |
| b70be6116e |
| e08539e909 |
| f78483a938 |
| cb1d469742 |
| 792a3eb2c5 |
| 252090d9e9 |
| 8569b158f0 |
| 45b6a4a74e |
| b576ed87c5 |
| 0ba5cbe8b9 |
| a4d341fc4e |
| 6c564febb9 |
| 0c520a30cf |
| 935e322764 |
| f6225aa968 |
| 1d30567129 |
| bfc753b302 |
| 86e2b02e55 |
| 2cf8610831 |
| 8e3e633a24 |
| 12092cf297 |
| 75cae81f0a |
| 68d7924afa |
| e37dfa6aaf |
| 04f8946798 |
| 43fd18b069 |
| eb72d3c6e8 |
| 43559e646e |
| 6e11e3350c |
| c0e6a08e1e |
| 3866476257 |
| d45cfa2663 |
| 7a26aec685 |
| 2a7e76a44f |
| 5979e056e1 |
| 2a23686c87 |
| f9e903aaf4 |
| 06f4f0ac15 |
| abbd6697d1 |
| 6198bf4b53 |
| d05d29010d |
| b1e9dead5d |
| bf5b0b85d8 |
| 2561ef9708 |
| b78d21c3db |
| 0cab42d9fe |
| 12f0b5c43c |
| e81b87f71c |
| ff00e42538 |
| cf99b3e036 |
| c91e5dc3f2 |
| d446102f69 |
| f2a75708e4 |
| 6fd939386c |
| dff20b5b9d |
| 7a4dd59c81 |
| 6f74d2cddb |
| 8e0eec0201 |
| 0cfacfc049 |
| 6b017aa5cd |
| 841a012ce0 |
| 067de184a9 |
| 10ab569277 |
| 01552b78c7 |
| d0158420ee |
| c2c2306d37 |
| 841b3d61a4 |
| fa2e677370 |
| 9f6bf183f4 |
| 12e1308d1f |
| f235b62b70 |
| 06a45124b1 |
| a75aa9bad5 |
| 4cbd58cc66 |
| e5343cf611 |
| 0a2c7ac7cb |
| b1fed810a7 |
| 48b590fcb8 |
| f835ef1772 |
| d819fa8665 |
| 0240bb922c |
| 0b3722bd09 |
| 7c454d230e |
| 981a34e9d5 |
| 657b9ac046 |
| 61308d2fbf |
| 8d16bff8ce |
| 35596b38ae |
| 39ee175484 |
| edac345c23 |
| f467b896d8 |
| 5aa2a822e8 |
| eb6ffca21e |
| cc54aad003 |
| 807399459d |
| 0124398f10 |
| 5d8d054fdc |
| b15e03d154 |
| 4a31d0d5d2 |
| 2ab1cef407 |
| 9eaaeedd28 |
| 9fb039b878 |
| 8fc56208eb |
| cadd79139f |
| a5eabe257f |
| d507f206f3 |
| 30acb4b152 |
| d0ec284e05 |
| ef8bd1e74f |
| c6d2360e21 |
| fff6659214 |
| b29d6a0106 |
| 4f6d0ffb88 |
| 69e107efe9 |
| 84cae88bbb |
| 681c7a3fe4 |
| 7627debbf1 |
| 0a2a23d943 |
| 0afe6eb204 |
| dfe6abcb2e |
| aabf214841 |
| f631cd0364 |
| f4c848009e |
| a36747c372 |
| ce9693e554 |
| c58f9b20e0 |
| d861dc8e30 |
| 6b3a281673 |
| b57764e136 |
| 01b97cd9e3 |
| d7cbee7036 |
| f7c33de841 |
| 9c8e14d406 |
| 13810e227c |
| 55c94f4a7b |
| 3c2b4c6452 |
| 11bd58c39e |
| 30d8474b17 |
| 3a65521b48 |
| 5ac5705b72 |
| 1d0a1d8261 |
| ac0884a1dc |
| 3c3e9dd8b1 |
| 874e838dcc |
| 8a43d1cd18 |
| 4740372ce2 |
| 5226f0dc61 |
| 9f54a25519 |
| b629f5d707 |
| 371dccfde9 |
| 05f8225577 |
| 8466d06371 |
| eae11b44c5 |
| 12bfd4220b |
| fc03472d60 |
| 971ae7845d |
| 8bb4bcba63 |
| 72a4e40038 |
| a64733bfbe |
| 743cb03808 |
| 5c3cef65e2 |
| 8bb34b3b6e |
| 77459720ba |
| 59312f880f |
| fe9384164c |
| 8d2eeebce3 |
| c06463424a |
| efe4ccb5ec |
| a6e8752b33 |
| c3c39e4022 |
| 9a01185be9 |
| 7744dacced |
| 4345420e12 |
| d05f25f277 |
| d174819eac |
| 9bde1ddb3a |
| 1c488422bc |
| 8a733b8cbf |
| a93cf3f403 |
| 326de3b010 |
| 2a51b0cc70 |
| 8c3e49d0bb |
| 559d636846 |
| 045bdec60c |
| ee67b41a98 |
| ff973c279f |
| adbf8e138c |
| 12f4920c8d |
| d63b5818f1 |
| dff3eb0887 |
| 69a3483269 |
| d256ea5929 |
| ebf7a030cc |
| 2bf4ccd244 |
| 7e44a5e703 |
| b5bea5b659 |
| 2a4a0f000a |
| 2dd442c24d |
| 27dc84b5fd |
| 932f1c9da7 |
| 24679a82ac |
| fa2023c18f |
| bfe090df38 |
| 9c6a09260a |
| 6ea9950ead |
| 5bc6852f2c |
| d39685947d |
| af550c8f80 |
| d15d348226 |
| 66e29d4aa4 |
| cdc4358257 |
| ce6193b191 |
| 8bb22debad |
| 64f50cc504 |
| e2534afafe |
| 341d51107c |
| 707af47769 |
| 40da4a31d3 |
| 874165cdcf |
| e750cf9718 |
| c158dfeb0d |
| 4c3965d87e |
| 5c084b8452 |
| 78cf20075f |
| a4dc837f54 |
| a7d83b42fd |
| 54bd4ec841 |
| 9ae68076c1 |
| 13fdbaf35a |
| 45f55c2283 |
| 7aa733ae9e |
| bef297f6ad |
| 25cde72fa3 |
| 48f438b1e0 |
| a606e57bbd |
| 53dea3bb0d |
| 703c7cdc8b |
| 62ca9b6ff3 |
| 1ec629c38d |
| 5ef61af6f3 |
| fe660d5b9c |
| 068b04ec62 |
| 54e2c39df1 |
| caa7ca0f90 |
| dac2460eb3 |
| d3c56dbfc1 |
| 6cf3b93a83 |
| df74e7fde6 |
| 6a34f3a848 |
| 6b64783db7 |
| b5b823c47b |
| a057a254c1 |
| f435880fe8 |
| ebdda06633 |
| 221592fbab |
| 8c1327d1e8 |
| 70ebe00f7c |
| ca6a92bb84 |
| 51e54874a8 |
| 6176d9eb46 |
.github/dependabot.yml (vendored): 4 changes

```diff
@@ -12,3 +12,7 @@ updates:
     # Allow both direct and indirect updates for all packages
     - dependency-type: "all"
+
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
```
.github/workflows/azurite.yml (vendored): 4 changes

```diff
@@ -8,10 +8,10 @@ jobs:

     steps:
     - name: Checkout
-      uses: actions/checkout@v4
+      uses: actions/checkout@v6

     - name: Set up Go
-      uses: actions/setup-go@v5
+      uses: actions/setup-go@v6
       with:
         go-version: 'stable'
       id: go
```
.github/workflows/codeql.yml (vendored): new file, 108 lines

```yaml
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL Advanced"

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]
  schedule:
    - cron: '21 17 * * 2'

jobs:
  analyze:
    name: Analyze (${{ matrix.language }})
    # Runner size impacts CodeQL analysis time. To learn more, please see:
    #   - https://gh.io/recommended-hardware-resources-for-running-codeql
    #   - https://gh.io/supported-runners-and-hardware-resources
    #   - https://gh.io/using-larger-runners (GitHub.com only)
    # Consider using larger runners or machines with greater resources for possible analysis time improvements.
    runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
    permissions:
      # required for all workflows
      security-events: write

      # required to fetch internal or private CodeQL packs
      packages: read

      # only required for workflows in private repositories
      actions: read
      contents: read

    strategy:
      fail-fast: false
      matrix:
        include:
          - language: actions
            build-mode: none
          - language: go
            build-mode: autobuild
          - language: javascript-typescript
            build-mode: none
            paths-ignore:
              # ignore embedded 3rd party assets
              - 'webui/web/assets/**'
          - language: python
            build-mode: none
        # CodeQL supports the following values keywords for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'rust', 'swift'
        # Use `c-cpp` to analyze code written in C, C++ or both
        # Use 'java-kotlin' to analyze code written in Java, Kotlin or both
        # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
        # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
        # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
        # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
        # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
    steps:
      - name: Checkout repository
        uses: actions/checkout@v6

      # Add any setup steps before running the `github/codeql-action/init` action.
      # This includes steps like installing compilers or runtimes (`actions/setup-node`
      # or others). This is typically only required for manual builds.
      # - name: Setup runtime (example)
      #   uses: actions/setup-example@v1

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v4
        with:
          languages: ${{ matrix.language }}
          build-mode: ${{ matrix.build-mode }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.

          # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
          # queries: security-extended,security-and-quality

      # If the analyze step fails for one of the languages you are analyzing with
      # "We were unable to automatically build your code", modify the matrix above
      # to set the build mode to "manual" for that language. Then modify this step
      # to build your code.
      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
      - name: Run manual build steps
        if: matrix.build-mode == 'manual'
        shell: bash
        run: |
          echo 'If you are using a "manual" build mode for one or more of the' \
            'languages you are analyzing, replace this with the commands to build' \
            'your code, for example:'
          echo '  make bootstrap'
          echo '  make release'
          exit 1

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v4
        with:
          category: "/language:${{matrix.language}}"
```
.github/workflows/docker-bats.yml (vendored): 3 changes

```diff
@@ -8,13 +8,12 @@ jobs:

     steps:
     - name: Checkout
-      uses: actions/checkout@v4
+      uses: actions/checkout@v6

     - name: Build Docker Image
       run: |
         cp tests/.env.docker.default tests/.env.docker
         cp tests/.secrets.default tests/.secrets
         # see https://github.com/versity/versitygw/issues/1034
         docker build \
           --build-arg="GO_LIBRARY=go1.23.1.linux-amd64.tar.gz" \
           --build-arg="AWS_CLI=awscli-exe-linux-x86_64.zip" \
```
.github/workflows/docker.yml (vendored): 4 changes

```diff
@@ -12,7 +12,7 @@ jobs:
       contents: read
     steps:
     - name: Checkout
-      uses: actions/checkout@v4
+      uses: actions/checkout@v6

    - name: Set up QEMU
      uses: docker/setup-qemu-action@v3
@@ -43,7 +43,7 @@ jobs:
           ghcr.io/${{ github.repository }}

     - name: Build and push Docker images
-      uses: docker/build-push-action@v5
+      uses: docker/build-push-action@v6
       with:
         context: .
         push: true
```
.github/workflows/functional.yml (vendored): 4 changes

```diff
@@ -9,10 +9,10 @@ jobs:
     steps:

     - name: Checkout
-      uses: actions/checkout@v4
+      uses: actions/checkout@v6

     - name: Set up Go
-      uses: actions/setup-go@v5
+      uses: actions/setup-go@v6
       with:
         go-version: 'stable'
       id: go
```
.github/workflows/go.yml (vendored): 8 changes

```diff
@@ -9,10 +9,10 @@ jobs:
     steps:

     - name: Check out code into the Go module directory
-      uses: actions/checkout@v4
+      uses: actions/checkout@v6

     - name: Set up Go
-      uses: actions/setup-go@v5
+      uses: actions/setup-go@v6
       with:
         go-version: 'stable'
       id: go
@@ -46,10 +46,10 @@ jobs:
     steps:

     - name: Check out code
-      uses: actions/checkout@v4
+      uses: actions/checkout@v6

     - name: Set up Go
-      uses: actions/setup-go@v5
+      uses: actions/setup-go@v6
       with:
         go-version: 'stable'
```
.github/workflows/goreleaser.yml (vendored): 4 changes

```diff
@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - name: Checkout
-      uses: actions/checkout@v4
+      uses: actions/checkout@v6
       with:
         fetch-depth: 0

@@ -20,7 +20,7 @@ jobs:
       run: git fetch --force --tags

     - name: Setup Go
-      uses: actions/setup-go@v5
+      uses: actions/setup-go@v6
       with:
         go-version: stable
```
.github/workflows/host-style-tests.yml (vendored): 2 changes

```diff
@@ -7,7 +7,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - name: Checkout
-      uses: actions/checkout@v4
+      uses: actions/checkout@v6

    - name: run host-style tests
      run: make test-host-style
```
.github/workflows/shellcheck.yml (vendored): 2 changes

```diff
@@ -9,7 +9,7 @@ jobs:
     steps:

     - name: Check out code
-      uses: actions/checkout@v4
+      uses: actions/checkout@v6

    - name: Run checks
      run: |
```
.github/workflows/skips.yml (vendored): new file, 84 lines

```yaml
name: skips check
permissions: {}
on: workflow_dispatch
jobs:
  skip-ticket-check:
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@v6

      - name: Fail if any skip descriptions are empty or point to closed issues/PRs
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          set -euo pipefail

          # Find uncommented lines with "skip " (ignore lines whose first non-space char is #)
          mapfile -t MATCHES < <(
            git ls-files 'tests/test_*.sh' \
              | xargs -r grep -nE '^[[:space:]]*[^#][[:space:]]*skip[[:space:]]*$' \
              || true
          )

          if [ ${#MATCHES[@]} -ne 0 ]; then
            echo "${#MATCHES[@]} skip(s) lack a description"
            printf ' - %s\n' "${MATCHES[@]}"
            exit 1
          fi

          mapfile -t MATCHES < <(
            git ls-files 'tests/test_*.sh' \
              | xargs -r grep -nE '^[[:space:]]*[^#][[:space:]]*skip[[:space:]]*"https://github.com' \
              || true
          )

          urls=()
          for m in "${MATCHES[@]}"; do
            # Extract first GitHub issue/PR URL on the line:
            # supports /issues/123 and /pull/123 (with or without extra suffix)
            url="$(echo "$m" | grep -oE 'https://github\.com/[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+/(issues|pull)/[0-9]+' | head -n1 || true)"
            if [ -n "$url" ]; then
              urls+=("$url")
            fi
          done

          if [ ${#urls[@]} -eq 0 ]; then
            echo "Found skip lines, but no recognizable GitHub issue/PR URLs."
            exit 0
          fi

          echo "Found skip ticket URLs:"
          printf ' - %s\n' "${urls[@]}"

          closed=()

          for url in "${urls[@]}"; do
            # Parse owner/repo and number from URL
            # url format: https://github.com/OWNER/REPO/issues/123 or /pull/123
            path="${url#https://github.com/}"
            owner="$(echo "$path" | cut -d/ -f1)"
            repo="$(echo "$path" | cut -d/ -f2)"
            num="$(echo "$path" | cut -d/ -f4)"

            # Issues API works for both issues and PRs; state=open/closed
            state="$(curl -fsSL \
              -H "Authorization: Bearer $GH_TOKEN" \
              -H "Accept: application/vnd.github+json" \
              "https://api.github.com/repos/$owner/$repo/issues/$num" \
              | python -c "import sys,json; print(json.load(sys.stdin).get('state',''))")"

            echo "$url -> $state"
            if [ "$state" = "closed" ]; then
              closed+=("$url")
            fi
          done

          if [ ${#closed[@]} -gt 0 ]; then
            echo "::error::Closed tickets referenced by uncommented skip URLs:"
            printf '::error:: - %s\n' "${closed[@]}"
            exit 1
          fi

          echo "All referenced tickets are open. ✅"
```
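The workflow above resolves each skip ticket through the GitHub REST Issues API (which also answers for pull requests) and reads the `state` field. For local experimentation outside of CI, a minimal Go equivalent of that lookup is sketched below; the repository/issue number in `main` is just an example and the `GH_TOKEN` environment variable is optional for public repositories.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

// issueState fetches the open/closed state of a GitHub issue or PR,
// the same lookup the workflow's curl+python step performs.
func issueState(owner, repo string, number int, token string) (string, error) {
	url := fmt.Sprintf("https://api.github.com/repos/%s/%s/issues/%d", owner, repo, number)
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("Accept", "application/vnd.github+json")
	if token != "" {
		req.Header.Set("Authorization", "Bearer "+token)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	var out struct {
		State string `json:"state"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return "", err
	}
	return out.State, nil
}

func main() {
	// Issue 1034 is referenced elsewhere in this diff; any issue number works.
	state, err := issueState("versity", "versitygw", 1034, os.Getenv("GH_TOKEN"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(state) // "open" or "closed"
}
```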
.github/workflows/static.yml (vendored): 4 changes

```diff
@@ -9,12 +9,12 @@ jobs:
     steps:

     - name: Checkout
-      uses: actions/checkout@v4
+      uses: actions/checkout@v6
       with:
         fetch-depth: 1

     - name: Set up Go
-      uses: actions/setup-go@v5
+      uses: actions/setup-go@v6
       with:
         go-version: 'stable'
       id: go
```
.github/workflows/system.yml (vendored): 156 changes

```diff
@@ -2,138 +2,34 @@ name: system tests
 permissions: {}
 on: pull_request
 jobs:
+  generate:
+    runs-on: ubuntu-latest
+    outputs:
+      matrix: ${{ steps.make.outputs.matrix }}
+    steps:
+      - uses: actions/checkout@v6
+      - id: make
+        run: |
+          if ! matrix_output=$(tests/generate_matrix.sh 2>&1); then
+            echo "error generating matrix: $matrix_output"
+            exit 1
+          fi
+          MATRIX_JSON=$(echo -n "$matrix_output" | jq -c . )
+          echo "matrix=$MATRIX_JSON" >> "$GITHUB_OUTPUT"
+
   build:
     name: RunTests
+    needs: generate
     runs-on: ubuntu-latest
     strategy:
       fail-fast: false
-      matrix:
-        include:
-          - set: "mc, posix, non-file count, non-static, folder IAM"
-            IAM_TYPE: folder
-            RUN_SET: "mc-non-file-count"
-            RECREATE_BUCKETS: "true"
-            DELETE_BUCKETS_AFTER_TEST: "true"
-            BACKEND: "posix"
-          - set: "mc, posix, file count, non-static, folder IAM"
-            IAM_TYPE: folder
-            RUN_SET: "mc-file-count"
-            RECREATE_BUCKETS: "true"
-            DELETE_BUCKETS_AFTER_TEST: "true"
-            BACKEND: "posix"
-          - set: "REST, posix, non-static, base|acl|multipart|put-object, folder IAM"
-            IAM_TYPE: folder
-            RUN_SET: "rest-base,rest-acl,rest-multipart,rest-put-object"
-            RECREATE_BUCKETS: "true"
-            DELETE_BUCKETS_AFTER_TEST: "true"
-            BACKEND: "posix"
-          - set: "REST, posix, non-static, chunked|checksum|versioning|bucket, folder IAM"
-            IAM_TYPE: folder
-            RUN_SET: "rest-chunked,rest-checksum,rest-versioning,rest-bucket,rest-list-buckets,rest-create-bucket,rest-head-bucket"
-            RECREATE_BUCKETS: "true"
-            DELETE_BUCKETS_AFTER_TEST: "true"
-            BACKEND: "posix"
-          - set: "REST, posix, non-static, not implemented|rest-delete-bucket-ownership-controls|rest-delete-bucket-tagging, folder IAM"
-            IAM_TYPE: folder
-            RUN_SET: "rest-not-implemented,rest-delete-bucket-ownership-controls,rest-delete-bucket-tagging"
-            RECREATE_BUCKETS: "true"
-            DELETE_BUCKETS_AFTER_TEST: "true"
-            BACKEND: "posix"
-          - set: "s3, posix, non-file count, non-static, folder IAM"
-            IAM_TYPE: folder
-            RUN_SET: "s3-non-file-count"
-            RECREATE_BUCKETS: "true"
-            DELETE_BUCKETS_AFTER_TEST: "true"
-            BACKEND: "posix"
-          - set: "s3, posix, file count, non-static, folder IAM"
-            IAM_TYPE: folder
-            RUN_SET: "s3-file-count"
-            RECREATE_BUCKETS: "true"
-            DELETE_BUCKETS_AFTER_TEST: "true"
-            BACKEND: "posix"
-          - set: "s3api, posix, bucket|object|multipart, non-static, folder IAM"
-            IAM_TYPE: folder
-            RUN_SET: "s3api-bucket,s3api-object,s3api-multipart"
-            RECREATE_BUCKETS: "true"
-            DELETE_BUCKETS_AFTER_TEST: "true"
-            BACKEND: "posix"
-          - set: "s3api, posix, policy, non-static, folder IAM"
-            IAM_TYPE: folder
-            RUN_SET: "s3api-policy"
-            RECREATE_BUCKETS: "true"
-            DELETE_BUCKETS_AFTER_TEST: "true"
-            BACKEND: "posix"
-          - set: "s3api, posix, user, non-static, s3 IAM"
-            IAM_TYPE: s3
-            RUN_SET: "s3api-user"
-            RECREATE_BUCKETS: "true"
-            DELETE_BUCKETS_AFTER_TEST: "true"
-            BACKEND: "posix"
-          - set: "s3api, posix, bucket, static, folder IAM"
-            IAM_TYPE: folder
-            RUN_SET: "s3api-bucket"
-            RECREATE_BUCKETS: "false"
-            DELETE_BUCKETS_AFTER_TEST: "false"
-            BACKEND: "posix"
-          - set: "s3api, posix, multipart, static, folder IAM"
-            IAM_TYPE: folder
-            RUN_SET: "s3api-multipart"
-            RECREATE_BUCKETS: "false"
-            DELETE_BUCKETS_AFTER_TEST: "false"
-            BACKEND: "posix"
-          - set: "s3api, posix, object, static, folder IAM"
-            IAM_TYPE: folder
-            RUN_SET: "s3api-object"
-            RECREATE_BUCKETS: "false"
-            DELETE_BUCKETS_AFTER_TEST: "false"
-            BACKEND: "posix"
-          - set: "s3api, posix, policy, static, folder IAM"
-            IAM_TYPE: folder
-            RUN_SET: "s3api-policy"
-            RECREATE_BUCKETS: "false"
-            DELETE_BUCKETS_AFTER_TEST: "false"
-            BACKEND: "posix"
-          - set: "s3api, posix, user, static, folder IAM"
-            IAM_TYPE: folder
-            RUN_SET: "s3api-user"
-            RECREATE_BUCKETS: "false"
-            DELETE_BUCKETS_AFTER_TEST: "false"
-            BACKEND: "posix"
-          # TODO fix/debug s3 gateway
-          #- set: "s3api, s3, multipart|object, non-static, folder IAM"
-          #  IAM_TYPE: folder
-          #  RUN_SET: "s3api-bucket,s3api-object,s3api-multipart"
-          #  RECREATE_BUCKETS: "true"
-          #  BACKEND: "s3"
-          #- set: "s3api, s3, policy|user, non-static, folder IAM"
-          #  IAM_TYPE: folder
-          #  RUN_SET: "s3api-policy,s3api-user"
-          #  RECREATE_BUCKETS: "true"
-          #  BACKEND: "s3"
-          - set: "s3cmd, posix, file count, non-static, folder IAM"
-            IAM_TYPE: folder
-            RUN_SET: "s3cmd-file-count"
-            RECREATE_BUCKETS: "true"
-            DELETE_BUCKETS_AFTER_TEST: "true"
-            BACKEND: "posix"
-          - set: "s3cmd, posix, non-user, non-static, folder IAM"
-            IAM_TYPE: folder
-            RUN_SET: "s3cmd-non-user"
-            RECREATE_BUCKETS: "true"
-            DELETE_BUCKETS_AFTER_TEST: "true"
-            BACKEND: "posix"
-          - set: "s3cmd, posix, user, non-static, folder IAM"
-            IAM_TYPE: folder
-            RUN_SET: "s3cmd-user"
-            RECREATE_BUCKETS: "true"
-            DELETE_BUCKETS_AFTER_TEST: "true"
-            BACKEND: "posix"
+      matrix: ${{ fromJson(needs.generate.outputs.matrix) }}
     steps:
     - name: Check out code into the Go module directory
-      uses: actions/checkout@v4
+      uses: actions/checkout@v6

     - name: Set up Go
-      uses: actions/setup-go@v5
+      uses: actions/setup-go@v6
       with:
         go-version: "stable"
       id: go
@@ -195,9 +91,9 @@ jobs:
         MC_ALIAS: versity
         LOG_LEVEL: 4
         GOCOVERDIR: ${{ github.workspace }}/cover
-        USERNAME_ONE: ABCDEFG
+        USERNAME_ONE: HIJKLMN
         PASSWORD_ONE: 1234567
-        USERNAME_TWO: HIJKLMN
+        USERNAME_TWO: OPQRSTU
        PASSWORD_TWO: 8901234
        TEST_FILE_FOLDER: ${{ github.workspace }}/versity-gwtest-files
        REMOVE_TEST_FILE_FOLDER: true
@@ -207,11 +103,12 @@ jobs:
         PYTHON_ENV_FOLDER: ${{ github.workspace }}/env
         AUTOGENERATE_USERS: true
         USER_AUTOGENERATION_PREFIX: github-actions-test-
+        AWS_REGION: ${{ matrix.AWS_REGION }}
       run: |
         make testbin
         export AWS_ACCESS_KEY_ID=ABCDEFGHIJKLMNOPQRST
         export AWS_SECRET_ACCESS_KEY=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmn
-        export AWS_REGION=us-east-1
+        export AWS_REGION=$AWS_REGION
         export AWS_ACCESS_KEY_ID_TWO=user
         export AWS_SECRET_ACCESS_KEY_TWO=pass
         export AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED
@@ -226,10 +123,13 @@ jobs:
         if [[ $RECREATE_BUCKETS == "false" ]]; then
           BYPASS_ENV_FILE=true ${{ github.workspace }}/tests/setup_static.sh
         fi
-        BYPASS_ENV_FILE=true ${{ github.workspace }}/tests/run.sh $RUN_SET
+        BYPASS_ENV_FILE=true $HOME/bin/bats ${{ github.workspace }}/$RUN_SET

     - name: Time report
-      run: cat ${{ github.workspace }}/time.log
+      run: |
+        if [ -e ${{ github.workspace }}/time.log ]; then
+          cat ${{ github.workspace }}/time.log
+        fi

     - name: Coverage report
       run: |
```
```diff
@@ -23,13 +23,16 @@ RUN go build -ldflags "-X=main.Build=${BUILD} -X=main.BuildTime=${TIME} -X=main.

 FROM alpine:latest

-# These arguments can be overriden when building the image
+# These arguments can be overridden when building the image
-ARG IAM_DIR=/tmp/vgw
+ARG SETUP_DIR=/tmp/vgw

-RUN mkdir -p $IAM_DIR
+RUN mkdir -p $SETUP_DIR

-COPY --from=0 /app/cmd/versitygw/versitygw /app/versitygw
+COPY --from=0 /app/cmd/versitygw/versitygw /usr/local/bin/versitygw

-ENTRYPOINT [ "/app/versitygw" ]
+COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
+RUN chmod +x /usr/local/bin/docker-entrypoint.sh
+
+ENTRYPOINT [ "/usr/local/bin/docker-entrypoint.sh" ]
```
README.md: 23 changes

````diff
@@ -70,6 +70,29 @@ versitygw [global options] command [command options] [arguments...]
 ```
 The [global options](https://github.com/versity/versitygw/wiki/Global-Options) are specified before the backend type and the backend options are specified after.

+### Run the gateway in Docker
+
+Use the published image like the native binary by passing CLI arguments:
+
+```bash
+docker run --rm versity/versitygw:latest --version
+```
+
+When no command arguments are supplied, the container looks for `VGW_BACKEND` and optional `VGW_BACKEND_ARG`/`VGW_BACKEND_ARGS` environment variables to determine which backend to start. Backend-specific configuration continues to come from the existing environment flags (for example `ROOT_ACCESS_KEY`, `VGW_PORT`, and others).
+
+```bash
+docker run --rm \
+  -e ROOT_ACCESS_KEY=testuser \
+  -e ROOT_SECRET_KEY=secret \
+  -e VGW_BACKEND=posix \
+  -e VGW_BACKEND_ARG=/data \
+  -p 10000:7070 \
+  -v $(pwd)/data:/data \
+  versity/versitygw:latest
+```
+
+If you need to pass additional CLI options, set `VGW_ARGS` with a space-delimited list, or continue passing arguments directly to `docker run`.
+
 ***

 #### Versity gives you clarity and control over your archival storage, so you can allocate more resources to your core mission.
````
```diff
@@ -82,15 +82,15 @@ type AccessOptions struct {
 }

 func VerifyAccess(ctx context.Context, be backend.Backend, opts AccessOptions) error {
+	// Skip the access check for public bucket requests
+	if opts.IsPublicRequest {
+		return nil
+	}
 	if opts.Readonly {
 		if opts.AclPermission == PermissionWrite || opts.AclPermission == PermissionWriteAcp {
 			return s3err.GetAPIError(s3err.ErrAccessDenied)
 		}
 	}
-	// Skip the access check for public bucket requests
-	if opts.IsPublicRequest {
-		return nil
-	}
 	if opts.IsRoot {
 		return nil
 	}
```
auth/acl.go: 14 changes

```diff
@@ -25,6 +25,7 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"github.com/versity/versitygw/backend"
+	"github.com/versity/versitygw/debuglogger"
 	"github.com/versity/versitygw/s3err"
 )

@@ -245,7 +246,7 @@ func ParseACLOutput(data []byte, owner string) (GetBucketAclOutput, error) {
 	}, nil
 }

-func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool) ([]byte, error) {
+func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService) ([]byte, error) {
 	if input == nil {
 		return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
 	}
@@ -493,3 +494,14 @@ func UpdateBucketACLOwner(ctx context.Context, be backend.Backend, bucket, newOw

 	return be.DeleteBucketPolicy(ctx, bucket)
 }
+
+// ValidateCannedACL validates bucket canned acl value
+func ValidateCannedACL(acl string) error {
+	switch types.BucketCannedACL(acl) {
+	case types.BucketCannedACLPrivate, types.BucketCannedACLPublicRead, types.BucketCannedACLPublicReadWrite, "":
+		return nil
+	default:
+		debuglogger.Logf("invalid bucket canned acl: %v", acl)
+		return s3err.GetAPIError(s3err.ErrInvalidArgument)
+	}
+}
```
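The new exported helper can be called directly from any code that imports the auth package. A minimal sketch, assuming the versitygw module is available as a dependency; only `private`, `public-read`, `public-read-write`, and the empty string pass, everything else maps to an InvalidArgument API error:

```go
package main

import (
	"fmt"

	"github.com/versity/versitygw/auth"
)

func main() {
	// "private" and "" are accepted; "authenticated-read" is not in the
	// supported list above, so it returns an InvalidArgument API error.
	for _, acl := range []string{"private", "", "authenticated-read"} {
		fmt.Printf("%q -> %v\n", acl, auth.ValidateCannedACL(acl))
	}
}
```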
```diff
@@ -40,14 +40,17 @@ const (
 	policyErrInvalidFirstChar     = policyErr("Policies must be valid JSON and the first byte must be '{'")
 	policyErrEmptyStatement       = policyErr("Could not parse the policy: Statement is empty!")
 	policyErrMissingStatmentField = policyErr("Missing required field Statement")
+	policyErrInvalidVersion       = policyErr("The policy must contain a valid version string")
 )

 type BucketPolicy struct {
+	Version   PolicyVersion      `json:"Version"`
 	Statement []BucketPolicyItem `json:"Statement"`
 }

 func (bp *BucketPolicy) UnmarshalJSON(data []byte) error {
 	var tmp struct {
+		Version   *PolicyVersion
 		Statement *[]BucketPolicyItem `json:"Statement"`
 	}

@@ -60,12 +63,22 @@ func (bp *BucketPolicy) UnmarshalJSON(data []byte) error {
 		return policyErrMissingStatmentField
 	}

 	// Assign the parsed value to the actual struct
+	if tmp.Version == nil {
+		// bucket policy version should defualt to '2008-10-17'
+		bp.Version = PolicyVersion2008
+	} else {
+		bp.Version = *tmp.Version
+	}
+
 	bp.Statement = *tmp.Statement
 	return nil
 }

 func (bp *BucketPolicy) Validate(bucket string, iam IAMService) error {
 	if !bp.Version.isValid() {
 		return policyErrInvalidVersion
 	}

 	for _, statement := range bp.Statement {
 		err := statement.Validate(bucket, iam)
 		if err != nil {
```
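A self-contained sketch of the resulting behaviour, using stand-in types rather than the gateway's own (the real `BucketPolicy`/`PolicyVersion` live in the auth package): a policy document without a `Version` key now falls back to `2008-10-17`, and `Validate` rejects anything other than the two supported version strings.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// policyDoc is a simplified stand-in with just the fields needed to
// illustrate the version handling added above.
type policyDoc struct {
	Version   string            `json:"Version"`
	Statement []json.RawMessage `json:"Statement"`
}

func main() {
	// No "Version" key: the gateway now defaults to "2008-10-17".
	var p policyDoc
	_ = json.Unmarshal([]byte(`{"Statement":[{"Effect":"Allow"}]}`), &p)
	if p.Version == "" {
		p.Version = "2008-10-17" // same default PolicyVersion2008 applies
	}
	fmt.Println(p.Version)

	// Only the two AWS policy versions are accepted by Validate.
	valid := map[string]bool{"2008-10-17": true, "2012-10-17": true}
	fmt.Println(valid["2012-10-18"]) // false -> "The policy must contain a valid version string"
}
```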
```diff
@@ -38,15 +38,20 @@ const (
 	GetObjectAction                  Action = "s3:GetObject"
 	GetObjectVersionAction           Action = "s3:GetObjectVersion"
 	DeleteObjectAction               Action = "s3:DeleteObject"
+	DeleteObjectVersionAction        Action = "s3:DeleteObjectVersion"
 	GetObjectAclAction               Action = "s3:GetObjectAcl"
 	GetObjectAttributesAction        Action = "s3:GetObjectAttributes"
+	GetObjectVersionAttributesAction Action = "s3:GetObjectVersionAttributes"
 	PutObjectAclAction               Action = "s3:PutObjectAcl"
 	RestoreObjectAction              Action = "s3:RestoreObject"
 	GetBucketTaggingAction           Action = "s3:GetBucketTagging"
 	PutBucketTaggingAction           Action = "s3:PutBucketTagging"
 	GetObjectTaggingAction           Action = "s3:GetObjectTagging"
+	GetObjectVersionTaggingAction    Action = "s3:GetObjectVersionTagging"
 	PutObjectTaggingAction           Action = "s3:PutObjectTagging"
+	PutObjectVersionTaggingAction    Action = "s3:PutObjectVersionTagging"
 	DeleteObjectTaggingAction        Action = "s3:DeleteObjectTagging"
+	DeleteObjectVersionTaggingAction Action = "s3:DeleteObjectVersionTagging"
 	ListBucketVersionsAction         Action = "s3:ListBucketVersions"
 	ListBucketAction                 Action = "s3:ListBucket"
 	GetBucketObjectLockConfigurationAction Action = "s3:GetBucketObjectLockConfiguration"
@@ -109,15 +114,20 @@ var supportedActionList = map[Action]struct{}{
 	GetObjectAction:                  {},
 	GetObjectVersionAction:           {},
 	DeleteObjectAction:               {},
+	DeleteObjectVersionAction:        {},
 	GetObjectAclAction:               {},
 	GetObjectAttributesAction:        {},
+	GetObjectVersionAttributesAction: {},
 	PutObjectAclAction:               {},
 	RestoreObjectAction:              {},
 	GetBucketTaggingAction:           {},
 	PutBucketTaggingAction:           {},
 	GetObjectTaggingAction:           {},
+	GetObjectVersionTaggingAction:    {},
 	PutObjectTaggingAction:           {},
+	PutObjectVersionTaggingAction:    {},
 	DeleteObjectTaggingAction:        {},
+	DeleteObjectVersionTaggingAction: {},
 	ListBucketVersionsAction:         {},
 	ListBucketAction:                 {},
 	GetBucketObjectLockConfigurationAction: {},
@@ -163,25 +173,30 @@ var supportedActionList = map[Action]struct{}{
 }

 var supportedObjectActionList = map[Action]struct{}{
-	AbortMultipartUploadAction:      {},
-	ListMultipartUploadPartsAction:  {},
-	PutObjectAction:                 {},
-	GetObjectAction:                 {},
-	GetObjectVersionAction:          {},
-	DeleteObjectAction:              {},
-	GetObjectAclAction:              {},
-	GetObjectAttributesAction:       {},
-	PutObjectAclAction:              {},
-	RestoreObjectAction:             {},
-	GetObjectTaggingAction:          {},
-	PutObjectTaggingAction:          {},
-	DeleteObjectTaggingAction:       {},
-	GetObjectLegalHoldAction:        {},
-	PutObjectLegalHoldAction:        {},
-	GetObjectRetentionAction:        {},
-	PutObjectRetentionAction:        {},
-	BypassGovernanceRetentionAction: {},
-	AllActions:                      {},
+	AbortMultipartUploadAction:       {},
+	ListMultipartUploadPartsAction:   {},
+	PutObjectAction:                  {},
+	GetObjectAction:                  {},
+	GetObjectVersionAction:           {},
+	DeleteObjectAction:               {},
+	DeleteObjectVersionAction:        {},
+	GetObjectAclAction:               {},
+	GetObjectAttributesAction:        {},
+	GetObjectVersionAttributesAction: {},
+	PutObjectAclAction:               {},
+	RestoreObjectAction:              {},
+	GetObjectTaggingAction:           {},
+	GetObjectVersionTaggingAction:    {},
+	PutObjectTaggingAction:           {},
+	PutObjectVersionTaggingAction:    {},
+	DeleteObjectTaggingAction:        {},
+	DeleteObjectVersionTaggingAction: {},
+	GetObjectLegalHoldAction:         {},
+	PutObjectLegalHoldAction:         {},
+	GetObjectRetentionAction:         {},
+	PutObjectRetentionAction:         {},
+	BypassGovernanceRetentionAction:  {},
+	AllActions:                       {},
 }

 // Validates Action: it should either wildcard match with supported actions list or be in it
```
auth/bucket_policy_version.go: new file, 32 lines

```go
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

type PolicyVersion string

const (
	PolicyVersion2008 PolicyVersion = "2008-10-17"
	PolicyVersion2012 PolicyVersion = "2012-10-17"
)

// isValid checks if the policy version is valid or not
func (pv PolicyVersion) isValid() bool {
	switch pv {
	case PolicyVersion2008, PolicyVersion2012:
		return true
	default:
		return false
	}
}
```
auth/bucket_policy_version_test.go: new file, 54 lines

```go
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestPolicyVersion_isValid(t *testing.T) {
	tests := []struct {
		name  string // description of this test case
		value string
		want  bool
	}{
		{"valid 2008", "2008-10-17", true},
		{"valid 2012", "2012-10-17", true},
		{"invalid empty", "", false},
		{"invalid 1", "invalid", false},
		{"invalid 2", "2010-10-17", false},
		{"invalid 3", "2006-00-12", false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := PolicyVersion(tt.value).isValid()
			assert.Equal(t, tt.want, got)
		})
	}
}
```
auth/iam.go: 106 changes

```diff
@@ -45,11 +45,12 @@ func (r Role) IsValid() bool {

 // Account is a gateway IAM account
 type Account struct {
-	Access  string `json:"access"`
-	Secret  string `json:"secret"`
-	Role    Role   `json:"role"`
-	UserID  int    `json:"userID"`
-	GroupID int    `json:"groupID"`
+	Access    string `json:"access"`
+	Secret    string `json:"secret"`
+	Role      Role   `json:"role"`
+	UserID    int    `json:"userID"`
+	GroupID   int    `json:"groupID"`
+	ProjectID int    `json:"projectID"`
 }

 type ListUserAccountsResult struct {
@@ -58,10 +59,11 @@ type ListUserAccountsResult struct {

 // Mutable props, which could be changed when updating an IAM account
 type MutableProps struct {
-	Secret  *string `json:"secret"`
-	Role    Role    `json:"role"`
-	UserID  *int    `json:"userID"`
-	GroupID *int    `json:"groupID"`
+	Secret    *string `json:"secret"`
+	Role      Role    `json:"role"`
+	UserID    *int    `json:"userID"`
+	GroupID   *int    `json:"groupID"`
+	ProjectID *int    `json:"projectID"`
 }

 func (m MutableProps) Validate() error {
@@ -82,6 +84,9 @@ func updateAcc(acc *Account, props MutableProps) {
 	if props.UserID != nil {
 		acc.UserID = *props.UserID
 	}
+	if props.ProjectID != nil {
+		acc.ProjectID = *props.ProjectID
+	}
 	if props.Role != "" {
 		acc.Role = props.Role
 	}
@@ -107,42 +112,47 @@ var (
 )

 type Opts struct {
-	RootAccount            Account
-	Dir                    string
-	LDAPServerURL          string
-	LDAPBindDN             string
-	LDAPPassword           string
-	LDAPQueryBase          string
-	LDAPObjClasses         string
-	LDAPAccessAtr          string
-	LDAPSecretAtr          string
-	LDAPRoleAtr            string
-	LDAPUserIdAtr          string
-	LDAPGroupIdAtr         string
-	VaultEndpointURL       string
-	VaultSecretStoragePath string
-	VaultAuthMethod        string
-	VaultMountPath         string
-	VaultRootToken         string
-	VaultRoleId            string
-	VaultRoleSecret        string
-	VaultServerCert        string
-	VaultClientCert        string
-	VaultClientCertKey     string
-	S3Access               string
-	S3Secret               string
-	S3Region               string
-	S3Bucket               string
-	S3Endpoint             string
-	S3DisableSSlVerfiy     bool
-	CacheDisable           bool
-	CacheTTL               int
-	CachePrune             int
-	IpaHost                string
-	IpaVaultName           string
-	IpaUser                string
-	IpaPassword            string
-	IpaInsecure            bool
+	RootAccount                 Account
+	Dir                         string
+	LDAPServerURL               string
+	LDAPBindDN                  string
+	LDAPPassword                string
+	LDAPQueryBase               string
+	LDAPObjClasses              string
+	LDAPAccessAtr               string
+	LDAPSecretAtr               string
+	LDAPRoleAtr                 string
+	LDAPUserIdAtr               string
+	LDAPGroupIdAtr              string
+	LDAPProjectIdAtr            string
+	LDAPTLSSkipVerify           bool
+	VaultEndpointURL            string
+	VaultNamespace              string
+	VaultSecretStoragePath      string
+	VaultSecretStorageNamespace string
+	VaultAuthMethod             string
+	VaultAuthNamespace          string
+	VaultMountPath              string
+	VaultRootToken              string
+	VaultRoleId                 string
+	VaultRoleSecret             string
+	VaultServerCert             string
+	VaultClientCert             string
+	VaultClientCertKey          string
+	S3Access                    string
+	S3Secret                    string
+	S3Region                    string
+	S3Bucket                    string
+	S3Endpoint                  string
+	S3DisableSSlVerfiy          bool
+	CacheDisable                bool
+	CacheTTL                    int
+	CachePrune                  int
+	IpaHost                     string
+	IpaVaultName                string
+	IpaUser                     string
+	IpaPassword                 string
+	IpaInsecure                 bool
 }

 func New(o *Opts) (IAMService, error) {
@@ -156,7 +166,7 @@ func New(o *Opts) (IAMService, error) {
 	case o.LDAPServerURL != "":
 		svc, err = NewLDAPService(o.RootAccount, o.LDAPServerURL, o.LDAPBindDN, o.LDAPPassword,
 			o.LDAPQueryBase, o.LDAPAccessAtr, o.LDAPSecretAtr, o.LDAPRoleAtr, o.LDAPUserIdAtr,
-			o.LDAPGroupIdAtr, o.LDAPObjClasses)
+			o.LDAPGroupIdAtr, o.LDAPProjectIdAtr, o.LDAPObjClasses, o.LDAPTLSSkipVerify)
 		fmt.Printf("initializing LDAP IAM with %q\n", o.LDAPServerURL)
 	case o.S3Endpoint != "":
 		svc, err = NewS3(o.RootAccount, o.S3Access, o.S3Secret, o.S3Region, o.S3Bucket,
@@ -164,8 +174,8 @@ func New(o *Opts) (IAMService, error) {
 		fmt.Printf("initializing S3 IAM with '%v/%v'\n",
 			o.S3Endpoint, o.S3Bucket)
 	case o.VaultEndpointURL != "":
-		svc, err = NewVaultIAMService(o.RootAccount, o.VaultEndpointURL, o.VaultSecretStoragePath,
-			o.VaultAuthMethod, o.VaultMountPath, o.VaultRootToken, o.VaultRoleId, o.VaultRoleSecret,
+		svc, err = NewVaultIAMService(o.RootAccount, o.VaultEndpointURL, o.VaultNamespace, o.VaultSecretStoragePath, o.VaultSecretStorageNamespace,
+			o.VaultAuthMethod, o.VaultAuthNamespace, o.VaultMountPath, o.VaultRootToken, o.VaultRoleId, o.VaultRoleSecret,
 			o.VaultServerCert, o.VaultClientCert, o.VaultClientCertKey)
 		fmt.Printf("initializing Vault IAM with %q\n", o.VaultEndpointURL)
 	case o.IpaHost != "":
```
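For orientation, the sketch below constructs `auth.Opts` with the two new LDAP fields and lets `auth.New` dispatch to the LDAP backend. Every URL, DN, attribute name, and credential is a placeholder, and the exact attribute mapping depends on your directory schema; it is not a recommended configuration, just a shape check against the new struct fields.

```go
package main

import (
	"fmt"
	"log"

	"github.com/versity/versitygw/auth"
)

func main() {
	svc, err := auth.New(&auth.Opts{
		RootAccount:       auth.Account{Access: "root", Secret: "rootsecret"},
		LDAPServerURL:     "ldaps://ldap.example.internal",
		LDAPBindDN:        "cn=admin,dc=example,dc=internal",
		LDAPPassword:      "bindpassword",
		LDAPQueryBase:     "ou=users,dc=example,dc=internal",
		LDAPObjClasses:    "inetOrgPerson",
		LDAPAccessAtr:     "uid",
		LDAPSecretAtr:     "userPassword",
		LDAPRoleAtr:       "employeeType",
		LDAPUserIdAtr:     "uidNumber",
		LDAPGroupIdAtr:    "gidNumber",
		LDAPProjectIdAtr:  "departmentNumber", // new: attribute mapped to Account.ProjectID
		LDAPTLSSkipVerify: true,               // new: skip TLS verification for ldaps://
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("IAM service ready: %T\n", svc)
}
```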
```diff
@@ -194,11 +194,12 @@ func (s *IAMServiceInternal) ListUserAccounts() ([]Account, error) {
 	var accs []Account
 	for _, k := range keys {
 		accs = append(accs, Account{
-			Access:  k,
-			Secret:  conf.AccessAccounts[k].Secret,
-			Role:    conf.AccessAccounts[k].Role,
-			UserID:  conf.AccessAccounts[k].UserID,
-			GroupID: conf.AccessAccounts[k].GroupID,
+			Access:    k,
+			Secret:    conf.AccessAccounts[k].Secret,
+			Role:      conf.AccessAccounts[k].Role,
+			UserID:    conf.AccessAccounts[k].UserID,
+			GroupID:   conf.AccessAccounts[k].GroupID,
+			ProjectID: conf.AccessAccounts[k].ProjectID,
 		})
 	}
```
```diff
@@ -132,6 +132,7 @@ func (ipa *IpaIAMService) GetUserAccount(access string) (Account, error) {
 	userResult := struct {
 		Gidnumber []string
 		Uidnumber []string
+		PidNumber []string
 	}{}

 	err = ipa.rpc(req, &userResult)
@@ -139,20 +140,25 @@ func (ipa *IpaIAMService) GetUserAccount(access string) (Account, error) {
 		return Account{}, err
 	}

-	uid, err := strconv.Atoi(userResult.Uidnumber[0])
+	uid, err := parseToInt(userResult.Uidnumber, "userID")
 	if err != nil {
-		return Account{}, fmt.Errorf("ipa uid invalid: %w", err)
+		return Account{}, err
 	}
-	gid, err := strconv.Atoi(userResult.Gidnumber[0])
+	gid, err := parseToInt(userResult.Gidnumber, "groupID")
 	if err != nil {
-		return Account{}, fmt.Errorf("ipa gid invalid: %w", err)
+		return Account{}, err
 	}
+	pId, err := parseToInt(userResult.PidNumber, "projectID")
+	if err != nil {
+		return Account{}, err
+	}

 	account := Account{
-		Access:  access,
-		Role:    RoleUser,
-		UserID:  uid,
-		GroupID: gid,
+		Access:    access,
+		Role:      RoleUser,
+		UserID:    uid,
+		GroupID:   gid,
+		ProjectID: pId,
 	}

 	session_key := make([]byte, 16)
@@ -494,3 +500,20 @@ func (b *Base64Encoded) UnmarshalJSON(data []byte) error {
 	*b, err = base64.StdEncoding.DecodeString(intermediate)
 	return err
 }
+
+// parseToInt parses the first argument of input string slice
+// to an integer. If slice is empty, it defaults to 0
+func parseToInt(input []string, argName string) (int, error) {
+	if len(input) == 0 {
+		debuglogger.IAMLogf("empty %s slice: defaulting to 0", argName)
+		return 0, nil
+	}
+
+	id, err := strconv.Atoi(input[0])
+	if err != nil {
+		debuglogger.IAMLogf("failed to parse %s: %v", argName, err)
+		return 0, fmt.Errorf("invalid %s: %w", argName, err)
+	}
+
+	return id, nil
+}
```
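The practical effect of `parseToInt` is that FreeIPA users without a project-ID attribute now resolve to `ProjectID: 0` instead of panicking on an empty slice. Since the helper is unexported, the runnable sketch below mirrors its contract locally rather than calling it:

```go
package main

import (
	"fmt"
	"strconv"
)

// parseFirstInt mirrors the unexported parseToInt helper above:
// an empty attribute slice defaults to 0 with no error, a non-numeric
// first value is an error, otherwise the first value is returned.
func parseFirstInt(input []string) (int, error) {
	if len(input) == 0 {
		return 0, nil
	}
	return strconv.Atoi(input[0])
}

func main() {
	fmt.Println(parseFirstInt(nil))              // 0 <nil>  (attribute missing)
	fmt.Println(parseFirstInt([]string{"1042"})) // 1042 <nil>
	fmt.Println(parseFirstInt([]string{"abc"}))  // 0 and a parse error
}
```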
auth/iam_ldap.go: 129 changes

```diff
@@ -15,7 +15,9 @@
 package auth

 import (
+	"crypto/tls"
 	"fmt"
+	"net/url"
 	"strconv"
 	"strings"
 	"sync"
@@ -26,57 +28,82 @@ import (
 )

 type LdapIAMService struct {
-	conn       *ldap.Conn
-	queryBase  string
-	objClasses []string
-	accessAtr  string
-	secretAtr  string
-	roleAtr    string
-	groupIdAtr string
-	userIdAtr  string
-	rootAcc    Account
-	url        string
-	bindDN     string
-	pass       string
-	mu         sync.Mutex
+	conn          *ldap.Conn
+	queryBase     string
+	objClasses    []string
+	accessAtr     string
+	secretAtr     string
+	roleAtr       string
+	groupIdAtr    string
+	userIdAtr     string
+	projectIdAtr  string
+	rootAcc       Account
+	url           string
+	bindDN        string
+	pass          string
+	tlsSkipVerify bool
+	mu            sync.Mutex
 }

 var _ IAMService = &LdapIAMService{}

-func NewLDAPService(rootAcc Account, url, bindDN, pass, queryBase, accAtr, secAtr, roleAtr, userIdAtr, groupIdAtr, objClasses string) (IAMService, error) {
-	if url == "" || bindDN == "" || pass == "" || queryBase == "" || accAtr == "" ||
-		secAtr == "" || roleAtr == "" || userIdAtr == "" || groupIdAtr == "" || objClasses == "" {
+func NewLDAPService(rootAcc Account, ldapURL, bindDN, pass, queryBase, accAtr, secAtr, roleAtr, userIdAtr, groupIdAtr, projectIdAtr, objClasses string, tlsSkipVerify bool) (IAMService, error) {
+	if ldapURL == "" || bindDN == "" || pass == "" || queryBase == "" || accAtr == "" ||
+		secAtr == "" || roleAtr == "" || userIdAtr == "" || groupIdAtr == "" || projectIdAtr == "" || objClasses == "" {
 		return nil, fmt.Errorf("required parameters list not fully provided")
 	}
-	conn, err := ldap.DialURL(url)
+
+	conn, err := dialLDAP(ldapURL, tlsSkipVerify)
 	if err != nil {
 		return nil, fmt.Errorf("failed to connect to LDAP server: %w", err)
 	}

 	err = conn.Bind(bindDN, pass)
 	if err != nil {
 		conn.Close()
 		return nil, fmt.Errorf("failed to bind to LDAP server %w", err)
 	}
 	return &LdapIAMService{
-		conn:       conn,
-		queryBase:  queryBase,
-		objClasses: strings.Split(objClasses, ","),
-		accessAtr:  accAtr,
-		secretAtr:  secAtr,
-		roleAtr:    roleAtr,
-		userIdAtr:  userIdAtr,
-		groupIdAtr: groupIdAtr,
-		rootAcc:    rootAcc,
-		url:        url,
-		bindDN:     bindDN,
-		pass:       pass,
+		conn:          conn,
+		queryBase:     queryBase,
+		objClasses:    strings.Split(objClasses, ","),
+		accessAtr:     accAtr,
+		secretAtr:     secAtr,
+		roleAtr:       roleAtr,
+		userIdAtr:     userIdAtr,
+		groupIdAtr:    groupIdAtr,
+		projectIdAtr:  projectIdAtr,
+		rootAcc:       rootAcc,
+		url:           ldapURL,
+		bindDN:        bindDN,
+		pass:          pass,
+		tlsSkipVerify: tlsSkipVerify,
 	}, nil
 }

+// dialLDAP establishes an LDAP connection with optional TLS configuration
+func dialLDAP(ldapURL string, tlsSkipVerify bool) (*ldap.Conn, error) {
+	u, err := url.Parse(ldapURL)
+	if err != nil {
+		return nil, fmt.Errorf("invalid LDAP URL: %w", err)
+	}
+
+	// For ldaps:// URLs, use DialURL with custom TLS config if needed
+	if u.Scheme == "ldaps" && tlsSkipVerify {
+		tlsConfig := &tls.Config{
+			InsecureSkipVerify: tlsSkipVerify,
+		}
+		return ldap.DialURL(ldapURL, ldap.DialWithTLSConfig(tlsConfig))
+	}
+
+	// For ldap:// or when TLS verification is enabled, use standard DialURL
+	return ldap.DialURL(ldapURL)
+}

 func (ld *LdapIAMService) reconnect() error {
 	ld.conn.Close()

-	conn, err := ldap.DialURL(ld.url)
+	conn, err := dialLDAP(ld.url, ld.tlsSkipVerify)
 	if err != nil {
 		return fmt.Errorf("failed to reconnect to LDAP server: %w", err)
 	}
@@ -117,6 +144,7 @@ func (ld *LdapIAMService) CreateAccount(account Account) error {
 	userEntry.Attribute(ld.roleAtr, []string{string(account.Role)})
 	userEntry.Attribute(ld.groupIdAtr, []string{fmt.Sprint(account.GroupID)})
 	userEntry.Attribute(ld.userIdAtr, []string{fmt.Sprint(account.UserID)})
+	userEntry.Attribute(ld.projectIdAtr, []string{fmt.Sprint(account.ProjectID)})

 	err := ld.execute(func(c *ldap.Conn) error {
 		return c.Add(userEntry)
@@ -152,7 +180,7 @@ func (ld *LdapIAMService) GetUserAccount(access string) (Account, error) {
 		0,
 		false,
 		ld.buildSearchFilter(access),
-		[]string{ld.accessAtr, ld.secretAtr, ld.roleAtr, ld.userIdAtr, ld.groupIdAtr},
+		[]string{ld.accessAtr, ld.secretAtr, ld.roleAtr, ld.userIdAtr, ld.groupIdAtr, ld.projectIdAtr},
 		nil,
 	)
@@ -191,12 +219,19 @@ func (ld *LdapIAMService) GetUserAccount(access string) (Account, error) {
 		return Account{}, fmt.Errorf("invalid entry value for user-id %q: %w",
 			entry.GetAttributeValue(ld.userIdAtr), err)
 	}
+	projectID, err := strconv.Atoi(entry.GetAttributeValue(ld.projectIdAtr))
+	if err != nil {
+		return Account{}, fmt.Errorf("invalid entry value for project-id %q: %w",
+			entry.GetAttributeValue(ld.projectIdAtr), err)
+	}

 	return Account{
-		Access:  entry.GetAttributeValue(ld.accessAtr),
-		Secret:  entry.GetAttributeValue(ld.secretAtr),
-		Role:    Role(entry.GetAttributeValue(ld.roleAtr)),
-		GroupID: groupId,
-		UserID:  userId,
+		Access:    entry.GetAttributeValue(ld.accessAtr),
+		Secret:    entry.GetAttributeValue(ld.secretAtr),
+		Role:      Role(entry.GetAttributeValue(ld.roleAtr)),
+		GroupID:   groupId,
+		UserID:    userId,
+		ProjectID: projectID,
 	}, nil
 }
@@ -211,6 +246,9 @@ func (ld *LdapIAMService) UpdateUserAccount(access string, props MutableProps) e
 	if props.UserID != nil {
 		req.Replace(ld.userIdAtr, []string{fmt.Sprint(*props.UserID)})
 	}
+	if props.ProjectID != nil {
+		req.Replace(ld.projectIdAtr, []string{fmt.Sprint(*props.ProjectID)})
+	}
 	if props.Role != "" {
 		req.Replace(ld.roleAtr, []string{string(props.Role)})
 	}
@@ -248,7 +286,7 @@ func (ld *LdapIAMService) ListUserAccounts() ([]Account, error) {
 		0,
 		false,
 		ld.buildSearchFilter(""),
-		[]string{ld.accessAtr, ld.secretAtr, ld.roleAtr, ld.groupIdAtr, ld.userIdAtr},
+		[]string{ld.accessAtr, ld.secretAtr, ld.roleAtr, ld.groupIdAtr, ld.projectIdAtr, ld.userIdAtr},
 		nil,
 	)
@@ -273,12 +311,19 @@ func (ld *LdapIAMService) ListUserAccounts() ([]Account, error) {
 			return nil, fmt.Errorf("invalid entry value for user-id %q: %w",
 				el.GetAttributeValue(ld.userIdAtr), err)
 		}
+		projectID, err := strconv.Atoi(el.GetAttributeValue(ld.projectIdAtr))
+		if err != nil {
+			return nil, fmt.Errorf("invalid entry value for project-id %q: %w",
+				el.GetAttributeValue(ld.groupIdAtr), err)
+		}

 		result = append(result, Account{
-			Access:  el.GetAttributeValue(ld.accessAtr),
-			Secret:  el.GetAttributeValue(ld.secretAtr),
-			Role:    Role(el.GetAttributeValue(ld.roleAtr)),
-			GroupID: groupId,
-			UserID:  userId,
+			Access:    el.GetAttributeValue(ld.accessAtr),
+			Secret:    el.GetAttributeValue(ld.secretAtr),
+			Role:      Role(el.GetAttributeValue(ld.roleAtr)),
+			GroupID:   groupId,
+			ProjectID: projectID,
+			UserID:    userId,
 		})
 	}
```
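The `dialLDAP` helper only swaps in a custom TLS config when the URL scheme is `ldaps` and verification is explicitly disabled. A standalone sketch of the same decision using the go-ldap calls shown in the diff; the hostname is a placeholder, and disabling verification should be reserved for test environments with self-signed certificates:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"net/url"

	"github.com/go-ldap/ldap/v3"
)

// connect mirrors the dialLDAP helper above: only ldaps:// URLs with
// verification explicitly disabled get a custom TLS config; everything
// else goes through the default DialURL path.
func connect(ldapURL string, tlsSkipVerify bool) (*ldap.Conn, error) {
	u, err := url.Parse(ldapURL)
	if err != nil {
		return nil, fmt.Errorf("invalid LDAP URL: %w", err)
	}
	if u.Scheme == "ldaps" && tlsSkipVerify {
		return ldap.DialURL(ldapURL, ldap.DialWithTLSConfig(&tls.Config{InsecureSkipVerify: true}))
	}
	return ldap.DialURL(ldapURL)
}

func main() {
	// Placeholder host; expects an LDAPS listener with a self-signed cert.
	conn, err := connect("ldaps://ldap.example.internal:636", true)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	fmt.Println("connected")
}
```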
```diff
@@ -205,11 +205,12 @@ func (s *IAMServiceS3) ListUserAccounts() ([]Account, error) {
 	var accs []Account
 	for _, k := range keys {
 		accs = append(accs, Account{
-			Access:  k,
-			Secret:  conf.AccessAccounts[k].Secret,
-			Role:    conf.AccessAccounts[k].Role,
-			UserID:  conf.AccessAccounts[k].UserID,
-			GroupID: conf.AccessAccounts[k].GroupID,
+			Access:    k,
+			Secret:    conf.AccessAccounts[k].Secret,
+			Role:      conf.AccessAccounts[k].Role,
+			UserID:    conf.AccessAccounts[k].UserID,
+			GroupID:   conf.AccessAccounts[k].GroupID,
+			ProjectID: conf.AccessAccounts[k].ProjectID,
 		})
 	}
```
```diff
@@ -38,15 +38,39 @@ type VaultIAMService struct {
 	creds schema.AppRoleLoginRequest
 }

+type VaultIAMNamespace struct {
+	Auth          string
+	SecretStorage string
+}
+
+// Resolve empty specific namespaces to the fallback.
+// Empty result means root namespace.
+func resolveVaultNamespaces(authNamespace, secretStorageNamespace, fallback string) VaultIAMNamespace {
+	ns := VaultIAMNamespace{
+		Auth:          authNamespace,
+		SecretStorage: secretStorageNamespace,
+	}
+
+	if ns.Auth == "" {
+		ns.Auth = fallback
+	}
+	if ns.SecretStorage == "" {
+		ns.SecretStorage = fallback
+	}
+
+	return ns
+}

 var _ IAMService = &VaultIAMService{}

-func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath,
-	authMethod, mountPath, rootToken, roleID, roleSecret, serverCert,
+func NewVaultIAMService(rootAcc Account, endpoint, namespace, secretStoragePath, secretStorageNamespace,
+	authMethod, authNamespace, mountPath, rootToken, roleID, roleSecret, serverCert,
 	clientCert, clientCertKey string) (IAMService, error) {
 	opts := []vault.ClientOption{
 		vault.WithAddress(endpoint),
 		vault.WithRequestTimeout(requestTimeout),
 	}

 	if serverCert != "" {
 		tls := vault.TLSConfiguration{}
@@ -80,6 +104,28 @@ func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath,
 		kvReqOpts = append(kvReqOpts, vault.WithMountPath(mountPath))
 	}

+	// Resolve namespaces using optional generic fallback "namespace"
+	ns := resolveVaultNamespaces(authNamespace, secretStorageNamespace, namespace)
+
+	// Guard: AppRole tokens are namespace scoped. If using AppRole and namespaces differ, error early.
+	// Root token can span namespaces because each request carries X-Vault-Namespace.
+	if rootToken == "" && ns.Auth != "" && ns.SecretStorage != "" && ns.Auth != ns.SecretStorage {
+		return nil, fmt.Errorf(
+			"approle tokens are namespace scoped. auth namespace %q and secret storage namespace %q differ. "+
+				"use the same namespace or authenticate with a root token",
+			ns.Auth, ns.SecretStorage,
+		)
+	}
+
+	// Apply namespaces to the correct request option sets.
+	// For root token we do not need an auth namespace since we are not logging in via auth.
+	if rootToken == "" && ns.Auth != "" {
+		authReqOpts = append(authReqOpts, vault.WithNamespace(ns.Auth))
+	}
+	if ns.SecretStorage != "" {
+		kvReqOpts = append(kvReqOpts, vault.WithNamespace(ns.SecretStorage))
+	}
+
 	creds := schema.AppRoleLoginRequest{
 		RoleId:   roleID,
 		SecretId: roleSecret,
@@ -179,6 +225,10 @@ func (vt *VaultIAMService) CreateAccount(account Account) error {
 		if strings.Contains(err.Error(), "check-and-set") {
 			return ErrUserExists
 		}
+		if vault.IsErrorStatus(err, http.StatusForbidden) {
+			return fmt.Errorf("vault 403 permission denied on path %q. check KV mount path and policy. original: %w",
+				vt.secretStoragePath+"/"+account.Access, err)
+		}
 		return err
 	}
 	return nil
@@ -319,12 +369,21 @@ func parseVaultUserAccount(data map[string]any, access string) (acc Account, err
 	if err != nil {
 		return acc, errInvalidUser
 	}
+	projectIdJson, ok := usrAcc["projectID"].(json.Number)
+	if !ok {
+		return acc, errInvalidUser
+	}
+	projectID, err := projectIdJson.Int64()
+	if err != nil {
+		return acc, errInvalidUser
+	}

 	return Account{
-		Access:  acss,
-		Secret:  secret,
-		Role:    Role(role),
-		UserID:  int(userId),
-		GroupID: int(groupId),
+		Access:    acss,
+		Secret:    secret,
+		Role:      Role(role),
+		UserID:    int(userId),
+		GroupID:   int(groupId),
+		ProjectID: int(projectID),
 	}, nil
 }
```
@@ -24,6 +24,7 @@ import (
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/debuglogger"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
"github.com/versity/versitygw/s3response"
|
||||
)
|
||||
@@ -40,7 +41,7 @@ func ParseBucketLockConfigurationInput(input []byte) ([]byte, error) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
|
||||
}
|
||||
|
||||
if lockConfig.ObjectLockEnabled != "" && lockConfig.ObjectLockEnabled != types.ObjectLockEnabledEnabled {
|
||||
if lockConfig.ObjectLockEnabled != types.ObjectLockEnabledEnabled {
|
||||
return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
|
||||
}
|
||||
|
||||
@@ -92,28 +93,101 @@ func ParseBucketLockConfigurationOutput(input []byte) (*types.ObjectLockConfigur
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func ParseObjectLockRetentionInput(input []byte) ([]byte, error) {
|
||||
func ParseObjectLockRetentionInput(input []byte) (*s3response.PutObjectRetentionInput, error) {
|
||||
var retention s3response.PutObjectRetentionInput
|
||||
if err := xml.Unmarshal(input, &retention); err != nil {
|
||||
debuglogger.Logf("invalid object lock retention request body: %v", err)
|
||||
return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
|
||||
}
|
||||
|
||||
if retention.RetainUntilDate.Before(time.Now()) {
|
||||
debuglogger.Logf("object lock retain until date must be in the future")
|
||||
return nil, s3err.GetAPIError(s3err.ErrPastObjectLockRetainDate)
|
||||
}
|
||||
switch retention.Mode {
|
||||
case types.ObjectLockRetentionModeCompliance:
|
||||
case types.ObjectLockRetentionModeGovernance:
|
||||
default:
|
||||
debuglogger.Logf("invalid object lock retention mode: %s", retention.Mode)
|
||||
return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
|
||||
}
|
||||
|
||||
return json.Marshal(retention)
|
||||
return &retention, nil
|
||||
}
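For reference, a sketch of a request body the parser above would accept, assuming s3response.PutObjectRetentionInput follows the standard S3 PutObjectRetention XML layout (a Mode plus a future RetainUntilDate); the element names, date, and mode here are illustrative only.

// Hypothetical body; element names assume the usual S3 Retention XML layout.
body := []byte(`<Retention>
  <Mode>GOVERNANCE</Mode>
  <RetainUntilDate>2030-01-01T00:00:00Z</RetainUntilDate>
</Retention>`)
ret, err := ParseObjectLockRetentionInput(body)
// err == nil and ret.Mode == types.ObjectLockRetentionModeGovernance are expected;
// a past RetainUntilDate would instead return ErrPastObjectLockRetainDate.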
|
||||
|
||||
func ParseObjectLockRetentionInputToJSON(input *s3response.PutObjectRetentionInput) ([]byte, error) {
|
||||
data, err := json.Marshal(input)
|
||||
if err != nil {
|
||||
debuglogger.Logf("parse object lock retention to JSON: %v", err)
|
||||
return nil, fmt.Errorf("parse object lock retention: %w", err)
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// IsObjectLockRetentionPutAllowed checks if the object lock retention PUT request
|
||||
// is allowed against the current state of the object lock
|
||||
func IsObjectLockRetentionPutAllowed(ctx context.Context, be backend.Backend, bucket, object, versionId, userAccess string, input *s3response.PutObjectRetentionInput, bypass bool) error {
|
||||
ret, err := be.GetObjectRetention(ctx, bucket, object, versionId)
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchObjectLockConfiguration)) {
|
||||
// if object lock configuration is not set
|
||||
// allow the retention modification without any checks
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
debuglogger.Logf("failed to get object retention: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
retention, err := ParseObjectLockRetentionOutput(ret)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if retention.Mode == input.Mode {
|
||||
// if retention mode is the same
|
||||
// the operation is allowed
|
||||
return nil
|
||||
}
|
||||
|
||||
if retention.Mode == types.ObjectLockRetentionModeCompliance {
|
||||
// retention in COMPLIANCE mode is by definition not allowed to be modified
|
||||
debuglogger.Logf("object lock retention change request from 'COMPLIANCE' to 'GOVERNANCE' is not allowed")
|
||||
return s3err.GetAPIError(s3err.ErrObjectLocked)
|
||||
}
|
||||
|
||||
if !bypass {
|
||||
// if x-amz-bypass-governance-retention is not provided
|
||||
// return error: object is locked
|
||||
debuglogger.Logf("object lock retention mode change is not allowed and bypass governence is not forced")
|
||||
return s3err.GetAPIError(s3err.ErrObjectLocked)
|
||||
}
|
||||
|
||||
// the last remaining case is when the user tries to change
|
||||
// from 'GOVERNANCE' to 'COMPLIANCE' with
|
||||
// 'x-amz-bypass-governance-retention' header
|
||||
// first we need to check if user has 's3:BypassGovernanceRetention'
|
||||
policy, err := be.GetBucketPolicy(ctx, bucket)
|
||||
if err != nil {
|
||||
// if it fails to get the policy, return object is locked
|
||||
debuglogger.Logf("failed to get the bucket policy: %v", err)
|
||||
return s3err.GetAPIError(s3err.ErrObjectLocked)
|
||||
}
|
||||
err = VerifyBucketPolicy(policy, userAccess, bucket, object, BypassGovernanceRetentionAction)
|
||||
if err != nil {
|
||||
// if user doesn't have "s3:BypassGovernanceRetention" permission
|
||||
// return object is locked
|
||||
debuglogger.Logf("the user is missing 's3:BypassGovernanceRetention' permission")
|
||||
return s3err.GetAPIError(s3err.ErrObjectLocked)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
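To summarize the checks above, a condensed, hypothetical restatement of the allowed retention-mode transitions (ignoring the policy lookup details), assuming the same aws-sdk-go-v2 types import used in this file:

// retentionChangeAllowed is a hypothetical helper, not part of the gateway:
//   same mode                 -> allowed
//   COMPLIANCE -> GOVERNANCE  -> denied
//   GOVERNANCE -> COMPLIANCE  -> allowed only with bypass and the
//                                s3:BypassGovernanceRetention permission
func retentionChangeAllowed(current, requested types.ObjectLockRetentionMode, bypass, hasBypassPerm bool) bool {
	if current == requested {
		return true
	}
	if current == types.ObjectLockRetentionModeCompliance {
		return false
	}
	return bypass && hasBypassPerm
}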
|
||||
|
||||
func ParseObjectLockRetentionOutput(input []byte) (*types.ObjectLockRetention, error) {
|
||||
var retention types.ObjectLockRetention
|
||||
if err := json.Unmarshal(input, &retention); err != nil {
|
||||
debuglogger.Logf("parse object lock retention output: %v", err)
|
||||
return nil, fmt.Errorf("parse object lock retention: %w", err)
|
||||
}
|
||||
|
||||
@@ -136,7 +210,16 @@ func ParseObjectLegalHoldOutput(status *bool) *s3response.GetObjectLegalHoldResu
|
||||
}
|
||||
}
|
||||
|
||||
func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects []types.ObjectIdentifier, bypass, isBucketPublic bool, be backend.Backend) error {
|
||||
func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects []types.ObjectIdentifier, bypass, isBucketPublic bool, be backend.Backend, isOverwrite bool) error {
|
||||
if isOverwrite {
|
||||
// if bucket versioning is enabled, any overwrite request
|
||||
// should be allowed, as it leads to a new object version
|
||||
// creation
|
||||
res, err := be.GetBucketVersioning(ctx, bucket)
|
||||
if err == nil && res.Status != nil && *res.Status == types.BucketVersioningStatusEnabled {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
data, err := be.GetObjectLockConfiguration(ctx, bucket)
|
||||
if err != nil {
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound)) {
|
||||
@@ -171,6 +254,12 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
|
||||
}
|
||||
}
|
||||
|
||||
var versioningEnabled bool
|
||||
vers, err := be.GetBucketVersioning(ctx, bucket)
|
||||
if err == nil && vers.Status != nil {
|
||||
versioningEnabled = *vers.Status == types.BucketVersioningStatusEnabled
|
||||
}
|
||||
|
||||
for _, obj := range objects {
|
||||
var key, versionId string
|
||||
if obj.Key != nil {
|
||||
@@ -179,11 +268,21 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
|
||||
if obj.VersionId != nil {
|
||||
versionId = *obj.VersionId
|
||||
}
|
||||
// if bucket versioning is enabled and versionId isn't provided
|
||||
// no lock check is needed, as it leads to a new delete marker creation
|
||||
if versioningEnabled && versionId == "" {
|
||||
continue
|
||||
}
|
||||
checkRetention := true
|
||||
retentionData, err := be.GetObjectRetention(ctx, bucket, key, versionId)
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
|
||||
continue
|
||||
}
|
||||
// if a `MethodNotAllowed` error is returned, the object is a delete marker and
|
||||
// no object lock check is needed
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrMethodNotAllowed)) {
|
||||
continue
|
||||
}
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchObjectLockConfiguration)) {
|
||||
checkRetention = false
|
||||
}
|
||||
@@ -198,31 +297,35 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
|
||||
}
|
||||
|
||||
if retention.Mode != "" && retention.RetainUntilDate != nil {
|
||||
if retention.RetainUntilDate.After(time.Now()) {
|
||||
switch retention.Mode {
|
||||
case types.ObjectLockRetentionModeGovernance:
|
||||
if !bypass {
|
||||
return s3err.GetAPIError(s3err.ErrObjectLocked)
|
||||
} else {
|
||||
policy, err := be.GetBucketPolicy(ctx, bucket)
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
|
||||
return s3err.GetAPIError(s3err.ErrObjectLocked)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if isBucketPublic {
|
||||
err = VerifyPublicBucketPolicy(policy, bucket, key, BypassGovernanceRetentionAction)
|
||||
} else {
|
||||
err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
|
||||
}
|
||||
if err != nil {
|
||||
return s3err.GetAPIError(s3err.ErrObjectLocked)
|
||||
}
|
||||
}
|
||||
case types.ObjectLockRetentionModeCompliance:
|
||||
if retention.RetainUntilDate.Before(time.Now()) {
|
||||
// if the object retention is expired, the object
|
||||
// is allowed for write operations (delete, modify)
|
||||
return nil
|
||||
}
|
||||
|
||||
switch retention.Mode {
|
||||
case types.ObjectLockRetentionModeGovernance:
|
||||
if !bypass {
|
||||
return s3err.GetAPIError(s3err.ErrObjectLocked)
|
||||
} else {
|
||||
policy, err := be.GetBucketPolicy(ctx, bucket)
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
|
||||
return s3err.GetAPIError(s3err.ErrObjectLocked)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if isBucketPublic {
|
||||
err = VerifyPublicBucketPolicy(policy, bucket, key, BypassGovernanceRetentionAction)
|
||||
} else {
|
||||
err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
|
||||
}
|
||||
if err != nil {
|
||||
return s3err.GetAPIError(s3err.ErrObjectLocked)
|
||||
}
|
||||
}
|
||||
case types.ObjectLockRetentionModeCompliance:
|
||||
return s3err.GetAPIError(s3err.ErrObjectLocked)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,7 +8,8 @@ var IgnoredHeaders = Rules{
|
||||
// some clients use user-agent in signed headers
|
||||
// "User-Agent": struct{}{},
|
||||
"X-Amzn-Trace-Id": struct{}{},
|
||||
"Expect": struct{}{},
|
||||
// Expect might appear in signed headers
|
||||
// "Expect": struct{}{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -41,7 +41,7 @@ func TestIgnoredHeaders(t *testing.T) {
|
||||
}{
|
||||
"expect": {
|
||||
Header: "Expect",
|
||||
ExpectIgnored: true,
|
||||
ExpectIgnored: false,
|
||||
},
|
||||
"authorization": {
|
||||
Header: "Authorization",
|
||||
|
||||
@@ -157,7 +157,7 @@ func (az *Azure) CreateBucket(ctx context.Context, input *s3.CreateBucketInput,
|
||||
string(keyOwnership): backend.GetPtrFromString(encodeBytes([]byte(input.ObjectOwnership))),
|
||||
}
|
||||
|
||||
acct, ok := ctx.Value("account").(auth.Account)
|
||||
acct, ok := ctx.Value("bucket-owner").(auth.Account)
|
||||
if !ok {
|
||||
acct = auth.Account{}
|
||||
}
|
||||
@@ -177,7 +177,21 @@ func (az *Azure) CreateBucket(ctx context.Context, input *s3.CreateBucketInput,
|
||||
meta[string(keyBucketLock)] = backend.GetPtrFromString(encodeBytes(defaultLockParsed))
|
||||
}
|
||||
|
||||
_, err := az.client.CreateContainer(ctx, *input.Bucket, &container.CreateOptions{Metadata: meta})
|
||||
tagging, err := backend.ParseCreateBucketTags(input.CreateBucketConfiguration.Tags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if tagging != nil {
|
||||
tags, err := json.Marshal(tagging)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal tags: %w", err)
|
||||
}
|
||||
|
||||
meta[string(keyTags)] = backend.GetPtrFromString(encodeBytes(tags))
|
||||
}
|
||||
|
||||
_, err = az.client.CreateContainer(ctx, *input.Bucket, &container.CreateOptions{Metadata: meta})
|
||||
if errors.Is(s3err.GetAPIError(s3err.ErrBucketAlreadyExists), azureErrToS3Err(err)) {
|
||||
aclBytes, err := az.getContainerMetaData(ctx, *input.Bucket, string(keyAclCapital))
|
||||
if err != nil {
|
||||
@@ -350,6 +364,9 @@ func (az *Azure) PutObject(ctx context.Context, po s3response.PutObjectInput) (s
|
||||
if po.ObjectLockLegalHoldStatus == types.ObjectLockLegalHoldStatusOn {
|
||||
err := az.PutObjectLegalHold(ctx, *po.Bucket, *po.Key, "", true)
|
||||
if err != nil {
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrMissingObjectLockConfiguration)) {
|
||||
err = s3err.GetAPIError(s3err.ErrMissingObjectLockConfigurationNoSpaces)
|
||||
}
|
||||
return s3response.PutObjectOutput{}, err
|
||||
}
|
||||
}
|
||||
@@ -364,8 +381,11 @@ func (az *Azure) PutObject(ctx context.Context, po s3response.PutObjectInput) (s
|
||||
if err != nil {
|
||||
return s3response.PutObjectOutput{}, fmt.Errorf("parse object lock retention: %w", err)
|
||||
}
|
||||
err = az.PutObjectRetention(ctx, *po.Bucket, *po.Key, "", true, retParsed)
|
||||
err = az.PutObjectRetention(ctx, *po.Bucket, *po.Key, "", retParsed)
|
||||
if err != nil {
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrMissingObjectLockConfiguration)) {
|
||||
err = s3err.GetAPIError(s3err.ErrMissingObjectLockConfigurationNoSpaces)
|
||||
}
|
||||
return s3response.PutObjectOutput{}, err
|
||||
}
|
||||
}
|
||||
@@ -569,6 +589,11 @@ func (az *Azure) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3
|
||||
}
|
||||
}
|
||||
|
||||
if resp.TagCount != nil {
|
||||
tagcount := int32(*resp.TagCount)
|
||||
result.TagCount = &tagcount
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
@@ -961,6 +986,9 @@ func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInpu
|
||||
if input.ObjectLockLegalHoldStatus != "" {
|
||||
err = az.PutObjectLegalHold(ctx, *input.Bucket, *input.Key, "", input.ObjectLockLegalHoldStatus == types.ObjectLockLegalHoldStatusOn)
|
||||
if err != nil {
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrMissingObjectLockConfiguration)) {
|
||||
err = s3err.GetAPIError(s3err.ErrMissingObjectLockConfigurationNoSpaces)
|
||||
}
|
||||
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
|
||||
}
|
||||
}
|
||||
@@ -977,8 +1005,11 @@ func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInpu
|
||||
if err != nil {
|
||||
return s3response.CopyObjectOutput{}, fmt.Errorf("parse object retention: %w", err)
|
||||
}
|
||||
err = az.PutObjectRetention(ctx, *input.Bucket, *input.Key, "", true, retParsed)
|
||||
err = az.PutObjectRetention(ctx, *input.Bucket, *input.Key, "", retParsed)
|
||||
if err != nil {
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrMissingObjectLockConfiguration)) {
|
||||
err = s3err.GetAPIError(s3err.ErrMissingObjectLockConfigurationNoSpaces)
|
||||
}
|
||||
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
|
||||
}
|
||||
}
|
||||
@@ -1071,7 +1102,7 @@ func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInpu
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (az *Azure) PutObjectTagging(ctx context.Context, bucket, object string, tags map[string]string) error {
|
||||
func (az *Azure) PutObjectTagging(ctx context.Context, bucket, object, _ string, tags map[string]string) error {
|
||||
client, err := az.getBlobClient(bucket, object)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1085,7 +1116,7 @@ func (az *Azure) PutObjectTagging(ctx context.Context, bucket, object string, ta
|
||||
return nil
|
||||
}
|
||||
|
||||
func (az *Azure) GetObjectTagging(ctx context.Context, bucket, object string) (map[string]string, error) {
|
||||
func (az *Azure) GetObjectTagging(ctx context.Context, bucket, object, _ string) (map[string]string, error) {
|
||||
client, err := az.getBlobClient(bucket, object)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1099,7 +1130,7 @@ func (az *Azure) GetObjectTagging(ctx context.Context, bucket, object string) (m
|
||||
return parseAzTags(tags.BlobTagSet), nil
|
||||
}
|
||||
|
||||
func (az *Azure) DeleteObjectTagging(ctx context.Context, bucket, object string) error {
|
||||
func (az *Azure) DeleteObjectTagging(ctx context.Context, bucket, object, _ string) error {
|
||||
client, err := az.getBlobClient(bucket, object)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1121,7 +1152,7 @@ func (az *Azure) CreateMultipartUpload(ctx context.Context, input s3response.Cre
|
||||
}
|
||||
|
||||
if len(bucketLock) == 0 {
|
||||
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
|
||||
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrMissingObjectLockConfigurationNoSpaces)
|
||||
}
|
||||
|
||||
var bucketLockConfig auth.BucketLockConfig
|
||||
@@ -1130,7 +1161,7 @@ func (az *Azure) CreateMultipartUpload(ctx context.Context, input s3response.Cre
|
||||
}
|
||||
|
||||
if !bucketLockConfig.Enabled {
|
||||
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
|
||||
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrMissingObjectLockConfigurationNoSpaces)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1644,24 +1675,6 @@ func (az *Azure) DeleteBucketCors(ctx context.Context, bucket string) error {
|
||||
}
|
||||
|
||||
func (az *Azure) PutObjectLockConfiguration(ctx context.Context, bucket string, config []byte) error {
|
||||
cfg, err := az.getContainerMetaData(ctx, bucket, string(keyBucketLock))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(cfg) == 0 {
|
||||
return s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotAllowed)
|
||||
}
|
||||
|
||||
var bucketLockCfg auth.BucketLockConfig
|
||||
if err := json.Unmarshal(cfg, &bucketLockCfg); err != nil {
|
||||
return fmt.Errorf("unmarshal object lock config: %w", err)
|
||||
}
|
||||
|
||||
if !bucketLockCfg.Enabled {
|
||||
return s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotAllowed)
|
||||
}
|
||||
|
||||
return az.setContainerMetaData(ctx, bucket, string(keyBucketLock), config)
|
||||
}
|
||||
|
||||
@@ -1678,7 +1691,7 @@ func (az *Azure) GetObjectLockConfiguration(ctx context.Context, bucket string)
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func (az *Azure) PutObjectRetention(ctx context.Context, bucket, object, versionId string, bypass bool, retention []byte) error {
|
||||
func (az *Azure) PutObjectRetention(ctx context.Context, bucket, object, versionId string, retention []byte) error {
|
||||
err := az.isBucketObjectLockEnabled(ctx, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1700,28 +1713,7 @@ func (az *Azure) PutObjectRetention(ctx context.Context, bucket, object, version
|
||||
string(keyObjRetention): backend.GetPtrFromString(string(retention)),
|
||||
}
|
||||
} else {
|
||||
objLockCfg, ok := meta[string(keyObjRetention)]
|
||||
if !ok {
|
||||
meta[string(keyObjRetention)] = backend.GetPtrFromString(string(retention))
|
||||
} else {
|
||||
var lockCfg types.ObjectLockRetention
|
||||
if err := json.Unmarshal([]byte(*objLockCfg), &lockCfg); err != nil {
|
||||
return fmt.Errorf("unmarshal object lock config: %w", err)
|
||||
}
|
||||
|
||||
switch lockCfg.Mode {
|
||||
// Compliance mode can't be overridden
|
||||
case types.ObjectLockRetentionModeCompliance:
|
||||
return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
|
||||
// To override governance mode user should have "s3:BypassGovernanceRetention" permission
|
||||
case types.ObjectLockRetentionModeGovernance:
|
||||
if !bypass {
|
||||
return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
|
||||
meta[string(keyObjRetention)] = backend.GetPtrFromString(string(retention))
|
||||
}
|
||||
meta[string(keyObjRetention)] = backend.GetPtrFromString(string(retention))
|
||||
}
|
||||
|
||||
_, err = blobClient.SetMetadata(ctx, meta, nil)
|
||||
@@ -1859,7 +1851,7 @@ func (az *Azure) isBucketObjectLockEnabled(ctx context.Context, bucket string) e
|
||||
}
|
||||
|
||||
if len(cfg) == 0 {
|
||||
return s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
|
||||
return s3err.GetAPIError(s3err.ErrMissingObjectLockConfiguration)
|
||||
}
|
||||
|
||||
var bucketLockConfig auth.BucketLockConfig
|
||||
@@ -1868,7 +1860,7 @@ func (az *Azure) isBucketObjectLockEnabled(ctx context.Context, bucket string) e
|
||||
}
|
||||
|
||||
if !bucketLockConfig.Enabled {
|
||||
return s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
|
||||
return s3err.GetAPIError(s3err.ErrMissingObjectLockConfiguration)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -2105,22 +2097,20 @@ func (az *Azure) evaluateWritePreconditions(ctx context.Context, bucket, object,
|
||||
return nil
|
||||
}
|
||||
// call HeadObject to evaluate preconditions
|
||||
// if object doesn't exist, move forward with the object creation
|
||||
// otherwise return the error
|
||||
_, err := az.HeadObject(ctx, &s3.HeadObjectInput{
|
||||
Bucket: bucket,
|
||||
Key: object,
|
||||
IfMatch: ifMatch,
|
||||
IfNoneMatch: ifNoneMatch,
|
||||
res, err := az.HeadObject(ctx, &s3.HeadObjectInput{
|
||||
Bucket: bucket,
|
||||
Key: object,
|
||||
})
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrNotModified)) {
|
||||
return s3err.GetAPIError(s3err.ErrPreconditionFailed)
|
||||
}
|
||||
if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
var etag string
|
||||
if res != nil {
|
||||
etag = backend.GetStringFromPtr(res.ETag)
|
||||
}
|
||||
|
||||
return backend.EvaluateObjectPutPreconditions(etag, ifMatch, ifNoneMatch, !errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchKey)))
|
||||
}
|
||||
|
||||
func getAclFromMetadata(meta map[string]*string, key key) (*auth.ACL, error) {
|
||||
|
||||
@@ -83,14 +83,14 @@ type Backend interface {
|
||||
DeleteBucketTagging(_ context.Context, bucket string) error
|
||||
|
||||
// object tagging operations
|
||||
GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error)
|
||||
PutObjectTagging(_ context.Context, bucket, object string, tags map[string]string) error
|
||||
DeleteObjectTagging(_ context.Context, bucket, object string) error
|
||||
GetObjectTagging(_ context.Context, bucket, object, versionId string) (map[string]string, error)
|
||||
PutObjectTagging(_ context.Context, bucket, object, versionId string, tags map[string]string) error
|
||||
DeleteObjectTagging(_ context.Context, bucket, object, versionId string) error
|
||||
|
||||
// object lock operations
|
||||
PutObjectLockConfiguration(_ context.Context, bucket string, config []byte) error
|
||||
GetObjectLockConfiguration(_ context.Context, bucket string) ([]byte, error)
|
||||
PutObjectRetention(_ context.Context, bucket, object, versionId string, bypass bool, retention []byte) error
|
||||
PutObjectRetention(_ context.Context, bucket, object, versionId string, retention []byte) error
|
||||
GetObjectRetention(_ context.Context, bucket, object, versionId string) ([]byte, error)
|
||||
PutObjectLegalHold(_ context.Context, bucket, object, versionId string, status bool) error
|
||||
GetObjectLegalHold(_ context.Context, bucket, object, versionId string) (*bool, error)
|
||||
@@ -251,13 +251,13 @@ func (BackendUnsupported) DeleteBucketTagging(_ context.Context, bucket string)
|
||||
return s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
}
|
||||
|
||||
func (BackendUnsupported) GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error) {
|
||||
func (BackendUnsupported) GetObjectTagging(_ context.Context, bucket, object, versionId string) (map[string]string, error) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
}
|
||||
func (BackendUnsupported) PutObjectTagging(_ context.Context, bucket, object string, tags map[string]string) error {
|
||||
func (BackendUnsupported) PutObjectTagging(_ context.Context, bucket, object, versionId string, tags map[string]string) error {
|
||||
return s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
}
|
||||
func (BackendUnsupported) DeleteObjectTagging(_ context.Context, bucket, object string) error {
|
||||
func (BackendUnsupported) DeleteObjectTagging(_ context.Context, bucket, object, versionId string) error {
|
||||
return s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
}
|
||||
|
||||
@@ -267,7 +267,7 @@ func (BackendUnsupported) PutObjectLockConfiguration(_ context.Context, bucket s
|
||||
func (BackendUnsupported) GetObjectLockConfiguration(_ context.Context, bucket string) ([]byte, error) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
}
|
||||
func (BackendUnsupported) PutObjectRetention(_ context.Context, bucket, object, versionId string, bypass bool, retention []byte) error {
|
||||
func (BackendUnsupported) PutObjectRetention(_ context.Context, bucket, object, versionId string, retention []byte) error {
|
||||
return s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
}
|
||||
func (BackendUnsupported) GetObjectRetention(_ context.Context, bucket, object, versionId string) ([]byte, error) {
|
||||
|
||||
@@ -317,14 +317,60 @@ func ParseObjectTags(tagging string) (map[string]string, error) {
|
||||
return tagSet, nil
|
||||
}
|
||||
|
||||
var validTagComponent = regexp.MustCompile(`^[a-zA-Z0-9:/_.\-+ ]+$`)
|
||||
|
||||
// isValidTagComponent matches strings which contain letters, decimal digits,
|
||||
// and special chars: '/', '_', '-', '+', '.', ' ' (space)
|
||||
func isValidTagComponent(str string) bool {
|
||||
if str == "" {
|
||||
return true
|
||||
// ParseCreateBucketTags parses and validates the bucket
|
||||
// tagging from CreateBucket input
|
||||
func ParseCreateBucketTags(tagging []types.Tag) (map[string]string, error) {
|
||||
if len(tagging) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
tagset := make(map[string]string, len(tagging))
|
||||
|
||||
if len(tagging) > 50 {
|
||||
return nil, s3err.GetAPIError(s3err.ErrBucketTaggingLimited)
|
||||
}
|
||||
|
||||
for _, tag := range tagging {
|
||||
// validate tag key length
|
||||
key := GetStringFromPtr(tag.Key)
|
||||
if len(key) == 0 || len(key) > 128 {
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
|
||||
}
|
||||
|
||||
// validate tag key string chars
|
||||
if !isValidTagComponent(key) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
|
||||
}
|
||||
|
||||
// validate tag value length
|
||||
value := GetStringFromPtr(tag.Value)
|
||||
if len(value) > 256 {
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
|
||||
}
|
||||
|
||||
// validate tag value string chars
|
||||
if !isValidTagComponent(value) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
|
||||
}
|
||||
|
||||
// make sure there are no duplicate keys
|
||||
_, ok := tagset[key]
|
||||
if ok {
|
||||
return nil, s3err.GetAPIError(s3err.ErrDuplicateTagKey)
|
||||
}
|
||||
|
||||
tagset[key] = value
|
||||
}
|
||||
|
||||
return tagset, nil
|
||||
}
|
||||
|
||||
// tag component (key/value) name rule regexp
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_Tag.html
|
||||
var validTagComponent = regexp.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`)
|
||||
|
||||
// isValidTagComponent validates the tag component (key/value) name
|
||||
func isValidTagComponent(str string) bool {
|
||||
return validTagComponent.Match([]byte(str))
|
||||
}
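A small sketch of the validation rules above (at most 50 tags, key length 1-128, value length up to 256, the character class from the regexp, and no duplicate keys); the tag names and the aws.String helper from aws-sdk-go-v2 are assumptions made only for illustration.

// Hypothetical inputs exercising ParseCreateBucketTags.
tags, err := ParseCreateBucketTags([]types.Tag{
	{Key: aws.String("env"), Value: aws.String("prod")},
	{Key: aws.String("team"), Value: aws.String("storage")},
})
// err == nil, tags == map[string]string{"env": "prod", "team": "storage"}

_, err = ParseCreateBucketTags([]types.Tag{
	{Key: aws.String("env"), Value: aws.String("prod")},
	{Key: aws.String("env"), Value: aws.String("dev")}, // duplicate key
})
// err is the ErrDuplicateTagKey API error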
|
||||
|
||||
@@ -449,6 +495,8 @@ func EvaluatePreconditions(etag string, modTime time.Time, preconditions PreCond
|
||||
return nil
|
||||
}
|
||||
|
||||
etag = strings.Trim(etag, `"`)
|
||||
|
||||
// convert all conditions to *bool to evaluate the conditions
|
||||
var ifMatch, ifNoneMatch, ifModSince, ifUnmodeSince *bool
|
||||
if preconditions.IfMatch != nil {
|
||||
@@ -535,6 +583,7 @@ func EvaluatePreconditions(etag string, modTime time.Time, preconditions PreCond
|
||||
|
||||
// EvaluateMatchPreconditions evaluates if-match and if-none-match preconditions
|
||||
func EvaluateMatchPreconditions(etag string, ifMatch, ifNoneMatch *string) error {
|
||||
etag = strings.Trim(etag, `"`)
|
||||
if ifMatch != nil && *ifMatch != etag {
|
||||
return errPreconditionFailed
|
||||
}
|
||||
@@ -545,6 +594,38 @@ func EvaluateMatchPreconditions(etag string, ifMatch, ifNoneMatch *string) error
|
||||
return nil
|
||||
}
|
||||
|
||||
// EvaluateObjectPutPreconditions evaluates if-match and if-none-match preconditions
|
||||
// for object PUT(PutObject, CompleteMultipartUpload) actions
|
||||
func EvaluateObjectPutPreconditions(etag string, ifMatch, ifNoneMatch *string, objExists bool) error {
|
||||
if ifMatch == nil && ifNoneMatch == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if ifNoneMatch != nil && *ifNoneMatch != "*" {
|
||||
return s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
}
|
||||
|
||||
if ifNoneMatch != nil && ifMatch != nil {
|
||||
return s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
}
|
||||
|
||||
if ifNoneMatch != nil && objExists {
|
||||
return s3err.GetAPIError(s3err.ErrPreconditionFailed)
|
||||
}
|
||||
|
||||
if ifMatch != nil && !objExists {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
|
||||
etag = strings.Trim(etag, `"`)
|
||||
|
||||
if ifMatch != nil && *ifMatch != etag {
|
||||
return s3err.GetAPIError(s3err.ErrPreconditionFailed)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
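Illustrative calls against the function above, using made-up ETags:

star := "*"
match := "abc123"

// If-None-Match: * against an existing object fails the precondition.
_ = EvaluateObjectPutPreconditions(`"abc123"`, nil, &star, true) // ErrPreconditionFailed

// If-Match against a missing object reports NoSuchKey.
_ = EvaluateObjectPutPreconditions("", &match, nil, false) // ErrNoSuchKey

// If-Match with the matching (quote-trimmed) ETag on an existing object succeeds.
_ = EvaluateObjectPutPreconditions(`"abc123"`, &match, nil, true) // nil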
|
||||
|
||||
type ObjectDeletePreconditions struct {
|
||||
IfMatch *string
|
||||
IfMatchLastModTime *time.Time
|
||||
@@ -570,3 +651,19 @@ func EvaluateObjectDeletePreconditions(etag string, modTime time.Time, size int6
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsValidDirectoryName returns true if the string is a valid name
|
||||
// for a directory
|
||||
func IsValidDirectoryName(name string) bool {
|
||||
// directories may not contain a path separator
|
||||
if strings.ContainsRune(name, '/') {
|
||||
return false
|
||||
}
|
||||
|
||||
// directories may not contain null character
|
||||
if strings.ContainsRune(name, 0) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
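Quick illustration of the rule above with made-up names:

_ = IsValidDirectoryName("photos")      // true
_ = IsValidDirectoryName("a/b")         // false: contains a path separator
_ = IsValidDirectoryName("bad\x00name") // false: contains a null character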
|
||||
|
||||
@@ -17,6 +17,7 @@ package meta
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
@@ -98,6 +99,8 @@ func (s SideCar) DeleteAttribute(bucket, object, attribute string) error {
|
||||
return fmt.Errorf("failed to remove attribute: %v", err)
|
||||
}
|
||||
|
||||
s.cleanupEmptyDirs(metadir, bucket, object)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -135,5 +138,60 @@ func (s SideCar) DeleteAttributes(bucket, object string) error {
|
||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
return fmt.Errorf("failed to remove attributes: %v", err)
|
||||
}
|
||||
s.cleanupEmptyDirs(metadir, bucket, object)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s SideCar) cleanupEmptyDirs(metadir, bucket, object string) {
|
||||
removeIfEmpty(metadir)
|
||||
if bucket == "" {
|
||||
return
|
||||
}
|
||||
bucketDir := filepath.Join(s.dir, bucket)
|
||||
if object != "" {
|
||||
removeEmptyParents(filepath.Dir(metadir), bucketDir)
|
||||
}
|
||||
removeIfEmpty(bucketDir)
|
||||
}
|
||||
|
||||
func removeIfEmpty(dir string) {
|
||||
empty, err := isDirEmpty(dir)
|
||||
if err != nil || !empty {
|
||||
return
|
||||
}
|
||||
_ = os.Remove(dir)
|
||||
}
|
||||
|
||||
func removeEmptyParents(dir, stopDir string) {
|
||||
for {
|
||||
if dir == stopDir || dir == "." || dir == string(filepath.Separator) {
|
||||
return
|
||||
}
|
||||
empty, err := isDirEmpty(dir)
|
||||
if err != nil || !empty {
|
||||
return
|
||||
}
|
||||
err = os.Remove(dir)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
dir = filepath.Dir(dir)
|
||||
}
|
||||
}
|
||||
|
||||
func isDirEmpty(dir string) (bool, error) {
|
||||
f, err := os.Open(dir)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
ents, err := f.Readdirnames(1)
|
||||
if err == io.EOF {
|
||||
return true, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(ents) == 0, nil
|
||||
}
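A standalone restatement of the pruning idea above, using only the standard library; this is a simplified sketch of what cleanupEmptyDirs and removeEmptyParents do, not the gateway's own helper.

// pruneEmptyParents removes empty directories from dir upward until it
// reaches stop, the current directory, or the filesystem root, or hits a
// non-empty directory.
func pruneEmptyParents(dir, stop string) {
	for dir != stop && dir != "." && dir != string(filepath.Separator) {
		entries, err := os.ReadDir(dir)
		if err != nil || len(entries) > 0 {
			return
		}
		if os.Remove(dir) != nil {
			return
		}
		dir = filepath.Dir(dir)
	}
}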
|
||||
|
||||
@@ -26,10 +26,6 @@ import (
|
||||
"github.com/versity/versitygw/s3err"
|
||||
)
|
||||
|
||||
const (
|
||||
xattrPrefix = "user."
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNoSuchKey is returned when the key does not exist.
|
||||
ErrNoSuchKey = errors.New("no such key")
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// Copyright 2026 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
@@ -12,14 +12,8 @@
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package scoutfs
|
||||
//go:build freebsd
|
||||
|
||||
type stat struct {
|
||||
Meta_seq uint64
|
||||
Data_seq uint64
|
||||
Data_version uint64
|
||||
Online_blocks uint64
|
||||
Offline_blocks uint64
|
||||
Crtime_sec uint64
|
||||
Crtime_nsec uint32
|
||||
}
|
||||
package meta
|
||||
|
||||
const xattrPrefix = ""
|
||||
backend/meta/xattr_other.go (new file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
// Copyright 2026 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
//go:build !freebsd
|
||||
|
||||
package meta
|
||||
|
||||
const xattrPrefix = "user."
|
||||
File diff suppressed because it is too large
@@ -285,11 +285,14 @@ func (s *S3Proxy) GetBucketVersioning(ctx context.Context, bucket string) (s3res
|
||||
out, err := s.client.GetBucketVersioning(ctx, &s3.GetBucketVersioningInput{
|
||||
Bucket: &bucket,
|
||||
})
|
||||
if err != nil {
|
||||
return s3response.GetBucketVersioningOutput{}, handleError(err)
|
||||
}
|
||||
|
||||
return s3response.GetBucketVersioningOutput{
|
||||
Status: &out.Status,
|
||||
MFADelete: &out.MFADelete,
|
||||
}, handleError(err)
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *S3Proxy) ListObjectVersions(ctx context.Context, input *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error) {
|
||||
@@ -1093,6 +1096,9 @@ func (s *S3Proxy) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAt
|
||||
}
|
||||
|
||||
out, err := s.client.GetObjectAttributes(ctx, input)
|
||||
if err != nil {
|
||||
return s3response.GetObjectAttributesResponse{}, handleError(err)
|
||||
}
|
||||
|
||||
parts := s3response.ObjectParts{}
|
||||
objParts := out.ObjectParts
|
||||
@@ -1125,7 +1131,7 @@ func (s *S3Proxy) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAt
|
||||
StorageClass: out.StorageClass,
|
||||
ObjectParts: &parts,
|
||||
Checksum: out.Checksum,
|
||||
}, handleError(err)
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *S3Proxy) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
|
||||
@@ -1445,7 +1451,7 @@ func (s *S3Proxy) PutBucketAcl(ctx context.Context, bucket string, data []byte)
|
||||
return handleError(s.putMetaBucketObj(ctx, bucket, data, metaPrefixAcl))
|
||||
}
|
||||
|
||||
func (s *S3Proxy) PutObjectTagging(ctx context.Context, bucket, object string, tags map[string]string) error {
|
||||
func (s *S3Proxy) PutObjectTagging(ctx context.Context, bucket, object, versionId string, tags map[string]string) error {
|
||||
if bucket == s.metaBucket {
|
||||
return s3err.GetAPIError(s3err.ErrAccessDenied)
|
||||
}
|
||||
@@ -1460,20 +1466,22 @@ func (s *S3Proxy) PutObjectTagging(ctx context.Context, bucket, object string, t
|
||||
}
|
||||
|
||||
_, err := s.client.PutObjectTagging(ctx, &s3.PutObjectTaggingInput{
|
||||
Bucket: &bucket,
|
||||
Key: &object,
|
||||
Tagging: tagging,
|
||||
Bucket: &bucket,
|
||||
Key: &object,
|
||||
VersionId: &versionId,
|
||||
Tagging: tagging,
|
||||
})
|
||||
return handleError(err)
|
||||
}
|
||||
|
||||
func (s *S3Proxy) GetObjectTagging(ctx context.Context, bucket, object string) (map[string]string, error) {
|
||||
func (s *S3Proxy) GetObjectTagging(ctx context.Context, bucket, object, versionId string) (map[string]string, error) {
|
||||
if bucket == s.metaBucket {
|
||||
return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
|
||||
}
|
||||
output, err := s.client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
|
||||
Bucket: &bucket,
|
||||
Key: &object,
|
||||
Bucket: &bucket,
|
||||
Key: &object,
|
||||
VersionId: &versionId,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, handleError(err)
|
||||
@@ -1487,13 +1495,14 @@ func (s *S3Proxy) GetObjectTagging(ctx context.Context, bucket, object string) (
|
||||
return tags, nil
|
||||
}
|
||||
|
||||
func (s *S3Proxy) DeleteObjectTagging(ctx context.Context, bucket, object string) error {
|
||||
func (s *S3Proxy) DeleteObjectTagging(ctx context.Context, bucket, object, versionId string) error {
|
||||
if bucket == s.metaBucket {
|
||||
return s3err.GetAPIError(s3err.ErrAccessDenied)
|
||||
}
|
||||
_, err := s.client.DeleteObjectTagging(ctx, &s3.DeleteObjectTaggingInput{
|
||||
Bucket: &bucket,
|
||||
Key: &object,
|
||||
Bucket: &bucket,
|
||||
Key: &object,
|
||||
VersionId: &versionId,
|
||||
})
|
||||
return handleError(err)
|
||||
}
|
||||
@@ -1558,7 +1567,7 @@ func (s *S3Proxy) GetObjectLockConfiguration(ctx context.Context, bucket string)
|
||||
return nil, s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound)
|
||||
}
|
||||
|
||||
func (s *S3Proxy) PutObjectRetention(ctx context.Context, bucket, object, versionId string, bypass bool, retention []byte) error {
|
||||
func (s *S3Proxy) PutObjectRetention(ctx context.Context, bucket, object, versionId string, retention []byte) error {
|
||||
return s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
}
|
||||
|
||||
|
||||
@@ -15,24 +15,9 @@
|
||||
package scoutfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/pkg/xattr"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/backend/posix"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
"github.com/versity/versitygw/s3response"
|
||||
)
|
||||
|
||||
// ScoutfsOpts are the options for the ScoutFS backend
|
||||
@@ -41,6 +26,8 @@ type ScoutfsOpts struct {
|
||||
ChownUID bool
|
||||
// ChownGID sets the GID of the object to the GID of the user on PUT
|
||||
ChownGID bool
|
||||
// SetProjectID sets the Project ID of the bucket/object to the project ID of the user on PUT
|
||||
SetProjectID bool
|
||||
// BucketLinks enables symlinks to directories to be treated as buckets
|
||||
BucketLinks bool
|
||||
// VersioningDir sets the version directory to enable object versioning
|
||||
@@ -51,322 +38,10 @@ type ScoutfsOpts struct {
|
||||
GlacierMode bool
|
||||
// DisableNoArchive prevents setting noarchive on temporary files
|
||||
DisableNoArchive bool
|
||||
}
|
||||
|
||||
type ScoutFS struct {
|
||||
*posix.Posix
|
||||
rootfd *os.File
|
||||
rootdir string
|
||||
|
||||
// glaciermode enables the following behavior:
|
||||
// GET object: if file offline, return invalid object state
|
||||
// HEAD object: if file offline, set obj storage class to GLACIER
|
||||
// if file offline and staging, x-amz-restore: ongoing-request="true"
|
||||
// if file offline and not staging, x-amz-restore: ongoing-request="false"
|
||||
// if file online, x-amz-restore: ongoing-request="false", expiry-date="Fri, 2 Dec 2050 00:00:00 GMT"
|
||||
// note: this expiry-date is not used but provided for client glacier compatibility
|
||||
// ListObjects: if file offline, set obj storage class to GLACIER
|
||||
// RestoreObject: add batch stage request to file
|
||||
glaciermode bool
|
||||
|
||||
// disableNoArchive is used to disable setting scoutam noarchive flag
|
||||
// on mutlipart parts. This is enabled by default to prevent archive
|
||||
// copies of temporary multipart parts.
|
||||
disableNoArchive bool
|
||||
// ValidateBucketNames enables minimal bucket name validation to prevent
|
||||
// incorrect access to the filesystem. This is only needed if the
|
||||
// frontend is not already validating bucket names.
|
||||
ValidateBucketNames bool
|
||||
}
|
||||
|
||||
var _ backend.Backend = &ScoutFS{}
|
||||
|
||||
const (
|
||||
stageComplete = "ongoing-request=\"false\", expiry-date=\"Fri, 2 Dec 2050 00:00:00 GMT\""
|
||||
stageInProgress = "true"
|
||||
stageNotInProgress = "false"
|
||||
)
|
||||
|
||||
const (
|
||||
// ScoutFS special xattr types
|
||||
systemPrefix = "scoutfs.hide."
|
||||
onameAttr = systemPrefix + "objname"
|
||||
flagskey = systemPrefix + "sam_flags"
|
||||
stagecopykey = systemPrefix + "sam_stagereq"
|
||||
)
|
||||
|
||||
const (
|
||||
// ScoutAM Flags
|
||||
|
||||
// Staging - file requested stage
|
||||
Staging uint64 = 1 << iota
|
||||
// StageFail - all copies failed to stage
|
||||
StageFail
|
||||
// NoArchive - no archive copies of file should be made
|
||||
NoArchive
|
||||
// ExtCacheRequested means file policy requests Ext Cache
|
||||
ExtCacheRequested
|
||||
// ExtCacheDone means this file ext cache copy has been
|
||||
// created already (and possibly pruned, so may not exist)
|
||||
ExtCacheDone
|
||||
)
|
||||
|
||||
func (s *ScoutFS) Shutdown() {
|
||||
s.Posix.Shutdown()
|
||||
s.rootfd.Close()
|
||||
_ = s.rootdir
|
||||
}
|
||||
|
||||
func (*ScoutFS) String() string {
|
||||
return "ScoutFS Gateway"
|
||||
}
|
||||
|
||||
func (s *ScoutFS) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
|
||||
out, err := s.Posix.UploadPart(ctx, input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !s.disableNoArchive {
|
||||
sum := sha256.Sum256([]byte(*input.Key))
|
||||
partPath := filepath.Join(
|
||||
*input.Bucket, // bucket
|
||||
posix.MetaTmpMultipartDir, // temp multipart dir
|
||||
fmt.Sprintf("%x", sum), // hashed objname
|
||||
*input.UploadId, // upload id
|
||||
fmt.Sprintf("%v", *input.PartNumber), // part number
|
||||
)
|
||||
|
||||
err = setNoArchive(partPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("set noarchive: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return out, err
|
||||
}
|
||||
|
||||
// CompleteMultipartUpload scoutfs complete upload uses scoutfs move blocks
|
||||
// ioctl to not have to read and copy the part data to the final object. This
|
||||
// saves a read and write cycle for all multipart uploads.
|
||||
func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
|
||||
return s.Posix.CompleteMultipartUploadWithCopy(ctx, input, moveData)
|
||||
}
|
||||
|
||||
func (s *ScoutFS) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
|
||||
res, err := s.Posix.HeadObject(ctx, input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if s.glaciermode {
|
||||
objPath := filepath.Join(*input.Bucket, *input.Key)
|
||||
|
||||
stclass := types.StorageClassStandard
|
||||
requestOngoing := ""
|
||||
|
||||
requestOngoing = stageComplete
|
||||
|
||||
// Check if there are any offline extents associated with this file.
|
||||
// If so, we will set storage class to glacier.
|
||||
st, err := statMore(objPath)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat more: %w", err)
|
||||
}
|
||||
if st.Offline_blocks != 0 {
|
||||
stclass = types.StorageClassGlacier
|
||||
requestOngoing = stageNotInProgress
|
||||
|
||||
ok, err := isStaging(objPath)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("check stage status: %w", err)
|
||||
}
|
||||
if ok {
|
||||
requestOngoing = stageInProgress
|
||||
}
|
||||
}
|
||||
|
||||
res.Restore = &requestOngoing
|
||||
res.StorageClass = stclass
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (s *ScoutFS) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
|
||||
bucket := *input.Bucket
|
||||
object := *input.Key
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
objPath := filepath.Join(bucket, object)
|
||||
|
||||
fi, err := os.Stat(objPath)
|
||||
if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if errors.Is(err, syscall.ENAMETOOLONG) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrKeyTooLong)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat object: %w", err)
|
||||
}
|
||||
|
||||
if strings.HasSuffix(object, "/") && !fi.IsDir() {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
|
||||
if s.glaciermode {
|
||||
// Check if there are any offline extents associated with this file.
|
||||
// If so, we will return the InvalidObjectState error.
|
||||
st, err := statMore(objPath)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat more: %w", err)
|
||||
}
|
||||
if st.Offline_blocks != 0 {
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidObjectState)
|
||||
}
|
||||
}
|
||||
|
||||
return s.Posix.GetObject(ctx, input)
|
||||
}
|
||||
|
||||
func (s *ScoutFS) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
|
||||
if s.glaciermode {
|
||||
return s.Posix.ListObjectsParametrized(ctx, input, s.glacierFileToObj)
|
||||
} else {
|
||||
return s.Posix.ListObjects(ctx, input)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ScoutFS) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error) {
|
||||
if s.glaciermode {
|
||||
return s.Posix.ListObjectsV2Parametrized(ctx, input, s.glacierFileToObj)
|
||||
} else {
|
||||
return s.Posix.ListObjectsV2(ctx, input)
|
||||
}
|
||||
}
|
||||
|
||||
// FileToObj function for ListObject calls that adds a Glacier storage class if the file is offline
|
||||
func (s *ScoutFS) glacierFileToObj(bucket string, fetchOwner bool) backend.GetObjFunc {
|
||||
posixFileToObj := s.Posix.FileToObj(bucket, fetchOwner)
|
||||
|
||||
return func(path string, d fs.DirEntry) (s3response.Object, error) {
|
||||
res, err := posixFileToObj(path, d)
|
||||
if err != nil || d.IsDir() {
|
||||
return res, err
|
||||
}
|
||||
objPath := filepath.Join(bucket, path)
|
||||
// Check if there are any offline extents associated with this file.
|
||||
// If so, we will return the Glacier storage class
|
||||
st, err := statMore(objPath)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3response.Object{}, backend.ErrSkipObj
|
||||
}
|
||||
if err != nil {
|
||||
return s3response.Object{}, fmt.Errorf("stat more: %w", err)
|
||||
}
|
||||
if st.Offline_blocks != 0 {
|
||||
res.StorageClass = types.ObjectStorageClassGlacier
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
}
|
||||
|
||||
// RestoreObject will set stage request on file if offline and do nothing if
|
||||
// file is online
|
||||
func (s *ScoutFS) RestoreObject(_ context.Context, input *s3.RestoreObjectInput) error {
|
||||
bucket := *input.Bucket
|
||||
object := *input.Key
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
err = setStaging(filepath.Join(bucket, object))
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("stage object: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func isStaging(objname string) (bool, error) {
|
||||
b, err := xattr.Get(objname, flagskey)
|
||||
if err != nil && !isNoAttr(err) {
|
||||
return false, err
|
||||
}
|
||||
|
||||
var flags uint64
|
||||
if !isNoAttr(err) {
|
||||
err = json.Unmarshal(b, &flags)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
return flags&Staging == Staging, nil
|
||||
}
|
||||
|
||||
func setFlag(objname string, flag uint64) error {
|
||||
b, err := xattr.Get(objname, flagskey)
|
||||
if err != nil && !isNoAttr(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
var oldflags uint64
|
||||
if !isNoAttr(err) {
|
||||
err = json.Unmarshal(b, &oldflags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
newflags := oldflags | flag
|
||||
|
||||
if newflags == oldflags {
|
||||
// no flags change, just return
|
||||
return nil
|
||||
}
|
||||
|
||||
b, err = json.Marshal(&newflags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return xattr.Set(objname, flagskey, b)
|
||||
}
|
||||
|
||||
func setStaging(objname string) error {
|
||||
return setFlag(objname, Staging)
|
||||
}
|
||||
|
||||
func setNoArchive(objname string) error {
|
||||
return setFlag(objname, NoArchive)
|
||||
}
|
||||
|
||||
func isNoAttr(err error) bool {
|
||||
xerr, ok := err.(*xattr.Error)
|
||||
if ok && xerr.Err == xattr.ENOATTR {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -17,24 +17,70 @@
|
||||
package scoutfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/pkg/xattr"
|
||||
"github.com/versity/scoutfs-go"
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/backend/meta"
|
||||
"github.com/versity/versitygw/backend/posix"
|
||||
"github.com/versity/versitygw/debuglogger"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
"github.com/versity/versitygw/s3response"
|
||||
)
|
||||
|
||||
type ScoutFS struct {
|
||||
*posix.Posix
|
||||
rootfd *os.File
|
||||
rootdir string
|
||||
|
||||
// glaciermode enables the following behavior:
|
||||
// GET object: if file offline, return invalid object state
|
||||
// HEAD object: if file offline, set obj storage class to GLACIER
|
||||
// if file offline and staging, x-amz-restore: ongoing-request="true"
|
||||
// if file offline and not staging, x-amz-restore: ongoing-request="false"
|
||||
// if file online, x-amz-restore: ongoing-request="false", expiry-date="Fri, 2 Dec 2050 00:00:00 GMT"
|
||||
// note: this expiry-date is not used but provided for client glacier compatibility
|
||||
// ListObjects: if file offline, set obj storage class to GLACIER
|
||||
// RestoreObject: add batch stage request to file
|
||||
glaciermode bool
|
||||
|
||||
// disableNoArchive is used to disable setting scoutam noarchive flag
|
||||
// on multipart parts. This is enabled by default to prevent archive
|
||||
// copies of temporary multipart parts.
|
||||
disableNoArchive bool
|
||||
|
||||
// enable posix level bucket name validations, not needed if the
|
||||
// frontend handlers are already validating bucket names
|
||||
validateBucketName bool
|
||||
|
||||
// projectIDEnabled enables setting projectid of new buckets and objects
|
||||
// to the account project id when non-0
|
||||
projectIDEnabled bool
|
||||
}
|
||||
|
||||
func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
|
||||
metastore := meta.XattrMeta{}
|
||||
|
||||
p, err := posix.New(rootdir, metastore, posix.PosixOpts{
|
||||
ChownUID: opts.ChownUID,
|
||||
ChownGID: opts.ChownGID,
|
||||
BucketLinks: opts.BucketLinks,
|
||||
NewDirPerm: opts.NewDirPerm,
|
||||
VersioningDir: opts.VersioningDir,
|
||||
ChownUID: opts.ChownUID,
|
||||
ChownGID: opts.ChownGID,
|
||||
BucketLinks: opts.BucketLinks,
|
||||
NewDirPerm: opts.NewDirPerm,
|
||||
VersioningDir: opts.VersioningDir,
|
||||
ValidateBucketNames: opts.ValidateBucketNames,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -45,50 +91,491 @@ func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
|
||||
return nil, fmt.Errorf("open %v: %w", rootdir, err)
|
||||
}
|
||||
|
||||
setProjectID := opts.SetProjectID
|
||||
if opts.SetProjectID {
|
||||
setProjectID = fGetFormatVersion(f).AtLeast(versionScoutFsV2)
|
||||
if !setProjectID {
|
||||
fmt.Println("WARNING:")
|
||||
fmt.Println("Disabling ProjectIDs for unsupported FS format version")
|
||||
fmt.Println("See documentation for format version upgrades")
|
||||
}
|
||||
}
|
||||
|
||||
return &ScoutFS{
|
||||
Posix: p,
|
||||
rootfd: f,
|
||||
rootdir: rootdir,
|
||||
glaciermode: opts.GlacierMode,
|
||||
disableNoArchive: opts.DisableNoArchive,
|
||||
projectIDEnabled: setProjectID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func moveData(from *os.File, to *os.File) error {
// May fail if the files are not 4K aligned; check for alignment
ffi, err := from.Stat()
if err != nil {
return fmt.Errorf("stat from: %v", err)
}
tfi, err := to.Stat()
if err != nil {
return fmt.Errorf("stat to: %v", err)
}
if ffi.Size()%4096 != 0 || tfi.Size()%4096 != 0 {
return os.ErrInvalid
}

err = scoutfs.MoveData(from, to)
if err != nil {
debuglogger.Logf("ScoutFs MoveData failed: %v", err)
}
return err
}

const (
stageComplete = "ongoing-request=\"false\", expiry-date=\"Fri, 2 Dec 2050 00:00:00 GMT\""
stageInProgress = "true"
stageNotInProgress = "false"
)

const (
// ScoutFS special xattr types
systemPrefix = "scoutfs.hide."
flagskey = systemPrefix + "sam_flags"
)

const (
// ScoutAM Flags

// Staging - file requested stage
Staging uint64 = 1 << iota
// StageFail - all copies failed to stage
StageFail
// NoArchive - no archive copies of file should be made
NoArchive
// ExtCacheRequested means file policy requests Ext Cache
ExtCacheRequested
// ExtCacheDone means this file ext cache copy has been
// created already (and possibly pruned, so may not exist)
ExtCacheDone
)

func (s *ScoutFS) Shutdown() {
s.Posix.Shutdown()
s.rootfd.Close()
}

func statMore(path string) (stat, error) {
st, err := scoutfs.StatMore(path)
if err != nil {
return stat{}, err
}
var s stat

s.Meta_seq = st.Meta_seq
s.Data_seq = st.Data_seq
s.Data_version = st.Data_version
s.Online_blocks = st.Online_blocks
s.Offline_blocks = st.Offline_blocks
s.Crtime_sec = st.Crtime_sec
s.Crtime_nsec = st.Crtime_nsec

return s, nil
}

func (*ScoutFS) String() string {
return "ScoutFS Gateway"
}
|
||||
|
||||
func (s *ScoutFS) CreateBucket(ctx context.Context, input *s3.CreateBucketInput, acl []byte) error {
|
||||
err := s.Posix.CreateBucket(ctx, input, acl)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s.projectIDEnabled {
|
||||
acct, ok := ctx.Value("account").(auth.Account)
|
||||
if !ok {
|
||||
acct = auth.Account{}
|
||||
}
|
||||
|
||||
if !isValidProjectID(acct.ProjectID) {
|
||||
// early return to avoid the open if we dont have a valid
|
||||
// project id
|
||||
return nil
|
||||
}
|
||||
|
||||
f, err := os.Open(*input.Bucket)
|
||||
if err != nil {
|
||||
debuglogger.InternalError(fmt.Errorf("create bucket %q set project id - open: %v",
|
||||
*input.Bucket, err))
|
||||
return nil
|
||||
}
|
||||
|
||||
err = s.setProjectID(f, acct.ProjectID)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
debuglogger.InternalError(fmt.Errorf("create bucket %q set project id: %v",
|
||||
*input.Bucket, err))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *ScoutFS) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
|
||||
res, err := s.Posix.HeadObject(ctx, input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if s.glaciermode {
|
||||
objPath := filepath.Join(*input.Bucket, *input.Key)
|
||||
|
||||
stclass := types.StorageClassStandard
|
||||
requestOngoing := ""
|
||||
|
||||
requestOngoing = stageComplete
|
||||
|
||||
// Check if there are any offline extents associated with this file.
|
||||
// If so, we will set storage class to glacier.
|
||||
st, err := scoutfs.StatMore(objPath)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat more: %w", err)
|
||||
}
|
||||
if st.Offline_blocks != 0 {
|
||||
stclass = types.StorageClassGlacier
|
||||
requestOngoing = stageNotInProgress
|
||||
|
||||
ok, err := isStaging(objPath)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("check stage status: %w", err)
|
||||
}
|
||||
if ok {
|
||||
requestOngoing = stageInProgress
|
||||
}
|
||||
}
|
||||
|
||||
res.Restore = &requestOngoing
|
||||
res.StorageClass = stclass
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
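
Example (client side, not part of this change): a minimal sketch of how an S3 client built on aws-sdk-go-v2 might interpret the StorageClass and Restore values that glacier mode populates above. The bucket and key names are placeholders, and the check assumes the restore status string contains "true" while a stage request is in progress.

package main

import (
	"context"
	"fmt"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// placeholder bucket/key served by a glacier-mode gateway
	head, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{
		Bucket: aws.String("archive-bucket"),
		Key:    aws.String("data/file.bin"),
	})
	if err != nil {
		log.Fatal(err)
	}

	switch {
	case head.StorageClass != types.StorageClassGlacier:
		fmt.Println("object is online; GetObject will succeed")
	case head.Restore != nil && strings.Contains(*head.Restore, "true"):
		fmt.Println("stage request in progress; retry later")
	default:
		fmt.Println("object offline; issue RestoreObject to stage it")
	}
}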
|
||||
|
||||
func (s *ScoutFS) PutObject(ctx context.Context, po s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
|
||||
acct, ok := ctx.Value("account").(auth.Account)
|
||||
if !ok {
|
||||
acct = auth.Account{}
|
||||
}
|
||||
|
||||
return s.Posix.PutObjectWithPostFunc(ctx, po, func(f *os.File) error {
|
||||
err := s.setProjectID(f, acct.ProjectID)
|
||||
if err != nil {
|
||||
debuglogger.InternalError(fmt.Errorf("put object %v/%v set project id: %v",
|
||||
filepath.Join(*po.Bucket, *po.Key), acct.ProjectID, err))
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (s *ScoutFS) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
|
||||
acct, ok := ctx.Value("account").(auth.Account)
|
||||
if !ok {
|
||||
acct = auth.Account{}
|
||||
}
|
||||
|
||||
return s.Posix.UploadPartWithPostFunc(ctx, input,
|
||||
func(f *os.File) error {
|
||||
if !s.disableNoArchive {
|
||||
err := setNoArchive(f)
|
||||
if err != nil {
|
||||
return fmt.Errorf("set noarchive: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
err := s.setProjectID(f, acct.ProjectID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("set project id %v: %w", acct.ProjectID, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// CompleteMultipartUpload scoutfs complete upload uses scoutfs move blocks
|
||||
// ioctl to not have to read and copy the part data to the final object. This
|
||||
// saves a read and write cycle for all multipart uploads.
|
||||
func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
|
||||
acct, ok := ctx.Value("account").(auth.Account)
|
||||
if !ok {
|
||||
acct = auth.Account{}
|
||||
}
|
||||
|
||||
return s.Posix.CompleteMultipartUploadWithCopy(ctx, input,
|
||||
func(from *os.File, to *os.File) error {
|
||||
// May fail if the files are not 4K aligned; check for alignment
|
||||
ffi, err := from.Stat()
|
||||
if err != nil {
|
||||
return fmt.Errorf("complete-mpu stat from: %w", err)
|
||||
}
|
||||
tfi, err := to.Stat()
|
||||
if err != nil {
|
||||
return fmt.Errorf("complete-mpu stat to: %w", err)
|
||||
}
|
||||
if ffi.Size()%4096 != 0 || tfi.Size()%4096 != 0 {
|
||||
return os.ErrInvalid
|
||||
}
|
||||
|
||||
err = s.setProjectID(to, acct.ProjectID)
|
||||
if err != nil {
|
||||
debuglogger.InternalError(fmt.Errorf("complete-mpu %q/%q set project id %v: %v",
|
||||
*input.Bucket, *input.Key, acct.ProjectID, err))
|
||||
}
|
||||
|
||||
err = scoutfs.MoveData(from, to)
|
||||
if err != nil {
|
||||
return fmt.Errorf("complete-mpu movedata: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
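
A small standalone sketch of the 4 KiB alignment rule enforced by the callback above: only parts whose sizes are multiples of 4096 bytes are eligible for the MoveData ioctl, and a non-aligned part makes the callback return os.ErrInvalid (the posix layer is then assumed to fall back to a regular copy; that fallback is not shown here).

package main

import "fmt"

// moveEligible reports whether a part of the given size satisfies the 4 KiB
// alignment requirement checked before scoutfs.MoveData is attempted.
func moveEligible(size int64) bool { return size%4096 == 0 }

func main() {
	for _, size := range []int64{
		5 * 1024 * 1024,   // 5 MiB minimum part size: aligned
		8 * 1024 * 1024,   // 8 MiB part: aligned
		5*1024*1024 + 100, // trailing partial part: not aligned
	} {
		fmt.Printf("%d bytes -> eligible for MoveData: %v\n", size, moveEligible(size))
	}
}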
|
||||
|
||||
func (s *ScoutFS) isBucketValid(bucket string) bool {
|
||||
if !s.validateBucketName {
|
||||
return true
|
||||
}
|
||||
|
||||
return backend.IsValidDirectoryName(bucket)
|
||||
}
|
||||
|
||||
func (s *ScoutFS) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
|
||||
bucket := *input.Bucket
|
||||
object := *input.Key
|
||||
|
||||
if !s.isBucketValid(bucket) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
|
||||
}
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
objPath := filepath.Join(bucket, object)
|
||||
|
||||
fi, err := os.Stat(objPath)
|
||||
if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if errors.Is(err, syscall.ENAMETOOLONG) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrKeyTooLong)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat object: %w", err)
|
||||
}
|
||||
|
||||
if strings.HasSuffix(object, "/") && !fi.IsDir() {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
|
||||
if s.glaciermode {
|
||||
// Check if there are any offline extents associated with this file.
|
||||
// If so, we will return the InvalidObjectState error.
|
||||
st, err := scoutfs.StatMore(objPath)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat more: %w", err)
|
||||
}
|
||||
if st.Offline_blocks != 0 {
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidObjectState)
|
||||
}
|
||||
}
|
||||
|
||||
return s.Posix.GetObject(ctx, input)
|
||||
}
|
||||
|
||||
func (s *ScoutFS) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
|
||||
if s.glaciermode {
|
||||
return s.Posix.ListObjectsParametrized(ctx, input, s.glacierFileToObj)
|
||||
} else {
|
||||
return s.Posix.ListObjects(ctx, input)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ScoutFS) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error) {
|
||||
if s.glaciermode {
|
||||
return s.Posix.ListObjectsV2Parametrized(ctx, input, s.glacierFileToObj)
|
||||
} else {
|
||||
return s.Posix.ListObjectsV2(ctx, input)
|
||||
}
|
||||
}
|
||||
|
||||
// FileToObj function for ListObject calls that adds a Glacier storage class if the file is offline
|
||||
func (s *ScoutFS) glacierFileToObj(bucket string, fetchOwner bool) backend.GetObjFunc {
|
||||
posixFileToObj := s.Posix.FileToObj(bucket, fetchOwner)
|
||||
|
||||
return func(path string, d fs.DirEntry) (s3response.Object, error) {
|
||||
res, err := posixFileToObj(path, d)
|
||||
if err != nil || d.IsDir() {
|
||||
return res, err
|
||||
}
|
||||
objPath := filepath.Join(bucket, path)
|
||||
// Check if there are any offline extents associated with this file.
|
||||
// If so, we will return the Glacier storage class
|
||||
st, err := scoutfs.StatMore(objPath)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3response.Object{}, backend.ErrSkipObj
|
||||
}
|
||||
if err != nil {
|
||||
return s3response.Object{}, fmt.Errorf("stat more: %w", err)
|
||||
}
|
||||
if st.Offline_blocks != 0 {
|
||||
res.StorageClass = types.ObjectStorageClassGlacier
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
}
|
||||
|
||||
// RestoreObject will set stage request on file if offline and do nothing if
|
||||
// file is online
|
||||
func (s *ScoutFS) RestoreObject(_ context.Context, input *s3.RestoreObjectInput) error {
|
||||
bucket := *input.Bucket
|
||||
object := *input.Key
|
||||
|
||||
if !s.isBucketValid(bucket) {
|
||||
return s3err.GetAPIError(s3err.ErrInvalidBucketName)
|
||||
}
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
err = setStaging(filepath.Join(bucket, object))
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("stage object: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
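
Example (client side, not part of this change): a sketch of the restore workflow against a glacier-mode gateway using aws-sdk-go-v2 — request staging with RestoreObject, then poll HeadObject until the storage class is no longer GLACIER. The bucket, key, and polling interval are placeholders.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)
	bucket, key := "archive-bucket", "data/file.bin" // placeholders

	// Ask the gateway to add a stage request for the offline file.
	_, err = client.RestoreObject(context.TODO(), &s3.RestoreObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Poll until the object is reported online again.
	for {
		head, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{
			Bucket: aws.String(bucket),
			Key:    aws.String(key),
		})
		if err != nil {
			log.Fatal(err)
		}
		if head.StorageClass != types.StorageClassGlacier {
			fmt.Println("object staged back online")
			return
		}
		time.Sleep(30 * time.Second)
	}
}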
|
||||
|
||||
func isStaging(objname string) (bool, error) {
|
||||
b, err := xattr.Get(objname, flagskey)
|
||||
if err != nil && !isNoAttr(err) {
|
||||
return false, err
|
||||
}
|
||||
|
||||
var flags uint64
|
||||
if !isNoAttr(err) {
|
||||
err = json.Unmarshal(b, &flags)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
return flags&Staging == Staging, nil
|
||||
}
|
||||
|
||||
func setFlag(objname string, flag uint64) error {
|
||||
f, err := os.Open(objname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return fsetFlag(f, flag)
|
||||
}
|
||||
|
||||
func fsetFlag(f *os.File, flag uint64) error {
|
||||
b, err := xattr.FGet(f, flagskey)
|
||||
if err != nil && !isNoAttr(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
var oldflags uint64
|
||||
if !isNoAttr(err) {
|
||||
err = json.Unmarshal(b, &oldflags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
newflags := oldflags | flag
|
||||
|
||||
if newflags == oldflags {
|
||||
// no flags change, just return
|
||||
return nil
|
||||
}
|
||||
|
||||
b, err = json.Marshal(&newflags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return xattr.FSet(f, flagskey, b)
|
||||
}
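
A minimal standalone sketch of how fsetFlag stores flags: the flag word is OR-combined and JSON-encoded before being written to the scoutfs.hide.sam_flags xattr, and isStaging reverses the process to test the Staging bit. The flag values below are assumed to mirror the ScoutAM constants defined earlier; no real xattr is touched.

package main

import (
	"encoding/json"
	"fmt"
)

// Flag bits assumed to mirror the ScoutAM constants defined earlier:
// Staging = 1<<0, StageFail = 1<<1, NoArchive = 1<<2, ...
const (
	Staging uint64 = 1 << iota
	StageFail
	NoArchive
)

func main() {
	// What fsetFlag(f, Staging) would write when NoArchive is already set.
	var current uint64 = NoArchive
	updated := current | Staging

	payload, _ := json.Marshal(&updated)
	fmt.Printf("sam_flags xattr payload: %s\n", payload) // prints: 5

	// What isStaging does with the payload it reads back.
	var decoded uint64
	if err := json.Unmarshal(payload, &decoded); err != nil {
		panic(err)
	}
	fmt.Println("staging requested:", decoded&Staging == Staging) // prints: true
}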
|
||||
|
||||
func setStaging(objname string) error {
|
||||
return setFlag(objname, Staging)
|
||||
}
|
||||
|
||||
func setNoArchive(f *os.File) error {
|
||||
return fsetFlag(f, NoArchive)
|
||||
}
|
||||
|
||||
func isNoAttr(err error) bool {
|
||||
xerr, ok := err.(*xattr.Error)
|
||||
if ok && xerr.Err == xattr.ENOATTR {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *ScoutFS) setProjectID(f *os.File, proj int) error {
|
||||
if s.projectIDEnabled && isValidProjectID(proj) {
|
||||
err := scoutfs.SetProjectID(f, uint64(proj))
|
||||
if err != nil {
|
||||
return fmt.Errorf("set project id: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func isValidProjectID(proj int) bool {
|
||||
return proj > 0
|
||||
}
|
||||
|
||||
const (
|
||||
sysscoutfs = "/sys/fs/scoutfs/"
|
||||
formatversion = "format_version"
|
||||
)
|
||||
|
||||
// fGetFormatVersion returns the ScoutFS format version reported by sysfs
|
||||
func fGetFormatVersion(f *os.File) scoutFsVersion {
|
||||
fsid, err := scoutfs.GetIDs(f)
|
||||
if err != nil {
|
||||
return versionScoutFsNotScoutFS
|
||||
}
|
||||
|
||||
path := filepath.Join(sysscoutfs, fsid.ShortID, formatversion)
|
||||
buf, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return versionScoutFsUnknown
|
||||
}
|
||||
|
||||
str := strings.TrimSpace(string(buf))
|
||||
vers, err := strconv.Atoi(str)
|
||||
if err != nil {
|
||||
return versionScoutFsUnknown
|
||||
}
|
||||
|
||||
return scoutFsVersion(vers)
|
||||
}
|
||||
|
||||
const (
|
||||
// versionScoutFsUnknown is unknown version
|
||||
versionScoutFsUnknown scoutFsVersion = iota
|
||||
// versionScoutFsV1 is version 1
|
||||
versionScoutFsV1
|
||||
// versionScoutFsV2 is version 2
|
||||
versionScoutFsV2
|
||||
// versionScoutFsMin is minimum scoutfs version
|
||||
versionScoutFsMin = versionScoutFsV1
|
||||
// versionScoutFsMax is maximum scoutfs version
|
||||
versionScoutFsMax = versionScoutFsV2
|
||||
// versionScoutFsNotScoutFS means the target FS is not scoutfs
|
||||
versionScoutFsNotScoutFS = versionScoutFsMax + 1
|
||||
)
|
||||
|
||||
// scoutFsVersion version
|
||||
type scoutFsVersion int
|
||||
|
||||
// AtLeast returns true if version is valid and at least b
|
||||
func (a scoutFsVersion) AtLeast(b scoutFsVersion) bool {
|
||||
return a.IsValid() && a >= b
|
||||
}
|
||||
|
||||
func (a scoutFsVersion) IsValid() bool {
|
||||
return a >= versionScoutFsMin && a <= versionScoutFsMax
|
||||
}
|
||||
|
||||
@@ -17,23 +17,15 @@
|
||||
package scoutfs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/versity/versitygw/backend"
|
||||
)
|
||||
|
||||
type ScoutFS struct {
|
||||
backend.BackendUnsupported
|
||||
}
|
||||
|
||||
func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
|
||||
return nil, fmt.Errorf("scoutfs only available on linux")
|
||||
}
|
||||
|
||||
var (
|
||||
errNotSupported = errors.New("not supported")
|
||||
)
|
||||
|
||||
func moveData(_, _ *os.File) error {
|
||||
return errNotSupported
|
||||
}
|
||||
|
||||
func statMore(_ string) (stat, error) {
|
||||
return stat{}, errNotSupported
|
||||
}
|
||||
|
||||
@@ -19,16 +19,20 @@ import (
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/aws/smithy-go"
|
||||
"github.com/urfave/cli/v2"
|
||||
"github.com/versity/versitygw/auth"
|
||||
@@ -82,6 +86,11 @@ func adminCommand() *cli.Command {
|
||||
Usage: "groupID for the new user",
|
||||
Aliases: []string{"gi"},
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "project-id",
|
||||
Usage: "projectID for the new user",
|
||||
Aliases: []string{"pi"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -115,6 +124,11 @@ func adminCommand() *cli.Command {
|
||||
Usage: "groupID for the new user",
|
||||
Aliases: []string{"gi"},
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "project-id",
|
||||
Usage: "projectID for the new user",
|
||||
Aliases: []string{"pi"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -159,6 +173,66 @@ func adminCommand() *cli.Command {
|
||||
Usage: "Lists all the gateway buckets and owners.",
|
||||
Action: listBuckets,
|
||||
},
|
||||
{
|
||||
Name: "create-bucket",
|
||||
Usage: "Create a new bucket with owner",
|
||||
Action: createBucket,
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "owner",
|
||||
Usage: "access key id of the bucket owner",
|
||||
Required: true,
|
||||
Aliases: []string{"o"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "bucket",
|
||||
Usage: "bucket name",
|
||||
Required: true,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "acl",
|
||||
Usage: "canned ACL to apply to the bucket",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "grant-full-control",
|
||||
Usage: "Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "grant-read",
|
||||
Usage: "Allows grantee to list the objects in the bucket.",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "grant-read-acp",
|
||||
Usage: "Allows grantee to read the bucket ACL.",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "grant-write",
|
||||
Usage: `Allows grantee to create new objects in the bucket.
|
||||
For the bucket and object owners of existing objects, also allows deletions and overwrites of those objects.`,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "grant-write-acp",
|
||||
Usage: "Allows grantee to write the ACL for the applicable bucket.",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "create-bucket-configuration",
|
||||
Usage: "bucket configuration (LocationConstraint, Tags)",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "object-lock-enabled-for-bucket",
|
||||
Usage: "enable object lock for the bucket",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "no-object-lock-enabled-for-bucket",
|
||||
Usage: "disable object lock for the bucket",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "object-ownership",
|
||||
Usage: "bucket object ownership setting",
|
||||
Value: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Flags: []cli.Flag{
|
||||
// TODO: create a configuration file for this
|
||||
@@ -167,7 +241,6 @@ func adminCommand() *cli.Command {
|
||||
Usage: "admin access key id",
|
||||
EnvVars: []string{"ADMIN_ACCESS_KEY_ID", "ADMIN_ACCESS_KEY"},
|
||||
Aliases: []string{"a"},
|
||||
Required: true,
|
||||
Destination: &adminAccess,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
@@ -175,7 +248,6 @@ func adminCommand() *cli.Command {
|
||||
Usage: "admin secret access key",
|
||||
EnvVars: []string{"ADMIN_SECRET_ACCESS_KEY", "ADMIN_SECRET_KEY"},
|
||||
Aliases: []string{"s"},
|
||||
Required: true,
|
||||
Destination: &adminSecret,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
@@ -205,6 +277,32 @@ func adminCommand() *cli.Command {
|
||||
}
|
||||
}
|
||||
|
||||
// getAdminCreds returns the effective admin access key ID and secret key.
|
||||
// If admin-specific credentials are not provided, it falls back to the
|
||||
// root user credentials. Both resulting values must be non-empty;
|
||||
// otherwise, an error is returned.
|
||||
func getAdminCreds() (string, string, error) {
|
||||
access := adminAccess
|
||||
secret := adminSecret
|
||||
|
||||
// Fallbacks to root user credentials
|
||||
if access == "" {
|
||||
access = rootUserAccess
|
||||
}
|
||||
if secret == "" {
|
||||
secret = rootUserSecret
|
||||
}
|
||||
|
||||
if access == "" {
|
||||
return "", "", errors.New("subcommand admin access key id is not set")
|
||||
}
|
||||
if secret == "" {
|
||||
return "", "", errors.New("subcommand admin secret access key is not set")
|
||||
}
|
||||
|
||||
return access, secret, nil
|
||||
}
|
||||
|
||||
func initHTTPClient() *http.Client {
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: allowInsecure},
|
||||
@@ -213,8 +311,12 @@ func initHTTPClient() *http.Client {
|
||||
}
|
||||
|
||||
func createUser(ctx *cli.Context) error {
|
||||
adminAccess, adminSecret, err := getAdminCreds()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
access, secret, role := ctx.String("access"), ctx.String("secret"), ctx.String("role")
|
||||
userID, groupID, projectID := ctx.Int("user-id"), ctx.Int("group-id"), ctx.Int("project-id")
|
||||
if access == "" || secret == "" {
|
||||
return fmt.Errorf("invalid input parameters for the new user access/secret keys")
|
||||
}
|
||||
@@ -223,11 +325,12 @@ func createUser(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
acc := auth.Account{
Access: access,
Secret: secret,
Role: auth.Role(role),
UserID: userID,
GroupID: groupID,
ProjectID: projectID,
}
|
||||
|
||||
accxml, err := xml.Marshal(acc)
|
||||
@@ -273,6 +376,10 @@ func createUser(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
func deleteUser(ctx *cli.Context) error {
|
||||
adminAccess, adminSecret, err := getAdminCreds()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
access := ctx.String("access")
|
||||
if access == "" {
|
||||
return fmt.Errorf("invalid input parameter for the user access key")
|
||||
@@ -316,7 +423,19 @@ func deleteUser(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
func updateUser(ctx *cli.Context) error {
|
||||
adminAccess, adminSecret, err := getAdminCreds()
if err != nil {
return err
}

access, secret, userId, groupId, projectID, role :=
ctx.String("access"),
ctx.String("secret"),
ctx.Int("user-id"),
ctx.Int("group-id"),
ctx.Int("project-id"),
auth.Role(ctx.String("role"))
|
||||
|
||||
props := auth.MutableProps{}
|
||||
if ctx.IsSet("role") {
|
||||
if !role.IsValid() {
|
||||
@@ -333,6 +452,9 @@ func updateUser(ctx *cli.Context) error {
|
||||
if ctx.IsSet("group-id") {
|
||||
props.GroupID = &groupId
|
||||
}
|
||||
if ctx.IsSet("project-id") {
|
||||
props.ProjectID = &projectID
|
||||
}
|
||||
|
||||
propsxml, err := xml.Marshal(props)
|
||||
if err != nil {
|
||||
@@ -377,6 +499,11 @@ func updateUser(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
func listUsers(ctx *cli.Context) error {
|
||||
adminAccess, adminSecret, err := getAdminCreds()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/list-users", adminEndpoint), nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
@@ -421,6 +548,251 @@ func listUsers(ctx *cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type createBucketInput struct {
|
||||
LocationConstraint *string
|
||||
Tags []types.Tag
|
||||
}
|
||||
|
||||
// parseCreateBucketPayload parses the create-bucket-configuration input
// (JSON or shorthand syntax) into the XML payload sent to the admin API.
|
||||
func parseCreateBucketPayload(input string) ([]byte, error) {
|
||||
input = strings.TrimSpace(input)
|
||||
if input == "" {
|
||||
return []byte{}, nil
|
||||
}
|
||||
|
||||
// try to parse as json, if the input starts with '{'
|
||||
if input[0] == '{' {
|
||||
var raw createBucketInput
|
||||
err := json.Unmarshal([]byte(input), &raw)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid JSON input: %w", err)
|
||||
}
|
||||
|
||||
return xml.Marshal(s3response.CreateBucketConfiguration{
|
||||
LocationConstraint: raw.LocationConstraint,
|
||||
TagSet: raw.Tags,
|
||||
})
|
||||
}
|
||||
|
||||
var config s3response.CreateBucketConfiguration
|
||||
|
||||
// parse as string - shorthand syntax
|
||||
inputParts, err := splitTopLevel(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, part := range inputParts {
|
||||
part = strings.TrimSpace(part)
|
||||
if strings.HasPrefix(part, "LocationConstraint=") {
|
||||
locConstraint := strings.TrimPrefix(part, "LocationConstraint=")
|
||||
config.LocationConstraint = &locConstraint
|
||||
} else if strings.HasPrefix(part, "Tags=") {
|
||||
tags, err := parseTagging(strings.TrimPrefix(part, "Tags="))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config.TagSet = tags
|
||||
} else {
|
||||
return nil, fmt.Errorf("invalid component: %v", part)
|
||||
}
|
||||
}
|
||||
|
||||
return xml.Marshal(config)
|
||||
}
|
||||
|
||||
var errInvalidTagsSyntax = errors.New("invalid tags syntax")
|
||||
|
||||
// splitTopLevel splits a shorthand configuration string into top-level components.
|
||||
// The function splits only on commas that are not nested inside '{}' or '[]'.
|
||||
func splitTopLevel(s string) ([]string, error) {
|
||||
var parts []string
|
||||
start := 0
|
||||
depth := 0
|
||||
|
||||
for i, r := range s {
|
||||
switch r {
|
||||
case '{', '[':
|
||||
depth++
|
||||
case '}', ']':
|
||||
depth--
|
||||
case ',':
|
||||
if depth == 0 {
|
||||
parts = append(parts, s[start:i])
|
||||
start = i + 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if depth != 0 {
|
||||
return nil, errors.New("invalid string format")
|
||||
}
|
||||
|
||||
// add last segment
|
||||
if start < len(s) {
|
||||
parts = append(parts, s[start:])
|
||||
}
|
||||
|
||||
return parts, nil
|
||||
}
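
A small test-style sketch (same package assumed) of the splitting rule described above: commas nested inside braces or brackets are preserved, while top-level commas split. The input string is only an example.

package main

import (
	"reflect"
	"testing"
)

// TestSplitTopLevel illustrates that only top-level commas split the input.
func TestSplitTopLevel(t *testing.T) {
	got, err := splitTopLevel(`LocationConstraint=us-east-2,Tags=[{Key=env,Value=dev}]`)
	if err != nil {
		t.Fatal(err)
	}
	want := []string{"LocationConstraint=us-east-2", "Tags=[{Key=env,Value=dev}]"}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("got %v, want %v", got, want)
	}
}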
|
||||
|
||||
// parseTagging parses a tag set expressed in shorthand syntax into AWS CLI tags.
|
||||
// Expected format:
|
||||
//
|
||||
// [{Key=string,Value=string},{Key=string,Value=string}]
|
||||
//
|
||||
// The function validates bracket structure, splits tag objects at the top level,
|
||||
// and delegates individual tag parsing to parseTag. It returns an error if the
|
||||
// syntax is invalid or if any tag entry cannot be parsed.
|
||||
func parseTagging(input string) ([]types.Tag, error) {
|
||||
if len(input) < 2 {
|
||||
return nil, errInvalidTagsSyntax
|
||||
}
|
||||
|
||||
if input[0] != '[' || input[len(input)-1] != ']' {
|
||||
return nil, errInvalidTagsSyntax
|
||||
}
|
||||
// strip []
|
||||
input = input[1 : len(input)-1]
|
||||
|
||||
tagComponents, err := splitTopLevel(input)
|
||||
if err != nil {
|
||||
return nil, errInvalidTagsSyntax
|
||||
}
|
||||
result := make([]types.Tag, 0, len(tagComponents))
|
||||
for _, tagComponent := range tagComponents {
|
||||
tagComponent = strings.TrimSpace(tagComponent)
|
||||
tag, err := parseTag(tagComponent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result = append(result, tag)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// parseTag parses a single tag definition in shorthand form.
|
||||
// Expected format:
|
||||
//
|
||||
// {Key=string,Value=string}
|
||||
func parseTag(input string) (types.Tag, error) {
|
||||
input = strings.TrimSpace(input)
|
||||
|
||||
if len(input) < 2 {
|
||||
return types.Tag{}, errInvalidTagsSyntax
|
||||
}
|
||||
|
||||
if input[0] != '{' || input[len(input)-1] != '}' {
|
||||
return types.Tag{}, errInvalidTagsSyntax
|
||||
}
|
||||
|
||||
// strip {}
|
||||
input = input[1 : len(input)-1]
|
||||
|
||||
components := strings.Split(input, ",")
|
||||
if len(components) != 2 {
|
||||
return types.Tag{}, errInvalidTagsSyntax
|
||||
}
|
||||
|
||||
var key, value string
|
||||
|
||||
for _, c := range components {
|
||||
c = strings.TrimSpace(c)
|
||||
|
||||
switch {
|
||||
case strings.HasPrefix(c, "Key="):
|
||||
key = strings.TrimPrefix(c, "Key=")
|
||||
case strings.HasPrefix(c, "Value="):
|
||||
value = strings.TrimPrefix(c, "Value=")
|
||||
default:
|
||||
return types.Tag{}, errInvalidTagsSyntax
|
||||
}
|
||||
}
|
||||
|
||||
if key == "" {
|
||||
return types.Tag{}, errInvalidTagsSyntax
|
||||
}
|
||||
|
||||
return types.Tag{
|
||||
Key: &key,
|
||||
Value: &value,
|
||||
}, nil
|
||||
}
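
A test-style sketch (same package assumed) of the shorthand tag syntax accepted by the create-bucket --create-bucket-configuration flag; the tag keys and values below are placeholders.

package main

import "testing"

// TestParseTagging illustrates parsing of the shorthand
// Tags=[{Key=string,Value=string},...] form.
func TestParseTagging(t *testing.T) {
	tags, err := parseTagging(`[{Key=env,Value=dev},{Key=team,Value=storage}]`)
	if err != nil {
		t.Fatal(err)
	}
	if len(tags) != 2 {
		t.Fatalf("expected 2 tags, got %d", len(tags))
	}
	if *tags[0].Key != "env" || *tags[0].Value != "dev" {
		t.Fatalf("unexpected first tag: %s=%s", *tags[0].Key, *tags[0].Value)
	}
}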
|
||||
|
||||
func createBucket(ctx *cli.Context) error {
|
||||
adminAccess, adminSecret, err := getAdminCreds()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bucket, owner := ctx.String("bucket"), ctx.String("owner")
|
||||
|
||||
payload, err := parseCreateBucketPayload(ctx.String("create-bucket-configuration"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid create bucket configuration: %w", err)
|
||||
}
|
||||
|
||||
hashedPayload := sha256.Sum256(payload)
|
||||
hexPayload := hex.EncodeToString(hashedPayload[:])
|
||||
|
||||
headers := map[string]string{
|
||||
"x-amz-content-sha256": hexPayload,
|
||||
"x-vgw-owner": owner,
|
||||
"x-amz-acl": ctx.String("acl"),
|
||||
"x-amz-grant-full-control": ctx.String("grant-full-control"),
|
||||
"x-amz-grant-read": ctx.String("grant-read"),
|
||||
"x-amz-grant-read-acp": ctx.String("grant-read-acp"),
|
||||
"x-amz-grant-write": ctx.String("grant-write"),
|
||||
"x-amz-grant-write-acp": ctx.String("grant-write-acp"),
|
||||
"x-amz-object-ownership": ctx.String("object-ownership"),
|
||||
}
|
||||
|
||||
if ctx.Bool("object-lock-enabled-for-bucket") {
|
||||
headers["x-amz-bucket-object-lock-enabled"] = "true"
|
||||
}
|
||||
if ctx.Bool("no-object-lock-enabled-for-bucket") {
|
||||
headers["x-amz-bucket-object-lock-enabled"] = "false"
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx.Context, http.MethodPatch, fmt.Sprintf("%s/%s/create", adminEndpoint, bucket), bytes.NewReader(payload))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for key, value := range headers {
|
||||
if value != "" {
|
||||
req.Header.Set(key, value)
|
||||
}
|
||||
}
|
||||
|
||||
signer := v4.NewSigner()
|
||||
err = signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to sign the request: %w", err)
|
||||
}
|
||||
|
||||
client := initHTTPClient()
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
return parseApiError(body)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
// account table formatting
|
||||
minwidth int = 2 // minimal cell width including any padding
|
||||
@@ -433,16 +805,21 @@ const (
|
||||
func printAcctTable(accs []auth.Account) {
|
||||
w := new(tabwriter.Writer)
|
||||
w.Init(os.Stdout, minwidth, tabwidth, padding, padchar, flags)
|
||||
fmt.Fprintln(w, "Account\tRole\tUserID\tGroupID")
|
||||
fmt.Fprintln(w, "-------\t----\t------\t-------")
|
||||
fmt.Fprintln(w, "Account\tRole\tUserID\tGroupID\tProjectID")
|
||||
fmt.Fprintln(w, "-------\t----\t------\t-------\t---------")
|
||||
for _, acc := range accs {
|
||||
fmt.Fprintf(w, "%v\t%v\t%v\t%v\n", acc.Access, acc.Role, acc.UserID, acc.GroupID)
|
||||
fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\n", acc.Access, acc.Role, acc.UserID, acc.GroupID, acc.ProjectID)
|
||||
}
|
||||
fmt.Fprintln(w)
|
||||
w.Flush()
|
||||
}
|
||||
|
||||
func changeBucketOwner(ctx *cli.Context) error {
|
||||
adminAccess, adminSecret, err := getAdminCreds()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bucket, owner := ctx.String("bucket"), ctx.String("owner")
|
||||
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/change-bucket-owner/?bucket=%v&owner=%v", adminEndpoint, bucket, owner), nil)
|
||||
if err != nil {
|
||||
@@ -494,6 +871,11 @@ func printBuckets(buckets []s3response.Bucket) {
|
||||
}
|
||||
|
||||
func listBuckets(ctx *cli.Context) error {
|
||||
adminAccess, adminSecret, err := getAdminCreds()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/list-buckets", adminEndpoint), nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
|
||||
@@ -16,16 +16,15 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/urfave/cli/v2"
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/backend"
|
||||
@@ -33,56 +32,68 @@ import (
|
||||
"github.com/versity/versitygw/metrics"
|
||||
"github.com/versity/versitygw/s3api"
|
||||
"github.com/versity/versitygw/s3api/middlewares"
|
||||
"github.com/versity/versitygw/s3api/utils"
|
||||
"github.com/versity/versitygw/s3event"
|
||||
"github.com/versity/versitygw/s3log"
|
||||
"github.com/versity/versitygw/webui"
|
||||
)
|
||||
|
||||
var (
|
||||
port, admPort string
|
||||
rootUserAccess string
|
||||
rootUserSecret string
|
||||
region string
|
||||
corsAllowOrigin string
|
||||
admCertFile, admKeyFile string
|
||||
certFile, keyFile string
|
||||
kafkaURL, kafkaTopic, kafkaKey string
|
||||
natsURL, natsTopic string
|
||||
rabbitmqURL, rabbitmqExchange string
|
||||
rabbitmqRoutingKey string
|
||||
eventWebhookURL string
|
||||
eventConfigFilePath string
|
||||
logWebhookURL, accessLog string
|
||||
adminLogFile string
|
||||
healthPath string
|
||||
virtualDomain string
|
||||
debug bool
|
||||
keepAlive bool
|
||||
pprof string
|
||||
quiet bool
|
||||
readonly bool
|
||||
disableStrictBucketNames bool
|
||||
iamDir string
|
||||
ldapURL, ldapBindDN, ldapPassword string
|
||||
ldapQueryBase, ldapObjClasses string
|
||||
ldapAccessAtr, ldapSecAtr, ldapRoleAtr string
|
||||
ldapUserIdAtr, ldapGroupIdAtr string
|
||||
ldapProjectIdAtr string
|
||||
ldapTLSSkipVerify bool
|
||||
vaultEndpointURL, vaultNamespace string
|
||||
vaultSecretStoragePath string
|
||||
vaultSecretStorageNamespace string
|
||||
vaultAuthMethod, vaultAuthNamespace string
|
||||
vaultMountPath string
|
||||
vaultRootToken, vaultRoleId string
|
||||
vaultRoleSecret, vaultServerCert string
|
||||
vaultClientCert, vaultClientCertKey string
|
||||
s3IamAccess, s3IamSecret string
|
||||
s3IamRegion, s3IamBucket string
|
||||
s3IamEndpoint string
|
||||
s3IamSslNoVerify bool
|
||||
iamCacheDisable bool
|
||||
iamCacheTTL int
|
||||
iamCachePrune int
|
||||
metricsService string
|
||||
statsdServers string
|
||||
dogstatsServers string
|
||||
ipaHost, ipaVaultName string
|
||||
ipaUser, ipaPassword string
|
||||
ipaInsecure bool
|
||||
iamDebug bool
|
||||
webuiAddr string
|
||||
webuiCertFile, webuiKeyFile string
|
||||
webuiNoTLS bool
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -160,6 +171,30 @@ func initFlags() []cli.Flag {
|
||||
Destination: &port,
|
||||
Aliases: []string{"p"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "webui",
|
||||
Usage: "enable WebUI server on the specified listen address (e.g. ':7071', '127.0.0.1:7071', 'localhost:7071'; disabled when omitted)",
|
||||
EnvVars: []string{"VGW_WEBUI_PORT"},
|
||||
Destination: &webuiAddr,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "webui-cert",
|
||||
Usage: "TLS cert file for WebUI (defaults to --cert value when WebUI is enabled)",
|
||||
EnvVars: []string{"VGW_WEBUI_CERT"},
|
||||
Destination: &webuiCertFile,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "webui-key",
|
||||
Usage: "TLS key file for WebUI (defaults to --key value when WebUI is enabled)",
|
||||
EnvVars: []string{"VGW_WEBUI_KEY"},
|
||||
Destination: &webuiKeyFile,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "webui-no-tls",
|
||||
Usage: "disable TLS for WebUI even if TLS is configured for the gateway",
|
||||
EnvVars: []string{"VGW_WEBUI_NO_TLS"},
|
||||
Destination: &webuiNoTLS,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "access",
|
||||
Usage: "root user access key",
|
||||
@@ -182,6 +217,12 @@ func initFlags() []cli.Flag {
|
||||
Destination: ®ion,
|
||||
Aliases: []string{"r"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "cors-allow-origin",
|
||||
Usage: "default CORS Access-Control-Allow-Origin value (applied when no bucket CORS configuration exists, and for admin APIs)",
|
||||
EnvVars: []string{"VGW_CORS_ALLOW_ORIGIN"},
|
||||
Destination: &corsAllowOrigin,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "cert",
|
||||
Usage: "TLS cert file",
|
||||
@@ -400,24 +441,54 @@ func initFlags() []cli.Flag {
|
||||
EnvVars: []string{"VGW_IAM_LDAP_GROUP_ID_ATR"},
|
||||
Destination: &ldapGroupIdAtr,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "iam-ldap-project-id-atr",
|
||||
Usage: "ldap server user project id attribute name",
|
||||
EnvVars: []string{"VGW_IAM_LDAP_PROJECT_ID_ATR"},
|
||||
Destination: &ldapProjectIdAtr,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "iam-ldap-tls-skip-verify",
|
||||
Usage: "disable TLS certificate verification for LDAP connections (insecure, for self-signed certificates)",
|
||||
EnvVars: []string{"VGW_IAM_LDAP_TLS_SKIP_VERIFY"},
|
||||
Destination: &ldapTLSSkipVerify,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "iam-vault-endpoint-url",
|
||||
Usage: "vault server url",
|
||||
EnvVars: []string{"VGW_IAM_VAULT_ENDPOINT_URL"},
|
||||
Destination: &vaultEndpointURL,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "iam-vault-namespace",
|
||||
Usage: "vault server namespace",
|
||||
EnvVars: []string{"VGW_IAM_VAULT_NAMESPACE"},
|
||||
Destination: &vaultNamespace,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "iam-vault-secret-storage-path",
|
||||
Usage: "vault server secret storage path",
|
||||
EnvVars: []string{"VGW_IAM_VAULT_SECRET_STORAGE_PATH"},
|
||||
Destination: &vaultSecretStoragePath,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "iam-vault-secret-storage-namespace",
|
||||
Usage: "vault server secret storage namespace",
|
||||
EnvVars: []string{"VGW_IAM_VAULT_SECRET_STORAGE_NAMESPACE"},
|
||||
Destination: &vaultSecretStorageNamespace,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "iam-vault-auth-method",
|
||||
Usage: "vault server auth method",
|
||||
EnvVars: []string{"VGW_IAM_VAULT_AUTH_METHOD"},
|
||||
Destination: &vaultAuthMethod,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "iam-vault-auth-namespace",
|
||||
Usage: "vault server auth namespace",
|
||||
EnvVars: []string{"VGW_IAM_VAULT_AUTH_NAMESPACE"},
|
||||
Destination: &vaultAuthNamespace,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "iam-vault-mount-path",
|
||||
Usage: "vault server mount path",
|
||||
@@ -537,6 +608,12 @@ func initFlags() []cli.Flag {
|
||||
EnvVars: []string{"VGW_READ_ONLY"},
|
||||
Destination: &readonly,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "disable-strict-bucket-names",
|
||||
Usage: "allow relaxed bucket naming (disables strict validation checks)",
|
||||
EnvVars: []string{"VGW_DISABLE_STRICT_BUCKET_NAMES"},
|
||||
Destination: &disableStrictBucketNames,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "metrics-service-name",
|
||||
Usage: "service name tag for metrics, hostname if blank",
|
||||
@@ -596,6 +673,44 @@ func runGateway(ctx context.Context, be backend.Backend) error {
|
||||
return fmt.Errorf("root user access and secret key must be provided")
|
||||
}
|
||||
|
||||
webuiAddr = strings.TrimSpace(webuiAddr)
|
||||
if webuiAddr != "" && isAllDigits(webuiAddr) {
|
||||
webuiAddr = ":" + webuiAddr
|
||||
}
|
||||
|
||||
// WebUI runs in a browser and typically talks to the gateway/admin APIs cross-origin
|
||||
// (different port). If no bucket CORS configuration exists, those API responses need
|
||||
// a default Access-Control-Allow-Origin to be usable from the WebUI.
|
||||
if webuiAddr != "" && strings.TrimSpace(corsAllowOrigin) == "" {
|
||||
// A single Access-Control-Allow-Origin value cannot cover multiple specific
|
||||
// origins. Default to '*' for usability and print a warning so operators can
|
||||
// lock it down explicitly.
|
||||
corsAllowOrigin = "*"
|
||||
webuiScheme := "http"
|
||||
if !webuiNoTLS && (strings.TrimSpace(webuiCertFile) != "" || strings.TrimSpace(certFile) != "") {
|
||||
webuiScheme = "https"
|
||||
}
|
||||
|
||||
// Suggest a more secure explicit origin based on the actual WebUI listening interfaces.
|
||||
// (Browsers require an exact origin match; this is typically one chosen hostname/IP.)
|
||||
var suggestion string
|
||||
ips, ipsErr := getMatchingIPs(webuiAddr)
|
||||
_, webPrt, prtErr := net.SplitHostPort(webuiAddr)
|
||||
if ipsErr == nil && prtErr == nil && len(ips) > 0 {
|
||||
origins := make([]string, 0, len(ips))
|
||||
for _, ip := range ips {
|
||||
origins = append(origins, fmt.Sprintf("%s://%s:%s", webuiScheme, ip, webPrt))
|
||||
}
|
||||
suggestion = fmt.Sprintf("consider setting it to one of: %s (or your public hostname)", strings.Join(origins, ", "))
|
||||
} else {
|
||||
suggestion = fmt.Sprintf("consider setting it to %s://<host>:<port>", webuiScheme)
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "WARNING: --webui is enabled but --cors-allow-origin is not set; defaulting to '*'; %s\n", suggestion)
|
||||
}
|
||||
|
||||
utils.SetBucketNameValidationStrict(!disableStrictBucketNames)
|
||||
|
||||
if pprof != "" {
|
||||
// listen on specified port for pprof debug
|
||||
// point browser to http://<ip:port>/debug/pprof/
|
||||
@@ -604,16 +719,10 @@ func runGateway(ctx context.Context, be backend.Backend) error {
|
||||
}()
|
||||
}
|
||||
|
||||
app := fiber.New(fiber.Config{
|
||||
AppName: "versitygw",
|
||||
ServerHeader: "VERSITYGW",
|
||||
StreamRequestBody: true,
|
||||
DisableKeepalive: !keepAlive,
|
||||
Network: fiber.NetworkTCP,
|
||||
DisableStartupMessage: true,
|
||||
})
|
||||
|
||||
var opts []s3api.Option
|
||||
if corsAllowOrigin != "" {
|
||||
opts = append(opts, s3api.WithCORSAllowOrigin(corsAllowOrigin))
|
||||
}
|
||||
|
||||
if certFile != "" || keyFile != "" {
|
||||
if certFile == "" {
|
||||
@@ -623,11 +732,12 @@ func runGateway(ctx context.Context, be backend.Backend) error {
|
||||
return fmt.Errorf("TLS cert specified without key file")
|
||||
}
|
||||
|
||||
cs := utils.NewCertStorage()
err := cs.SetCertificate(certFile, keyFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tls: load certs: %v", err)
|
||||
}
|
||||
opts = append(opts, s3api.WithTLS(cs))
|
||||
}
|
||||
if admPort == "" {
|
||||
opts = append(opts, s3api.WithAdminServer())
|
||||
@@ -644,11 +754,12 @@ func runGateway(ctx context.Context, be backend.Backend) error {
|
||||
if virtualDomain != "" {
|
||||
opts = append(opts, s3api.WithHostStyle(virtualDomain))
|
||||
}
|
||||
|
||||
if keepAlive {
|
||||
opts = append(opts, s3api.WithKeepAlive())
|
||||
}
|
||||
if debug {
|
||||
debuglogger.SetDebugEnabled()
|
||||
}
|
||||
|
||||
if iamDebug {
|
||||
debuglogger.SetIAMDebugEnabled()
|
||||
}
|
||||
@@ -659,41 +770,46 @@ func runGateway(ctx context.Context, be backend.Backend) error {
|
||||
Secret: rootUserSecret,
|
||||
Role: auth.RoleAdmin,
|
||||
},
|
||||
Dir: iamDir,
|
||||
LDAPServerURL: ldapURL,
|
||||
LDAPBindDN: ldapBindDN,
|
||||
LDAPPassword: ldapPassword,
|
||||
LDAPQueryBase: ldapQueryBase,
|
||||
LDAPObjClasses: ldapObjClasses,
|
||||
LDAPAccessAtr: ldapAccessAtr,
|
||||
LDAPSecretAtr: ldapSecAtr,
|
||||
LDAPRoleAtr: ldapRoleAtr,
|
||||
LDAPUserIdAtr: ldapUserIdAtr,
|
||||
LDAPGroupIdAtr: ldapGroupIdAtr,
|
||||
LDAPProjectIdAtr: ldapProjectIdAtr,
|
||||
LDAPTLSSkipVerify: ldapTLSSkipVerify,
|
||||
VaultEndpointURL: vaultEndpointURL,
|
||||
VaultNamespace: vaultNamespace,
|
||||
VaultSecretStoragePath: vaultSecretStoragePath,
|
||||
VaultSecretStorageNamespace: vaultSecretStorageNamespace,
|
||||
VaultAuthMethod: vaultAuthMethod,
|
||||
VaultAuthNamespace: vaultAuthNamespace,
|
||||
VaultMountPath: vaultMountPath,
|
||||
VaultRootToken: vaultRootToken,
|
||||
VaultRoleId: vaultRoleId,
|
||||
VaultRoleSecret: vaultRoleSecret,
|
||||
VaultServerCert: vaultServerCert,
|
||||
VaultClientCert: vaultClientCert,
|
||||
VaultClientCertKey: vaultClientCertKey,
|
||||
S3Access: s3IamAccess,
|
||||
S3Secret: s3IamSecret,
|
||||
S3Region: s3IamRegion,
|
||||
S3Bucket: s3IamBucket,
|
||||
S3Endpoint: s3IamEndpoint,
|
||||
S3DisableSSlVerfiy: s3IamSslNoVerify,
|
||||
CacheDisable: iamCacheDisable,
|
||||
CacheTTL: iamCacheTTL,
|
||||
CachePrune: iamCachePrune,
|
||||
IpaHost: ipaHost,
|
||||
IpaVaultName: ipaVaultName,
|
||||
IpaUser: ipaUser,
|
||||
IpaPassword: ipaPassword,
|
||||
IpaInsecure: ipaInsecure,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("setup iam: %w", err)
|
||||
@@ -733,7 +849,7 @@ func runGateway(ctx context.Context, be backend.Backend) error {
|
||||
return fmt.Errorf("init bucket event notifications: %w", err)
|
||||
}
|
||||
|
||||
srv, err := s3api.New(be, middlewares.RootUserConfig{
|
||||
Access: rootUserAccess,
|
||||
Secret: rootUserSecret,
|
||||
}, port, region, iam, loggers.S3Logger, loggers.AdminLogger, evSender, metricsManager, opts...)
|
||||
@@ -744,14 +860,10 @@ func runGateway(ctx context.Context, be backend.Backend) error {
|
||||
var admSrv *s3api.S3AdminServer
|
||||
|
||||
if admPort != "" {
|
||||
admApp := fiber.New(fiber.Config{
|
||||
AppName: "versitygw",
|
||||
ServerHeader: "VERSITYGW",
|
||||
Network: fiber.NetworkTCP,
|
||||
DisableStartupMessage: true,
|
||||
})
|
||||
|
||||
var opts []s3api.AdminOpt
|
||||
if corsAllowOrigin != "" {
|
||||
opts = append(opts, s3api.WithAdminCORSAllowOrigin(corsAllowOrigin))
|
||||
}
|
||||
|
||||
if admCertFile != "" || admKeyFile != "" {
|
||||
if admCertFile == "" {
|
||||
@@ -761,11 +873,12 @@ func runGateway(ctx context.Context, be backend.Backend) error {
|
||||
return fmt.Errorf("TLS cert specified without key file")
|
||||
}
|
||||
|
||||
cs := utils.NewCertStorage()
err = cs.SetCertificate(admCertFile, admKeyFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tls: load certs: %v", err)
|
||||
}
|
||||
opts = append(opts, s3api.WithAdminSrvTLS(cs))
|
||||
}
|
||||
if quiet {
|
||||
opts = append(opts, s3api.WithAdminQuiet())
|
||||
@@ -774,18 +887,105 @@ func runGateway(ctx context.Context, be backend.Backend) error {
|
||||
opts = append(opts, s3api.WithAdminDebug())
|
||||
}
|
||||
|
||||
admSrv = s3api.NewAdminServer(be, middlewares.RootUserConfig{Access: rootUserAccess, Secret: rootUserSecret}, admPort, region, iam, loggers.AdminLogger, srv.Router.Ctrl, opts...)
|
||||
}
|
||||
|
||||
var webSrv *webui.Server
|
||||
webuiSSLEnabled := false
|
||||
webTLSCert := ""
|
||||
webTLSKey := ""
|
||||
if webuiAddr != "" {
|
||||
_, webPrt, err := net.SplitHostPort(webuiAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("webui listen address must be in the form ':port' or 'host:port': %w", err)
|
||||
}
|
||||
webPortNum, err := strconv.Atoi(webPrt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("webui port must be a number: %w", err)
|
||||
}
|
||||
if webPortNum < 0 || webPortNum > 65535 {
|
||||
return fmt.Errorf("webui port must be between 0 and 65535")
|
||||
}
|
||||
|
||||
var webOpts []webui.Option
|
||||
if !webuiNoTLS {
|
||||
// WebUI can either use explicitly provided TLS files or reuse the
|
||||
// gateway's TLS files by default.
|
||||
webTLSCert = webuiCertFile
|
||||
webTLSKey = webuiKeyFile
|
||||
if webTLSCert == "" && webTLSKey == "" {
|
||||
webTLSCert = certFile
|
||||
webTLSKey = keyFile
|
||||
}
|
||||
if webTLSCert != "" || webTLSKey != "" {
|
||||
if webTLSCert == "" {
|
||||
return fmt.Errorf("webui TLS key specified without cert file")
|
||||
}
|
||||
if webTLSKey == "" {
|
||||
return fmt.Errorf("webui TLS cert specified without key file")
|
||||
}
|
||||
webuiSSLEnabled = true
|
||||
|
||||
cs := utils.NewCertStorage()
|
||||
err := cs.SetCertificate(webTLSCert, webTLSKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tls: load certs: %v", err)
|
||||
}
|
||||
|
||||
webOpts = append(webOpts, webui.WithTLS(cs))
|
||||
}
|
||||
}
|
||||
|
||||
sslEnabled := certFile != ""
|
||||
admSSLEnabled := sslEnabled
|
||||
if admPort != "" {
|
||||
admSSLEnabled = admCertFile != ""
|
||||
}
|
||||
|
||||
gateways, err := buildServiceURLs(port, sslEnabled)
|
||||
if err != nil {
|
||||
return fmt.Errorf("webui: build gateway URLs: %w", err)
|
||||
}
|
||||
|
||||
adminGateways := gateways
|
||||
if admPort != "" {
|
||||
adminGateways, err = buildServiceURLs(admPort, admSSLEnabled)
|
||||
if err != nil {
|
||||
return fmt.Errorf("webui: build admin gateway URLs: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if quiet {
|
||||
webOpts = append(webOpts, webui.WithQuiet())
|
||||
}
|
||||
|
||||
webSrv = webui.NewServer(&webui.ServerConfig{
|
||||
ListenAddr: webuiAddr,
|
||||
Gateways: gateways,
|
||||
AdminGateways: adminGateways,
|
||||
Region: region,
|
||||
}, webOpts...)
|
||||
}
|
||||
|
||||
if !quiet {
|
||||
printBanner(port, admPort, certFile != "", admCertFile != "")
|
||||
printBanner(port, admPort, certFile != "", admCertFile != "", webuiAddr, webuiSSLEnabled)
|
||||
}
|
||||
|
||||
servers := 1
if admPort != "" {
servers++
}
if webSrv != nil {
servers++
}
c := make(chan error, servers)
|
||||
go func() { c <- srv.Serve() }()
|
||||
if admPort != "" {
|
||||
go func() { c <- admSrv.Serve() }()
|
||||
}
|
||||
if webSrv != nil {
|
||||
go func() { c <- webSrv.Serve() }()
|
||||
}
|
||||
|
||||
// for/select blocks until shutdown
|
||||
Loop:
|
||||
@@ -810,35 +1010,71 @@ Loop:
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
if certFile != "" && keyFile != "" {
|
||||
err = srv.CertStorage.SetCertificate(certFile, keyFile)
|
||||
if err != nil {
|
||||
debuglogger.InternalError(fmt.Errorf("srv cert reload failed: %w", err))
|
||||
} else {
|
||||
fmt.Printf("srv cert reloaded (cert: %s, key: %s)\n", certFile, keyFile)
|
||||
}
|
||||
}
|
||||
if admPort != "" && admCertFile != "" && admKeyFile != "" {
|
||||
err = admSrv.CertStorage.SetCertificate(admCertFile, admKeyFile)
|
||||
if err != nil {
|
||||
debuglogger.InternalError(fmt.Errorf("admSrv cert reload failed: %w", err))
|
||||
} else {
|
||||
fmt.Printf("admSrv cert reloaded (cert: %s, key: %s)\n", admCertFile, admKeyFile)
|
||||
}
|
||||
}
|
||||
if webSrv != nil && webTLSCert != "" && webTLSKey != "" {
|
||||
err := webSrv.CertStorage.SetCertificate(webTLSCert, webTLSKey)
|
||||
if err != nil {
|
||||
debuglogger.InternalError(fmt.Errorf("webSrv cert reload failed: %w", err))
|
||||
} else {
|
||||
fmt.Printf("webSrv cert reloaded (cert: %s, key: %s)\n", webTLSCert, webTLSKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
saveErr := err
|
||||
|
||||
// first shut down the s3api and admin servers
|
||||
// as other modules depend on them
|
||||
err = srv.ShutDown()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "shutdown api server: %v\n", err)
|
||||
}
|
||||
|
||||
if admSrv != nil {
|
||||
err := admSrv.Shutdown()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "shutdown admin server: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
if webSrv != nil {
|
||||
err := webSrv.Shutdown()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "shutdown webui server: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
be.Shutdown()
|
||||
|
||||
err = iam.Shutdown()
|
||||
if err != nil {
|
||||
if saveErr == nil {
|
||||
saveErr = err
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "shutdown iam: %v\n", err)
|
||||
}
|
||||
|
||||
if loggers.S3Logger != nil {
|
||||
err := loggers.S3Logger.Shutdown()
|
||||
if err != nil {
|
||||
if saveErr == nil {
|
||||
saveErr = err
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "shutdown s3 logger: %v\n", err)
|
||||
}
|
||||
}
|
||||
if loggers.AdminLogger != nil {
|
||||
err := loggers.AdminLogger.Shutdown()
|
||||
if err != nil {
|
||||
if saveErr == nil {
|
||||
saveErr = err
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "shutdown admin logger: %v\n", err)
|
||||
}
|
||||
}
|
||||
@@ -846,9 +1082,6 @@ Loop:
|
||||
if evSender != nil {
|
||||
err := evSender.Close()
|
||||
if err != nil {
|
||||
if saveErr == nil {
|
||||
saveErr = err
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "close event sender: %v\n", err)
|
||||
}
|
||||
}
|
||||
@@ -860,7 +1093,7 @@ Loop:
|
||||
return saveErr
|
||||
}
|
||||
|
||||
func printBanner(port, admPort string, ssl, admSsl bool, webuiAddr string, webuiSsl bool) {
|
||||
interfaces, err := getMatchingIPs(port)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to match local IP addresses: %v\n", err)
|
||||
@@ -942,6 +1175,30 @@ func printBanner(port, admPort string, ssl, admSsl bool) {
|
||||
}
|
||||
}
|
||||
|
||||
if strings.TrimSpace(webuiAddr) != "" {
|
||||
webInterfaces, err := getMatchingIPs(webuiAddr)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to match webui port local IP addresses: %v\n", err)
|
||||
return
|
||||
}
|
||||
_, webPrt, err := net.SplitHostPort(webuiAddr)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to parse webui port: %v\n", err)
|
||||
return
|
||||
}
|
||||
lines = append(lines,
|
||||
centerText(""),
|
||||
leftText("WebUI listening on:"),
|
||||
)
|
||||
for _, ip := range webInterfaces {
|
||||
url := fmt.Sprintf("http://%s:%s", ip, webPrt)
|
||||
if webuiSsl {
|
||||
url = fmt.Sprintf("https://%s:%s", ip, webPrt)
|
||||
}
|
||||
lines = append(lines, leftText(" "+url))
|
||||
}
|
||||
}
|
||||
|
||||
// Print the top border
|
||||
fmt.Println("┌" + strings.Repeat("─", columnWidth-2) + "┐")
|
||||
|
||||
@@ -1017,6 +1274,42 @@ func getMatchingIPs(spec string) ([]string, error) {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func buildServiceURLs(spec string, ssl bool) ([]string, error) {
|
||||
interfaces, err := getMatchingIPs(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, prt, err := net.SplitHostPort(spec)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parse address/port: %w", err)
|
||||
}
|
||||
if len(interfaces) == 0 {
|
||||
interfaces = []string{"localhost"}
|
||||
}
|
||||
|
||||
scheme := "http"
|
||||
if ssl {
|
||||
scheme = "https"
|
||||
}
|
||||
urls := make([]string, 0, len(interfaces))
|
||||
for _, ip := range interfaces {
|
||||
urls = append(urls, fmt.Sprintf("%s://%s:%s", scheme, ip, prt))
|
||||
}
|
||||
return urls, nil
|
||||
}
|
||||
|
||||
func isAllDigits(s string) bool {
|
||||
if s == "" {
|
||||
return false
|
||||
}
|
||||
for _, r := range s {
|
||||
if r < '0' || r > '9' {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
const columnWidth = 70
|
||||
|
||||
func centerText(text string) string {
|
||||
|
||||
@@ -32,8 +32,9 @@ func pluginCommand() *cli.Command {
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "config",
|
||||
Usage: "location of the config file",
|
||||
Usage: "location of the plugin config file",
|
||||
Aliases: []string{"c"},
|
||||
EnvVars: []string{"VGW_PLUGIN_CONFIG"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -120,12 +120,13 @@ func runPosix(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
opts := posix.PosixOpts{
|
||||
ChownUID: chownuid,
|
||||
ChownGID: chowngid,
|
||||
BucketLinks: bucketlinks,
|
||||
VersioningDir: versioningDir,
|
||||
NewDirPerm: fs.FileMode(dirPerms),
|
||||
ForceNoTmpFile: forceNoTmpFile,
|
||||
ChownUID: chownuid,
|
||||
ChownGID: chowngid,
|
||||
BucketLinks: bucketlinks,
|
||||
VersioningDir: versioningDir,
|
||||
NewDirPerm: fs.FileMode(dirPerms),
|
||||
ForceNoTmpFile: forceNoTmpFile,
|
||||
ValidateBucketNames: disableStrictBucketNames,
|
||||
}
|
||||
|
||||
var ms meta.MetadataStorer
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
var (
|
||||
glacier bool
|
||||
disableNoArchive bool
|
||||
setProjectID bool
|
||||
)
|
||||
|
||||
func scoutfsCommand() *cli.Command {
|
||||
@@ -66,6 +67,12 @@ move interfaces as well as support for tiered filesystems.`,
|
||||
EnvVars: []string{"VGW_CHOWN_GID"},
|
||||
Destination: &chowngid,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "projectid",
|
||||
Usage: "set project id on newly created buckets, files, and directories to client account ProjectID",
|
||||
EnvVars: []string{"VGW_SET_PROJECT_ID"},
|
||||
Destination: &setProjectID,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "bucketlinks",
|
||||
Usage: "allow symlinked directories at bucket level to be treated as buckets",
|
||||
@@ -113,6 +120,8 @@ func runScoutfs(ctx *cli.Context) error {
|
||||
opts.NewDirPerm = fs.FileMode(dirPerms)
|
||||
opts.DisableNoArchive = disableNoArchive
|
||||
opts.VersioningDir = versioningDir
|
||||
opts.ValidateBucketNames = disableStrictBucketNames
|
||||
opts.SetProjectID = setProjectID
|
||||
|
||||
be, err := scoutfs.New(ctx.Args().Get(0), opts)
|
||||
if err != nil {
|
||||
|
||||
@@ -39,6 +39,7 @@ var (
|
||||
versioningEnabled bool
|
||||
azureTests bool
|
||||
tlsStatus bool
|
||||
parallel bool
|
||||
)
|
||||
|
||||
func testCommand() *cli.Command {
|
||||
@@ -115,6 +116,12 @@ func initTestCommands() []*cli.Command {
|
||||
Destination: &azureTests,
|
||||
Aliases: []string{"azure"},
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "parallel",
|
||||
Usage: "executes the tests concurrently",
|
||||
Destination: ¶llel,
|
||||
Aliases: []string{"p"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -304,9 +311,9 @@ func initTestCommands() []*cli.Command {
|
||||
}, extractIntTests()...)
|
||||
}
|
||||
|
||||
type testFunc func(*integration.S3Conf)
|
||||
type testFunc func(*integration.TestState)
|
||||
|
||||
func getAction(tf testFunc) func(*cli.Context) error {
|
||||
func getAction(tf testFunc) func(ctx *cli.Context) error {
|
||||
return func(ctx *cli.Context) error {
|
||||
opts := []integration.Option{
|
||||
integration.WithAccess(awsID),
|
||||
@@ -329,12 +336,14 @@ func getAction(tf testFunc) func(*cli.Context) error {
|
||||
}
|
||||
|
||||
s := integration.NewS3Conf(opts...)
|
||||
tf(s)
|
||||
ts := integration.NewTestState(ctx.Context, s, parallel)
|
||||
tf(ts)
|
||||
ts.Wait()
|
||||
|
||||
fmt.Println()
|
||||
fmt.Println("RAN:", integration.RunCount, "PASS:", integration.PassCount, "FAIL:", integration.FailCount)
|
||||
if integration.FailCount > 0 {
|
||||
return fmt.Errorf("test failed with %v errors", integration.FailCount)
|
||||
fmt.Println("RAN:", integration.RunCount.Load(), "PASS:", integration.PassCount.Load(), "FAIL:", integration.FailCount.Load())
|
||||
if integration.FailCount.Load() > 0 {
|
||||
return fmt.Errorf("test failed with %v errors", integration.FailCount.Load())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
@@ -25,18 +26,39 @@ import (
|
||||
)
|
||||
|
||||
type Color string
|
||||
type prefix string
|
||||
|
||||
const (
|
||||
green Color = "\033[32m"
|
||||
yellow Color = "\033[33m"
|
||||
blue Color = "\033[34m"
|
||||
red Color = "\033[31m"
|
||||
Purple Color = "\033[0;35m"
|
||||
|
||||
prefixPanic prefix = "[PANIC]: "
|
||||
prefixInernalError prefix = "[INTERNAL ERROR]: "
|
||||
prefixInfo prefix = "[INFO]: "
|
||||
prefixDebug prefix = "[DEBUG]: "
|
||||
|
||||
reset = "\033[0m"
|
||||
borderChar = "─"
|
||||
boxWidth = 120
|
||||
)
|
||||
|
||||
// Panic prints the panics out in the console
|
||||
func Panic(er error) {
|
||||
printError(prefixPanic, er)
|
||||
}
|
||||
|
||||
// InternalError prints the internal error out in the console
|
||||
func InternalError(er error) {
|
||||
printError(prefixInernalError, er)
|
||||
}
|
||||
|
||||
func printError(prefix prefix, er error) {
|
||||
fmt.Fprintf(os.Stderr, string(red)+string(prefix)+"%v"+reset+"\n", er)
|
||||
}
|
||||
|
||||
// Logs http request details: headers, body, params, query args
|
||||
func LogFiberRequestDetails(ctx *fiber.Ctx) {
|
||||
// Log the full request url
|
||||
@@ -102,8 +124,8 @@ func Logf(format string, v ...any) {
|
||||
if !debugEnabled.Load() {
|
||||
return
|
||||
}
|
||||
debugPrefix := "[DEBUG]: "
|
||||
fmt.Printf(string(yellow)+debugPrefix+format+reset+"\n", v...)
|
||||
|
||||
fmt.Printf(string(yellow)+string(prefixDebug)+format+reset+"\n", v...)
|
||||
}
|
||||
|
||||
// Infof prints out green info block with [INFO]: prefix
|
||||
@@ -111,8 +133,8 @@ func Infof(format string, v ...any) {
|
||||
if !debugEnabled.Load() {
|
||||
return
|
||||
}
|
||||
debugPrefix := "[INFO]: "
|
||||
fmt.Printf(string(green)+debugPrefix+format+reset+"\n", v...)
|
||||
|
||||
fmt.Printf(string(green)+string(prefixInfo)+format+reset+"\n", v...)
|
||||
}
|
||||
|
||||
var debugIAMEnabled atomic.Bool
|
||||
@@ -133,8 +155,8 @@ func IAMLogf(format string, v ...any) {
|
||||
if !debugIAMEnabled.Load() {
|
||||
return
|
||||
}
|
||||
debugPrefix := "[DEBUG]: "
|
||||
fmt.Printf(string(yellow)+debugPrefix+format+reset+"\n", v...)
|
||||
|
||||
fmt.Printf(string(yellow)+string(prefixDebug)+format+reset+"\n", v...)
|
||||
}
|
||||
|
||||
// PrintInsideHorizontalBorders prints the text inside horizontal
|
||||
|
||||
51
docker-entrypoint.sh
Normal file
51
docker-entrypoint.sh
Normal file
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
BIN="${VGW_BINARY:-/usr/local/bin/versitygw}"
|
||||
|
||||
if [ ! -x "$BIN" ]; then
|
||||
echo "Entrypoint error: versitygw binary not found at $BIN" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# If arguments were provided, run them directly for backward compatibility.
|
||||
if [ "$#" -gt 0 ]; then
|
||||
exec "$BIN" "$@"
|
||||
fi
|
||||
|
||||
backend="${VGW_BACKEND:-}"
|
||||
if [ -z "$backend" ]; then
|
||||
cat >&2 <<'EOF'
|
||||
No command arguments were provided and VGW_BACKEND is unset.
|
||||
Set VGW_BACKEND to one of: posix, scoutfs, s3, azure, plugin
|
||||
or pass explicit arguments to the container to run the versitygw command directly.
|
||||
EOF
|
||||
exit 1
|
||||
fi
|
||||
|
||||
case "$backend" in
|
||||
posix|scoutfs|s3|azure|plugin)
|
||||
;;
|
||||
*)
|
||||
echo "VGW_BACKEND invalid backend (was '$backend')." >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
set -- "$backend"
|
||||
|
||||
if [ -n "${VGW_BACKEND_ARG:-}" ]; then
|
||||
set -- "$@" "$VGW_BACKEND_ARG"
|
||||
fi
|
||||
|
||||
if [ -n "${VGW_BACKEND_ARGS:-}" ]; then
|
||||
# shellcheck disable=SC2086
|
||||
set -- "$@" ${VGW_BACKEND_ARGS}
|
||||
fi
|
||||
|
||||
if [ -n "${VGW_ARGS:-}" ]; then
|
||||
# shellcheck disable=SC2086
|
||||
set -- "$@" ${VGW_ARGS}
|
||||
fi
|
||||
|
||||
exec "$BIN" "$@"
|
||||
@@ -23,7 +23,8 @@
|
||||
# VersityGW Required Options #
|
||||
##############################
|
||||
|
||||
# VGW_BACKEND must be defined, and must be one of: posix, scoutfs, or s3
|
||||
# VGW_BACKEND must be defined, and must be one of: posix, scoutfs, s3, azure,
|
||||
# or plugin
|
||||
# This defines the backend that the VGW will use for data access.
|
||||
VGW_BACKEND=posix
|
||||
|
||||
@@ -119,6 +120,12 @@ ROOT_SECRET_ACCESS_KEY=
|
||||
# https://<VGW_ENDPOINT>/<bucket>
|
||||
#VGW_VIRTUAL_DOMAIN=
|
||||
|
||||
# By default, versitygw will enforce similar bucket naming rules as described
|
||||
# in https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
|
||||
# Set to true to allow legacy or non-DNS-compliant bucket names by skipping
|
||||
# strict validation checks.
|
||||
#VGW_DISABLE_STRICT_BUCKET_NAMES=false
|
||||
|
||||
###############
|
||||
# Access Logs #
|
||||
###############
|
||||
@@ -194,6 +201,42 @@ ROOT_SECRET_ACCESS_KEY=
|
||||
# to generate a default rules file "event_config.json" in the current directory.
|
||||
#VGW_EVENT_FILTER=
|
||||
|
||||
###########
|
||||
# Web GUI #
|
||||
###########
|
||||
|
||||
# The VGW_WEBUI_PORT option enables the Web GUI server on the specified
|
||||
# listening address. The Web GUI provides a browser-based interface for managing
|
||||
# users, buckets and objects. The format can be either ':port' to listen on all
|
||||
# interfaces (e.g., ':7071') or 'host:port' to listen on a specific interface
|
||||
# (e.g., '127.0.0.1:7071' or 'localhost:7071'). When omitted, the Web GUI is
|
||||
# disabled.
|
||||
#VGW_WEBUI_PORT=
|
||||
|
||||
# The VGW_WEBUI_CERT and VGW_WEBUI_KEY options specify the TLS certificate and
|
||||
# private key for the Web GUI server. If these are not specified and TLS is
|
||||
# configured for the gateway (VGW_CERT and VGW_KEY), the Web GUI will use the
|
||||
# same certificates as the gateway. If neither are specified, the Web GUI will
|
||||
# run without TLS (HTTP only). These options allow the Web GUI to use different
|
||||
# certificates than the main S3 gateway.
|
||||
#VGW_WEBUI_CERT=
|
||||
#VGW_WEBUI_KEY=
|
||||
|
||||
# The VGW_WEBUI_NO_TLS option disables TLS for the Web GUI even if TLS
|
||||
# certificates are configured for the gateway. Set to true to force the Web GUI
|
||||
# to use HTTP instead of HTTPS. This can be useful when running the Web GUI
|
||||
# behind a reverse proxy that handles TLS termination.
|
||||
#VGW_WEBUI_NO_TLS=false
|
||||
|
||||
# The VGW_CORS_ALLOW_ORIGIN option sets the default CORS (Cross-Origin Resource
|
||||
# Sharing) Access-Control-Allow-Origin header value. This header is applied to
|
||||
# responses when no bucket-specific CORS configuration exists, and for all admin
|
||||
# API responses. When the Web GUI is enabled and this option is not set, it
|
||||
# defaults to '*' (allow all origins) for usability. For production environments,
|
||||
# it is recommended to set this to a specific origin (e.g.,
|
||||
# 'https://webui.example.com') to improve security.
|
||||
#VGW_CORS_ALLOW_ORIGIN=
|
||||
|
||||
#######################
|
||||
# Debug / Diagnostics #
|
||||
#######################
|
||||
@@ -272,6 +315,11 @@ ROOT_SECRET_ACCESS_KEY=
|
||||
#VGW_IAM_LDAP_ROLE_ATR=
|
||||
#VGW_IAM_LDAP_USER_ID_ATR=
|
||||
#VGW_IAM_LDAP_GROUP_ID_ATR=
|
||||
# Disable TLS certificate verification for LDAP connections (insecure, allows
|
||||
# self-signed certificates). This should only be used in testing environments
|
||||
# or when using self-signed certificates. The default is false (verification
|
||||
# enabled).
|
||||
#VGW_IAM_LDAP_TLS_SKIP_VERIFY=false
|
||||
|
||||
# The FreeIPA options will enable the FreeIPA IAM service with accounts stored
|
||||
# in an external FreeIPA service. Currently the FreeIPA IAM service only
|
||||
@@ -433,6 +481,11 @@ ROOT_SECRET_ACCESS_KEY=
|
||||
#VGW_CHOWN_UID=false
|
||||
#VGW_CHOWN_GID=false
|
||||
|
||||
# The VGW_SET_PROJECT_ID option will enable setting account defined ProjectID
|
||||
# for newly created buckets, files, and directories if the account ProjectID
|
||||
# is greater than 0 and the filesystem format version supports project IDs.
|
||||
#VGW_SET_PROJECT_ID=false
|
||||
|
||||
# The VGW_BUCKET_LINKS option will enable the gateway to treat symbolic links
|
||||
# to directories at the top level gateway directory as buckets.
|
||||
#VGW_BUCKET_LINKS=false
|
||||
@@ -480,3 +533,48 @@ ROOT_SECRET_ACCESS_KEY=
|
||||
#VGW_S3_DISABLE_CHECKSUM=false
|
||||
#VGW_S3_SSL_SKIP_VERIFY=false
|
||||
#VGW_S3_DEBUG=false
|
||||
|
||||
########
|
||||
# azure #
|
||||
########
|
||||
|
||||
# The azure backend allows the gateway to store objects in Azure Blob Storage.
|
||||
# Buckets created through the gateway map to blob containers within the
|
||||
# configured storage account. This backend is useful when existing workflows
|
||||
# expect an S3-compatible interface while data resides in Azure.
|
||||
|
||||
# When the azure backend is selected, configure credentials with one of the
|
||||
# following approaches:
|
||||
# - Shared key: Define AZ_ACCOUNT_NAME with the storage account name and
|
||||
# AZ_ACCESS_KEY with the corresponding account key.
|
||||
# - SAS token: Set AZ_SAS_TOKEN to an account or container scoped SAS token.
|
||||
# Provide AZ_ENDPOINT if the token does not implicitly define the endpoint.
|
||||
# - Default Azure credentials: Leave AZ_ACCOUNT_NAME and AZ_ACCESS_KEY blank
|
||||
# and configure the standard Azure identity environment variables supported
|
||||
# by the DefaultAzureCredential chain (e.g. AZURE_CLIENT_ID, AZURE_TENANT_ID,
|
||||
# AZURE_CLIENT_SECRET, managed identity, etc.).
|
||||
# Use AZ_ENDPOINT to override the service URL (for example when targeting
|
||||
# Azurite or a sovereign cloud). If unset, it defaults to
|
||||
# https://<account>.blob.core.windows.net/ when an account name is provided.
|
||||
#AZ_ACCOUNT_NAME=
|
||||
#AZ_ACCESS_KEY=
|
||||
#AZ_SAS_TOKEN=
|
||||
#AZ_ENDPOINT=
|
||||
|
||||
##########
|
||||
# plugin #
|
||||
##########
|
||||
|
||||
# The plugin backend loads a Go plugin shared object that exposes a variable
|
||||
# named "Backend" of type *plugins.BackendPlugin. The gateway uses the
|
||||
# exported constructor to create the backend implementation at runtime.
|
||||
|
||||
# Set VGW_BACKEND_ARG to the absolute path of the compiled plugin (.so) file.
|
||||
# The path must be readable by the gateway service account and remain stable
|
||||
# across restarts.
|
||||
#VGW_BACKEND_ARG=/usr/lib/versitygw/plugins/example.so
|
||||
|
||||
# Provide the plugin-specific configuration file path via VGW_PLUGIN_CONFIG.
|
||||
# The gateway automatically forwards this value to the plugin backend when it
|
||||
# starts up.
|
||||
#VGW_PLUGIN_CONFIG=/etc/versitygw.d/example-plugin.conf
|
||||
|
||||
@@ -17,7 +17,7 @@ Group=root
|
||||
|
||||
EnvironmentFile=/etc/versitygw.d/%i.conf
|
||||
|
||||
ExecStart=/bin/bash -c 'if [[ ! ("${VGW_BACKEND}" == "posix" || "${VGW_BACKEND}" == "scoutfs" || "${VGW_BACKEND}" == "s3") ]]; then echo "VGW_BACKEND environment variable not set to one of posix, scoutfs, or s3"; exit 1; fi && exec /usr/bin/versitygw "$VGW_BACKEND" "$VGW_BACKEND_ARG"'
|
||||
ExecStart=/bin/bash -c 'if [[ ! ("${VGW_BACKEND}" == "posix" || "${VGW_BACKEND}" == "scoutfs" || "${VGW_BACKEND}" == "s3" || "${VGW_BACKEND}" == "azure" || "${VGW_BACKEND}" == "plugin") ]]; then echo "VGW_BACKEND environment variable ${VGW_BACKEND} not set to valid backend type"; exit 1; fi && exec /usr/bin/versitygw "$VGW_BACKEND" "$VGW_BACKEND_ARG"'
|
||||
|
||||
# Let systemd restart this service always
|
||||
Restart=always
|
||||
|
||||
88
go.mod
88
go.mod
@@ -5,83 +5,87 @@ go 1.24.0
|
||||
toolchain go1.24.1
|
||||
|
||||
require (
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2
|
||||
github.com/DataDog/datadog-go/v5 v5.7.1
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.0
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.88.1
|
||||
github.com/aws/smithy-go v1.23.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.4
|
||||
github.com/DataDog/datadog-go/v5 v5.8.2
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1
|
||||
github.com/aws/smithy-go v1.24.0
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/go-ldap/ldap/v3 v3.4.11
|
||||
github.com/gofiber/fiber/v2 v2.52.9
|
||||
github.com/go-ldap/ldap/v3 v3.4.12
|
||||
github.com/gofiber/fiber/v2 v2.52.10
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/hashicorp/vault-client-go v0.4.3
|
||||
github.com/nats-io/nats.go v1.45.0
|
||||
github.com/minio/crc64nvme v1.1.1
|
||||
github.com/nats-io/nats.go v1.48.0
|
||||
github.com/oklog/ulid/v2 v2.1.1
|
||||
github.com/pkg/xattr v0.4.12
|
||||
github.com/rabbitmq/amqp091-go v1.10.0
|
||||
github.com/segmentio/kafka-go v0.4.49
|
||||
github.com/segmentio/kafka-go v0.4.50
|
||||
github.com/smira/go-statsd v1.3.4
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/urfave/cli/v2 v2.27.7
|
||||
github.com/valyala/fasthttp v1.66.0
|
||||
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44
|
||||
golang.org/x/sync v0.17.0
|
||||
golang.org/x/sys v0.36.0
|
||||
github.com/valyala/fasthttp v1.69.0
|
||||
github.com/versity/scoutfs-go v0.0.0-20240625221833-95fd765b760b
|
||||
golang.org/x/sync v0.19.0
|
||||
golang.org/x/sys v0.40.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
|
||||
github.com/Azure/go-ntlmssp v0.1.0 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
|
||||
github.com/clipperhouse/stringish v0.1.1 // indirect
|
||||
github.com/clipperhouse/uax29/v2 v2.3.1 // indirect
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
|
||||
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
|
||||
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/nats-io/nkeys v0.4.11 // indirect
|
||||
github.com/nats-io/nkeys v0.4.14 // indirect
|
||||
github.com/nats-io/nuid v1.0.1 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.22 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.25 // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/ryanuber/go-glob v1.0.0 // indirect
|
||||
golang.org/x/crypto v0.42.0 // indirect
|
||||
golang.org/x/net v0.44.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/time v0.13.0 // indirect
|
||||
golang.org/x/crypto v0.47.0 // indirect
|
||||
golang.org/x/net v0.49.0 // indirect
|
||||
golang.org/x/text v0.33.0 // indirect
|
||||
golang.org/x/time v0.14.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/andybalholm/brotli v1.2.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.8
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.12
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.6
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.7
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.7
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.0
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/klauspost/compress v1.18.3 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.19 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
|
||||
|
||||
181
go.sum
181
go.sum
@@ -1,68 +1,74 @@
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 h1:MhRfI58HblXzCtWEZCO0feHs8LweePB3s90r7WaR1KU=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0/go.mod h1:okZ+ZURbArNdlJ+ptXoyHNuOETzOl1Oww19rm8I2WLA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 h1:fou+2+WFTib47nS+nz/ozhEBnvU96bKHy6LjRsY4E28=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0/go.mod h1:t76Ruy8AHvUAC8GfMWJMa0ElSbuIcO03NLpynfbgsPA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 h1:FwladfywkNirM+FZYLBR2kBz5C8Tg0fw5w5Y7meRXWI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2/go.mod h1:vv5Ad0RrIoT1lJFdWBZwt4mB1+j+V8DUroixmKDTCdk=
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.4 h1:jWQK1GI+LeGGUKBADtcH2rRqPxYB1Ljwms5gFA2LqrM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.4/go.mod h1:8mwH4klAm9DUgR2EEHyEEAQlRDvLPyg5fQry3y+cDew=
|
||||
github.com/Azure/go-ntlmssp v0.1.0 h1:DjFo6YtWzNqNvQdrwEyr/e4nhU3vRiwenz5QX7sFz+A=
|
||||
github.com/Azure/go-ntlmssp v0.1.0/go.mod h1:NYqdhxd/8aAct/s4qSYZEerdPuH1liG2/X9DiVTbhpk=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
|
||||
github.com/DataDog/datadog-go/v5 v5.7.1 h1:dNhEwKaO3LJhGYKajl2DjobArfa5R9YF72z3Dy+PH3k=
|
||||
github.com/DataDog/datadog-go/v5 v5.7.1/go.mod h1:CA9Ih6tb3jtxk+ps1xvTnxmhjr7ldE8TiwrZyrm31ss=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
|
||||
github.com/DataDog/datadog-go/v5 v5.8.2 h1:9IEfH1Mw9AjWwhAMqCAkhbxjuJeMxm2ARX2VdgL+ols=
|
||||
github.com/DataDog/datadog-go/v5 v5.8.2/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
|
||||
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
|
||||
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktpoUAgOJK3OTFc/xug0PCXYCqU0FgDKI=
|
||||
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
|
||||
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
|
||||
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.0 h1:xm5WV/2L4emMRmMjHFykqiA4M/ra0DJVSWUkDyBjbg4=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.0/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.8 h1:kQjtOLlTU4m4A64TsRcqwNChhGCwaPBt+zCQt/oWsHU=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.8/go.mod h1:QPpc7IgljrKwH0+E6/KolCgr4WPLerURiU592AYzfSY=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.12 h1:zmc9e1q90wMn8wQbjryy8IwA6Q4XlaL9Bx2zIqdNNbk=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.12/go.mod h1:3VzdRDR5u3sSJRI4kYcOSIBbeYsgtVk7dG5R/U6qLWY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 h1:Is2tPmieqGS2edBnmOJIbdvOA6Op+rRpaYR60iBAwXM=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7/go.mod h1:F1i5V5421EGci570yABvpIXgRIBPb5JM+lSkHF6Dq5w=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.6 h1:bByPm7VcaAgeT2+z5m0Lj5HDzm+g9AwbA3WFx2hPby0=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.6/go.mod h1:PhTe8fR8aFW0wDc6IV9BHeIzXhpv3q6AaVHnqiv5Pyc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7 h1:UCxq0X9O3xrlENdKf1r9eRJoKz/b0AfGkpp3a7FPlhg=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7/go.mod h1:rHRoJUNUASj5Z/0eqI4w32vKvC7atoWR0jC+IkmVH8k=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7 h1:Y6DTZUn7ZUC4th9FMBbo8LVE+1fyq3ofw+tRwkUd3PY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7/go.mod h1:x3XE6vMnU9QvHN/Wrx2s44kwzV2o2g5x/siw4ZUJ9g8=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.7 h1:BszAktdUo2xlzmYHjWMq70DqJ7cROM8iBd3f6hrpuMQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.7/go.mod h1:XJ1yHki/P7ZPuG4fd3f0Pg/dSGA2cTQBCLw82MH2H48=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.7 h1:zmZ8qvtE9chfhBPuKB2aQFxW5F/rpwXUgmcVCgQzqRw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.7/go.mod h1:vVYfbpd2l+pKqlSIDIOgouxNsGu5il9uDp0ooWb0jys=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 h1:mLgc5QIgOy26qyh5bvW+nDoAppxgn3J2WV3m9ewq7+8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7/go.mod h1:wXb/eQnqt8mDQIQTTmcw58B5mYGxzLGZGK8PWNFZ0BA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.7 h1:u3VbDKUCWarWiU+aIUK4gjTr/wQFXV17y3hgNno9fcA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.7/go.mod h1:/OuMQwhSyRapYxq6ZNpPer8juGNrB4P5Oz8bZ2cgjQE=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.88.1 h1:+RpGuaQ72qnU83qBKVwxkznewEdAGhIWo/PQCmkhhog=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.88.1/go.mod h1:xajPTguLoeQMAOE44AAP2RQoUhF8ey1g5IFHARv71po=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.3 h1:7PKX3VYsZ8LUWceVRuv0+PU+E7OtQb1lgmi5vmUE9CM=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.3/go.mod h1:Ql6jE9kyyWI5JHn+61UT/Y5Z0oyVJGmgmJbZD5g4unY=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.4 h1:e0XBRn3AptQotkyBFrHAxFB8mDhAIOfsG+7KyJ0dg98=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.4/go.mod h1:XclEty74bsGBCr1s0VSaA11hQ4ZidK4viWK7rRfO88I=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 h1:PR00NXRYgY4FWHqOGx3fC3lhVKjsp1GdloDv2ynMSd8=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.4/go.mod h1:Z+Gd23v97pX9zK97+tX4ppAgqCt3Z2dIXB02CtBncK8=
|
||||
github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE=
|
||||
github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.0 h1:pQZGI0qQXeCHZHMeWzhwPu+4jkWrdrIb2dgpG4OKmco=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.0/go.mod h1:XGq5kImVqQT4HUNbbG+0Y8O74URsPNH7CGPg1s1HW5E=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 h1:JqcdRG//czea7Ppjb+g/n4o8i/R50aTBHkA7vu0lK+k=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17/go.mod h1:CO+WeGmIdj/MlPel2KwID9Gt7CNq4M65HUfBW97liM0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 h1:Z5EiPIzXKewUQK0QTMkutjiaPVeVYXX7KIqhXu/0fXs=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8/go.mod h1:FsTpJtvC4U1fyDXk7c71XoDv3HlRm8V3NiYLeYLh5YE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 h1:bGeHBsGZx0Dvu/eJC0Lh9adJa3M1xREcndxLNZlve2U=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17/go.mod h1:dcW24lbU0CzHusTE8LLHhRLI42ejmINN8Lcr22bwh/g=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1 h1:C2dUPSnEpy4voWFIq3JNd8gN0Y5vYGDo44eUE58a/p8=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1/go.mod h1:5jggDlZ2CLQhwJBiZJb4vfk4f0GxWdEDruWKEJ1xOdo=
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
|
||||
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
|
||||
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs=
|
||||
github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA=
|
||||
github.com/clipperhouse/uax29/v2 v2.3.1 h1:RjM8gnVbFbgI67SBekIC7ihFpyXwRPYWXn9BZActHbw=
|
||||
github.com/clipperhouse/uax29/v2 v2.3.1/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -72,10 +78,10 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
|
||||
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
|
||||
github.com/go-ldap/ldap/v3 v3.4.11 h1:4k0Yxweg+a3OyBLjdYn5OKglv18JNvfDykSoI8bW0gU=
|
||||
github.com/go-ldap/ldap/v3 v3.4.11/go.mod h1:bY7t0FLK8OAVpp/vV6sSlpz3EQDGcQwc8pF0ujLgKvM=
|
||||
github.com/gofiber/fiber/v2 v2.52.9 h1:YjKl5DOiyP3j0mO61u3NTmK7or8GzzWzCFzkboyP5cw=
|
||||
github.com/gofiber/fiber/v2 v2.52.9/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
|
||||
github.com/go-ldap/ldap/v3 v3.4.12 h1:1b81mv7MagXZ7+1r7cLTWmyuTqVqdwbtJSjC0DAp9s4=
|
||||
github.com/go-ldap/ldap/v3 v3.4.12/go.mod h1:+SPAGcTtOfmGsCb3h1RFiq4xpp4N636G75OEace8lNo=
|
||||
github.com/gofiber/fiber/v2 v2.52.10 h1:jRHROi2BuNti6NYXmZ6gbNSfT3zj/8c0xy94GOU5elY=
|
||||
github.com/gofiber/fiber/v2 v2.52.10/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
|
||||
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
|
||||
@@ -111,8 +117,10 @@ github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZ
|
||||
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
|
||||
github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
|
||||
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw=
|
||||
github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
@@ -123,21 +131,23 @@ github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHP
|
||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw=
|
||||
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
|
||||
github.com/minio/crc64nvme v1.1.1 h1:8dwx/Pz49suywbO+auHCBpCtlW1OfpcLN7wYgVR6wAI=
|
||||
github.com/minio/crc64nvme v1.1.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/nats-io/nats.go v1.45.0 h1:/wGPbnYXDM0pLKFjZTX+2JOw9TQPoIgTFrUaH97giwA=
|
||||
github.com/nats-io/nats.go v1.45.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
|
||||
github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0=
|
||||
github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE=
|
||||
github.com/nats-io/nats.go v1.48.0 h1:pSFyXApG+yWU/TgbKCjmm5K4wrHu86231/w84qRVR+U=
|
||||
github.com/nats-io/nats.go v1.48.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
|
||||
github.com/nats-io/nkeys v0.4.14 h1:ofx8UiyHP5S4Q52/THHucCJsMWu6zhf4DLh0U2593HE=
|
||||
github.com/nats-io/nkeys v0.4.14/go.mod h1:seG5UKwYdZXb7M1y1vvu53mNh3xq2B6um/XUgYAgvkM=
|
||||
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
|
||||
github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
|
||||
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
|
||||
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
|
||||
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pierrec/lz4/v4 v4.1.25 h1:kocOqRffaIbU5djlIBr7Wh+cx82C0vtFb0fOurZHqD0=
|
||||
github.com/pierrec/lz4/v4 v4.1.25/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
@@ -147,17 +157,14 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw=
|
||||
github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
|
||||
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
|
||||
github.com/segmentio/kafka-go v0.4.49 h1:GJiNX1d/g+kG6ljyJEoi9++PUMdXGAxb7JGPiDCuNmk=
|
||||
github.com/segmentio/kafka-go v0.4.49/go.mod h1:Y1gn60kzLEEaW28YshXyk2+VCUKbJ3Qr6DrnT3i4+9E=
|
||||
github.com/segmentio/kafka-go v0.4.50 h1:mcyC3tT5WeyWzrFbd6O374t+hmcu1NKt2Pu1L3QaXmc=
|
||||
github.com/segmentio/kafka-go v0.4.50/go.mod h1:Y1gn60kzLEEaW28YshXyk2+VCUKbJ3Qr6DrnT3i4+9E=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smira/go-statsd v1.3.4 h1:kBYWcLSGT+qC6JVbvfz48kX7mQys32fjDOPrfmsSx2c=
|
||||
github.com/smira/go-statsd v1.3.4/go.mod h1:RjdsESPgDODtg1VpVVf9MJrEW2Hw0wtRNbmB1CAhu6A=
|
||||
@@ -176,10 +183,10 @@ github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU=
|
||||
github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.66.0 h1:M87A0Z7EayeyNaV6pfO3tUTUiYO0dZfEJnRGXTVNuyU=
|
||||
github.com/valyala/fasthttp v1.66.0/go.mod h1:Y4eC+zwoocmXSVCB1JmhNbYtS7tZPRI2ztPB72EVObs=
|
||||
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44 h1:Wx1o3pNrCzsHIIDyZ2MLRr6tF/1FhAr7HNDn80QqDWE=
|
||||
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44/go.mod h1:gJsq73k+4685y+rbDIpPY8i/5GbsiwP6JFoFyUDB1fQ=
|
||||
github.com/valyala/fasthttp v1.69.0 h1:fNLLESD2SooWeh2cidsuFtOcrEi4uB4m1mPrkJMZyVI=
|
||||
github.com/valyala/fasthttp v1.69.0/go.mod h1:4wA4PfAraPlAsJ5jMSqCE2ug5tqUPwKXxVj8oNECGcw=
|
||||
github.com/versity/scoutfs-go v0.0.0-20240625221833-95fd765b760b h1:kuqsuYRMG1c6YXBAQvWO7CiurlpYtjDJWI6oZ2K/ZZE=
|
||||
github.com/versity/scoutfs-go v0.0.0-20240625221833-95fd765b760b/go.mod h1:gJsq73k+4685y+rbDIpPY8i/5GbsiwP6JFoFyUDB1fQ=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
|
||||
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
|
||||
@@ -195,18 +202,18 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
|
||||
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
|
||||
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
|
||||
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
|
||||
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
|
||||
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -217,15 +224,15 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
|
||||
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
|
||||
golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
||||
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
|
||||
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
|
||||
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
|
||||
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
|
||||
@@ -125,6 +125,7 @@ var (
|
||||
ActionAdminChangeBucketOwner = "admin_ChangeBucketOwner"
|
||||
ActionAdminListUsers = "admin_ListUsers"
|
||||
ActionAdminListBuckets = "admin_ListBuckets"
|
||||
ActionAdminCreateBucket = "admin_CreateBucket"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
||||
11
runtests.sh
11
runtests.sh
@@ -16,7 +16,6 @@ ECHO "Generating TLS certificate and key in the cert.pem and key.pem files"
|
||||
openssl genpkey -algorithm RSA -out key.pem -pkeyopt rsa_keygen_bits:2048
|
||||
openssl req -new -x509 -key key.pem -out cert.pem -days 365 -subj "/C=US/ST=California/L=San Francisco/O=Versity/OU=Software/CN=versity.com"
|
||||
|
||||
|
||||
ECHO "Running the sdk test over http"
|
||||
# run server in background not versioning-enabled
|
||||
# port: 7070(default)
|
||||
@@ -33,7 +32,7 @@ fi
|
||||
|
||||
# run tests
|
||||
# full flow tests
|
||||
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7070 full-flow; then
|
||||
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7070 full-flow --parallel; then
|
||||
echo "full flow tests failed"
|
||||
kill $GW_PID
|
||||
exit 1
|
||||
@@ -70,7 +69,7 @@ fi
|
||||
|
||||
# run tests
|
||||
# full flow tests
|
||||
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7071 full-flow; then
|
||||
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7071 full-flow --parallel; then
|
||||
echo "full flow tests failed"
|
||||
kill $GW_HTTPS_PID
|
||||
exit 1
|
||||
@@ -90,7 +89,6 @@ fi
|
||||
|
||||
kill $GW_HTTPS_PID
|
||||
|
||||
|
||||
ECHO "Running the sdk test over http against the versioning-enabled gateway"
|
||||
# run server in background versioning-enabled
|
||||
# port: 7072
|
||||
@@ -108,7 +106,7 @@ fi
|
||||
|
||||
# run tests
|
||||
# full flow tests
|
||||
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7072 full-flow -vs; then
|
||||
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7072 full-flow -vs --parallel; then
|
||||
echo "versioning-enabled full-flow tests failed"
|
||||
kill $GW_VS_PID
|
||||
exit 1
|
||||
@@ -140,7 +138,7 @@ fi
|
||||
|
||||
# run tests
|
||||
# full flow tests
|
||||
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7073 full-flow -vs; then
|
||||
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7073 full-flow -vs --parallel; then
|
||||
echo "versioning-enabled full-flow tests failed"
|
||||
kill $GW_VS_HTTPS_PID
|
||||
exit 1
|
||||
@@ -162,4 +160,3 @@ exit 0
|
||||
# go tool covdata percent -i=/tmp/covdata
|
||||
# go tool covdata textfmt -i=/tmp/covdata -o profile.txt
|
||||
# go tool cover -html=profile.txt
|
||||
|
||||
|
||||
@@ -24,10 +24,12 @@ import (
|
||||
"github.com/versity/versitygw/s3log"
|
||||
)
|
||||
|
||||
type S3AdminRouter struct{}
|
||||
type S3AdminRouter struct {
|
||||
s3api controllers.S3ApiController
|
||||
}
|
||||
|
||||
func (ar *S3AdminRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, root middlewares.RootUserConfig, region string, debug bool) {
|
||||
ctrl := controllers.NewAdminController(iam, be, logger)
|
||||
func (ar *S3AdminRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, root middlewares.RootUserConfig, region string, debug bool, corsAllowOrigin string) {
|
||||
ctrl := controllers.NewAdminController(iam, be, logger, ar.s3api)
|
||||
services := &controllers.Services{
|
||||
Logger: logger,
|
||||
}
|
||||
@@ -35,42 +37,82 @@ func (ar *S3AdminRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMSe
|
||||
// CreateUser admin api
|
||||
app.Patch("/create-user",
|
||||
controllers.ProcessHandlers(ctrl.CreateUser, metrics.ActionAdminCreateUser, services,
|
||||
middlewares.VerifyV4Signature(root, iam, region),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminCreateUser),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/create-user",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
// DeleteUsers admin api
|
||||
app.Patch("/delete-user",
|
||||
controllers.ProcessHandlers(ctrl.DeleteUser, metrics.ActionAdminDeleteUser, services,
|
||||
middlewares.VerifyV4Signature(root, iam, region),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminDeleteUser),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/delete-user",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
// UpdateUser admin api
|
||||
app.Patch("/update-user",
|
||||
controllers.ProcessHandlers(ctrl.UpdateUser, metrics.ActionAdminUpdateUser, services,
|
||||
middlewares.VerifyV4Signature(root, iam, region),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminUpdateUser),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/update-user",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
// ListUsers admin api
|
||||
app.Patch("/list-users",
|
||||
controllers.ProcessHandlers(ctrl.ListUsers, metrics.ActionAdminListUsers, services,
|
||||
middlewares.VerifyV4Signature(root, iam, region),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminListUsers),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/list-users",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
// ChangeBucketOwner admin api
|
||||
app.Patch("/change-bucket-owner",
|
||||
controllers.ProcessHandlers(ctrl.ChangeBucketOwner, metrics.ActionAdminChangeBucketOwner, services,
|
||||
middlewares.VerifyV4Signature(root, iam, region),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminChangeBucketOwner),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/change-bucket-owner",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
// ListBucketsAndOwners admin api
|
||||
app.Patch("/list-buckets",
|
||||
controllers.ProcessHandlers(ctrl.ListBuckets, metrics.ActionAdminListBuckets, services,
|
||||
middlewares.VerifyV4Signature(root, iam, region),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminListBuckets),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/list-buckets",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
app.Patch("/:bucket/create",
|
||||
controllers.ProcessHandlers(ctrl.CreateBucket, metrics.ActionAdminListBuckets, services,
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminCreateBucket),
|
||||
))
|
||||
app.Options("/:bucket/create",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -15,57 +15,80 @@
|
||||
package s3api
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/gofiber/fiber/v2/middleware/logger"
|
||||
"github.com/gofiber/fiber/v2/middleware/recover"
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/debuglogger"
|
||||
"github.com/versity/versitygw/s3api/controllers"
|
||||
"github.com/versity/versitygw/s3api/middlewares"
|
||||
"github.com/versity/versitygw/s3api/utils"
|
||||
"github.com/versity/versitygw/s3log"
|
||||
)
|
||||
|
||||
type S3AdminServer struct {
|
||||
app *fiber.App
|
||||
backend backend.Backend
|
||||
router *S3AdminRouter
|
||||
port string
|
||||
cert *tls.Certificate
|
||||
quiet bool
|
||||
debug bool
|
||||
app *fiber.App
|
||||
backend backend.Backend
|
||||
router *S3AdminRouter
|
||||
port string
|
||||
CertStorage *utils.CertStorage
|
||||
quiet bool
|
||||
debug bool
|
||||
corsAllowOrigin string
|
||||
}
|
||||
|
||||
func NewAdminServer(app *fiber.App, be backend.Backend, root middlewares.RootUserConfig, port, region string, iam auth.IAMService, l s3log.AuditLogger, opts ...AdminOpt) *S3AdminServer {
|
||||
func NewAdminServer(be backend.Backend, root middlewares.RootUserConfig, port, region string, iam auth.IAMService, l s3log.AuditLogger, ctrl controllers.S3ApiController, opts ...AdminOpt) *S3AdminServer {
|
||||
server := &S3AdminServer{
|
||||
app: app,
|
||||
backend: be,
|
||||
router: new(S3AdminRouter),
|
||||
port: port,
|
||||
router: &S3AdminRouter{
|
||||
s3api: ctrl,
|
||||
},
|
||||
port: port,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(server)
|
||||
}
|
||||
|
||||
app := fiber.New(fiber.Config{
|
||||
AppName: "versitygw",
|
||||
ServerHeader: "VERSITYGW",
|
||||
Network: fiber.NetworkTCP,
|
||||
DisableStartupMessage: true,
|
||||
ErrorHandler: globalErrorHandler,
|
||||
})
|
||||
|
||||
server.app = app
|
||||
|
||||
app.Use(recover.New(
|
||||
recover.Config{
|
||||
EnableStackTrace: true,
|
||||
StackTraceHandler: stackTraceHandler,
|
||||
}))
|
||||
|
||||
// Logging middlewares
|
||||
if !server.quiet {
|
||||
app.Use(logger.New(logger.Config{
|
||||
Format: "${time} | ${status} | ${latency} | ${ip} | ${method} | ${path} | ${error} | ${queryParams}\n",
|
||||
Format: "${time} | adm | ${status} | ${latency} | ${ip} | ${method} | ${path} | ${error} | ${queryParams}\n",
|
||||
}))
|
||||
}
|
||||
app.Use(controllers.WrapMiddleware(middlewares.DecodeURL, l, nil))
|
||||
app.Use(middlewares.DebugLogger())
|
||||
|
||||
server.router.Init(app, be, iam, l, root, region, server.debug)
|
||||
// initialize the debug logger in debug mode
|
||||
if debuglogger.IsDebugEnabled() {
|
||||
app.Use(middlewares.DebugLogger())
|
||||
}
|
||||
|
||||
server.router.Init(app, be, iam, l, root, region, server.debug, server.corsAllowOrigin)
|
||||
|
||||
return server
|
||||
}
|
||||
|
||||
type AdminOpt func(s *S3AdminServer)
|
||||
|
||||
func WithAdminSrvTLS(cert tls.Certificate) AdminOpt {
|
||||
return func(s *S3AdminServer) { s.cert = &cert }
|
||||
func WithAdminSrvTLS(cs *utils.CertStorage) AdminOpt {
|
||||
return func(s *S3AdminServer) { s.CertStorage = cs }
|
||||
}
|
||||
|
||||
// WithQuiet silences default logging output
|
||||
@@ -78,9 +101,25 @@ func WithAdminDebug() AdminOpt {
|
||||
return func(s *S3AdminServer) { s.debug = true }
|
||||
}
|
||||
|
||||
// WithAdminCORSAllowOrigin sets the default CORS Access-Control-Allow-Origin value
|
||||
// for the standalone admin server.
|
||||
func WithAdminCORSAllowOrigin(origin string) AdminOpt {
|
||||
return func(s *S3AdminServer) { s.corsAllowOrigin = origin }
|
||||
}
|
||||
|
||||
func (sa *S3AdminServer) Serve() (err error) {
|
||||
if sa.cert != nil {
|
||||
return sa.app.ListenTLSWithCertificate(sa.port, *sa.cert)
|
||||
if sa.CertStorage != nil {
|
||||
ln, err := utils.NewTLSListener(sa.app.Config().Network, sa.port, sa.CertStorage.GetCertificate)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return sa.app.Listener(ln)
|
||||
}
|
||||
return sa.app.Listen(sa.port)
|
||||
}
|
||||
|
||||
// ShutDown gracefully shuts down the server with a context timeout
|
||||
func (sa S3AdminServer) Shutdown() error {
|
||||
return sa.app.ShutdownWithTimeout(shutDownDuration)
|
||||
}
|
||||
|
||||
@@ -28,13 +28,14 @@ import (
|
||||
)
|
||||
|
||||
type AdminController struct {
|
||||
iam auth.IAMService
|
||||
be backend.Backend
|
||||
l s3log.AuditLogger
|
||||
iam auth.IAMService
|
||||
be backend.Backend
|
||||
l s3log.AuditLogger
|
||||
s3api S3ApiController
|
||||
}
|
||||
|
||||
func NewAdminController(iam auth.IAMService, be backend.Backend, l s3log.AuditLogger) AdminController {
|
||||
return AdminController{iam: iam, be: be, l: l}
|
||||
func NewAdminController(iam auth.IAMService, be backend.Backend, l s3log.AuditLogger, s3api S3ApiController) AdminController {
|
||||
return AdminController{iam: iam, be: be, l: l, s3api: s3api}
|
||||
}
|
||||
|
||||
func (c AdminController) CreateUser(ctx *fiber.Ctx) (*Response, error) {
|
||||
@@ -161,3 +162,39 @@ func (c AdminController) ListBuckets(ctx *fiber.Ctx) (*Response, error) {
|
||||
MetaOpts: &MetaOptions{},
|
||||
}, err
|
||||
}
|
||||
|
||||
func (c AdminController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
owner := ctx.Get("x-vgw-owner")
|
||||
if owner == "" {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
}, s3err.GetAPIError(s3err.ErrAdminEmptyBucketOwnerHeader)
|
||||
}
|
||||
|
||||
acc, err := c.iam.GetUserAccount(owner)
|
||||
if err != nil {
|
||||
if err == auth.ErrNoSuchUser {
|
||||
err = s3err.GetAPIError(s3err.ErrAdminUserNotFound)
|
||||
}
|
||||
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
}, err
|
||||
}
|
||||
|
||||
// store the owner access key id in context
|
||||
ctx.Context().SetUserValue("bucket-owner", acc)
|
||||
|
||||
_, err = c.s3api.CreateBucket(ctx)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
}, err
|
||||
}
|
||||
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
Status: http.StatusCreated,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/backend"
|
||||
@@ -32,9 +33,10 @@ import (
|
||||
|
||||
func TestNewAdminController(t *testing.T) {
|
||||
type args struct {
|
||||
iam auth.IAMService
|
||||
be backend.Backend
|
||||
l s3log.AuditLogger
|
||||
iam auth.IAMService
|
||||
be backend.Backend
|
||||
l s3log.AuditLogger
|
||||
s3api S3ApiController
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -49,7 +51,7 @@ func TestNewAdminController(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := NewAdminController(tt.args.iam, tt.args.be, tt.args.l)
|
||||
got := NewAdminController(tt.args.iam, tt.args.be, tt.args.l, tt.args.s3api)
|
||||
assert.Equal(t, got, tt.want)
|
||||
})
|
||||
}
|
||||
@@ -577,3 +579,126 @@ func TestAdminController_ListBuckets(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAdminController_CreateBucket(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input testInput
|
||||
output testOutput
|
||||
}{
|
||||
{
|
||||
name: "empty owner header",
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrAdminEmptyBucketOwnerHeader),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fails to get user account",
|
||||
input: testInput{
|
||||
extraMockErr: s3err.GetAPIError(s3err.ErrInternalError),
|
||||
headers: map[string]string{
|
||||
"x-vgw-owner": "access",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInternalError),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "user not found",
|
||||
input: testInput{
|
||||
extraMockErr: auth.ErrNoSuchUser,
|
||||
headers: map[string]string{
|
||||
"x-vgw-owner": "access",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrAdminUserNotFound),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "backend returns error",
|
||||
input: testInput{
|
||||
headers: map[string]string{
|
||||
"x-vgw-owner": "access",
|
||||
},
|
||||
locals: map[utils.ContextKey]any{
|
||||
utils.ContextKeyAccount: auth.Account{
|
||||
Access: "test-user",
|
||||
Role: "admin",
|
||||
},
|
||||
},
|
||||
beErr: s3err.GetAPIError(s3err.ErrAdminMethodNotSupported),
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrAdminMethodNotSupported),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "successful response",
|
||||
input: testInput{
|
||||
headers: map[string]string{
|
||||
"x-vgw-owner": "access",
|
||||
},
|
||||
locals: map[utils.ContextKey]any{
|
||||
utils.ContextKeyAccount: auth.Account{
|
||||
Access: "test-user",
|
||||
Role: "admin",
|
||||
},
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
Status: http.StatusCreated,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
iam := &IAMServiceMock{
|
||||
GetUserAccountFunc: func(access string) (auth.Account, error) {
|
||||
return auth.Account{}, tt.input.extraMockErr
|
||||
},
|
||||
}
|
||||
be := &BackendMock{
|
||||
CreateBucketFunc: func(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput, defaultACL []byte) error {
|
||||
return tt.input.beErr
|
||||
},
|
||||
}
|
||||
|
||||
s3api := New(be, iam, nil, nil, nil, false, "")
|
||||
|
||||
ctrl := AdminController{
|
||||
iam: iam,
|
||||
be: be,
|
||||
s3api: s3api,
|
||||
}
|
||||
|
||||
testController(
|
||||
t,
|
||||
ctrl.CreateBucket,
|
||||
tt.output.response,
|
||||
tt.output.err,
|
||||
ctxInputs{
|
||||
locals: tt.input.locals,
|
||||
headers: tt.input.headers,
|
||||
},
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -59,7 +59,7 @@ var _ backend.Backend = &BackendMock{}
|
||||
// DeleteObjectFunc: func(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
|
||||
// panic("mock out the DeleteObject method")
|
||||
// },
|
||||
// DeleteObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string) error {
|
||||
// DeleteObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string) error {
|
||||
// panic("mock out the DeleteObjectTagging method")
|
||||
// },
|
||||
// DeleteObjectsFunc: func(contextMoqParam context.Context, deleteObjectsInput *s3.DeleteObjectsInput) (s3response.DeleteResult, error) {
|
||||
@@ -101,7 +101,7 @@ var _ backend.Backend = &BackendMock{}
|
||||
// GetObjectRetentionFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string) ([]byte, error) {
|
||||
// panic("mock out the GetObjectRetention method")
|
||||
// },
|
||||
// GetObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string) (map[string]string, error) {
|
||||
// GetObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string) (map[string]string, error) {
|
||||
// panic("mock out the GetObjectTagging method")
|
||||
// },
|
||||
// HeadBucketFunc: func(contextMoqParam context.Context, headBucketInput *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
|
||||
@@ -161,10 +161,10 @@ var _ backend.Backend = &BackendMock{}
|
||||
// PutObjectLockConfigurationFunc: func(contextMoqParam context.Context, bucket string, config []byte) error {
|
||||
// panic("mock out the PutObjectLockConfiguration method")
|
||||
// },
|
||||
// PutObjectRetentionFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string, bypass bool, retention []byte) error {
|
||||
// PutObjectRetentionFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string, retention []byte) error {
|
||||
// panic("mock out the PutObjectRetention method")
|
||||
// },
|
||||
// PutObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string, tags map[string]string) error {
|
||||
// PutObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string, tags map[string]string) error {
|
||||
// panic("mock out the PutObjectTagging method")
|
||||
// },
|
||||
// RestoreObjectFunc: func(contextMoqParam context.Context, restoreObjectInput *s3.RestoreObjectInput) error {
|
||||
@@ -229,7 +229,7 @@ type BackendMock struct {
|
||||
DeleteObjectFunc func(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
|
||||
|
||||
// DeleteObjectTaggingFunc mocks the DeleteObjectTagging method.
|
||||
DeleteObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string) error
|
||||
DeleteObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string, versionId string) error
|
||||
|
||||
// DeleteObjectsFunc mocks the DeleteObjects method.
|
||||
DeleteObjectsFunc func(contextMoqParam context.Context, deleteObjectsInput *s3.DeleteObjectsInput) (s3response.DeleteResult, error)
|
||||
@@ -271,7 +271,7 @@ type BackendMock struct {
|
||||
GetObjectRetentionFunc func(contextMoqParam context.Context, bucket string, object string, versionId string) ([]byte, error)
|
||||
|
||||
// GetObjectTaggingFunc mocks the GetObjectTagging method.
|
||||
GetObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string) (map[string]string, error)
|
||||
GetObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string, versionId string) (map[string]string, error)
|
||||
|
||||
// HeadBucketFunc mocks the HeadBucket method.
|
||||
HeadBucketFunc func(contextMoqParam context.Context, headBucketInput *s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
|
||||
@@ -331,10 +331,10 @@ type BackendMock struct {
|
||||
PutObjectLockConfigurationFunc func(contextMoqParam context.Context, bucket string, config []byte) error
|
||||
|
||||
// PutObjectRetentionFunc mocks the PutObjectRetention method.
|
||||
PutObjectRetentionFunc func(contextMoqParam context.Context, bucket string, object string, versionId string, bypass bool, retention []byte) error
|
||||
PutObjectRetentionFunc func(contextMoqParam context.Context, bucket string, object string, versionId string, retention []byte) error
|
||||
|
||||
// PutObjectTaggingFunc mocks the PutObjectTagging method.
|
||||
PutObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string, tags map[string]string) error
|
||||
PutObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string, versionId string, tags map[string]string) error
|
||||
|
||||
// RestoreObjectFunc mocks the RestoreObject method.
|
||||
RestoreObjectFunc func(contextMoqParam context.Context, restoreObjectInput *s3.RestoreObjectInput) error
|
||||
@@ -452,6 +452,8 @@ type BackendMock struct {
|
||||
Bucket string
|
||||
// Object is the object argument value.
|
||||
Object string
|
||||
// VersionId is the versionId argument value.
|
||||
VersionId string
|
||||
}
|
||||
// DeleteObjects holds details about calls to the DeleteObjects method.
|
||||
DeleteObjects []struct {
|
||||
@@ -560,6 +562,8 @@ type BackendMock struct {
|
||||
Bucket string
|
||||
// Object is the object argument value.
|
||||
Object string
|
||||
// VersionId is the versionId argument value.
|
||||
VersionId string
|
||||
}
|
||||
// HeadBucket holds details about calls to the HeadBucket method.
|
||||
HeadBucket []struct {
|
||||
@@ -722,8 +726,6 @@ type BackendMock struct {
|
||||
Object string
|
||||
// VersionId is the versionId argument value.
|
||||
VersionId string
|
||||
// Bypass is the bypass argument value.
|
||||
Bypass bool
|
||||
// Retention is the retention argument value.
|
||||
Retention []byte
|
||||
}
|
||||
@@ -735,6 +737,8 @@ type BackendMock struct {
|
||||
Bucket string
|
||||
// Object is the object argument value.
|
||||
Object string
|
||||
// VersionId is the versionId argument value.
|
||||
VersionId string
|
||||
// Tags is the tags argument value.
|
||||
Tags map[string]string
|
||||
}
|
||||
@@ -1270,7 +1274,7 @@ func (mock *BackendMock) DeleteObjectCalls() []struct {
|
||||
}
|
||||
|
||||
// DeleteObjectTagging calls DeleteObjectTaggingFunc.
|
||||
func (mock *BackendMock) DeleteObjectTagging(contextMoqParam context.Context, bucket string, object string) error {
|
||||
func (mock *BackendMock) DeleteObjectTagging(contextMoqParam context.Context, bucket string, object string, versionId string) error {
|
||||
if mock.DeleteObjectTaggingFunc == nil {
|
||||
panic("BackendMock.DeleteObjectTaggingFunc: method is nil but Backend.DeleteObjectTagging was just called")
|
||||
}
|
||||
@@ -1278,15 +1282,17 @@ func (mock *BackendMock) DeleteObjectTagging(contextMoqParam context.Context, bu
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
VersionId string
|
||||
}{
|
||||
ContextMoqParam: contextMoqParam,
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
VersionId: versionId,
|
||||
}
|
||||
mock.lockDeleteObjectTagging.Lock()
|
||||
mock.calls.DeleteObjectTagging = append(mock.calls.DeleteObjectTagging, callInfo)
|
||||
mock.lockDeleteObjectTagging.Unlock()
|
||||
return mock.DeleteObjectTaggingFunc(contextMoqParam, bucket, object)
|
||||
return mock.DeleteObjectTaggingFunc(contextMoqParam, bucket, object, versionId)
|
||||
}
|
||||
|
||||
// DeleteObjectTaggingCalls gets all the calls that were made to DeleteObjectTagging.
|
||||
@@ -1297,11 +1303,13 @@ func (mock *BackendMock) DeleteObjectTaggingCalls() []struct {
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
VersionId string
|
||||
} {
|
||||
var calls []struct {
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
VersionId string
|
||||
}
|
||||
mock.lockDeleteObjectTagging.RLock()
|
||||
calls = mock.calls.DeleteObjectTagging
|
||||
@@ -1794,7 +1802,7 @@ func (mock *BackendMock) GetObjectRetentionCalls() []struct {
|
||||
}
|
||||
|
||||
// GetObjectTagging calls GetObjectTaggingFunc.
|
||||
func (mock *BackendMock) GetObjectTagging(contextMoqParam context.Context, bucket string, object string) (map[string]string, error) {
|
||||
func (mock *BackendMock) GetObjectTagging(contextMoqParam context.Context, bucket string, object string, versionId string) (map[string]string, error) {
|
||||
if mock.GetObjectTaggingFunc == nil {
|
||||
panic("BackendMock.GetObjectTaggingFunc: method is nil but Backend.GetObjectTagging was just called")
|
||||
}
|
||||
@@ -1802,15 +1810,17 @@ func (mock *BackendMock) GetObjectTagging(contextMoqParam context.Context, bucke
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
VersionId string
|
||||
}{
|
||||
ContextMoqParam: contextMoqParam,
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
VersionId: versionId,
|
||||
}
|
||||
mock.lockGetObjectTagging.Lock()
|
||||
mock.calls.GetObjectTagging = append(mock.calls.GetObjectTagging, callInfo)
|
||||
mock.lockGetObjectTagging.Unlock()
|
||||
return mock.GetObjectTaggingFunc(contextMoqParam, bucket, object)
|
||||
return mock.GetObjectTaggingFunc(contextMoqParam, bucket, object, versionId)
|
||||
}
|
||||
|
||||
// GetObjectTaggingCalls gets all the calls that were made to GetObjectTagging.
|
||||
@@ -1821,11 +1831,13 @@ func (mock *BackendMock) GetObjectTaggingCalls() []struct {
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
VersionId string
|
||||
} {
|
||||
var calls []struct {
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
VersionId string
|
||||
}
|
||||
mock.lockGetObjectTagging.RLock()
|
||||
calls = mock.calls.GetObjectTagging
|
||||
@@ -2554,7 +2566,7 @@ func (mock *BackendMock) PutObjectLockConfigurationCalls() []struct {
|
||||
}
|
||||
|
||||
// PutObjectRetention calls PutObjectRetentionFunc.
|
||||
func (mock *BackendMock) PutObjectRetention(contextMoqParam context.Context, bucket string, object string, versionId string, bypass bool, retention []byte) error {
|
||||
func (mock *BackendMock) PutObjectRetention(contextMoqParam context.Context, bucket string, object string, versionId string, retention []byte) error {
|
||||
if mock.PutObjectRetentionFunc == nil {
|
||||
panic("BackendMock.PutObjectRetentionFunc: method is nil but Backend.PutObjectRetention was just called")
|
||||
}
|
||||
@@ -2563,20 +2575,18 @@ func (mock *BackendMock) PutObjectRetention(contextMoqParam context.Context, buc
|
||||
Bucket string
|
||||
Object string
|
||||
VersionId string
|
||||
Bypass bool
|
||||
Retention []byte
|
||||
}{
|
||||
ContextMoqParam: contextMoqParam,
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
VersionId: versionId,
|
||||
Bypass: bypass,
|
||||
Retention: retention,
|
||||
}
|
||||
mock.lockPutObjectRetention.Lock()
|
||||
mock.calls.PutObjectRetention = append(mock.calls.PutObjectRetention, callInfo)
|
||||
mock.lockPutObjectRetention.Unlock()
|
||||
return mock.PutObjectRetentionFunc(contextMoqParam, bucket, object, versionId, bypass, retention)
|
||||
return mock.PutObjectRetentionFunc(contextMoqParam, bucket, object, versionId, retention)
|
||||
}
|
||||
|
||||
// PutObjectRetentionCalls gets all the calls that were made to PutObjectRetention.
|
||||
@@ -2588,7 +2598,6 @@ func (mock *BackendMock) PutObjectRetentionCalls() []struct {
|
||||
Bucket string
|
||||
Object string
|
||||
VersionId string
|
||||
Bypass bool
|
||||
Retention []byte
|
||||
} {
|
||||
var calls []struct {
|
||||
@@ -2596,7 +2605,6 @@ func (mock *BackendMock) PutObjectRetentionCalls() []struct {
|
||||
Bucket string
|
||||
Object string
|
||||
VersionId string
|
||||
Bypass bool
|
||||
Retention []byte
|
||||
}
|
||||
mock.lockPutObjectRetention.RLock()
|
||||
@@ -2606,7 +2614,7 @@ func (mock *BackendMock) PutObjectRetentionCalls() []struct {
|
||||
}
|
||||
|
||||
// PutObjectTagging calls PutObjectTaggingFunc.
|
||||
func (mock *BackendMock) PutObjectTagging(contextMoqParam context.Context, bucket string, object string, tags map[string]string) error {
|
||||
func (mock *BackendMock) PutObjectTagging(contextMoqParam context.Context, bucket string, object string, versionId string, tags map[string]string) error {
|
||||
if mock.PutObjectTaggingFunc == nil {
|
||||
panic("BackendMock.PutObjectTaggingFunc: method is nil but Backend.PutObjectTagging was just called")
|
||||
}
|
||||
@@ -2614,17 +2622,19 @@ func (mock *BackendMock) PutObjectTagging(contextMoqParam context.Context, bucke
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
VersionId string
|
||||
Tags map[string]string
|
||||
}{
|
||||
ContextMoqParam: contextMoqParam,
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
VersionId: versionId,
|
||||
Tags: tags,
|
||||
}
|
||||
mock.lockPutObjectTagging.Lock()
|
||||
mock.calls.PutObjectTagging = append(mock.calls.PutObjectTagging, callInfo)
|
||||
mock.lockPutObjectTagging.Unlock()
|
||||
return mock.PutObjectTaggingFunc(contextMoqParam, bucket, object, tags)
|
||||
return mock.PutObjectTaggingFunc(contextMoqParam, bucket, object, versionId, tags)
|
||||
}
|
||||
|
||||
// PutObjectTaggingCalls gets all the calls that were made to PutObjectTagging.
|
||||
@@ -2635,12 +2645,14 @@ func (mock *BackendMock) PutObjectTaggingCalls() []struct {
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
VersionId string
|
||||
Tags map[string]string
|
||||
} {
|
||||
var calls []struct {
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
VersionId string
|
||||
Tags map[string]string
|
||||
}
|
||||
mock.lockPutObjectTagging.RLock()
|
||||
|
||||
@@ -18,7 +18,8 @@ import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/auth"
|
||||
@@ -32,17 +33,17 @@ import (
|
||||
)
|
||||
|
||||
type S3ApiController struct {
|
||||
be backend.Backend
|
||||
iam auth.IAMService
|
||||
logger s3log.AuditLogger
|
||||
evSender s3event.S3EventSender
|
||||
mm metrics.Manager
|
||||
readonly bool
|
||||
be backend.Backend
|
||||
iam auth.IAMService
|
||||
logger s3log.AuditLogger
|
||||
evSender s3event.S3EventSender
|
||||
mm metrics.Manager
|
||||
readonly bool
|
||||
virtualDomain string
|
||||
}
|
||||
|
||||
const (
|
||||
// time constants
|
||||
iso8601Format = "20060102T150405Z"
|
||||
iso8601TimeFormatExtended = "Mon Jan _2 15:04:05 2006"
|
||||
timefmt = "Mon, 02 Jan 2006 15:04:05 GMT"
|
||||
|
||||
@@ -58,14 +59,15 @@ var (
|
||||
xmlhdr = []byte(`<?xml version="1.0" encoding="UTF-8"?>` + "\n")
|
||||
)
|
||||
|
||||
func New(be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, evs s3event.S3EventSender, mm metrics.Manager, readonly bool) S3ApiController {
|
||||
func New(be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, evs s3event.S3EventSender, mm metrics.Manager, readonly bool, virtualDomain string) S3ApiController {
|
||||
return S3ApiController{
|
||||
be: be,
|
||||
iam: iam,
|
||||
logger: logger,
|
||||
evSender: evs,
|
||||
readonly: readonly,
|
||||
mm: mm,
|
||||
be: be,
|
||||
iam: iam,
|
||||
logger: logger,
|
||||
evSender: evs,
|
||||
readonly: readonly,
|
||||
mm: mm,
|
||||
virtualDomain: virtualDomain,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -172,6 +174,7 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
|
||||
|
||||
// Set the response headers
|
||||
SetResponseHeaders(ctx, response.Headers)
|
||||
ensureExposeMetaHeaders(ctx)
|
||||
|
||||
opts := response.MetaOpts
|
||||
if opts == nil {
|
||||
@@ -201,7 +204,7 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
|
||||
return ctx.Send(s3err.GetAPIErrorResponse(serr, "", "", ""))
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "Internal Error, %v\n", err)
|
||||
debuglogger.InternalError(err)
|
||||
ctx.Status(http.StatusInternalServerError)
|
||||
|
||||
// If the error is not 's3err.APIError' return 'InternalError'
|
||||
@@ -209,12 +212,32 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
|
||||
s3err.GetAPIError(s3err.ErrInternalError), "", "", ""))
|
||||
}
|
||||
|
||||
// At this point, the S3 action has succeeded in the backend and
|
||||
// the event has already occurred. This means the S3 event must be sent,
|
||||
// even if unexpected issues arise while further parsing the response payload.
|
||||
if svc.EventSender != nil && opts.EventName != "" {
|
||||
svc.EventSender.SendEvent(ctx, s3event.EventMeta{
|
||||
BucketOwner: opts.BucketOwner,
|
||||
ObjectSize: opts.ObjectSize,
|
||||
ObjectETag: opts.ObjectETag,
|
||||
VersionId: opts.VersionId,
|
||||
EventName: opts.EventName,
|
||||
})
|
||||
}
|
||||
|
||||
if opts.Status == 0 {
|
||||
opts.Status = http.StatusOK
|
||||
}
|
||||
|
||||
// if no data payload is provided, send the response status
|
||||
if response.Data == nil {
|
||||
if svc.Logger != nil {
|
||||
svc.Logger.Log(ctx, nil, []byte{}, s3log.LogMeta{
|
||||
Action: s3action,
|
||||
BucketOwner: opts.BucketOwner,
|
||||
ObjectSize: opts.ObjectSize,
|
||||
})
|
||||
}
|
||||
ctx.Status(opts.Status)
|
||||
return nil
|
||||
}
|
||||
@@ -228,6 +251,13 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
|
||||
} else {
|
||||
if responseBytes, err = xml.Marshal(response.Data); err != nil {
|
||||
debuglogger.Logf("Internal Error, %v", err)
|
||||
if svc.Logger != nil {
|
||||
svc.Logger.Log(ctx, err, nil, s3log.LogMeta{
|
||||
Action: s3action,
|
||||
BucketOwner: opts.BucketOwner,
|
||||
ObjectSize: opts.ObjectSize,
|
||||
})
|
||||
}
|
||||
return ctx.Status(http.StatusInternalServerError).Send(s3err.GetAPIErrorResponse(
|
||||
s3err.GetAPIError(s3err.ErrInternalError), "", "", ""))
|
||||
}
|
||||
@@ -237,29 +267,19 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
|
||||
}
|
||||
}
|
||||
|
||||
if svc.Logger != nil {
|
||||
svc.Logger.Log(ctx, nil, responseBytes, s3log.LogMeta{
|
||||
Action: s3action,
|
||||
BucketOwner: opts.BucketOwner,
|
||||
ObjectSize: opts.ObjectSize,
|
||||
})
|
||||
}
|
||||
|
||||
if svc.EventSender != nil {
|
||||
svc.EventSender.SendEvent(ctx, s3event.EventMeta{
|
||||
BucketOwner: opts.BucketOwner,
|
||||
ObjectSize: opts.ObjectSize,
|
||||
ObjectETag: opts.ObjectETag,
|
||||
VersionId: opts.VersionId,
|
||||
EventName: opts.EventName,
|
||||
})
|
||||
}
|
||||
|
||||
if ok {
|
||||
if len(responseBytes) > 0 {
|
||||
ctx.Response().Header.Set("Content-Length", fmt.Sprint(len(responseBytes)))
|
||||
}
|
||||
|
||||
if svc.Logger != nil {
|
||||
svc.Logger.Log(ctx, nil, responseBytes, s3log.LogMeta{
|
||||
Action: s3action,
|
||||
BucketOwner: opts.BucketOwner,
|
||||
ObjectSize: opts.ObjectSize,
|
||||
})
|
||||
}
|
||||
|
||||
return ctx.Send(responseBytes)
|
||||
}
|
||||
|
||||
@@ -267,6 +287,13 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
|
||||
if msglen > maxXMLBodyLen {
|
||||
debuglogger.Logf("XML encoded body len %v exceeds max len %v",
|
||||
msglen, maxXMLBodyLen)
|
||||
if svc.Logger != nil {
|
||||
svc.Logger.Log(ctx, err, []byte{}, s3log.LogMeta{
|
||||
Action: s3action,
|
||||
BucketOwner: opts.BucketOwner,
|
||||
ObjectSize: opts.ObjectSize,
|
||||
})
|
||||
}
|
||||
ctx.Status(http.StatusInternalServerError)
|
||||
|
||||
return ctx.Send(s3err.GetAPIErrorResponse(
|
||||
@@ -279,14 +306,95 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
|
||||
// Set the Content-Length header
|
||||
ctx.Response().Header.SetContentLength(msglen)
|
||||
|
||||
if svc.Logger != nil {
|
||||
svc.Logger.Log(ctx, nil, responseBytes, s3log.LogMeta{
|
||||
Action: s3action,
|
||||
BucketOwner: opts.BucketOwner,
|
||||
ObjectSize: opts.ObjectSize,
|
||||
})
|
||||
}
|
||||
|
||||
return ctx.Send(res)
|
||||
}
|
||||
|
||||
func ensureExposeMetaHeaders(ctx *fiber.Ctx) {
|
||||
// Only attempt to modify expose headers when CORS is actually in use.
|
||||
if len(ctx.Response().Header.Peek("Access-Control-Allow-Origin")) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
existing := strings.TrimSpace(string(ctx.Response().Header.Peek("Access-Control-Expose-Headers")))
|
||||
if existing == "*" {
|
||||
return
|
||||
}
|
||||
|
||||
lowerExisting := map[string]struct{}{}
|
||||
if existing != "" {
|
||||
for _, part := range strings.Split(existing, ",") {
|
||||
p := strings.ToLower(strings.TrimSpace(part))
|
||||
if p != "" {
|
||||
lowerExisting[p] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
metaNames := map[string]struct{}{}
|
||||
for k := range ctx.Response().Header.All() {
|
||||
key := string(k)
|
||||
if strings.HasPrefix(strings.ToLower(key), "x-amz-meta-") {
|
||||
metaNames[key] = struct{}{}
|
||||
}
|
||||
}
|
||||
if len(metaNames) == 0 {
|
||||
// Still ensure ETag is present if any expose headers exist/are needed.
|
||||
if _, ok := lowerExisting["etag"]; ok {
|
||||
return
|
||||
}
|
||||
if existing == "" {
|
||||
ctx.Response().Header.Set("Access-Control-Expose-Headers", "ETag")
|
||||
return
|
||||
}
|
||||
ctx.Response().Header.Set("Access-Control-Expose-Headers", existing+", ETag")
|
||||
return
|
||||
}
|
||||
|
||||
metaList := make([]string, 0, len(metaNames))
|
||||
for k := range metaNames {
|
||||
metaList = append(metaList, k)
|
||||
}
|
||||
sort.Strings(metaList)
|
||||
|
||||
toAdd := make([]string, 0, 1+len(metaList))
|
||||
if _, ok := lowerExisting["etag"]; !ok {
|
||||
toAdd = append(toAdd, "ETag")
|
||||
lowerExisting["etag"] = struct{}{}
|
||||
}
|
||||
for _, h := range metaList {
|
||||
lh := strings.ToLower(h)
|
||||
if _, ok := lowerExisting[lh]; ok {
|
||||
continue
|
||||
}
|
||||
toAdd = append(toAdd, h)
|
||||
lowerExisting[lh] = struct{}{}
|
||||
}
|
||||
if len(toAdd) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if existing == "" {
|
||||
ctx.Response().Header.Set("Access-Control-Expose-Headers", strings.Join(toAdd, ", "))
|
||||
return
|
||||
}
|
||||
ctx.Response().Header.Set("Access-Control-Expose-Headers", existing+", "+strings.Join(toAdd, ", "))
|
||||
}
|
||||
|
||||
// Sets the response headers
|
||||
func SetResponseHeaders(ctx *fiber.Ctx, headers map[string]*string) {
|
||||
if headers == nil {
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Response().Header.DisableNormalizing()
|
||||
for key, val := range headers {
|
||||
if val == nil || *val == "" {
|
||||
continue
|
||||
|
||||
@@ -237,6 +237,21 @@ func TestSetResponseHeaders(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureExposeMetaHeaders_AddsActualMetaHeaderNames(t *testing.T) {
|
||||
app := fiber.New()
|
||||
ctx := app.AcquireCtx(&fasthttp.RequestCtx{})
|
||||
|
||||
ctx.Response().Header.Add("Access-Control-Allow-Origin", "https://example.com")
|
||||
ctx.Response().Header.Add("Access-Control-Expose-Headers", "ETag")
|
||||
ctx.Response().Header.Set("x-amz-meta-foo", "bar")
|
||||
ctx.Response().Header.Set("x-amz-meta-bar", "baz")
|
||||
|
||||
ensureExposeMetaHeaders(ctx)
|
||||
|
||||
got := string(ctx.Response().Header.Peek("Access-Control-Expose-Headers"))
|
||||
assert.Equal(t, "ETag, X-Amz-Meta-Bar, X-Amz-Meta-Foo", got)
|
||||
}
|
||||
|
||||
// mock the audit logger
|
||||
type mockAuditLogger struct {
|
||||
}
|
||||
|
||||
@@ -658,10 +658,14 @@ func (c S3ApiController) GetBucketLocation(ctx *fiber.Ctx) (*Response, error) {
|
||||
|
||||
// pick up configured region from locals (set by router middleware)
|
||||
region, _ := ctx.Locals("region").(string)
|
||||
value := ®ion
|
||||
if region == "us-east-1" {
|
||||
value = nil
|
||||
}
|
||||
|
||||
return &Response{
|
||||
Data: s3response.LocationConstraint{
|
||||
Value: region,
|
||||
Value: value,
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
|
||||
@@ -1303,14 +1303,40 @@ func TestS3ApiController_GetBucketLocation(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "successful response",
|
||||
name: "successful response us-east-1",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Data: s3response.LocationConstraint{
|
||||
Value: "us-east-1",
|
||||
Value: nil,
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "successful response",
|
||||
input: testInput{
|
||||
locals: map[utils.ContextKey]any{
|
||||
utils.ContextKeyIsRoot: true,
|
||||
utils.ContextKeyParsedAcl: auth.ACL{
|
||||
Owner: "root",
|
||||
},
|
||||
utils.ContextKeyAccount: auth.Account{
|
||||
Access: "root",
|
||||
Role: auth.RoleAdmin,
|
||||
},
|
||||
utils.ContextKeyRegion: "us-east-2",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Data: s3response.LocationConstraint{
|
||||
Value: utils.GetStringPtr("us-east-2"),
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
|
||||
@@ -15,10 +15,13 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/s3api/utils"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
)
|
||||
|
||||
func (c S3ApiController) HeadBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
@@ -42,6 +45,9 @@ func (c S3ApiController) HeadBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
})
|
||||
if err != nil {
|
||||
return &Response{
|
||||
Headers: map[string]*string{
|
||||
"x-amz-bucket-region": utils.GetStringPtr(region),
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
@@ -54,6 +60,17 @@ func (c S3ApiController) HeadBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrAccessDenied)) {
|
||||
return &Response{
|
||||
// access denied for head object still returns region header
|
||||
Headers: map[string]*string{
|
||||
"x-amz-bucket-region": utils.GetStringPtr(region),
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
@@ -63,8 +80,8 @@ func (c S3ApiController) HeadBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
|
||||
return &Response{
|
||||
Headers: map[string]*string{
|
||||
"X-Amz-Access-Point-Alias": utils.GetStringPtr("false"),
|
||||
"X-Amz-Bucket-Region": utils.GetStringPtr(region),
|
||||
"x-amz-access-point-alias": utils.GetStringPtr("false"),
|
||||
"x-amz-bucket-region": utils.GetStringPtr(region),
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
|
||||
@@ -48,6 +48,9 @@ func TestS3ApiController_HeadBucket(t *testing.T) {
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Headers: map[string]*string{
|
||||
"x-amz-bucket-region": utils.GetStringPtr(region),
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
@@ -98,8 +101,8 @@ func TestS3ApiController_HeadBucket(t *testing.T) {
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Headers: map[string]*string{
|
||||
"X-Amz-Access-Point-Alias": utils.GetStringPtr("false"),
|
||||
"X-Amz-Bucket-Region": utils.GetStringPtr(region),
|
||||
"x-amz-access-point-alias": utils.GetStringPtr("false"),
|
||||
"x-amz-bucket-region": utils.GetStringPtr(region),
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
|
||||
@@ -67,7 +67,7 @@ func (c S3ApiController) DeleteObjects(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, s3err.GetAPIError(s3err.ErrInvalidRequest)
|
||||
}
|
||||
|
||||
err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, dObj.Objects, bypass, IsBucketPublic, c.be)
|
||||
err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, dObj.Objects, bypass, IsBucketPublic, c.be, false)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -271,37 +270,6 @@ func (c S3ApiController) PutBucketCors(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, err
|
||||
}
|
||||
|
||||
algo, checksusms, err := utils.ParseChecksumHeadersAndSdkAlgo(ctx)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
if algo != "" {
|
||||
rdr, err := utils.NewHashReader(bytes.NewReader(body), checksusms[algo], utils.HashType(strings.ToLower(string(algo))))
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
// Pass the same body to avoid data duplication
|
||||
_, err = rdr.Read(body)
|
||||
if err != nil {
|
||||
debuglogger.Logf("failed to read hash calculation data: %v", err)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
}
|
||||
|
||||
err = c.be.PutBucketCors(ctx.Context(), bucket, body)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
@@ -346,6 +314,7 @@ func (c S3ApiController) PutBucketPolicy(ctx *fiber.Ctx) (*Response, error) {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
Status: http.StatusNoContent,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
@@ -384,6 +353,15 @@ func (c S3ApiController) PutBucketAcl(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, err
|
||||
}
|
||||
|
||||
err = auth.ValidateCannedACL(acl)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
ownership, err := c.be.GetBucketOwnershipControls(ctx.Context(), bucket)
|
||||
if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrOwnershipControlsNotFound)) {
|
||||
return &Response{
|
||||
@@ -451,14 +429,6 @@ func (c S3ApiController) PutBucketAcl(ctx *fiber.Ctx) (*Response, error) {
|
||||
AccessControlPolicy: &accessControlPolicy,
|
||||
}
|
||||
} else if acl != "" {
|
||||
if acl != "private" && acl != "public-read" && acl != "public-read-write" {
|
||||
debuglogger.Logf("invalid acl: %q", acl)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, s3err.GetAPIError(s3err.ErrInvalidRequest)
|
||||
}
|
||||
if grants != "" {
|
||||
debuglogger.Logf("invalid request: %q (grants) %q (acl)",
|
||||
grants, acl)
|
||||
@@ -491,7 +461,7 @@ func (c S3ApiController) PutBucketAcl(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, s3err.GetAPIError(s3err.ErrMissingSecurityHeader)
|
||||
}
|
||||
|
||||
updAcl, err := auth.UpdateACL(input, parsedAcl, c.iam, acct.Role == auth.RoleAdmin)
|
||||
updAcl, err := auth.UpdateACL(input, parsedAcl, c.iam)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
@@ -517,13 +487,24 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
grantWrite := ctx.Get("X-Amz-Grant-Write")
|
||||
grantWriteACP := ctx.Get("X-Amz-Grant-Write-Acp")
|
||||
lockEnabled := strings.EqualFold(ctx.Get("X-Amz-Bucket-Object-Lock-Enabled"), "true")
|
||||
acct := utils.ContextKeyAccount.Get(ctx).(auth.Account)
|
||||
grants := grantFullControl + grantRead + grantReadACP + grantWrite + grantWriteACP
|
||||
objectOwnership := types.ObjectOwnership(
|
||||
ctx.Get("X-Amz-Object-Ownership", string(types.ObjectOwnershipBucketOwnerEnforced)),
|
||||
)
|
||||
|
||||
if acct.Role != auth.RoleAdmin && acct.Role != auth.RoleUserPlus {
|
||||
if c.readonly {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
}, s3err.GetAPIError(s3err.ErrAccessDenied)
|
||||
}
|
||||
|
||||
creator := utils.ContextKeyAccount.Get(ctx).(auth.Account)
|
||||
if !utils.ContextKeyBucketOwner.IsSet(ctx) {
|
||||
utils.ContextKeyBucketOwner.Set(ctx, creator)
|
||||
}
|
||||
bucketOwner := utils.ContextKeyBucketOwner.Get(ctx).(auth.Account)
|
||||
|
||||
if creator.Role != auth.RoleAdmin && creator.Role != auth.RoleUserPlus {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
}, s3err.GetAPIError(s3err.ErrAccessDenied)
|
||||
@@ -532,14 +513,28 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
// validate the bucket name
|
||||
if ok := utils.IsValidBucketName(bucket); !ok {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: bucketOwner.Access,
|
||||
},
|
||||
}, s3err.GetAPIError(s3err.ErrInvalidBucketName)
|
||||
}
|
||||
|
||||
// validate bucket canned acl
|
||||
err := auth.ValidateCannedACL(acl)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: bucketOwner.Access,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
// validate the object ownership value
|
||||
if ok := utils.IsValidOwnership(objectOwnership); !ok {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: bucketOwner.Access,
|
||||
},
|
||||
}, s3err.APIError{
|
||||
Code: "InvalidArgument",
|
||||
Description: fmt.Sprintf("Invalid x-amz-object-ownership header: %v", objectOwnership),
|
||||
@@ -551,7 +546,7 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
debuglogger.Logf("bucket acls are disabled for %v object ownership", objectOwnership)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: acct.Access,
|
||||
BucketOwner: bucketOwner.Access,
|
||||
},
|
||||
}, s3err.GetAPIError(s3err.ErrInvalidBucketAclWithObjectOwnership)
|
||||
}
|
||||
@@ -560,13 +555,39 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
debuglogger.Logf("invalid request: %q (grants) %q (acl)", grants, acl)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: acct.Access,
|
||||
BucketOwner: bucketOwner.Access,
|
||||
},
|
||||
}, s3err.GetAPIError(s3err.ErrBothCannedAndHeaderGrants)
|
||||
}
|
||||
|
||||
var body s3response.CreateBucketConfiguration
|
||||
if len(ctx.Body()) != 0 {
|
||||
// request body is optional for CreateBucket
|
||||
err := xml.Unmarshal(ctx.Body(), &body)
|
||||
if err != nil {
|
||||
debuglogger.Logf("failed to parse the request body: %v", err)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: bucketOwner.Access,
|
||||
},
|
||||
}, s3err.GetAPIError(s3err.ErrMalformedXML)
|
||||
}
|
||||
|
||||
if body.LocationConstraint != nil {
|
||||
region := utils.ContextKeyRegion.Get(ctx).(string)
|
||||
if *body.LocationConstraint != region || *body.LocationConstraint == "us-east-1" {
|
||||
debuglogger.Logf("invalid location constraint: %s", *body.LocationConstraint)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: bucketOwner.Access,
|
||||
},
|
||||
}, s3err.GetAPIError(s3err.ErrInvalidLocationConstraint)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
defACL := auth.ACL{
|
||||
Owner: acct.Access,
|
||||
Owner: bucketOwner.Access,
|
||||
}
|
||||
|
||||
updAcl, err := auth.UpdateACL(&auth.PutBucketAclInput{
|
||||
@@ -577,15 +598,15 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
GrantWriteACP: &grantWriteACP,
|
||||
AccessControlPolicy: &auth.AccessControlPolicy{
|
||||
Owner: &types.Owner{
|
||||
ID: &acct.Access,
|
||||
ID: &bucketOwner.Access,
|
||||
}},
|
||||
ACL: types.BucketCannedACL(acl),
|
||||
}, defACL, c.iam, acct.Role == auth.RoleAdmin)
|
||||
}, defACL, c.iam)
|
||||
if err != nil {
|
||||
debuglogger.Logf("failed to update bucket acl: %v", err)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: acct.Access,
|
||||
BucketOwner: bucketOwner.Access,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
@@ -594,10 +615,13 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
Bucket: &bucket,
|
||||
ObjectOwnership: objectOwnership,
|
||||
ObjectLockEnabledForBucket: &lockEnabled,
|
||||
CreateBucketConfiguration: &types.CreateBucketConfiguration{
|
||||
Tags: body.TagSet,
|
||||
},
|
||||
}, updAcl)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: acct.Access,
|
||||
BucketOwner: bucketOwner.Access,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
@@ -528,22 +528,6 @@ func TestS3ApiController_PutBucketCors(t *testing.T) {
|
||||
err: s3err.GetUnsopportedCORSMethodErr("invalid_method"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid checksum algo",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
body: validBody,
|
||||
headers: map[string]string{
|
||||
"X-Amz-Sdk-Checksum-Algorithm": "invalid_algo",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{BucketOwner: "root"},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidChecksumAlgorithm),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "backend error",
|
||||
input: testInput{
|
||||
@@ -657,7 +641,10 @@ func TestS3ApiController_PutBucketPolicy(t *testing.T) {
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{BucketOwner: "root"},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
Status: http.StatusNoContent,
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrNoSuchBucket),
|
||||
},
|
||||
@@ -672,6 +659,7 @@ func TestS3ApiController_PutBucketPolicy(t *testing.T) {
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
Status: http.StatusNoContent,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -711,6 +699,11 @@ func TestS3ApiController_CreateBucket(t *testing.T) {
|
||||
Role: auth.RoleUser,
|
||||
}
|
||||
|
||||
invLocConstBody, err := xml.Marshal(s3response.CreateBucketConfiguration{
|
||||
LocationConstraint: utils.GetStringPtr("us-west-1"),
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
input testInput
|
||||
@@ -740,11 +733,62 @@ func TestS3ApiController_CreateBucket(t *testing.T) {
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: adminAcc.Access,
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidBucketName),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "malformed body",
|
||||
input: testInput{
|
||||
locals: map[utils.ContextKey]any{
|
||||
utils.ContextKeyAccount: adminAcc,
|
||||
},
|
||||
body: []byte("invalid_body"),
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{BucketOwner: adminAcc.Access},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrMalformedXML),
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
name: "invalid canned acl",
|
||||
input: testInput{
|
||||
locals: map[utils.ContextKey]any{
|
||||
utils.ContextKeyAccount: adminAcc,
|
||||
},
|
||||
headers: map[string]string{
|
||||
"x-amz-acl": "invalid_acl",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{BucketOwner: adminAcc.Access},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidArgument),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid location constraint",
|
||||
input: testInput{
|
||||
locals: map[utils.ContextKey]any{
|
||||
utils.ContextKeyAccount: adminAcc,
|
||||
utils.ContextKeyRegion: "us-east-1",
|
||||
},
|
||||
body: invLocConstBody,
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{BucketOwner: adminAcc.Access},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidLocationConstraint),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid ownership",
|
||||
input: testInput{
|
||||
@@ -757,7 +801,9 @@ func TestS3ApiController_CreateBucket(t *testing.T) {
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: adminAcc.Access,
|
||||
},
|
||||
},
|
||||
err: s3err.APIError{
|
||||
Code: "InvalidArgument",
|
||||
@@ -1059,7 +1105,7 @@ func TestS3ApiController_PutBucketAcl(t *testing.T) {
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidRequest),
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidArgument),
|
||||
},
|
||||
},
|
||||
{
|
||||
|
||||
92
s3api/controllers/cors_default_origin_test.go
Normal file
92
s3api/controllers/cors_default_origin_test.go
Normal file
@@ -0,0 +1,92 @@
// Copyright 2026 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package controllers

import (
"context"
"net/http"
"testing"

"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/s3api/middlewares"
"github.com/versity/versitygw/s3err"
)

func TestApplyBucketCORS_FallbackOrigin_NoBucketCors_NoRequestOrigin(t *testing.T) {
origin := "https://example.com"

mockedBackend := &BackendMock{
GetBucketCorsFunc: func(ctx context.Context, bucket string) ([]byte, error) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchCORSConfiguration)
},
}

app := fiber.New()
app.Get("/:bucket/test",
middlewares.ApplyBucketCORS(mockedBackend, origin),
func(c *fiber.Ctx) error {
return c.SendStatus(http.StatusOK)
},
)

req, err := http.NewRequest(http.MethodGet, "/mybucket/test", nil)
if err != nil {
t.Fatalf("new request: %v", err)
}

resp, err := app.Test(req)
if err != nil {
t.Fatalf("app.Test: %v", err)
}

if got := resp.Header.Get("Access-Control-Allow-Origin"); got != origin {
t.Fatalf("expected Access-Control-Allow-Origin to be set to fallback, got %q", got)
}
if got := resp.Header.Get("Access-Control-Expose-Headers"); got != "ETag" {
t.Fatalf("expected Access-Control-Expose-Headers to include ETag, got %q", got)
}
}

func TestApplyBucketCORS_FallbackOrigin_NotAppliedWhenBucketCorsExists(t *testing.T) {
origin := "https://example.com"

mockedBackend := &BackendMock{
GetBucketCorsFunc: func(ctx context.Context, bucket string) ([]byte, error) {
return []byte("not-parsed"), nil
},
}

app := fiber.New()
app.Get("/:bucket/test",
middlewares.ApplyBucketCORS(mockedBackend, origin),
func(c *fiber.Ctx) error {
return c.SendStatus(http.StatusOK)
},
)

req, err := http.NewRequest(http.MethodGet, "/mybucket/test", nil)
if err != nil {
t.Fatalf("new request: %v", err)
}

resp, err := app.Test(req)
if err != nil {
t.Fatalf("app.Test: %v", err)
}

if got := resp.Header.Get("Access-Control-Allow-Origin"); got != "" {
t.Fatalf("expected no Access-Control-Allow-Origin when bucket CORS exists, got %q", got)
}
}
@@ -30,11 +30,17 @@ import (
|
||||
func (c S3ApiController) DeleteObjectTagging(ctx *fiber.Ctx) (*Response, error) {
|
||||
bucket := ctx.Params("bucket")
|
||||
key := strings.TrimPrefix(ctx.Path(), fmt.Sprintf("/%s/", bucket))
|
||||
versionId := ctx.Query("versionId")
|
||||
acct := utils.ContextKeyAccount.Get(ctx).(auth.Account)
|
||||
isRoot := utils.ContextKeyIsRoot.Get(ctx).(bool)
|
||||
isBucketPublic := utils.ContextKeyPublicBucket.IsSet(ctx)
|
||||
parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)
|
||||
|
||||
action := auth.DeleteObjectTaggingAction
|
||||
if versionId != "" {
|
||||
action = auth.DeleteObjectVersionTaggingAction
|
||||
}
|
||||
|
||||
err := auth.VerifyAccess(ctx.Context(), c.be,
|
||||
auth.AccessOptions{
|
||||
Readonly: c.readonly,
|
||||
@@ -44,7 +50,7 @@ func (c S3ApiController) DeleteObjectTagging(ctx *fiber.Ctx) (*Response, error)
|
||||
Acc: acct,
|
||||
Bucket: bucket,
|
||||
Object: key,
|
||||
Action: auth.DeleteObjectTaggingAction,
|
||||
Action: action,
|
||||
IsPublicRequest: isBucketPublic,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -55,8 +61,20 @@ func (c S3ApiController) DeleteObjectTagging(ctx *fiber.Ctx) (*Response, error)
|
||||
}, err
|
||||
}
|
||||
|
||||
err = c.be.DeleteObjectTagging(ctx.Context(), bucket, key)
|
||||
err = utils.ValidateVersionId(versionId)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
err = c.be.DeleteObjectTagging(ctx.Context(), bucket, key, versionId)
|
||||
return &Response{
|
||||
Headers: map[string]*string{
|
||||
"x-amz-version-id": &versionId,
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
Status: http.StatusNoContent,
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
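The tagging handlers above now call utils.ValidateVersionId before touching the backend, but the helper itself is not part of this diff. Below is a minimal sketch of such a check, assuming version IDs are ULIDs as the updated tests imply (they build valid IDs with ulid.Make().String() and expect ErrInvalidVersionId for "invalid_versionId"); the function name is illustrative, not the gateway's actual implementation.

package utils

import (
	"github.com/oklog/ulid/v2"
	"github.com/versity/versitygw/s3err"
)

// validateVersionIdSketch mirrors the behavior the tests pin down: an empty
// versionId (meaning "latest version") passes, anything that does not parse
// as a ULID is rejected with ErrInvalidVersionId.
func validateVersionIdSketch(versionId string) error {
	if versionId == "" {
		return nil
	}
	if _, err := ulid.Parse(versionId); err != nil {
		return s3err.GetAPIError(s3err.ErrInvalidVersionId)
	}
	return nil
}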
@@ -124,7 +142,10 @@ func (c S3ApiController) DeleteObject(ctx *fiber.Ctx) (*Response, error) {
|
||||
isBucketPublic := utils.ContextKeyPublicBucket.IsSet(ctx)
|
||||
parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)
|
||||
|
||||
//TODO: check s3:DeleteObjectVersion policy in case a user tries to delete a version of an object
|
||||
action := auth.DeleteObjectAction
|
||||
if versionId != "" {
|
||||
action = auth.DeleteObjectVersionAction
|
||||
}
|
||||
|
||||
err := auth.VerifyAccess(ctx.Context(), c.be,
|
||||
auth.AccessOptions{
|
||||
@@ -135,7 +156,7 @@ func (c S3ApiController) DeleteObject(ctx *fiber.Ctx) (*Response, error) {
|
||||
Acc: acct,
|
||||
Bucket: bucket,
|
||||
Object: key,
|
||||
Action: auth.DeleteObjectAction,
|
||||
Action: action,
|
||||
IsPublicRequest: isBucketPublic,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -146,6 +167,15 @@ func (c S3ApiController) DeleteObject(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, err
|
||||
}
|
||||
|
||||
err = utils.ValidateVersionId(versionId)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
err = auth.CheckObjectAccess(
|
||||
ctx.Context(),
|
||||
bucket,
|
||||
@@ -159,6 +189,7 @@ func (c S3ApiController) DeleteObject(ctx *fiber.Ctx) (*Response, error) {
|
||||
bypass,
|
||||
isBucketPublic,
|
||||
c.be,
|
||||
false,
|
||||
)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
|
||||
@@ -20,12 +20,14 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/oklog/ulid/v2"
|
||||
"github.com/versity/versitygw/s3api/utils"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
"github.com/versity/versitygw/s3event"
|
||||
)
|
||||
|
||||
func TestS3ApiController_DeleteObjectTagging(t *testing.T) {
|
||||
versionId := ulid.Make().String()
|
||||
tests := []struct {
|
||||
name string
|
||||
input testInput
|
||||
@@ -45,14 +47,37 @@ func TestS3ApiController_DeleteObjectTagging(t *testing.T) {
|
||||
err: s3err.GetAPIError(s3err.ErrAccessDenied),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid versionId",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
queries: map[string]string{
|
||||
"versionId": "invalid_versionId",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "backend returns error",
|
||||
input: testInput{
|
||||
queries: map[string]string{
|
||||
"versionId": versionId,
|
||||
},
|
||||
locals: defaultLocals,
|
||||
beErr: s3err.GetAPIError(s3err.ErrInvalidRequest),
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Headers: map[string]*string{
|
||||
"x-amz-version-id": &versionId,
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
Status: http.StatusNoContent,
|
||||
@@ -66,9 +91,15 @@ func TestS3ApiController_DeleteObjectTagging(t *testing.T) {
|
||||
name: "successful response",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
queries: map[string]string{
|
||||
"versionId": versionId,
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Headers: map[string]*string{
|
||||
"x-amz-version-id": &versionId,
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
Status: http.StatusNoContent,
|
||||
@@ -81,7 +112,7 @@ func TestS3ApiController_DeleteObjectTagging(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
be := &BackendMock{
|
||||
DeleteObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object string) error {
|
||||
DeleteObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object, versionId string) error {
|
||||
return tt.input.beErr
|
||||
},
|
||||
GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
|
||||
@@ -99,7 +130,8 @@ func TestS3ApiController_DeleteObjectTagging(t *testing.T) {
|
||||
tt.output.response,
|
||||
tt.output.err,
|
||||
ctxInputs{
|
||||
locals: tt.input.locals,
|
||||
locals: tt.input.locals,
|
||||
queries: tt.input.queries,
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -206,6 +238,23 @@ func TestS3ApiController_DeleteObject(t *testing.T) {
|
||||
err: s3err.GetAPIError(s3err.ErrAccessDenied),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid versionId",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
queries: map[string]string{
|
||||
"versionId": "invalid_versionId",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "object locked",
|
||||
input: testInput{
|
||||
@@ -289,7 +338,8 @@ func TestS3ApiController_DeleteObject(t *testing.T) {
|
||||
tt.output.response,
|
||||
tt.output.err,
|
||||
ctxInputs{
|
||||
locals: tt.input.locals,
|
||||
locals: tt.input.locals,
|
||||
queries: tt.input.queries,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
@@ -35,11 +35,17 @@ import (
|
||||
func (c S3ApiController) GetObjectTagging(ctx *fiber.Ctx) (*Response, error) {
|
||||
bucket := ctx.Params("bucket")
|
||||
key := strings.TrimPrefix(ctx.Path(), fmt.Sprintf("/%s/", bucket))
|
||||
versionId := ctx.Query("versionId")
|
||||
acct := utils.ContextKeyAccount.Get(ctx).(auth.Account)
|
||||
isRoot := utils.ContextKeyIsRoot.Get(ctx).(bool)
|
||||
parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)
|
||||
isPublicBucket := utils.ContextKeyPublicBucket.IsSet(ctx)
|
||||
|
||||
action := auth.GetObjectTaggingAction
|
||||
if versionId != "" {
|
||||
action = auth.GetObjectVersionTaggingAction
|
||||
}
|
||||
|
||||
err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
|
||||
Readonly: c.readonly,
|
||||
Acl: parsedAcl,
|
||||
@@ -48,7 +54,7 @@ func (c S3ApiController) GetObjectTagging(ctx *fiber.Ctx) (*Response, error) {
|
||||
Acc: acct,
|
||||
Bucket: bucket,
|
||||
Object: key,
|
||||
Action: auth.GetObjectTaggingAction,
|
||||
Action: action,
|
||||
IsPublicRequest: isPublicBucket,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -59,7 +65,16 @@ func (c S3ApiController) GetObjectTagging(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, err
|
||||
}
|
||||
|
||||
data, err := c.be.GetObjectTagging(ctx.Context(), bucket, key)
|
||||
err = utils.ValidateVersionId(versionId)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
data, err := c.be.GetObjectTagging(ctx.Context(), bucket, key, versionId)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
@@ -78,10 +93,13 @@ func (c S3ApiController) GetObjectTagging(ctx *fiber.Ctx) (*Response, error) {
|
||||
|
||||
return &Response{
|
||||
Data: tags,
|
||||
Headers: map[string]*string{
|
||||
"x-amz-version-id": &versionId,
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c S3ApiController) GetObjectRetention(ctx *fiber.Ctx) (*Response, error) {
|
||||
@@ -113,6 +131,15 @@ func (c S3ApiController) GetObjectRetention(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, err
|
||||
}
|
||||
|
||||
err = utils.ValidateVersionId(versionId)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
data, err := c.be.GetObjectRetention(ctx.Context(), bucket, key, versionId)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
@@ -160,6 +187,15 @@ func (c S3ApiController) GetObjectLegalHold(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, err
|
||||
}
|
||||
|
||||
err = utils.ValidateVersionId(versionId)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
data, err := c.be.GetObjectLegalHold(ctx.Context(), bucket, key, versionId)
|
||||
return &Response{
|
||||
Data: auth.ParseObjectLegalHoldOutput(data),
|
||||
@@ -293,6 +329,11 @@ func (c S3ApiController) GetObjectAttributes(ctx *fiber.Ctx) (*Response, error)
|
||||
parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)
|
||||
isPublicBucket := utils.ContextKeyPublicBucket.IsSet(ctx)
|
||||
|
||||
action := auth.GetObjectAttributesAction
|
||||
if versionId != "" {
|
||||
action = auth.GetObjectVersionAttributesAction
|
||||
}
|
||||
|
||||
err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
|
||||
Readonly: c.readonly,
|
||||
Acl: parsedAcl,
|
||||
@@ -301,7 +342,7 @@ func (c S3ApiController) GetObjectAttributes(ctx *fiber.Ctx) (*Response, error)
|
||||
Acc: acct,
|
||||
Bucket: bucket,
|
||||
Object: key,
|
||||
Action: auth.GetObjectAttributesAction,
|
||||
Action: action,
|
||||
IsPublicRequest: isPublicBucket,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -312,6 +353,15 @@ func (c S3ApiController) GetObjectAttributes(ctx *fiber.Ctx) (*Response, error)
|
||||
}, err
|
||||
}
|
||||
|
||||
err = utils.ValidateVersionId(versionId)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
// parse max parts
|
||||
maxParts, err := utils.ParseUint(maxPartsStr)
|
||||
if err != nil {
|
||||
@@ -455,6 +505,15 @@ func (c S3ApiController) GetObject(ctx *fiber.Ctx) (*Response, error) {
|
||||
partNumber = &partNumberQuery
|
||||
}
|
||||
|
||||
err = utils.ValidateVersionId(versionId)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
// validate the checksum mode
|
||||
if checksumMode != "" && checksumMode != types.ChecksumModeEnabled {
|
||||
debuglogger.Logf("invalid x-amz-checksum-mode header value: %v", checksumMode)
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/oklog/ulid/v2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/versity/versitygw/s3api/utils"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
@@ -33,6 +34,7 @@ import (
|
||||
)
|
||||
|
||||
func TestS3ApiController_GetObjectTagging(t *testing.T) {
|
||||
versionId := ulid.Make().String()
|
||||
tests := []struct {
|
||||
name string
|
||||
input testInput
|
||||
@@ -52,6 +54,23 @@ func TestS3ApiController_GetObjectTagging(t *testing.T) {
|
||||
err: s3err.GetAPIError(s3err.ErrAccessDenied),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid versionId",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
queries: map[string]string{
|
||||
"versionId": "invalid_versionId",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "backend returns error",
|
||||
input: testInput{
|
||||
@@ -71,6 +90,9 @@ func TestS3ApiController_GetObjectTagging(t *testing.T) {
|
||||
{
|
||||
name: "successful response",
|
||||
input: testInput{
|
||||
queries: map[string]string{
|
||||
"versionId": versionId,
|
||||
},
|
||||
locals: defaultLocals,
|
||||
beRes: map[string]string{
|
||||
"key": "val",
|
||||
@@ -78,6 +100,9 @@ func TestS3ApiController_GetObjectTagging(t *testing.T) {
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Headers: map[string]*string{
|
||||
"x-amz-version-id": utils.GetStringPtr(versionId),
|
||||
},
|
||||
Data: s3response.Tagging{
|
||||
TagSet: s3response.TagSet{
|
||||
Tags: []s3response.Tag{
|
||||
@@ -95,7 +120,7 @@ func TestS3ApiController_GetObjectTagging(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
be := &BackendMock{
|
||||
GetObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object string) (map[string]string, error) {
|
||||
GetObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object, versionId string) (map[string]string, error) {
|
||||
return tt.input.beRes.(map[string]string), tt.input.beErr
|
||||
},
|
||||
GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
|
||||
@@ -113,8 +138,9 @@ func TestS3ApiController_GetObjectTagging(t *testing.T) {
|
||||
tt.output.response,
|
||||
tt.output.err,
|
||||
ctxInputs{
|
||||
locals: tt.input.locals,
|
||||
body: tt.input.body,
|
||||
locals: tt.input.locals,
|
||||
body: tt.input.body,
|
||||
queries: tt.input.queries,
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -147,6 +173,23 @@ func TestS3ApiController_GetObjectRetention(t *testing.T) {
|
||||
err: s3err.GetAPIError(s3err.ErrAccessDenied),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid versionId",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
queries: map[string]string{
|
||||
"versionId": "invalid_versionId",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "backend returns error",
|
||||
input: testInput{
|
||||
@@ -218,8 +261,9 @@ func TestS3ApiController_GetObjectRetention(t *testing.T) {
|
||||
tt.output.response,
|
||||
tt.output.err,
|
||||
ctxInputs{
|
||||
locals: tt.input.locals,
|
||||
body: tt.input.body,
|
||||
locals: tt.input.locals,
|
||||
body: tt.input.body,
|
||||
queries: tt.input.queries,
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -249,6 +293,23 @@ func TestS3ApiController_GetObjectLegalHold(t *testing.T) {
|
||||
err: s3err.GetAPIError(s3err.ErrAccessDenied),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid versionId",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
queries: map[string]string{
|
||||
"versionId": "invalid_versionId",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "backend returns error",
|
||||
input: testInput{
|
||||
@@ -305,8 +366,9 @@ func TestS3ApiController_GetObjectLegalHold(t *testing.T) {
|
||||
tt.output.response,
|
||||
tt.output.err,
|
||||
ctxInputs{
|
||||
locals: tt.input.locals,
|
||||
body: tt.input.body,
|
||||
locals: tt.input.locals,
|
||||
body: tt.input.body,
|
||||
queries: tt.input.queries,
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -555,6 +617,23 @@ func TestS3ApiController_GetObjectAttributes(t *testing.T) {
|
||||
err: s3err.GetAPIError(s3err.ErrAccessDenied),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid versionId",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
queries: map[string]string{
|
||||
"versionId": "invalid_versionId",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid max parts",
|
||||
input: testInput{
|
||||
@@ -663,6 +742,7 @@ func TestS3ApiController_GetObjectAttributes(t *testing.T) {
|
||||
locals: tt.input.locals,
|
||||
body: tt.input.body,
|
||||
headers: tt.input.headers,
|
||||
queries: tt.input.queries,
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -693,6 +773,23 @@ func TestS3ApiController_GetObject(t *testing.T) {
|
||||
err: s3err.GetAPIError(s3err.ErrAccessDenied),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid versionId",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
queries: map[string]string{
|
||||
"versionId": "invalid_versionId",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid checksum mode",
|
||||
input: testInput{
|
||||
@@ -757,7 +854,7 @@ func TestS3ApiController_GetObject(t *testing.T) {
|
||||
"Range": "100-200",
|
||||
},
|
||||
queries: map[string]string{
|
||||
"versionId": "versionId",
|
||||
"versionId": "01BX5ZZKBKACTAV9WEVGEMMVRZ",
|
||||
},
|
||||
locals: defaultLocals,
|
||||
beRes: &s3.GetObjectOutput{
|
||||
|
||||
@@ -80,6 +80,15 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) (*Response, error) {
|
||||
partNumber = &partNumberQuery
|
||||
}
|
||||
|
||||
err = utils.ValidateVersionId(versionId)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
checksumMode := types.ChecksumMode(strings.ToUpper(ctx.Get("x-amz-checksum-mode")))
|
||||
if checksumMode != "" && checksumMode != types.ChecksumModeEnabled {
|
||||
debuglogger.Logf("invalid x-amz-checksum-mode header value: %v", checksumMode)
|
||||
@@ -126,30 +135,31 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) (*Response, error) {
|
||||
|
||||
return &Response{
|
||||
Headers: map[string]*string{
|
||||
"ETag": res.ETag,
|
||||
"x-amz-restore": res.Restore,
|
||||
"accept-ranges": res.AcceptRanges,
|
||||
"Content-Range": res.ContentRange,
|
||||
"Content-Disposition": res.ContentDisposition,
|
||||
"Content-Encoding": res.ContentEncoding,
|
||||
"Content-Language": res.ContentLanguage,
|
||||
"Cache-Control": res.CacheControl,
|
||||
"Content-Length": utils.ConvertPtrToStringPtr(res.ContentLength),
|
||||
"Content-Type": res.ContentType,
|
||||
"Expires": res.ExpiresString,
|
||||
"ETag": res.ETag,
|
||||
"Last-Modified": utils.FormatDatePtrToString(res.LastModified, timefmt),
|
||||
"x-amz-restore": res.Restore,
|
||||
"accept-ranges": res.AcceptRanges,
|
||||
"x-amz-checksum-crc32": res.ChecksumCRC32,
|
||||
"x-amz-checksum-crc64nvme": res.ChecksumCRC64NVME,
|
||||
"x-amz-checksum-crc32c": res.ChecksumCRC32C,
|
||||
"x-amz-checksum-sha1": res.ChecksumSHA1,
|
||||
"x-amz-checksum-sha256": res.ChecksumSHA256,
|
||||
"Content-Type": res.ContentType,
|
||||
"x-amz-version-id": res.VersionId,
|
||||
"Content-Length": utils.ConvertPtrToStringPtr(res.ContentLength),
|
||||
"x-amz-mp-parts-count": utils.ConvertPtrToStringPtr(res.PartsCount),
|
||||
"x-amz-object-lock-mode": utils.ConvertToStringPtr(res.ObjectLockMode),
|
||||
"x-amz-object-lock-legal-hold": utils.ConvertToStringPtr(res.ObjectLockLegalHoldStatus),
|
||||
"x-amz-storage-class": utils.ConvertToStringPtr(res.StorageClass),
|
||||
"x-amz-checksum-type": utils.ConvertToStringPtr(res.ChecksumType),
|
||||
"x-amz-object-lock-retain-until-date": utils.FormatDatePtrToString(res.ObjectLockRetainUntilDate, time.RFC3339),
|
||||
"Last-Modified": utils.FormatDatePtrToString(res.LastModified, timefmt),
|
||||
"x-amz-tagging-count": utils.ConvertPtrToStringPtr(res.TagCount),
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
|
||||
@@ -51,13 +51,30 @@ func TestS3ApiController_HeadObject(t *testing.T) {
|
||||
err: s3err.GetAPIError(s3err.ErrAccessDenied),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid versionId",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
queries: map[string]string{
|
||||
"versionId": "invalid_versionId",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid part number",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
queries: map[string]string{
|
||||
"partNumber": "-4",
|
||||
"versionId": "id",
|
||||
"versionId": "01BX5ZZKBKACTAV9WEVGEMMVRZ",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
@@ -147,6 +164,7 @@ func TestS3ApiController_HeadObject(t *testing.T) {
|
||||
"x-amz-checksum-type": nil,
|
||||
"x-amz-object-lock-retain-until-date": nil,
|
||||
"Last-Modified": nil,
|
||||
"x-amz-tagging-count": nil,
|
||||
"Content-Type": utils.GetStringPtr("application/xml"),
|
||||
"Content-Length": utils.GetStringPtr("100"),
|
||||
},
|
||||
|
||||
@@ -158,7 +158,16 @@ func (c S3ApiController) CreateMultipartUpload(ctx *fiber.Ctx) (*Response, error
|
||||
isRoot := utils.ContextKeyIsRoot.Get(ctx).(bool)
|
||||
parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)
|
||||
|
||||
err := auth.VerifyAccess(ctx.Context(), c.be,
|
||||
err := utils.ValidateNoACLHeaders(ctx)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
err = auth.VerifyAccess(ctx.Context(), c.be,
|
||||
auth.AccessOptions{
|
||||
Readonly: c.readonly,
|
||||
Acl: parsedAcl,
|
||||
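CreateMultipartUpload (and, further down, CopyObject and PutObject) now call utils.ValidateNoACLHeaders before verifying access; the helper's body is not shown in this diff. A hedged sketch of what such a guard might look like, assuming it rejects canned-ACL and grant headers; both the header list and the error code returned here are assumptions for illustration only.

package utils

import (
	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/s3err"
)

// validateNoACLHeadersSketch rejects requests that try to set ACLs via headers.
// The exact header set and error code are assumptions, not the gateway's code.
func validateNoACLHeadersSketch(ctx *fiber.Ctx) error {
	for _, h := range []string{
		"X-Amz-Acl",
		"X-Amz-Grant-Full-Control",
		"X-Amz-Grant-Read",
		"X-Amz-Grant-Read-Acp",
		"X-Amz-Grant-Write",
		"X-Amz-Grant-Write-Acp",
	} {
		if ctx.Get(h) != "" {
			return s3err.GetAPIError(s3err.ErrInvalidRequest)
		}
	}
	return nil
}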
@@ -278,7 +287,7 @@ func (c S3ApiController) CompleteMultipartUpload(ctx *fiber.Ctx) (*Response, err
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, s3err.GetAPIError(s3err.ErrEmptyParts)
|
||||
}, s3err.GetAPIError(s3err.ErrMalformedXML)
|
||||
}
|
||||
|
||||
var mpuObjectSize *int64
|
||||
@@ -305,7 +314,7 @@ func (c S3ApiController) CompleteMultipartUpload(ctx *fiber.Ctx) (*Response, err
|
||||
mpuObjectSize = &val
|
||||
}
|
||||
|
||||
checksums, err := utils.ParseChecksumHeaders(ctx)
|
||||
checksums, err := utils.ParseCompleteMpChecksumHeaders(ctx)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
@@ -325,6 +334,15 @@ func (c S3ApiController) CompleteMultipartUpload(ctx *fiber.Ctx) (*Response, err
|
||||
|
||||
ifMatch, ifNoneMatch := utils.ParsePreconditionMatchHeaders(ctx)
|
||||
|
||||
err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, []types.ObjectIdentifier{{Key: &key}}, true, isBucketPublic, c.be, true)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
res, versid, err := c.be.CompleteMultipartUpload(ctx.Context(),
|
||||
&s3.CompleteMultipartUploadInput{
|
||||
Bucket: &bucket,
|
||||
@@ -343,6 +361,10 @@ func (c S3ApiController) CompleteMultipartUpload(ctx *fiber.Ctx) (*Response, err
|
||||
IfMatch: ifMatch,
|
||||
IfNoneMatch: ifNoneMatch,
|
||||
})
|
||||
if err == nil {
|
||||
objUrl := utils.GenerateObjectLocation(ctx, c.virtualDomain, bucket, key)
|
||||
res.Location = &objUrl
|
||||
}
|
||||
return &Response{
|
||||
Data: res,
|
||||
Headers: map[string]*string{
|
||||
|
||||
@@ -404,7 +404,7 @@ func TestS3ApiController_CompleteMultipartUpload(t *testing.T) {
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrEmptyParts),
|
||||
err: s3err.GetAPIError(s3err.ErrMalformedXML),
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -479,13 +479,30 @@ func TestS3ApiController_CompleteMultipartUpload(t *testing.T) {
|
||||
err: s3err.GetInvalidChecksumHeaderErr("x-amz-checksum-type"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "object is locked",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
body: validMpBody,
|
||||
extraMockErr: s3err.GetAPIError(s3err.ErrObjectLocked),
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrObjectLocked),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "backend returns error",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
body: validMpBody,
|
||||
beErr: s3err.GetAPIError(s3err.ErrNoSuchBucket),
|
||||
beRes: s3response.CompleteMultipartUploadResult{},
|
||||
locals: defaultLocals,
|
||||
body: validMpBody,
|
||||
beErr: s3err.GetAPIError(s3err.ErrNoSuchBucket),
|
||||
beRes: s3response.CompleteMultipartUploadResult{},
|
||||
extraMockErr: s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound),
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
@@ -514,11 +531,13 @@ func TestS3ApiController_CompleteMultipartUpload(t *testing.T) {
|
||||
headers: map[string]string{
|
||||
"X-Amz-Mp-Object-Size": "3",
|
||||
},
|
||||
extraMockErr: s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound),
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Data: s3response.CompleteMultipartUploadResult{
|
||||
ETag: &ETag,
|
||||
ETag: &ETag,
|
||||
Location: utils.GetStringPtr("http://example.com/bucket/object"),
|
||||
},
|
||||
Headers: map[string]*string{
|
||||
"x-amz-version-id": &versionId,
|
||||
@@ -542,6 +561,12 @@ func TestS3ApiController_CompleteMultipartUpload(t *testing.T) {
|
||||
GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
|
||||
},
|
||||
GetObjectLockConfigurationFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
|
||||
return nil, tt.input.extraMockErr
|
||||
},
|
||||
GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
|
||||
return s3response.GetBucketVersioningOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
},
|
||||
}
|
||||
|
||||
ctrl := S3ApiController{
|
||||
|
||||
@@ -36,11 +36,17 @@ import (
|
||||
func (c S3ApiController) PutObjectTagging(ctx *fiber.Ctx) (*Response, error) {
|
||||
bucket := ctx.Params("bucket")
|
||||
key := strings.TrimPrefix(ctx.Path(), fmt.Sprintf("/%s/", bucket))
|
||||
versionId := ctx.Query("versionId")
|
||||
acct := utils.ContextKeyAccount.Get(ctx).(auth.Account)
|
||||
isRoot := utils.ContextKeyIsRoot.Get(ctx).(bool)
|
||||
IsBucketPublic := utils.ContextKeyPublicBucket.IsSet(ctx)
|
||||
parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)
|
||||
|
||||
action := auth.PutObjectTaggingAction
|
||||
if versionId != "" {
|
||||
action = auth.PutObjectVersionTaggingAction
|
||||
}
|
||||
|
||||
err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
|
||||
Readonly: c.readonly,
|
||||
Acl: parsedAcl,
|
||||
@@ -49,7 +55,7 @@ func (c S3ApiController) PutObjectTagging(ctx *fiber.Ctx) (*Response, error) {
|
||||
Acc: acct,
|
||||
Bucket: bucket,
|
||||
Object: key,
|
||||
Action: auth.PutObjectTaggingAction,
|
||||
Action: action,
|
||||
IsPublicRequest: IsBucketPublic,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -60,6 +66,15 @@ func (c S3ApiController) PutObjectTagging(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, err
|
||||
}
|
||||
|
||||
err = utils.ValidateVersionId(versionId)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
tagging, err := utils.ParseTagging(ctx.Body(), utils.TagLimitObject)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
@@ -69,8 +84,11 @@ func (c S3ApiController) PutObjectTagging(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, err
|
||||
}
|
||||
|
||||
err = c.be.PutObjectTagging(ctx.Context(), bucket, key, tagging)
|
||||
err = c.be.PutObjectTagging(ctx.Context(), bucket, key, versionId, tagging)
|
||||
return &Response{
|
||||
Headers: map[string]*string{
|
||||
"x-amz-version-id": &versionId,
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
EventName: s3event.EventObjectTaggingPut,
|
||||
@@ -88,7 +106,7 @@ func (c S3ApiController) PutObjectRetention(ctx *fiber.Ctx) (*Response, error) {
|
||||
IsBucketPublic := utils.ContextKeyPublicBucket.IsSet(ctx)
|
||||
parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)
|
||||
|
||||
if err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
|
||||
err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
|
||||
Readonly: c.readonly,
|
||||
Acl: parsedAcl,
|
||||
AclPermission: auth.PermissionWrite,
|
||||
@@ -98,7 +116,8 @@ func (c S3ApiController) PutObjectRetention(ctx *fiber.Ctx) (*Response, error) {
|
||||
Object: key,
|
||||
Action: auth.PutObjectRetentionAction,
|
||||
IsPublicRequest: IsBucketPublic,
|
||||
}); err != nil {
|
||||
})
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
@@ -106,20 +125,18 @@ func (c S3ApiController) PutObjectRetention(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, err
|
||||
}
|
||||
|
||||
if bypass {
|
||||
policy, err := c.be.GetBucketPolicy(ctx.Context(), bucket)
|
||||
if err != nil {
|
||||
bypass = false
|
||||
} else {
|
||||
if err := auth.VerifyBucketPolicy(policy, acct.Access, bucket, key, auth.BypassGovernanceRetentionAction); err != nil {
|
||||
bypass = false
|
||||
}
|
||||
}
|
||||
err = utils.ValidateVersionId(versionId)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
// parse the request body bytes into a go struct and validate
|
||||
retention, err := auth.ParseObjectLockRetentionInput(ctx.Body())
|
||||
if err != nil {
|
||||
debuglogger.Logf("failed to parse object lock configuration input: %v", err)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
@@ -127,7 +144,27 @@ func (c S3ApiController) PutObjectRetention(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, err
|
||||
}
|
||||
|
||||
err = c.be.PutObjectRetention(ctx.Context(), bucket, key, versionId, bypass, retention)
|
||||
// check if the operation is allowed
|
||||
err = auth.IsObjectLockRetentionPutAllowed(ctx.Context(), c.be, bucket, key, versionId, acct.Access, retention, bypass)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
// parse the retention to JSON
|
||||
data, err := auth.ParseObjectLockRetentionInputToJSON(retention)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
err = c.be.PutObjectRetention(ctx.Context(), bucket, key, versionId, data)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
@@ -144,7 +181,7 @@ func (c S3ApiController) PutObjectLegalHold(ctx *fiber.Ctx) (*Response, error) {
|
||||
IsBucketPublic := utils.ContextKeyPublicBucket.IsSet(ctx)
|
||||
parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)
|
||||
|
||||
if err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
|
||||
err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
|
||||
Readonly: c.readonly,
|
||||
Acl: parsedAcl,
|
||||
AclPermission: auth.PermissionWrite,
|
||||
@@ -154,7 +191,17 @@ func (c S3ApiController) PutObjectLegalHold(ctx *fiber.Ctx) (*Response, error) {
|
||||
Object: key,
|
||||
Action: auth.PutObjectLegalHoldAction,
|
||||
IsPublicRequest: IsBucketPublic,
|
||||
}); err != nil {
|
||||
})
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
err = utils.ValidateVersionId(versionId)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
@@ -181,7 +228,7 @@ func (c S3ApiController) PutObjectLegalHold(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, s3err.GetAPIError(s3err.ErrMalformedXML)
|
||||
}
|
||||
|
||||
err := c.be.PutObjectLegalHold(ctx.Context(), bucket, key, versionId, legalHold.Status == types.ObjectLockLegalHoldStatusOn)
|
||||
err = c.be.PutObjectLegalHold(ctx.Context(), bucket, key, versionId, legalHold.Status == types.ObjectLockLegalHoldStatusOn)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
@@ -345,6 +392,15 @@ func (c S3ApiController) UploadPartCopy(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, err
|
||||
}
|
||||
|
||||
if len(ctx.Request().Body()) != 0 {
|
||||
debuglogger.Logf("expected empty request body")
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, s3err.GetAPIError(s3err.ErrNonEmptyRequestBody)
|
||||
}
|
||||
|
||||
if partNumber < minPartNumber || partNumber > maxPartNumber {
|
||||
debuglogger.Logf("invalid part number: %d", partNumber)
|
||||
return &Response{
|
||||
@@ -454,7 +510,16 @@ func (c S3ApiController) CopyObject(ctx *fiber.Ctx) (*Response, error) {
|
||||
isRoot := utils.ContextKeyIsRoot.Get(ctx).(bool)
|
||||
parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)
|
||||
|
||||
err := utils.ValidateCopySource(copySource)
|
||||
err := utils.ValidateNoACLHeaders(ctx)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
err = utils.ValidateCopySource(copySource)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
@@ -481,6 +546,15 @@ func (c S3ApiController) CopyObject(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, err
|
||||
}
|
||||
|
||||
if len(ctx.Request().Body()) != 0 {
|
||||
debuglogger.Logf("expected empty request body")
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, s3err.GetAPIError(s3err.ErrNonEmptyRequestBody)
|
||||
}
|
||||
|
||||
metadata := utils.GetUserMetaData(&ctx.Request().Header)
|
||||
|
||||
if metaDirective != "" && metaDirective != types.MetadataDirectiveCopy && metaDirective != types.MetadataDirectiveReplace {
|
||||
@@ -522,6 +596,15 @@ func (c S3ApiController) CopyObject(ctx *fiber.Ctx) (*Response, error) {
|
||||
|
||||
preconditionHdrs := utils.ParsePreconditionHeaders(ctx, utils.WithCopySource())
|
||||
|
||||
err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, []types.ObjectIdentifier{{Key: &key}}, true, false, c.be, true)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
res, err := c.be.CopyObject(ctx.Context(),
|
||||
s3response.CopyObjectInput{
|
||||
Bucket: &bucket,
|
||||
@@ -585,6 +668,15 @@ func (c S3ApiController) PutObject(ctx *fiber.Ctx) (*Response, error) {
|
||||
parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)
|
||||
IsBucketPublic := utils.ContextKeyPublicBucket.IsSet(ctx)
|
||||
|
||||
err := utils.ValidateNoACLHeaders(ctx)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
// Content Length
|
||||
contentLengthStr := ctx.Get("Content-Length")
|
||||
if contentLengthStr == "" {
|
||||
@@ -600,7 +692,7 @@ func (c S3ApiController) PutObject(ctx *fiber.Ctx) (*Response, error) {
|
||||
// load the meta headers
|
||||
metadata := utils.GetUserMetaData(&ctx.Request().Header)
|
||||
|
||||
err := auth.VerifyAccess(ctx.Context(), c.be,
|
||||
err = auth.VerifyAccess(ctx.Context(), c.be,
|
||||
auth.AccessOptions{
|
||||
Readonly: c.readonly,
|
||||
Acl: parsedAcl,
|
||||
@@ -620,7 +712,7 @@ func (c S3ApiController) PutObject(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, err
|
||||
}
|
||||
|
||||
err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, []types.ObjectIdentifier{{Key: &key}}, true, IsBucketPublic, c.be)
|
||||
err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, []types.ObjectIdentifier{{Key: &key}}, true, IsBucketPublic, c.be, true)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/oklog/ulid/v2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/s3api/utils"
|
||||
@@ -45,6 +46,8 @@ func TestS3ApiController_PutObjectTagging(t *testing.T) {
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
versionId := ulid.Make().String()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
input testInput
|
||||
@@ -64,6 +67,23 @@ func TestS3ApiController_PutObjectTagging(t *testing.T) {
|
||||
err: s3err.GetAPIError(s3err.ErrAccessDenied),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid versionId",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
queries: map[string]string{
|
||||
"versionId": "invalid_versionId",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid request body",
|
||||
input: testInput{
|
||||
@@ -85,9 +105,15 @@ func TestS3ApiController_PutObjectTagging(t *testing.T) {
|
||||
locals: defaultLocals,
|
||||
beErr: s3err.GetAPIError(s3err.ErrNoSuchBucket),
|
||||
body: validTaggingBody,
|
||||
queries: map[string]string{
|
||||
"versionId": versionId,
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Headers: map[string]*string{
|
||||
"x-amz-version-id": &versionId,
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
EventName: s3event.EventObjectTaggingPut,
|
||||
@@ -101,9 +127,15 @@ func TestS3ApiController_PutObjectTagging(t *testing.T) {
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
body: validTaggingBody,
|
||||
queries: map[string]string{
|
||||
"versionId": versionId,
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Headers: map[string]*string{
|
||||
"x-amz-version-id": &versionId,
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
EventName: s3event.EventObjectTaggingPut,
|
||||
@@ -115,7 +147,7 @@ func TestS3ApiController_PutObjectTagging(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
be := &BackendMock{
|
||||
PutObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object string, tags map[string]string) error {
|
||||
PutObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object, versionId string, tags map[string]string) error {
|
||||
return tt.input.beErr
|
||||
},
|
||||
GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
|
||||
@@ -133,8 +165,9 @@ func TestS3ApiController_PutObjectTagging(t *testing.T) {
|
||||
tt.output.response,
|
||||
tt.output.err,
|
||||
ctxInputs{
|
||||
locals: tt.input.locals,
|
||||
body: tt.input.body,
|
||||
locals: tt.input.locals,
|
||||
body: tt.input.body,
|
||||
queries: tt.input.queries,
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -171,6 +204,23 @@ func TestS3ApiController_PutObjectRetention(t *testing.T) {
|
||||
err: s3err.GetAPIError(s3err.ErrAccessDenied),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid versionId",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
queries: map[string]string{
|
||||
"versionId": "invalid_versionId",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid request body",
|
||||
input: testInput{
|
||||
@@ -186,12 +236,29 @@ func TestS3ApiController_PutObjectRetention(t *testing.T) {
|
||||
err: s3err.GetAPIError(s3err.ErrMalformedXML),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "retention put not allowed",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
body: validRetentionBody,
|
||||
extraMockErr: s3err.GetAPIError(s3err.ErrAccessDenied),
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrAccessDenied),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "backend returns error",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
beErr: s3err.GetAPIError(s3err.ErrNoSuchBucket),
|
||||
body: validRetentionBody,
|
||||
locals: defaultLocals,
|
||||
beErr: s3err.GetAPIError(s3err.ErrNoSuchBucket),
|
||||
body: validRetentionBody,
|
||||
extraMockErr: s3err.GetAPIError(s3err.ErrNoSuchObjectLockConfiguration),
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
@@ -203,46 +270,11 @@ func TestS3ApiController_PutObjectRetention(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "success bypass GetBucketPolicy fails",
|
||||
name: "successful response",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
body: validRetentionBody,
|
||||
extraMockErr: s3err.GetAPIError(s3err.ErrAccessDenied),
|
||||
headers: map[string]string{
|
||||
"X-Amz-Bypass-Governance-Retention": "true",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "success bypass VerifyBucketPolicy fails",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
body: validRetentionBody,
|
||||
extraMockResp: []byte("invalid_policy"),
|
||||
headers: map[string]string{
|
||||
"X-Amz-Bypass-Governance-Retention": "true",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "successful response",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
body: validRetentionBody,
|
||||
extraMockErr: s3err.GetAPIError(s3err.ErrNoSuchObjectLockConfiguration),
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
@@ -256,15 +288,14 @@ func TestS3ApiController_PutObjectRetention(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
be := &BackendMock{
|
||||
PutObjectRetentionFunc: func(contextMoqParam context.Context, bucket, object, versionId string, bypass bool, retention []byte) error {
|
||||
PutObjectRetentionFunc: func(contextMoqParam context.Context, bucket, object, versionId string, retention []byte) error {
|
||||
return tt.input.beErr
|
||||
},
|
||||
GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
|
||||
if tt.input.extraMockResp == nil {
|
||||
return nil, tt.input.extraMockErr
|
||||
} else {
|
||||
return tt.input.extraMockResp.([]byte), tt.input.extraMockErr
|
||||
}
|
||||
return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
|
||||
},
|
||||
GetObjectRetentionFunc: func(contextMoqParam context.Context, bucket, object, versionId string) ([]byte, error) {
|
||||
return nil, tt.input.extraMockErr
|
||||
},
|
||||
}
|
||||
|
||||
@@ -281,6 +312,7 @@ func TestS3ApiController_PutObjectRetention(t *testing.T) {
|
||||
locals: tt.input.locals,
|
||||
body: tt.input.body,
|
||||
headers: tt.input.headers,
|
||||
queries: tt.input.queries,
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -317,6 +349,23 @@ func TestS3ApiController_PutObjectLegalHold(t *testing.T) {
|
||||
err: s3err.GetAPIError(s3err.ErrAccessDenied),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid request body",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
queries: map[string]string{
|
||||
"versionId": "invalid_versionId",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid request body",
|
||||
input: testInput{
|
||||
@@ -399,8 +448,9 @@ func TestS3ApiController_PutObjectLegalHold(t *testing.T) {
|
||||
tt.output.response,
|
||||
tt.output.err,
|
||||
ctxInputs{
|
||||
locals: tt.input.locals,
|
||||
body: tt.input.body,
|
||||
locals: tt.input.locals,
|
||||
body: tt.input.body,
|
||||
queries: tt.input.queries,
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -598,6 +648,26 @@ func TestS3ApiController_UploadPartCopy(t *testing.T) {
|
||||
err: s3err.GetAPIError(s3err.ErrAccessDenied),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid copy source: invalid versionId",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
headers: map[string]string{
|
||||
"X-Amz-Copy-Source": "bucket/object?versionId=invalid_versionId",
|
||||
},
|
||||
queries: map[string]string{
|
||||
"partNumber": "2",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid copy source",
|
||||
input: testInput{
|
||||
@@ -618,6 +688,27 @@ func TestS3ApiController_UploadPartCopy(t *testing.T) {
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidCopySourceEncoding),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "non empty request body",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
headers: map[string]string{
|
||||
"X-Amz-Copy-Source": "bucket/object",
|
||||
},
|
||||
queries: map[string]string{
|
||||
"partNumber": "2",
|
||||
},
|
||||
body: []byte("body"),
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrNonEmptyRequestBody),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid part number",
|
||||
input: testInput{
|
||||
@@ -715,6 +806,7 @@ func TestS3ApiController_UploadPartCopy(t *testing.T) {
|
||||
locals: tt.input.locals,
|
||||
headers: tt.input.headers,
|
||||
queries: tt.input.queries,
|
||||
body: tt.input.body,
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -836,6 +928,41 @@ func TestS3ApiController_CopyObject(t *testing.T) {
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidCopySourceBucket),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid copy source: versionId",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
headers: map[string]string{
|
||||
"X-Amz-Copy-Source": "bucket/object?versionId=invalid_versionId",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "non empty request body",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
headers: map[string]string{
|
||||
"X-Amz-Copy-Source": "bucket/object",
|
||||
},
|
||||
body: []byte("body"),
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrNonEmptyRequestBody),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid metadata directive",
|
||||
input: testInput{
|
||||
@@ -910,6 +1037,24 @@ func TestS3ApiController_CopyObject(t *testing.T) {
|
||||
err: s3err.GetAPIError(s3err.ErrObjectLockInvalidHeaders),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "object is locked",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
headers: map[string]string{
|
||||
"X-Amz-Copy-Source": "bucket/object",
|
||||
},
|
||||
extraMockErr: s3err.GetAPIError(s3err.ErrObjectLocked),
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrObjectLocked),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "backend returns error",
|
||||
input: testInput{
|
||||
@@ -919,6 +1064,7 @@ func TestS3ApiController_CopyObject(t *testing.T) {
|
||||
headers: map[string]string{
|
||||
"X-Amz-Copy-Source": "bucket/object",
|
||||
},
|
||||
extraMockErr: s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound),
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
@@ -949,6 +1095,7 @@ func TestS3ApiController_CopyObject(t *testing.T) {
|
||||
ETag: utils.GetStringPtr("ETag"),
|
||||
},
|
||||
},
|
||||
extraMockErr: s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound),
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
@@ -978,6 +1125,12 @@ func TestS3ApiController_CopyObject(t *testing.T) {
|
||||
GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
|
||||
},
|
||||
GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
|
||||
return s3response.GetBucketVersioningOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
},
|
||||
GetObjectLockConfigurationFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
|
||||
return nil, tt.input.extraMockErr
|
||||
},
|
||||
}
|
||||
|
||||
ctrl := S3ApiController{
|
||||
@@ -992,6 +1145,7 @@ func TestS3ApiController_CopyObject(t *testing.T) {
|
||||
ctxInputs{
|
||||
locals: tt.input.locals,
|
||||
headers: tt.input.headers,
|
||||
body: tt.input.body,
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -1193,6 +1347,9 @@ func TestS3ApiController_PutObject(t *testing.T) {
|
||||
GetObjectLockConfigurationFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
|
||||
return nil, tt.input.extraMockErr
|
||||
},
|
||||
GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
|
||||
return s3response.GetBucketVersioningOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
},
|
||||
}
|
||||
|
||||
ctrl := S3ApiController{
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
)

// ParseAcl retrieves the bucket ACL and stores it in the context locals
@@ -42,6 +43,16 @@ func ParseAcl(be backend.Backend) fiber.Handler {
parsedAcl.Owner = utils.ContextKeyRootAccessKey.Get(ctx).(string)
}

// If the expected bucket owner doesn't match the actual bucket owner,
// the gateway should return AccessDenied.
// The header appears in all actions except 'CreateBucket' and 'ListBuckets'.
// 'ParseAcl' is likewise applied to all actions except 'CreateBucket' and 'ListBuckets',
// so it is the right place to check the expected bucket owner.
bucketOwner := ctx.Get("X-Amz-Expected-Bucket-Owner")
if bucketOwner != "" && bucketOwner != parsedAcl.Owner {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}

utils.ContextKeyParsedAcl.Set(ctx, parsedAcl)
return nil
}
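The comment block above explains the new X-Amz-Expected-Bucket-Owner check in ParseAcl. From the client side that header is set through the SDK's ExpectedBucketOwner field; a minimal sketch (bucket, key, and owner values are placeholders, not values from this repository):

package clientexample

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// headWithExpectedOwner sends a HeadObject request carrying X-Amz-Expected-Bucket-Owner.
// If the value differs from the bucket owner's access key, the gateway now rejects the
// request with AccessDenied before the handler runs.
func headWithExpectedOwner(ctx context.Context, client *s3.Client) error {
	_, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket:              aws.String("mybucket"),
		Key:                 aws.String("myobject"),
		ExpectedBucketOwner: aws.String("some-other-owner"),
	})
	if err != nil {
		log.Printf("request rejected: %v", err)
	}
	return err
}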
73
s3api/middlewares/apply-bucket-cors-preflight.go
Normal file
@@ -0,0 +1,73 @@
|
||||
// Copyright 2026 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package middlewares
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
)
|
||||
|
||||
// ApplyBucketCORSPreflightFallback handles CORS preflight (OPTIONS) requests for S3 routes
// when no per-bucket CORS configuration exists.
//
// If the bucket has no CORS configuration and fallbackOrigin is set, it responds with 204 and:
// - Access-Control-Allow-Origin: fallbackOrigin
// - Vary: Origin, Access-Control-Request-Headers, Access-Control-Request-Method
// - Access-Control-Allow-Methods: mirrors Access-Control-Request-Method (if present)
// - Access-Control-Allow-Headers: mirrors Access-Control-Request-Headers (if present)
//
// If the bucket has a CORS configuration (or fallbackOrigin is blank), it calls next so the
// standard CORS OPTIONS handler can apply bucket-specific rules.
func ApplyBucketCORSPreflightFallback(be backend.Backend, fallbackOrigin string) fiber.Handler {
fallbackOrigin = strings.TrimSpace(fallbackOrigin)
if fallbackOrigin == "" {
return func(ctx *fiber.Ctx) error { return ctx.Next() }
}

return func(ctx *fiber.Ctx) error {
bucket := ctx.Params("bucket")
_, err := be.GetBucketCors(ctx.Context(), bucket)
if err != nil {
if s3Err, ok := err.(s3err.APIError); ok && (s3Err.Code == "NoSuchCORSConfiguration" || s3Err.Code == "NoSuchBucket") {
if len(ctx.Response().Header.Peek("Access-Control-Allow-Origin")) == 0 {
ctx.Response().Header.Add("Access-Control-Allow-Origin", fallbackOrigin)
}
if len(ctx.Response().Header.Peek("Vary")) == 0 {
ctx.Response().Header.Add("Vary", VaryHdr)
}

if reqMethod := strings.TrimSpace(ctx.Get("Access-Control-Request-Method")); reqMethod != "" {
if len(ctx.Response().Header.Peek("Access-Control-Allow-Methods")) == 0 {
ctx.Response().Header.Add("Access-Control-Allow-Methods", reqMethod)
}
}

if reqHeaders := strings.TrimSpace(ctx.Get("Access-Control-Request-Headers")); reqHeaders != "" {
if len(ctx.Response().Header.Peek("Access-Control-Allow-Headers")) == 0 {
ctx.Response().Header.Add("Access-Control-Allow-Headers", reqHeaders)
}
}

ctx.Status(fiber.StatusNoContent)
return nil
}
}

return ctx.Next()
}
}
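The doc comment above describes when the fallback preflight handler applies; this hunk does not show how it is mounted in the gateway's router. A minimal sketch of registering it ahead of a bucket OPTIONS handler, mirroring the tests that follow (the route pattern and handler body are illustrative assumptions):

package gatewayexample

import (
	"net/http"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/s3api/middlewares"
)

// wireCORSPreflight registers the fallback before the normal OPTIONS handler, so buckets
// without their own CORS configuration still answer preflight requests with the
// gateway-wide fallback origin.
func wireCORSPreflight(app *fiber.App, be backend.Backend, fallbackOrigin string) {
	app.Options("/:bucket/*",
		middlewares.ApplyBucketCORSPreflightFallback(be, fallbackOrigin),
		func(ctx *fiber.Ctx) error {
			// reached only when the bucket has its own CORS configuration
			return ctx.SendStatus(http.StatusNoContent)
		},
	)
}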
146
s3api/middlewares/apply-bucket-cors-preflight_test.go
Normal file
@@ -0,0 +1,146 @@
|
||||
// Copyright 2026 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package middlewares
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
)
|
||||
|
||||
type backendWithGetBucketCors struct {
|
||||
backend.BackendUnsupported
|
||||
getBucketCors func(ctx context.Context, bucket string) ([]byte, error)
|
||||
}
|
||||
|
||||
func (b backendWithGetBucketCors) GetBucketCors(ctx context.Context, bucket string) ([]byte, error) {
|
||||
return b.getBucketCors(ctx, bucket)
|
||||
}
|
||||
|
||||
func TestApplyBucketCORSPreflightFallback_NoBucketCors_Responds204(t *testing.T) {
|
||||
be := backendWithGetBucketCors{
|
||||
getBucketCors: func(ctx context.Context, bucket string) ([]byte, error) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchCORSConfiguration)
|
||||
},
|
||||
}
|
||||
|
||||
app := fiber.New()
|
||||
app.Options("/:bucket",
|
||||
ApplyBucketCORSPreflightFallback(be, "https://example.com"),
|
||||
func(c *fiber.Ctx) error {
|
||||
// Should not be reached if fallback triggers
|
||||
return c.SendStatus(http.StatusTeapot)
|
||||
},
|
||||
)
|
||||
|
||||
req, err := http.NewRequest(http.MethodOptions, "/testing", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("new request: %v", err)
|
||||
}
|
||||
req.Header.Set("Origin", "https://request-origin.example")
|
||||
req.Header.Set("Access-Control-Request-Method", "GET")
|
||||
req.Header.Set("Access-Control-Request-Headers", "content-type")
|
||||
|
||||
resp, err := app.Test(req)
|
||||
if err != nil {
|
||||
t.Fatalf("app.Test: %v", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
t.Fatalf("expected status 204, got %d", resp.StatusCode)
|
||||
}
|
||||
if got := resp.Header.Get("Access-Control-Allow-Origin"); got != "https://example.com" {
|
||||
t.Fatalf("expected allow origin fallback, got %q", got)
|
||||
}
|
||||
if got := resp.Header.Get("Access-Control-Allow-Methods"); got != "GET" {
|
||||
t.Fatalf("expected allow methods to mirror request, got %q", got)
|
||||
}
|
||||
if got := resp.Header.Get("Access-Control-Allow-Headers"); got != "content-type" {
|
||||
t.Fatalf("expected allow headers to mirror request, got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyBucketCORSPreflightFallback_NoSuchBucket_Responds204(t *testing.T) {
|
||||
be := backendWithGetBucketCors{
|
||||
getBucketCors: func(ctx context.Context, bucket string) ([]byte, error) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
},
|
||||
}
|
||||
|
||||
app := fiber.New()
|
||||
app.Options("/:bucket",
|
||||
ApplyBucketCORSPreflightFallback(be, "https://example.com"),
|
||||
func(c *fiber.Ctx) error {
|
||||
return c.SendStatus(http.StatusTeapot)
|
||||
},
|
||||
)
|
||||
|
||||
req, err := http.NewRequest(http.MethodOptions, "/testing", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("new request: %v", err)
|
||||
}
|
||||
req.Header.Set("Origin", "https://request-origin.example")
|
||||
req.Header.Set("Access-Control-Request-Method", "PUT")
|
||||
req.Header.Set("Access-Control-Request-Headers", "content-type")
|
||||
|
||||
resp, err := app.Test(req)
|
||||
if err != nil {
|
||||
t.Fatalf("app.Test: %v", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
t.Fatalf("expected status 204, got %d", resp.StatusCode)
|
||||
}
|
||||
if got := resp.Header.Get("Access-Control-Allow-Origin"); got != "https://example.com" {
|
||||
t.Fatalf("expected allow origin fallback, got %q", got)
|
||||
}
|
||||
if got := resp.Header.Get("Access-Control-Allow-Methods"); got != "PUT" {
|
||||
t.Fatalf("expected allow methods to mirror request, got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyBucketCORSPreflightFallback_BucketHasCors_CallsNext(t *testing.T) {
|
||||
be := backendWithGetBucketCors{
|
||||
getBucketCors: func(ctx context.Context, bucket string) ([]byte, error) {
|
||||
return []byte("dummy"), nil
|
||||
},
|
||||
}
|
||||
|
||||
app := fiber.New()
|
||||
app.Options("/:bucket",
|
||||
ApplyBucketCORSPreflightFallback(be, "https://example.com"),
|
||||
func(c *fiber.Ctx) error {
|
||||
return c.SendStatus(http.StatusOK)
|
||||
},
|
||||
)
|
||||
|
||||
req, err := http.NewRequest(http.MethodOptions, "/testing", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("new request: %v", err)
|
||||
}
|
||||
|
||||
resp, err := app.Test(req)
|
||||
if err != nil {
|
||||
t.Fatalf("app.Test: %v", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("expected status 200 from next handler, got %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
@@ -16,6 +16,7 @@ package middlewares

import (
    "fmt"
    "strings"

    "github.com/gofiber/fiber/v2"
    "github.com/versity/versitygw/auth"
@@ -31,12 +32,14 @@ var VaryHdr = "Origin, Access-Control-Request-Headers, Access-Control-Request-Me
// checks if origin and method meets the cors rules and
// adds the necessary response headers.
// CORS check is applied only when 'Origin' request header is present
func ApplyBucketCORS(be backend.Backend) fiber.Handler {
func ApplyBucketCORS(be backend.Backend, fallbackOrigin string) fiber.Handler {
    fallbackOrigin = strings.TrimSpace(fallbackOrigin)

    return func(ctx *fiber.Ctx) error {
        bucket := ctx.Params("bucket")
        origin := ctx.Get("Origin")
        // if the origin request header is empty, skip cors validation
        if origin == "" {
        // If neither Origin is present nor a fallback is configured, skip CORS entirely.
        if origin == "" && fallbackOrigin == "" {
            return nil
        }

@@ -46,12 +49,32 @@ func ApplyBucketCORS(be backend.Backend) fiber.Handler {
        // If CORS is not configured, S3Error will have code NoSuchCORSConfiguration.
        // In this case, we can safely continue. For any other error, we should log it.
        s3Err, ok := err.(s3err.APIError)
        if ok && (s3Err.Code == "NoSuchCORSConfiguration" || s3Err.Code == "NoSuchBucket") {
            // Optional global fallback: add Access-Control-Allow-Origin for buckets
            // without a specific CORS configuration.
            if fallbackOrigin != "" {
                if len(ctx.Response().Header.Peek("Access-Control-Allow-Origin")) == 0 {
                    ctx.Response().Header.Add("Access-Control-Allow-Origin", fallbackOrigin)
                }
                if len(ctx.Response().Header.Peek("Vary")) == 0 {
                    ctx.Response().Header.Add("Vary", VaryHdr)
                }
                ensureExposeETag(ctx)
            }
            return nil
        }
        if !ok || s3Err.Code != "NoSuchCORSConfiguration" {
            debuglogger.Logf("failed to get bucket cors for bucket %q: %v", bucket, err)
        }
        return nil
    }

    // If Origin is missing, don't attempt per-bucket CORS evaluation.
    // (Fallback has already been handled above for buckets without CORS config.)
    if origin == "" {
        return nil
    }

    cors, err := auth.ParseCORSOutput(data)
    if err != nil {
        return nil
@@ -100,6 +123,9 @@ func ApplyBucketCORS(be backend.Backend) fiber.Handler {
        }
    }

    // Always expose ETag and user metadata headers for browser clients.
    ensureExposeETag(ctx)

    return nil
    }
}
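For clarity, the non-preflight fallback branch added above amounts to three response headers; the helper below is a hypothetical restatement for illustration only (when no prior Expose-Headers value exists), not code from the change.

package main

// fallbackSimpleResponseHeaders restates the fallback: buckets without a CORS
// configuration still receive the configured origin, the Vary header, and an
// Access-Control-Expose-Headers entry that includes ETag.
func fallbackSimpleResponseHeaders(fallbackOrigin string) map[string]string {
    return map[string]string{
        "Access-Control-Allow-Origin":   fallbackOrigin,
        "Vary":                          "Origin, Access-Control-Request-Headers, Access-Control-Request-Method",
        "Access-Control-Expose-Headers": "ETag",
    }
}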
s3api/middlewares/apply-default-cors-preflight.go (new file, 58 lines)
@@ -0,0 +1,58 @@
// Copyright 2026 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
    "strings"

    "github.com/gofiber/fiber/v2"
)

// ApplyDefaultCORSPreflight responds to CORS preflight (OPTIONS) requests for routes
// that don't have per-bucket CORS configuration (e.g. admin APIs).
//
// It uses the provided fallbackOrigin as the Access-Control-Allow-Origin value.
// It mirrors Access-Control-Request-Method into Access-Control-Allow-Methods and
// mirrors Access-Control-Request-Headers into Access-Control-Allow-Headers.
func ApplyDefaultCORSPreflight(fallbackOrigin string) fiber.Handler {
    fallbackOrigin = strings.TrimSpace(fallbackOrigin)
    if fallbackOrigin == "" {
        return func(ctx *fiber.Ctx) error { return nil }
    }

    return func(ctx *fiber.Ctx) error {
        if len(ctx.Response().Header.Peek("Access-Control-Allow-Origin")) == 0 {
            ctx.Response().Header.Add("Access-Control-Allow-Origin", fallbackOrigin)
        }
        if len(ctx.Response().Header.Peek("Vary")) == 0 {
            ctx.Response().Header.Add("Vary", VaryHdr)
        }

        if reqMethod := strings.TrimSpace(ctx.Get("Access-Control-Request-Method")); reqMethod != "" {
            if len(ctx.Response().Header.Peek("Access-Control-Allow-Methods")) == 0 {
                ctx.Response().Header.Add("Access-Control-Allow-Methods", reqMethod)
            }
        }

        if reqHeaders := strings.TrimSpace(ctx.Get("Access-Control-Request-Headers")); reqHeaders != "" {
            if len(ctx.Response().Header.Peek("Access-Control-Allow-Headers")) == 0 {
                ctx.Response().Header.Add("Access-Control-Allow-Headers", reqHeaders)
            }
        }

        ctx.Status(fiber.StatusNoContent)
        return nil
    }
}
s3api/middlewares/apply-default-cors-preflight_test.go (new file, 59 lines)
@@ -0,0 +1,59 @@
// Copyright 2026 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
    "net/http"
    "testing"

    "github.com/gofiber/fiber/v2"
)

func TestApplyDefaultCORSPreflight_OptionsSetsPreflightHeaders(t *testing.T) {
    origin := "https://example.com"

    app := fiber.New()
    app.Options("/admin",
        ApplyDefaultCORSPreflight(origin),
        ApplyDefaultCORS(origin),
        func(c *fiber.Ctx) error { return nil },
    )

    req, err := http.NewRequest(http.MethodOptions, "/admin", nil)
    if err != nil {
        t.Fatalf("new request: %v", err)
    }
    req.Header.Set("Origin", "https://request-origin.example")
    req.Header.Set("Access-Control-Request-Method", "PATCH")
    req.Header.Set("Access-Control-Request-Headers", "content-type,authorization")

    resp, err := app.Test(req)
    if err != nil {
        t.Fatalf("app.Test: %v", err)
    }

    if resp.StatusCode != http.StatusNoContent {
        t.Fatalf("expected status 204, got %d", resp.StatusCode)
    }
    if got := resp.Header.Get("Access-Control-Allow-Origin"); got != origin {
        t.Fatalf("expected allow origin fallback, got %q", got)
    }
    if got := resp.Header.Get("Access-Control-Allow-Methods"); got != "PATCH" {
        t.Fatalf("expected allow methods to mirror request, got %q", got)
    }
    if got := resp.Header.Get("Access-Control-Allow-Headers"); got != "content-type,authorization" {
        t.Fatalf("expected allow headers to mirror request, got %q", got)
    }
}
s3api/middlewares/apply-default-cors.go (new file, 73 lines)
@@ -0,0 +1,73 @@
// Copyright 2026 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
    "strings"

    "github.com/gofiber/fiber/v2"
)

func ensureExposeETag(ctx *fiber.Ctx) {
    existing := strings.TrimSpace(string(ctx.Response().Header.Peek("Access-Control-Expose-Headers")))
    defaults := []string{"ETag"}
    if existing == "" {
        ctx.Response().Header.Add("Access-Control-Expose-Headers", strings.Join(defaults, ", "))
        return
    }

    lowerExisting := map[string]struct{}{}
    for _, part := range strings.Split(existing, ",") {
        p := strings.ToLower(strings.TrimSpace(part))
        if p != "" {
            lowerExisting[p] = struct{}{}
        }
    }

    updated := existing
    for _, h := range defaults {
        if _, ok := lowerExisting[strings.ToLower(h)]; ok {
            continue
        }
        updated += ", " + h
    }

    if updated != existing {
        ctx.Response().Header.Set("Access-Control-Expose-Headers", updated)
    }
}

// ApplyDefaultCORS adds a default Access-Control-Allow-Origin header to responses
// when the provided fallbackOrigin is non-empty.
//
// This is intended for routes that don't have per-bucket CORS configuration (e.g. admin APIs).
// It will not override an existing Access-Control-Allow-Origin header.
func ApplyDefaultCORS(fallbackOrigin string) fiber.Handler {
    fallbackOrigin = strings.TrimSpace(fallbackOrigin)
    if fallbackOrigin == "" {
        return func(ctx *fiber.Ctx) error { return nil }
    }

    return func(ctx *fiber.Ctx) error {
        if len(ctx.Response().Header.Peek("Access-Control-Allow-Origin")) == 0 {
            ctx.Response().Header.Add("Access-Control-Allow-Origin", fallbackOrigin)
        }
        if len(ctx.Response().Header.Peek("Vary")) == 0 {
            ctx.Response().Header.Add("Vary", VaryHdr)
        }
        ensureExposeETag(ctx)
        return nil
    }
}
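The ensureExposeETag merge rule above can be read as a small pure function; the following is an illustrative restatement (not code from the change), showing that "ETag" is appended only when it is not already listed, compared case-insensitively.

package main

import (
    "fmt"
    "strings"
)

// mergeExposeETag restates the rule used by ensureExposeETag for the
// Access-Control-Expose-Headers value.
func mergeExposeETag(existing string) string {
    if strings.TrimSpace(existing) == "" {
        return "ETag"
    }
    for _, part := range strings.Split(existing, ",") {
        if strings.EqualFold(strings.TrimSpace(part), "ETag") {
            return existing
        }
    }
    return existing + ", ETag"
}

func main() {
    fmt.Println(mergeExposeETag(""))                    // ETag
    fmt.Println(mergeExposeETag("x-amz-request-id"))    // x-amz-request-id, ETag
    fmt.Println(mergeExposeETag("etag, content-range")) // unchanged: etag already present
}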
s3api/middlewares/apply-default-cors_test.go (new file, 74 lines)
@@ -0,0 +1,74 @@
// Copyright 2026 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
    "net/http"
    "testing"

    "github.com/gofiber/fiber/v2"
)

func TestApplyDefaultCORS_AddsHeaderWhenOriginSet(t *testing.T) {
    origin := "https://example.com"

    app := fiber.New()
    app.Get("/admin", ApplyDefaultCORS(origin), func(c *fiber.Ctx) error {
        return c.SendStatus(http.StatusOK)
    })

    req, err := http.NewRequest(http.MethodGet, "/admin", nil)
    if err != nil {
        t.Fatalf("new request: %v", err)
    }

    resp, err := app.Test(req)
    if err != nil {
        t.Fatalf("app.Test: %v", err)
    }

    if got := resp.Header.Get("Access-Control-Allow-Origin"); got != origin {
        t.Fatalf("expected fallback origin header, got %q", got)
    }
    if got := resp.Header.Get("Access-Control-Expose-Headers"); got != "ETag" {
        t.Fatalf("expected expose headers to include ETag, got %q", got)
    }
}

func TestApplyDefaultCORS_DoesNotOverrideExistingHeader(t *testing.T) {
    origin := "https://example.com"

    app := fiber.New()
    app.Get("/admin", func(c *fiber.Ctx) error {
        c.Response().Header.Add("Access-Control-Allow-Origin", "https://already-set.com")
        return nil
    }, ApplyDefaultCORS(origin), func(c *fiber.Ctx) error {
        return c.SendStatus(http.StatusOK)
    })

    req, err := http.NewRequest(http.MethodGet, "/admin", nil)
    if err != nil {
        t.Fatalf("new request: %v", err)
    }

    resp, err := app.Test(req)
    if err != nil {
        t.Fatalf("app.Test: %v", err)
    }

    if got := resp.Header.Get("Access-Control-Allow-Origin"); got != "https://already-set.com" {
        t.Fatalf("expected existing header to remain, got %q", got)
    }
}
@@ -17,9 +17,7 @@ package middlewares
import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "net/http"
    "strconv"
    "time"

@@ -39,7 +37,7 @@ type RootUserConfig struct {
    Secret string
}

func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string) fiber.Handler {
func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string, streamBody bool, requireContentSha256 bool) fiber.Handler {
    acct := accounts{root: root, iam: iam}

    return func(ctx *fiber.Ctx) error {
@@ -52,9 +50,27 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string)
        return nil
    }

    // Check X-Amz-Date header
    date := ctx.Get("X-Amz-Date")
    if date == "" {
        return s3err.GetAPIError(s3err.ErrMissingDateHeader)
    }

    // Parse the date and check the date validity
    tdate, err := time.Parse(iso8601Format, date)
    if err != nil {
        return s3err.GetAPIError(s3err.ErrMissingDateHeader)
    }

    // Validate the dates difference
    err = utils.ValidateDate(tdate)
    if err != nil {
        return err
    }

    authorization := ctx.Get("Authorization")
    if authorization == "" {
        return s3err.GetAPIError(s3err.ErrAuthHeaderEmpty)
        return s3err.GetAPIError(s3err.ErrInvalidAuthHeader)
    }

    authData, err := utils.ParseAuthorization(authorization)
@@ -63,11 +79,7 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string)
    }

    if authData.Region != region {
        return s3err.APIError{
            Code:           "SignatureDoesNotMatch",
            Description:    fmt.Sprintf("Credential should be scoped to a valid Region, not %v", authData.Region),
            HTTPStatusCode: http.StatusForbidden,
        }
        return s3err.MalformedAuth.IncorrectRegion(region, authData.Region)
    }

    utils.ContextKeyIsRoot.Set(ctx, authData.Access == root.Access)
@@ -80,29 +92,11 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string)
        return err
    }

    utils.ContextKeyAccount.Set(ctx, account)

    // Check X-Amz-Date header
    date := ctx.Get("X-Amz-Date")
    if date == "" {
        return s3err.GetAPIError(s3err.ErrMissingDateHeader)
    }

    // Parse the date and check the date validity
    tdate, err := time.Parse(iso8601Format, date)
    if err != nil {
        return s3err.GetAPIError(s3err.ErrMalformedDate)
    }

    if date[:8] != authData.Date {
        return s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch)
        return s3err.MalformedAuth.DateMismatch()
    }

    // Validate the dates difference
    err = utils.ValidateDate(tdate)
    if err != nil {
        return err
    }
    utils.ContextKeyAccount.Set(ctx, account)

    var contentLength int64
    contentLengthStr := ctx.Get("Content-Length")
@@ -115,10 +109,18 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string)
    }

    hashPayload := ctx.Get("X-Amz-Content-Sha256")
    if requireContentSha256 && hashPayload == "" {
        return s3err.GetAPIError(s3err.ErrMissingContentSha256)
    }
    if !utils.IsValidSh256PayloadHeader(hashPayload) {
        return s3err.GetAPIError(s3err.ErrInvalidSHA256Paylod)
    }
    if utils.IsBigDataAction(ctx) {
        // the streaming payload type is allowed only in PutObject and UploadPart
        // e.g. STREAMING-UNSIGNED-PAYLOAD-TRAILER
        if !streamBody && utils.IsStreamingPayload(hashPayload) {
            return s3err.GetAPIError(s3err.ErrInvalidSHA256PayloadUsage)
        }
        if streamBody {
            // for streaming PUT actions, authorization is deferred
            // until end of stream due to need to get length and
            // checksum of the stream to validate authorization
@@ -132,7 +134,7 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string)
            var err error
            wrapBodyReader(ctx, func(r io.Reader) io.Reader {
                var cr io.Reader
                cr, err = utils.NewChunkReader(ctx, r, authData, region, account.Secret, tdate)
                cr, err = utils.NewChunkReader(ctx, r, authData, account.Secret, tdate)
                return cr
            })
            if err != nil {
@@ -166,7 +168,7 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string)
        }
    }

    err = utils.CheckValidSignature(ctx, authData, account.Secret, hashPayload, tdate, contentLength)
    err = utils.CheckValidSignature(ctx, authData, account.Secret, hashPayload, tdate, contentLength, false)
    if err != nil {
        return err
    }

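A minimal sketch of constructing the middleware with the two new parameters; the real call site is in s3api/router.go (diff suppressed below), so the flag values below are illustrative assumptions only.

package main

import (
    "github.com/gofiber/fiber/v2"
    "github.com/versity/versitygw/auth"
    "github.com/versity/versitygw/s3api/middlewares"
)

// buildSigV4Middleware is a hypothetical helper showing the extended signature.
func buildSigV4Middleware(root middlewares.RootUserConfig, iam auth.IAMService) fiber.Handler {
    return middlewares.VerifyV4Signature(
        root,
        iam,
        "us-east-1", // region the SigV4 credential must be scoped to
        true,        // streamBody: the route streams its payload (PutObject, UploadPart)
        false,       // requireContentSha256: don't reject requests missing X-Amz-Content-Sha256
    )
}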
@@ -22,17 +22,83 @@ import (
    "github.com/versity/versitygw/s3api/utils"
)

// ChecksumReader extends io.Reader with checksum-related metadata.
// It is used to differentiate normal readers from readers that can
// report a checksum and the algorithm used to produce it.
type ChecksumReader interface {
    io.Reader
    Algorithm() string
    Checksum() string
}

// NewChecksumReader wraps a stackedReader and returns a reader that
// preserves checksum behavior when the *original* bodyReader implemented
// ChecksumReader.
//
// If bodyReader already supports ChecksumReader, we wrap stackedReader
// with MockChecksumReader so that reading continues from stackedReader,
// but Algorithm() and Checksum() still delegate to the underlying reader.
//
// If bodyReader is not a ChecksumReader, we simply return stackedReader.
func NewChecksumReader(bodyReader io.Reader, stackedReader io.Reader) io.Reader {
    _, ok := bodyReader.(ChecksumReader)
    if ok {
        return &MockChecksumReader{rdr: stackedReader}
    }

    return stackedReader
}

// MockChecksumReader is a wrapper around an io.Reader that forwards Read()
// but also conditionally exposes checksum metadata if the underlying reader
// implements the ChecksumReader interface.
type MockChecksumReader struct {
    rdr io.Reader
}

// Read simply forwards data reads to the underlying reader.
func (rr *MockChecksumReader) Read(buffer []byte) (int, error) {
    return rr.rdr.Read(buffer)
}

// Algorithm returns the checksum algorithm used by the underlying reader,
// but only if the wrapped reader implements ChecksumReader.
func (rr *MockChecksumReader) Algorithm() string {
    r, ok := rr.rdr.(ChecksumReader)
    if ok {
        return r.Algorithm()
    }

    return ""
}

// Checksum returns the checksum value from the underlying reader,
// if it implements ChecksumReader. Otherwise returns an empty string.
func (rr *MockChecksumReader) Checksum() string {
    r, ok := rr.rdr.(ChecksumReader)
    if ok {
        return r.Checksum()
    }

    return ""
}

var _ ChecksumReader = &MockChecksumReader{}

func wrapBodyReader(ctx *fiber.Ctx, wr func(io.Reader) io.Reader) {
    r, ok := utils.ContextKeyBodyReader.Get(ctx).(io.Reader)
    rdr, ok := utils.ContextKeyBodyReader.Get(ctx).(io.Reader)
    if !ok {
        r = ctx.Request().BodyStream()
        rdr = ctx.Request().BodyStream()
        // Override the body reader with an empty reader to prevent panics
        // in case of unexpected or malformed HTTP requests.
        if r == nil {
            r = bytes.NewBuffer([]byte{})
        if rdr == nil {
            rdr = bytes.NewBuffer([]byte{})
        }
    }

    r = wr(r)
    r := wr(rdr)
    // Ensure checksum behavior is stacked if the original body reader had it.
    r = NewChecksumReader(rdr, r)

    utils.ContextKeyBodyReader.Set(ctx, r)
}

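A small sketch of the interface-preserving behavior described above; fakeChecksumReader is an assumed stand-in for a body reader that already carries checksum metadata, and io.LimitReader stands in for any wrapper stacked on top of it.

package main

import (
    "fmt"
    "io"
    "strings"

    "github.com/versity/versitygw/s3api/middlewares"
)

// fakeChecksumReader stands in for an original body reader that reports checksum metadata.
type fakeChecksumReader struct{ io.Reader }

func (fakeChecksumReader) Algorithm() string { return "CRC32" }
func (fakeChecksumReader) Checksum() string  { return "AAAAAA==" }

func main() {
    body := fakeChecksumReader{strings.NewReader("payload")}
    stacked := io.LimitReader(body, 7) // any wrapper that hides the checksum methods

    r := middlewares.NewChecksumReader(body, stacked)
    _, stillChecksum := r.(middlewares.ChecksumReader)
    fmt.Println(stillChecksum) // true: reads flow through stacked, the interface is preserved

    plain := strings.NewReader("payload")
    r2 := middlewares.NewChecksumReader(plain, stacked)
    fmt.Println(r2 == stacked) // true: non-checksum bodies get the stacked reader back unchanged
}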
s3api/middlewares/checksum.go (new file, 121 lines)
@@ -0,0 +1,121 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
    "bytes"
    "encoding/base64"
    "io"
    "strings"

    "github.com/gofiber/fiber/v2"
    "github.com/versity/versitygw/s3api/utils"
    "github.com/versity/versitygw/s3err"
)

// VerifyChecksums parses, validates, and calculates the
// Content-MD5 and x-amz-checksum-* headers.
// Additionally, it ensures that the request body is not empty
// for actions that require a non-empty body. For large data actions(PutObject, UploadPart),
// it wraps the body reader to handle Content-MD5:
// the x-amz-checksum-* headers are explicitly processed by the backend.
func VerifyChecksums(streamBody bool, requireBody bool, requireChecksum bool) fiber.Handler {
    return func(ctx *fiber.Ctx) error {
        md5sum := ctx.Get("Content-Md5")

        if streamBody {
            // for large data actions(PutObject, UploadPart)
            // only stack the md5 reader,as x-amz-checksum-*
            // calculation is explicitly handled in back-end
            if md5sum == "" {
                return nil
            }

            if !isValidMD5(md5sum) {
                return s3err.GetAPIError(s3err.ErrInvalidDigest)
            }

            var err error
            wrapBodyReader(ctx, func(r io.Reader) io.Reader {
                r, err = utils.NewHashReader(r, md5sum, utils.HashTypeMd5)
                return r
            })
            if err != nil {
                return err
            }
            return nil
        }

        body := ctx.Body()
        if requireBody && len(body) == 0 {
            return s3err.GetAPIError(s3err.ErrMissingRequestBody)
        }

        var rdr io.Reader
        var err error
        if md5sum != "" {
            if !isValidMD5(md5sum) {
                return s3err.GetAPIError(s3err.ErrInvalidDigest)
            }

            rdr, err = utils.NewHashReader(bytes.NewReader(body), md5sum, utils.HashTypeMd5)
            if err != nil {
                return err
            }
        }

        // parse and validate checksum headers
        algo, checksums, err := utils.ParseChecksumHeadersAndSdkAlgo(ctx)
        if err != nil {
            return err
        }

        if algo != "" {
            r, err := utils.NewHashReader(bytes.NewReader(body), checksums[algo], utils.HashType(strings.ToLower(string(algo))))
            if err != nil {
                return err
            }

            if rdr != nil {
                // combine both md5 and the checksum readers
                rdr = io.MultiReader(rdr, r)
            } else {
                rdr = r
            }
        }

        if rdr == nil && requireChecksum {
            return s3err.GetAPIError(s3err.ErrChecksumRequired)
        }

        if rdr != nil {
            _, err = io.Copy(io.Discard, rdr)
            if err != nil {
                return err
            }
        }

        return nil
    }
}

func isValidMD5(s string) bool {
    decoded, err := base64.StdEncoding.DecodeString(s)
    if err != nil {
        return false
    }

    return len(decoded) == 16
}
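For reference, the Content-MD5 value that isValidMD5 accepts is the base64 encoding of the raw 16-byte MD5 digest of the request body; a minimal sketch of producing such a value:

package main

import (
    "crypto/md5"
    "encoding/base64"
    "fmt"
)

func main() {
    body := []byte("hello world")
    sum := md5.Sum(body) // [16]byte digest
    contentMD5 := base64.StdEncoding.EncodeToString(sum[:])
    fmt.Println(contentMD5) // XrY7u+Ae7tCTyyK7j1rNww== — decodes to 16 bytes, so isValidMD5 returns true
}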
@@ -32,6 +32,10 @@ func HostStyleParser(virtualDomain string) fiber.Handler {
        return ctx.Next()
    }
    path := ctx.Path()
    if path == "/" {
        // omit the trailing / for bucket operations
        path = ""
    }
    pathStyleUrl := fmt.Sprintf("/%v%v", bucket, path)
    ctx.Path(pathStyleUrl)

@@ -1,54 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
    "crypto/md5"
    "io"

    "github.com/gofiber/fiber/v2"
    "github.com/versity/versitygw/s3api/utils"
    "github.com/versity/versitygw/s3err"
)

func VerifyMD5Body() fiber.Handler {
    return func(ctx *fiber.Ctx) error {
        incomingSum := ctx.Get("Content-Md5")
        if incomingSum == "" {
            return nil
        }

        if utils.IsBigDataAction(ctx) {
            var err error
            wrapBodyReader(ctx, func(r io.Reader) io.Reader {
                r, err = utils.NewHashReader(r, incomingSum, utils.HashTypeMd5)
                return r
            })
            if err != nil {
                return err
            }
            return nil
        }

        sum := md5.Sum(ctx.Body())
        calculatedSum := utils.Base64SumString(sum[:])

        if incomingSum != calculatedSum {
            return s3err.GetAPIError(s3err.ErrInvalidDigest)
        }

        return nil
    }
}
s3api/middlewares/md5_test.go (new file, 41 lines)
@@ -0,0 +1,41 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func Test_isValidMD5(t *testing.T) {
    tests := []struct {
        name string
        s    string
        want bool
    }{
        {"invalid", "hello world", false},
        {"valid base64", "aGVsbCBzLGRham5mamFuc2Zhc2RmZHNhZmRzYWY=", false},
        {"valid 1", "CY9rzUYh03PK3k6DJie09g==", true},
        {"valid 2", "uU0nuZNNPgilLlLX2n2r+s==", true},
        {"valid 3", "7Qdih1MuhjZehB6Sv8UNjA==", true},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got := isValidMD5(tt.s)
            assert.Equal(t, tt.want, got)
        })
    }
}
@@ -24,7 +24,7 @@ import (
    "github.com/versity/versitygw/s3err"
)

func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, region string) fiber.Handler {
func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, region string, streamBody bool) fiber.Handler {
    acct := accounts{root: root, iam: iam}

    return func(ctx *fiber.Ctx) error {
@@ -32,10 +32,15 @@ func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, region
        if utils.ContextKeyPublicBucket.IsSet(ctx) {
            return nil
        }
        if ctx.Query("X-Amz-Signature") == "" {
        if !utils.IsPresignedURLAuth(ctx) {
            return nil
        }

        if ctx.Request().URI().QueryArgs().Has("X-Amz-Security-Token") {
            // OIDC Authorization with X-Amz-Security-Token is not supported
            return s3err.QueryAuthErrors.SecurityTokenNotSupported()
        }

        // Set in the context the "authenticated" key, in case the authentication succeeds,
        // otherwise the middleware will return the caucht error
        utils.ContextKeyAuthenticated.Set(ctx, true)
@@ -66,7 +71,7 @@ func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, region
            }
        }

        if utils.IsBigDataAction(ctx) {
        if streamBody {
            // Content-Length has to be set for data uploads: PutObject, UploadPart
            if contentLengthStr == "" {
                return s3err.GetAPIError(s3err.ErrMissingContentLength)
@@ -83,7 +88,7 @@ func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, region
            return nil
        }

        err = utils.CheckPresignedSignature(ctx, authData, account.Secret)
        err = utils.CheckPresignedSignature(ctx, authData, account.Secret, streamBody)
        if err != nil {
            return err
        }

@@ -15,6 +15,8 @@
package middlewares

import (
    "crypto/sha256"
    "encoding/hex"
    "io"
    "strings"

@@ -28,10 +30,10 @@ import (

// AuthorizePublicBucketAccess checks if the bucket grants public
// access to anonymous requesters
func AuthorizePublicBucketAccess(be backend.Backend, s3action string, policyPermission auth.Action, permission auth.Permission) fiber.Handler {
func AuthorizePublicBucketAccess(be backend.Backend, s3action string, policyPermission auth.Action, permission auth.Permission, region string, streamBody bool) fiber.Handler {
    return func(ctx *fiber.Ctx) error {
        // skip for authenticated requests
        if ctx.Query("X-Amz-Algorithm") != "" || ctx.Get("Authorization") != "" {
        if utils.IsPresignedURLAuth(ctx) || ctx.Get("Authorization") != "" {
            return nil
        }

@@ -57,12 +59,31 @@ func AuthorizePublicBucketAccess(be backend.Backend, s3action string, policyPerm
        bucket, object := parsePath(ctx.Path())
        err := auth.VerifyPublicAccess(ctx.Context(), be, policyPermission, permission, bucket, object)
        if err != nil {
            if s3action == metrics.ActionHeadBucket {
                // add the bucket region header for HeadBucket
                // if anonymous access is denied
                ctx.Response().Header.Add("x-amz-bucket-region", region)
            }
            return err
        }

        if utils.IsBigDataAction(ctx) {
            payloadType := ctx.Get("X-Amz-Content-Sha256")
            if utils.IsUnsignedStreamingPayload(payloadType) {
        // at this point the bucket is considered as public
        // as public access is granted
        utils.ContextKeyPublicBucket.Set(ctx, true)

        payloadHash := ctx.Get("X-Amz-Content-Sha256")
        err = utils.IsAnonymousPayloadHashSupported(payloadHash)
        if err != nil {
            return err
        }

        if streamBody {
            if utils.IsUnsignedStreamingPayload(payloadHash) {
                cLength, err := utils.ParseDecodedContentLength(ctx)
                if err != nil {
                    return err
                }
                // stack an unsigned streaming payload reader
                checksumType, err := utils.ExtractChecksumType(ctx)
                if err != nil {
                    return err
@@ -70,19 +91,37 @@ func AuthorizePublicBucketAccess(be backend.Backend, s3action string, policyPerm

                wrapBodyReader(ctx, func(r io.Reader) io.Reader {
                    var cr io.Reader
                    cr, err = utils.NewUnsignedChunkReader(r, checksumType)
                    cr, err = utils.NewUnsignedChunkReader(r, checksumType, cLength)
                    return cr
                })
                if err != nil {
                    return err
                }
            } else {
                utils.ContextKeyBodyReader.Set(ctx, ctx.Request().BodyStream())
            }

                return err
            } else if utils.IsUnsignedPaylod(payloadHash) {
                // for UNSIGNED-PAYLOD simply store the body reader in context locals
                utils.ContextKeyBodyReader.Set(ctx, ctx.Request().BodyStream())
                return nil
            } else {
                // stack a hash reader to calculated the payload sha256 hash
                wrapBodyReader(ctx, func(r io.Reader) io.Reader {
                    var cr io.Reader
                    cr, err = utils.NewHashReader(r, payloadHash, utils.HashTypeSha256Hex)
                    return cr
                })

                return err
            }
        }

        utils.ContextKeyPublicBucket.Set(ctx, true)
        if payloadHash != "" {
            // Calculate the hash of the request payload
            hashedPayload := sha256.Sum256(ctx.Body())
            hexPayload := hex.EncodeToString(hashedPayload[:])

            // Compare the calculated hash with the hash provided
            if payloadHash != hexPayload {
                return s3err.GetAPIError(s3err.ErrContentSHA256Mismatch)
            }
        }

        return nil
    }

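The anonymous-upload branches above distinguish three X-Amz-Content-Sha256 shapes when streamBody is set; the helper below is a hypothetical restatement of that dispatch (the real checks are utils.IsUnsignedStreamingPayload and utils.IsUnsignedPaylod), not code from the change.

package main

import (
    "fmt"
    "strings"
)

// classifyAnonymousPayload names the reader the middleware stacks for a given
// X-Amz-Content-Sha256 value on an anonymous big-data upload.
func classifyAnonymousPayload(payloadHash string) string {
    switch {
    case strings.HasPrefix(payloadHash, "STREAMING-UNSIGNED-PAYLOAD"):
        return "unsigned chunk reader (decodes the chunked framing using the decoded content length)"
    case payloadHash == "UNSIGNED-PAYLOAD":
        return "raw body reader stored in context (no hash verification)"
    default:
        return "hash reader that verifies the hex-encoded SHA-256 digest"
    }
}

func main() {
    fmt.Println(classifyAnonymousPayload("STREAMING-UNSIGNED-PAYLOAD-TRAILER"))
    fmt.Println(classifyAnonymousPayload("UNSIGNED-PAYLOAD"))
}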
s3api/router.go (923 lines changed): file diff suppressed because it is too large.
s3api/router_cors_test.go (new file, 253 lines)
@@ -0,0 +1,253 @@
// Copyright 2026 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package s3api

import (
    "context"
    "net/http"
    "testing"

    "github.com/gofiber/fiber/v2"
    "github.com/versity/versitygw/auth"
    "github.com/versity/versitygw/backend"
    "github.com/versity/versitygw/s3api/middlewares"
    "github.com/versity/versitygw/s3err"
)

type backendWithCorsOnly struct {
    backend.BackendUnsupported
}

func (b backendWithCorsOnly) GetBucketCors(ctx context.Context, bucket string) ([]byte, error) {
    return nil, s3err.GetAPIError(s3err.ErrNoSuchCORSConfiguration)
}

func TestS3ApiRouter_ListBuckets_DefaultCORSAllowOrigin(t *testing.T) {
    origin := "https://example.com"

    app := fiber.New()
    (&S3ApiRouter{}).Init(
        app,
        backend.BackendUnsupported{},
        &auth.IAMServiceInternal{},
        nil,
        nil,
        nil,
        nil,
        false,
        "us-east-1",
        "",
        middlewares.RootUserConfig{},
        origin,
    )

    req, err := http.NewRequest(http.MethodGet, "/", nil)
    if err != nil {
        t.Fatalf("new request: %v", err)
    }

    resp, err := app.Test(req)
    if err != nil {
        t.Fatalf("app.Test: %v", err)
    }

    if got := resp.Header.Get("Access-Control-Allow-Origin"); got != origin {
        t.Fatalf("expected Access-Control-Allow-Origin %q, got %q", origin, got)
    }
    if got := resp.Header.Get("Access-Control-Expose-Headers"); got == "" {
        t.Fatalf("expected Access-Control-Expose-Headers to be set")
    }
}

func TestS3ApiRouter_ListBuckets_OptionsPreflight_DefaultCORS(t *testing.T) {
    origin := "https://example.com"

    app := fiber.New()
    (&S3ApiRouter{}).Init(
        app,
        backend.BackendUnsupported{},
        &auth.IAMServiceInternal{},
        nil,
        nil,
        nil,
        nil,
        false,
        "us-east-1",
        "",
        middlewares.RootUserConfig{},
        origin,
    )

    req, err := http.NewRequest(http.MethodOptions, "/", nil)
    if err != nil {
        t.Fatalf("new request: %v", err)
    }
    req.Header.Set("Origin", "https://client.example")
    req.Header.Set("Access-Control-Request-Method", "GET")
    req.Header.Set("Access-Control-Request-Headers", "authorization")

    resp, err := app.Test(req)
    if err != nil {
        t.Fatalf("app.Test: %v", err)
    }

    if resp.StatusCode != http.StatusNoContent {
        t.Fatalf("expected status %d, got %d", http.StatusNoContent, resp.StatusCode)
    }
    if got := resp.Header.Get("Access-Control-Allow-Origin"); got != origin {
        t.Fatalf("expected Access-Control-Allow-Origin %q, got %q", origin, got)
    }
}

func TestS3ApiRouter_PutBucketTagging_ErrorStillIncludesFallbackCORS(t *testing.T) {
    origin := "http://127.0.0.1:9090"

    app := fiber.New()
    (&S3ApiRouter{}).Init(
        app,
        backendWithCorsOnly{},
        &auth.IAMServiceInternal{},
        nil,
        nil,
        nil,
        nil,
        false,
        "us-east-1",
        "",
        middlewares.RootUserConfig{},
        origin,
    )

    req, err := http.NewRequest(http.MethodPut, "/testing?tagging", nil)
    if err != nil {
        t.Fatalf("new request: %v", err)
    }
    req.Header.Set("Origin", origin)

    resp, err := app.Test(req)
    if err != nil {
        t.Fatalf("app.Test: %v", err)
    }

    if got := resp.Header.Get("Access-Control-Allow-Origin"); got != origin {
        t.Fatalf("expected Access-Control-Allow-Origin %q, got %q", origin, got)
    }
}

func TestS3ApiRouter_PutObjectTagging_ErrorStillIncludesFallbackCORS(t *testing.T) {
    origin := "http://127.0.0.1:9090"

    app := fiber.New()
    (&S3ApiRouter{}).Init(
        app,
        backendWithCorsOnly{},
        &auth.IAMServiceInternal{},
        nil,
        nil,
        nil,
        nil,
        false,
        "us-east-1",
        "",
        middlewares.RootUserConfig{},
        origin,
    )

    req, err := http.NewRequest(http.MethodPut, "/testing/myobj?tagging", nil)
    if err != nil {
        t.Fatalf("new request: %v", err)
    }
    req.Header.Set("Origin", origin)

    resp, err := app.Test(req)
    if err != nil {
        t.Fatalf("app.Test: %v", err)
    }

    if got := resp.Header.Get("Access-Control-Allow-Origin"); got != origin {
        t.Fatalf("expected Access-Control-Allow-Origin %q, got %q", origin, got)
    }
}

func TestS3ApiRouter_CopyObject_ErrorStillIncludesFallbackCORS(t *testing.T) {
    origin := "http://127.0.0.1:9090"

    app := fiber.New()
    (&S3ApiRouter{}).Init(
        app,
        backendWithCorsOnly{},
        &auth.IAMServiceInternal{},
        nil,
        nil,
        nil,
        nil,
        false,
        "us-east-1",
        "",
        middlewares.RootUserConfig{},
        origin,
    )

    req, err := http.NewRequest(http.MethodPut, "/testing/myobj", nil)
    if err != nil {
        t.Fatalf("new request: %v", err)
    }
    req.Header.Set("Origin", origin)
    req.Header.Set("X-Amz-Copy-Source", "srcbucket/srckey")

    resp, err := app.Test(req)
    if err != nil {
        t.Fatalf("app.Test: %v", err)
    }

    if got := resp.Header.Get("Access-Control-Allow-Origin"); got != origin {
        t.Fatalf("expected Access-Control-Allow-Origin %q, got %q", origin, got)
    }
}

func TestS3ApiRouter_PutObject_ErrorStillIncludesFallbackCORS(t *testing.T) {
    origin := "http://127.0.0.1:9090"

    app := fiber.New()
    (&S3ApiRouter{}).Init(
        app,
        backendWithCorsOnly{},
        &auth.IAMServiceInternal{},
        nil,
        nil,
        nil,
        nil,
        false,
        "us-east-1",
        "",
        middlewares.RootUserConfig{},
        origin,
    )

    req, err := http.NewRequest(http.MethodPut, "/testing/myobj", nil)
    if err != nil {
        t.Fatalf("new request: %v", err)
    }
    req.Header.Set("Origin", origin)

    resp, err := app.Test(req)
    if err != nil {
        t.Fatalf("app.Test: %v", err)
    }

    if got := resp.Header.Get("Access-Control-Allow-Origin"); got != origin {
        t.Fatalf("expected Access-Control-Allow-Origin %q, got %q", origin, got)
    }
}
@@ -46,7 +46,7 @@ func TestS3ApiRouter_Init(t *testing.T) {
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            tt.sa.Init(tt.args.app, tt.args.be, tt.args.iam, nil, nil, nil, nil, false, "us-east-1", middlewares.RootUserConfig{})
            tt.sa.Init(tt.args.app, tt.args.be, tt.args.iam, nil, nil, nil, nil, false, "us-east-1", "", middlewares.RootUserConfig{}, "")
        })
    }
}

Some files were not shown because too many files have changed in this diff.