Mirror of https://github.com/versity/versitygw.git, synced 2026-01-24 12:02:02 +00:00
Compare commits: 105 commits
Commit SHA1s:
ca2dd9b4b3 10152cefbc 948b424ed2 d2996e1131 2489d876c9 a69f5a4db7 df31eb031a b70be6116e e08539e909 f78483a938
cb1d469742 792a3eb2c5 252090d9e9 8569b158f0 45b6a4a74e b576ed87c5 0ba5cbe8b9 a4d341fc4e 6c564febb9 0c520a30cf
935e322764 f6225aa968 1d30567129 bfc753b302 86e2b02e55 2cf8610831 8e3e633a24 12092cf297 75cae81f0a 68d7924afa
e37dfa6aaf 04f8946798 43fd18b069 eb72d3c6e8 43559e646e 6e11e3350c c0e6a08e1e 3866476257 d45cfa2663 7a26aec685
2a7e76a44f 5979e056e1 2a23686c87 f9e903aaf4 06f4f0ac15 abbd6697d1 6198bf4b53 d05d29010d b1e9dead5d bf5b0b85d8
2561ef9708 b78d21c3db 0cab42d9fe 12f0b5c43c e81b87f71c ff00e42538 cf99b3e036 c91e5dc3f2 d446102f69 f2a75708e4
6fd939386c dff20b5b9d 7a4dd59c81 6f74d2cddb 8e0eec0201 0cfacfc049 6b017aa5cd 841a012ce0 067de184a9 10ab569277
01552b78c7 d0158420ee c2c2306d37 841b3d61a4 fa2e677370 9f6bf183f4 12e1308d1f f235b62b70 06a45124b1 a75aa9bad5
4cbd58cc66 e5343cf611 0a2c7ac7cb b1fed810a7 48b590fcb8 f835ef1772 d819fa8665 0240bb922c 0b3722bd09 7c454d230e
981a34e9d5 657b9ac046 61308d2fbf 8d16bff8ce 35596b38ae 39ee175484 edac345c23 f467b896d8 5aa2a822e8 eb6ffca21e
cc54aad003 807399459d 0124398f10 5d8d054fdc 9eaaeedd28
.github/dependabot.yml (4 changed lines, vendored)
@@ -12,3 +12,7 @@ updates:
     # Allow both direct and indirect updates for all packages
     - dependency-type: "all"
 
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
.github/workflows/azurite.yml (2 changed lines, vendored)
@@ -8,7 +8,7 @@ jobs:
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
 
       - name: Set up Go
         uses: actions/setup-go@v6
.github/workflows/codeql.yml (108 added lines, new file, vendored)
@@ -0,0 +1,108 @@
|
||||
# For most projects, this workflow file will not need changing; you simply need
|
||||
# to commit it to your repository.
|
||||
#
|
||||
# You may wish to alter this file to override the set of languages analyzed,
|
||||
# or to provide custom queries or build logic.
|
||||
#
|
||||
# ******** NOTE ********
|
||||
# We have attempted to detect the languages in your repository. Please check
|
||||
# the `language` matrix defined below to confirm you have the correct set of
|
||||
# supported CodeQL languages.
|
||||
#
|
||||
name: "CodeQL Advanced"
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "main" ]
|
||||
pull_request:
|
||||
branches: [ "main" ]
|
||||
schedule:
|
||||
- cron: '21 17 * * 2'
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
name: Analyze (${{ matrix.language }})
|
||||
# Runner size impacts CodeQL analysis time. To learn more, please see:
|
||||
# - https://gh.io/recommended-hardware-resources-for-running-codeql
|
||||
# - https://gh.io/supported-runners-and-hardware-resources
|
||||
# - https://gh.io/using-larger-runners (GitHub.com only)
|
||||
# Consider using larger runners or machines with greater resources for possible analysis time improvements.
|
||||
runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
|
||||
permissions:
|
||||
# required for all workflows
|
||||
security-events: write
|
||||
|
||||
# required to fetch internal or private CodeQL packs
|
||||
packages: read
|
||||
|
||||
# only required for workflows in private repositories
|
||||
actions: read
|
||||
contents: read
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- language: actions
|
||||
build-mode: none
|
||||
- language: go
|
||||
build-mode: autobuild
|
||||
- language: javascript-typescript
|
||||
build-mode: none
|
||||
paths-ignore:
|
||||
# ignore embedded 3rd party assets
|
||||
- 'webui/web/assets/**'
|
||||
- language: python
|
||||
build-mode: none
|
||||
# CodeQL supports the following values keywords for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'rust', 'swift'
|
||||
# Use `c-cpp` to analyze code written in C, C++ or both
|
||||
# Use 'java-kotlin' to analyze code written in Java, Kotlin or both
|
||||
# Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
|
||||
# To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
|
||||
# see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
|
||||
# If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
|
||||
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v6
|
||||
|
||||
# Add any setup steps before running the `github/codeql-action/init` action.
|
||||
# This includes steps like installing compilers or runtimes (`actions/setup-node`
|
||||
# or others). This is typically only required for manual builds.
|
||||
# - name: Setup runtime (example)
|
||||
# uses: actions/setup-example@v1
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v4
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
build-mode: ${{ matrix.build-mode }}
|
||||
# If you wish to specify custom queries, you can do so here or in a config file.
|
||||
# By default, queries listed here will override any specified in a config file.
|
||||
# Prefix the list here with "+" to use these queries and those in the config file.
|
||||
|
||||
# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
|
||||
# queries: security-extended,security-and-quality
|
||||
|
||||
# If the analyze step fails for one of the languages you are analyzing with
|
||||
# "We were unable to automatically build your code", modify the matrix above
|
||||
# to set the build mode to "manual" for that language. Then modify this step
|
||||
# to build your code.
|
||||
# ℹ️ Command-line programs to run using the OS shell.
|
||||
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
|
||||
- name: Run manual build steps
|
||||
if: matrix.build-mode == 'manual'
|
||||
shell: bash
|
||||
run: |
|
||||
echo 'If you are using a "manual" build mode for one or more of the' \
|
||||
'languages you are analyzing, replace this with the commands to build' \
|
||||
'your code, for example:'
|
||||
echo ' make bootstrap'
|
||||
echo ' make release'
|
||||
exit 1
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v4
|
||||
with:
|
||||
category: "/language:${{matrix.language}}"
|
||||
.github/workflows/docker-bats.yml (2 changed lines, vendored)
@@ -8,7 +8,7 @@ jobs:
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
 
      - name: Build Docker Image
        run: |
.github/workflows/docker.yml (4 changed lines, vendored)
@@ -12,7 +12,7 @@ jobs:
       contents: read
     steps:
       - name: Checkout
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
 
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v3
@@ -43,7 +43,7 @@ jobs:
             ghcr.io/${{ github.repository }}
 
       - name: Build and push Docker images
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           push: true
.github/workflows/functional.yml (2 changed lines, vendored)
@@ -9,7 +9,7 @@ jobs:
     steps:
 
      - name: Checkout
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6
 
      - name: Set up Go
        uses: actions/setup-go@v6
.github/workflows/go.yml (4 changed lines, vendored)
@@ -9,7 +9,7 @@ jobs:
     steps:
 
      - name: Check out code into the Go module directory
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6
 
      - name: Set up Go
        uses: actions/setup-go@v6
@@ -46,7 +46,7 @@ jobs:
     steps:
 
      - name: Check out code
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6
 
      - name: Set up Go
        uses: actions/setup-go@v6
.github/workflows/goreleaser.yml (2 changed lines, vendored)
@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0
 
.github/workflows/host-style-tests.yml (2 changed lines, vendored)
@@ -7,7 +7,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
 
      - name: run host-style tests
        run: make test-host-style
.github/workflows/shellcheck.yml (2 changed lines, vendored)
@@ -9,7 +9,7 @@ jobs:
     steps:
 
      - name: Check out code
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6
 
      - name: Run checks
        run: |
.github/workflows/skips.yml (84 added lines, new file, vendored)
@@ -0,0 +1,84 @@
|
||||
name: skips check
|
||||
permissions: {}
|
||||
on: workflow_dispatch
|
||||
jobs:
|
||||
skip-ticket-check:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- name: Fail if any skip descriptions are empty or point to closed issues/PRs
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
# Find uncommented lines with "skip " (ignore lines whose first non-space char is #)
|
||||
mapfile -t MATCHES < <(
|
||||
git ls-files 'tests/test_*.sh' \
|
||||
| xargs -r grep -nE '^[[:space:]]*[^#][[:space:]]*skip[[:space:]]*$' \
|
||||
|| true
|
||||
)
|
||||
|
||||
if [ ${#MATCHES[@]} -ne 0 ]; then
|
||||
echo "${#MATCHES[@]} skip(s) lack a description"
|
||||
printf ' - %s\n' "${MATCHES[@]}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mapfile -t MATCHES < <(
|
||||
git ls-files 'tests/test_*.sh' \
|
||||
| xargs -r grep -nE '^[[:space:]]*[^#][[:space:]]*skip[[:space:]]*"https://github.com' \
|
||||
|| true
|
||||
)
|
||||
|
||||
urls=()
|
||||
for m in "${MATCHES[@]}"; do
|
||||
# Extract first GitHub issue/PR URL on the line:
|
||||
# supports /issues/123 and /pull/123 (with or without extra suffix)
|
||||
url="$(echo "$m" | grep -oE 'https://github\.com/[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+/(issues|pull)/[0-9]+' | head -n1 || true)"
|
||||
if [ -n "$url" ]; then
|
||||
urls+=("$url")
|
||||
fi
|
||||
done
|
||||
|
||||
if [ ${#urls[@]} -eq 0 ]; then
|
||||
echo "Found skip lines, but no recognizable GitHub issue/PR URLs."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "Found skip ticket URLs:"
|
||||
printf ' - %s\n' "${urls[@]}"
|
||||
|
||||
closed=()
|
||||
|
||||
for url in "${urls[@]}"; do
|
||||
# Parse owner/repo and number from URL
|
||||
# url format: https://github.com/OWNER/REPO/issues/123 or /pull/123
|
||||
path="${url#https://github.com/}"
|
||||
owner="$(echo "$path" | cut -d/ -f1)"
|
||||
repo="$(echo "$path" | cut -d/ -f2)"
|
||||
num="$(echo "$path" | cut -d/ -f4)"
|
||||
|
||||
# Issues API works for both issues and PRs; state=open/closed
|
||||
state="$(curl -fsSL \
|
||||
-H "Authorization: Bearer $GH_TOKEN" \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
"https://api.github.com/repos/$owner/$repo/issues/$num" \
|
||||
| python -c "import sys,json; print(json.load(sys.stdin).get('state',''))")"
|
||||
|
||||
echo "$url -> $state"
|
||||
if [ "$state" = "closed" ]; then
|
||||
closed+=("$url")
|
||||
fi
|
||||
done
|
||||
|
||||
if [ ${#closed[@]} -gt 0 ]; then
|
||||
echo "::error::Closed tickets referenced by uncommented skip URLs:"
|
||||
printf '::error:: - %s\n' "${closed[@]}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "All referenced tickets are open. ✅"
|
||||
.github/workflows/static.yml (2 changed lines, vendored)
@@ -9,7 +9,7 @@ jobs:
     steps:
 
      - name: Checkout
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6
        with:
          fetch-depth: 1
 
.github/workflows/system.yml (202 changed lines, vendored)
@@ -2,195 +2,31 @@ name: system tests
|
||||
permissions: {}
|
||||
on: pull_request
|
||||
jobs:
|
||||
generate:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
matrix: ${{ steps.make.outputs.matrix }}
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
- id: make
|
||||
run: |
|
||||
if ! matrix_output=$(tests/generate_matrix.sh 2>&1); then
|
||||
echo "error generating matrix: $matrix_output"
|
||||
exit 1
|
||||
fi
|
||||
MATRIX_JSON=$(echo -n "$matrix_output" | jq -c . )
|
||||
echo "matrix=$MATRIX_JSON" >> "$GITHUB_OUTPUT"
|
||||
|
||||
build:
|
||||
name: RunTests
|
||||
needs: generate
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- set: "mc, posix, non-file count, non-static, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "mc-non-file-count"
|
||||
RECREATE_BUCKETS: "true"
|
||||
DELETE_BUCKETS_AFTER_TEST: "true"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "mc, posix, file count, non-static, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "mc-file-count"
|
||||
RECREATE_BUCKETS: "true"
|
||||
DELETE_BUCKETS_AFTER_TEST: "true"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "REST, posix, non-static, base|acl|multipart|put-object, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "rest-base,rest-acl,rest-multipart,rest-put-object"
|
||||
RECREATE_BUCKETS: "true"
|
||||
DELETE_BUCKETS_AFTER_TEST: "true"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "REST, posix, non-static, chunked|checksum|versioning|bucket, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "rest-chunked,rest-checksum,rest-versioning,rest-bucket,rest-list-buckets,rest-create-bucket,rest-head-bucket"
|
||||
RECREATE_BUCKETS: "true"
|
||||
DELETE_BUCKETS_AFTER_TEST: "true"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "REST, posix, non-static, not implemented|rest-delete-bucket-ownership-controls|rest-delete-bucket-tagging, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "rest-not-implemented,rest-delete-bucket-ownership-controls,rest-delete-bucket-tagging"
|
||||
RECREATE_BUCKETS: "true"
|
||||
DELETE_BUCKETS_AFTER_TEST: "true"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "REST, posix, static, base|acl|multipart|put-object, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "rest-base,rest-acl,rest-multipart,rest-put-object"
|
||||
RECREATE_BUCKETS: "false"
|
||||
DELETE_BUCKETS_AFTER_TEST: "false"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "REST, posix, static, chunked|checksum|versioning|bucket, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "rest-chunked,rest-checksum,rest-versioning,rest-bucket,rest-list-buckets,rest-create-bucket,rest-head-bucket"
|
||||
RECREATE_BUCKETS: "false"
|
||||
DELETE_BUCKETS_AFTER_TEST: "false"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "REST, posix, static, not implemented|rest-delete-bucket-ownership-controls|rest-delete-bucket-tagging, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "rest-not-implemented,rest-delete-bucket-ownership-controls,rest-delete-bucket-tagging"
|
||||
RECREATE_BUCKETS: "false"
|
||||
DELETE_BUCKETS_AFTER_TEST: "false"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "REST, posix, static, rest-put-bucket-tagging|rest-get-bucket-location|rest-put-object-tagging, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "rest-put-bucket-tagging,rest-get-bucket-location,rest-put-object-tagging,rest-get-object-tagging,rest-list-object-versions"
|
||||
RECREATE_BUCKETS: "false"
|
||||
DELETE_BUCKETS_AFTER_TEST: "false"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "REST, posix, non-static, rest-put-bucket-tagging|rest-get-bucket-location|rest-put-object-tagging, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "rest-put-bucket-tagging,rest-get-bucket-location,rest-put-object-tagging,rest-get-object-tagging,rest-list-object-versions"
|
||||
RECREATE_BUCKETS: "true"
|
||||
DELETE_BUCKETS_AFTER_TEST: "true"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-west-1"
|
||||
- set: "s3, posix, non-file count, non-static, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "s3-non-file-count"
|
||||
RECREATE_BUCKETS: "true"
|
||||
DELETE_BUCKETS_AFTER_TEST: "true"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "s3, posix, file count, non-static, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "s3-file-count"
|
||||
RECREATE_BUCKETS: "true"
|
||||
DELETE_BUCKETS_AFTER_TEST: "true"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "s3api, posix, bucket|object|multipart, non-static, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "s3api-bucket,s3api-object,s3api-multipart"
|
||||
RECREATE_BUCKETS: "true"
|
||||
DELETE_BUCKETS_AFTER_TEST: "true"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "s3api, posix, policy, non-static, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "s3api-policy"
|
||||
RECREATE_BUCKETS: "true"
|
||||
DELETE_BUCKETS_AFTER_TEST: "true"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "s3api, posix, user, non-static, s3 IAM"
|
||||
IAM_TYPE: s3
|
||||
RUN_SET: "s3api-user"
|
||||
RECREATE_BUCKETS: "true"
|
||||
DELETE_BUCKETS_AFTER_TEST: "true"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "s3api, posix, bucket, static, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "s3api-bucket"
|
||||
RECREATE_BUCKETS: "false"
|
||||
DELETE_BUCKETS_AFTER_TEST: "false"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "s3api, posix, multipart, static, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "s3api-multipart"
|
||||
RECREATE_BUCKETS: "false"
|
||||
DELETE_BUCKETS_AFTER_TEST: "false"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "s3api, posix, object, static, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "s3api-object"
|
||||
RECREATE_BUCKETS: "false"
|
||||
DELETE_BUCKETS_AFTER_TEST: "false"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "s3api, posix, policy, static, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "s3api-policy"
|
||||
RECREATE_BUCKETS: "false"
|
||||
DELETE_BUCKETS_AFTER_TEST: "false"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "s3api, posix, user, static, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "s3api-user"
|
||||
RECREATE_BUCKETS: "false"
|
||||
DELETE_BUCKETS_AFTER_TEST: "false"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
# TODO fix/debug s3 gateway
|
||||
#- set: "s3api, s3, multipart|object, non-static, folder IAM"
|
||||
# IAM_TYPE: folder
|
||||
# RUN_SET: "s3api-bucket,s3api-object,s3api-multipart"
|
||||
# RECREATE_BUCKETS: "true"
|
||||
# BACKEND: "s3"
|
||||
#- set: "s3api, s3, policy|user, non-static, folder IAM"
|
||||
# IAM_TYPE: folder
|
||||
# RUN_SET: "s3api-policy,s3api-user"
|
||||
# RECREATE_BUCKETS: "true"
|
||||
# BACKEND: "s3"
|
||||
- set: "s3cmd, posix, file count, non-static, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "s3cmd-file-count"
|
||||
RECREATE_BUCKETS: "true"
|
||||
DELETE_BUCKETS_AFTER_TEST: "true"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "s3cmd, posix, non-user, non-static, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "s3cmd-non-user"
|
||||
RECREATE_BUCKETS: "true"
|
||||
DELETE_BUCKETS_AFTER_TEST: "true"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "s3cmd, posix, user, non-static, folder IAM"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "s3cmd-user"
|
||||
RECREATE_BUCKETS: "true"
|
||||
DELETE_BUCKETS_AFTER_TEST: "true"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
- set: "setup/remove static buckets scripts"
|
||||
IAM_TYPE: folder
|
||||
RUN_SET: "setup-remove-static"
|
||||
RECREATE_BUCKETS: "true"
|
||||
DELETE_BUCKETS_AFTER_TEST: "true"
|
||||
BACKEND: "posix"
|
||||
AWS_REGION: "us-east-1"
|
||||
matrix: ${{ fromJson(needs.generate.outputs.matrix) }}
|
||||
steps:
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v6
|
||||
@@ -287,7 +123,7 @@ jobs:
|
||||
if [[ $RECREATE_BUCKETS == "false" ]]; then
|
||||
BYPASS_ENV_FILE=true ${{ github.workspace }}/tests/setup_static.sh
|
||||
fi
|
||||
BYPASS_ENV_FILE=true ${{ github.workspace }}/tests/run.sh $RUN_SET
|
||||
BYPASS_ENV_FILE=true $HOME/bin/bats ${{ github.workspace }}/$RUN_SET
|
||||
|
||||
- name: Time report
|
||||
run: |
|
||||
|
||||
@@ -82,15 +82,15 @@ type AccessOptions struct {
|
||||
}
|
||||
|
||||
func VerifyAccess(ctx context.Context, be backend.Backend, opts AccessOptions) error {
|
||||
// Skip the access check for public bucket requests
|
||||
if opts.IsPublicRequest {
|
||||
return nil
|
||||
}
|
||||
if opts.Readonly {
|
||||
if opts.AclPermission == PermissionWrite || opts.AclPermission == PermissionWriteAcp {
|
||||
return s3err.GetAPIError(s3err.ErrAccessDenied)
|
||||
}
|
||||
}
|
||||
// Skip the access check for public bucket requests
|
||||
if opts.IsPublicRequest {
|
||||
return nil
|
||||
}
|
||||
if opts.IsRoot {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -246,7 +246,7 @@ func ParseACLOutput(data []byte, owner string) (GetBucketAclOutput, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool) ([]byte, error) {
|
||||
func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService) ([]byte, error) {
|
||||
if input == nil {
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
|
||||
}
|
||||
|
||||
@@ -254,6 +254,12 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
|
||||
}
|
||||
}
|
||||
|
||||
var versioningEnabled bool
|
||||
vers, err := be.GetBucketVersioning(ctx, bucket)
|
||||
if err == nil && vers.Status != nil {
|
||||
versioningEnabled = *vers.Status == types.BucketVersioningStatusEnabled
|
||||
}
|
||||
|
||||
for _, obj := range objects {
|
||||
var key, versionId string
|
||||
if obj.Key != nil {
|
||||
@@ -262,11 +268,21 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
|
||||
if obj.VersionId != nil {
|
||||
versionId = *obj.VersionId
|
||||
}
|
||||
// if bucket versioning is enabled and versionId isn't provided
|
||||
// no lock check is needed, as it leads to a new delete marker creation
|
||||
if versioningEnabled && versionId == "" {
|
||||
continue
|
||||
}
|
||||
checkRetention := true
|
||||
retentionData, err := be.GetObjectRetention(ctx, bucket, key, versionId)
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
|
||||
continue
|
||||
}
|
||||
// if a `MethodNotAllowed` error is returned, the object is a delete marker
// and no object lock check is needed
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrMethodNotAllowed)) {
|
||||
continue
|
||||
}
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchObjectLockConfiguration)) {
|
||||
checkRetention = false
|
||||
}
|
||||
|
||||
@@ -8,7 +8,8 @@ var IgnoredHeaders = Rules{
|
||||
// some clients use user-agent in signed headers
|
||||
// "User-Agent": struct{}{},
|
||||
"X-Amzn-Trace-Id": struct{}{},
|
||||
"Expect": struct{}{},
|
||||
// Expect might appear in signed headers
|
||||
// "Expect": struct{}{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -41,7 +41,7 @@ func TestIgnoredHeaders(t *testing.T) {
|
||||
}{
|
||||
"expect": {
|
||||
Header: "Expect",
|
||||
ExpectIgnored: true,
|
||||
ExpectIgnored: false,
|
||||
},
|
||||
"authorization": {
|
||||
Header: "Authorization",
|
||||
|
||||
@@ -157,7 +157,7 @@ func (az *Azure) CreateBucket(ctx context.Context, input *s3.CreateBucketInput,
|
||||
string(keyOwnership): backend.GetPtrFromString(encodeBytes([]byte(input.ObjectOwnership))),
|
||||
}
|
||||
|
||||
acct, ok := ctx.Value("account").(auth.Account)
|
||||
acct, ok := ctx.Value("bucket-owner").(auth.Account)
|
||||
if !ok {
|
||||
acct = auth.Account{}
|
||||
}
|
||||
@@ -364,6 +364,9 @@ func (az *Azure) PutObject(ctx context.Context, po s3response.PutObjectInput) (s
|
||||
if po.ObjectLockLegalHoldStatus == types.ObjectLockLegalHoldStatusOn {
|
||||
err := az.PutObjectLegalHold(ctx, *po.Bucket, *po.Key, "", true)
|
||||
if err != nil {
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrMissingObjectLockConfiguration)) {
|
||||
err = s3err.GetAPIError(s3err.ErrMissingObjectLockConfigurationNoSpaces)
|
||||
}
|
||||
return s3response.PutObjectOutput{}, err
|
||||
}
|
||||
}
|
||||
@@ -380,6 +383,9 @@ func (az *Azure) PutObject(ctx context.Context, po s3response.PutObjectInput) (s
|
||||
}
|
||||
err = az.PutObjectRetention(ctx, *po.Bucket, *po.Key, "", retParsed)
|
||||
if err != nil {
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrMissingObjectLockConfiguration)) {
|
||||
err = s3err.GetAPIError(s3err.ErrMissingObjectLockConfigurationNoSpaces)
|
||||
}
|
||||
return s3response.PutObjectOutput{}, err
|
||||
}
|
||||
}
|
||||
@@ -980,6 +986,9 @@ func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInpu
|
||||
if input.ObjectLockLegalHoldStatus != "" {
|
||||
err = az.PutObjectLegalHold(ctx, *input.Bucket, *input.Key, "", input.ObjectLockLegalHoldStatus == types.ObjectLockLegalHoldStatusOn)
|
||||
if err != nil {
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrMissingObjectLockConfiguration)) {
|
||||
err = s3err.GetAPIError(s3err.ErrMissingObjectLockConfigurationNoSpaces)
|
||||
}
|
||||
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
|
||||
}
|
||||
}
|
||||
@@ -998,6 +1007,9 @@ func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInpu
|
||||
}
|
||||
err = az.PutObjectRetention(ctx, *input.Bucket, *input.Key, "", retParsed)
|
||||
if err != nil {
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrMissingObjectLockConfiguration)) {
|
||||
err = s3err.GetAPIError(s3err.ErrMissingObjectLockConfigurationNoSpaces)
|
||||
}
|
||||
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
|
||||
}
|
||||
}
|
||||
@@ -1140,7 +1152,7 @@ func (az *Azure) CreateMultipartUpload(ctx context.Context, input s3response.Cre
|
||||
}
|
||||
|
||||
if len(bucketLock) == 0 {
|
||||
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
|
||||
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrMissingObjectLockConfigurationNoSpaces)
|
||||
}
|
||||
|
||||
var bucketLockConfig auth.BucketLockConfig
|
||||
@@ -1149,7 +1161,7 @@ func (az *Azure) CreateMultipartUpload(ctx context.Context, input s3response.Cre
|
||||
}
|
||||
|
||||
if !bucketLockConfig.Enabled {
|
||||
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
|
||||
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrMissingObjectLockConfigurationNoSpaces)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1839,7 +1851,7 @@ func (az *Azure) isBucketObjectLockEnabled(ctx context.Context, bucket string) e
|
||||
}
|
||||
|
||||
if len(cfg) == 0 {
|
||||
return s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
|
||||
return s3err.GetAPIError(s3err.ErrMissingObjectLockConfiguration)
|
||||
}
|
||||
|
||||
var bucketLockConfig auth.BucketLockConfig
|
||||
@@ -1848,7 +1860,7 @@ func (az *Azure) isBucketObjectLockEnabled(ctx context.Context, bucket string) e
|
||||
}
|
||||
|
||||
if !bucketLockConfig.Enabled {
|
||||
return s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
|
||||
return s3err.GetAPIError(s3err.ErrMissingObjectLockConfiguration)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -2085,22 +2097,20 @@ func (az *Azure) evaluateWritePreconditions(ctx context.Context, bucket, object,
|
||||
return nil
|
||||
}
|
||||
// call HeadObject to evaluate preconditions
|
||||
// if object doesn't exist, move forward with the object creation
|
||||
// otherwise return the error
|
||||
_, err := az.HeadObject(ctx, &s3.HeadObjectInput{
|
||||
Bucket: bucket,
|
||||
Key: object,
|
||||
IfMatch: ifMatch,
|
||||
IfNoneMatch: ifNoneMatch,
|
||||
res, err := az.HeadObject(ctx, &s3.HeadObjectInput{
|
||||
Bucket: bucket,
|
||||
Key: object,
|
||||
})
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrNotModified)) {
|
||||
return s3err.GetAPIError(s3err.ErrPreconditionFailed)
|
||||
}
|
||||
if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
var etag string
|
||||
if res != nil {
|
||||
etag = backend.GetStringFromPtr(res.ETag)
|
||||
}
|
||||
|
||||
return backend.EvaluateObjectPutPreconditions(etag, ifMatch, ifNoneMatch, !errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchKey)))
|
||||
}
|
||||
|
||||
func getAclFromMetadata(meta map[string]*string, key key) (*auth.ACL, error) {
|
||||
|
||||
@@ -495,6 +495,8 @@ func EvaluatePreconditions(etag string, modTime time.Time, preconditions PreCond
|
||||
return nil
|
||||
}
|
||||
|
||||
etag = strings.Trim(etag, `"`)
|
||||
|
||||
// convert all conditions to *bool to evaluate the conditions
|
||||
var ifMatch, ifNoneMatch, ifModSince, ifUnmodeSince *bool
|
||||
if preconditions.IfMatch != nil {
|
||||
@@ -581,6 +583,7 @@ func EvaluatePreconditions(etag string, modTime time.Time, preconditions PreCond
|
||||
|
||||
// EvaluateMatchPreconditions evaluates if-match and if-none-match preconditions
|
||||
func EvaluateMatchPreconditions(etag string, ifMatch, ifNoneMatch *string) error {
|
||||
etag = strings.Trim(etag, `"`)
|
||||
if ifMatch != nil && *ifMatch != etag {
|
||||
return errPreconditionFailed
|
||||
}
|
||||
@@ -591,6 +594,38 @@ func EvaluateMatchPreconditions(etag string, ifMatch, ifNoneMatch *string) error
|
||||
return nil
|
||||
}
|
||||
|
||||
// EvaluateObjectPutPreconditions evaluates if-match and if-none-match preconditions
|
||||
// for object PUT(PutObject, CompleteMultipartUpload) actions
|
||||
func EvaluateObjectPutPreconditions(etag string, ifMatch, ifNoneMatch *string, objExists bool) error {
|
||||
if ifMatch == nil && ifNoneMatch == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if ifNoneMatch != nil && *ifNoneMatch != "*" {
|
||||
return s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
}
|
||||
|
||||
if ifNoneMatch != nil && ifMatch != nil {
|
||||
return s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
}
|
||||
|
||||
if ifNoneMatch != nil && objExists {
|
||||
return s3err.GetAPIError(s3err.ErrPreconditionFailed)
|
||||
}
|
||||
|
||||
if ifMatch != nil && !objExists {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
|
||||
etag = strings.Trim(etag, `"`)
|
||||
|
||||
if ifMatch != nil && *ifMatch != etag {
|
||||
return s3err.GetAPIError(s3err.ErrPreconditionFailed)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
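For illustration, a minimal sketch of how the new EvaluateObjectPutPreconditions helper resolves the common cases; the import path and literal values below are assumptions for the example, not part of the change:

package main

import (
	"fmt"

	"github.com/versity/versitygw/backend"
)

func strPtr(s string) *string { return &s }

func main() {
	// If-None-Match: "*" while the object already exists -> precondition failed
	fmt.Println(backend.EvaluateObjectPutPreconditions(`"abc123"`, nil, strPtr("*"), true))

	// If-Match against a key that does not exist -> no such key
	fmt.Println(backend.EvaluateObjectPutPreconditions("", strPtr("abc123"), nil, false))

	// If-Match equal to the stored ETag (surrounding quotes are trimmed) -> <nil>
	fmt.Println(backend.EvaluateObjectPutPreconditions(`"abc123"`, strPtr("abc123"), nil, true))
}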
|
||||
|
||||
type ObjectDeletePreconditions struct {
|
||||
IfMatch *string
|
||||
IfMatchLastModTime *time.Time
|
||||
|
||||
@@ -17,6 +17,7 @@ package meta
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
@@ -98,6 +99,8 @@ func (s SideCar) DeleteAttribute(bucket, object, attribute string) error {
|
||||
return fmt.Errorf("failed to remove attribute: %v", err)
|
||||
}
|
||||
|
||||
s.cleanupEmptyDirs(metadir, bucket, object)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -135,5 +138,60 @@ func (s SideCar) DeleteAttributes(bucket, object string) error {
|
||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
return fmt.Errorf("failed to remove attributes: %v", err)
|
||||
}
|
||||
s.cleanupEmptyDirs(metadir, bucket, object)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s SideCar) cleanupEmptyDirs(metadir, bucket, object string) {
|
||||
removeIfEmpty(metadir)
|
||||
if bucket == "" {
|
||||
return
|
||||
}
|
||||
bucketDir := filepath.Join(s.dir, bucket)
|
||||
if object != "" {
|
||||
removeEmptyParents(filepath.Dir(metadir), bucketDir)
|
||||
}
|
||||
removeIfEmpty(bucketDir)
|
||||
}
|
||||
|
||||
func removeIfEmpty(dir string) {
|
||||
empty, err := isDirEmpty(dir)
|
||||
if err != nil || !empty {
|
||||
return
|
||||
}
|
||||
_ = os.Remove(dir)
|
||||
}
|
||||
|
||||
func removeEmptyParents(dir, stopDir string) {
|
||||
for {
|
||||
if dir == stopDir || dir == "." || dir == string(filepath.Separator) {
|
||||
return
|
||||
}
|
||||
empty, err := isDirEmpty(dir)
|
||||
if err != nil || !empty {
|
||||
return
|
||||
}
|
||||
err = os.Remove(dir)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
dir = filepath.Dir(dir)
|
||||
}
|
||||
}
|
||||
|
||||
func isDirEmpty(dir string) (bool, error) {
|
||||
f, err := os.Open(dir)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
ents, err := f.Readdirnames(1)
|
||||
if err == io.EOF {
|
||||
return true, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(ents) == 0, nil
|
||||
}
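A hedged sketch of the cleanup order these helpers imply, using hypothetical paths (the actual sidecar layout is defined elsewhere in the meta package):

// Suppose the sidecar root is "/meta" and the last attribute of object
// "photos/2024/cat.jpg" in bucket "b1" was just deleted, so metadir is
// "/meta/b1/photos/2024/cat.jpg". cleanupEmptyDirs then runs, roughly:
//
//   removeIfEmpty("/meta/b1/photos/2024/cat.jpg")           // the object's attribute dir
//   removeEmptyParents("/meta/b1/photos/2024", "/meta/b1")  // then 2024, then photos, stopping at the bucket dir
//   removeIfEmpty("/meta/b1")                                // finally the bucket dir itself, if now empty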
|
||||
|
||||
@@ -26,10 +26,6 @@ import (
|
||||
"github.com/versity/versitygw/s3err"
|
||||
)
|
||||
|
||||
const (
|
||||
xattrPrefix = "user."
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNoSuchKey is returned when the key does not exist.
|
||||
ErrNoSuchKey = errors.New("no such key")
|
||||
|
||||
backend/meta/xattr_freebsd.go (19 added lines, new file)
@@ -0,0 +1,19 @@
|
||||
// Copyright 2026 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
//go:build freebsd
|
||||
|
||||
package meta
|
||||
|
||||
const xattrPrefix = ""
|
||||
backend/meta/xattr_other.go (19 added lines, new file)
@@ -0,0 +1,19 @@
|
||||
// Copyright 2026 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
//go:build !freebsd
|
||||
|
||||
package meta
|
||||
|
||||
const xattrPrefix = "user."
|
||||
@@ -369,7 +369,7 @@ func (p *Posix) HeadBucket(_ context.Context, input *s3.HeadBucketInput) (*s3.He
|
||||
}
|
||||
|
||||
func (p *Posix) CreateBucket(ctx context.Context, input *s3.CreateBucketInput, acl []byte) error {
|
||||
acct, ok := ctx.Value("account").(auth.Account)
|
||||
acct, ok := ctx.Value("bucket-owner").(auth.Account)
|
||||
if !ok {
|
||||
acct = auth.Account{}
|
||||
}
|
||||
@@ -728,8 +728,17 @@ func (p *Posix) deleteNullVersionIdObject(bucket, key string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func isRemovableAttr(attr string) bool {
|
||||
switch attr {
|
||||
case objectLegalHoldKey, objectRetentionKey:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Creates a new copy(version) of an object in the versioning directory
|
||||
func (p *Posix) createObjVersion(bucket, key string, size int64, acc auth.Account) (versionPath string, err error) {
|
||||
func (p *Posix) createObjVersion(bucket, key string, size int64, acc auth.Account, removeAttributes bool) (versionPath string, err error) {
|
||||
sf, err := os.Open(filepath.Join(bucket, key))
|
||||
if err != nil {
|
||||
return "", err
|
||||
@@ -785,6 +794,14 @@ func (p *Posix) createObjVersion(bucket, key string, size int64, acc auth.Accoun
|
||||
if err != nil {
|
||||
return versionPath, fmt.Errorf("store %v attribute: %w", attr, err)
|
||||
}
|
||||
|
||||
// remove object lock attributes in delete marker
|
||||
if removeAttributes && isRemovableAttr(attr) {
|
||||
err := p.meta.DeleteAttribute(bucket, key, attr)
|
||||
if err != nil {
|
||||
return versionPath, fmt.Errorf("remove %s attribute: %w", attr, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := f.link(); err != nil {
|
||||
@@ -854,6 +871,30 @@ func getBoolPtr(b bool) *bool {
|
||||
return &b
|
||||
}
|
||||
|
||||
// ensureNotDeleteMarker returns a `MethodNotAllowed` error
// if the provided object (version) is a delete marker
|
||||
func (p *Posix) ensureNotDeleteMarker(bucket, object, versionId string) error {
|
||||
if !p.versioningEnabled() {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err := p.meta.RetrieveAttribute(nil, bucket, object, deleteMarkerKey)
|
||||
if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
|
||||
if versionId != "" {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchVersion)
|
||||
}
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if errors.Is(err, meta.ErrNoSuchKey) {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("get delete marker attr: %w", err)
|
||||
}
|
||||
|
||||
return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
|
||||
}
|
||||
|
||||
// Check if the given object is a delete marker
|
||||
func (p *Posix) isObjDeleteMarker(bucket, object string) (bool, error) {
|
||||
_, err := p.meta.RetrieveAttribute(nil, bucket, object, deleteMarkerKey)
|
||||
@@ -1342,6 +1383,9 @@ func (p *Posix) CreateMultipartUpload(ctx context.Context, mpu s3response.Create
|
||||
if mpu.ObjectLockLegalHoldStatus == types.ObjectLockLegalHoldStatusOn {
|
||||
err := p.PutObjectLegalHold(ctx, bucket, filepath.Join(objdir, uploadID), "", true)
|
||||
if err != nil {
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrMissingObjectLockConfiguration)) {
|
||||
err = s3err.GetAPIError(s3err.ErrMissingObjectLockConfigurationNoSpaces)
|
||||
}
|
||||
// cleanup object if returning error
|
||||
os.RemoveAll(filepath.Join(tmppath, uploadID))
|
||||
os.Remove(tmppath)
|
||||
@@ -1364,6 +1408,9 @@ func (p *Posix) CreateMultipartUpload(ctx context.Context, mpu s3response.Create
|
||||
}
|
||||
err = p.PutObjectRetention(ctx, bucket, filepath.Join(objdir, uploadID), "", retParsed)
|
||||
if err != nil {
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrMissingObjectLockConfiguration)) {
|
||||
err = s3err.GetAPIError(s3err.ErrMissingObjectLockConfigurationNoSpaces)
|
||||
}
|
||||
// cleanup object if returning error
|
||||
os.RemoveAll(filepath.Join(tmppath, uploadID))
|
||||
os.Remove(tmppath)
|
||||
@@ -1473,8 +1520,8 @@ func (p *Posix) CompleteMultipartUploadWithCopy(ctx context.Context, input *s3.C
|
||||
}
|
||||
|
||||
b, err := p.meta.RetrieveAttribute(nil, bucket, object, etagkey)
|
||||
if err == nil {
|
||||
err = backend.EvaluateMatchPreconditions(string(b), input.IfMatch, input.IfNoneMatch)
|
||||
if err == nil || errors.Is(err, fs.ErrNotExist) || errors.Is(err, meta.ErrNoSuchKey) {
|
||||
err = backend.EvaluateObjectPutPreconditions(string(b), input.IfMatch, input.IfNoneMatch, err == nil)
|
||||
if err != nil {
|
||||
return res, "", err
|
||||
}
|
||||
@@ -1699,7 +1746,7 @@ func (p *Posix) CompleteMultipartUploadWithCopy(ctx context.Context, input *s3.C
|
||||
|
||||
// if versioning is enabled, first create the file object version
|
||||
if p.versioningEnabled() && vEnabled && err == nil && !d.IsDir() {
|
||||
_, err := p.createObjVersion(bucket, object, d.Size(), acct)
|
||||
_, err := p.createObjVersion(bucket, object, d.Size(), acct, false)
|
||||
if err != nil {
|
||||
return res, "", fmt.Errorf("create object version: %w", err)
|
||||
}
|
||||
@@ -2913,8 +2960,8 @@ func (p *Posix) PutObjectWithPostFunc(ctx context.Context, po s3response.PutObje
|
||||
|
||||
// evaluate preconditions
|
||||
etagBytes, err := p.meta.RetrieveAttribute(nil, *po.Bucket, *po.Key, etagkey)
|
||||
if err == nil {
|
||||
err := backend.EvaluateMatchPreconditions(string(etagBytes), po.IfMatch, po.IfNoneMatch)
|
||||
if err == nil || errors.Is(err, fs.ErrNotExist) || errors.Is(err, meta.ErrNoSuchKey) {
|
||||
err = backend.EvaluateObjectPutPreconditions(string(etagBytes), po.IfMatch, po.IfNoneMatch, err == nil)
|
||||
if err != nil {
|
||||
return s3response.PutObjectOutput{}, err
|
||||
}
|
||||
@@ -2995,7 +3042,7 @@ func (p *Posix) PutObjectWithPostFunc(ctx context.Context, po s3response.PutObje
|
||||
isVersionIdMissing = len(vIdBytes) == 0
|
||||
}
|
||||
if !isVersionIdMissing {
|
||||
_, err := p.createObjVersion(*po.Bucket, *po.Key, d.Size(), acct)
|
||||
_, err := p.createObjVersion(*po.Bucket, *po.Key, d.Size(), acct, false)
|
||||
if err != nil {
|
||||
return s3response.PutObjectOutput{}, fmt.Errorf("create object version: %w", err)
|
||||
}
|
||||
@@ -3221,6 +3268,9 @@ func (p *Posix) PutObjectWithPostFunc(ctx context.Context, po s3response.PutObje
|
||||
if po.ObjectLockLegalHoldStatus == types.ObjectLockLegalHoldStatusOn {
|
||||
err := p.PutObjectLegalHold(ctx, *po.Bucket, *po.Key, "", true)
|
||||
if err != nil {
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrMissingObjectLockConfiguration)) {
|
||||
err = s3err.GetAPIError(s3err.ErrMissingObjectLockConfigurationNoSpaces)
|
||||
}
|
||||
return s3response.PutObjectOutput{}, err
|
||||
}
|
||||
}
|
||||
@@ -3237,6 +3287,9 @@ func (p *Posix) PutObjectWithPostFunc(ctx context.Context, po s3response.PutObje
|
||||
}
|
||||
err = p.PutObjectRetention(ctx, *po.Bucket, *po.Key, "", retParsed)
|
||||
if err != nil {
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrMissingObjectLockConfiguration)) {
|
||||
err = s3err.GetAPIError(s3err.ErrMissingObjectLockConfigurationNoSpaces)
|
||||
}
|
||||
return s3response.PutObjectOutput{}, err
|
||||
}
|
||||
}
|
||||
@@ -3343,7 +3396,7 @@ func (p *Posix) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput) (
|
||||
|
||||
// Creates a new object version in the versioning directory
|
||||
if p.isBucketVersioningEnabled(vStatus) || string(vId) != nullVersionId {
|
||||
_, err = p.createObjVersion(bucket, object, fi.Size(), acct)
|
||||
_, err = p.createObjVersion(bucket, object, fi.Size(), acct, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -3378,7 +3431,13 @@ func (p *Posix) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput) (
|
||||
versionPath := p.genObjVersionPath(bucket, object)
|
||||
|
||||
vId, err := p.meta.RetrieveAttribute(nil, bucket, object, versionIdKey)
|
||||
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) && !errors.Is(err, fs.ErrNotExist) {
|
||||
if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
|
||||
// AWS returns success if the object does not exist
|
||||
return &s3.DeleteObjectOutput{
|
||||
VersionId: input.VersionId,
|
||||
}, nil
|
||||
}
|
||||
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
|
||||
return nil, fmt.Errorf("get obj versionId: %w", err)
|
||||
}
|
||||
if errors.Is(err, meta.ErrNoSuchKey) {
|
||||
@@ -3779,10 +3838,10 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.GetO
|
||||
|
||||
var tagCount *int32
|
||||
tags, err := p.getAttrTags(bucket, object, versionId)
|
||||
if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrBucketTaggingNotFound)) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tags != nil {
|
||||
if len(tags) != 0 {
|
||||
tgCount := int32(len(tags))
|
||||
tagCount = &tgCount
|
||||
}
|
||||
@@ -3859,10 +3918,10 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.GetO
|
||||
|
||||
var tagCount *int32
|
||||
tags, err := p.getAttrTags(bucket, object, versionId)
|
||||
if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrBucketTaggingNotFound)) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tags != nil {
|
||||
if len(tags) != 0 {
|
||||
tgCount := int32(len(tags))
|
||||
tagCount = &tgCount
|
||||
}
|
||||
@@ -4084,10 +4143,10 @@ func (p *Posix) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.
|
||||
|
||||
var tagCount *int32
|
||||
tags, err := p.getAttrTags(bucket, object, versionId)
|
||||
if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrBucketTaggingNotFound)) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tags != nil {
|
||||
if len(tags) != 0 {
|
||||
tc := int32(len(tags))
|
||||
tagCount = &tc
|
||||
}
|
||||
@@ -4855,6 +4914,11 @@ func (p *Posix) GetObjectTagging(_ context.Context, bucket, object, versionId st
|
||||
}
|
||||
}
|
||||
|
||||
err = p.ensureNotDeleteMarker(bucket, object, versionId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p.getAttrTags(bucket, object, versionId)
|
||||
}
|
||||
|
||||
@@ -4868,6 +4932,10 @@ func (p *Posix) getAttrTags(bucket, object, versionId string) (map[string]string
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if errors.Is(err, meta.ErrNoSuchKey) {
|
||||
if object != "" {
|
||||
// return empty tag set for object tagging
|
||||
return tags, nil
|
||||
}
|
||||
return nil, s3err.GetAPIError(s3err.ErrBucketTaggingNotFound)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -4913,6 +4981,11 @@ func (p *Posix) PutObjectTagging(_ context.Context, bucket, object, versionId st
|
||||
}
|
||||
}
|
||||
|
||||
err = p.ensureNotDeleteMarker(bucket, object, versionId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if tags == nil {
|
||||
err = p.meta.DeleteAttribute(bucket, object, tagHdr)
|
||||
if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
|
||||
@@ -5087,7 +5160,7 @@ func (p *Posix) isBucketObjectLockEnabled(bucket string) error {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if errors.Is(err, meta.ErrNoSuchKey) {
|
||||
return s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
|
||||
return s3err.GetAPIError(s3err.ErrMissingObjectLockConfiguration)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("get object lock config: %w", err)
|
||||
@@ -5099,7 +5172,7 @@ func (p *Posix) isBucketObjectLockEnabled(bucket string) error {
|
||||
}
|
||||
|
||||
if !bucketLockConfig.Enabled {
|
||||
return s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
|
||||
return s3err.GetAPIError(s3err.ErrMissingObjectLockConfiguration)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -5204,6 +5277,11 @@ func (p *Posix) PutObjectLegalHold(_ context.Context, bucket, object, versionId
|
||||
}
|
||||
}
|
||||
|
||||
err = p.ensureNotDeleteMarker(bucket, object, versionId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = p.meta.StoreAttribute(nil, bucket, object, objectLegalHoldKey, statusData)
|
||||
if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
|
||||
if versionId != "" {
|
||||
@@ -5250,6 +5328,11 @@ func (p *Posix) GetObjectLegalHold(_ context.Context, bucket, object, versionId
|
||||
}
|
||||
}
|
||||
|
||||
err = p.ensureNotDeleteMarker(bucket, object, versionId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data, err := p.meta.RetrieveAttribute(nil, bucket, object, objectLegalHoldKey)
|
||||
if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
|
||||
if versionId != "" {
|
||||
@@ -5301,6 +5384,11 @@ func (p *Posix) PutObjectRetention(_ context.Context, bucket, object, versionId
|
||||
}
|
||||
}
|
||||
|
||||
err = p.ensureNotDeleteMarker(bucket, object, versionId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = p.meta.StoreAttribute(nil, bucket, object, objectRetentionKey, retention)
|
||||
if err != nil {
|
||||
return fmt.Errorf("set object lock config: %w", err)
|
||||
@@ -5341,6 +5429,11 @@ func (p *Posix) GetObjectRetention(_ context.Context, bucket, object, versionId
|
||||
}
|
||||
}
|
||||
|
||||
err = p.ensureNotDeleteMarker(bucket, object, versionId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data, err := p.meta.RetrieveAttribute(nil, bucket, object, objectRetentionKey)
|
||||
if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
|
||||
if versionId != "" {
|
||||
|
||||
@@ -19,16 +19,20 @@ import (
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/aws/smithy-go"
|
||||
"github.com/urfave/cli/v2"
|
||||
"github.com/versity/versitygw/auth"
|
||||
@@ -169,6 +173,66 @@ func adminCommand() *cli.Command {
|
||||
Usage: "Lists all the gateway buckets and owners.",
|
||||
Action: listBuckets,
|
||||
},
|
||||
{
|
||||
Name: "create-bucket",
|
||||
Usage: "Create a new bucket with owner",
|
||||
Action: createBucket,
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "owner",
|
||||
Usage: "access key id of the bucket owner",
|
||||
Required: true,
|
||||
Aliases: []string{"o"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "bucket",
|
||||
Usage: "bucket name",
|
||||
Required: true,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "acl",
|
||||
Usage: "canned ACL to apply to the bucket",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "grant-full-control",
|
||||
Usage: "Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "grant-read",
|
||||
Usage: "Allows grantee to list the objects in the bucket.",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "grant-read-acp",
|
||||
Usage: "Allows grantee to read the bucket ACL.",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "grant-write",
|
||||
Usage: `Allows grantee to create new objects in the bucket.
|
||||
For the bucket and object owners of existing objects, also allows deletions and overwrites of those objects.`,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "grant-write-acp",
|
||||
Usage: "Allows grantee to write the ACL for the applicable bucket.",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "create-bucket-configuration",
|
||||
Usage: "bucket configuration (LocationConstraint, Tags)",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "object-lock-enabled-for-bucket",
|
||||
Usage: "enable object lock for the bucket",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "no-object-lock-enabled-for-bucket",
|
||||
Usage: "disable object lock for the bucket",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "object-ownership",
|
||||
Usage: "bucket object ownership setting",
|
||||
Value: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Flags: []cli.Flag{
|
||||
// TODO: create a configuration file for this
|
||||
@@ -177,7 +241,6 @@ func adminCommand() *cli.Command {
|
||||
Usage: "admin access key id",
|
||||
EnvVars: []string{"ADMIN_ACCESS_KEY_ID", "ADMIN_ACCESS_KEY"},
|
||||
Aliases: []string{"a"},
|
||||
Required: true,
|
||||
Destination: &adminAccess,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
@@ -185,7 +248,6 @@ func adminCommand() *cli.Command {
|
||||
Usage: "admin secret access key",
|
||||
EnvVars: []string{"ADMIN_SECRET_ACCESS_KEY", "ADMIN_SECRET_KEY"},
|
||||
Aliases: []string{"s"},
|
||||
Required: true,
|
||||
Destination: &adminSecret,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
@@ -215,6 +277,32 @@ func adminCommand() *cli.Command {
|
||||
}
|
||||
}
|
||||
|
||||
// getAdminCreds returns the effective admin access key ID and secret key.
|
||||
// If admin-specific credentials are not provided, it falls back to the
|
||||
// root user credentials. Both resulting values must be non-empty;
|
||||
// otherwise, an error is returned.
|
||||
func getAdminCreds() (string, string, error) {
|
||||
access := adminAccess
|
||||
secret := adminSecret
|
||||
|
||||
// Fall back to the root user credentials
|
||||
if access == "" {
|
||||
access = rootUserAccess
|
||||
}
|
||||
if secret == "" {
|
||||
secret = rootUserSecret
|
||||
}
|
||||
|
||||
if access == "" {
|
||||
return "", "", errors.New("subcommand admin access key id is not set")
|
||||
}
|
||||
if secret == "" {
|
||||
return "", "", errors.New("subcommand admin secret access key is not set")
|
||||
}
|
||||
|
||||
return access, secret, nil
|
||||
}
|
||||
|
||||
func initHTTPClient() *http.Client {
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: allowInsecure},
|
||||
@@ -223,6 +311,10 @@ func initHTTPClient() *http.Client {
|
||||
}
|
||||
|
||||
func createUser(ctx *cli.Context) error {
|
||||
adminAccess, adminSecret, err := getAdminCreds()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
access, secret, role := ctx.String("access"), ctx.String("secret"), ctx.String("role")
|
||||
userID, groupID, projectID := ctx.Int("user-id"), ctx.Int("group-id"), ctx.Int("project-id")
|
||||
if access == "" || secret == "" {
|
||||
@@ -284,6 +376,10 @@ func createUser(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
func deleteUser(ctx *cli.Context) error {
|
||||
adminAccess, adminSecret, err := getAdminCreds()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
access := ctx.String("access")
|
||||
if access == "" {
|
||||
return fmt.Errorf("invalid input parameter for the user access key")
|
||||
@@ -327,6 +423,11 @@ func deleteUser(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
func updateUser(ctx *cli.Context) error {
|
||||
adminAccess, adminSecret, err := getAdminCreds()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
access, secret, userId, groupId, projectID, role :=
|
||||
ctx.String("access"),
|
||||
ctx.String("secret"),
|
||||
@@ -398,6 +499,11 @@ func updateUser(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
func listUsers(ctx *cli.Context) error {
|
||||
adminAccess, adminSecret, err := getAdminCreds()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/list-users", adminEndpoint), nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
@@ -442,6 +548,251 @@ func listUsers(ctx *cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type createBucketInput struct {
|
||||
LocationConstraint *string
|
||||
Tags []types.Tag
|
||||
}
|
||||
|
||||
// parseCreateBucketPayload parses the
|
||||
func parseCreateBucketPayload(input string) ([]byte, error) {
|
||||
input = strings.TrimSpace(input)
|
||||
if input == "" {
|
||||
return []byte{}, nil
|
||||
}
|
||||
|
||||
// try to parse as json, if the input starts with '{'
|
||||
if input[0] == '{' {
|
||||
var raw createBucketInput
|
||||
err := json.Unmarshal([]byte(input), &raw)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid JSON input: %w", err)
|
||||
}
|
||||
|
||||
return xml.Marshal(s3response.CreateBucketConfiguration{
|
||||
LocationConstraint: raw.LocationConstraint,
|
||||
TagSet: raw.Tags,
|
||||
})
|
||||
}
|
||||
|
||||
var config s3response.CreateBucketConfiguration
|
||||
|
||||
// parse as string - shorthand syntax
|
||||
inputParts, err := splitTopLevel(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, part := range inputParts {
|
||||
part = strings.TrimSpace(part)
|
||||
if strings.HasPrefix(part, "LocationConstraint=") {
|
||||
locConstraint := strings.TrimPrefix(part, "LocationConstraint=")
|
||||
config.LocationConstraint = &locConstraint
|
||||
} else if strings.HasPrefix(part, "Tags=") {
|
||||
tags, err := parseTagging(strings.TrimPrefix(part, "Tags="))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config.TagSet = tags
|
||||
} else {
|
||||
return nil, fmt.Errorf("invalid component: %v", part)
|
||||
}
|
||||
}
|
||||
|
||||
return xml.Marshal(config)
|
||||
}
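To illustrate the two accepted input forms, here is a rough usage sketch (not part of the change; it assumes it runs inside the same package and that "fmt" and "log" are imported; the region and tag values are made-up examples):

// Illustrative sketch only, not from the original commit.
func exampleCreateBucketPayload() {
	// JSON form of the create-bucket configuration.
	jsonPayload, err := parseCreateBucketPayload(`{"LocationConstraint":"us-east-1","Tags":[{"Key":"env","Value":"prod"}]}`)
	if err != nil {
		log.Fatal(err)
	}

	// Shorthand form expressing the same configuration.
	shortPayload, err := parseCreateBucketPayload(`LocationConstraint=us-east-1,Tags=[{Key=env,Value=prod}]`)
	if err != nil {
		log.Fatal(err)
	}

	// Both calls return an XML-encoded CreateBucketConfiguration document.
	fmt.Println(string(jsonPayload))
	fmt.Println(string(shortPayload))
}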
|
||||
|
||||
var errInvalidTagsSyntax = errors.New("invalid tags syntax")
|
||||
|
||||
// splitTopLevel splits a shorthand configuration string into top-level components.
|
||||
// The function splits only on commas that are not nested inside '{}' or '[]'.
|
||||
func splitTopLevel(s string) ([]string, error) {
|
||||
var parts []string
|
||||
start := 0
|
||||
depth := 0
|
||||
|
||||
for i, r := range s {
|
||||
switch r {
|
||||
case '{', '[':
|
||||
depth++
|
||||
case '}', ']':
|
||||
depth--
|
||||
case ',':
|
||||
if depth == 0 {
|
||||
parts = append(parts, s[start:i])
|
||||
start = i + 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if depth != 0 {
|
||||
return nil, errors.New("invalid string format")
|
||||
}
|
||||
|
||||
// add last segment
|
||||
if start < len(s) {
|
||||
parts = append(parts, s[start:])
|
||||
}
|
||||
|
||||
return parts, nil
|
||||
}
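For example (a sketch only, same package and "fmt" import assumed), commas nested inside brackets do not split the string:

// Illustrative sketch only, not from the original commit.
func exampleSplitTopLevel() {
	parts, err := splitTopLevel("LocationConstraint=us-west-2,Tags=[{Key=a,Value=1},{Key=b,Value=2}]")
	if err != nil {
		fmt.Println("invalid string format:", err)
		return
	}
	// parts has exactly two elements, because only the first comma sits at
	// bracket depth zero:
	//   parts[0] == "LocationConstraint=us-west-2"
	//   parts[1] == "Tags=[{Key=a,Value=1},{Key=b,Value=2}]"
	fmt.Println(parts)
}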
|
||||
|
||||
// parseTagging parses a tag set expressed in shorthand syntax into AWS CLI tags.
|
||||
// Expected format:
|
||||
//
|
||||
// [{Key=string,Value=string},{Key=string,Value=string}]
|
||||
//
|
||||
// The function validates bracket structure, splits tag objects at the top level,
|
||||
// and delegates individual tag parsing to parseTag. It returns an error if the
|
||||
// syntax is invalid or if any tag entry cannot be parsed.
|
||||
func parseTagging(input string) ([]types.Tag, error) {
|
||||
if len(input) < 2 {
|
||||
return nil, errInvalidTagsSyntax
|
||||
}
|
||||
|
||||
if input[0] != '[' || input[len(input)-1] != ']' {
|
||||
return nil, errInvalidTagsSyntax
|
||||
}
|
||||
// strip []
|
||||
input = input[1 : len(input)-1]
|
||||
|
||||
tagComponents, err := splitTopLevel(input)
|
||||
if err != nil {
|
||||
return nil, errInvalidTagsSyntax
|
||||
}
|
||||
result := make([]types.Tag, 0, len(tagComponents))
|
||||
for _, tagComponent := range tagComponents {
|
||||
tagComponent = strings.TrimSpace(tagComponent)
|
||||
tag, err := parseTag(tagComponent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result = append(result, tag)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// parseTag parses a single tag definition in shorthand form.
|
||||
// Expected format:
|
||||
//
|
||||
// {Key=string,Value=string}
|
||||
func parseTag(input string) (types.Tag, error) {
|
||||
input = strings.TrimSpace(input)
|
||||
|
||||
if len(input) < 2 {
|
||||
return types.Tag{}, errInvalidTagsSyntax
|
||||
}
|
||||
|
||||
if input[0] != '{' || input[len(input)-1] != '}' {
|
||||
return types.Tag{}, errInvalidTagsSyntax
|
||||
}
|
||||
|
||||
// strip {}
|
||||
input = input[1 : len(input)-1]
|
||||
|
||||
components := strings.Split(input, ",")
|
||||
if len(components) != 2 {
|
||||
return types.Tag{}, errInvalidTagsSyntax
|
||||
}
|
||||
|
||||
var key, value string
|
||||
|
||||
for _, c := range components {
|
||||
c = strings.TrimSpace(c)
|
||||
|
||||
switch {
|
||||
case strings.HasPrefix(c, "Key="):
|
||||
key = strings.TrimPrefix(c, "Key=")
|
||||
case strings.HasPrefix(c, "Value="):
|
||||
value = strings.TrimPrefix(c, "Value=")
|
||||
default:
|
||||
return types.Tag{}, errInvalidTagsSyntax
|
||||
}
|
||||
}
|
||||
|
||||
if key == "" {
|
||||
return types.Tag{}, errInvalidTagsSyntax
|
||||
}
|
||||
|
||||
return types.Tag{
|
||||
Key: &key,
|
||||
Value: &value,
|
||||
}, nil
|
||||
}
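Taken together, parseTagging and parseTag turn the shorthand tag syntax into AWS SDK tags; a small sketch of the expected behavior (same package and "fmt" import assumed, values made up):

// Illustrative sketch only, not from the original commit.
func exampleParseTagging() {
	tags, err := parseTagging("[{Key=env,Value=prod},{Key=team,Value=storage}]")
	if err != nil {
		fmt.Println("invalid tags syntax:", err)
		return
	}
	for _, tag := range tags {
		// Key and Value are *string fields on types.Tag, hence the dereference.
		fmt.Printf("%s=%s\n", *tag.Key, *tag.Value)
	}
	// Output:
	// env=prod
	// team=storage
}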
|
||||
|
||||
func createBucket(ctx *cli.Context) error {
|
||||
adminAccess, adminSecret, err := getAdminCreds()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bucket, owner := ctx.String("bucket"), ctx.String("owner")
|
||||
|
||||
payload, err := parseCreateBucketPayload(ctx.String("create-bucket-configuration"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid create bucket configuration: %w", err)
|
||||
}
|
||||
|
||||
hashedPayload := sha256.Sum256(payload)
|
||||
hexPayload := hex.EncodeToString(hashedPayload[:])
|
||||
|
||||
headers := map[string]string{
|
||||
"x-amz-content-sha256": hexPayload,
|
||||
"x-vgw-owner": owner,
|
||||
"x-amz-acl": ctx.String("acl"),
|
||||
"x-amz-grant-full-control": ctx.String("grant-full-control"),
|
||||
"x-amz-grant-read": ctx.String("grant-read"),
|
||||
"x-amz-grant-read-acp": ctx.String("grant-read-acp"),
|
||||
"x-amz-grant-write": ctx.String("grant-write"),
|
||||
"x-amz-grant-write-acp": ctx.String("grant-write-acp"),
|
||||
"x-amz-object-ownership": ctx.String("object-ownership"),
|
||||
}
|
||||
|
||||
if ctx.Bool("object-lock-enabled-for-bucket") {
|
||||
headers["x-amz-bucket-object-lock-enabled"] = "true"
|
||||
}
|
||||
if ctx.Bool("no-object-lock-enabled-for-bucket") {
|
||||
headers["x-amz-bucket-object-lock-enabled"] = "false"
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx.Context, http.MethodPatch, fmt.Sprintf("%s/%s/create", adminEndpoint, bucket), bytes.NewReader(payload))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for key, value := range headers {
|
||||
if value != "" {
|
||||
req.Header.Set(key, value)
|
||||
}
|
||||
}
|
||||
|
||||
signer := v4.NewSigner()
|
||||
err = signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to sign the request: %w", err)
|
||||
}
|
||||
|
||||
client := initHTTPClient()
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
return parseApiError(body)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
// account table formatting
|
||||
minwidth int = 2 // minimal cell width including any padding
|
||||
@@ -464,6 +815,11 @@ func printAcctTable(accs []auth.Account) {
|
||||
}
|
||||
|
||||
func changeBucketOwner(ctx *cli.Context) error {
|
||||
adminAccess, adminSecret, err := getAdminCreds()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bucket, owner := ctx.String("bucket"), ctx.String("owner")
|
||||
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/change-bucket-owner/?bucket=%v&owner=%v", adminEndpoint, bucket, owner), nil)
|
||||
if err != nil {
|
||||
@@ -515,6 +871,11 @@ func printBuckets(buckets []s3response.Bucket) {
|
||||
}
|
||||
|
||||
func listBuckets(ctx *cli.Context) error {
|
||||
adminAccess, adminSecret, err := getAdminCreds()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/list-buckets", adminEndpoint), nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
|
||||
@@ -16,13 +16,13 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
@@ -35,6 +35,7 @@ import (
|
||||
"github.com/versity/versitygw/s3api/utils"
|
||||
"github.com/versity/versitygw/s3event"
|
||||
"github.com/versity/versitygw/s3log"
|
||||
"github.com/versity/versitygw/webui"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -42,6 +43,7 @@ var (
|
||||
rootUserAccess string
|
||||
rootUserSecret string
|
||||
region string
|
||||
corsAllowOrigin string
|
||||
admCertFile, admKeyFile string
|
||||
certFile, keyFile string
|
||||
kafkaURL, kafkaTopic, kafkaKey string
|
||||
@@ -89,6 +91,9 @@ var (
|
||||
ipaUser, ipaPassword string
|
||||
ipaInsecure bool
|
||||
iamDebug bool
|
||||
webuiAddr string
|
||||
webuiCertFile, webuiKeyFile string
|
||||
webuiNoTLS bool
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -166,6 +171,30 @@ func initFlags() []cli.Flag {
|
||||
Destination: &port,
|
||||
Aliases: []string{"p"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "webui",
|
||||
Usage: "enable WebUI server on the specified listen address (e.g. ':7071', '127.0.0.1:7071', 'localhost:7071'; disabled when omitted)",
|
||||
EnvVars: []string{"VGW_WEBUI_PORT"},
|
||||
Destination: &webuiAddr,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "webui-cert",
|
||||
Usage: "TLS cert file for WebUI (defaults to --cert value when WebUI is enabled)",
|
||||
EnvVars: []string{"VGW_WEBUI_CERT"},
|
||||
Destination: &webuiCertFile,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "webui-key",
|
||||
Usage: "TLS key file for WebUI (defaults to --key value when WebUI is enabled)",
|
||||
EnvVars: []string{"VGW_WEBUI_KEY"},
|
||||
Destination: &webuiKeyFile,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "webui-no-tls",
|
||||
Usage: "disable TLS for WebUI even if TLS is configured for the gateway",
|
||||
EnvVars: []string{"VGW_WEBUI_NO_TLS"},
|
||||
Destination: &webuiNoTLS,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "access",
|
||||
Usage: "root user access key",
|
||||
@@ -188,6 +217,12 @@ func initFlags() []cli.Flag {
|
||||
Destination: ®ion,
|
||||
Aliases: []string{"r"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "cors-allow-origin",
|
||||
Usage: "default CORS Access-Control-Allow-Origin value (applied when no bucket CORS configuration exists, and for admin APIs)",
|
||||
EnvVars: []string{"VGW_CORS_ALLOW_ORIGIN"},
|
||||
Destination: &corsAllowOrigin,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "cert",
|
||||
Usage: "TLS cert file",
|
||||
@@ -638,6 +673,42 @@ func runGateway(ctx context.Context, be backend.Backend) error {
|
||||
return fmt.Errorf("root user access and secret key must be provided")
|
||||
}
|
||||
|
||||
webuiAddr = strings.TrimSpace(webuiAddr)
|
||||
if webuiAddr != "" && isAllDigits(webuiAddr) {
|
||||
webuiAddr = ":" + webuiAddr
|
||||
}
|
||||
|
||||
// WebUI runs in a browser and typically talks to the gateway/admin APIs cross-origin
|
||||
// (different port). If no bucket CORS configuration exists, those API responses need
|
||||
// a default Access-Control-Allow-Origin to be usable from the WebUI.
|
||||
if webuiAddr != "" && strings.TrimSpace(corsAllowOrigin) == "" {
|
||||
// A single Access-Control-Allow-Origin value cannot cover multiple specific
|
||||
// origins. Default to '*' for usability and print a warning so operators can
|
||||
// lock it down explicitly.
|
||||
corsAllowOrigin = "*"
|
||||
webuiScheme := "http"
|
||||
if !webuiNoTLS && (strings.TrimSpace(webuiCertFile) != "" || strings.TrimSpace(certFile) != "") {
|
||||
webuiScheme = "https"
|
||||
}
|
||||
|
||||
// Suggest a more secure explicit origin based on the actual WebUI listening interfaces.
|
||||
// (Browsers require an exact origin match; this is typically one chosen hostname/IP.)
|
||||
var suggestion string
|
||||
ips, ipsErr := getMatchingIPs(webuiAddr)
|
||||
_, webPrt, prtErr := net.SplitHostPort(webuiAddr)
|
||||
if ipsErr == nil && prtErr == nil && len(ips) > 0 {
|
||||
origins := make([]string, 0, len(ips))
|
||||
for _, ip := range ips {
|
||||
origins = append(origins, fmt.Sprintf("%s://%s:%s", webuiScheme, ip, webPrt))
|
||||
}
|
||||
suggestion = fmt.Sprintf("consider setting it to one of: %s (or your public hostname)", strings.Join(origins, ", "))
|
||||
} else {
|
||||
suggestion = fmt.Sprintf("consider setting it to %s://<host>:<port>", webuiScheme)
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "WARNING: --webui is enabled but --cors-allow-origin is not set; defaulting to '*'; %s\n", suggestion)
|
||||
}
|
||||
|
||||
utils.SetBucketNameValidationStrict(!disableStrictBucketNames)
|
||||
|
||||
if pprof != "" {
|
||||
@@ -649,6 +720,9 @@ func runGateway(ctx context.Context, be backend.Backend) error {
|
||||
}
|
||||
|
||||
var opts []s3api.Option
|
||||
if corsAllowOrigin != "" {
|
||||
opts = append(opts, s3api.WithCORSAllowOrigin(corsAllowOrigin))
|
||||
}
|
||||
|
||||
if certFile != "" || keyFile != "" {
|
||||
if certFile == "" {
|
||||
@@ -658,11 +732,12 @@ func runGateway(ctx context.Context, be backend.Backend) error {
|
||||
return fmt.Errorf("TLS cert specified without key file")
|
||||
}
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
cs := utils.NewCertStorage()
|
||||
err := cs.SetCertificate(certFile, keyFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tls: load certs: %v", err)
|
||||
}
|
||||
opts = append(opts, s3api.WithTLS(cert))
|
||||
opts = append(opts, s3api.WithTLS(cs))
|
||||
}
|
||||
if admPort == "" {
|
||||
opts = append(opts, s3api.WithAdminServer())
|
||||
@@ -786,6 +861,9 @@ func runGateway(ctx context.Context, be backend.Backend) error {
|
||||
|
||||
if admPort != "" {
|
||||
var opts []s3api.AdminOpt
|
||||
if corsAllowOrigin != "" {
|
||||
opts = append(opts, s3api.WithAdminCORSAllowOrigin(corsAllowOrigin))
|
||||
}
|
||||
|
||||
if admCertFile != "" || admKeyFile != "" {
|
||||
if admCertFile == "" {
|
||||
@@ -795,11 +873,12 @@ func runGateway(ctx context.Context, be backend.Backend) error {
|
||||
return fmt.Errorf("TLS cert specified without key file")
|
||||
}
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(admCertFile, admKeyFile)
|
||||
cs := utils.NewCertStorage()
|
||||
err = cs.SetCertificate(admCertFile, admKeyFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tls: load certs: %v", err)
|
||||
}
|
||||
opts = append(opts, s3api.WithAdminSrvTLS(cert))
|
||||
opts = append(opts, s3api.WithAdminSrvTLS(cs))
|
||||
}
|
||||
if quiet {
|
||||
opts = append(opts, s3api.WithAdminQuiet())
|
||||
@@ -808,18 +887,105 @@ func runGateway(ctx context.Context, be backend.Backend) error {
|
||||
opts = append(opts, s3api.WithAdminDebug())
|
||||
}
|
||||
|
||||
admSrv = s3api.NewAdminServer(be, middlewares.RootUserConfig{Access: rootUserAccess, Secret: rootUserSecret}, admPort, region, iam, loggers.AdminLogger, opts...)
|
||||
admSrv = s3api.NewAdminServer(be, middlewares.RootUserConfig{Access: rootUserAccess, Secret: rootUserSecret}, admPort, region, iam, loggers.AdminLogger, srv.Router.Ctrl, opts...)
|
||||
}
|
||||
|
||||
var webSrv *webui.Server
|
||||
webuiSSLEnabled := false
|
||||
webTLSCert := ""
|
||||
webTLSKey := ""
|
||||
if webuiAddr != "" {
|
||||
_, webPrt, err := net.SplitHostPort(webuiAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("webui listen address must be in the form ':port' or 'host:port': %w", err)
|
||||
}
|
||||
webPortNum, err := strconv.Atoi(webPrt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("webui port must be a number: %w", err)
|
||||
}
|
||||
if webPortNum < 0 || webPortNum > 65535 {
|
||||
return fmt.Errorf("webui port must be between 0 and 65535")
|
||||
}
|
||||
|
||||
var webOpts []webui.Option
|
||||
if !webuiNoTLS {
|
||||
// WebUI can either use explicitly provided TLS files or reuse the
|
||||
// gateway's TLS files by default.
|
||||
webTLSCert = webuiCertFile
|
||||
webTLSKey = webuiKeyFile
|
||||
if webTLSCert == "" && webTLSKey == "" {
|
||||
webTLSCert = certFile
|
||||
webTLSKey = keyFile
|
||||
}
|
||||
if webTLSCert != "" || webTLSKey != "" {
|
||||
if webTLSCert == "" {
|
||||
return fmt.Errorf("webui TLS key specified without cert file")
|
||||
}
|
||||
if webTLSKey == "" {
|
||||
return fmt.Errorf("webui TLS cert specified without key file")
|
||||
}
|
||||
webuiSSLEnabled = true
|
||||
|
||||
cs := utils.NewCertStorage()
|
||||
err := cs.SetCertificate(webTLSCert, webTLSKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tls: load certs: %v", err)
|
||||
}
|
||||
|
||||
webOpts = append(webOpts, webui.WithTLS(cs))
|
||||
}
|
||||
}
|
||||
|
||||
sslEnabled := certFile != ""
|
||||
admSSLEnabled := sslEnabled
|
||||
if admPort != "" {
|
||||
admSSLEnabled = admCertFile != ""
|
||||
}
|
||||
|
||||
gateways, err := buildServiceURLs(port, sslEnabled)
|
||||
if err != nil {
|
||||
return fmt.Errorf("webui: build gateway URLs: %w", err)
|
||||
}
|
||||
|
||||
adminGateways := gateways
|
||||
if admPort != "" {
|
||||
adminGateways, err = buildServiceURLs(admPort, admSSLEnabled)
|
||||
if err != nil {
|
||||
return fmt.Errorf("webui: build admin gateway URLs: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if quiet {
|
||||
webOpts = append(webOpts, webui.WithQuiet())
|
||||
}
|
||||
|
||||
webSrv = webui.NewServer(&webui.ServerConfig{
|
||||
ListenAddr: webuiAddr,
|
||||
Gateways: gateways,
|
||||
AdminGateways: adminGateways,
|
||||
Region: region,
|
||||
}, webOpts...)
|
||||
}
|
||||
|
||||
if !quiet {
|
||||
printBanner(port, admPort, certFile != "", admCertFile != "")
|
||||
printBanner(port, admPort, certFile != "", admCertFile != "", webuiAddr, webuiSSLEnabled)
|
||||
}
|
||||
|
||||
c := make(chan error, 2)
|
||||
servers := 1
|
||||
if admPort != "" {
|
||||
servers++
|
||||
}
|
||||
if webSrv != nil {
|
||||
servers++
|
||||
}
|
||||
c := make(chan error, servers)
|
||||
go func() { c <- srv.Serve() }()
|
||||
if admPort != "" {
|
||||
go func() { c <- admSrv.Serve() }()
|
||||
}
|
||||
if webSrv != nil {
|
||||
go func() { c <- webSrv.Serve() }()
|
||||
}
|
||||
|
||||
// for/select blocks until shutdown
|
||||
Loop:
|
||||
@@ -844,6 +1010,30 @@ Loop:
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
if certFile != "" && keyFile != "" {
|
||||
err = srv.CertStorage.SetCertificate(certFile, keyFile)
|
||||
if err != nil {
|
||||
debuglogger.InernalError(fmt.Errorf("srv cert reload failed: %w", err))
|
||||
} else {
|
||||
fmt.Printf("srv cert reloaded (cert: %s, key: %s)\n", certFile, keyFile)
|
||||
}
|
||||
}
|
||||
if admPort != "" && admCertFile != "" && admKeyFile != "" {
|
||||
err = admSrv.CertStorage.SetCertificate(admCertFile, admKeyFile)
|
||||
if err != nil {
|
||||
debuglogger.InernalError(fmt.Errorf("admSrv cert reload failed: %w", err))
|
||||
} else {
|
||||
fmt.Printf("admSrv cert reloaded (cert: %s, key: %s)\n", admCertFile, admKeyFile)
|
||||
}
|
||||
}
|
||||
if webSrv != nil && webTLSCert != "" && webTLSKey != "" {
|
||||
err := webSrv.CertStorage.SetCertificate(webTLSCert, webTLSKey)
|
||||
if err != nil {
|
||||
debuglogger.InernalError(fmt.Errorf("webSrv cert reload failed: %w", err))
|
||||
} else {
|
||||
fmt.Printf("webSrv cert reloaded (cert: %s, key: %s)\n", webTLSCert, webTLSKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
saveErr := err
|
||||
@@ -862,6 +1052,13 @@ Loop:
|
||||
}
|
||||
}
|
||||
|
||||
if webSrv != nil {
|
||||
err := webSrv.Shutdown()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "shutdown webui server: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
be.Shutdown()
|
||||
|
||||
err = iam.Shutdown()
|
||||
@@ -896,7 +1093,7 @@ Loop:
|
||||
return saveErr
|
||||
}
|
||||
|
||||
func printBanner(port, admPort string, ssl, admSsl bool) {
|
||||
func printBanner(port, admPort string, ssl, admSsl bool, webuiAddr string, webuiSsl bool) {
|
||||
interfaces, err := getMatchingIPs(port)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to match local IP addresses: %v\n", err)
|
||||
@@ -978,6 +1175,30 @@ func printBanner(port, admPort string, ssl, admSsl bool) {
|
||||
}
|
||||
}
|
||||
|
||||
if strings.TrimSpace(webuiAddr) != "" {
|
||||
webInterfaces, err := getMatchingIPs(webuiAddr)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to match webui port local IP addresses: %v\n", err)
|
||||
return
|
||||
}
|
||||
_, webPrt, err := net.SplitHostPort(webuiAddr)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to parse webui port: %v\n", err)
|
||||
return
|
||||
}
|
||||
lines = append(lines,
|
||||
centerText(""),
|
||||
leftText("WebUI listening on:"),
|
||||
)
|
||||
for _, ip := range webInterfaces {
|
||||
url := fmt.Sprintf("http://%s:%s", ip, webPrt)
|
||||
if webuiSsl {
|
||||
url = fmt.Sprintf("https://%s:%s", ip, webPrt)
|
||||
}
|
||||
lines = append(lines, leftText(" "+url))
|
||||
}
|
||||
}
|
||||
|
||||
// Print the top border
|
||||
fmt.Println("┌" + strings.Repeat("─", columnWidth-2) + "┐")
|
||||
|
||||
@@ -1053,6 +1274,42 @@ func getMatchingIPs(spec string) ([]string, error) {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func buildServiceURLs(spec string, ssl bool) ([]string, error) {
|
||||
interfaces, err := getMatchingIPs(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, prt, err := net.SplitHostPort(spec)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parse address/port: %w", err)
|
||||
}
|
||||
if len(interfaces) == 0 {
|
||||
interfaces = []string{"localhost"}
|
||||
}
|
||||
|
||||
scheme := "http"
|
||||
if ssl {
|
||||
scheme = "https"
|
||||
}
|
||||
urls := make([]string, 0, len(interfaces))
|
||||
for _, ip := range interfaces {
|
||||
urls = append(urls, fmt.Sprintf("%s://%s:%s", scheme, ip, prt))
|
||||
}
|
||||
return urls, nil
|
||||
}
|
||||
|
||||
func isAllDigits(s string) bool {
|
||||
if s == "" {
|
||||
return false
|
||||
}
|
||||
for _, r := range s {
|
||||
if r < '0' || r > '9' {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
const columnWidth = 70
|
||||
|
||||
func centerText(text string) string {
|
||||
|
||||
@@ -201,6 +201,42 @@ ROOT_SECRET_ACCESS_KEY=
|
||||
# to generate a default rules file "event_config.json" in the current directory.
|
||||
#VGW_EVENT_FILTER=
|
||||
|
||||
###########
|
||||
# Web GUI #
|
||||
###########
|
||||
|
||||
# The VGW_WEBUI_PORT option enables the Web GUI server on the specified
|
||||
# listening address. The Web GUI provides a browser-based interface for managing
|
||||
# users, buckets and objects. The format can be either ':port' to listen on all
|
||||
# interfaces (e.g., ':7071') or 'host:port' to listen on a specific interface
|
||||
# (e.g., '127.0.0.1:7071' or 'localhost:7071'). When omitted, the Web GUI is
|
||||
# disabled.
|
||||
#VGW_WEBUI_PORT=
|
||||
|
||||
# The VGW_WEBUI_CERT and VGW_WEBUI_KEY options specify the TLS certificate and
|
||||
# private key for the Web GUI server. If these are not specified and TLS is
|
||||
# configured for the gateway (VGW_CERT and VGW_KEY), the Web GUI will use the
|
||||
# same certificates as the gateway. If neither are specified, the Web GUI will
|
||||
# run without TLS (HTTP only). These options allow the Web GUI to use different
|
||||
# certificates than the main S3 gateway.
|
||||
#VGW_WEBUI_CERT=
|
||||
#VGW_WEBUI_KEY=
|
||||
|
||||
# The VGW_WEBUI_NO_TLS option disables TLS for the Web GUI even if TLS
|
||||
# certificates are configured for the gateway. Set to true to force the Web GUI
|
||||
# to use HTTP instead of HTTPS. This can be useful when running the Web GUI
|
||||
# behind a reverse proxy that handles TLS termination.
|
||||
#VGW_WEBUI_NO_TLS=false
|
||||
|
||||
# The VGW_CORS_ALLOW_ORIGIN option sets the default CORS (Cross-Origin Resource
|
||||
# Sharing) Access-Control-Allow-Origin header value. This header is applied to
|
||||
# responses when no bucket-specific CORS configuration exists, and for all admin
|
||||
# API responses. When the Web GUI is enabled and this option is not set, it
|
||||
# defaults to '*' (allow all origins) for usability. For production environments,
|
||||
# it is recommended to set this to a specific origin (e.g.,
|
||||
# 'https://webui.example.com') to improve security.
|
||||
#VGW_CORS_ALLOW_ORIGIN=
|
||||
|
||||
#######################
|
||||
# Debug / Diagnostics #
|
||||
#######################
|
||||
|
||||
go.mod (58 changed lines)
@@ -5,12 +5,12 @@ go 1.24.0
|
||||
toolchain go1.24.1
|
||||
|
||||
require (
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.4
|
||||
github.com/DataDog/datadog-go/v5 v5.8.2
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.0
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.94.0
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1
|
||||
github.com/aws/smithy-go v1.24.0
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/go-ldap/ldap/v3 v3.4.12
|
||||
@@ -19,18 +19,18 @@ require (
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/hashicorp/vault-client-go v0.4.3
|
||||
github.com/minio/crc64nvme v1.1.1
|
||||
github.com/nats-io/nats.go v1.47.0
|
||||
github.com/nats-io/nats.go v1.48.0
|
||||
github.com/oklog/ulid/v2 v2.1.1
|
||||
github.com/pkg/xattr v0.4.12
|
||||
github.com/rabbitmq/amqp091-go v1.10.0
|
||||
github.com/segmentio/kafka-go v0.4.49
|
||||
github.com/segmentio/kafka-go v0.4.50
|
||||
github.com/smira/go-statsd v1.3.4
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/urfave/cli/v2 v2.27.7
|
||||
github.com/valyala/fasthttp v1.68.0
|
||||
github.com/valyala/fasthttp v1.69.0
|
||||
github.com/versity/scoutfs-go v0.0.0-20240625221833-95fd765b760b
|
||||
golang.org/x/sync v0.19.0
|
||||
golang.org/x/sys v0.39.0
|
||||
golang.org/x/sys v0.40.0
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -38,14 +38,14 @@ require (
|
||||
github.com/Azure/go-ntlmssp v0.1.0 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
|
||||
github.com/clipperhouse/stringish v0.1.1 // indirect
|
||||
github.com/clipperhouse/uax29/v2 v2.3.0 // indirect
|
||||
github.com/clipperhouse/uax29/v2 v2.3.1 // indirect
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
@@ -55,15 +55,15 @@ require (
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/nats-io/nkeys v0.4.12 // indirect
|
||||
github.com/nats-io/nkeys v0.4.14 // indirect
|
||||
github.com/nats-io/nuid v1.0.1 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.22 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.25 // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/ryanuber/go-glob v1.0.0 // indirect
|
||||
golang.org/x/crypto v0.46.0 // indirect
|
||||
golang.org/x/net v0.48.0 // indirect
|
||||
golang.org/x/text v0.32.0 // indirect
|
||||
golang.org/x/crypto v0.47.0 // indirect
|
||||
golang.org/x/net v0.49.0 // indirect
|
||||
golang.org/x/text v0.33.0 // indirect
|
||||
golang.org/x/time v0.14.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
@@ -71,18 +71,18 @@ require (
|
||||
require (
|
||||
github.com/andybalholm/brotli v1.2.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.5
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.5
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.16
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.7
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.7
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.0
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
|
||||
github.com/klauspost/compress v1.18.2 // indirect
|
||||
github.com/klauspost/compress v1.18.3 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.19 // indirect
|
||||
|
||||
go.sum (116 changed lines)
@@ -1,5 +1,5 @@
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 h1:fou+2+WFTib47nS+nz/ozhEBnvU96bKHy6LjRsY4E28=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0/go.mod h1:t76Ruy8AHvUAC8GfMWJMa0ElSbuIcO03NLpynfbgsPA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
|
||||
@@ -8,8 +8,8 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDo
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3 h1:ZJJNFaQ86GVKQ9ehwqyAFE6pIfyicpuJ8IkVaPBc6/4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3/go.mod h1:URuDvhmATVKqHBH9/0nOiNKk0+YcwfQ3WkK5PqHKxc8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.4 h1:jWQK1GI+LeGGUKBADtcH2rRqPxYB1Ljwms5gFA2LqrM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.4/go.mod h1:8mwH4klAm9DUgR2EEHyEEAQlRDvLPyg5fQry3y+cDew=
|
||||
github.com/Azure/go-ntlmssp v0.1.0 h1:DjFo6YtWzNqNvQdrwEyr/e4nhU3vRiwenz5QX7sFz+A=
|
||||
github.com/Azure/go-ntlmssp v0.1.0/go.mod h1:NYqdhxd/8aAct/s4qSYZEerdPuH1liG2/X9DiVTbhpk=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
|
||||
@@ -25,50 +25,50 @@ github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktp
|
||||
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
|
||||
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
|
||||
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.5 h1:pz3duhAfUgnxbtVhIK39PGF/AHYyrzGEyRD9Og0QrE8=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.5/go.mod h1:xmDjzSUs/d0BB7ClzYPAZMmgQdrodNjPPhd6bGASwoE=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.5 h1:xMo63RlqP3ZZydpJDMBsH9uJ10hgHYfQFIk1cHDXrR4=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.5/go.mod h1:hhbH6oRcou+LpXfA/0vPElh/e0M3aFeOblE1sssAAEk=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 h1:80+uETIWS1BqjnN9uJ0dBUaETh+P1XwFy5vwHwK5r9k=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16/go.mod h1:wOOsYuxYuB/7FlnVtzeBYRcjSRtQpAW0hCP7tIULMwo=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.16 h1:NkjoiJoSpZqzsRcpM6rlk5AOCLro8JkK8UqekAm/hxM=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.16/go.mod h1:l2736DvrgbOinD65Ksh8fc/WQHBBlvsd+0/ZaxtsmGY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.0 h1:pQZGI0qQXeCHZHMeWzhwPu+4jkWrdrIb2dgpG4OKmco=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.0/go.mod h1:XGq5kImVqQT4HUNbbG+0Y8O74URsPNH7CGPg1s1HW5E=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 h1:CjMzUs78RDDv4ROu3JnJn/Ig1r6ZD7/T2DXLLRpejic=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16/go.mod h1:uVW4OLBqbJXSHJYA9svT9BluSvvwbzLQ2Crf6UPzR3c=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 h1:JqcdRG//czea7Ppjb+g/n4o8i/R50aTBHkA7vu0lK+k=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17/go.mod h1:CO+WeGmIdj/MlPel2KwID9Gt7CNq4M65HUfBW97liM0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 h1:DIBqIrJ7hv+e4CmIk2z3pyKT+3B6qVMgRsawHiR3qso=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7/go.mod h1:vLm00xmBke75UmpNvOcZQ/Q30ZFjbczeLFqGx5urmGo=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 h1:NSbvS17MlI2lurYgXnCOLvCFX38sBW4eiVER7+kkgsU=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16/go.mod h1:SwT8Tmqd4sA6G1qaGdzWCJN99bUmPGHfRwwq3G5Qb+A=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.94.0 h1:SWTxh/EcUCDVqi/0s26V6pVUq0BBG7kx0tDTmF/hCgA=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.94.0/go.mod h1:79S2BdqCJpScXZA2y+cpZuocWsjGjJINyXnOsf5DTz8=
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 h1:HpI7aMmJ+mm1wkSHIA2t5EaFFv5EFYXePW30p1EIrbQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.4/go.mod h1:C5RdGMYGlfM0gYq/tifqgn4EbyX99V15P2V3R+VHbQU=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.7 h1:eYnlt6QxnFINKzwxP5/Ucs1vkG7VT3Iezmvfgc2waUw=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.7/go.mod h1:+fWt2UHSb4kS7Pu8y+BMBvJF0EWx+4H0hzNwtDNRTrg=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 h1:AHDr0DaHIAo8c9t1emrzAlVDFp+iMMKnPdYy6XO4MCE=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12/go.mod h1:GQ73XawFFiWxyWXMHWfhiomvP3tXtdNar/fi8z18sx0=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 h1:Z5EiPIzXKewUQK0QTMkutjiaPVeVYXX7KIqhXu/0fXs=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8/go.mod h1:FsTpJtvC4U1fyDXk7c71XoDv3HlRm8V3NiYLeYLh5YE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 h1:bGeHBsGZx0Dvu/eJC0Lh9adJa3M1xREcndxLNZlve2U=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17/go.mod h1:dcW24lbU0CzHusTE8LLHhRLI42ejmINN8Lcr22bwh/g=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1 h1:C2dUPSnEpy4voWFIq3JNd8gN0Y5vYGDo44eUE58a/p8=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1/go.mod h1:5jggDlZ2CLQhwJBiZJb4vfk4f0GxWdEDruWKEJ1xOdo=
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
|
||||
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
|
||||
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs=
|
||||
github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA=
|
||||
github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4=
|
||||
github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
|
||||
github.com/clipperhouse/uax29/v2 v2.3.1 h1:RjM8gnVbFbgI67SBekIC7ihFpyXwRPYWXn9BZActHbw=
|
||||
github.com/clipperhouse/uax29/v2 v2.3.1/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -117,8 +117,8 @@ github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZ
|
||||
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
|
||||
github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
|
||||
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
|
||||
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
|
||||
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
||||
github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw=
|
||||
github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
@@ -137,17 +137,17 @@ github.com/minio/crc64nvme v1.1.1 h1:8dwx/Pz49suywbO+auHCBpCtlW1OfpcLN7wYgVR6wAI
|
||||
github.com/minio/crc64nvme v1.1.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/nats-io/nats.go v1.47.0 h1:YQdADw6J/UfGUd2Oy6tn4Hq6YHxCaJrVKayxxFqYrgM=
|
||||
github.com/nats-io/nats.go v1.47.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
|
||||
github.com/nats-io/nkeys v0.4.12 h1:nssm7JKOG9/x4J8II47VWCL1Ds29avyiQDRn0ckMvDc=
|
||||
github.com/nats-io/nkeys v0.4.12/go.mod h1:MT59A1HYcjIcyQDJStTfaOY6vhy9XTUjOFo+SVsvpBg=
|
||||
github.com/nats-io/nats.go v1.48.0 h1:pSFyXApG+yWU/TgbKCjmm5K4wrHu86231/w84qRVR+U=
|
||||
github.com/nats-io/nats.go v1.48.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
|
||||
github.com/nats-io/nkeys v0.4.14 h1:ofx8UiyHP5S4Q52/THHucCJsMWu6zhf4DLh0U2593HE=
|
||||
github.com/nats-io/nkeys v0.4.14/go.mod h1:seG5UKwYdZXb7M1y1vvu53mNh3xq2B6um/XUgYAgvkM=
|
||||
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
|
||||
github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
|
||||
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
|
||||
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
|
||||
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pierrec/lz4/v4 v4.1.25 h1:kocOqRffaIbU5djlIBr7Wh+cx82C0vtFb0fOurZHqD0=
|
||||
github.com/pierrec/lz4/v4 v4.1.25/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
@@ -163,8 +163,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
|
||||
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
|
||||
github.com/segmentio/kafka-go v0.4.49 h1:GJiNX1d/g+kG6ljyJEoi9++PUMdXGAxb7JGPiDCuNmk=
|
||||
github.com/segmentio/kafka-go v0.4.49/go.mod h1:Y1gn60kzLEEaW28YshXyk2+VCUKbJ3Qr6DrnT3i4+9E=
|
||||
github.com/segmentio/kafka-go v0.4.50 h1:mcyC3tT5WeyWzrFbd6O374t+hmcu1NKt2Pu1L3QaXmc=
|
||||
github.com/segmentio/kafka-go v0.4.50/go.mod h1:Y1gn60kzLEEaW28YshXyk2+VCUKbJ3Qr6DrnT3i4+9E=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smira/go-statsd v1.3.4 h1:kBYWcLSGT+qC6JVbvfz48kX7mQys32fjDOPrfmsSx2c=
|
||||
github.com/smira/go-statsd v1.3.4/go.mod h1:RjdsESPgDODtg1VpVVf9MJrEW2Hw0wtRNbmB1CAhu6A=
|
||||
@@ -183,8 +183,8 @@ github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU=
|
||||
github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.68.0 h1:v12Nx16iepr8r9ySOwqI+5RBJ/DqTxhOy1HrHoDFnok=
|
||||
github.com/valyala/fasthttp v1.68.0/go.mod h1:5EXiRfYQAoiO/khu4oU9VISC/eVY6JqmSpPJoHCKsz4=
|
||||
github.com/valyala/fasthttp v1.69.0 h1:fNLLESD2SooWeh2cidsuFtOcrEi4uB4m1mPrkJMZyVI=
|
||||
github.com/valyala/fasthttp v1.69.0/go.mod h1:4wA4PfAraPlAsJ5jMSqCE2ug5tqUPwKXxVj8oNECGcw=
|
||||
github.com/versity/scoutfs-go v0.0.0-20240625221833-95fd765b760b h1:kuqsuYRMG1c6YXBAQvWO7CiurlpYtjDJWI6oZ2K/ZZE=
|
||||
github.com/versity/scoutfs-go v0.0.0-20240625221833-95fd765b760b/go.mod h1:gJsq73k+4685y+rbDIpPY8i/5GbsiwP6JFoFyUDB1fQ=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
|
||||
@@ -202,14 +202,14 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
|
||||
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
|
||||
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
|
||||
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
|
||||
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
|
||||
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
|
||||
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||
@@ -224,13 +224,13 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
|
||||
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
|
||||
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
|
||||
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
|
||||
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
|
||||
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
|
||||
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
|
||||
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
||||
@@ -125,6 +125,7 @@ var (
|
||||
ActionAdminChangeBucketOwner = "admin_ChangeBucketOwner"
|
||||
ActionAdminListUsers = "admin_ListUsers"
|
||||
ActionAdminListBuckets = "admin_ListBuckets"
|
||||
ActionAdminCreateBucket = "admin_CreateBucket"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
||||
@@ -24,10 +24,12 @@ import (
|
||||
"github.com/versity/versitygw/s3log"
|
||||
)
|
||||
|
||||
type S3AdminRouter struct{}
|
||||
type S3AdminRouter struct {
|
||||
s3api controllers.S3ApiController
|
||||
}
|
||||
|
||||
func (ar *S3AdminRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, root middlewares.RootUserConfig, region string, debug bool) {
|
||||
ctrl := controllers.NewAdminController(iam, be, logger)
|
||||
func (ar *S3AdminRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, root middlewares.RootUserConfig, region string, debug bool, corsAllowOrigin string) {
|
||||
ctrl := controllers.NewAdminController(iam, be, logger, ar.s3api)
|
||||
services := &controllers.Services{
|
||||
Logger: logger,
|
||||
}
|
||||
@@ -37,40 +39,80 @@ func (ar *S3AdminRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMSe
|
||||
controllers.ProcessHandlers(ctrl.CreateUser, metrics.ActionAdminCreateUser, services,
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminCreateUser),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/create-user",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
// DeleteUsers admin api
|
||||
app.Patch("/delete-user",
|
||||
controllers.ProcessHandlers(ctrl.DeleteUser, metrics.ActionAdminDeleteUser, services,
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminDeleteUser),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/delete-user",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
// UpdateUser admin api
|
||||
app.Patch("/update-user",
|
||||
controllers.ProcessHandlers(ctrl.UpdateUser, metrics.ActionAdminUpdateUser, services,
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminUpdateUser),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/update-user",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
// ListUsers admin api
|
||||
app.Patch("/list-users",
|
||||
controllers.ProcessHandlers(ctrl.ListUsers, metrics.ActionAdminListUsers, services,
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminListUsers),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/list-users",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
// ChangeBucketOwner admin api
|
||||
app.Patch("/change-bucket-owner",
|
||||
controllers.ProcessHandlers(ctrl.ChangeBucketOwner, metrics.ActionAdminChangeBucketOwner, services,
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminChangeBucketOwner),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/change-bucket-owner",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
// ListBucketsAndOwners admin api
|
||||
app.Patch("/list-buckets",
|
||||
controllers.ProcessHandlers(ctrl.ListBuckets, metrics.ActionAdminListBuckets, services,
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminListBuckets),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/list-buckets",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
app.Patch("/:bucket/create",
|
||||
controllers.ProcessHandlers(ctrl.CreateBucket, metrics.ActionAdminCreateBucket, services,
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminCreateBucket),
|
||||
))
|
||||
app.Options("/:bucket/create",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -15,33 +15,36 @@
package s3api

import (
	"crypto/tls"

	"github.com/gofiber/fiber/v2"
	"github.com/gofiber/fiber/v2/middleware/logger"
	"github.com/gofiber/fiber/v2/middleware/recover"
	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/debuglogger"
	"github.com/versity/versitygw/s3api/controllers"
	"github.com/versity/versitygw/s3api/middlewares"
	"github.com/versity/versitygw/s3api/utils"
	"github.com/versity/versitygw/s3log"
)

type S3AdminServer struct {
	app     *fiber.App
	backend backend.Backend
	router  *S3AdminRouter
	port    string
	cert    *tls.Certificate
	quiet   bool
	debug   bool
	app             *fiber.App
	backend         backend.Backend
	router          *S3AdminRouter
	port            string
	CertStorage     *utils.CertStorage
	quiet           bool
	debug           bool
	corsAllowOrigin string
}

func NewAdminServer(be backend.Backend, root middlewares.RootUserConfig, port, region string, iam auth.IAMService, l s3log.AuditLogger, opts ...AdminOpt) *S3AdminServer {
func NewAdminServer(be backend.Backend, root middlewares.RootUserConfig, port, region string, iam auth.IAMService, l s3log.AuditLogger, ctrl controllers.S3ApiController, opts ...AdminOpt) *S3AdminServer {
	server := &S3AdminServer{
		backend: be,
		router:  new(S3AdminRouter),
		port:    port,
		router: &S3AdminRouter{
			s3api: ctrl,
		},
		port: port,
	}

	for _, opt := range opts {
@@ -67,21 +70,25 @@ func NewAdminServer(be backend.Backend, root middlewares.RootUserConfig, port, r
	// Logging middlewares
	if !server.quiet {
		app.Use(logger.New(logger.Config{
			Format: "${time} | ${status} | ${latency} | ${ip} | ${method} | ${path} | ${error} | ${queryParams}\n",
			Format: "${time} | adm | ${status} | ${latency} | ${ip} | ${method} | ${path} | ${error} | ${queryParams}\n",
		}))
	}
	app.Use(controllers.WrapMiddleware(middlewares.DecodeURL, l, nil))
	app.Use(middlewares.DebugLogger())

	server.router.Init(app, be, iam, l, root, region, server.debug)
	// initialize the debug logger in debug mode
	if debuglogger.IsDebugEnabled() {
		app.Use(middlewares.DebugLogger())
	}

	server.router.Init(app, be, iam, l, root, region, server.debug, server.corsAllowOrigin)

	return server
}

type AdminOpt func(s *S3AdminServer)

func WithAdminSrvTLS(cert tls.Certificate) AdminOpt {
	return func(s *S3AdminServer) { s.cert = &cert }
func WithAdminSrvTLS(cs *utils.CertStorage) AdminOpt {
	return func(s *S3AdminServer) { s.CertStorage = cs }
}

// WithQuiet silences default logging output
@@ -94,9 +101,20 @@ func WithAdminDebug() AdminOpt {
	return func(s *S3AdminServer) { s.debug = true }
}

// WithAdminCORSAllowOrigin sets the default CORS Access-Control-Allow-Origin value
// for the standalone admin server.
func WithAdminCORSAllowOrigin(origin string) AdminOpt {
	return func(s *S3AdminServer) { s.corsAllowOrigin = origin }
}

func (sa *S3AdminServer) Serve() (err error) {
	if sa.cert != nil {
		return sa.app.ListenTLSWithCertificate(sa.port, *sa.cert)
	if sa.CertStorage != nil {
		ln, err := utils.NewTLSListener(sa.app.Config().Network, sa.port, sa.CertStorage.GetCertificate)
		if err != nil {
			return err
		}

		return sa.app.Listener(ln)
	}
	return sa.app.Listen(sa.port)
}

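// A minimal wiring sketch, not part of this changeset, assuming this package's
// imports: the reworked admin server now receives the shared S3ApiController
// and takes its TLS material from *utils.CertStorage, while
// WithAdminCORSAllowOrigin feeds the corsAllowOrigin field used above. The
// port, region, origin, and the caller-supplied be/iam/root/logger/certs
// values are placeholders.
func newAdminServerSketch(
	be backend.Backend,
	iam auth.IAMService,
	root middlewares.RootUserConfig,
	logger s3log.AuditLogger,
	certs *utils.CertStorage,
) error {
	ctrl := controllers.New(be, iam, nil, nil, nil, false, "")
	srv := NewAdminServer(be, root, ":7071", "us-east-1", iam, logger, ctrl,
		WithAdminSrvTLS(certs),
		WithAdminCORSAllowOrigin("https://console.example.com"),
	)
	return srv.Serve()
}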
@@ -28,13 +28,14 @@ import (
)

type AdminController struct {
	iam auth.IAMService
	be  backend.Backend
	l   s3log.AuditLogger
	iam   auth.IAMService
	be    backend.Backend
	l     s3log.AuditLogger
	s3api S3ApiController
}

func NewAdminController(iam auth.IAMService, be backend.Backend, l s3log.AuditLogger) AdminController {
	return AdminController{iam: iam, be: be, l: l}
func NewAdminController(iam auth.IAMService, be backend.Backend, l s3log.AuditLogger, s3api S3ApiController) AdminController {
	return AdminController{iam: iam, be: be, l: l, s3api: s3api}
}

func (c AdminController) CreateUser(ctx *fiber.Ctx) (*Response, error) {
@@ -161,3 +162,39 @@ func (c AdminController) ListBuckets(ctx *fiber.Ctx) (*Response, error) {
		MetaOpts: &MetaOptions{},
	}, err
}

func (c AdminController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
	owner := ctx.Get("x-vgw-owner")
	if owner == "" {
		return &Response{
			MetaOpts: &MetaOptions{},
		}, s3err.GetAPIError(s3err.ErrAdminEmptyBucketOwnerHeader)
	}

	acc, err := c.iam.GetUserAccount(owner)
	if err != nil {
		if err == auth.ErrNoSuchUser {
			err = s3err.GetAPIError(s3err.ErrAdminUserNotFound)
		}

		return &Response{
			MetaOpts: &MetaOptions{},
		}, err
	}

	// store the owner access key id in context
	ctx.Context().SetUserValue("bucket-owner", acc)

	_, err = c.s3api.CreateBucket(ctx)
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{},
		}, err
	}

	return &Response{
		MetaOpts: &MetaOptions{
			Status: http.StatusCreated,
		},
	}, nil
}

@@ -21,6 +21,7 @@ import (
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/backend"
|
||||
@@ -32,9 +33,10 @@ import (
|
||||
|
||||
func TestNewAdminController(t *testing.T) {
|
||||
type args struct {
|
||||
iam auth.IAMService
|
||||
be backend.Backend
|
||||
l s3log.AuditLogger
|
||||
iam auth.IAMService
|
||||
be backend.Backend
|
||||
l s3log.AuditLogger
|
||||
s3api S3ApiController
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -49,7 +51,7 @@ func TestNewAdminController(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := NewAdminController(tt.args.iam, tt.args.be, tt.args.l)
|
||||
got := NewAdminController(tt.args.iam, tt.args.be, tt.args.l, tt.args.s3api)
|
||||
assert.Equal(t, got, tt.want)
|
||||
})
|
||||
}
|
||||
@@ -577,3 +579,126 @@ func TestAdminController_ListBuckets(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAdminController_CreateBucket(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input testInput
|
||||
output testOutput
|
||||
}{
|
||||
{
|
||||
name: "empty owner header",
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrAdminEmptyBucketOwnerHeader),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fails to get user account",
|
||||
input: testInput{
|
||||
extraMockErr: s3err.GetAPIError(s3err.ErrInternalError),
|
||||
headers: map[string]string{
|
||||
"x-vgw-owner": "access",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrInternalError),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "user not found",
|
||||
input: testInput{
|
||||
extraMockErr: auth.ErrNoSuchUser,
|
||||
headers: map[string]string{
|
||||
"x-vgw-owner": "access",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrAdminUserNotFound),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "backend returns error",
|
||||
input: testInput{
|
||||
headers: map[string]string{
|
||||
"x-vgw-owner": "access",
|
||||
},
|
||||
locals: map[utils.ContextKey]any{
|
||||
utils.ContextKeyAccount: auth.Account{
|
||||
Access: "test-user",
|
||||
Role: "admin",
|
||||
},
|
||||
},
|
||||
beErr: s3err.GetAPIError(s3err.ErrAdminMethodNotSupported),
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrAdminMethodNotSupported),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "successful response",
|
||||
input: testInput{
|
||||
headers: map[string]string{
|
||||
"x-vgw-owner": "access",
|
||||
},
|
||||
locals: map[utils.ContextKey]any{
|
||||
utils.ContextKeyAccount: auth.Account{
|
||||
Access: "test-user",
|
||||
Role: "admin",
|
||||
},
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
Status: http.StatusCreated,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
iam := &IAMServiceMock{
|
||||
GetUserAccountFunc: func(access string) (auth.Account, error) {
|
||||
return auth.Account{}, tt.input.extraMockErr
|
||||
},
|
||||
}
|
||||
be := &BackendMock{
|
||||
CreateBucketFunc: func(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput, defaultACL []byte) error {
|
||||
return tt.input.beErr
|
||||
},
|
||||
}
|
||||
|
||||
s3api := New(be, iam, nil, nil, nil, false, "")
|
||||
|
||||
ctrl := AdminController{
|
||||
iam: iam,
|
||||
be: be,
|
||||
s3api: s3api,
|
||||
}
|
||||
|
||||
testController(
|
||||
t,
|
||||
ctrl.CreateBucket,
|
||||
tt.output.response,
|
||||
tt.output.err,
|
||||
ctxInputs{
|
||||
locals: tt.input.locals,
|
||||
headers: tt.input.headers,
|
||||
},
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,6 +18,8 @@ import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/auth"
|
||||
@@ -31,12 +33,13 @@ import (
|
||||
)
|
||||
|
||||
type S3ApiController struct {
|
||||
be backend.Backend
|
||||
iam auth.IAMService
|
||||
logger s3log.AuditLogger
|
||||
evSender s3event.S3EventSender
|
||||
mm metrics.Manager
|
||||
readonly bool
|
||||
be backend.Backend
|
||||
iam auth.IAMService
|
||||
logger s3log.AuditLogger
|
||||
evSender s3event.S3EventSender
|
||||
mm metrics.Manager
|
||||
readonly bool
|
||||
virtualDomain string
|
||||
}
|
||||
|
||||
const (
|
||||
@@ -56,14 +59,15 @@ var (
|
||||
xmlhdr = []byte(`<?xml version="1.0" encoding="UTF-8"?>` + "\n")
|
||||
)
|
||||
|
||||
func New(be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, evs s3event.S3EventSender, mm metrics.Manager, readonly bool) S3ApiController {
|
||||
func New(be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, evs s3event.S3EventSender, mm metrics.Manager, readonly bool, virtualDomain string) S3ApiController {
|
||||
return S3ApiController{
|
||||
be: be,
|
||||
iam: iam,
|
||||
logger: logger,
|
||||
evSender: evs,
|
||||
readonly: readonly,
|
||||
mm: mm,
|
||||
be: be,
|
||||
iam: iam,
|
||||
logger: logger,
|
||||
evSender: evs,
|
||||
readonly: readonly,
|
||||
mm: mm,
|
||||
virtualDomain: virtualDomain,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -170,6 +174,7 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
|
||||
|
||||
// Set the response headers
|
||||
SetResponseHeaders(ctx, response.Headers)
|
||||
ensureExposeMetaHeaders(ctx)
|
||||
|
||||
opts := response.MetaOpts
|
||||
if opts == nil {
|
||||
@@ -312,6 +317,77 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
|
||||
return ctx.Send(res)
|
||||
}
|
||||
|
||||
func ensureExposeMetaHeaders(ctx *fiber.Ctx) {
	// Only attempt to modify expose headers when CORS is actually in use.
	if len(ctx.Response().Header.Peek("Access-Control-Allow-Origin")) == 0 {
		return
	}

	existing := strings.TrimSpace(string(ctx.Response().Header.Peek("Access-Control-Expose-Headers")))
	if existing == "*" {
		return
	}

	lowerExisting := map[string]struct{}{}
	if existing != "" {
		for _, part := range strings.Split(existing, ",") {
			p := strings.ToLower(strings.TrimSpace(part))
			if p != "" {
				lowerExisting[p] = struct{}{}
			}
		}
	}

	metaNames := map[string]struct{}{}
	for k := range ctx.Response().Header.All() {
		key := string(k)
		if strings.HasPrefix(strings.ToLower(key), "x-amz-meta-") {
			metaNames[key] = struct{}{}
		}
	}
	if len(metaNames) == 0 {
		// Still ensure ETag is present if any expose headers exist/are needed.
		if _, ok := lowerExisting["etag"]; ok {
			return
		}
		if existing == "" {
			ctx.Response().Header.Set("Access-Control-Expose-Headers", "ETag")
			return
		}
		ctx.Response().Header.Set("Access-Control-Expose-Headers", existing+", ETag")
		return
	}

	metaList := make([]string, 0, len(metaNames))
	for k := range metaNames {
		metaList = append(metaList, k)
	}
	sort.Strings(metaList)

	toAdd := make([]string, 0, 1+len(metaList))
	if _, ok := lowerExisting["etag"]; !ok {
		toAdd = append(toAdd, "ETag")
		lowerExisting["etag"] = struct{}{}
	}
	for _, h := range metaList {
		lh := strings.ToLower(h)
		if _, ok := lowerExisting[lh]; ok {
			continue
		}
		toAdd = append(toAdd, h)
		lowerExisting[lh] = struct{}{}
	}
	if len(toAdd) == 0 {
		return
	}

	if existing == "" {
		ctx.Response().Header.Set("Access-Control-Expose-Headers", strings.Join(toAdd, ", "))
		return
	}
	ctx.Response().Header.Set("Access-Control-Expose-Headers", existing+", "+strings.Join(toAdd, ", "))
}

// Sets the response headers
|
||||
func SetResponseHeaders(ctx *fiber.Ctx, headers map[string]*string) {
|
||||
if headers == nil {
|
||||
|
||||
@@ -237,6 +237,21 @@ func TestSetResponseHeaders(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureExposeMetaHeaders_AddsActualMetaHeaderNames(t *testing.T) {
|
||||
app := fiber.New()
|
||||
ctx := app.AcquireCtx(&fasthttp.RequestCtx{})
|
||||
|
||||
ctx.Response().Header.Add("Access-Control-Allow-Origin", "https://example.com")
|
||||
ctx.Response().Header.Add("Access-Control-Expose-Headers", "ETag")
|
||||
ctx.Response().Header.Set("x-amz-meta-foo", "bar")
|
||||
ctx.Response().Header.Set("x-amz-meta-bar", "baz")
|
||||
|
||||
ensureExposeMetaHeaders(ctx)
|
||||
|
||||
got := string(ctx.Response().Header.Peek("Access-Control-Expose-Headers"))
|
||||
assert.Equal(t, "ETag, X-Amz-Meta-Bar, X-Amz-Meta-Foo", got)
|
||||
}
|
||||
|
||||
// mock the audit logger
|
||||
type mockAuditLogger struct {
|
||||
}
|
||||
|
||||
@@ -658,10 +658,14 @@ func (c S3ApiController) GetBucketLocation(ctx *fiber.Ctx) (*Response, error) {

	// pick up configured region from locals (set by router middleware)
	region, _ := ctx.Locals("region").(string)
	value := &region
	if region == "us-east-1" {
		value = nil
	}

	return &Response{
		Data: s3response.LocationConstraint{
			Value: region,
			Value: value,
		},
		MetaOpts: &MetaOptions{
			BucketOwner: parsedAcl.Owner,

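// A client-side sketch, not part of this changeset: with the change above, a
// bucket in us-east-1 now reports an empty/nil LocationConstraint, matching
// standard S3 GetBucketLocation semantics. The aws-sdk-go-v2 client setup is
// assumed to exist elsewhere; names here are placeholders.
package sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func bucketRegionSketch(ctx context.Context, client *s3.Client, bucket string) (string, error) {
	out, err := client.GetBucketLocation(ctx, &s3.GetBucketLocationInput{Bucket: &bucket})
	if err != nil {
		return "", err
	}
	if out.LocationConstraint == "" {
		// An empty constraint means the bucket lives in us-east-1.
		return "us-east-1", nil
	}
	return string(out.LocationConstraint), nil
}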
@@ -1303,14 +1303,40 @@ func TestS3ApiController_GetBucketLocation(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "successful response",
|
||||
name: "successful response us-east-1",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Data: s3response.LocationConstraint{
|
||||
Value: "us-east-1",
|
||||
Value: nil,
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "successful response",
|
||||
input: testInput{
|
||||
locals: map[utils.ContextKey]any{
|
||||
utils.ContextKeyIsRoot: true,
|
||||
utils.ContextKeyParsedAcl: auth.ACL{
|
||||
Owner: "root",
|
||||
},
|
||||
utils.ContextKeyAccount: auth.Account{
|
||||
Access: "root",
|
||||
Role: auth.RoleAdmin,
|
||||
},
|
||||
utils.ContextKeyRegion: "us-east-2",
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Data: s3response.LocationConstraint{
|
||||
Value: utils.GetStringPtr("us-east-2"),
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
|
||||
@@ -314,6 +314,7 @@ func (c S3ApiController) PutBucketPolicy(ctx *fiber.Ctx) (*Response, error) {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
Status: http.StatusNoContent,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
@@ -460,7 +461,7 @@ func (c S3ApiController) PutBucketAcl(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, s3err.GetAPIError(s3err.ErrMissingSecurityHeader)
|
||||
}
|
||||
|
||||
updAcl, err := auth.UpdateACL(input, parsedAcl, c.iam, acct.Role == auth.RoleAdmin)
|
||||
updAcl, err := auth.UpdateACL(input, parsedAcl, c.iam)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
@@ -486,13 +487,24 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
grantWrite := ctx.Get("X-Amz-Grant-Write")
|
||||
grantWriteACP := ctx.Get("X-Amz-Grant-Write-Acp")
|
||||
lockEnabled := strings.EqualFold(ctx.Get("X-Amz-Bucket-Object-Lock-Enabled"), "true")
|
||||
acct := utils.ContextKeyAccount.Get(ctx).(auth.Account)
|
||||
grants := grantFullControl + grantRead + grantReadACP + grantWrite + grantWriteACP
|
||||
objectOwnership := types.ObjectOwnership(
|
||||
ctx.Get("X-Amz-Object-Ownership", string(types.ObjectOwnershipBucketOwnerEnforced)),
|
||||
)
|
||||
|
||||
if acct.Role != auth.RoleAdmin && acct.Role != auth.RoleUserPlus {
|
||||
if c.readonly {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
}, s3err.GetAPIError(s3err.ErrAccessDenied)
|
||||
}
|
||||
|
||||
creator := utils.ContextKeyAccount.Get(ctx).(auth.Account)
|
||||
if !utils.ContextKeyBucketOwner.IsSet(ctx) {
|
||||
utils.ContextKeyBucketOwner.Set(ctx, creator)
|
||||
}
|
||||
bucketOwner := utils.ContextKeyBucketOwner.Get(ctx).(auth.Account)
|
||||
|
||||
if creator.Role != auth.RoleAdmin && creator.Role != auth.RoleUserPlus {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
}, s3err.GetAPIError(s3err.ErrAccessDenied)
|
||||
@@ -502,7 +514,7 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
if ok := utils.IsValidBucketName(bucket); !ok {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: acct.Access,
|
||||
BucketOwner: bucketOwner.Access,
|
||||
},
|
||||
}, s3err.GetAPIError(s3err.ErrInvalidBucketName)
|
||||
}
|
||||
@@ -512,7 +524,7 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: acct.Access,
|
||||
BucketOwner: bucketOwner.Access,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
@@ -521,7 +533,7 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
if ok := utils.IsValidOwnership(objectOwnership); !ok {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: acct.Access,
|
||||
BucketOwner: bucketOwner.Access,
|
||||
},
|
||||
}, s3err.APIError{
|
||||
Code: "InvalidArgument",
|
||||
@@ -534,7 +546,7 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
debuglogger.Logf("bucket acls are disabled for %v object ownership", objectOwnership)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: acct.Access,
|
||||
BucketOwner: bucketOwner.Access,
|
||||
},
|
||||
}, s3err.GetAPIError(s3err.ErrInvalidBucketAclWithObjectOwnership)
|
||||
}
|
||||
@@ -543,7 +555,7 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
debuglogger.Logf("invalid request: %q (grants) %q (acl)", grants, acl)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: acct.Access,
|
||||
BucketOwner: bucketOwner.Access,
|
||||
},
|
||||
}, s3err.GetAPIError(s3err.ErrBothCannedAndHeaderGrants)
|
||||
}
|
||||
@@ -556,18 +568,18 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
debuglogger.Logf("failed to parse the request body: %v", err)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: acct.Access,
|
||||
BucketOwner: bucketOwner.Access,
|
||||
},
|
||||
}, s3err.GetAPIError(s3err.ErrMalformedXML)
|
||||
}
|
||||
|
||||
if body.LocationConstraint != "" {
|
||||
if body.LocationConstraint != nil {
|
||||
region := utils.ContextKeyRegion.Get(ctx).(string)
|
||||
if body.LocationConstraint != region {
|
||||
debuglogger.Logf("invalid location constraint: %s", body.LocationConstraint)
|
||||
if *body.LocationConstraint != region || *body.LocationConstraint == "us-east-1" {
|
||||
debuglogger.Logf("invalid location constraint: %s", *body.LocationConstraint)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: acct.Access,
|
||||
BucketOwner: bucketOwner.Access,
|
||||
},
|
||||
}, s3err.GetAPIError(s3err.ErrInvalidLocationConstraint)
|
||||
}
|
||||
@@ -575,7 +587,7 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
}
|
||||
|
||||
defACL := auth.ACL{
|
||||
Owner: acct.Access,
|
||||
Owner: bucketOwner.Access,
|
||||
}
|
||||
|
||||
updAcl, err := auth.UpdateACL(&auth.PutBucketAclInput{
|
||||
@@ -586,15 +598,15 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
GrantWriteACP: &grantWriteACP,
|
||||
AccessControlPolicy: &auth.AccessControlPolicy{
|
||||
Owner: &types.Owner{
|
||||
ID: &acct.Access,
|
||||
ID: &bucketOwner.Access,
|
||||
}},
|
||||
ACL: types.BucketCannedACL(acl),
|
||||
}, defACL, c.iam, acct.Role == auth.RoleAdmin)
|
||||
}, defACL, c.iam)
|
||||
if err != nil {
|
||||
debuglogger.Logf("failed to update bucket acl: %v", err)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: acct.Access,
|
||||
BucketOwner: bucketOwner.Access,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
@@ -609,7 +621,7 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, updAcl)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: acct.Access,
|
||||
BucketOwner: bucketOwner.Access,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
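// A minimal sketch, not part of this changeset: with LocationConstraint now a
// *string, a CreateBucket request body targeting a non-default region is built
// with an explicit pointer (the same pattern the tests below use via
// utils.GetStringPtr). Per the validation above, the value must match the
// configured gateway region and must not be "us-east-1"; omitting the element
// (nil pointer) selects the default region. The region value is a placeholder.
package sketch

import (
	"encoding/xml"

	"github.com/versity/versitygw/s3api/utils"
	"github.com/versity/versitygw/s3response"
)

func createBucketBodySketch() ([]byte, error) {
	cfg := s3response.CreateBucketConfiguration{
		LocationConstraint: utils.GetStringPtr("us-east-2"),
	}
	return xml.Marshal(cfg)
}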
@@ -641,7 +641,10 @@ func TestS3ApiController_PutBucketPolicy(t *testing.T) {
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{BucketOwner: "root"},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
Status: http.StatusNoContent,
|
||||
},
|
||||
},
|
||||
err: s3err.GetAPIError(s3err.ErrNoSuchBucket),
|
||||
},
|
||||
@@ -656,6 +659,7 @@ func TestS3ApiController_PutBucketPolicy(t *testing.T) {
|
||||
response: &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
Status: http.StatusNoContent,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -696,7 +700,7 @@ func TestS3ApiController_CreateBucket(t *testing.T) {
|
||||
}
|
||||
|
||||
invLocConstBody, err := xml.Marshal(s3response.CreateBucketConfiguration{
|
||||
LocationConstraint: "us-west-1",
|
||||
LocationConstraint: utils.GetStringPtr("us-west-1"),
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
|
||||
s3api/controllers/cors_default_origin_test.go (new file, 92 lines)
@@ -0,0 +1,92 @@
|
||||
// Copyright 2026 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/s3api/middlewares"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
)
|
||||
|
||||
func TestApplyBucketCORS_FallbackOrigin_NoBucketCors_NoRequestOrigin(t *testing.T) {
|
||||
origin := "https://example.com"
|
||||
|
||||
mockedBackend := &BackendMock{
|
||||
GetBucketCorsFunc: func(ctx context.Context, bucket string) ([]byte, error) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchCORSConfiguration)
|
||||
},
|
||||
}
|
||||
|
||||
app := fiber.New()
|
||||
app.Get("/:bucket/test",
|
||||
middlewares.ApplyBucketCORS(mockedBackend, origin),
|
||||
func(c *fiber.Ctx) error {
|
||||
return c.SendStatus(http.StatusOK)
|
||||
},
|
||||
)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "/mybucket/test", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("new request: %v", err)
|
||||
}
|
||||
|
||||
resp, err := app.Test(req)
|
||||
if err != nil {
|
||||
t.Fatalf("app.Test: %v", err)
|
||||
}
|
||||
|
||||
if got := resp.Header.Get("Access-Control-Allow-Origin"); got != origin {
|
||||
t.Fatalf("expected Access-Control-Allow-Origin to be set to fallback, got %q", got)
|
||||
}
|
||||
if got := resp.Header.Get("Access-Control-Expose-Headers"); got != "ETag" {
|
||||
t.Fatalf("expected Access-Control-Expose-Headers to include ETag, got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyBucketCORS_FallbackOrigin_NotAppliedWhenBucketCorsExists(t *testing.T) {
|
||||
origin := "https://example.com"
|
||||
|
||||
mockedBackend := &BackendMock{
|
||||
GetBucketCorsFunc: func(ctx context.Context, bucket string) ([]byte, error) {
|
||||
return []byte("not-parsed"), nil
|
||||
},
|
||||
}
|
||||
|
||||
app := fiber.New()
|
||||
app.Get("/:bucket/test",
|
||||
middlewares.ApplyBucketCORS(mockedBackend, origin),
|
||||
func(c *fiber.Ctx) error {
|
||||
return c.SendStatus(http.StatusOK)
|
||||
},
|
||||
)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "/mybucket/test", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("new request: %v", err)
|
||||
}
|
||||
|
||||
resp, err := app.Test(req)
|
||||
if err != nil {
|
||||
t.Fatalf("app.Test: %v", err)
|
||||
}
|
||||
|
||||
if got := resp.Header.Get("Access-Control-Allow-Origin"); got != "" {
|
||||
t.Fatalf("expected no Access-Control-Allow-Origin when bucket CORS exists, got %q", got)
|
||||
}
|
||||
}
|
||||
@@ -72,6 +72,9 @@ func (c S3ApiController) DeleteObjectTagging(ctx *fiber.Ctx) (*Response, error)
|
||||
|
||||
err = c.be.DeleteObjectTagging(ctx.Context(), bucket, key, versionId)
|
||||
return &Response{
|
||||
Headers: map[string]*string{
|
||||
"x-amz-version-id": &versionId,
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
Status: http.StatusNoContent,
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
|
||||
@@ -20,12 +20,14 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/oklog/ulid/v2"
|
||||
"github.com/versity/versitygw/s3api/utils"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
"github.com/versity/versitygw/s3event"
|
||||
)
|
||||
|
||||
func TestS3ApiController_DeleteObjectTagging(t *testing.T) {
|
||||
versionId := ulid.Make().String()
|
||||
tests := []struct {
|
||||
name string
|
||||
input testInput
|
||||
@@ -65,11 +67,17 @@ func TestS3ApiController_DeleteObjectTagging(t *testing.T) {
|
||||
{
|
||||
name: "backend returns error",
|
||||
input: testInput{
|
||||
queries: map[string]string{
|
||||
"versionId": versionId,
|
||||
},
|
||||
locals: defaultLocals,
|
||||
beErr: s3err.GetAPIError(s3err.ErrInvalidRequest),
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Headers: map[string]*string{
|
||||
"x-amz-version-id": &versionId,
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
Status: http.StatusNoContent,
|
||||
@@ -83,9 +91,15 @@ func TestS3ApiController_DeleteObjectTagging(t *testing.T) {
|
||||
name: "successful response",
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
queries: map[string]string{
|
||||
"versionId": versionId,
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Headers: map[string]*string{
|
||||
"x-amz-version-id": &versionId,
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
Status: http.StatusNoContent,
|
||||
|
||||
@@ -93,6 +93,9 @@ func (c S3ApiController) GetObjectTagging(ctx *fiber.Ctx) (*Response, error) {
|
||||
|
||||
return &Response{
|
||||
Data: tags,
|
||||
Headers: map[string]*string{
|
||||
"x-amz-version-id": &versionId,
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/oklog/ulid/v2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/versity/versitygw/s3api/utils"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
@@ -33,6 +34,7 @@ import (
|
||||
)
|
||||
|
||||
func TestS3ApiController_GetObjectTagging(t *testing.T) {
|
||||
versionId := ulid.Make().String()
|
||||
tests := []struct {
|
||||
name string
|
||||
input testInput
|
||||
@@ -88,6 +90,9 @@ func TestS3ApiController_GetObjectTagging(t *testing.T) {
|
||||
{
|
||||
name: "successful response",
|
||||
input: testInput{
|
||||
queries: map[string]string{
|
||||
"versionId": versionId,
|
||||
},
|
||||
locals: defaultLocals,
|
||||
beRes: map[string]string{
|
||||
"key": "val",
|
||||
@@ -95,6 +100,9 @@ func TestS3ApiController_GetObjectTagging(t *testing.T) {
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Headers: map[string]*string{
|
||||
"x-amz-version-id": utils.GetStringPtr(versionId),
|
||||
},
|
||||
Data: s3response.Tagging{
|
||||
TagSet: s3response.TagSet{
|
||||
Tags: []s3response.Tag{
|
||||
|
||||
@@ -158,7 +158,16 @@ func (c S3ApiController) CreateMultipartUpload(ctx *fiber.Ctx) (*Response, error
|
||||
isRoot := utils.ContextKeyIsRoot.Get(ctx).(bool)
|
||||
parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)
|
||||
|
||||
err := auth.VerifyAccess(ctx.Context(), c.be,
|
||||
err := utils.ValidateNoACLHeaders(ctx)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
err = auth.VerifyAccess(ctx.Context(), c.be,
|
||||
auth.AccessOptions{
|
||||
Readonly: c.readonly,
|
||||
Acl: parsedAcl,
|
||||
@@ -352,6 +361,10 @@ func (c S3ApiController) CompleteMultipartUpload(ctx *fiber.Ctx) (*Response, err
|
||||
IfMatch: ifMatch,
|
||||
IfNoneMatch: ifNoneMatch,
|
||||
})
|
||||
if err == nil {
|
||||
objUrl := utils.GenerateObjectLocation(ctx, c.virtualDomain, bucket, key)
|
||||
res.Location = &objUrl
|
||||
}
|
||||
return &Response{
|
||||
Data: res,
|
||||
Headers: map[string]*string{
|
||||
|
||||
@@ -536,7 +536,8 @@ func TestS3ApiController_CompleteMultipartUpload(t *testing.T) {
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Data: s3response.CompleteMultipartUploadResult{
|
||||
ETag: &ETag,
|
||||
ETag: &ETag,
|
||||
Location: utils.GetStringPtr("http://example.com/bucket/object"),
|
||||
},
|
||||
Headers: map[string]*string{
|
||||
"x-amz-version-id": &versionId,
|
||||
|
||||
@@ -86,6 +86,9 @@ func (c S3ApiController) PutObjectTagging(ctx *fiber.Ctx) (*Response, error) {
|
||||
|
||||
err = c.be.PutObjectTagging(ctx.Context(), bucket, key, versionId, tagging)
|
||||
return &Response{
|
||||
Headers: map[string]*string{
|
||||
"x-amz-version-id": &versionId,
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
EventName: s3event.EventObjectTaggingPut,
|
||||
@@ -507,7 +510,16 @@ func (c S3ApiController) CopyObject(ctx *fiber.Ctx) (*Response, error) {
|
||||
isRoot := utils.ContextKeyIsRoot.Get(ctx).(bool)
|
||||
parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)
|
||||
|
||||
err := utils.ValidateCopySource(copySource)
|
||||
err := utils.ValidateNoACLHeaders(ctx)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
err = utils.ValidateCopySource(copySource)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
@@ -656,6 +668,15 @@ func (c S3ApiController) PutObject(ctx *fiber.Ctx) (*Response, error) {
|
||||
parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)
|
||||
IsBucketPublic := utils.ContextKeyPublicBucket.IsSet(ctx)
|
||||
|
||||
err := utils.ValidateNoACLHeaders(ctx)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
// Content Length
|
||||
contentLengthStr := ctx.Get("Content-Length")
|
||||
if contentLengthStr == "" {
|
||||
@@ -671,7 +692,7 @@ func (c S3ApiController) PutObject(ctx *fiber.Ctx) (*Response, error) {
|
||||
// load the meta headers
|
||||
metadata := utils.GetUserMetaData(&ctx.Request().Header)
|
||||
|
||||
err := auth.VerifyAccess(ctx.Context(), c.be,
|
||||
err = auth.VerifyAccess(ctx.Context(), c.be,
|
||||
auth.AccessOptions{
|
||||
Readonly: c.readonly,
|
||||
Acl: parsedAcl,
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/oklog/ulid/v2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/s3api/utils"
|
||||
@@ -45,6 +46,8 @@ func TestS3ApiController_PutObjectTagging(t *testing.T) {
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
versionId := ulid.Make().String()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
input testInput
|
||||
@@ -102,9 +105,15 @@ func TestS3ApiController_PutObjectTagging(t *testing.T) {
|
||||
locals: defaultLocals,
|
||||
beErr: s3err.GetAPIError(s3err.ErrNoSuchBucket),
|
||||
body: validTaggingBody,
|
||||
queries: map[string]string{
|
||||
"versionId": versionId,
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Headers: map[string]*string{
|
||||
"x-amz-version-id": &versionId,
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
EventName: s3event.EventObjectTaggingPut,
|
||||
@@ -118,9 +127,15 @@ func TestS3ApiController_PutObjectTagging(t *testing.T) {
|
||||
input: testInput{
|
||||
locals: defaultLocals,
|
||||
body: validTaggingBody,
|
||||
queries: map[string]string{
|
||||
"versionId": versionId,
|
||||
},
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Headers: map[string]*string{
|
||||
"x-amz-version-id": &versionId,
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
EventName: s3event.EventObjectTaggingPut,
|
||||
|
||||
s3api/middlewares/apply-bucket-cors-preflight.go (new file, 73 lines)
@@ -0,0 +1,73 @@
// Copyright 2026 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
	"strings"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/s3err"
)

// ApplyBucketCORSPreflightFallback handles CORS preflight (OPTIONS) requests for S3 routes
// when no per-bucket CORS configuration exists.
//
// If the bucket has no CORS configuration and fallbackOrigin is set, it responds with 204 and:
// - Access-Control-Allow-Origin: fallbackOrigin
// - Vary: Origin, Access-Control-Request-Headers, Access-Control-Request-Method
// - Access-Control-Allow-Methods: mirrors Access-Control-Request-Method (if present)
// - Access-Control-Allow-Headers: mirrors Access-Control-Request-Headers (if present)
//
// If the bucket has a CORS configuration (or fallbackOrigin is blank), it calls next so the
// standard CORS OPTIONS handler can apply bucket-specific rules.
func ApplyBucketCORSPreflightFallback(be backend.Backend, fallbackOrigin string) fiber.Handler {
	fallbackOrigin = strings.TrimSpace(fallbackOrigin)
	if fallbackOrigin == "" {
		return func(ctx *fiber.Ctx) error { return ctx.Next() }
	}

	return func(ctx *fiber.Ctx) error {
		bucket := ctx.Params("bucket")
		_, err := be.GetBucketCors(ctx.Context(), bucket)
		if err != nil {
			if s3Err, ok := err.(s3err.APIError); ok && (s3Err.Code == "NoSuchCORSConfiguration" || s3Err.Code == "NoSuchBucket") {
				if len(ctx.Response().Header.Peek("Access-Control-Allow-Origin")) == 0 {
					ctx.Response().Header.Add("Access-Control-Allow-Origin", fallbackOrigin)
				}
				if len(ctx.Response().Header.Peek("Vary")) == 0 {
					ctx.Response().Header.Add("Vary", VaryHdr)
				}

				if reqMethod := strings.TrimSpace(ctx.Get("Access-Control-Request-Method")); reqMethod != "" {
					if len(ctx.Response().Header.Peek("Access-Control-Allow-Methods")) == 0 {
						ctx.Response().Header.Add("Access-Control-Allow-Methods", reqMethod)
					}
				}

				if reqHeaders := strings.TrimSpace(ctx.Get("Access-Control-Request-Headers")); reqHeaders != "" {
					if len(ctx.Response().Header.Peek("Access-Control-Allow-Headers")) == 0 {
						ctx.Response().Header.Add("Access-Control-Allow-Headers", reqHeaders)
					}
				}

				ctx.Status(fiber.StatusNoContent)
				return nil
			}
		}

		return ctx.Next()
	}
}
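// A mounting sketch, not part of this changeset, assuming this package's
// imports: the fallback runs ahead of the gateway's regular per-bucket OPTIONS
// handling, so buckets without their own CORS configuration still answer
// preflights with the gateway-wide origin. The route pattern, origin, and the
// stand-in next handler are placeholders.
func mountPreflightFallbackSketch(app *fiber.App, be backend.Backend) {
	app.Options("/:bucket",
		ApplyBucketCORSPreflightFallback(be, "https://console.example.com"),
		func(ctx *fiber.Ctx) error {
			// Reached only when the bucket has its own CORS configuration
			// (or when the lookup fails with an unrelated error).
			return ctx.SendStatus(fiber.StatusNoContent)
		},
	)
}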
s3api/middlewares/apply-bucket-cors-preflight_test.go (new file, 146 lines)
@@ -0,0 +1,146 @@
|
||||
// Copyright 2026 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package middlewares
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
)
|
||||
|
||||
type backendWithGetBucketCors struct {
|
||||
backend.BackendUnsupported
|
||||
getBucketCors func(ctx context.Context, bucket string) ([]byte, error)
|
||||
}
|
||||
|
||||
func (b backendWithGetBucketCors) GetBucketCors(ctx context.Context, bucket string) ([]byte, error) {
|
||||
return b.getBucketCors(ctx, bucket)
|
||||
}
|
||||
|
||||
func TestApplyBucketCORSPreflightFallback_NoBucketCors_Responds204(t *testing.T) {
|
||||
be := backendWithGetBucketCors{
|
||||
getBucketCors: func(ctx context.Context, bucket string) ([]byte, error) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchCORSConfiguration)
|
||||
},
|
||||
}
|
||||
|
||||
app := fiber.New()
|
||||
app.Options("/:bucket",
|
||||
ApplyBucketCORSPreflightFallback(be, "https://example.com"),
|
||||
func(c *fiber.Ctx) error {
|
||||
// Should not be reached if fallback triggers
|
||||
return c.SendStatus(http.StatusTeapot)
|
||||
},
|
||||
)
|
||||
|
||||
req, err := http.NewRequest(http.MethodOptions, "/testing", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("new request: %v", err)
|
||||
}
|
||||
req.Header.Set("Origin", "https://request-origin.example")
|
||||
req.Header.Set("Access-Control-Request-Method", "GET")
|
||||
req.Header.Set("Access-Control-Request-Headers", "content-type")
|
||||
|
||||
resp, err := app.Test(req)
|
||||
if err != nil {
|
||||
t.Fatalf("app.Test: %v", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
t.Fatalf("expected status 204, got %d", resp.StatusCode)
|
||||
}
|
||||
if got := resp.Header.Get("Access-Control-Allow-Origin"); got != "https://example.com" {
|
||||
t.Fatalf("expected allow origin fallback, got %q", got)
|
||||
}
|
||||
if got := resp.Header.Get("Access-Control-Allow-Methods"); got != "GET" {
|
||||
t.Fatalf("expected allow methods to mirror request, got %q", got)
|
||||
}
|
||||
if got := resp.Header.Get("Access-Control-Allow-Headers"); got != "content-type" {
|
||||
t.Fatalf("expected allow headers to mirror request, got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyBucketCORSPreflightFallback_NoSuchBucket_Responds204(t *testing.T) {
|
||||
be := backendWithGetBucketCors{
|
||||
getBucketCors: func(ctx context.Context, bucket string) ([]byte, error) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
},
|
||||
}
|
||||
|
||||
app := fiber.New()
|
||||
app.Options("/:bucket",
|
||||
ApplyBucketCORSPreflightFallback(be, "https://example.com"),
|
||||
func(c *fiber.Ctx) error {
|
||||
return c.SendStatus(http.StatusTeapot)
|
||||
},
|
||||
)
|
||||
|
||||
req, err := http.NewRequest(http.MethodOptions, "/testing", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("new request: %v", err)
|
||||
}
|
||||
req.Header.Set("Origin", "https://request-origin.example")
|
||||
req.Header.Set("Access-Control-Request-Method", "PUT")
|
||||
req.Header.Set("Access-Control-Request-Headers", "content-type")
|
||||
|
||||
resp, err := app.Test(req)
|
||||
if err != nil {
|
||||
t.Fatalf("app.Test: %v", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
t.Fatalf("expected status 204, got %d", resp.StatusCode)
|
||||
}
|
||||
if got := resp.Header.Get("Access-Control-Allow-Origin"); got != "https://example.com" {
|
||||
t.Fatalf("expected allow origin fallback, got %q", got)
|
||||
}
|
||||
if got := resp.Header.Get("Access-Control-Allow-Methods"); got != "PUT" {
|
||||
t.Fatalf("expected allow methods to mirror request, got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyBucketCORSPreflightFallback_BucketHasCors_CallsNext(t *testing.T) {
|
||||
be := backendWithGetBucketCors{
|
||||
getBucketCors: func(ctx context.Context, bucket string) ([]byte, error) {
|
||||
return []byte("dummy"), nil
|
||||
},
|
||||
}
|
||||
|
||||
app := fiber.New()
|
||||
app.Options("/:bucket",
|
||||
ApplyBucketCORSPreflightFallback(be, "https://example.com"),
|
||||
func(c *fiber.Ctx) error {
|
||||
return c.SendStatus(http.StatusOK)
|
||||
},
|
||||
)
|
||||
|
||||
req, err := http.NewRequest(http.MethodOptions, "/testing", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("new request: %v", err)
|
||||
}
|
||||
|
||||
resp, err := app.Test(req)
|
||||
if err != nil {
|
||||
t.Fatalf("app.Test: %v", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("expected status 200 from next handler, got %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
@@ -16,6 +16,7 @@ package middlewares
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/auth"
|
||||
@@ -31,12 +32,14 @@ var VaryHdr = "Origin, Access-Control-Request-Headers, Access-Control-Request-Me
|
||||
// checks if origin and method meets the cors rules and
|
||||
// adds the necessary response headers.
|
||||
// CORS check is applied only when 'Origin' request header is present
|
||||
func ApplyBucketCORS(be backend.Backend) fiber.Handler {
|
||||
func ApplyBucketCORS(be backend.Backend, fallbackOrigin string) fiber.Handler {
|
||||
fallbackOrigin = strings.TrimSpace(fallbackOrigin)
|
||||
|
||||
return func(ctx *fiber.Ctx) error {
|
||||
bucket := ctx.Params("bucket")
|
||||
origin := ctx.Get("Origin")
|
||||
// if the origin request header is empty, skip cors validation
|
||||
if origin == "" {
|
||||
// If neither Origin is present nor a fallback is configured, skip CORS entirely.
|
||||
if origin == "" && fallbackOrigin == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -46,12 +49,32 @@ func ApplyBucketCORS(be backend.Backend) fiber.Handler {
|
||||
// If CORS is not configured, S3Error will have code NoSuchCORSConfiguration.
|
||||
// In this case, we can safely continue. For any other error, we should log it.
|
||||
s3Err, ok := err.(s3err.APIError)
|
||||
if ok && (s3Err.Code == "NoSuchCORSConfiguration" || s3Err.Code == "NoSuchBucket") {
|
||||
// Optional global fallback: add Access-Control-Allow-Origin for buckets
|
||||
// without a specific CORS configuration.
|
||||
if fallbackOrigin != "" {
|
||||
if len(ctx.Response().Header.Peek("Access-Control-Allow-Origin")) == 0 {
|
||||
ctx.Response().Header.Add("Access-Control-Allow-Origin", fallbackOrigin)
|
||||
}
|
||||
if len(ctx.Response().Header.Peek("Vary")) == 0 {
|
||||
ctx.Response().Header.Add("Vary", VaryHdr)
|
||||
}
|
||||
ensureExposeETag(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if !ok || s3Err.Code != "NoSuchCORSConfiguration" {
|
||||
debuglogger.Logf("failed to get bucket cors for bucket %q: %v", bucket, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// If Origin is missing, don't attempt per-bucket CORS evaluation.
|
||||
// (Fallback has already been handled above for buckets without CORS config.)
|
||||
if origin == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
cors, err := auth.ParseCORSOutput(data)
|
||||
if err != nil {
|
||||
return nil
|
||||
@@ -100,6 +123,9 @@ func ApplyBucketCORS(be backend.Backend) fiber.Handler {
|
||||
}
|
||||
}
|
||||
|
||||
// Always expose ETag and user metadata headers for browser clients.
|
||||
ensureExposeETag(ctx)
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
s3api/middlewares/apply-default-cors-preflight.go (new file, 58 lines)
@@ -0,0 +1,58 @@
|
||||
// Copyright 2026 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package middlewares
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
// ApplyDefaultCORSPreflight responds to CORS preflight (OPTIONS) requests for routes
|
||||
// that don't have per-bucket CORS configuration (e.g. admin APIs).
|
||||
//
|
||||
// It uses the provided fallbackOrigin as the Access-Control-Allow-Origin value.
|
||||
// It mirrors Access-Control-Request-Method into Access-Control-Allow-Methods and
|
||||
// mirrors Access-Control-Request-Headers into Access-Control-Allow-Headers.
|
||||
func ApplyDefaultCORSPreflight(fallbackOrigin string) fiber.Handler {
|
||||
fallbackOrigin = strings.TrimSpace(fallbackOrigin)
|
||||
if fallbackOrigin == "" {
|
||||
return func(ctx *fiber.Ctx) error { return nil }
|
||||
}
|
||||
|
||||
return func(ctx *fiber.Ctx) error {
|
||||
if len(ctx.Response().Header.Peek("Access-Control-Allow-Origin")) == 0 {
|
||||
ctx.Response().Header.Add("Access-Control-Allow-Origin", fallbackOrigin)
|
||||
}
|
||||
if len(ctx.Response().Header.Peek("Vary")) == 0 {
|
||||
ctx.Response().Header.Add("Vary", VaryHdr)
|
||||
}
|
||||
|
||||
if reqMethod := strings.TrimSpace(ctx.Get("Access-Control-Request-Method")); reqMethod != "" {
|
||||
if len(ctx.Response().Header.Peek("Access-Control-Allow-Methods")) == 0 {
|
||||
ctx.Response().Header.Add("Access-Control-Allow-Methods", reqMethod)
|
||||
}
|
||||
}
|
||||
|
||||
if reqHeaders := strings.TrimSpace(ctx.Get("Access-Control-Request-Headers")); reqHeaders != "" {
|
||||
if len(ctx.Response().Header.Peek("Access-Control-Allow-Headers")) == 0 {
|
||||
ctx.Response().Header.Add("Access-Control-Allow-Headers", reqHeaders)
|
||||
}
|
||||
}
|
||||
|
||||
ctx.Status(fiber.StatusNoContent)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
s3api/middlewares/apply-default-cors-preflight_test.go (new file, 59 lines)
@@ -0,0 +1,59 @@
|
||||
// Copyright 2026 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package middlewares
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
func TestApplyDefaultCORSPreflight_OptionsSetsPreflightHeaders(t *testing.T) {
|
||||
origin := "https://example.com"
|
||||
|
||||
app := fiber.New()
|
||||
app.Options("/admin",
|
||||
ApplyDefaultCORSPreflight(origin),
|
||||
ApplyDefaultCORS(origin),
|
||||
func(c *fiber.Ctx) error { return nil },
|
||||
)
|
||||
|
||||
req, err := http.NewRequest(http.MethodOptions, "/admin", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("new request: %v", err)
|
||||
}
|
||||
req.Header.Set("Origin", "https://request-origin.example")
|
||||
req.Header.Set("Access-Control-Request-Method", "PATCH")
|
||||
req.Header.Set("Access-Control-Request-Headers", "content-type,authorization")
|
||||
|
||||
resp, err := app.Test(req)
|
||||
if err != nil {
|
||||
t.Fatalf("app.Test: %v", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
t.Fatalf("expected status 204, got %d", resp.StatusCode)
|
||||
}
|
||||
if got := resp.Header.Get("Access-Control-Allow-Origin"); got != origin {
|
||||
t.Fatalf("expected allow origin fallback, got %q", got)
|
||||
}
|
||||
if got := resp.Header.Get("Access-Control-Allow-Methods"); got != "PATCH" {
|
||||
t.Fatalf("expected allow methods to mirror request, got %q", got)
|
||||
}
|
||||
if got := resp.Header.Get("Access-Control-Allow-Headers"); got != "content-type,authorization" {
|
||||
t.Fatalf("expected allow headers to mirror request, got %q", got)
|
||||
}
|
||||
}
|
||||
s3api/middlewares/apply-default-cors.go (new file, 73 lines)
@@ -0,0 +1,73 @@
|
||||
// Copyright 2026 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package middlewares
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
func ensureExposeETag(ctx *fiber.Ctx) {
|
||||
existing := strings.TrimSpace(string(ctx.Response().Header.Peek("Access-Control-Expose-Headers")))
|
||||
defaults := []string{"ETag"}
|
||||
if existing == "" {
|
||||
ctx.Response().Header.Add("Access-Control-Expose-Headers", strings.Join(defaults, ", "))
|
||||
return
|
||||
}
|
||||
|
||||
lowerExisting := map[string]struct{}{}
|
||||
for _, part := range strings.Split(existing, ",") {
|
||||
p := strings.ToLower(strings.TrimSpace(part))
|
||||
if p != "" {
|
||||
lowerExisting[p] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
updated := existing
|
||||
for _, h := range defaults {
|
||||
if _, ok := lowerExisting[strings.ToLower(h)]; ok {
|
||||
continue
|
||||
}
|
||||
updated += ", " + h
|
||||
}
|
||||
|
||||
if updated != existing {
|
||||
ctx.Response().Header.Set("Access-Control-Expose-Headers", updated)
|
||||
}
|
||||
}
|
||||
|
||||
// ApplyDefaultCORS adds a default Access-Control-Allow-Origin header to responses
|
||||
// when the provided fallbackOrigin is non-empty.
|
||||
//
|
||||
// This is intended for routes that don't have per-bucket CORS configuration (e.g. admin APIs).
|
||||
// It will not override an existing Access-Control-Allow-Origin header.
|
||||
func ApplyDefaultCORS(fallbackOrigin string) fiber.Handler {
|
||||
fallbackOrigin = strings.TrimSpace(fallbackOrigin)
|
||||
if fallbackOrigin == "" {
|
||||
return func(ctx *fiber.Ctx) error { return nil }
|
||||
}
|
||||
|
||||
return func(ctx *fiber.Ctx) error {
|
||||
if len(ctx.Response().Header.Peek("Access-Control-Allow-Origin")) == 0 {
|
||||
ctx.Response().Header.Add("Access-Control-Allow-Origin", fallbackOrigin)
|
||||
}
|
||||
if len(ctx.Response().Header.Peek("Vary")) == 0 {
|
||||
ctx.Response().Header.Add("Vary", VaryHdr)
|
||||
}
|
||||
ensureExposeETag(ctx)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
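// A behavior sketch, not part of this changeset, assuming this package's
// imports (fiber and its fasthttp dependency): ensureExposeETag above merges
// rather than overwrites, so a handler that already exposed a header keeps it
// and additionally gains ETag.
func exposeETagMergeSketch() string {
	app := fiber.New()
	ctx := app.AcquireCtx(&fasthttp.RequestCtx{})
	defer app.ReleaseCtx(ctx)

	ctx.Response().Header.Set("Access-Control-Expose-Headers", "x-amz-request-id")
	ensureExposeETag(ctx)

	// Yields "x-amz-request-id, ETag".
	return string(ctx.Response().Header.Peek("Access-Control-Expose-Headers"))
}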
s3api/middlewares/apply-default-cors_test.go (new file, 74 lines)
@@ -0,0 +1,74 @@
|
||||
// Copyright 2026 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package middlewares
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
func TestApplyDefaultCORS_AddsHeaderWhenOriginSet(t *testing.T) {
|
||||
origin := "https://example.com"
|
||||
|
||||
app := fiber.New()
|
||||
app.Get("/admin", ApplyDefaultCORS(origin), func(c *fiber.Ctx) error {
|
||||
return c.SendStatus(http.StatusOK)
|
||||
})
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "/admin", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("new request: %v", err)
|
||||
}
|
||||
|
||||
resp, err := app.Test(req)
|
||||
if err != nil {
|
||||
t.Fatalf("app.Test: %v", err)
|
||||
}
|
||||
|
||||
if got := resp.Header.Get("Access-Control-Allow-Origin"); got != origin {
|
||||
t.Fatalf("expected fallback origin header, got %q", got)
|
||||
}
|
||||
if got := resp.Header.Get("Access-Control-Expose-Headers"); got != "ETag" {
|
||||
t.Fatalf("expected expose headers to include ETag, got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyDefaultCORS_DoesNotOverrideExistingHeader(t *testing.T) {
|
||||
origin := "https://example.com"
|
||||
|
||||
app := fiber.New()
|
||||
app.Get("/admin", func(c *fiber.Ctx) error {
|
||||
c.Response().Header.Add("Access-Control-Allow-Origin", "https://already-set.com")
|
||||
return nil
|
||||
}, ApplyDefaultCORS(origin), func(c *fiber.Ctx) error {
|
||||
return c.SendStatus(http.StatusOK)
|
||||
})
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "/admin", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("new request: %v", err)
|
||||
}
|
||||
|
||||
resp, err := app.Test(req)
|
||||
if err != nil {
|
||||
t.Fatalf("app.Test: %v", err)
|
||||
}
|
||||
|
||||
if got := resp.Header.Get("Access-Control-Allow-Origin"); got != "https://already-set.com" {
|
||||
t.Fatalf("expected existing header to remain, got %q", got)
|
||||
}
|
||||
}
|
||||
@@ -134,7 +134,7 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string,
	var err error
	wrapBodyReader(ctx, func(r io.Reader) io.Reader {
		var cr io.Reader
		cr, err = utils.NewChunkReader(ctx, r, authData, region, account.Secret, tdate)
		cr, err = utils.NewChunkReader(ctx, r, authData, account.Secret, tdate)
		return cr
	})
	if err != nil {
193 s3api/router.go
@@ -28,58 +28,102 @@ import (

type S3ApiRouter struct {
	WithAdmSrv bool
	Ctrl       controllers.S3ApiController
}

func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, aLogger s3log.AuditLogger, evs s3event.S3EventSender, mm metrics.Manager, readonly bool, region string, root middlewares.RootUserConfig) {
	ctrl := controllers.New(be, iam, logger, evs, mm, readonly)
func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, aLogger s3log.AuditLogger, evs s3event.S3EventSender, mm metrics.Manager, readonly bool, region, virtualDomain string, root middlewares.RootUserConfig, corsAllowOrigin string) {
	ctrl := controllers.New(be, iam, logger, evs, mm, readonly, virtualDomain)
	sa.Ctrl = ctrl
	adminServices := &controllers.Services{
		Logger: aLogger,
	}

	if sa.WithAdmSrv {
		adminController := controllers.NewAdminController(iam, be, aLogger)
		adminController := controllers.NewAdminController(iam, be, aLogger, ctrl)

// CreateUser admin api
|
||||
app.Patch("/create-user",
|
||||
controllers.ProcessHandlers(adminController.CreateUser, metrics.ActionAdminCreateUser, adminServices,
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminCreateUser),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/create-user",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
// DeleteUsers admin api
|
||||
app.Patch("/delete-user",
|
||||
controllers.ProcessHandlers(adminController.DeleteUser, metrics.ActionAdminDeleteUser, adminServices,
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminDeleteUser),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/delete-user",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
// UpdateUser admin api
|
||||
app.Patch("/update-user",
|
||||
controllers.ProcessHandlers(adminController.UpdateUser, metrics.ActionAdminUpdateUser, adminServices,
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminUpdateUser),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/update-user",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
// ListUsers admin api
|
||||
app.Patch("/list-users",
|
||||
controllers.ProcessHandlers(adminController.ListUsers, metrics.ActionAdminListUsers, adminServices,
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminListUsers),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/list-users",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
// ChangeBucketOwner admin api
|
||||
app.Patch("/change-bucket-owner",
|
||||
controllers.ProcessHandlers(adminController.ChangeBucketOwner, metrics.ActionAdminChangeBucketOwner, adminServices,
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminChangeBucketOwner),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/change-bucket-owner",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
// ListBucketsAndOwners admin api
|
||||
app.Patch("/list-buckets",
|
||||
controllers.ProcessHandlers(adminController.ListBuckets, metrics.ActionAdminListBuckets, adminServices,
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminListBuckets),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/list-buckets",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
// CreateBucket admin api
|
||||
app.Patch("/:bucket/create",
|
||||
controllers.ProcessHandlers(adminController.CreateBucket, metrics.ActionAdminCreateBucket, adminServices,
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.IsAdmin(metrics.ActionAdminCreateBucket),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
))
|
||||
app.Options("/:bucket/create",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
}
|
||||
|
||||
services := &controllers.Services{
|
||||
@@ -92,7 +136,12 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
|
||||
// copy source is not allowed on '/'
|
||||
app.Get("/", middlewares.MatchHeader("X-Amz-Copy-Source"),
|
||||
controllers.ProcessHandlers(ctrl.HandleErrorRoute(s3err.GetAPIError(s3err.ErrCopySourceNotAllowed)), metrics.ActionUndetected, services),
|
||||
controllers.ProcessHandlers(
|
||||
ctrl.HandleErrorRoute(s3err.GetAPIError(s3err.ErrCopySourceNotAllowed)),
|
||||
metrics.ActionUndetected,
|
||||
services,
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
),
|
||||
)
|
||||
|
||||
app.Get("/",
|
||||
@@ -100,11 +149,17 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
ctrl.ListBuckets,
|
||||
metrics.ActionListAllMyBuckets,
|
||||
services,
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionListAllMyBuckets, "", auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
))
|
||||
|
||||
app.Options("/",
|
||||
middlewares.ApplyDefaultCORSPreflight(corsAllowOrigin),
|
||||
middlewares.ApplyDefaultCORS(corsAllowOrigin),
|
||||
)
|
||||
|
||||
bucketRouter := app.Group("/:bucket")
|
||||
objectRouter := app.Group("/:bucket/*")
|
||||
|
||||
@@ -116,12 +171,12 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
metrics.ActionPutBucketTagging,
|
||||
services,
|
||||
middlewares.BucketObjectNameValidator(),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionPutBucketTagging, auth.PutBucketTaggingAction, auth.PermissionWrite, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.VerifyChecksums(false, true, true),
|
||||
middlewares.ParseAcl(be),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
))
|
||||
bucketRouter.Put("",
|
||||
middlewares.MatchQueryArgs("ownershipControls"),
|
||||
@@ -134,7 +189,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.VerifyChecksums(false, true, false),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Put("",
|
||||
@@ -148,7 +203,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.VerifyChecksums(false, true, false),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Put("",
|
||||
@@ -162,7 +217,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.VerifyChecksums(false, true, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Put("",
|
||||
@@ -176,7 +231,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.VerifyChecksums(false, true, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Put("",
|
||||
@@ -190,7 +245,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.VerifyChecksums(false, false, false),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Put("",
|
||||
@@ -204,7 +259,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.VerifyChecksums(false, false, false),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Put("",
|
||||
@@ -386,7 +441,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.VerifyChecksums(false, false, false),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
))
|
||||
|
||||
// HeadBucket action
|
||||
@@ -401,12 +456,11 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
ctrl.HeadBucket,
|
||||
metrics.ActionHeadBucket,
|
||||
services,
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.BucketObjectNameValidator(),
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionHeadBucket, auth.ListBucketAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, false),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
|
||||
@@ -427,7 +481,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionDeleteBucketTagging, auth.PutBucketTaggingAction, auth.PermissionWrite, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Delete("",
|
||||
@@ -440,7 +494,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionDeleteBucketOwnershipControls, auth.PutBucketOwnershipControlsAction, auth.PermissionWrite, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Delete("",
|
||||
@@ -453,7 +507,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionDeleteBucketPolicy, auth.PutBucketPolicyAction, auth.PermissionWrite, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Delete("",
|
||||
@@ -466,7 +520,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionDeleteBucketCors, auth.PutBucketCorsAction, auth.PermissionWrite, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Delete("",
|
||||
@@ -595,7 +649,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionDeleteBucket, auth.DeleteBucketAction, auth.PermissionWrite, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
|
||||
@@ -616,7 +670,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionGetBucketLocation, auth.GetBucketLocationAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
),
|
||||
)
|
||||
@@ -630,7 +684,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionGetBucketTagging, auth.GetBucketTaggingAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Get("",
|
||||
@@ -643,7 +697,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionGetBucketOwnershipControls, auth.GetBucketOwnershipControlsAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Get("",
|
||||
@@ -656,7 +710,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionGetBucketVersioning, auth.GetBucketVersioningAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Get("",
|
||||
@@ -669,7 +723,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionGetBucketPolicy, auth.GetBucketPolicyAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Get("",
|
||||
@@ -682,7 +736,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionGetBucketCors, auth.GetBucketCorsAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Get("",
|
||||
@@ -695,7 +749,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionGetObjectLockConfiguration, auth.GetBucketObjectLockConfigurationAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Get("",
|
||||
@@ -708,7 +762,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionGetBucketAcl, auth.GetBucketAclAction, auth.PermissionReadAcp, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, false),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Get("",
|
||||
@@ -721,7 +775,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionListMultipartUploads, auth.ListBucketMultipartUploadsAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Get("",
|
||||
@@ -734,7 +788,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionListObjectVersions, auth.ListBucketVersionsAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Get("",
|
||||
@@ -747,7 +801,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionGetBucketPolicyStatus, auth.GetBucketPolicyStatusAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Get("",
|
||||
@@ -981,7 +1035,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionListObjectsV2, auth.ListBucketAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
bucketRouter.Get("",
|
||||
@@ -993,7 +1047,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionListObjects, auth.ListBucketAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
|
||||
@@ -1016,7 +1070,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.VerifyChecksums(false, true, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
|
||||
@@ -1036,7 +1090,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionHeadObject, auth.GetObjectAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, false),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
|
||||
@@ -1048,6 +1102,12 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
controllers.ProcessHandlers(ctrl.HandleErrorRoute(s3err.GetAPIError(s3err.ErrGetUploadsWithKey)), metrics.ActionUndetected, services),
|
||||
)
|
||||
|
||||
// object operation with '?versions' is rejected with a specific error
|
||||
objectRouter.Get("",
|
||||
middlewares.MatchQueryArgs("versions"),
|
||||
controllers.ProcessHandlers(ctrl.HandleErrorRoute(s3err.GetAPIError(s3err.ErrVersionsWithKey)), metrics.ActionUndetected, services),
|
||||
)
|
||||
|
||||
// object GET operation is not allowed with copy source
|
||||
objectRouter.Get("/",
|
||||
middlewares.MatchHeader("X-Amz-Copy-Source"),
|
||||
@@ -1064,7 +1124,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionGetObjectTagging, auth.GetObjectTaggingAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
objectRouter.Get("",
|
||||
@@ -1077,7 +1137,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionGetObjectRetention, auth.GetObjectRetentionAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
objectRouter.Get("",
|
||||
@@ -1090,7 +1150,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionGetObjectLegalHold, auth.GetObjectLegalHoldAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
objectRouter.Get("",
|
||||
@@ -1103,7 +1163,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionGetObjectAcl, auth.GetObjectAclAction, auth.PermissionReadAcp, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
objectRouter.Get("",
|
||||
@@ -1116,7 +1176,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionGetObjectAttributes, auth.GetObjectAttributesAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
objectRouter.Get("",
|
||||
@@ -1129,7 +1189,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionListParts, auth.ListMultipartUploadPartsAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
objectRouter.Get("",
|
||||
@@ -1141,7 +1201,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionGetObject, auth.GetObjectAction, auth.PermissionRead, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
|
||||
@@ -1163,7 +1223,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionDeleteObjectTagging, auth.DeleteObjectTaggingAction, auth.PermissionWrite, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
objectRouter.Delete("",
|
||||
@@ -1176,7 +1236,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionAbortMultipartUpload, auth.AbortMultipartUploadAction, auth.PermissionWrite, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
objectRouter.Delete("",
|
||||
@@ -1188,7 +1248,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionDeleteObject, auth.DeleteObjectAction, auth.PermissionWrite, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
|
||||
@@ -1212,7 +1272,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.VerifyChecksums(false, false, false),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
objectRouter.Post("",
|
||||
@@ -1227,7 +1287,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.VerifyChecksums(false, false, false),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
objectRouter.Post("",
|
||||
@@ -1240,7 +1300,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionCompleteMultipartUpload, auth.PutObjectAction, auth.PermissionWrite, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
objectRouter.Post("",
|
||||
@@ -1253,7 +1313,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionCreateMultipartUpload, auth.PutObjectAction, auth.PermissionWrite, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
|
||||
@@ -1265,11 +1325,11 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
metrics.ActionPutObjectTagging,
|
||||
services,
|
||||
middlewares.BucketObjectNameValidator(),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionPutObjectTagging, auth.PutObjectTaggingAction, auth.PermissionWrite, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.VerifyChecksums(false, true, false),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
objectRouter.Put("",
|
||||
@@ -1283,7 +1343,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.VerifyChecksums(false, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
objectRouter.Put("",
|
||||
@@ -1297,7 +1357,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.VerifyChecksums(false, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
objectRouter.Put("",
|
||||
@@ -1311,7 +1371,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.VerifyChecksums(false, false, false),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
objectRouter.Put("",
|
||||
@@ -1325,7 +1385,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionUploadPartCopy, auth.PutObjectAction, auth.PermissionWrite, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
objectRouter.Put("",
|
||||
@@ -1339,7 +1399,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, true),
|
||||
middlewares.VerifyV4Signature(root, iam, region, true, true),
|
||||
middlewares.VerifyChecksums(true, false, false),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
|
||||
@@ -1361,10 +1421,10 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
metrics.ActionCopyObject,
|
||||
services,
|
||||
middlewares.BucketObjectNameValidator(),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionCopyObject, auth.PutObjectAction, auth.PermissionWrite, region, false),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, false),
|
||||
middlewares.VerifyV4Signature(root, iam, region, false, true),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
objectRouter.Put("",
|
||||
@@ -1373,18 +1433,31 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
|
||||
metrics.ActionPutObject,
|
||||
services,
|
||||
middlewares.BucketObjectNameValidator(),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.AuthorizePublicBucketAccess(be, metrics.ActionPutObject, auth.PutObjectAction, auth.PermissionWrite, region, true),
|
||||
middlewares.VerifyPresignedV4Signature(root, iam, region, true),
|
||||
middlewares.VerifyV4Signature(root, iam, region, true, true),
|
||||
middlewares.VerifyChecksums(true, false, false),
|
||||
middlewares.ApplyBucketCORS(be),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
|
||||
app.Options("/:bucket/*", controllers.ProcessHandlers(ctrl.CORSOptions, metrics.ActionOptions, services,
|
||||
middlewares.BucketObjectNameValidator(),
|
||||
middlewares.ParseAcl(be),
|
||||
))
|
||||
app.Options("/:bucket",
|
||||
middlewares.ApplyBucketCORSPreflightFallback(be, corsAllowOrigin),
|
||||
controllers.ProcessHandlers(ctrl.CORSOptions, metrics.ActionOptions, services,
|
||||
middlewares.BucketObjectNameValidator(),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
),
|
||||
)
|
||||
|
||||
app.Options("/:bucket/*",
|
||||
middlewares.ApplyBucketCORSPreflightFallback(be, corsAllowOrigin),
|
||||
controllers.ProcessHandlers(ctrl.CORSOptions, metrics.ActionOptions, services,
|
||||
middlewares.BucketObjectNameValidator(),
|
||||
middlewares.ApplyBucketCORS(be, corsAllowOrigin),
|
||||
middlewares.ParseAcl(be),
|
||||
),
|
||||
)
|
||||
|
||||
// Return MethodNotAllowed for all the unmatched routes
|
||||
app.All("*", controllers.ProcessHandlers(ctrl.HandleErrorRoute(s3err.GetAPIError(s3err.ErrMethodNotAllowed)), metrics.ActionUndetected, services))
|
||||
|
||||
253 s3api/router_cors_test.go Normal file
@@ -0,0 +1,253 @@
|
||||
// Copyright 2026 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package s3api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/s3api/middlewares"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
)
|
||||
|
||||
type backendWithCorsOnly struct {
|
||||
backend.BackendUnsupported
|
||||
}
|
||||
|
||||
func (b backendWithCorsOnly) GetBucketCors(ctx context.Context, bucket string) ([]byte, error) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchCORSConfiguration)
|
||||
}
|
||||
|
||||
func TestS3ApiRouter_ListBuckets_DefaultCORSAllowOrigin(t *testing.T) {
|
||||
origin := "https://example.com"
|
||||
|
||||
app := fiber.New()
|
||||
(&S3ApiRouter{}).Init(
|
||||
app,
|
||||
backend.BackendUnsupported{},
|
||||
&auth.IAMServiceInternal{},
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
false,
|
||||
"us-east-1",
|
||||
"",
|
||||
middlewares.RootUserConfig{},
|
||||
origin,
|
||||
)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "/", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("new request: %v", err)
|
||||
}
|
||||
|
||||
resp, err := app.Test(req)
|
||||
if err != nil {
|
||||
t.Fatalf("app.Test: %v", err)
|
||||
}
|
||||
|
||||
if got := resp.Header.Get("Access-Control-Allow-Origin"); got != origin {
|
||||
t.Fatalf("expected Access-Control-Allow-Origin %q, got %q", origin, got)
|
||||
}
|
||||
if got := resp.Header.Get("Access-Control-Expose-Headers"); got == "" {
|
||||
t.Fatalf("expected Access-Control-Expose-Headers to be set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestS3ApiRouter_ListBuckets_OptionsPreflight_DefaultCORS(t *testing.T) {
|
||||
origin := "https://example.com"
|
||||
|
||||
app := fiber.New()
|
||||
(&S3ApiRouter{}).Init(
|
||||
app,
|
||||
backend.BackendUnsupported{},
|
||||
&auth.IAMServiceInternal{},
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
false,
|
||||
"us-east-1",
|
||||
"",
|
||||
middlewares.RootUserConfig{},
|
||||
origin,
|
||||
)
|
||||
|
||||
req, err := http.NewRequest(http.MethodOptions, "/", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("new request: %v", err)
|
||||
}
|
||||
req.Header.Set("Origin", "https://client.example")
|
||||
req.Header.Set("Access-Control-Request-Method", "GET")
|
||||
req.Header.Set("Access-Control-Request-Headers", "authorization")
|
||||
|
||||
resp, err := app.Test(req)
|
||||
if err != nil {
|
||||
t.Fatalf("app.Test: %v", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
t.Fatalf("expected status %d, got %d", http.StatusNoContent, resp.StatusCode)
|
||||
}
|
||||
if got := resp.Header.Get("Access-Control-Allow-Origin"); got != origin {
|
||||
t.Fatalf("expected Access-Control-Allow-Origin %q, got %q", origin, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestS3ApiRouter_PutBucketTagging_ErrorStillIncludesFallbackCORS(t *testing.T) {
|
||||
origin := "http://127.0.0.1:9090"
|
||||
|
||||
app := fiber.New()
|
||||
(&S3ApiRouter{}).Init(
|
||||
app,
|
||||
backendWithCorsOnly{},
|
||||
&auth.IAMServiceInternal{},
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
false,
|
||||
"us-east-1",
|
||||
"",
|
||||
middlewares.RootUserConfig{},
|
||||
origin,
|
||||
)
|
||||
|
||||
req, err := http.NewRequest(http.MethodPut, "/testing?tagging", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("new request: %v", err)
|
||||
}
|
||||
req.Header.Set("Origin", origin)
|
||||
|
||||
resp, err := app.Test(req)
|
||||
if err != nil {
|
||||
t.Fatalf("app.Test: %v", err)
|
||||
}
|
||||
|
||||
if got := resp.Header.Get("Access-Control-Allow-Origin"); got != origin {
|
||||
t.Fatalf("expected Access-Control-Allow-Origin %q, got %q", origin, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestS3ApiRouter_PutObjectTagging_ErrorStillIncludesFallbackCORS(t *testing.T) {
|
||||
origin := "http://127.0.0.1:9090"
|
||||
|
||||
app := fiber.New()
|
||||
(&S3ApiRouter{}).Init(
|
||||
app,
|
||||
backendWithCorsOnly{},
|
||||
&auth.IAMServiceInternal{},
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
false,
|
||||
"us-east-1",
|
||||
"",
|
||||
middlewares.RootUserConfig{},
|
||||
origin,
|
||||
)
|
||||
|
||||
req, err := http.NewRequest(http.MethodPut, "/testing/myobj?tagging", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("new request: %v", err)
|
||||
}
|
||||
req.Header.Set("Origin", origin)
|
||||
|
||||
resp, err := app.Test(req)
|
||||
if err != nil {
|
||||
t.Fatalf("app.Test: %v", err)
|
||||
}
|
||||
|
||||
if got := resp.Header.Get("Access-Control-Allow-Origin"); got != origin {
|
||||
t.Fatalf("expected Access-Control-Allow-Origin %q, got %q", origin, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestS3ApiRouter_CopyObject_ErrorStillIncludesFallbackCORS(t *testing.T) {
|
||||
origin := "http://127.0.0.1:9090"
|
||||
|
||||
app := fiber.New()
|
||||
(&S3ApiRouter{}).Init(
|
||||
app,
|
||||
backendWithCorsOnly{},
|
||||
&auth.IAMServiceInternal{},
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
false,
|
||||
"us-east-1",
|
||||
"",
|
||||
middlewares.RootUserConfig{},
|
||||
origin,
|
||||
)
|
||||
|
||||
req, err := http.NewRequest(http.MethodPut, "/testing/myobj", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("new request: %v", err)
|
||||
}
|
||||
req.Header.Set("Origin", origin)
|
||||
req.Header.Set("X-Amz-Copy-Source", "srcbucket/srckey")
|
||||
|
||||
resp, err := app.Test(req)
|
||||
if err != nil {
|
||||
t.Fatalf("app.Test: %v", err)
|
||||
}
|
||||
|
||||
if got := resp.Header.Get("Access-Control-Allow-Origin"); got != origin {
|
||||
t.Fatalf("expected Access-Control-Allow-Origin %q, got %q", origin, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestS3ApiRouter_PutObject_ErrorStillIncludesFallbackCORS(t *testing.T) {
|
||||
origin := "http://127.0.0.1:9090"
|
||||
|
||||
app := fiber.New()
|
||||
(&S3ApiRouter{}).Init(
|
||||
app,
|
||||
backendWithCorsOnly{},
|
||||
&auth.IAMServiceInternal{},
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
false,
|
||||
"us-east-1",
|
||||
"",
|
||||
middlewares.RootUserConfig{},
|
||||
origin,
|
||||
)
|
||||
|
||||
req, err := http.NewRequest(http.MethodPut, "/testing/myobj", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("new request: %v", err)
|
||||
}
|
||||
req.Header.Set("Origin", origin)
|
||||
|
||||
resp, err := app.Test(req)
|
||||
if err != nil {
|
||||
t.Fatalf("app.Test: %v", err)
|
||||
}
|
||||
|
||||
if got := resp.Header.Get("Access-Control-Allow-Origin"); got != origin {
|
||||
t.Fatalf("expected Access-Control-Allow-Origin %q, got %q", origin, got)
|
||||
}
|
||||
}
|
||||
@@ -46,7 +46,7 @@ func TestS3ApiRouter_Init(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.sa.Init(tt.args.app, tt.args.be, tt.args.iam, nil, nil, nil, nil, false, "us-east-1", middlewares.RootUserConfig{})
			tt.sa.Init(tt.args.app, tt.args.be, tt.args.iam, nil, nil, nil, nil, false, "us-east-1", "", middlewares.RootUserConfig{}, "")
		})
	}
}
@@ -15,7 +15,6 @@
package s3api

import (
	"crypto/tls"
	"errors"
	"net/http"
	"strings"
@@ -41,16 +40,17 @@ const (
)

type S3ApiServer struct {
	app           *fiber.App
	backend       backend.Backend
	router        *S3ApiRouter
	port          string
	cert          *tls.Certificate
	quiet         bool
	readonly      bool
	keepAlive     bool
	health        string
	virtualDomain string
	Router          *S3ApiRouter
	app             *fiber.App
	backend         backend.Backend
	port            string
	CertStorage     *utils.CertStorage
	quiet           bool
	readonly        bool
	keepAlive       bool
	health          string
	virtualDomain   string
	corsAllowOrigin string
}

func New(
@@ -66,7 +66,7 @@
) (*S3ApiServer, error) {
	server := &S3ApiServer{
		backend: be,
		router:  new(S3ApiRouter),
		Router:  new(S3ApiRouter),
		port:    port,
	}

@@ -96,7 +96,7 @@
	// Logging middlewares
	if !server.quiet {
		app.Use(logger.New(logger.Config{
			Format: "${time} | ${status} | ${latency} | ${ip} | ${method} | ${path} | ${error} | ${queryParams}\n",
			Format: "${time} | vgw | ${status} | ${latency} | ${ip} | ${method} | ${path} | ${error} | ${queryParams}\n",
		}))
	}
	// Set up health endpoint if specified
@@ -123,7 +123,7 @@
		app.Use(middlewares.DebugLogger())
	}

	server.router.Init(app, be, iam, l, adminLogger, evs, mm, server.readonly, region, root)
	server.Router.Init(app, be, iam, l, adminLogger, evs, mm, server.readonly, region, server.virtualDomain, root, server.corsAllowOrigin)

	return server, nil
}
@@ -132,13 +132,13 @@ func New(
type Option func(*S3ApiServer)

// WithTLS sets TLS Credentials
func WithTLS(cert tls.Certificate) Option {
	return func(s *S3ApiServer) { s.cert = &cert }
func WithTLS(cs *utils.CertStorage) Option {
	return func(s *S3ApiServer) { s.CertStorage = cs }
}

// WithAdminServer runs admin endpoints with the gateway in the same network
func WithAdminServer() Option {
	return func(s *S3ApiServer) { s.router.WithAdmSrv = true }
	return func(s *S3ApiServer) { s.Router.WithAdmSrv = true }
}

// WithQuiet silences default logging output
@@ -165,9 +165,20 @@ func WithKeepAlive() Option {
	return func(s *S3ApiServer) { s.keepAlive = true }
}

// WithCORSAllowOrigin sets the default CORS Access-Control-Allow-Origin value.
// This is applied when no bucket CORS configuration exists, and for admin APIs.
func WithCORSAllowOrigin(origin string) Option {
	return func(s *S3ApiServer) { s.corsAllowOrigin = origin }
}

func (sa *S3ApiServer) Serve() (err error) {
	if sa.cert != nil {
		return sa.app.ListenTLSWithCertificate(sa.port, *sa.cert)
	if sa.CertStorage != nil {
		ln, err := utils.NewTLSListener(sa.app.Config().Network, sa.port, sa.CertStorage.GetCertificate)
		if err != nil {
			return err
		}

		return sa.app.Listener(ln)
	}
	return sa.app.Listen(sa.port)
}
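A hedged sketch of how the new options could be assembled for `s3api.New`; the rest of `New`'s parameter list is elided in this diff, and the origin URL and certificate storage are illustrative assumptions, not part of the change.

```go
package main

import (
	"github.com/versity/versitygw/s3api"
	"github.com/versity/versitygw/s3api/utils"
)

// certStorage is assumed to be initialized elsewhere (e.g. loaded from cert/key files).
var certStorage *utils.CertStorage

func main() {
	// Options to hand to s3api.New alongside its other (elided) arguments.
	opts := []s3api.Option{
		s3api.WithAdminServer(),                                  // register the admin endpoints on the same listener
		s3api.WithCORSAllowOrigin("https://console.example.com"), // fallback Access-Control-Allow-Origin
		s3api.WithTLS(certStorage),                               // Serve() then uses utils.NewTLSListener + app.Listener
	}
	_ = opts
}
```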
@@ -15,11 +15,11 @@
package s3api

import (
	"crypto/tls"
	"testing"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/s3api/utils"
)

func TestS3ApiServer_Serve(t *testing.T) {
@@ -35,18 +35,18 @@ func TestS3ApiServer_Serve(t *testing.T) {
				app:     fiber.New(),
				backend: backend.BackendUnsupported{},
				port:    "Invalid address",
				router:  &S3ApiRouter{},
				Router:  &S3ApiRouter{},
			},
		},
		{
			name:    "Serve-invalid-address-with-certificate",
			wantErr: true,
			sa: &S3ApiServer{
				app:     fiber.New(),
				backend: backend.BackendUnsupported{},
				port:    "Invalid address",
				router:  &S3ApiRouter{},
				cert:    &tls.Certificate{},
				app:         fiber.New(),
				backend:     backend.BackendUnsupported{},
				port:        "Invalid address",
				Router:      &S3ApiRouter{},
				CertStorage: &utils.CertStorage{},
			},
		},
	}
@@ -182,7 +182,7 @@ func ParseDecodedContentLength(ctx *fiber.Ctx) (int64, error) {
	return decContLength, nil
}

func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time) (io.Reader, error) {
func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, secret string, date time.Time) (io.Reader, error) {
	cLength, err := ParseDecodedContentLength(ctx)
	if err != nil {
		return nil, err
@@ -204,9 +204,9 @@ func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secr
	case payloadTypeStreamingUnsignedTrailer:
		return NewUnsignedChunkReader(r, checksumType, cLength)
	case payloadTypeStreamingSignedTrailer:
		return NewSignedChunkReader(r, authdata, region, secret, date, checksumType)
		return NewSignedChunkReader(r, authdata, secret, date, checksumType, true, cLength)
	case payloadTypeStreamingSigned:
		return NewSignedChunkReader(r, authdata, region, secret, date, "")
		return NewSignedChunkReader(r, authdata, secret, date, "", false, cLength)
	// return not supported for:
	// - STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD
	// - STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER
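For orientation, the payload types switched on above correspond to the standard SigV4 streaming values of `x-amz-content-sha256`; the mapping below is inferred from the constant names and AWS's documented values, and is not shown in this diff.

```go
// Inferred mapping between the reader's payload type constants and the
// x-amz-content-sha256 values defined for SigV4 streaming uploads.
var streamingPayloadValues = map[string]string{
	"payloadTypeStreamingSigned":          "STREAMING-AWS4-HMAC-SHA256-PAYLOAD",         // signed chunks, no trailer
	"payloadTypeStreamingSignedTrailer":   "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER", // signed chunks + trailing checksum
	"payloadTypeStreamingUnsignedTrailer": "STREAMING-UNSIGNED-PAYLOAD-TRAILER",         // unsigned chunks + trailing checksum
}
```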
@@ -36,6 +36,7 @@ const (
	ContextKeyBodyReader  ContextKey = "body-reader"
	ContextKeySkip        ContextKey = "__skip"
	ContextKeyStack       ContextKey = "stack"
	ContextKeyBucketOwner ContextKey = "bucket-owner"
)

func (ck ContextKey) Values() []ContextKey {
@@ -50,6 +51,7 @@ func (ck ContextKey) Values() []ContextKey {
		ContextKeyParsedAcl,
		ContextKeySkipResBodyLog,
		ContextKeyBodyReader,
		ContextKeyBucketOwner,
	}
}

@@ -67,7 +67,10 @@ func ParsePreconditionMatchHeaders(ctx *fiber.Ctx, opts ...preconditionOpt) (*st
	if cfg.withCopySource {
		prefix = "X-Amz-Copy-Source-"
	}
	return GetStringPtr(ctx.Get(prefix + "If-Match")), GetStringPtr(ctx.Get(prefix + "If-None-Match"))

	ifMatch := trimQuotes(ctx.Get(prefix + "If-Match"))
	ifNoneMatch := trimQuotes(ctx.Get(prefix + "If-None-Match"))
	return GetStringPtr(ifMatch), GetStringPtr(ifNoneMatch)
}

// ParsePreconditionDateHeaders parses the "If-Modified-Since" and "If-Unmodified-Since"
@@ -128,6 +131,9 @@ func ParsePreconditionDateHeader(date string) *time.Time {
// if parsing fails, returns nil
func ParseIfMatchSize(ctx *fiber.Ctx) *int64 {
	ifMatchSizeHdr := ctx.Get("x-amz-if-match-size")
	if ifMatchSizeHdr == "" {
		return nil
	}
	ifMatchSize, err := strconv.ParseInt(ifMatchSizeHdr, 10, 64)
	if err != nil {
		debuglogger.Logf("failed to parse 'x-amz-if-match-size': %s", ifMatchSizeHdr)
@@ -136,3 +142,15 @@ func ParseIfMatchSize(ctx *fiber.Ctx) *int64 {

	return &ifMatchSize
}

func trimQuotes(str string) string {
	if len(str) < 2 {
		return str
	}

	if str[0] == str[len(str)-1] && str[0] == '"' {
		return str[1 : len(str)-1]
	}

	return str
}
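A small illustration of the new quote handling: strong ETags arrive quoted on the wire (e.g. `If-Match: "5d41402a..."`), and `trimQuotes` strips exactly one pair of surrounding double quotes while leaving anything else untouched. A hypothetical test sketch inside the same package (trimQuotes is unexported):

```go
func TestTrimQuotesSketch(t *testing.T) {
	// Quoted ETag: the surrounding quotes are removed before comparison.
	if got := trimQuotes(`"5d41402abc4b2a76b9719d911017c592"`); got != "5d41402abc4b2a76b9719d911017c592" {
		t.Fatalf("expected quotes stripped, got %q", got)
	}
	// Not a matched pair of double quotes: returned unchanged.
	if got := trimQuotes(`W/"weak"`); got != `W/"weak"` {
		t.Fatalf("expected value unchanged, got %q", got)
	}
	// Shorter than two bytes: returned unchanged.
	if got := trimQuotes(`x`); got != "x" {
		t.Fatalf("expected value unchanged, got %q", got)
	}
}
```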
@@ -43,9 +43,16 @@ const (
	awsV4                    = "AWS4"
	awsS3Service             = "s3"
	awsV4Request             = "aws4_request"
	trailerSignatureHeader   = "x-amz-trailer-signature"
	trailerSignatureHeader   = "x-amz-trailer-signature:"
	streamPayloadAlgo        = "AWS4-HMAC-SHA256-PAYLOAD"
	streamPayloadTrailerAlgo = "AWS4-HMAC-SHA256-TRAILER"

	maxHeaderSize = 1024
)

var (
	errskipHeader = errors.New("skip to next header")
	delimiter     = []byte{'\r', '\n'}
)
// ChunkReader reads from chunked upload request body, and returns
@@ -66,24 +73,31 @@ type ChunkReader struct {
	isFirstHeader  bool
	region         string
	date           time.Time
	requireTrailer bool
	chunkSizes     []int64
	cLength        int64
	dataRead       int64
}

// NewChunkReader reads from request body io.Reader and parses out the
// chunk metadata in stream. The headers are validated for proper signatures.
// Reading from the chunk reader will read only the object data stream
// without the chunk headers/trailers.
func NewSignedChunkReader(r io.Reader, authdata AuthData, region, secret string, date time.Time, chType checksumType) (io.Reader, error) {
func NewSignedChunkReader(r io.Reader, authdata AuthData, secret string, date time.Time, chType checksumType, requireTrailer bool, cLength int64) (io.Reader, error) {
	chRdr := &ChunkReader{
		r:          r,
		signingKey: getSigningKey(secret, region, date),
		signingKey: getSigningKey(secret, authdata.Region, date),
		// the authdata.Signature is validated in the auth-reader,
		// so we can use that here without any other checks
		prevSig:       authdata.Signature,
		chunkHash:     sha256.New(),
		isFirstHeader: true,
		date:          date,
		region:        region,
		trailer:       chType,
		prevSig:        authdata.Signature,
		chunkHash:      sha256.New(),
		isFirstHeader:  true,
		date:           date,
		region:         authdata.Region,
		trailer:        chType,
		requireTrailer: requireTrailer,
		chunkSizes:     []int64{},
		cLength:        cLength,
	}

	if chType != "" {
@@ -95,7 +109,7 @@ func NewSignedChunkReader(r io.Reader, authdata AuthData, region, secret string,

		chRdr.checksumHash = checksumHasher
	}
	if chType == "" {
	if !requireTrailer {
		debuglogger.Infof("initializing signed chunk reader")
	} else {
		debuglogger.Infof("initializing signed chunk reader with '%v' trailing checksum", chType)
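For reference, the aws-chunked framing this reader consumes looks roughly like the following; the sizes and signatures are illustrative, and the trailing checksum plus `x-amz-trailer-signature` lines appear only when `requireTrailer` is set (the `STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER` case).

```go
// Illustrative aws-chunked request body (CRLF written out as \r\n):
//
//   10000;chunk-signature=ad80c730...8648\r\n
//   <65536 bytes of object data>\r\n
//   400;chunk-signature=0055627c...5497\r\n
//   <1024 bytes of object data>\r\n
//   0;chunk-signature=b6c6ea8a...9df9\r\n
//   x-amz-checksum-crc32:sOO8/Q==\r\n
//   x-amz-trailer-signature:2ca2aba2...f992\r\n
//   \r\n
```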
@@ -121,7 +135,17 @@ func (cr *ChunkReader) Read(p []byte) (int, error) {
		}
	}
	n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
	if err != nil && err != io.EOF {
		return 0, err
	}
	n += int(chunkSize)
	cr.dataRead += int64(n)
	if cr.isEOF {
		if cr.cLength != cr.dataRead {
			debuglogger.Logf("number of bytes expected: (%v), number of bytes read: (%v)", cr.cLength, cr.dataRead)
			return 0, s3err.GetAPIError(s3err.ErrContentLengthMismatch)
		}
	}
	return n, err
}

@@ -130,6 +154,13 @@ func (cr *ChunkReader) Read(p []byte) (int, error) {
	if cr.checksumHash != nil {
		cr.checksumHash.Write(p[:n])
	}
	cr.dataRead += int64(n)
	if cr.isEOF {
		if cr.cLength != cr.dataRead {
			debuglogger.Logf("number of bytes expected: (%v), number of bytes read: (%v)", cr.cLength, cr.dataRead)
			return 0, s3err.GetAPIError(s3err.ErrContentLengthMismatch)
		}
	}
	return n, err
}
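As a sketch of the per-chunk signature check this reader performs, here is the standard SigV4 streaming computation, assumed from the `AWS4-HMAC-SHA256-PAYLOAD` constant above and AWS's specification; `getSigningKey` and the exact string-to-sign layout are not shown in this hunk, so the helpers below are illustrative only.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

// signingKey derives the SigV4 key: an HMAC chain over date, region, service, terminator.
func signingKey(secret, region string, date time.Time) []byte {
	k := hmacSHA256([]byte("AWS4"+secret), []byte(date.Format("20060102")))
	k = hmacSHA256(k, []byte(region))
	k = hmacSHA256(k, []byte("s3"))
	return hmacSHA256(k, []byte("aws4_request"))
}

// chunkSignature computes the expected signature for one chunk. The previous
// chunk's signature is chained into the string to sign, which is why the
// reader tracks prevSig across chunks.
func chunkSignature(key []byte, region, prevSig string, date time.Time, chunkData []byte) string {
	emptyHash := sha256.Sum256(nil)
	dataHash := sha256.Sum256(chunkData)
	strToSign := "AWS4-HMAC-SHA256-PAYLOAD\n" +
		date.Format("20060102T150405Z") + "\n" +
		date.Format("20060102") + "/" + region + "/s3/aws4_request\n" +
		prevSig + "\n" +
		hex.EncodeToString(emptyHash[:]) + "\n" +
		hex.EncodeToString(dataHash[:])
	return hex.EncodeToString(hmacSHA256(key, []byte(strToSign)))
}

func main() {
	date := time.Now().UTC()
	key := signingKey("secret", "us-east-1", date)
	fmt.Println(chunkSignature(key, "us-east-1", "seed-signature-from-auth-header", date, []byte("chunk data")))
}
```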
@@ -328,15 +359,6 @@ func hmac256(key []byte, data []byte) []byte {
	return hash.Sum(nil)
}

var (
	errInvalidChunkFormat = errors.New("invalid chunk header format")
	errskipHeader         = errors.New("skip to next header")
)

const (
	maxHeaderSize = 1024
)

// This returns the chunk payload size, signature, data start offset, and
// error if any. See the AWS documentation for the chunk header format. The
// header[0] byte is expected to be the first byte of the chunk size here.
@@ -344,7 +366,7 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
	stashLen := len(cr.stash)
	if stashLen > maxHeaderSize {
		debuglogger.Logf("the stash length exceeds the maximum allowed chunk header size: (stash len): %v, (header limit): %v", stashLen, maxHeaderSize)
		return 0, "", 0, errInvalidChunkFormat
		return 0, "", 0, s3err.GetAPIError(s3err.ErrIncompleteBody)
	}
	if cr.stash != nil {
		debuglogger.Logf("recovering the stash: (stash len): %v", stashLen)
@@ -360,45 +382,39 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
	// After the first chunk each chunk header should start
	// with "\n\r\n"
	if !cr.isFirstHeader {
		err := readAndSkip(rdr, '\r', '\n')
		err := readAndSkip(rdr, delimiter...)
		if err != nil {
			debuglogger.Logf("failed to read chunk header first 2 bytes: (should be): \\r\\n, (got): %q", header[:min(2, len(header))])
			return cr.handleRdrErr(err, header)
		}
	}

	// read and parse the chunk size
	chunkSizeStr, err := readAndTrim(rdr, ';')
	chunkSize, err := cr.parseChunkSize(rdr, header)
	if err != nil {
		debuglogger.Logf("failed to read chunk size: %v", err)
		return cr.handleRdrErr(err, header)
	}
	chunkSize, err := strconv.ParseInt(chunkSizeStr, 16, 64)
	if err != nil {
		debuglogger.Logf("failed to parse chunk size: (size): %v, (err): %v", chunkSizeStr, err)
		return 0, "", 0, errInvalidChunkFormat
		return 0, "", 0, err
	}

	// read the chunk signature
	err = readAndSkip(rdr, 'c', 'h', 'u', 'n', 'k', '-', 's', 'i', 'g', 'n', 'a', 't', 'u', 'r', 'e', '=')
	err = readAndSkip(rdr, []byte("chunk-signature=")...)
	if err != nil {
		debuglogger.Logf("failed to read 'chunk-signature=': %v", err)
		return cr.handleRdrErr(err, header)
	}
	sig, err := readAndTrim(rdr, '\r')
	sig, err := readBytes(rdr, 64)
	if err != nil {
		debuglogger.Logf("failed to read '\\r', after chunk signature: %v", err)
		debuglogger.Logf("failed to read the chunk signature: %v", err)
		return cr.handleRdrErr(err, header)
	}

	err = readAndSkip(rdr, delimiter...)
	if err != nil {
		debuglogger.Logf("failed to read '\\r\\n' after chunk signature")
		return cr.handleRdrErr(err, header)
	}

	// read and parse the final chunk trailer and checksum
	if chunkSize == 0 {
		if cr.trailer != "" {
			err = readAndSkip(rdr, '\n')
			if err != nil {
				debuglogger.Logf("failed to read \\n before the trailer: %v", err)
				return cr.handleRdrErr(err, header)
			}
		if cr.requireTrailer {
			// parse and validate the trailing header
			trailer, err := readAndTrim(rdr, ':')
			if err != nil {
@@ -407,7 +423,7 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
			}
			if trailer != string(cr.trailer) {
				debuglogger.Logf("incorrect trailer prefix: (expected): %v, (got): %v", cr.trailer, trailer)
				return 0, "", 0, errInvalidChunkFormat
|
||||
return 0, "", 0, s3err.GetAPIError(s3err.ErrMalformedTrailer)
|
||||
}
|
||||
|
||||
algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(trailer, "x-amz-checksum-")))
|
||||
@@ -419,19 +435,19 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
|
||||
return cr.handleRdrErr(err, header)
|
||||
}
|
||||
|
||||
if !IsValidChecksum(checksum, algo) {
|
||||
debuglogger.Logf("invalid checksum value: %v", checksum)
|
||||
return 0, "", 0, s3err.GetInvalidTrailingChecksumHeaderErr(trailer)
|
||||
}
|
||||
|
||||
err = readAndSkip(rdr, '\n')
|
||||
if err != nil {
|
||||
debuglogger.Logf("failed to read \\n after checksum: %v", err)
|
||||
return cr.handleRdrErr(err, header)
|
||||
}
|
||||
|
||||
if !IsValidChecksum(checksum, algo) {
|
||||
debuglogger.Logf("invalid checksum value: %v", checksum)
|
||||
return 0, "", 0, s3err.GetInvalidTrailingChecksumHeaderErr(trailer)
|
||||
}
|
||||
|
||||
// parse the trailing signature
|
||||
trailerSigPrefix, err := readAndTrim(rdr, ':')
|
||||
trailerSigPrefix, err := readBytes(rdr, 24)
|
||||
if err != nil {
|
||||
debuglogger.Logf("failed to read trailing signature prefix: %v", err)
|
||||
return cr.handleRdrErr(err, header)
|
||||
@@ -439,40 +455,44 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
|
||||
|
||||
if trailerSigPrefix != trailerSignatureHeader {
|
||||
debuglogger.Logf("invalid trailing signature prefix: (expected): %v, (got): %v", trailerSignatureHeader, trailerSigPrefix)
|
||||
return 0, "", 0, errInvalidChunkFormat
|
||||
return 0, "", 0, s3err.GetAPIError(s3err.ErrIncompleteBody)
|
||||
}
|
||||
|
||||
trailerSig, err := readAndTrim(rdr, '\r')
|
||||
trailerSig, err := readBytes(rdr, 64)
|
||||
if err != nil {
|
||||
debuglogger.Logf("failed to read trailing signature: %v", err)
|
||||
return cr.handleRdrErr(err, header)
|
||||
}
|
||||
|
||||
err = readAndSkip(rdr, delimiter...)
|
||||
if err != nil {
|
||||
debuglogger.Logf("failed to read '\\r\\n' after last chunk signature")
|
||||
return cr.handleRdrErr(err, header)
|
||||
}
|
||||
|
||||
cr.trailerSig = trailerSig
|
||||
cr.parsedChecksum = checksum
|
||||
}
|
||||
|
||||
// "\r\n\r\n" is followed after the last chunk
|
||||
err = readAndSkip(rdr, '\n', '\r', '\n')
|
||||
err = readAndSkip(rdr, delimiter...)
|
||||
if err != nil {
|
||||
debuglogger.Logf("failed to read \\n\\r\\n at the end of chunk header: %v", err)
|
||||
debuglogger.Logf("failed to read \\r\\n at the end of chunk header: %v", err)
|
||||
return cr.handleRdrErr(err, header)
|
||||
}
|
||||
|
||||
return 0, sig, 0, nil
|
||||
}
|
||||
|
||||
err = readAndSkip(rdr, '\n')
|
||||
if err != nil {
|
||||
debuglogger.Logf("failed to read \\n at the end of chunk header: %v", err)
|
||||
return cr.handleRdrErr(err, header)
|
||||
}
|
||||
// add the chunk size at the end of header parsing
|
||||
// to avoid duplication because of header stashing
|
||||
cr.addChunkSize(chunkSize)
|
||||
|
||||
// find the index of chunk ending: '\r\n'
|
||||
// skip the first 2 bytes as it is the starting '\r\n'
|
||||
// the first chunk doesn't contain the starting '\r\n', but
|
||||
// anyway, trimming the first 2 bytes doesn't pollute the logic.
|
||||
ind := bytes.Index(header[2:], []byte{'\r', '\n'})
|
||||
ind := bytes.Index(header[2:], delimiter)
|
||||
cr.isFirstHeader = false
|
||||
|
||||
// the offset is the found index + 4 - the stash length
|
||||
@@ -495,30 +515,85 @@ func (cr *ChunkReader) stashAndSkipHeader(header []byte) (int64, string, int, er
|
||||
// calls "cr.stashAndSkipHeader" if the passed err is "io.EOF" and cr.isEOF is false
|
||||
// Returns the error otherwise
|
||||
func (cr *ChunkReader) handleRdrErr(err error, header []byte) (int64, string, int, error) {
|
||||
if err == io.EOF {
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
if cr.isEOF {
|
||||
debuglogger.Logf("incomplete chunk encoding, EOF reached")
|
||||
return 0, "", 0, errInvalidChunkFormat
|
||||
return 0, "", 0, s3err.GetAPIError(s3err.ErrIncompleteBody)
|
||||
}
|
||||
return cr.stashAndSkipHeader(header)
|
||||
}
|
||||
return 0, "", 0, err
|
||||
return 0, "", 0, s3err.GetAPIError(s3err.ErrIncompleteBody)
|
||||
}
|
||||
|
||||
// parseChunkSize parses and validates the chunk size
|
||||
func (cr *ChunkReader) parseChunkSize(rdr *bufio.Reader, header []byte) (int64, error) {
|
||||
// read and parse the chunk size
|
||||
chunkSizeStr, err := readAndTrim(rdr, ';')
|
||||
if err != nil {
|
||||
debuglogger.Logf("failed to read chunk size: %v", err)
|
||||
_, _, _, err := cr.handleRdrErr(err, header)
|
||||
return 0, err
|
||||
}
|
||||
chunkSize, err := strconv.ParseInt(chunkSizeStr, 16, 64)
|
||||
if err != nil {
|
||||
debuglogger.Logf("failed to parse chunk size: (size): %v, (err): %v", chunkSizeStr, err)
|
||||
return 0, s3err.GetAPIError(s3err.ErrIncompleteBody)
|
||||
}
|
||||
|
||||
if !cr.isValidChunkSize(chunkSize) {
|
||||
return 0, s3err.GetAPIError(s3err.ErrInvalidChunkSize)
|
||||
}
|
||||
|
||||
return chunkSize, nil
|
||||
}
|
||||
|
||||
// addChunkSize adds the input chunk size to chunkSizes slice
|
||||
func (cr *ChunkReader) addChunkSize(size int64) {
|
||||
cr.chunkSizes = append(cr.chunkSizes, size)
|
||||
}
|
||||
|
||||
// isValidChunkSize checks if the parsed chunk size is valid
|
||||
// they follow one rule: all chunk sizes except for the last one
|
||||
// should be greater than 8192
|
||||
func (cr *ChunkReader) isValidChunkSize(size int64) bool {
|
||||
if len(cr.chunkSizes) == 0 {
|
||||
// any valid number is valid as a first chunk size
|
||||
return true
|
||||
}
|
||||
|
||||
lastChunkSize := cr.chunkSizes[len(cr.chunkSizes)-1]
|
||||
// any chunk size, except the last one should be greater than 8192
|
||||
if size != 0 && lastChunkSize < minChunkSize {
|
||||
debuglogger.Logf("invalid chunk size %v", lastChunkSize)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Algorithm returns the checksum algorithm
|
||||
func (cr *ChunkReader) Algorithm() string {
|
||||
return strings.TrimPrefix(string(cr.trailer), "x-amz-checksum-")
|
||||
}
|
||||
|
||||
// Checksum returns the parsed trailing checksum
|
||||
func (cr *ChunkReader) Checksum() string {
|
||||
return cr.parsedChecksum
|
||||
}
|
||||
|
||||
// reads data from the "rdr" and validates the passed data bytes
|
||||
func readAndSkip(rdr *bufio.Reader, data ...byte) error {
|
||||
for _, d := range data {
|
||||
b, err := rdr.ReadByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if b != d {
|
||||
return errMalformedEncoding
|
||||
}
|
||||
func readAndSkip(rdr *bufio.Reader, expected ...byte) error {
|
||||
buf := make([]byte, len(expected))
|
||||
_, err := io.ReadFull(rdr, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
if bytes.Equal(buf, expected) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return s3err.GetAPIError(s3err.ErrIncompleteBody)
|
||||
}
|
||||
|
||||
// reads string by "delim" and trims the delimiter at the end
|
||||
@@ -530,3 +605,10 @@ func readAndTrim(r *bufio.Reader, delim byte) (string, error) {
|
||||
|
||||
return strings.TrimSuffix(str, string(delim)), nil
|
||||
}
|
||||
|
||||
func readBytes(r *bufio.Reader, count int) (string, error) {
|
||||
buf := make([]byte, count)
|
||||
_, err := io.ReadFull(r, buf)
|
||||
|
||||
return string(buf), err
|
||||
}
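For readers unfamiliar with the aws-chunked framing that this reader parses, here is a stand-alone sketch that builds a minimal chunked body by hand. It is illustrative only: the chunk signature and trailing checksum are placeholder strings rather than real SigV4 values, and the line endings follow what the parser above appears to expect.

```go
package main

import (
	"fmt"
	"strings"
)

// fakeSig stands in for a 64-hex-character SigV4 chunk signature.
const fakeSig = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"

func main() {
	data := strings.Repeat("a", 8192) // one chunk at the minimum allowed size

	var b strings.Builder
	// data chunk: "<hex-size>;chunk-signature=<sig>\r\n<data>\r\n"
	fmt.Fprintf(&b, "%x;chunk-signature=%s\r\n%s\r\n", len(data), fakeSig, data)
	// final zero-length chunk
	fmt.Fprintf(&b, "0;chunk-signature=%s\r\n", fakeSig)
	// optional trailing checksum, trailer signature, and the closing CRLF
	b.WriteString("x-amz-checksum-crc32:AAAAAA==\r\n")
	fmt.Fprintf(&b, "x-amz-trailer-signature:%s\r\n\r\n", fakeSig)

	fmt.Printf("framed %d data bytes as a %d-byte aws-chunked body\n", len(data), b.Len())
}
```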
@@ -35,9 +35,8 @@ import (
)

var (
trailerDelim = []byte{'\n', '\r', '\n'}
minChunkSize int64 = 8192
errMalformedEncoding = errors.New("malformed chunk encoding")
trailerDelim = []byte{'\n', '\r', '\n'}
minChunkSize int64 = 8192
)

type UnsignedChunkReader struct {

@@ -16,11 +16,13 @@ package utils

import (
"bytes"
"crypto/tls"
"encoding/base64"
"encoding/xml"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/url"
"regexp"
@@ -355,7 +357,7 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx) (*objLockCfg, error) {
rDate, err := time.Parse(time.RFC3339, objLockDate)
if err != nil {
debuglogger.Logf("failed to parse retain until date: %v\n", err)
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
return nil, s3err.GetAPIError(s3err.ErrInvalidRetainUntilDate)
}
if rDate.Before(time.Now()) {
debuglogger.Logf("expired retain until date: %v\n", rDate.Format(time.RFC3339))
@@ -887,3 +889,84 @@ func ValidateVersionId(versionId string) error {

return nil
}

// GenerateObjectLocation generates the object location path-styled or host-styled
// depending on the gateway configuration
func GenerateObjectLocation(ctx *fiber.Ctx, virtualDomain, bucket, object string) string {
scheme := ctx.Protocol()
host := ctx.Hostname()

// escape the object name
obj := url.PathEscape(object)

if virtualDomain != "" && strings.Contains(host, virtualDomain) {
// the host already contains the bucket name
return fmt.Sprintf("%s://%s/%s", scheme, host, obj)
}

return fmt.Sprintf(
"%s://%s/%s/%s",
scheme,
host,
bucket,
obj,
)
}

type CertStorage struct {
cert atomic.Pointer[tls.Certificate]
}

func NewCertStorage() *CertStorage {
return &CertStorage{}
}

func (cs *CertStorage) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) {
return cs.cert.Load(), nil
}

func (cs *CertStorage) SetCertificate(certFile string, keyFile string) error {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return fmt.Errorf("unable to set certificate: %w", err)
}

cs.cert.Store(&cert)

return nil
}

func NewTLSListener(network string, address string, getCertificateFunc func(*tls.ClientHelloInfo) (*tls.Certificate, error)) (net.Listener, error) {
config := &tls.Config{
MinVersion: tls.VersionTLS12,
GetCertificate: getCertificateFunc,
}

ln, err := net.Listen(network, address)
if err != nil {
return nil, err
}
return tls.NewListener(ln, config), nil
}

// ValidateNoACLHeaders checks whether any ACL-related request headers are set.
// since ACL operations are not supported on objects, the presence of any ACL headers
// results in a NotImplemented error. It returns nil only when all ACL headers
// are absent.
func ValidateNoACLHeaders(ctx *fiber.Ctx) error {
for _, header := range []string{
"x-amz-acl",
"x-amz-grant-full-control",
"x-amz-grant-read",
"x-amz-grant-read-acp",
"x-amz-grant-write-acp",
} {
value := ctx.Request().Header.Peek(header)
if len(value) != 0 {
debuglogger.Logf("an unsupported object acl header present: %s:%s", header, value)
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
}

return nil
}
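A minimal sketch of how the CertStorage and NewTLSListener helpers above might be wired together to serve TLS with hot certificate reload. The import path and the SIGHUP-based reload trigger are assumptions made for illustration; only the function signatures come from the code above.

```go
package main

import (
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"

	"github.com/versity/versitygw/s3api/utils" // hypothetical import path
)

func main() {
	cs := utils.NewCertStorage()
	if err := cs.SetCertificate("server.crt", "server.key"); err != nil {
		log.Fatal(err)
	}

	// Reload the key pair on SIGHUP; GetCertificate hands the latest copy
	// to new TLS handshakes without restarting the listener.
	hup := make(chan os.Signal, 1)
	signal.Notify(hup, syscall.SIGHUP)
	go func() {
		for range hup {
			if err := cs.SetCertificate("server.crt", "server.key"); err != nil {
				log.Printf("certificate reload failed: %v", err)
			}
		}
	}()

	ln, err := utils.NewTLSListener("tcp", ":8443", cs.GetCertificate)
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.Serve(ln, http.NotFoundHandler()))
}
```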
@@ -127,6 +127,7 @@ const (
ErrRequestNotReadyYet
ErrMissingDateHeader
ErrGetUploadsWithKey
ErrVersionsWithKey
ErrCopySourceNotAllowed
ErrInvalidRequest
ErrAuthNotSetup
@@ -137,9 +138,11 @@ const (
ErrInvalidURI
ErrObjectLockConfigurationNotFound
ErrNoSuchObjectLockConfiguration
ErrInvalidBucketObjectLockConfiguration
ErrMissingObjectLockConfiguration
ErrMissingObjectLockConfigurationNoSpaces
ErrObjectLockConfigurationNotAllowed
ErrObjectLocked
ErrInvalidRetainUntilDate
ErrPastObjectLockRetainDate
ErrObjectLockInvalidRetentionPeriod
ErrInvalidLegalHoldStatus
@@ -199,6 +202,7 @@ const (
ErrAdminInvalidUserRole
ErrAdminMissingUserAcess
ErrAdminMethodNotSupported
ErrAdminEmptyBucketOwnerHeader
)

var errorCodeResponse = map[ErrorCode]APIError{
@@ -542,6 +546,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "Key is not expected for the GET method ?uploads subresource",
HTTPStatusCode: http.StatusBadRequest,
},
ErrVersionsWithKey: {
Code: "InvalidRequest",
Description: "There is no such thing as the ?versions sub-resource for a key",
HTTPStatusCode: http.StatusBadRequest,
},
ErrCopySourceNotAllowed: {
Code: "InvalidArgument",
Description: "You can only specify a copy source header for copy requests.",
@@ -592,9 +601,14 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "The specified object does not have a ObjectLock configuration.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidBucketObjectLockConfiguration: {
ErrMissingObjectLockConfiguration: {
Code: "InvalidRequest",
Description: "Bucket is missing Object Lock Configuration.",
Description: "Bucket is missing Object Lock Configuration",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingObjectLockConfigurationNoSpaces: {
Code: "InvalidRequest",
Description: "Bucket is missing ObjectLockConfiguration",
HTTPStatusCode: http.StatusBadRequest,
},
ErrObjectLockConfigurationNotAllowed: {
@@ -607,14 +621,19 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "Access Denied because object protected by object lock.",
HTTPStatusCode: http.StatusForbidden,
},
ErrInvalidRetainUntilDate: {
Code: "InvalidArgument",
Description: "The retain until date must be provided in ISO 8601 format",
HTTPStatusCode: http.StatusBadRequest,
},
ErrPastObjectLockRetainDate: {
Code: "InvalidRequest",
Description: "the retain until date must be in the future.",
Code: "InvalidArgument",
Description: "The retain until date must be in the future!",
HTTPStatusCode: http.StatusBadRequest,
},
ErrObjectLockInvalidRetentionPeriod: {
Code: "InvalidRetentionPeriod",
Description: "the retention days/years must be positive integer.",
Code: "InvalidArgument",
Description: "Default retention period must be a positive integer value.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidLegalHoldStatus: {
@@ -886,6 +905,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "The method is not supported in single root user mode.",
HTTPStatusCode: http.StatusNotImplemented,
},
ErrAdminEmptyBucketOwnerHeader: {
Code: "XAdminInvalidRequest",
Description: "The x-vgw-owner header specifying the new bucket owner access key id is either missing or empty",
HTTPStatusCode: http.StatusBadRequest,
},
}

// GetAPIError provides API Error for input API error code.
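As a reference for how entries in this table translate to what a client sees, the stand-alone sketch below renders one of the newly added codes as an S3-style XML error body. The APIError field names mirror the struct used above; the lookup value and XML envelope here are illustrative, not the gateway's own code.

```go
package main

import (
	"encoding/xml"
	"fmt"
	"net/http"
)

// APIError mirrors the fields used in errorCodeResponse above.
type APIError struct {
	Code           string
	Description    string
	HTTPStatusCode int
}

// invalidRetainUntilDate copies the entry added for ErrInvalidRetainUntilDate.
var invalidRetainUntilDate = APIError{
	Code:           "InvalidArgument",
	Description:    "The retain until date must be provided in ISO 8601 format",
	HTTPStatusCode: http.StatusBadRequest,
}

// xmlError is a minimal S3-style error envelope for illustration.
type xmlError struct {
	XMLName xml.Name `xml:"Error"`
	Code    string
	Message string
}

func main() {
	body, _ := xml.MarshalIndent(xmlError{
		Code:    invalidRetainUntilDate.Code,
		Message: invalidRetainUntilDate.Description,
	}, "", "  ")
	fmt.Printf("HTTP %d\n%s\n", invalidRetainUntilDate.HTTPStatusCode, body)
}
```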
@@ -24,7 +24,7 @@ func malformedAuthError(format string, args ...any) APIError {
return APIError{
Code: "AuthorizationHeaderMalformed",
Description: fmt.Sprintf("The authorization header is malformed; %s", fmt.Sprintf(format, args...)),
HTTPStatusCode: http.StatusForbidden,
HTTPStatusCode: http.StatusBadRequest,
}
}

@@ -726,10 +726,10 @@ type Checksum struct {
// LocationConstraint represents the GetBucketLocation response
type LocationConstraint struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint"`
Value string `xml:",chardata"`
Value *string `xml:",chardata"`
}

type CreateBucketConfiguration struct {
LocationConstraint string
LocationConstraint *string
TagSet []types.Tag `xml:"Tags>Tag"`
}
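The switch to a *string value relates to how GetBucketLocation reports the default region: us-east-1 comes back as an empty LocationConstraint, while any other region carries its name (the test changes further down check exactly this). The helper below is a stand-alone illustration of that distinction, not code from the gateway.

```go
package main

import "fmt"

// locationConstraintValue returns nil when there is nothing to report
// (us-east-1, the default) and the region name otherwise, so a handler
// can tell "unset" apart from a real value.
func locationConstraintValue(region string) *string {
	if region == "us-east-1" {
		return nil
	}
	return &region
}

func main() {
	for _, r := range []string{"us-east-1", "us-west-1"} {
		if v := locationConstraintValue(r); v == nil {
			fmt.Printf("%s -> empty LocationConstraint\n", r)
		} else {
			fmt.Printf("%s -> %s\n", r, *v)
		}
	}
}
```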
@@ -179,7 +179,7 @@ A single instance can be run with `docker-compose -f docker-compose-bats.yml up

**CREATE_STATIC_USERS_IF_NONEXISTENT**: setup_user_v2, if **AUTOCREATE_USERS** is set to **false**, generate non-existing users if they don't exist, but don't delete them, as with user autogeneration

**DIRECT_POST_COMMAND_DELAY**: in direct mode, time to wait before sending new commands to try to prevent propagation delay issues
**DIRECT_POST_COMMAND_DELAY**: in v1 direct mode, time to wait before sending new commands to try to prevent propagation delay issues

**SKIP_ACL_TESTING**: avoid ACL tests for systems which do not use ACLs

@@ -187,6 +187,8 @@ A single instance can be run with `docker-compose -f docker-compose-bats.yml up

**SKIP_USERS_TESTS**: skip versitygw-specific users tests, set to **false** to test against other S3 gateways

**MAX_OPENSSL_COMMAND_LOG_BYTES**: number of OpenSSL command bytes to display in command log, can prevent the display of too many chars in the case of large payload commands, -1 means display whole command

## REST Scripts

REST scripts are included for calls to S3's REST API in the `./tests/rest_scripts/` folder. To call a script, the following parameters are needed:

@@ -28,7 +28,7 @@ copy_object() {
error=$(send_command aws --no-verify-ssl s3api copy-object --copy-source "$2" --bucket "$3" --key "$4" 2>&1) || exit_code=$?
elif [[ $1 == 's3cmd' ]]; then
log 5 "s3cmd ${S3CMD_OPTS[*]} --no-check-certificate cp s3://$2 s3://$3/$4"
error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate cp "s3://$2" s3://"$3/$4" 2>&1) || exit_code=$?
error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate --region "$AWS_REGION" cp "s3://$2" s3://"$3/$4" 2>&1) || exit_code=$?
elif [[ $1 == 'mc' ]]; then
error=$(send_command mc --insecure cp "$MC_ALIAS/$2" "$MC_ALIAS/$3/$4" 2>&1) || exit_code=$?
else

@@ -36,7 +36,7 @@ create_bucket() {
log 5 "s3cmd ${S3CMD_OPTS[*]} --no-check-certificate mb s3://$2"
error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb s3://"$2" 2>&1) || exit_code=$?
elif [[ $1 == "mc" ]]; then
error=$(send_command mc --insecure mb "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
error=$(send_command mc --insecure mb "$MC_ALIAS"/"$2" --region "$AWS_REGION" 2>&1) || exit_code=$?
else
log 2 "invalid command type $1"
return 1
@@ -59,7 +59,7 @@ create_bucket_invalid_name() {
elif [[ $1 == 's3api' ]]; then
bucket_create_error=$(aws --no-verify-ssl s3api create-bucket --bucket "s3://" 2>&1) || exit_code=$?
elif [[ $1 == 's3cmd' ]]; then
bucket_create_error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb "s3://" 2>&1) || exit_code=$?
bucket_create_error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb --region="$AWS_REGION" "s3://" 2>&1) || exit_code=$?
elif [[ $1 == 'mc' ]]; then
bucket_create_error=$(mc --insecure mb "$MC_ALIAS/." 2>&1) || exit_code=$?
else
@@ -82,7 +82,7 @@ create_bucket_with_user() {
if [[ $1 == "aws" ]] || [[ $1 == "s3api" ]]; then
error=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" send_command aws --no-verify-ssl s3 mb s3://"$2" 2>&1) || exit_code=$?
elif [[ $1 == "s3cmd" ]]; then
error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb --access_key="$3" --secret_key="$4" s3://"$2" 2>&1) || exit_code=$?
error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb --access_key="$3" --secret_key="$4" --region="$AWS_REGION" s3://"$2" 2>&1) || exit_code=$?
elif [[ $1 == "mc" ]]; then
error=$(send_command mc --insecure mb "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
else

@@ -85,6 +85,17 @@ delete_object_version_rest() {
return 0
}

delete_object_version_rest_expect_error() {
if ! check_param_count_v2 "bucket name, object name, version ID, expected code, expected error, expected message" 6 $#; then
return 1
fi
if ! send_rest_command_expect_error "BUCKET_NAME=$1 OBJECT_KEY=$2 VERSION_ID=$3" "./tests/rest_scripts/delete_object.sh" "$4" "$5" "$6"; then
log 2 "error deleting object: $result"
return 1
fi
return 0
}

delete_object_version_bypass_retention() {
if ! check_param_count "delete_object_version_bypass_retention" "bucket, key, version ID" 3 $#; then
return 1

@@ -26,7 +26,11 @@ get_bucket_location() {
elif [[ $1 == 's3cmd' ]]; then
get_bucket_location_s3cmd "$2" || get_result=$?
elif [[ $1 == 'mc' ]]; then
get_bucket_location_mc "$2" || get_result=$?
if ! get_bucket_location_mc "$2"; then
log 2 "error getting mc bucket location"
return 1
fi
return 0
else
log 2 "command type '$1' not implemented for get_bucket_location"
return 1
@@ -58,7 +62,7 @@ get_bucket_location_s3cmd() {
echo "get bucket location (s3cmd) requires bucket name"
return 1
fi
info=$(send_command s3cmd --no-check-certificate info "s3://$1") || results=$?
info=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate info "s3://$1") || results=$?
if [[ $results -ne 0 ]]; then
log 2 "error getting bucket location: $location"
return 1
@@ -69,13 +73,11 @@ get_bucket_location_s3cmd() {

get_bucket_location_mc() {
record_command "get-bucket-location" "client:mc"
if [[ $# -ne 1 ]]; then
log 2 "get bucket location (mc) requires bucket name"
if ! check_param_count_v2 "bucket name" 1 $#; then
return 1
fi
info=$(send_command mc --insecure stat "$MC_ALIAS/$1") || results=$?
if [[ $results -ne 0 ]]; then
log 2 "error getting s3cmd info: $info"
if ! info=$(send_command mc --insecure stat "$MC_ALIAS/$1" 2>&1); then
log 2 "error getting mc info: $info"
return 1
fi
# shellcheck disable=SC2034

@@ -87,7 +87,7 @@ get_bucket_policy_s3cmd() {
return 1
fi

if ! info=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate info "s3://$1" 2>&1); then
if ! info=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate --region "$AWS_REGION" info "s3://$1" 2>&1); then
log 2 "error getting bucket policy: $info"
return 1
fi

@@ -27,7 +27,7 @@ get_object() {
elif [[ $1 == 's3api' ]]; then
get_object_error=$(send_command aws --no-verify-ssl s3api get-object --bucket "$2" --key "$3" "$4" 2>&1) || exit_code=$?
elif [[ $1 == 's3cmd' ]]; then
get_object_error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate get "s3://$2/$3" "$4" 2>&1) || exit_code=$?
get_object_error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate get --force "s3://$2/$3" "$4" 2>&1) || exit_code=$?
elif [[ $1 == 'mc' ]]; then
get_object_error=$(send_command mc --insecure get "$MC_ALIAS/$2/$3" "$4" 2>&1) || exit_code=$?
elif [[ $1 == 'rest' ]]; then

@@ -31,7 +31,7 @@ head_bucket() {
if [[ $1 == 's3api' ]] || [[ $1 == 's3' ]]; then
bucket_info=$(send_command aws --no-verify-ssl s3api head-bucket --bucket "$2" 2>&1) || exit_code=$?
elif [[ $1 == "s3cmd" ]]; then
bucket_info=$(send_command s3cmd --no-check-certificate info "s3://$2" 2>&1) || exit_code=$?
bucket_info=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate info "s3://$2" 2>&1) || exit_code=$?
elif [[ $1 == 'mc' ]]; then
bucket_info=$(send_command mc --insecure stat "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
elif [[ $1 == 'rest' ]]; then
@@ -44,8 +44,10 @@ list_objects() {
fail "invalid command type $1"
return 1
fi
# shellcheck disable=SC2154
assert_success "error listing objects: $output"
if [ "$list_objects_result" -ne 0 ]; then
log 2 "error listing objects: $output"
return 1
fi

object_array=()
while IFS= read -r line; do

@@ -26,11 +26,11 @@ put_bucket_policy() {
if [[ $1 == 's3api' ]]; then
policy=$(send_command aws --no-verify-ssl s3api put-bucket-policy --bucket "$2" --policy "file://$3" 2>&1) || put_policy_result=$?
elif [[ $1 == 's3cmd' ]]; then
policy=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate setpolicy "$3" "s3://$2" 2>&1) || put_policy_result=$?
policy=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate --region "$AWS_REGION" setpolicy "$3" "s3://$2" 2>&1) || put_policy_result=$?
elif [[ $1 == 'mc' ]]; then
policy=$(send_command mc --insecure anonymous set-json "$3" "$MC_ALIAS/$2" 2>&1) || put_policy_result=$?
elif [ "$1" == 'rest' ]; then
put_bucket_policy_rest "$2" "$3" || put_policy_result=$?
put_bucket_policy_rest_200_or_204 "$2" "$3" || put_policy_result=$?
return $put_policy_result
else
log 2 "command 'put bucket policy' not implemented for '$1'"
@@ -71,8 +71,23 @@ put_bucket_policy_rest() {
log 2 "error putting bucket policy: $result"
return 1
fi
if [ "$result" != "200" ]; then
log 2 "expected '200', was '$result' ($(cat "$TEST_FILE_FOLDER/result.txt"))"
if [ "$result" != "204" ]; then
log 2 "expected '204', was '$result' ($(cat "$TEST_FILE_FOLDER/result.txt"))"
return 1
fi
return 0
}

put_bucket_policy_rest_200_or_204() {
if ! check_param_count "put_bucket_policy_rest" "bucket, policy file" 2 $#; then
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" POLICY_FILE="$2" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/put_bucket_policy.sh); then
log 2 "error putting bucket policy: $result"
return 1
fi
if [ "$result" != "200" ] && [ "$result" != "204" ]; then
log 2 "expected '200' or '204', was '$result' ($(cat "$TEST_FILE_FOLDER/result.txt"))"
return 1
fi
return 0

@@ -30,7 +30,7 @@ put_object() {
elif [[ $1 == 's3api' ]]; then
error=$(send_command aws --no-verify-ssl s3api put-object --body "$2" --bucket "$3" --key "$4" 2>&1) || exit_code=$?
elif [[ $1 == 's3cmd' ]]; then
error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate put "$2" s3://"$3/$4" 2>&1) || exit_code=$?
error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate --region "$AWS_REGION" put "$2" s3://"$3/$4" 2>&1) || exit_code=$?
elif [[ $1 == 'mc' ]]; then
error=$(send_command mc --insecure put "$2" "$MC_ALIAS/$3/$4" 2>&1) || exit_code=$?
elif [[ $1 == 'rest' ]]; then

@@ -27,6 +27,28 @@ put_object_lock_configuration() {
return 0
}

put_object_lock_configuration_rest() {
if ! check_param_count_v2 "bucket name, params" 2 $#; then
return 1
fi
if ! send_rest_command "BUCKET_NAME=$1 $2" "./tests/rest_scripts/put_object_lock_configuration.sh"; then
log 2 "error sending put object lock config command or error mismatch"
return 1
fi
return 0
}

put_object_lock_configuration_rest_expect_error() {
if ! check_param_count_v2 "bucket name, params, expected response code, expected error code, expected message" 5 $#; then
return 1
fi
if ! send_rest_command_expect_error "BUCKET_NAME=$1 $2" "./tests/rest_scripts/put_object_lock_configuration.sh" "$3" "$4" "$5"; then
log 2 "error sending put object lock config command or error mismatch"
return 1
fi
return 0
}

remove_retention_policy_rest() {
if ! check_param_count "remove_retention_policy_rest" "bucket" 1 $#; then
return 1

@@ -14,9 +14,10 @@
# specific language governing permissions and limitations
# under the License.

source ./tests/util/util_rest.sh

put_object_tagging() {
if [ $# -ne 5 ]; then
log 2 "'put-object-tagging' command missing command type, bucket, object name, file, key, and/or value"
if ! check_param_count_v2 "command type, bucket, object key, tag key, value" 5 $#; then
return 1
fi
local error

@@ -76,12 +76,10 @@ calculate_composite_checksum() {
fi
log 5 "checksums: ${*:2}"
for checksum in ${@:2}; do
if ! binary_checksum=$(echo -n "$checksum" | base64 -d 2>&1); then
log 2 "error calculating binary checksum: $binary_checksum"
if ! printf '%s' "$checksum" | base64 -d >> "$TEST_FILE_FOLDER/all_checksums.bin"; then
log 2 "error calculating binary checksum and adding to file"
return 1
fi
log 5 "binary checksum: $binary_checksum"
printf "%s" "$binary_checksum" | cat >> "$TEST_FILE_FOLDER/all_checksums.bin"
done
if [ "$1" == "sha256" ]; then
composite=$(openssl dgst -sha256 -binary "$TEST_FILE_FOLDER/all_checksums.bin" | base64)
@@ -96,6 +94,7 @@ calculate_composite_checksum() {
fi
fi
log 5 "composite: $composite"
echo "$composite"
}

test_multipart_upload_with_checksum() {

@@ -15,6 +15,7 @@
# under the License.

source ./tests/commands/list_objects_v2.sh
source ./tests/drivers/list_object_versions/list_object_versions_rest.sh
source ./tests/drivers/xml.sh
source ./tests/util/util_legal_hold.sh

@@ -102,3 +103,50 @@ delete_object_version_with_or_without_retention_base64() {
log 5 "successfully deleted version with key '$key', id '$id'"
return 0
}

put_object_with_lock_mode_and_delete_latest_version() {
if ! check_param_count_v2 "file, bucket, key, later time" 4 $#; then
return 1
fi
if ! send_rest_go_command "200" \
"-bucketName" "$2" "-objectKey" "$3" "-payloadFile" "$1" \
"-method" "PUT" "-contentMD5" "-signedParams" "x-amz-object-lock-mode:GOVERNANCE,x-amz-object-lock-retain-until-date:$4"; then
log 2 "error sending put object command with object lock"
return 1
fi
if ! send_rest_go_command_callback "200" "parse_latest_version_id" \
"-method" "GET" "-bucketName" "$2" "-query" "versions="; then
log 2 "error checking versions before deletion"
return 1
fi
if ! delete_object_version_rest_expect_error "$2" "$3" "$version_id" "403" "AccessDenied" "object protected by object lock"; then
log 2 "shouldn't have been able to delete"
return 1
fi
sleep 15
if ! delete_object_version "$2" "$3" "$version_id"; then
log 2 "error deleting object version"
return 1
fi
return 0
}

attempt_to_delete_version_after_retention_policy() {
if ! check_param_count_v2 "file, bucket name, key" 3 $#; then
return 1
fi
if ! send_rest_go_command "200" \
"-bucketName" "$2" "-objectKey" "$3" "-payloadFile" "$1" "-method" "PUT" "-contentMD5"; then
log 2 "error sending put object command"
return 1
fi
if ! send_rest_go_command_callback "200" "parse_latest_version_id" \
"-method" "GET" "-bucketName" "$2" "-query" "versions="; then
log 2 "error checking versions before deletion"
return 1
fi
if ! delete_object_version_rest_expect_error "$2" "$3" "$version_id" "403" "AccessDenied" "object protected by object lock"; then
log 2 "shouldn't have been able to delete"
return 1
fi
}

@@ -151,3 +151,11 @@ chunked_upload_trailer_success() {
fi
return 0
}

get_file_name() {
if ! uuid=$(uuidgen 2>&1); then
log 2 "error getting UUID: $uuid"
return 1
fi
echo "test-file-${uuid}"
}

@@ -23,8 +23,13 @@ get_check_bucket_location_various() {
return 1
fi
# shellcheck disable=SC2154
if [[ $bucket_location != "null" ]] && [[ $bucket_location != "us-east-1" ]]; then
log 2 "wrong location: '$bucket_location'"
if [ "$AWS_REGION" == "us-east-1" ]; then
if [ "$bucket_location" != "null" ]; then
log 2 "expected 'null' for 'us-east-1' region, got : '$bucket_location'"
return 1
fi
elif [ "$AWS_REGION" != "$bucket_location" ]; then
log 2 "expected bucket location of '$AWS_REGION', got '$bucket_location'"
return 1
fi
return 0
@@ -0,0 +1,41 @@
#!/usr/bin/env bats

# Copyright 2025 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

source ./tests/drivers/xml.sh

check_policy_status() {
if ! check_param_count_v2 "data file" 1 $#; then
return 1
fi
log 5 "data: $(cat "$1")"
if ! check_xml_element "$1" "$expected_policy_status" "PolicyStatus" "IsPublic"; then
log 2 "error checking policy status"
return 1
fi
return 0
}

get_and_check_policy_status() {
if ! check_param_count_v2 "bucket, expected status" 2 $#; then
return 1
fi
expected_policy_status="$2"
if ! send_rest_go_command_callback "200" "check_policy_status" "-bucketName" "$1" "-query" "policyStatus="; then
log 2 "error sending REST go command or checking callback"
return 1
fi
return 0
}
@@ -14,6 +14,8 @@
# specific language governing permissions and limitations
# under the License.

source ./tests/util/util_list_parts.sh

upload_and_check_attributes() {
if ! check_param_count_v2 "bucket, test file, file size" 3 $#; then
return 1

@@ -14,23 +14,23 @@
# specific language governing permissions and limitations
# under the License.

parse_non_latest_version_id() {
if ! check_param_count_v2 "data file" 1 $#; then
parse_version_id() {
if ! check_param_count_v2 "data file, IsLatest val" 2 $#; then
return 1
fi
log 5 "data: $(cat "$1")"
not_latest_string="//*[local-name()=\"Version\"][*[local-name()=\"IsLatest\" and text()=\"false\"]]"
log 5 "match string: $not_latest_string"
version_string="//*[local-name()=\"Version\"][*[local-name()=\"IsLatest\" and text()=\"$2\"]]"
log 5 "match string: $version_string"
if ! get_xml_data "$1" "$1.xml"; then
log 2 "error getting XML data"
return 1
fi
if ! not_latest=$(xmllint --xpath "$not_latest_string" "$1.xml" 2>&1); then
log 2 "error getting result: $not_latest"
if ! version=$(xmllint --xpath "$version_string" "$1.xml" 2>&1); then
log 2 "error getting result: $version"
return 1
fi
log 5 "not latest: $not_latest"
if ! version_id=$(xmllint --xpath "//*[local-name()=\"VersionId\"]/text()" <(echo "$not_latest" | head -n 1) 2>&1); then
log 5 "latest: $2, version: $version"
if ! version_id=$(xmllint --xpath "//*[local-name()=\"VersionId\"]/text()" <(echo "$version" | head -n 1) 2>&1); then
log 2 "error getting version ID: $version_id"
return 1
fi
@@ -38,6 +38,17 @@ parse_non_latest_version_id() {
return 0
}

parse_non_latest_version_id() {
if ! check_param_count_v2 "data file" 1 $#; then
return 1
fi
if ! parse_version_id "$1" "false"; then
log 2 "error getting non-latest version ID"
return 1
fi
return 0
}

get_non_latest_version() {
if ! check_param_count_v2 "bucket" $# 1; then
return 1
@@ -48,3 +59,88 @@ get_non_latest_version() {
fi
return 0
}

check_object_versions_before_deletion() {
if ! check_param_count_v2 "data file" 1 $#; then
return 1
fi
if ! get_xml_data "$1" "$1.tmp"; then
log 2 "error getting XML data"
return 1
fi
if ! parse_versions_rest "$1.tmp"; then
log 2 "error parsing versions"
return 1
fi
if [ "${#version_ids[@]}" -ne 1 ]; then
log 2 "expected version ID count of 1, was '${#version_ids[@]}'"
return 1
fi
version_id="${version_ids[0]}"
log 5 "version ID: $version_id"
return 0
}

check_object_versions_after_deletion() {
if ! check_param_count_v2 "data file" 1 $#; then
return 1
fi
if ! get_xml_data "$1" "$1.tmp"; then
log 2 "error getting XML data"
return 1
fi
if ! parse_versions_rest "$1.tmp"; then
log 2 "error parsing versions"
return 1
fi
if [ "${#version_ids[@]}" -ne 2 ]; then
log 2 "expected version ID count of 2, was '${#version_ids[@]}'"
return 1
fi
if [ "${version_ids[0]}" != "$version_id" ]; then
log 2 "expected version ID of '$version_id', was '${version_ids[0]}'"
return 1
fi
if [ "${version_islatests[0]}" != "false" ]; then
log 2 "expected 'IsLatest' of version ID to be false, was '${version_islatests[0]}'"
return 1
fi
if [ "${version_islatests[1]}" != "true" ]; then
log 2 "expected 'IsLatest' of delete marker to be true, was '${version_islatests[1]}'"
return 1
fi
return 0
}

list_object_versions_before_and_after_retention_deletion() {
if ! check_param_count_v2 "bucket name, file" 2 $#; then
return 1
fi
if ! send_rest_go_command_callback "200" "check_object_versions_before_deletion" \
"-method" "GET" "-bucketName" "$1" "-query" "versions="; then
log 2 "error checking versions before deletion"
return 1
fi
if ! delete_object_rest "$1" "$2"; then
log 2 "error deleting file"
return 1
fi
if ! send_rest_go_command_callback "200" "check_object_versions_after_deletion" \
"-method" "GET" "-bucketName" "$1" "-query" "versions="; then
log 2 "error checking versions before deletion"
return 1
fi
return 0
}

parse_latest_version_id() {
if ! check_param_count_v2 "data file" 1 $#; then
return 1
fi
if ! parse_version_id "$1" "true"; then
log 2 "error getting latest version ID"
return 1
fi
log 5 "version ID: $version_id"
return 0
}

@@ -22,7 +22,8 @@ send_not_implemented_expect_failure() {
log 2 "'send_not_implemented_expect_failure' param count must be multiple of 2 (key/value pairs)"
return 1
fi
if ! curl_command=$(go run ./tests/rest_scripts/generateCommand.go -awsAccessKeyId "$AWS_ACCESS_KEY_ID" -awsSecretAccessKey "$AWS_SECRET_ACCESS_KEY" -url "$AWS_ENDPOINT_URL" "$@" 2>&1); then
if ! curl_command=$(go run ./tests/rest_scripts/generateCommand.go -awsAccessKeyId "$AWS_ACCESS_KEY_ID" \
-awsSecretAccessKey "$AWS_SECRET_ACCESS_KEY" -awsRegion "$AWS_REGION" -url "$AWS_ENDPOINT_URL" "$@" 2>&1); then
log 2 "error: $curl_command"
return 1
fi

@@ -16,6 +16,32 @@

source ./tests/drivers/xml.sh

write_openssl_command_to_command_log() {
if ! check_param_count_v2 "command file" 1 $#; then
return 1
fi
max_chars=1024
if [ -n "$MAX_OPENSSL_COMMAND_LOG_BYTES" ]; then
max_chars="$MAX_OPENSSL_COMMAND_LOG_BYTES"
fi
if ! file_size=$(get_file_size "$1"); then
return 1
fi
if [ "$max_chars" -eq -1 ] || [ "$file_size" -lt "$max_chars" ]; then
log_data=$(perl -pe 's/\x00/<NULL>/g' "$1" | perl -pe 's/\r/<CR>/g')
else
log_data=$(head -c "$max_chars" "$1" | perl -pe 's/\x00/<NULL>/g' | perl -pe 's/\r/<CR>/g')
log_data+="<TRUNC>"
fi
while IFS=$' ' read -r -a line_words; do
if ! mask_arg_array "${line_words[@]}"; then
return 1
fi
# shellcheck disable=SC2154
echo "${masked_args[*]}" >> "$COMMAND_LOG"
done <<< "$log_data"
}

send_via_openssl() {
if ! check_param_count_v2 "command file" 1 $#; then
return 1
@@ -25,6 +51,9 @@ send_via_openssl() {
host+=":443"
fi
log 5 "connecting to $host"
if [ -n "$COMMAND_LOG" ]; then
write_openssl_command_to_command_log "$1"
fi
if ! result=$(openssl s_client -connect "$host" -ign_eof < "$1" 2>&1); then
log 2 "error sending openssl command: $result"
return 1
@@ -118,7 +147,8 @@ send_openssl_go_command_expect_error() {
if ! check_param_count_gt "expected HTTP code, expected error code, expected message, params" 4 $#; then
return 1
fi
if ! result=$(go run "./tests/rest_scripts/generateCommand.go" "-awsAccessKeyId" "$AWS_ACCESS_KEY_ID" "-awsSecretAccessKey" "$AWS_SECRET_ACCESS_KEY" "-url" "$AWS_ENDPOINT_URL" "-client" "openssl" "-filePath" "$TEST_FILE_FOLDER/openssl_command.txt" "${@:4}" 2>&1); then
if ! result=$(go run "./tests/rest_scripts/generateCommand.go" "-awsAccessKeyId" "$AWS_ACCESS_KEY_ID" "-awsSecretAccessKey" \
"$AWS_SECRET_ACCESS_KEY" "-url" "$AWS_ENDPOINT_URL" "-awsRegion" "$AWS_REGION" "-client" "openssl" "-filePath" "$TEST_FILE_FOLDER/openssl_command.txt" "${@:4}" 2>&1); then
log 2 "error sending go command and checking error: $result"
return 1
fi
@@ -133,8 +163,8 @@ send_openssl_go_command() {
if ! check_param_count_gt "expected HTTP code, params" 2 $#; then
return 1
fi
if ! go run "./tests/rest_scripts/generateCommand.go" "-awsAccessKeyId" "$AWS_ACCESS_KEY_ID" "-awsSecretAccessKey" "$AWS_SECRET_ACCESS_KEY" "-awsRegion" "$AWS_REGION" "-url" "$AWS_ENDPOINT_URL" "-client" "openssl" "-filePath" "$TEST_FILE_FOLDER/openssl_command.txt" "${@:2}"; then
log 2 "error sending go command and checking error"
if ! result=$(go run "./tests/rest_scripts/generateCommand.go" "-awsAccessKeyId" "$AWS_ACCESS_KEY_ID" "-awsSecretAccessKey" "$AWS_SECRET_ACCESS_KEY" "-awsRegion" "$AWS_REGION" "-url" "$AWS_ENDPOINT_URL" "-client" "openssl" "-filePath" "$TEST_FILE_FOLDER/openssl_command.txt" "${@:2}" 2>&1); then
log 2 "error sending go command and checking error: $result"
return 1
fi
if ! result=$(send_via_openssl_and_check_code "$TEST_FILE_FOLDER/openssl_command.txt" "$1" 2>&1); then
@@ -148,8 +178,9 @@ send_openssl_go_command_check_header() {
if ! check_param_count_gt "expected HTTP code, header key, value, params" 4 $#; then
return 1
fi
if ! go run "./tests/rest_scripts/generateCommand.go" "-awsAccessKeyId" "$AWS_ACCESS_KEY_ID" "-awsSecretAccessKey" "$AWS_SECRET_ACCESS_KEY" "-url" "$AWS_ENDPOINT_URL" "-client" "openssl" "-filePath" "$TEST_FILE_FOLDER/openssl_command.txt" "${@:4}"; then
log 2 "error sending go command and checking error"
if ! result=$(go run "./tests/rest_scripts/generateCommand.go" "-awsAccessKeyId" "$AWS_ACCESS_KEY_ID" "-awsSecretAccessKey" "$AWS_SECRET_ACCESS_KEY" \
"-awsRegion" "$AWS_REGION" "-url" "$AWS_ENDPOINT_URL" "-client" "openssl" "-filePath" "$TEST_FILE_FOLDER/openssl_command.txt" "${@:4}" 2>&1); then
log 2 "error sending go command and checking error: $result"
return 1
fi
if ! send_via_openssl_and_check_code_header "$TEST_FILE_FOLDER/openssl_command.txt" "$1" "$2" "$3"; then
tests/drivers/put_bucket_policy/put_bucket_policy.sh (new file, 31 lines)
@@ -0,0 +1,31 @@
#!/usr/bin/env bats

# Copyright 2025 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

put_and_check_for_malformed_policy() {
if ! check_param_count "put_and_check_for_malformed_policy" "bucket, policy file" 2 $#; then
return 1
fi
if put_bucket_policy "s3api" "$1" "$2"; then
log 2 "put succeeded despite malformed policy"
return 1
fi
# shellcheck disable=SC2154
if [[ "$put_bucket_policy_error" != *"MalformedPolicy"*"invalid action"* ]]; then
log 2 "invalid policy error: $put_bucket_policy_error"
return 1
fi
return 0
}
tests/drivers/put_bucket_policy/put_bucket_policy_rest.sh (new file, 54 lines)
@@ -0,0 +1,54 @@
#!/usr/bin/env bats

# Copyright 2025 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

put_simple_bucket_policy() {
if ! check_param_count_v2 "bucket" 1 $#; then
return 1
fi
if [ "$DIRECT" == "true" ]; then
user_id="$DIRECT_S3_ROOT_ACCOUNT_NAME"
else
user_id="$AWS_ACCESS_KEY_ID"
fi

if ! setup_policy_with_single_statement "$TEST_FILE_FOLDER/policy_file" "2012-10-17" "Allow" "$user_id" "s3:*" "arn:aws:s3:::$1"; then
log 2 "error setting up policy"
return 1
fi
log 5 "policy: $TEST_FILE_FOLDER/policy_file"
if ! put_bucket_policy_rest "$1" "$TEST_FILE_FOLDER/policy_file"; then
log 2 "error putting policy"
return 1
fi
return 0
}

put_public_bucket_policy() {
if ! check_param_count_v2 "bucket" 1 $#; then
return 1
fi

if ! setup_policy_with_single_statement "$TEST_FILE_FOLDER/policy_file" "2012-10-17" "Allow" "*" "s3:*" "arn:aws:s3:::$1"; then
log 2 "error setting up policy"
return 1
fi

if ! put_bucket_policy_rest "$1" "$TEST_FILE_FOLDER/policy_file"; then
log 2 "error putting policy"
return 1
fi
return 0
}
@@ -289,3 +289,31 @@ check_for_header_key_and_value() {
log 2 "no header key '$2' found"
return 1
}

check_argument_name_and_value() {
if ! check_param_count_v2 "data file" 1 $#; then
return 1
fi
if ! check_error_parameter "$1" "ArgumentName" "$argument_name"; then
log 2 "error checking 'ArgumentName' parameter"
return 1
fi
if ! check_error_parameter "$1" "ArgumentValue" "$argument_value"; then
log 2 "error checking 'ArgumentValue' parameter"
return 1
fi
return 0
}

send_rest_go_command_expect_error_with_arg_name_value() {
if ! check_param_count_gt "response code, error code, message, arg name, arg value, params" 5 $#; then
return 1
fi
argument_name=$4
argument_value=$5
if ! send_rest_go_command_expect_error_callback "$1" "$2" "$3" "check_argument_name_and_value" "${@:6}"; then
log 2 "error checking error response values"
return 1
fi
return 0
}

@@ -18,7 +18,7 @@ check_tags_empty() {
if ! check_param_count_v2 "command type" 1 $#; then
return 1
fi
if [[ $1 == 'aws' ]]; then
if [ "$1" == 'aws' ] || [ "$1" == 's3api' ]; then
# shellcheck disable=SC2154
if [[ $tags == "" ]]; then
return 0

@@ -281,7 +281,7 @@ check_user_vars() {
exit 1
fi
IAM_PARAMS="--s3-iam-access $AWS_ACCESS_KEY_ID --s3-iam-secret $AWS_SECRET_ACCESS_KEY \
--s3-iam-region us-east-1 --s3-iam-bucket $USERS_BUCKET --s3-iam-endpoint $AWS_ENDPOINT_URL \
--s3-iam-region $AWS_REGION --s3-iam-bucket $USERS_BUCKET --s3-iam-endpoint $AWS_ENDPOINT_URL \
--s3-iam-noverify"
export IAM_PARAMS
return 0
tests/generate_matrix.sh (new executable file, 92 lines)
@@ -0,0 +1,92 @@
#!/usr/bin/env bash

# Copyright 2026 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# generate github-actions matrix for system.yml

source ./tests/drivers/params.sh

set -euo pipefail

files=()
iam_types=()
regions=()
idx=0

check_for_and_load_test_file_and_params() {
if ! check_param_count_v2 "file name" 1 $#; then
exit 1
fi
if grep -q '@test' "$1"; then
if [ $(( idx % 8 )) -eq 0 ]; then
iam="s3"
else
iam="folder"
fi
iam_types+=("$iam")
if [ $(( idx % 4 )) -eq 0 ]; then
region="us-west-1"
else
region="us-east-1"
fi
regions+=("$region")
files+=("$1")
idx=$((idx + 1))
fi
}

while IFS= read -r f; do
check_for_and_load_test_file_and_params "$f"
done < <(find tests -name 'test_*.sh' | sort)

files_json_arr=$(printf '%s\n' "${files[@]}" | jq -R . | jq -s .)
regions_json_arr=$(printf '%s\n' "${regions[@]}" | jq -R . | jq -s .)
iam_types_json_arr=$(printf '%s\n' "${iam_types[@]}" | jq -R . | jq -s .)

matrix_json=$(
jq -n \
--argjson files "$files_json_arr" \
--argjson regions "$regions_json_arr" \
--argjson iam_types "$iam_types_json_arr" \
'
{
include:
[ range(0; ($files|length)) as $i
| [
{
desc: ("Run " + $files[$i] + ", non-static, " + $regions[$i] + " region, " + $iam_types[$i] + " IAM type"),
RUN_SET: $files[$i],
AWS_REGION: $regions[$i],
IAM_TYPE: $iam_types[$i],
BACKEND: "posix",
RECREATE_BUCKETS: "true",
DELETE_BUCKETS_AFTER_TEST: "true"
},
{
desc: ("Run " + $files[$i] + ", static, " + $regions[$i] + " region, " + $iam_types[$i] + " IAM type"),
RUN_SET: $files[$i],
AWS_REGION: $regions[$i],
IAM_TYPE: $iam_types[$i],
BACKEND: "posix",
RECREATE_BUCKETS: "false",
DELETE_BUCKETS_AFTER_TEST: "false"
}
]
] | add
}
'
)

echo "$matrix_json"
Some files were not shown because too many files have changed in this diff.