Mirror of https://github.com/versity/versitygw.git (synced 2026-01-28 05:52:03 +00:00)

Compare commits: main...ben/read_o (1 commit, e9b41d53b6)
.github/ISSUE_TEMPLATE/bug_report.md | 18 (vendored)
@@ -1,23 +1,27 @@
---
name: Bug Report
name: Bug report
about: Create a report to help us improve
title: '[Bug] - <Short Description>'
title: ''
labels: bug
assignees: ''

---

**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
A clear and concise description of what the bug is.

**To Reproduce**
<!-- Steps to reproduce the behavior. -->
Steps to reproduce the behavior.

**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
A clear and concise description of what you expected to happen.

**Server Version**
<!-- output of: './versitygw -version && uname -a' -->
output of
```
./versitygw -version
uname -a
```

**Additional context**
<!-- Describe s3 client and version if applicable. -->
Describe s3 client and version if applicable.

.github/ISSUE_TEMPLATE/feature_request.md | 8 (vendored)
@@ -1,14 +1,14 @@
---
name: Feature Request
name: Feature request
about: Suggest an idea for this project
title: '[Feature] - <Short Description>'
title: ''
labels: enhancement
assignees: ''

---

**Describe the solution you'd like**
<!-- A clear and concise description of what you want to happen. -->
A clear and concise description of what you want to happen.

**Additional context**
<!-- Add any other context or screenshots about the feature request here. -->
Add any other context or screenshots about the feature request here.

.github/ISSUE_TEMPLATE/test_case.md | 33 (vendored)
@@ -1,33 +0,0 @@
---
name: Test Case Request
about: Request new test cases or additional test coverage
title: '[Test Case] - <Short Description>'
labels: 'testcase'
assignees: ''

---

## Description
<!-- Please provide a detailed description of the test case or test coverage request. -->

## Purpose
<!-- Explain why this test case is important and what it aims to achieve. -->

## Scope
<!-- Describe the scope of the test case, including any specific functionalities, features, or modules that should be tested. -->

## Acceptance Criteria
<!-- List the criteria that must be met for the test case to be considered complete. -->

1.
2.
3.

## Additional Context
<!-- Add any other context or screenshots about the feature request here. -->

## Resources
<!-- Provide any resources, documentation, or links that could help in writing the test case. -->

**Thank you for contributing to our project!**

.github/SECURITY.md | 25 (vendored)
@@ -1,25 +0,0 @@
# Security Policy

## Reporting a Vulnerability

If you discover a security vulnerability in `versitygw`, we strongly encourage you to report it privately and responsibly.

Please do **not** create public issues or pull requests that contain details about the vulnerability.

Instead, report the issue using GitHub's private **Security Advisories** feature:

- Go to [versitygw's Security Advisories page](https://github.com/versity/versitygw/security/advisories)
- Click on **"Report a vulnerability"**

We aim to respond within **2 business days** and work with you to quickly resolve the issue.

## Supported Versions

| Version         | Supported |
| --------------- | --------- |
| Latest (v1.x.x) | ✅        |
| Older versions  | ❌        |

## Responsible Disclosure

We appreciate responsible disclosures and are committed to fixing vulnerabilities in a timely manner. Thank you for helping keep `versitygw` secure.

.github/dependabot.yml | 4 (vendored)
@@ -12,7 +12,3 @@ updates:
      # Allow both direct and indirect updates for all packages
      - dependency-type: "all"

  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"

.github/workflows/azurite.yml | 37 (vendored)
@@ -1,37 +0,0 @@
name: azurite functional tests
permissions: {}
on: pull_request

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Set up Go
        uses: actions/setup-go@v6
        with:
          go-version: 'stable'
        id: go

      - name: Set up Docker Compose
        run: |
          docker compose -f tests/docker-compose.yml --env-file .env.dev --project-directory . up -d azurite azuritegw

      - name: Wait for Azurite to be ready
        run: sleep 40

      - name: Get Dependencies
        run: |
          go mod download

      - name: Build and Run
        run: |
          make
          ./versitygw test -a user -s pass -e http://127.0.0.1:7070 full-flow --azure

      - name: Shut down services
        run: |
          docker compose -f tests/docker-compose.yml --env-file .env.dev --project-directory . down azurite azuritegw

.github/workflows/codeql.yml | 108 (vendored)
@@ -1,108 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL Advanced"

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]
  schedule:
    - cron: '21 17 * * 2'

jobs:
  analyze:
    name: Analyze (${{ matrix.language }})
    # Runner size impacts CodeQL analysis time. To learn more, please see:
    #   - https://gh.io/recommended-hardware-resources-for-running-codeql
    #   - https://gh.io/supported-runners-and-hardware-resources
    #   - https://gh.io/using-larger-runners (GitHub.com only)
    # Consider using larger runners or machines with greater resources for possible analysis time improvements.
    runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
    permissions:
      # required for all workflows
      security-events: write

      # required to fetch internal or private CodeQL packs
      packages: read

      # only required for workflows in private repositories
      actions: read
      contents: read

    strategy:
      fail-fast: false
      matrix:
        include:
          - language: actions
            build-mode: none
          - language: go
            build-mode: autobuild
          - language: javascript-typescript
            build-mode: none
            paths-ignore:
              # ignore embedded 3rd party assets
              - 'webui/web/assets/**'
          - language: python
            build-mode: none
        # CodeQL supports the following values keywords for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'rust', 'swift'
        # Use `c-cpp` to analyze code written in C, C++ or both
        # Use 'java-kotlin' to analyze code written in Java, Kotlin or both
        # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
        # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
        # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
        # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
        # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
    steps:
      - name: Checkout repository
        uses: actions/checkout@v6

      # Add any setup steps before running the `github/codeql-action/init` action.
      # This includes steps like installing compilers or runtimes (`actions/setup-node`
      # or others). This is typically only required for manual builds.
      # - name: Setup runtime (example)
      #   uses: actions/setup-example@v1

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v4
        with:
          languages: ${{ matrix.language }}
          build-mode: ${{ matrix.build-mode }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.

          # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
          # queries: security-extended,security-and-quality

      # If the analyze step fails for one of the languages you are analyzing with
      # "We were unable to automatically build your code", modify the matrix above
      # to set the build mode to "manual" for that language. Then modify this step
      # to build your code.
      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
      - name: Run manual build steps
        if: matrix.build-mode == 'manual'
        shell: bash
        run: |
          echo 'If you are using a "manual" build mode for one or more of the' \
            'languages you are analyzing, replace this with the commands to build' \
            'your code, for example:'
          echo '  make bootstrap'
          echo '  make release'
          exit 1

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v4
        with:
          category: "/language:${{matrix.language}}"

.github/workflows/docker-bats.yml | 28 (vendored)
@@ -1,28 +0,0 @@
name: docker bats tests
permissions: {}
on: pull_request

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Build Docker Image
        run: |
          cp tests/.env.docker.default tests/.env.docker
          cp tests/.secrets.default tests/.secrets
          docker build \
            --build-arg="GO_LIBRARY=go1.23.1.linux-amd64.tar.gz" \
            --build-arg="AWS_CLI=awscli-exe-linux-x86_64.zip" \
            --build-arg="MC_FOLDER=linux-amd64" \
            --progress=plain \
            -f tests/Dockerfile_test_bats \
            -t bats_test .

      - name: Run Docker Container
        run: |
          docker compose -f tests/docker-compose-bats.yml --project-directory . \
            up --exit-code-from s3api_np_only s3api_np_only

@@ -1,4 +1,5 @@
name: Publish Docker image

on:
  release:
    types: [published]
@@ -12,14 +13,7 @@ jobs:
      contents: read
    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v3
        uses: actions/checkout@v4

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
@@ -43,13 +37,12 @@ jobs:
            ghcr.io/${{ github.repository }}

      - name: Build and push Docker images
        uses: docker/build-push-action@v6
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          platforms: linux/amd64,linux/arm64
          build-args: |
            VERSION=${{ github.event.release.tag_name }}
            TIME=${{ github.event.release.published_at }}

.github/workflows/functional.yml | 9 (vendored)
@@ -1,25 +1,24 @@
name: functional tests
permissions: {}
on: pull_request

jobs:

  build:
    name: RunTests
    runs-on: ubuntu-latest
    steps:

      - name: Checkout
        uses: actions/checkout@v6
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v6
        uses: actions/setup-go@v5
        with:
          go-version: 'stable'
        id: go

      - name: Get Dependencies
        run: |
          go mod download
          go get -v -t -d ./...

      - name: Build and Run
        run: |

.github/workflows/go.yml | 34 (vendored)
@@ -1,18 +1,17 @@
name: general
permissions: {}
on: pull_request
jobs:

  build:
    name: Go Basic Checks
    name: Build
    runs-on: ubuntu-latest
    steps:

      - name: Check out code into the Go module directory
        uses: actions/checkout@v6
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v6
        uses: actions/setup-go@v5
        with:
          go-version: 'stable'
        id: go
@@ -24,6 +23,9 @@ jobs:
        run: |
          go get -v -t -d ./...

      - name: Build
        run: make

      - name: Test
        run: go test -coverprofile profile.txt -race -v -timeout 30s -tags=github ./...

@@ -33,26 +35,4 @@ jobs:

      - name: Run govulncheck
        run: govulncheck ./...
        shell: bash

  verify-build:
    name: Verify Build Targets
    needs: build
    runs-on: ubuntu-latest
    strategy:
      matrix:
        os: [darwin, freebsd, linux]
        arch: [amd64, arm64]
    steps:

      - name: Check out code
        uses: actions/checkout@v6

      - name: Set up Go
        uses: actions/setup-go@v6
        with:
          go-version: 'stable'

      - name: Build for ${{ matrix.os }}/${{ matrix.arch }}
        run: |
          GOOS=${{ matrix.os }} GOARCH=${{ matrix.arch }} go build -o versitygw-${{ matrix.os }}-${{ matrix.arch }} cmd/versitygw/*.go
        shell: bash

.github/workflows/goreleaser.yml | 16 (vendored)
@@ -1,18 +1,22 @@
name: goreleaser
permissions:
  contents: write

on:
  push:
    # run only against tags
    tags:
      - '*'

permissions:
  contents: write
  # packages: write
  # issues: write

jobs:
  goreleaser:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v6
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

@@ -20,15 +24,15 @@ jobs:
        run: git fetch --force --tags

      - name: Setup Go
        uses: actions/setup-go@v6
        uses: actions/setup-go@v5
        with:
          go-version: stable

      - name: Run Releaser
        uses: goreleaser/goreleaser-action@v6
        uses: goreleaser/goreleaser-action@v5
        with:
          distribution: goreleaser
          version: '~> v2'
          version: latest
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.TOKEN }}

.github/workflows/host-style-tests.yml | 13 (vendored)
@@ -1,13 +0,0 @@
name: host style tests
permissions: {}
on: pull_request

jobs:
  build-and-run:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: run host-style tests
        run: make test-host-style

.github/workflows/shellcheck.yml | 17 (vendored)
@@ -1,17 +0,0 @@
name: shellcheck
permissions: {}
on: pull_request
jobs:

  build:
    name: Run shellcheck
    runs-on: ubuntu-latest
    steps:

      - name: Check out code
        uses: actions/checkout@v6

      - name: Run checks
        run: |
          shellcheck --version
          shellcheck -e SC1091 tests/*.sh tests/*/*.sh

.github/workflows/skips.yml | 84 (vendored)
@@ -1,84 +0,0 @@
name: skips check
permissions: {}
on: workflow_dispatch
jobs:
  skip-ticket-check:
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@v6

      - name: Fail if any skip descriptions are empty or point to closed issues/PRs
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          set -euo pipefail

          # Find uncommented lines with "skip " (ignore lines whose first non-space char is #)
          mapfile -t MATCHES < <(
            git ls-files 'tests/test_*.sh' \
              | xargs -r grep -nE '^[[:space:]]*[^#][[:space:]]*skip[[:space:]]*$' \
              || true
          )

          if [ ${#MATCHES[@]} -ne 0 ]; then
            echo "${#MATCHES[@]} skip(s) lack a description"
            printf ' - %s\n' "${MATCHES[@]}"
            exit 1
          fi

          mapfile -t MATCHES < <(
            git ls-files 'tests/test_*.sh' \
              | xargs -r grep -nE '^[[:space:]]*[^#][[:space:]]*skip[[:space:]]*"https://github.com' \
              || true
          )

          urls=()
          for m in "${MATCHES[@]}"; do
            # Extract first GitHub issue/PR URL on the line:
            # supports /issues/123 and /pull/123 (with or without extra suffix)
            url="$(echo "$m" | grep -oE 'https://github\.com/[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+/(issues|pull)/[0-9]+' | head -n1 || true)"
            if [ -n "$url" ]; then
              urls+=("$url")
            fi
          done

          if [ ${#urls[@]} -eq 0 ]; then
            echo "Found skip lines, but no recognizable GitHub issue/PR URLs."
            exit 0
          fi

          echo "Found skip ticket URLs:"
          printf ' - %s\n' "${urls[@]}"

          closed=()

          for url in "${urls[@]}"; do
            # Parse owner/repo and number from URL
            # url format: https://github.com/OWNER/REPO/issues/123 or /pull/123
            path="${url#https://github.com/}"
            owner="$(echo "$path" | cut -d/ -f1)"
            repo="$(echo "$path" | cut -d/ -f2)"
            num="$(echo "$path" | cut -d/ -f4)"

            # Issues API works for both issues and PRs; state=open/closed
            state="$(curl -fsSL \
              -H "Authorization: Bearer $GH_TOKEN" \
              -H "Accept: application/vnd.github+json" \
              "https://api.github.com/repos/$owner/$repo/issues/$num" \
              | python -c "import sys,json; print(json.load(sys.stdin).get('state',''))")"

            echo "$url -> $state"
            if [ "$state" = "closed" ]; then
              closed+=("$url")
            fi
          done

          if [ ${#closed[@]} -gt 0 ]; then
            echo "::error::Closed tickets referenced by uncommented skip URLs:"
            printf '::error:: - %s\n' "${closed[@]}"
            exit 1
          fi

          echo "All referenced tickets are open. ✅"

.github/workflows/static.yml | 5 (vendored)
@@ -1,5 +1,4 @@
name: staticcheck
permissions: {}
on: pull_request
jobs:

@@ -9,12 +8,12 @@ jobs:
    steps:

      - name: Checkout
        uses: actions/checkout@v6
        uses: actions/checkout@v4
        with:
          fetch-depth: 1

      - name: Set up Go
        uses: actions/setup-go@v6
        uses: actions/setup-go@v5
        with:
          go-version: 'stable'
        id: go

.github/workflows/system.yml | 121 (vendored)
@@ -1,37 +1,23 @@
name: system tests
permissions: {}
on: pull_request
jobs:
  generate:
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.make.outputs.matrix }}
    steps:
      - uses: actions/checkout@v6
      - id: make
        run: |
          if ! matrix_output=$(tests/generate_matrix.sh 2>&1); then
            echo "error generating matrix: $matrix_output"
            exit 1
          fi
          MATRIX_JSON=$(echo -n "$matrix_output" | jq -c . )
          echo "matrix=$MATRIX_JSON" >> "$GITHUB_OUTPUT"

  build:
    name: RunTests
    needs: generate
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix: ${{ fromJson(needs.generate.outputs.matrix) }}
    steps:
      - name: Check out code into the Go module directory
        uses: actions/checkout@v6
        uses: actions/checkout@v4

      - name: Install ShellCheck
        run: sudo apt-get install shellcheck

      - name: Run ShellCheck
        run: shellcheck -S warning ./tests/*.sh

      - name: Set up Go
        uses: actions/setup-go@v6
        uses: actions/setup-go@v5
        with:
          go-version: "stable"
          go-version: 'stable'
        id: go

      - name: Get Dependencies
@@ -42,12 +28,9 @@ jobs:
        run: |
          git clone https://github.com/bats-core/bats-core.git
          cd bats-core && ./install.sh $HOME
          git clone https://github.com/bats-core/bats-support.git ${{ github.workspace }}/tests/bats-support
          git clone https://github.com/ztombol/bats-assert.git ${{ github.workspace }}/tests/bats-assert

      - name: Install s3cmd
        run: |
          sudo apt-get update
          sudo apt-get install s3cmd

      - name: Install mc
@@ -55,81 +38,35 @@ jobs:
          curl https://dl.min.io/client/mc/release/linux-amd64/mc --create-dirs -o /usr/local/bin/mc
          chmod 755 /usr/local/bin/mc

      - name: Install xml libraries (for rest)
        run: |
          sudo apt-get update
          sudo apt-get install libxml2-utils xmlstarlet

      # see https://github.com/versity/versitygw/issues/1034
      - name: Install AWS cli
        run: |
          curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-2.22.35.zip" -o "awscliv2.zip"
          unzip -o awscliv2.zip
          ./aws/install -i ${{ github.workspace }}/aws-cli -b ${{ github.workspace }}/bin
          echo "${{ github.workspace }}/bin" >> $GITHUB_PATH

      - name: Build and run
        env:
          IAM_TYPE: ${{ matrix.IAM_TYPE }}
          RUN_SET: ${{ matrix.RUN_SET }}
          AWS_PROFILE: versity
          VERSITY_EXE: ${{ github.workspace }}/versitygw
          RUN_VERSITYGW: true
          BACKEND: ${{ matrix.BACKEND }}
          RECREATE_BUCKETS: ${{ matrix.RECREATE_BUCKETS }}
          DELETE_BUCKETS_AFTER_TEST: ${{ matrix.DELETE_BUCKETS_AFTER_TEST }}
          CERT: ${{ github.workspace }}/cert.pem
          KEY: ${{ github.workspace }}/versitygw.pem
          LOCAL_FOLDER: /tmp/gw
          BUCKET_ONE_NAME: versity-gwtest-bucket-one
          BUCKET_TWO_NAME: versity-gwtest-bucket-two
          USERS_FOLDER: /tmp/iam
          USERS_BUCKET: versity-gwtest-iam
          AWS_ENDPOINT_URL: https://127.0.0.1:7070
          PORT: 7070
          S3CMD_CONFIG: tests/s3cfg.local.default
          MC_ALIAS: versity
          LOG_LEVEL: 4
          GOCOVERDIR: ${{ github.workspace }}/cover
          USERNAME_ONE: HIJKLMN
          PASSWORD_ONE: 1234567
          USERNAME_TWO: OPQRSTU
          PASSWORD_TWO: 8901234
          TEST_FILE_FOLDER: ${{ github.workspace }}/versity-gwtest-files
          REMOVE_TEST_FILE_FOLDER: true
          VERSIONING_DIR: ${{ github.workspace }}/versioning
          COMMAND_LOG: command.log
          TIME_LOG: time.log
          PYTHON_ENV_FOLDER: ${{ github.workspace }}/env
          AUTOGENERATE_USERS: true
          USER_AUTOGENERATION_PREFIX: github-actions-test-
          AWS_REGION: ${{ matrix.AWS_REGION }}
      - name: Build and run, posix backend
        run: |
          make testbin
          export AWS_ACCESS_KEY_ID=ABCDEFGHIJKLMNOPQRST
          export AWS_SECRET_ACCESS_KEY=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmn
          export AWS_REGION=$AWS_REGION
          export AWS_ACCESS_KEY_ID_TWO=user
          export AWS_SECRET_ACCESS_KEY_TWO=pass
          export AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED
          export AWS_REGION=us-east-1
          aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID --profile versity
          aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY --profile versity
          aws configure set aws_region $AWS_REGION --profile versity
          mkdir $LOCAL_FOLDER
          mkdir /tmp/gw
          export WORKSPACE=$GITHUB_WORKSPACE
          openssl genpkey -algorithm RSA -out $KEY -pkeyopt rsa_keygen_bits:2048
          openssl req -new -x509 -key $KEY -out $CERT -days 365 -subj "/C=US/ST=California/L=San Francisco/O=Versity/OU=Software/CN=versity.com"
          mkdir $GOCOVERDIR $USERS_FOLDER
          if [[ $RECREATE_BUCKETS == "false" ]]; then
            BYPASS_ENV_FILE=true ${{ github.workspace }}/tests/setup_static.sh
          fi
          BYPASS_ENV_FILE=true $HOME/bin/bats ${{ github.workspace }}/$RUN_SET
          openssl genpkey -algorithm RSA -out versitygw.pem -pkeyopt rsa_keygen_bits:2048
          openssl req -new -x509 -key versitygw.pem -out cert.pem -days 365 -subj "/C=US/ST=California/L=San Francisco/O=Versity/OU=Software/CN=versity.com"
          mkdir cover
          VERSITYGW_TEST_ENV=./tests/.env.default ./tests/run_all.sh

      - name: Time report
        run: |
          if [ -e ${{ github.workspace }}/time.log ]; then
            cat ${{ github.workspace }}/time.log
          fi
      #- name: Build and run, s3 backend
      #  run: |
      #    make testbin
      #    export AWS_ACCESS_KEY_ID=ABCDEFGHIJKLMNOPQRST
      #    export AWS_SECRET_ACCESS_KEY=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmn
      #    export AWS_REGION=us-east-1
      #    aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID --profile versity_s3
      #    aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY --profile versity_s3
      #    aws configure set aws_region $AWS_REGION --profile versity_s3
      #    export AWS_ACCESS_KEY_ID_TWO=ABCDEFGHIJKLMNOPQRST
      #    export AWS_SECRET_ACCESS_KEY_TWO=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmn
      #    export WORKSPACE=$GITHUB_WORKSPACE
      #    VERSITYGW_TEST_ENV=./tests/.env.s3.default GOCOVERDIR=/tmp/cover ./tests/run_all.sh

      - name: Coverage report
        run: |

.gitignore | 22 (vendored)
@@ -45,25 +45,7 @@ tests/.secrets*
# IAM users files often created in testing
users.json
users.json.backup

# env files for testing
**/.env*
**/!.env.default

# s3cmd config files (testing)
tests/s3cfg.local*
tests/!s3cfg.local.default

# keys
*.pem

# patches
*.patch

# grafana's local database (kept on filesystem for survival between instantiations)
metrics-exploration/grafana_data/**

# bats tools
/tests/bats-assert
/tests/bats-support
.env*
!.env.default

@@ -1,5 +1,3 @@
version: 2

before:
  hooks:
    - go mod tidy
@@ -25,7 +23,7 @@ builds:
      - -X=main.Build={{.Commit}} -X=main.BuildTime={{.Date}} -X=main.Version={{.Version}}

archives:
  - formats: [ 'tar.gz' ]
  - format: tar.gz
    # this name template makes the OS and Arch compatible with the results of uname.
    name_template: >-
      {{ .ProjectName }}_v{{ .Version }}_
@@ -45,7 +43,7 @@ archives:
    # use zip for windows archives
    format_overrides:
      - goos: windows
        formats: [ 'zip' ]
        format: zip

    # Additional files/globs you want to add to the archive.
    #
@@ -60,7 +58,7 @@ checksum:
  name_template: 'checksums.txt'

snapshot:
  version_template: "{{ incpatch .Version }}-{{.ShortCommit}}"
  name_template: "{{ incpatch .Version }}-next"

changelog:
  sort: asc
@@ -88,7 +86,7 @@ nfpms:

    license: Apache 2.0

    ids:
    builds:
      - versitygw

    formats:

@@ -23,16 +23,13 @@ RUN go build -ldflags "-X=main.Build=${BUILD} -X=main.BuildTime=${TIME} -X=main.

FROM alpine:latest

# These arguments can be overridden when building the image
# These arguments can be overriden when building the image
ARG IAM_DIR=/tmp/vgw
ARG SETUP_DIR=/tmp/vgw

RUN mkdir -p $IAM_DIR
RUN mkdir -p $SETUP_DIR

COPY --from=0 /app/cmd/versitygw/versitygw /usr/local/bin/versitygw
COPY --from=0 /app/cmd/versitygw/versitygw /app/versitygw

COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
RUN chmod +x /usr/local/bin/docker-entrypoint.sh

ENTRYPOINT [ "/usr/local/bin/docker-entrypoint.sh" ]
ENTRYPOINT [ "/app/versitygw" ]
@@ -6,7 +6,7 @@ COPY go.mod ./
RUN go mod download

COPY ./ ./
COPY ./tests/certs/* /etc/pki/tls/certs/
COPY certs/* /etc/pki/tls/certs/

ARG IAM_DIR=/tmp/vgw
ARG SETUP_DIR=/tmp/vgw
@@ -1,15 +1,14 @@
FROM ubuntu:latest
FROM --platform=linux/arm64 ubuntu:latest

ARG DEBIAN_FRONTEND=noninteractive
ARG SECRETS_FILE=tests/.secrets.direct
ARG CONFIG_FILE=tests/.env.direct
ARG AWS_CLI=awscli-exe-linux-aarch64.zip
ARG MC_FOLDER=linux-arm64
ARG SECRETS_FILE=tests/.secrets
ARG CONFIG_FILE=tests/.env.docker

ENV TZ=Etc/UTC
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    git \
    make \
    wget \
    curl \
    unzip \
@@ -17,7 +16,6 @@ RUN apt-get update && \
    s3cmd \
    jq \
    bc \
    libxml2-utils \
    ca-certificates && \
    update-ca-certificates && \
    rm -rf /var/lib/apt/lists/*
@@ -26,14 +24,28 @@ RUN apt-get update && \
WORKDIR /tmp

# Install AWS cli
RUN curl "https://awscli.amazonaws.com/${AWS_CLI}" -o "awscliv2.zip" && unzip awscliv2.zip && ./aws/install
RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-aarch64.zip" -o "awscliv2.zip" && unzip awscliv2.zip && ./aws/install

# Install mc
RUN curl https://dl.min.io/client/mc/release/${MC_FOLDER}/mc \
RUN curl https://dl.min.io/client/mc/release/linux-arm64/mc \
    --create-dirs \
    -o /usr/local/minio-binaries/mc && \
    chmod -R 755 /usr/local/minio-binaries
ENV PATH=/usr/local/minio-binaries:${PATH}
ENV PATH="/usr/local/minio-binaries":${PATH}

# Download Go 1.21 (adjust the version and platform as needed)
RUN wget https://golang.org/dl/go1.21.7.linux-arm64.tar.gz

# Extract the downloaded archive
RUN tar -xvf go1.21.7.linux-arm64.tar.gz -C /usr/local

# Set Go environment variables
ENV PATH="/usr/local/go/bin:${PATH}"
ENV GOPATH="/go"
ENV GOBIN="$GOPATH/bin"

# Make the directory for Go packages
RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH"

# Create tester user
RUN groupadd -r tester && useradd -r -g tester tester
@@ -46,14 +58,12 @@ RUN git clone https://github.com/bats-core/bats-core.git && \
    ./install.sh /home/tester

USER tester
RUN mkdir -p /home/tester/tests
COPY --chown=tester:tester . /home/tester

# add bats support libraries
RUN git clone https://github.com/bats-core/bats-support.git && rm -rf /home/tester/tests/bats-support && mv bats-support /home/tester/tests
RUN git clone https://github.com/ztombol/bats-assert.git && rm -rf /home/tester/tests/bats-assert && mv bats-assert /home/tester/tests

WORKDIR /home/tester
#RUN cp tests/.env.docker.s3.default tests/.env.docker.s3
RUN cp tests/s3cfg.local.default tests/s3cfg.local
RUN make

RUN . $SECRETS_FILE && \
    export AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_REGION AWS_PROFILE && \

Makefile | 23
@@ -18,10 +18,6 @@ GOBUILD=$(GOCMD) build
GOCLEAN=$(GOCMD) clean
GOTEST=$(GOCMD) test

# docker-compose
DCCMD=docker-compose
DOCKERCOMPOSE=$(DCCMD) -f tests/docker-compose.yml --env-file .env.dev --project-directory .

BIN=versitygw

VERSION := $(shell if test -e VERSION; then cat VERSION; else git describe --abbrev=0 --tags HEAD; fi)
@@ -72,33 +68,22 @@ dist:
	rm -f VERSION
	gzip -f $(TARFILE)

.PHONY: snapshot
snapshot:
	# brew install goreleaser/tap/goreleaser
	goreleaser release --snapshot --skip publish --clean

# Creates and runs S3 gateway instance in a docker container
.PHONY: up-posix
up-posix:
	$(DOCKERCOMPOSE) up posix
	docker compose --env-file .env.dev up posix

# Creates and runs S3 gateway proxy instance in a docker container
.PHONY: up-proxy
up-proxy:
	$(DOCKERCOMPOSE) up proxy
	docker compose --env-file .env.dev up proxy

# Creates and runs S3 gateway to azurite instance in a docker container
.PHONY: up-azurite
up-azurite:
	$(DOCKERCOMPOSE) up azurite azuritegw
	docker compose --env-file .env.dev up azurite azuritegw

# Creates and runs both S3 gateway and proxy server instances in docker containers
.PHONY: up-app
up-app:
	$(DOCKERCOMPOSE) up

# Run the host-style tests in docker containers
.PHONY: test-host-style
test-host-style:
	docker compose -f tests/host-style-tests/docker-compose.yml up --build --abort-on-container-exit --exit-code-from test

	docker compose --env-file .env.dev up

README.md | 44
@@ -6,7 +6,7 @@
<a href="https://www.versity.com"><img alt="Versity Software logo image." src="https://github.com/versity/versitygw/blob/assets/assets/logo.svg"></a>
</picture>

[](https://github.com/versity/versitygw/blob/main/LICENSE) [](https://goreportcard.com/report/github.com/versity/versitygw) [](https://pkg.go.dev/github.com/versity/versitygw)
[](https://github.com/versity/versitygw/blob/main/LICENSE)

### Binary release builds
Download [latest release](https://github.com/versity/versitygw/releases)
@@ -14,15 +14,8 @@ Download [latest release](https://github.com/versity/versitygw/releases)
|:-----------:|:-----------:|:-----------:|:-----------:|:---------:|:---------:|
| ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ |

### Use Cases
* Turn your local filesystem into an S3 server with a single command!
* Proxy S3 requests to S3 storage
* Simple to deploy S3 server with a single command
* Protocol compatibility in `posix` allows common access to files via posix or S3
* Simplified interface for adding new storage system support

### News
Check out latest wiki articles: [https://github.com/versity/versitygw/wiki/Articles](https://github.com/versity/versitygw/wiki/Articles)
* New performance analysis article [https://github.com/versity/versitygw/wiki/Performance](https://github.com/versity/versitygw/wiki/Performance)

### Mailing List
Keep up to date with latest gateway announcements by signing up to the [versitygw mailing list](https://www.versity.com/products/versitygw#signup).
@@ -35,12 +28,18 @@ Ask questions in the [community discussions](https://github.com/versity/versityg
<br>
Contact [Versity Sales](https://www.versity.com/contact/) to discuss enterprise support.

### Use Cases
* Share filesystem directory via S3 protocol
* Proxy S3 requests to S3 storage
* Simple to deploy S3 server with a single command
* Protocol compatibility in `posix` allows common access to files via posix or S3

### Overview
Versity Gateway, a simple to use tool for seamless inline translation between AWS S3 object commands and storage systems. The Versity Gateway bridges the gap between S3-reliant applications and other storage systems, enabling enhanced compatibility and integration while offering exceptional scalability.

The server translates incoming S3 API requests and transforms them into equivalent operations to the backend service. By leveraging this gateway server, applications can interact with the S3-compatible API on top of already existing storage systems. This project enables leveraging existing infrastructure investments while seamlessly integrating with S3-compatible systems, offering increased flexibility and compatibility in managing data storage.

The Versity Gateway is focused on performance, simplicity, and expandability. The Versity Gateway is designed with modularity in mind, enabling future extensions to support additional backend storage systems. At present, the Versity Gateway supports any generic POSIX file backend storage, Versity’s open source ScoutFS filesystem, Azure Blob Storage, and other S3 servers.
The Versity Gateway is focused on performance, simplicity, and expandability. The Versity Gateway is designed with modularity in mind, enabling future extensions to support additional backend storage systems. At present, the Versity Gateway supports any generic POSIX file backend storage and Versity’s open source ScoutFS filesystem.

The gateway is completely stateless. Multiple Versity Gateway instances may be deployed in a cluster to increase aggregate throughput. The Versity Gateway’s stateless architecture allows any request to be serviced by any gateway thereby distributing workloads and enhancing performance. Load balancers may be used to evenly distribute requests across the cluster of gateways for optimal performance.
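
As an illustration of that stateless scale-out, the sketch below starts two gateways over the same POSIX directory so a load balancer can route requests to either instance. The `--port` flag, the credentials, and the `/mnt/data` path are assumptions for the example, not settings taken from this page.

```bash
# Hypothetical two-instance deployment sharing one POSIX backend.
# Either process can serve any request, so a load balancer
# (HAProxy, nginx, ...) may round-robin between :7070 and :7071.
versitygw -a testuser -s secret --port :7070 posix /mnt/data &
versitygw -a testuser -s secret --port :7071 posix /mnt/data &
```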

@@ -68,30 +67,7 @@ The command format is
```
versitygw [global options] command [command options] [arguments...]
```
The [global options](https://github.com/versity/versitygw/wiki/Global-Options) are specified before the backend type and the backend options are specified after.
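
For example, a minimal invocation might look like the sketch below, with `-a`/`-s` and an assumed `--port` flag as the global options and the `posix` backend taking its directory argument afterwards (`/tmp/vgw` is illustrative):

```bash
# global options before the backend name, backend argument after it
versitygw -a testuser -s secret --port :7070 posix /tmp/vgw
```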

### Run the gateway in Docker

Use the published image like the native binary by passing CLI arguments:

```bash
docker run --rm versity/versitygw:latest --version
```

When no command arguments are supplied, the container looks for `VGW_BACKEND` and optional `VGW_BACKEND_ARG`/`VGW_BACKEND_ARGS` environment variables to determine which backend to start. Backend-specific configuration continues to come from the existing environment flags (for example `ROOT_ACCESS_KEY`, `VGW_PORT`, and others).

```bash
docker run --rm \
  -e ROOT_ACCESS_KEY=testuser \
  -e ROOT_SECRET_KEY=secret \
  -e VGW_BACKEND=posix \
  -e VGW_BACKEND_ARG=/data \
  -p 10000:7070 \
  -v $(pwd)/data:/data \
  versity/versitygw:latest
```

If you need to pass additional CLI options, set `VGW_ARGS` with a space-delimited list, or continue passing arguments directly to `docker run`.
The global options are specified before the backend type and the backend options are specified after.
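
As a sketch of the `VGW_ARGS` route, reusing the environment from the example above (the `--debug` flag is assumed here purely for illustration):

```bash
# extra CLI options passed as a space-delimited list via VGW_ARGS
docker run --rm \
  -e ROOT_ACCESS_KEY=testuser \
  -e ROOT_SECRET_KEY=secret \
  -e VGW_BACKEND=posix \
  -e VGW_BACKEND_ARG=/data \
  -e VGW_ARGS="--debug" \
  -p 10000:7070 \
  -v $(pwd)/data:/data \
  versity/versitygw:latest
```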

***

@@ -1,189 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
	"context"
	"encoding/json"
	"errors"
	"strings"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/s3err"
)

func VerifyObjectCopyAccess(ctx context.Context, be backend.Backend, copySource string, opts AccessOptions) error {
	if opts.IsRoot {
		return nil
	}
	if opts.Acc.Role == RoleAdmin {
		return nil
	}

	// Verify destination bucket access
	if err := VerifyAccess(ctx, be, opts); err != nil {
		return err
	}
	// Verify source bucket access
	srcBucket, srcObject, found := strings.Cut(copySource, "/")
	if !found {
		return s3err.GetAPIError(s3err.ErrInvalidCopySourceBucket)
	}

	// Get source bucket ACL
	srcBucketACLBytes, err := be.GetBucketAcl(ctx, &s3.GetBucketAclInput{Bucket: &srcBucket})
	if err != nil {
		return err
	}

	var srcBucketAcl ACL
	if err := json.Unmarshal(srcBucketACLBytes, &srcBucketAcl); err != nil {
		return err
	}

	if err := VerifyAccess(ctx, be, AccessOptions{
		Acl:           srcBucketAcl,
		AclPermission: PermissionRead,
		IsRoot:        opts.IsRoot,
		Acc:           opts.Acc,
		Bucket:        srcBucket,
		Object:        srcObject,
		Action:        GetObjectAction,
	}); err != nil {
		return err
	}

	return nil
}

type AccessOptions struct {
	Acl             ACL
	AclPermission   Permission
	IsRoot          bool
	Acc             Account
	Bucket          string
	Object          string
	Action          Action
	Readonly        bool
	IsPublicRequest bool
}

func VerifyAccess(ctx context.Context, be backend.Backend, opts AccessOptions) error {
	if opts.Readonly {
		if opts.AclPermission == PermissionWrite || opts.AclPermission == PermissionWriteAcp {
			return s3err.GetAPIError(s3err.ErrAccessDenied)
		}
	}
	// Skip the access check for public bucket requests
	if opts.IsPublicRequest {
		return nil
	}
	if opts.IsRoot {
		return nil
	}
	if opts.Acc.Role == RoleAdmin {
		return nil
	}

	policy, policyErr := be.GetBucketPolicy(ctx, opts.Bucket)
	if policyErr != nil {
		if !errors.Is(policyErr, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
			return policyErr
		}
	} else {
		return VerifyBucketPolicy(policy, opts.Acc.Access, opts.Bucket, opts.Object, opts.Action)
	}

	if err := verifyACL(opts.Acl, opts.Acc.Access, opts.AclPermission); err != nil {
		return err
	}

	return nil
}

// Detects if the action is policy related
// e.g.
// 'GetBucketPolicy', 'PutBucketPolicy'
func isPolicyAction(action Action) bool {
	return action == GetBucketPolicyAction || action == PutBucketPolicyAction
}

// VerifyPublicAccess checks if the bucket is publically accessible by ACL or Policy
func VerifyPublicAccess(ctx context.Context, be backend.Backend, action Action, permission Permission, bucket, object string) error {
	// ACL disabled
	policy, err := be.GetBucketPolicy(ctx, bucket)
	if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
		return err
	}
	if err == nil {
		err = VerifyPublicBucketPolicy(policy, bucket, object, action)
		if err == nil {
			// if ACLs are disabled, and the bucket grants public access,
			// policy actions should return 'MethodNotAllowed'
			if isPolicyAction(action) {
				return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
			}

			return nil
		}
	}

	// if the action is not in the ACL whitelist the access is denied
	_, ok := publicACLAllowedActions[action]
	if !ok {
		return s3err.GetAPIError(s3err.ErrAccessDenied)
	}

	err = VerifyPublicBucketACL(ctx, be, bucket, action, permission)
	if err != nil {
		return s3err.GetAPIError(s3err.ErrAccessDenied)
	}

	return nil
}

func IsAdminOrOwner(acct Account, isRoot bool, acl ACL) error {
	// Owner check
	if acct.Access == acl.Owner {
		return nil
	}

	// Root user has access over almost everything
	if isRoot {
		return nil
	}

	// Admin user case
	if acct.Role == RoleAdmin {
		return nil
	}

	// Return access denied in all other cases
	return s3err.GetAPIError(s3err.ErrAccessDenied)
}

type PublicACLAllowedActions map[Action]struct{}

var publicACLAllowedActions PublicACLAllowedActions = PublicACLAllowedActions{
	ListBucketAction:                 struct{}{},
	PutObjectAction:                  struct{}{},
	ListBucketMultipartUploadsAction: struct{}{},
	DeleteObjectAction:               struct{}{},
	ListBucketVersionsAction:         struct{}{},
	GetObjectAction:                  struct{}{},
	GetObjectAttributesAction:        struct{}{},
	GetObjectAclAction:               struct{}{},
}

auth/acl.go | 458
@@ -17,180 +17,37 @@ package auth
import (
	"context"
	"encoding/json"
	"encoding/xml"
	"errors"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/debuglogger"
	"github.com/versity/versitygw/s3err"
)

type ACL struct {
	ACL      types.BucketCannedACL
	Owner    string
	Grantees []Grantee
}

// IsPublic specifies if the acl grants public read access
func (acl *ACL) IsPublic(permission Permission) bool {
	for _, grt := range acl.Grantees {
		if grt.Permission == permission && grt.Type == types.TypeGroup && grt.Access == "all-users" {
			return true
		}
	}

	return false
}

type Grantee struct {
	Permission Permission
	Permission types.Permission
	Access     string
	Type       types.Type
}

type GetBucketAclOutput struct {
	XMLName           xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlPolicy"`
	Owner             *types.Owner
	AccessControlList AccessControlList
}

type PutBucketAclInput struct {
	Bucket              *string
	ACL                 types.BucketCannedACL
	AccessControlPolicy *AccessControlPolicy
	GrantFullControl    *string
	GrantRead           *string
	GrantReadACP        *string
	GrantWrite          *string
	GrantWriteACP       *string
type AccessControlList struct {
	Grants []types.Grant `xml:"Grant"`
}

type AccessControlPolicy struct {
	AccessControlList AccessControlList `xml:"AccessControlList"`
	Owner             *types.Owner
}

func (acp *AccessControlPolicy) Validate() error {
	if !acp.AccessControlList.isValid() {
		return s3err.GetAPIError(s3err.ErrMalformedACL)
	}

	// The Owner can't be nil
	if acp.Owner == nil {
		return s3err.GetAPIError(s3err.ErrMalformedACL)
	}

	// The Owner ID can't be empty
	if acp.Owner.ID == nil || *acp.Owner.ID == "" {
		return s3err.GetAPIError(s3err.ErrMalformedACL)
	}

	return nil
}

type AccessControlList struct {
	Grants []Grant `xml:"Grant"`
}

// Validates the AccessControlList
func (acl *AccessControlList) isValid() bool {
	for _, el := range acl.Grants {
		if !el.isValid() {
			return false
		}
	}

	return true
}

type Permission string

const (
	PermissionFullControl Permission = "FULL_CONTROL"
	PermissionWrite       Permission = "WRITE"
	PermissionWriteAcp    Permission = "WRITE_ACP"
	PermissionRead        Permission = "READ"
	PermissionReadAcp     Permission = "READ_ACP"
)

// Check if the permission is valid
func (p Permission) isValid() bool {
	return p == PermissionFullControl ||
		p == PermissionRead ||
		p == PermissionReadAcp ||
		p == PermissionWrite ||
		p == PermissionWriteAcp
}

type Grant struct {
	Grantee    *Grt       `xml:"Grantee"`
	Permission Permission `xml:"Permission"`
}

// Checks if Grant is valid
func (g *Grant) isValid() bool {
	return g.Permission.isValid() && g.Grantee.isValid()
}

type Grt struct {
	XMLNS string     `xml:"xmlns:xsi,attr"`
	Type  types.Type `xml:"xsi:type,attr"`
	ID    string     `xml:"ID"`
}

// Custom Unmarshalling for Grt to parse xsi:type properly
func (g *Grt) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	// Iterate through the XML tokens to process the attributes
	for _, attr := range start.Attr {
		// Check if the attribute is xsi:type and belongs to the xsi namespace
		if attr.Name.Space == "http://www.w3.org/2001/XMLSchema-instance" && attr.Name.Local == "type" {
			g.Type = types.Type(attr.Value)
		}
		// Handle xmlns:xsi
		if attr.Name.Local == "xmlns:xsi" {
			g.XMLNS = attr.Value
		}
	}

	// Decode the inner XML elements like ID
	for {
		t, err := d.Token()
		if err != nil {
			return err
		}

		switch se := t.(type) {
		case xml.StartElement:
			if se.Name.Local == "ID" {
				if err := d.DecodeElement(&g.ID, &se); err != nil {
					return err
				}
			}
		case xml.EndElement:
			if se.Name.Local == start.Name.Local {
				return nil
			}
		}
	}
}

// Validates Grt
func (g *Grt) isValid() bool {
	// Validate the Type
	// Only these 2 types are supported in the gateway
	if g.Type != types.TypeCanonicalUser && g.Type != types.TypeGroup {
		return false
	}

	// The ID prop shouldn't be empty
	if g.ID == "" {
		return false
	}

	return true
	Owner types.Owner
}

func ParseACL(data []byte) (ACL, error) {
@@ -205,35 +62,17 @@ func ParseACL(data []byte) (ACL, error) {
	return acl, nil
}

func ParseACLOutput(data []byte, owner string) (GetBucketAclOutput, error) {
	grants := []Grant{}

	if len(data) == 0 {
		return GetBucketAclOutput{
			Owner: &types.Owner{
				ID: &owner,
			},
			AccessControlList: AccessControlList{
				Grants: grants,
			},
		}, nil
	}

func ParseACLOutput(data []byte) (GetBucketAclOutput, error) {
	var acl ACL
	if err := json.Unmarshal(data, &acl); err != nil {
		return GetBucketAclOutput{}, fmt.Errorf("parse acl: %w", err)
	}

	grants := []types.Grant{}

	for _, elem := range acl.Grantees {
		acs := elem.Access
		grants = append(grants, Grant{
			Grantee: &Grt{
				XMLNS: "http://www.w3.org/2001/XMLSchema-instance",
				ID:    acs,
				Type:  elem.Type,
			},
			Permission: elem.Permission,
		})
		grants = append(grants, types.Grant{Grantee: &types.Grantee{ID: &acs}, Permission: elem.Permission})
	}

	return GetBucketAclOutput{
@@ -246,43 +85,20 @@ func ParseACLOutput(data []byte, owner string) (GetBucketAclOutput, error)
	}, nil
}

func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService) ([]byte, error) {
func UpdateACL(input *s3.PutBucketAclInput, acl ACL, iam IAMService) ([]byte, error) {
	if input == nil {
		return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
	}

	defaultGrantees := []Grantee{
		{
			Permission: PermissionFullControl,
			Access:     acl.Owner,
			Type:       types.TypeCanonicalUser,
		},
	if acl.Owner != *input.AccessControlPolicy.Owner.ID {
		return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
	}

	// if the ACL is specified, set the ACL, else replace the grantees
	if input.ACL != "" {
		switch input.ACL {
		case types.BucketCannedACLPublicRead:
			defaultGrantees = append(defaultGrantees, Grantee{
				Permission: PermissionRead,
				Access:     "all-users",
				Type:       types.TypeGroup,
			})
		case types.BucketCannedACLPublicReadWrite:
			defaultGrantees = append(defaultGrantees, []Grantee{
				{
					Permission: PermissionRead,
					Access:     "all-users",
					Type:       types.TypeGroup,
				},
				{
					Permission: PermissionWrite,
					Access:     "all-users",
					Type:       types.TypeGroup,
				},
			}...)
		}
		acl.ACL = input.ACL
		acl.Grantees = []Grantee{}
	} else {
		grantees := []Grantee{}
		accs := []string{}

		if input.GrantRead != nil || input.GrantReadACP != nil || input.GrantFullControl != nil || input.GrantWrite != nil || input.GrantWriteACP != nil {
@@ -291,71 +107,45 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService) ([]byte, error
			if input.GrantFullControl != nil && *input.GrantFullControl != "" {
				fullControlList = splitUnique(*input.GrantFullControl, ",")
				for _, str := range fullControlList {
					defaultGrantees = append(defaultGrantees, Grantee{
						Access:     str,
						Permission: PermissionFullControl,
						Type:       types.TypeCanonicalUser,
					})
					grantees = append(grantees, Grantee{Access: str, Permission: "FULL_CONTROL"})
				}
			}
			if input.GrantRead != nil && *input.GrantRead != "" {
				readList = splitUnique(*input.GrantRead, ",")
				for _, str := range readList {
					defaultGrantees = append(defaultGrantees, Grantee{
						Access:     str,
						Permission: PermissionRead,
						Type:       types.TypeCanonicalUser,
					})
					grantees = append(grantees, Grantee{Access: str, Permission: "READ"})
				}
			}
			if input.GrantReadACP != nil && *input.GrantReadACP != "" {
				readACPList = splitUnique(*input.GrantReadACP, ",")
				for _, str := range readACPList {
					defaultGrantees = append(defaultGrantees, Grantee{
						Access:     str,
						Permission: PermissionReadAcp,
						Type:       types.TypeCanonicalUser,
					})
					grantees = append(grantees, Grantee{Access: str, Permission: "READ_ACP"})
				}
			}
			if input.GrantWrite != nil && *input.GrantWrite != "" {
				writeList = splitUnique(*input.GrantWrite, ",")
				for _, str := range writeList {
					defaultGrantees = append(defaultGrantees, Grantee{
						Access:     str,
						Permission: PermissionWrite,
						Type:       types.TypeCanonicalUser,
					})
					grantees = append(grantees, Grantee{Access: str, Permission: "WRITE"})
				}
			}
			if input.GrantWriteACP != nil && *input.GrantWriteACP != "" {
				writeACPList = splitUnique(*input.GrantWriteACP, ",")
				for _, str := range writeACPList {
					defaultGrantees = append(defaultGrantees, Grantee{
						Access:     str,
						Permission: PermissionWriteAcp,
						Type:       types.TypeCanonicalUser,
					})
					grantees = append(grantees, Grantee{Access: str, Permission: "WRITE_ACP"})
				}
			}

			accs = append(append(append(append(fullControlList, readList...), writeACPList...), readACPList...), writeList...)
		} else {
			cache := make(map[string]bool)
			for _, grt := range input.AccessControlPolicy.AccessControlList.Grants {
				if grt.Grantee == nil || grt.Grantee.ID == "" || grt.Permission == "" {
			for _, grt := range input.AccessControlPolicy.Grants {
				if grt.Grantee == nil || grt.Grantee.ID == nil || grt.Permission == "" {
					return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
				}

				access := grt.Grantee.ID
				defaultGrantees = append(defaultGrantees, Grantee{
					Access:     access,
					Permission: grt.Permission,
					Type:       types.TypeCanonicalUser,
				})
				if _, ok := cache[access]; !ok {
					cache[access] = true
					accs = append(accs, access)
				grantees = append(grantees, Grantee{Access: *grt.Grantee.ID, Permission: grt.Permission})
				if _, ok := cache[*grt.Grantee.ID]; !ok {
					cache[*grt.Grantee.ID] = true
					accs = append(accs, *grt.Grantee.ID)
				}
			}
		}
@@ -368,9 +158,10 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService) ([]byte, error
		if len(accList) > 0 {
			return nil, fmt.Errorf("accounts does not exist: %s", strings.Join(accList, ", "))
		}
	}

	acl.Grantees = defaultGrantees
	acl.Grantees = grantees
	acl.ACL = ""
	}

	result, err := json.Marshal(acl)
	if err != nil {
@@ -386,12 +177,12 @@ func CheckIfAccountsExist(accs []string, iam IAMService) ([]string, error) {
	for _, acc := range accs {
		_, err := iam.GetUserAccount(acc)
		if err != nil {
			if err == ErrNoSuchUser || err == s3err.GetAPIError(s3err.ErrAdminUserNotFound) {
			if err == ErrNoSuchUser {
				result = append(result, acc)
				continue
			}
			if errors.Is(err, s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)) {
				return nil, err
			if err == ErrNotSupported {
				return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
			}
			return nil, fmt.Errorf("check user account: %w", err)
		}
@@ -414,94 +205,153 @@ func splitUnique(s, divider string) []string {
	return result
}

func verifyACL(acl ACL, access string, permission Permission) error {
	grantee := Grantee{
		Access:     access,
		Permission: permission,
		Type:       types.TypeCanonicalUser,
	}
	granteeFullCtrl := Grantee{
		Access:     access,
		Permission: PermissionFullControl,
		Type:       types.TypeCanonicalUser,
	}
	granteeAllUsers := Grantee{
		Access:     "all-users",
		Permission: permission,
		Type:       types.TypeGroup,
	}

	isFound := false

	for _, grt := range acl.Grantees {
		if grt == grantee || grt == granteeFullCtrl || grt == granteeAllUsers {
			isFound = true
			break
func verifyACL(acl ACL, access string, permission types.Permission) error {
	if acl.ACL != "" {
		if (permission == "READ" || permission == "READ_ACP") && (acl.ACL != "public-read" && acl.ACL != "public-read-write") {
			return s3err.GetAPIError(s3err.ErrAccessDenied)
		}
		if (permission == "WRITE" || permission == "WRITE_ACP") && acl.ACL != "public-read-write" {
			return s3err.GetAPIError(s3err.ErrAccessDenied
|
||||
}
|
||||
}
|
||||
|
||||
if isFound {
|
||||
return nil
|
||||
} else {
|
||||
if len(acl.Grantees) == 0 {
|
||||
return nil
|
||||
}
|
||||
grantee := Grantee{Access: access, Permission: permission}
|
||||
granteeFullCtrl := Grantee{Access: access, Permission: "FULL_CONTROL"}
|
||||
|
||||
isFound := false
|
||||
|
||||
for _, grt := range acl.Grantees {
|
||||
if grt == grantee || grt == granteeFullCtrl {
|
||||
isFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if isFound {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return s3err.GetAPIError(s3err.ErrAccessDenied)
|
||||
}
|
||||
|
||||
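The rewritten verifyACL above short-circuits on canned ACLs first (public-read satisfies READ/READ_ACP, public-read-write additionally satisfies WRITE/WRITE_ACP) and only then falls back to grantee matching, where a FULL_CONTROL grant satisfies any requested permission. A minimal test-style sketch of that behavior, assuming the ACL, Grantee, and verifyACL definitions from this diff; the test name and accessor strings are invented for illustration:

```
package auth

import "testing"

// Sketch only: exercises the verifyACL semantics shown in this diff.
func TestVerifyACLSketch(t *testing.T) {
    // Canned public-read bucket: READ is allowed for anyone,
    // WRITE still requires public-read-write.
    acl := ACL{ACL: "public-read"}
    if err := verifyACL(acl, "anyone", "READ"); err != nil {
        t.Fatalf("expected READ to pass on public-read: %v", err)
    }
    if err := verifyACL(acl, "anyone", "WRITE"); err == nil {
        t.Fatal("expected WRITE to be denied on public-read")
    }

    // Explicit grantee: FULL_CONTROL implies any requested permission.
    acl = ACL{Grantees: []Grantee{{Access: "user1", Permission: "FULL_CONTROL"}}}
    if err := verifyACL(acl, "user1", "WRITE_ACP"); err != nil {
        t.Fatalf("expected FULL_CONTROL grantee to pass: %v", err)
    }
}
```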
// Verifies if the bucket acl grants public access
func VerifyPublicBucketACL(ctx context.Context, be backend.Backend, bucket string, action Action, permission Permission) error {
    aclBytes, err := be.GetBucketAcl(ctx, &s3.GetBucketAclInput{
        Bucket: &bucket,
    })
    if err != nil {
        return err
func MayCreateBucket(acct Account, isRoot bool) error {
    if isRoot {
        return nil
    }

    acl, err := ParseACL(aclBytes)
    if err != nil {
        return err
    }

    if !acl.IsPublic(permission) {
        return ErrAccessDenied
    if acct.Role == RoleUser {
        return s3err.GetAPIError(s3err.ErrAccessDenied)
    }

    return nil
}

// UpdateBucketACLOwner sets default ACL with new owner and removes
// any previous bucket policy that was in place
func UpdateBucketACLOwner(ctx context.Context, be backend.Backend, bucket, newOwner string) error {
    acl := ACL{
        Owner: newOwner,
        Grantees: []Grantee{
            {
                Permission: PermissionFullControl,
                Access:     newOwner,
                Type:       types.TypeCanonicalUser,
            },
        },
func IsAdminOrOwner(acct Account, isRoot bool, acl ACL) error {
    // Owner check
    if acct.Access == acl.Owner {
        return nil
    }

    result, err := json.Marshal(acl)
    if err != nil {
        return fmt.Errorf("marshal ACL: %w", err)
    // Root user has access over almost everything
    if isRoot {
        return nil
    }

    err = be.PutBucketAcl(ctx, bucket, result)
    // Admin user case
    if acct.Role == RoleAdmin {
        return nil
    }

    // Return access denied in all other cases
    return s3err.GetAPIError(s3err.ErrAccessDenied)
}

type AccessOptions struct {
    Acl           ACL
    AclPermission types.Permission
    IsRoot        bool
    Acc           Account
    Bucket        string
    Object        string
    Action        Action
}

func VerifyAccess(ctx context.Context, be backend.Backend, opts AccessOptions) error {
    if opts.IsRoot {
        return nil
    }
    if opts.Acc.Role == RoleAdmin {
        return nil
    }
    if opts.Acc.Access == opts.Acl.Owner {
        return nil
    }

    policy, err := be.GetBucketPolicy(ctx, opts.Bucket)
    if err != nil {
        return err
    }

    return be.DeleteBucketPolicy(ctx, bucket)
    // If bucket policy is not set and the ACL is default, only the owner has access
    if len(policy) == 0 && opts.Acl.ACL == "" && len(opts.Acl.Grantees) == 0 {
        return s3err.GetAPIError(s3err.ErrAccessDenied)
    }

    if err := verifyBucketPolicy(policy, opts.Acc.Access, opts.Bucket, opts.Object, opts.Action); err != nil {
        return err
    }
    if err := verifyACL(opts.Acl, opts.Acc.Access, opts.AclPermission); err != nil {
        return err
    }

    return nil
}

// ValidateCannedACL validates bucket canned acl value
func ValidateCannedACL(acl string) error {
    switch types.BucketCannedACL(acl) {
    case types.BucketCannedACLPrivate, types.BucketCannedACLPublicRead, types.BucketCannedACLPublicReadWrite, "":
func VerifyObjectCopyAccess(ctx context.Context, be backend.Backend, copySource string, opts AccessOptions) error {
    if opts.IsRoot {
        return nil
    default:
        debuglogger.Logf("invalid bucket canned acl: %v", acl)
        return s3err.GetAPIError(s3err.ErrInvalidArgument)
    }
    if opts.Acc.Role == RoleAdmin {
        return nil
    }

    // Verify destination bucket access
    if err := VerifyAccess(ctx, be, opts); err != nil {
        return err
    }
    // Verify source bucket access
    srcBucket, srcObject, found := strings.Cut(copySource, "/")
    if !found {
        return s3err.GetAPIError(s3err.ErrInvalidCopySource)
    }

    // Get source bucket ACL
    srcBucketACLBytes, err := be.GetBucketAcl(ctx, &s3.GetBucketAclInput{Bucket: &srcBucket})
    if err != nil {
        return err
    }

    var srcBucketAcl ACL
    if err := json.Unmarshal(srcBucketACLBytes, &srcBucketAcl); err != nil {
        return err
    }

    if err := VerifyAccess(ctx, be, AccessOptions{
        Acl:           srcBucketAcl,
        AclPermission: types.PermissionRead,
        IsRoot:        opts.IsRoot,
        Acc:           opts.Acc,
        Bucket:        srcBucket,
        Object:        srcObject,
        Action:        GetObjectAction,
    }); err != nil {
        return err
    }

    return nil
}

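VerifyAccess centralizes the root/admin/owner short-circuits and then layers the bucket-policy check over the ACL check. A hedged sketch of the call shape; the helper name, bucket/object strings, and the way acl/acct are obtained are all hypothetical:

```
package main

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/service/s3/types"
    "github.com/versity/versitygw/auth"
    "github.com/versity/versitygw/backend"
)

// checkGetObject is a hypothetical helper showing how a handler might
// gate a GetObject call with auth.VerifyAccess from this diff; acl and
// acct would normally come from the request context.
func checkGetObject(ctx context.Context, be backend.Backend, acl auth.ACL, acct auth.Account, isRoot bool) error {
    return auth.VerifyAccess(ctx, be, auth.AccessOptions{
        Acl:           acl,
        AclPermission: types.PermissionRead,
        IsRoot:        isRoot,
        Acc:           acct,
        Bucket:        "my-bucket",
        Object:        "my-object",
        Action:        auth.GetObjectAction,
    })
}
```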
@@ -1,338 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
    "encoding/xml"
    "fmt"
    "net/http"
    "regexp"
    "strings"

    "github.com/versity/versitygw/debuglogger"
    "github.com/versity/versitygw/s3err"
)

// headerRegex is the regexp to validate http header names
var headerRegex = regexp.MustCompile(`^[!#$%&'*+\-.^_` + "`" + `|~0-9A-Za-z]+$`)

type CORSHeader string
type CORSHTTPMethod string

// IsValid validates the CORS http header.
// The rules are based on the http RFC:
// https://datatracker.ietf.org/doc/html/rfc7230#section-3.2
//
// Empty values are considered valid
func (ch CORSHeader) IsValid() bool {
    return ch == "" || headerRegex.MatchString(ch.String())
}

// String converts the header value to 'string'
func (ch CORSHeader) String() string {
    return string(ch)
}

// ToLower converts the header to lower case
func (ch CORSHeader) ToLower() string {
    return strings.ToLower(string(ch))
}

// IsValid validates the cors http request method:
// the methods are case sensitive
func (cm CORSHTTPMethod) IsValid() bool {
    return cm.IsEmpty() || cm == http.MethodGet || cm == http.MethodHead || cm == http.MethodPut ||
        cm == http.MethodPost || cm == http.MethodDelete
}

// IsEmpty checks if the cors method is an empty string
func (cm CORSHTTPMethod) IsEmpty() bool {
    return cm == ""
}

// String converts the method value to 'string'
func (cm CORSHTTPMethod) String() string {
    return string(cm)
}

type CORSConfiguration struct {
    Rules []CORSRule `xml:"CORSRule"`
}

// Validate validates the cors configuration rules
func (cc *CORSConfiguration) Validate() error {
    if cc == nil || cc.Rules == nil {
        debuglogger.Logf("invalid CORS configuration")
        return s3err.GetAPIError(s3err.ErrMalformedXML)
    }

    if len(cc.Rules) == 0 {
        debuglogger.Logf("empty CORS config rules")
        return s3err.GetAPIError(s3err.ErrMalformedXML)
    }

    // validate each CORS rule
    for _, rule := range cc.Rules {
        if err := rule.Validate(); err != nil {
            return err
        }
    }

    return nil
}

type CORSAllowanceConfig struct {
    Origin           string
    Methods          string
    ExposedHeaders   string
    AllowCredentials string
    AllowHeaders     string
    MaxAge           *int32
}

// IsAllowed walks through the CORS rules and finds the first one allowing access.
// If no rule grants access, it returns the CORS forbidden error
func (cc *CORSConfiguration) IsAllowed(origin string, method CORSHTTPMethod, headers []CORSHeader) (*CORSAllowanceConfig, error) {
    // if the method is empty, CORS is forbidden anyway:
    // skip without going through the rules
    if method.IsEmpty() {
        debuglogger.Logf("empty Access-Control-Request-Method")
        return nil, s3err.GetAPIError(s3err.ErrCORSForbidden)
    }
    for _, rule := range cc.Rules {
        // find the first rule granting access
        if isAllowed, wildcardOrigin := rule.Match(origin, method, headers); isAllowed {
            o := origin
            allowCredentials := "true"
            if wildcardOrigin {
                o = "*"
                allowCredentials = "false"
            }

            return &CORSAllowanceConfig{
                Origin:           o,
                AllowCredentials: allowCredentials,
                Methods:          rule.GetAllowedMethods(),
                ExposedHeaders:   rule.GetExposeHeaders(),
                AllowHeaders:     buildAllowedHeaders(headers),
                MaxAge:           rule.MaxAgeSeconds,
            }, nil
        }
    }

    // if no matching rule is found, return the CORS forbidden error
    return nil, s3err.GetAPIError(s3err.ErrCORSForbidden)
}

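Putting the pieces together, a preflight decision reduces to a single IsAllowed call over the parsed configuration. A minimal sketch, assuming the CORS types from this (removed) file as they exist on main; the rule values and origin are invented for illustration:

```
package main

import (
    "fmt"
    "net/http"

    "github.com/versity/versitygw/auth"
)

// Sketch only: evaluate a preflight request against a CORS config.
func main() {
    cfg := &auth.CORSConfiguration{Rules: []auth.CORSRule{{
        AllowedOrigins: []string{"https://*.example.com"},
        AllowedMethods: []auth.CORSHTTPMethod{http.MethodGet},
        AllowedHeaders: []auth.CORSHeader{"x-amz-*"},
    }}}

    res, err := cfg.IsAllowed("https://app.example.com", http.MethodGet,
        []auth.CORSHeader{"x-amz-date"})
    if err != nil {
        fmt.Println("forbidden:", err)
        return
    }
    // Echoes the matching origin plus the rule's allowed methods/headers.
    fmt.Println(res.Origin, res.Methods, res.AllowHeaders)
}
```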
type CORSRule struct {
    AllowedMethods []CORSHTTPMethod `xml:"AllowedMethod"`
    AllowedHeaders []CORSHeader     `xml:"AllowedHeader"`
    ExposeHeaders  []CORSHeader     `xml:"ExposeHeader"`
    AllowedOrigins []string         `xml:"AllowedOrigin"`
    ID             *string
    MaxAgeSeconds  *int32
}

// Validate validates and returns an error if the CORS configuration has an invalid rule
func (cr *CORSRule) Validate() error {
    // validate CORS allowed headers
    for _, header := range cr.AllowedHeaders {
        if !header.IsValid() {
            debuglogger.Logf("invalid CORS allowed header: %s", header)
            return s3err.GetInvalidCORSHeaderErr(header.String())
        }
    }
    // validate CORS allowed methods
    for _, method := range cr.AllowedMethods {
        if !method.IsValid() {
            debuglogger.Logf("invalid CORS allowed method: %s", method)
            return s3err.GetUnsopportedCORSMethodErr(method.String())
        }
    }
    // validate CORS expose headers
    for _, header := range cr.ExposeHeaders {
        if !header.IsValid() {
            debuglogger.Logf("invalid CORS exposed header: %s", header)
            return s3err.GetInvalidCORSHeaderErr(header.String())
        }
    }

    return nil
}

// Match matches the provided origin, method and headers with the
// CORS configuration rule.
// If the matching origin is "*", it returns true as the second result
func (cr *CORSRule) Match(origin string, method CORSHTTPMethod, headers []CORSHeader) (bool, bool) {
    wildcardOrigin := false
    originFound := false

    // check if the provided origin exists in CORS AllowedOrigins
    for _, or := range cr.AllowedOrigins {
        if wildcardMatch(or, origin) {
            originFound = true
            if or == "*" {
                // mark wildcardOrigin as true, if "*" is found in AllowedOrigins
                wildcardOrigin = true
            }
            break
        }
    }

    if !originFound {
        return false, false
    }

    // cache the CORS AllowedMethods in a map
    allowedMethods := cacheCORSMethods(cr.AllowedMethods)
    // check if the provided method exists in CORS AllowedMethods
    if _, ok := allowedMethods[method]; !ok {
        return false, false
    }

    // check if the CORS rule's allowed headers match
    // the requested allowed headers
    for _, reqHeader := range headers {
        match := false
        for _, header := range cr.AllowedHeaders {
            if wildcardMatch(header.ToLower(), reqHeader.ToLower()) {
                match = true
                break
            }
        }

        if !match {
            return false, false
        }
    }

    return true, wildcardOrigin
}

// GetExposeHeaders returns comma separated CORS expose headers
func (cr *CORSRule) GetExposeHeaders() string {
    var result strings.Builder

    for i, h := range cr.ExposeHeaders {
        if i > 0 {
            result.WriteString(", ")
        }
        result.WriteString(h.String())
    }

    return result.String()
}

// buildAllowedHeaders builds a comma separated string from []CORSHeader
func buildAllowedHeaders(headers []CORSHeader) string {
    var result strings.Builder

    for i, h := range headers {
        if i > 0 {
            result.WriteString(", ")
        }
        result.WriteString(h.ToLower())
    }

    return result.String()
}

// GetAllowedMethods returns comma separated CORS allowed methods
func (cr *CORSRule) GetAllowedMethods() string {
    var result strings.Builder

    for i, m := range cr.AllowedMethods {
        if i > 0 {
            result.WriteString(", ")
        }
        result.WriteString(m.String())
    }

    return result.String()
}

// ParseCORSOutput parses raw bytes to 'CORSConfiguration'
func ParseCORSOutput(data []byte) (*CORSConfiguration, error) {
    var config CORSConfiguration
    err := xml.Unmarshal(data, &config)
    if err != nil {
        debuglogger.Logf("unmarshal cors output: %v", err)
        return nil, fmt.Errorf("failed to parse cors config: %w", err)
    }

    return &config, nil
}

func cacheCORSMethods(input []CORSHTTPMethod) map[CORSHTTPMethod]struct{} {
    result := make(map[CORSHTTPMethod]struct{}, len(input))
    for _, el := range input {
        result[el] = struct{}{}
    }

    return result
}

// ParseCORSHeaders parses/validates Access-Control-Request-Headers
// and returns []CORSHeader
func ParseCORSHeaders(headers string) ([]CORSHeader, error) {
    result := []CORSHeader{}
    if headers == "" {
        return result, nil
    }

    headersSplitted := strings.Split(headers, ",")
    for _, h := range headersSplitted {
        corsHeader := CORSHeader(strings.TrimSpace(h))
        if corsHeader == "" || !corsHeader.IsValid() {
            debuglogger.Logf("invalid access control header: %s", h)
            return nil, s3err.GetInvalidCORSRequestHeaderErr(h)
        }
        result = append(result, corsHeader)
    }

    return result, nil
}

func wildcardMatch(pattern, input string) bool {
    pIdx, sIdx := 0, 0
    starIdx, matchIdx := -1, 0

    for sIdx < len(input) {
        if pIdx < len(pattern) && pattern[pIdx] == input[sIdx] {
            // exact match of current char
            sIdx++
            pIdx++
        } else if pIdx < len(pattern) && pattern[pIdx] == '*' {
            // remember star position
            starIdx = pIdx
            matchIdx = sIdx
            pIdx++
        } else if starIdx != -1 {
            // backtrack: try to match more characters with '*'
            pIdx = starIdx + 1
            matchIdx++
            sIdx = matchIdx
        } else {
            return false
        }
    }

    // skip trailing stars
    for pIdx < len(pattern) && pattern[pIdx] == '*' {
        pIdx++
    }

    return pIdx == len(pattern)
}

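wildcardMatch is the classic greedy `*` matcher with backtracking: it remembers the most recent star and, on a mismatch, retries with that star consuming one more input byte, giving O(len(pattern)·len(input)) worst-case time with no allocations. A small in-package example of the expected results; the example function is a hypothetical addition, not part of the diff:

```
package auth

import "fmt"

// Sketch only: expected behavior of the unexported wildcardMatch above.
func Example_wildcardMatch() {
    fmt.Println(wildcardMatch("http://*.example.com", "http://app.example.com"))
    fmt.Println(wildcardMatch("x-amz-*", "x-amz-date"))
    fmt.Println(wildcardMatch("abc", "abcd"))
    // Output:
    // true
    // true
    // false
}
```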
@@ -1,736 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
    "net/http"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/versity/versitygw/s3err"
)

func TestCORSHeader_IsValid(t *testing.T) {
    tests := []struct {
        name   string
        header CORSHeader
        want   bool
    }{
        {"empty", "", true},
        {"valid", "X-Custom-Header", true},
        {"invalid_1", "Invalid Header", false},
        {"invalid_2", "invalid/header", false},
        {"invalid_3", "Invalid\tHeader", false},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            if got := tt.header.IsValid(); got != tt.want {
                t.Errorf("IsValid() = %v, want %v", got, tt.want)
            }
        })
    }
}

func TestCORSHTTPMethod_IsValid(t *testing.T) {
    tests := []struct {
        name   string
        method CORSHTTPMethod
        want   bool
    }{
        {"empty valid", "", true},
        {"GET valid", http.MethodGet, true},
        {"HEAD valid", http.MethodHead, true},
        {"PUT valid", http.MethodPut, true},
        {"POST valid", http.MethodPost, true},
        {"DELETE valid", http.MethodDelete, true},
        {"get invalid", "get", false},
        {"put invalid", "put", false},
        {"post invalid", "post", false},
        {"head invalid", "head", false},
        {"invalid", "FOO", false},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            if got := tt.method.IsValid(); got != tt.want {
                t.Errorf("IsValid() = %v, want %v", got, tt.want)
            }
        })
    }
}

func TestCORSHeader_ToLower(t *testing.T) {
    tests := []struct {
        name   string
        header CORSHeader
        want   string
    }{
        {
            name:   "already lowercase",
            header: CORSHeader("content-type"),
            want:   "content-type",
        },
        {
            name:   "mixed case",
            header: CORSHeader("X-CuStOm-HeAdEr"),
            want:   "x-custom-header",
        },
        {
            name:   "uppercase",
            header: CORSHeader("AUTHORIZATION"),
            want:   "authorization",
        },
        {
            name:   "empty string",
            header: CORSHeader(""),
            want:   "",
        },
        {
            name:   "numeric and symbols",
            header: CORSHeader("X-123-HEADER"),
            want:   "x-123-header",
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got := tt.header.ToLower()
            assert.Equal(t, tt.want, got)
        })
    }
}

func TestCORSHTTPMethod_IsEmpty(t *testing.T) {
    tests := []struct {
        name   string
        method CORSHTTPMethod
        want   bool
    }{
        {
            name:   "empty string is empty",
            method: CORSHTTPMethod(""),
            want:   true,
        },
        {
            name:   "GET method is not empty",
            method: CORSHTTPMethod("GET"),
            want:   false,
        },
        {
            name:   "random string is not empty",
            method: CORSHTTPMethod("FOO"),
            want:   false,
        },
        {
            name:   "lowercase get is not empty (case sensitive)",
            method: CORSHTTPMethod("get"),
            want:   false,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got := tt.method.IsEmpty()
            assert.Equal(t, tt.want, got)
        })
    }
}

func TestCORSConfiguration_Validate(t *testing.T) {
    tests := []struct {
        name string
        cfg  *CORSConfiguration
        want error
    }{
        {"nil config", nil, s3err.GetAPIError(s3err.ErrMalformedXML)},
        {"nil rules", &CORSConfiguration{}, s3err.GetAPIError(s3err.ErrMalformedXML)},
        {"empty rules", &CORSConfiguration{Rules: []CORSRule{}}, s3err.GetAPIError(s3err.ErrMalformedXML)},
        {"invalid rule", &CORSConfiguration{Rules: []CORSRule{{AllowedHeaders: []CORSHeader{"Invalid Header"}}}}, s3err.GetInvalidCORSHeaderErr("Invalid Header")},
        {"valid rule", &CORSConfiguration{Rules: []CORSRule{{
            AllowedOrigins: []string{"origin"},
            AllowedHeaders: []CORSHeader{"X-Test"},
            AllowedMethods: []CORSHTTPMethod{http.MethodGet},
            ExposeHeaders:  []CORSHeader{"X-Expose"},
        }}}, nil},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            err := tt.cfg.Validate()
            assert.EqualValues(t, tt.want, err)
        })
    }
}

func TestCORSConfiguration_IsAllowed(t *testing.T) {
    type input struct {
        cfg     *CORSConfiguration
        origin  string
        method  CORSHTTPMethod
        headers []CORSHeader
    }
    type output struct {
        result *CORSAllowanceConfig
        err    error
    }
    tests := []struct {
        name   string
        input  input
        output output
    }{
        {
            name: "allowed exact origin",
            input: input{
                cfg: &CORSConfiguration{Rules: []CORSRule{{
                    AllowedOrigins: []string{"http://allowed.com"},
                    AllowedMethods: []CORSHTTPMethod{http.MethodGet},
                    AllowedHeaders: []CORSHeader{"X-Test"},
                }}},
                origin:  "http://allowed.com",
                method:  http.MethodGet,
                headers: []CORSHeader{"X-Test"},
            },
            output: output{
                result: &CORSAllowanceConfig{
                    Origin:           "http://allowed.com",
                    AllowCredentials: "true",
                    Methods:          http.MethodGet,
                    AllowHeaders:     "x-test",
                    ExposedHeaders:   "",
                    MaxAge:           nil,
                },
                err: nil,
            },
        },
        {
            name: "allowed wildcard origin",
            input: input{
                cfg: &CORSConfiguration{Rules: []CORSRule{{
                    AllowedOrigins: []string{"*"},
                    AllowedMethods: []CORSHTTPMethod{http.MethodGet},
                    AllowedHeaders: []CORSHeader{"X-Test"},
                }}},
                origin:  "anything",
                method:  http.MethodGet,
                headers: []CORSHeader{"X-Test"},
            },
            output: output{
                result: &CORSAllowanceConfig{
                    Origin:           "*",
                    AllowCredentials: "false",
                    AllowHeaders:     "x-test",
                    Methods:          http.MethodGet,
                    ExposedHeaders:   "",
                    MaxAge:           nil,
                },
                err: nil,
            },
        },
        {
            name: "forbidden no matching origin",
            input: input{
                cfg: &CORSConfiguration{Rules: []CORSRule{{
                    AllowedOrigins: []string{"http://nope.com"},
                }}},
                origin: "http://not-allowed.com",
                method: http.MethodGet,
            },
            output: output{
                result: nil,
                err:    s3err.GetAPIError(s3err.ErrCORSForbidden),
            },
        },
        {
            name: "forbidden method not allowed",
            input: input{
                cfg: &CORSConfiguration{Rules: []CORSRule{{
                    AllowedOrigins: []string{"http://allowed.com"},
                    AllowedMethods: []CORSHTTPMethod{http.MethodPost},
                    AllowedHeaders: []CORSHeader{"X-Test"},
                }}},
                origin:  "http://allowed.com",
                method:  http.MethodGet,
                headers: []CORSHeader{"X-Test"},
            },
            output: output{
                result: nil,
                err:    s3err.GetAPIError(s3err.ErrCORSForbidden),
            },
        },
        {
            name: "forbidden header not allowed",
            input: input{
                cfg: &CORSConfiguration{Rules: []CORSRule{{
                    AllowedOrigins: []string{"http://allowed.com"},
                    AllowedMethods: []CORSHTTPMethod{http.MethodGet},
                    AllowedHeaders: []CORSHeader{"X-Test"},
                }}},
                origin:  "http://allowed.com",
                method:  http.MethodGet,
                headers: []CORSHeader{"X-Nope"},
            },
            output: output{
                result: nil,
                err:    s3err.GetAPIError(s3err.ErrCORSForbidden),
            },
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := tt.input.cfg.IsAllowed(tt.input.origin, tt.input.method, tt.input.headers)
            assert.EqualValues(t, tt.output.err, err)
            assert.EqualValues(t, tt.output.result, got)
        })
    }
}

func TestCORSRule_Validate(t *testing.T) {
    tests := []struct {
        name string
        rule CORSRule
        want error
    }{
        {
            name: "valid rule",
            rule: CORSRule{
                AllowedOrigins: []string{"http://allowed.com"},
                AllowedMethods: []CORSHTTPMethod{http.MethodGet},
                AllowedHeaders: []CORSHeader{"X-Test"},
            },
            want: nil,
        },
        {
            name: "invalid allowed methods",
            rule: CORSRule{
                AllowedOrigins: []string{"http://allowed.com"},
                AllowedMethods: []CORSHTTPMethod{"invalid_method"},
                AllowedHeaders: []CORSHeader{"X-Test"},
            },
            want: s3err.GetUnsopportedCORSMethodErr("invalid_method"),
        },
        {
            name: "invalid allowed header",
            rule: CORSRule{
                AllowedOrigins: []string{"http://allowed.com"},
                AllowedMethods: []CORSHTTPMethod{http.MethodGet},
                AllowedHeaders: []CORSHeader{"Invalid Header"},
            },
            want: s3err.GetInvalidCORSHeaderErr("Invalid Header"),
        },
        {
            name: "invalid expose header",
            rule: CORSRule{
                AllowedOrigins: []string{"http://allowed.com"},
                AllowedMethods: []CORSHTTPMethod{http.MethodGet},
                AllowedHeaders: []CORSHeader{"Content-Length"},
                ExposeHeaders:  []CORSHeader{"Content-Encoding", "invalid header"},
            },
            want: s3err.GetInvalidCORSHeaderErr("invalid header"),
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            err := tt.rule.Validate()
            assert.EqualValues(t, tt.want, err)
        })
    }
}

func TestCORSRule_Match(t *testing.T) {
    type input struct {
        rule    CORSRule
        origin  string
        method  CORSHTTPMethod
        headers []CORSHeader
    }
    type output struct {
        isAllowed  bool
        isWildcard bool
    }
    tests := []struct {
        name   string
        input  input
        output output
    }{
        {
            name: "exact origin and method match",
            input: input{
                rule: CORSRule{
                    AllowedOrigins: []string{"http://allowed.com"},
                    AllowedMethods: []CORSHTTPMethod{http.MethodGet},
                    AllowedHeaders: []CORSHeader{"X-Test"},
                },
                origin:  "http://allowed.com",
                method:  http.MethodGet,
                headers: []CORSHeader{"X-Test"},
            },
            output: output{isAllowed: true, isWildcard: false},
        },
        {
            name: "wildcard origin match",
            input: input{
                rule: CORSRule{
                    AllowedOrigins: []string{"*"},
                    AllowedMethods: []CORSHTTPMethod{http.MethodPost},
                    AllowedHeaders: []CORSHeader{"X-Test"},
                },
                origin:  "http://random.com",
                method:  http.MethodPost,
                headers: []CORSHeader{"X-Test"},
            },
            output: output{isAllowed: true, isWildcard: true},
        },
        {
            name: "wildcard containing origin match",
            input: input{
                rule: CORSRule{
                    AllowedOrigins: []string{"http://random*"},
                    AllowedMethods: []CORSHTTPMethod{http.MethodPost},
                    AllowedHeaders: []CORSHeader{"X-Test"},
                },
                origin:  "http://random.com",
                method:  http.MethodPost,
                headers: []CORSHeader{"X-Test"},
            },
            output: output{isAllowed: true, isWildcard: false},
        },
        {
            name: "wildcard allowed headers match",
            input: input{
                rule: CORSRule{
                    AllowedOrigins: []string{"http://something.com"},
                    AllowedMethods: []CORSHTTPMethod{http.MethodPost},
                    AllowedHeaders: []CORSHeader{"X-*"},
                },
                origin:  "http://something.com",
                method:  http.MethodPost,
                headers: []CORSHeader{"X-Test", "X-Something", "X-Anything"},
            },
            output: output{isAllowed: true, isWildcard: false},
        },
        {
            name: "origin mismatch",
            input: input{
                rule: CORSRule{
                    AllowedOrigins: []string{"http://allowed.com"},
                    AllowedMethods: []CORSHTTPMethod{http.MethodGet},
                    AllowedHeaders: []CORSHeader{"X-Test"},
                },
                origin:  "http://notallowed.com",
                method:  http.MethodGet,
                headers: []CORSHeader{"X-Test"},
            },
            output: output{isAllowed: false, isWildcard: false},
        },
        {
            name: "method mismatch",
            input: input{
                rule: CORSRule{
                    AllowedOrigins: []string{"http://allowed.com"},
                    AllowedMethods: []CORSHTTPMethod{http.MethodPost},
                    AllowedHeaders: []CORSHeader{"X-Test"},
                },
                origin:  "http://allowed.com",
                method:  http.MethodGet,
                headers: []CORSHeader{"X-Test"},
            },
            output: output{isAllowed: false, isWildcard: false},
        },
        {
            name: "header mismatch",
            input: input{
                rule: CORSRule{
                    AllowedOrigins: []string{"http://allowed.com"},
                    AllowedMethods: []CORSHTTPMethod{http.MethodGet},
                    AllowedHeaders: []CORSHeader{"X-Test"},
                },
                origin:  "http://allowed.com",
                method:  http.MethodGet,
                headers: []CORSHeader{"X-Other"},
            },
            output: output{isAllowed: false, isWildcard: false},
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            isAllowed, wild := tt.input.rule.Match(tt.input.origin, tt.input.method, tt.input.headers)
            assert.Equal(t, tt.output.isAllowed, isAllowed)
            assert.Equal(t, tt.output.isWildcard, wild)
        })
    }
}

func TestGetExposeHeaders(t *testing.T) {
    tests := []struct {
        name string
        rule CORSRule
        want string
    }{
        {"multiple headers", CORSRule{ExposeHeaders: []CORSHeader{"Content-Length", "Content-Type", "Content-Encoding"}}, "Content-Length, Content-Type, Content-Encoding"},
        {"single header", CORSRule{ExposeHeaders: []CORSHeader{"Authorization"}}, "Authorization"},
        {"no headers", CORSRule{}, ""},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got := tt.rule.GetExposeHeaders()
            assert.Equal(t, tt.want, got)
        })
    }
}

func TestBuildAllowedHeaders(t *testing.T) {
    tests := []struct {
        name    string
        headers []CORSHeader
        want    string
    }{
        {
            name:    "empty slice returns empty string",
            headers: []CORSHeader{},
            want:    "",
        },
        {
            name:    "single header lowercase",
            headers: []CORSHeader{"Content-Type"},
            want:    "content-type",
        },
        {
            name:    "multiple headers lowercased with commas",
            headers: []CORSHeader{"Content-Type", "X-Custom-Header", "Authorization"},
            want:    "content-type, x-custom-header, authorization",
        },
        {
            name:    "already lowercase header",
            headers: []CORSHeader{"accept"},
            want:    "accept",
        },
        {
            name:    "mixed case headers",
            headers: []CORSHeader{"ACCEPT", "x-Powered-By"},
            want:    "accept, x-powered-by",
        },
        {
            name:    "empty header value",
            headers: []CORSHeader{""},
            want:    "",
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got := buildAllowedHeaders(tt.headers)
            assert.Equal(t, tt.want, got)
        })
    }
}

func TestGetAllowedMethods(t *testing.T) {
    tests := []struct {
        name string
        rule CORSRule
        want string
    }{
        {"multiple methods", CORSRule{AllowedMethods: []CORSHTTPMethod{http.MethodGet, http.MethodPost, http.MethodPut}}, "GET, POST, PUT"},
        {"single method", CORSRule{AllowedMethods: []CORSHTTPMethod{http.MethodGet}}, "GET"},
        {"no methods", CORSRule{}, ""},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got := tt.rule.GetAllowedMethods()
            assert.Equal(t, tt.want, got)
        })
    }
}

func TestParseCORSOutput(t *testing.T) {
    tests := []struct {
        name string
        data string
        want bool
    }{
        {"valid", `<CORSConfiguration><CORSRule></CORSRule></CORSConfiguration>`, true},
        {"invalid xml", `<CORSConfiguration><CORSRule>`, false},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            cfg, err := ParseCORSOutput([]byte(tt.data))
            if (err == nil) != tt.want {
                t.Errorf("ParseCORSOutput() err = %v, want success=%v", err, tt.want)
            }
            if tt.want && cfg == nil {
                t.Errorf("Expected non-nil config")
            }
        })
    }
}

func TestCacheCORSProps(t *testing.T) {
    tests := []struct {
        name string
        in   []CORSHTTPMethod
        want map[string]struct{}
    }{
        {
            name: "empty CORSHTTPMethod slice",
            in:   []CORSHTTPMethod{},
            want: map[string]struct{}{},
        },
        {
            name: "single CORSHTTPMethod",
            in:   []CORSHTTPMethod{http.MethodGet},
            want: map[string]struct{}{http.MethodGet: {}},
        },
        {
            name: "multiple CORSHTTPMethods",
            in:   []CORSHTTPMethod{http.MethodGet, http.MethodPost, http.MethodPut},
            want: map[string]struct{}{
                http.MethodGet:  {},
                http.MethodPost: {},
                http.MethodPut:  {},
            },
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got := cacheCORSMethods(tt.in)
            assert.Equal(t, len(tt.want), len(got))
            for key := range tt.want {
                _, ok := got[CORSHTTPMethod(key)]
                assert.True(t, ok)
            }
        })
    }
}

func TestParseCORSHeaders(t *testing.T) {
    tests := []struct {
        name string
        in   string
        want []CORSHeader
        err  error
    }{
        {
            name: "empty string",
            in:   "",
            want: []CORSHeader{},
            err:  nil,
        },
        {
            name: "single valid header",
            in:   "X-Test",
            want: []CORSHeader{"X-Test"},
            err:  nil,
        },
        {
            name: "multiple valid headers with spaces",
            in:   "X-Test, Content-Type, Authorization",
            want: []CORSHeader{"X-Test", "Content-Type", "Authorization"},
            err:  nil,
        },
        {
            name: "header with leading/trailing spaces",
            in:   "  X-Test  ",
            want: []CORSHeader{"X-Test"},
            err:  nil,
        },
        {
            name: "contains invalid header",
            in:   "X-Test, Invalid Header, Content-Type",
            want: nil,
            err:  s3err.GetInvalidCORSRequestHeaderErr(" Invalid Header"),
        },
        {
            name: "only invalid header",
            in:   "Invalid Header",
            want: nil,
            err:  s3err.GetInvalidCORSRequestHeaderErr("Invalid Header"),
        },
        {
            name: "multiple commas in a row",
            in:   "X-Test,,Content-Type",
            want: nil,
            err:  s3err.GetInvalidCORSRequestHeaderErr(""),
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := ParseCORSHeaders(tt.in)
            assert.EqualValues(t, tt.err, err)
            assert.Equal(t, tt.want, got)
        })
    }
}

func TestWildcardMatch(t *testing.T) {
    tests := []struct {
        name    string
        pattern string
        input   string
        want    bool
    }{
        // Exact match, no wildcards
        {"exact match", "hello", "hello", true},
        {"exact mismatch", "hello", "hell", false},
        // Single '*' matching zero chars
        {"star matches zero chars", "he*lo", "helo", true},
        // Single '*' matching multiple chars
        {"star matches multiple chars", "he*o", "heyyyyyo", true},
        // '*' at start
        {"star at start", "*world", "hello world", true},
        // '*' at end
        {"star at end", "hello*", "hello there", true},
        // '*' matches whole string
        {"only star", "*", "anything", true},
        {"only star empty", "*", "", true},
        // Multiple '*'s
        {"multiple stars", "a*b*c", "axxxbzzzzyc", true},
        {"multiple stars no match", "a*b*c", "axxxbzzzzy", false},
        // Backtracking needed
        {"backtracking required", "a*b*c", "ab123c", true},
        // No match with star present
        {"star but mismatch", "he*world", "hey there", false},
        // Trailing stars in pattern
        {"trailing stars match", "abc**", "abc", true},
        {"trailing stars match longer", "abc**", "abccc", true},
        // Empty pattern cases
        {"empty pattern and empty input", "", "", true},
        {"empty pattern non-empty input", "", "a", false},
        {"only stars pattern with empty input", "***", "", true},
        // Pattern longer than input
        {"pattern longer no star", "abcd", "abc", false},
        // Input longer but no star
        {"input longer no star", "abc", "abcd", false},
        // Complex interleaved match
        {"complex interleaved", "*a*b*cd*", "xxaYYbZZcd123", true},
        // Star match at the end after mismatch
        {"mismatch then star match", "ab*xyz", "abzzzxyz", true},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got := wildcardMatch(tt.pattern, tt.input)
            assert.Equal(t, tt.want, got)
        })
    }
}

@@ -16,69 +16,17 @@ package auth

import (
    "encoding/json"
    "errors"
    "fmt"
    "net/http"

    "github.com/versity/versitygw/s3err"
)

var ErrAccessDenied = errors.New("access denied")

type policyErr string

func (p policyErr) Error() string {
    return string(p)
}

const (
    policyErrResourceMismatch     = policyErr("Action does not apply to any resource(s) in statement")
    policyErrInvalidResource      = policyErr("Policy has invalid resource")
    policyErrInvalidPrincipal     = policyErr("Invalid principal in policy")
    policyErrInvalidAction        = policyErr("Policy has invalid action")
    policyErrInvalidPolicy        = policyErr("This policy contains invalid Json")
    policyErrInvalidFirstChar     = policyErr("Policies must be valid JSON and the first byte must be '{'")
    policyErrEmptyStatement       = policyErr("Could not parse the policy: Statement is empty!")
    policyErrMissingStatmentField = policyErr("Missing required field Statement")
    policyErrInvalidVersion       = policyErr("The policy must contain a valid version string")
)

type BucketPolicy struct {
    Version   PolicyVersion      `json:"Version"`
    Statement []BucketPolicyItem `json:"Statement"`
}

func (bp *BucketPolicy) UnmarshalJSON(data []byte) error {
    var tmp struct {
        Version   *PolicyVersion
        Statement *[]BucketPolicyItem `json:"Statement"`
    }

    if err := json.Unmarshal(data, &tmp); err != nil {
        return err
    }

    // If Statement is nil (not present in JSON), return an error
    if tmp.Statement == nil {
        return policyErrMissingStatmentField
    }

    if tmp.Version == nil {
        // the bucket policy version should default to '2008-10-17'
        bp.Version = PolicyVersion2008
    } else {
        bp.Version = *tmp.Version
    }

    bp.Statement = *tmp.Statement
    return nil
}

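The custom unmarshaller means a missing "Version" silently defaults while a missing "Statement" is a hard error. A minimal sketch of both paths, assuming the BucketPolicy and PolicyVersion2008 definitions above; the helper name is hypothetical and the printed values depend on how PolicyVersion stringifies:

```
package auth

import "fmt"

// Sketch only: demonstrates the Version-defaulting and the
// missing-Statement error from the UnmarshalJSON above.
func demoPolicyUnmarshal() {
    var p BucketPolicy

    // Statement present (even empty) parses; Version is defaulted.
    _ = p.UnmarshalJSON([]byte(`{"Statement": []}`))
    fmt.Println(p.Version) // PolicyVersion2008, i.e. the 2008-10-17 language

    // No Statement field at all is rejected outright.
    err := p.UnmarshalJSON([]byte(`{"Version": "2012-10-17"}`))
    fmt.Println(err) // "Missing required field Statement"
}
```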
func (bp *BucketPolicy) Validate(bucket string, iam IAMService) error {
    if !bp.Version.isValid() {
        return policyErrInvalidVersion
    }

    for _, statement := range bp.Statement {
        err := statement.Validate(bucket, iam)
        if err != nil {
@@ -90,48 +38,17 @@ func (bp *BucketPolicy) Validate(bucket string, iam IAMService) error {
}

func (bp *BucketPolicy) isAllowed(principal string, action Action, resource string) bool {
    var isAllowed bool
    for _, statement := range bp.Statement {
        if statement.findMatch(principal, action, resource) {
            switch statement.Effect {
            case BucketPolicyAccessTypeAllow:
                isAllowed = true
                return true
            case BucketPolicyAccessTypeDeny:
                return false
            }
        }
    }

    return isAllowed
}

// isPublicFor checks if the bucket policy statements contain
// a statement granting public access to the given resource and action
func (bp *BucketPolicy) isPublicFor(resource string, action Action) bool {
    var isAllowed bool
    for _, statement := range bp.Statement {
        if statement.isPublicFor(resource, action) {
            switch statement.Effect {
            case BucketPolicyAccessTypeAllow:
                isAllowed = true
            case BucketPolicyAccessTypeDeny:
                return false
            }
        }
    }

    return isAllowed
}

// IsPublic checks if one of the bucket policy statements grants
// public access to ALL users
func (bp *BucketPolicy) IsPublic() bool {
    for _, statement := range bp.Statement {
        if statement.isPublic() {
            return true
        }
    }

    return false
}

@@ -158,14 +75,11 @@ func (bpi *BucketPolicyItem) Validate(bucket string, iam IAMService) error {

    for action := range bpi.Actions {
        isObjectAction := action.IsObjectAction()
        if isObjectAction == nil {
            break
        if isObjectAction && !containsObjectAction {
            return fmt.Errorf("unsupported object action '%v' on the specified resources", action)
        }
        if *isObjectAction && !containsObjectAction {
            return policyErrResourceMismatch
        }
        if !*isObjectAction && !containsBucketAction {
            return policyErrResourceMismatch
        if !isObjectAction && !containsBucketAction {
            return fmt.Errorf("unsupported bucket action '%v' on the specified resources", action)
        }
    }

@@ -180,18 +94,6 @@ func (bpi *BucketPolicyItem) findMatch(principal string, action Action, resource
    return false
}

// isPublicFor checks if the bucket policy statement grants public access
// for the given resource and action
func (bpi *BucketPolicyItem) isPublicFor(resource string, action Action) bool {
    return bpi.Principals.isPublic() && bpi.Actions.FindMatch(action) && bpi.Resources.FindMatch(resource)
}

// isPublic checks if the statement grants public access
// to ALL users
func (bpi *BucketPolicyItem) isPublic() bool {
    return bpi.Principals.isPublic()
}

func getMalformedPolicyError(err error) error {
    return s3err.APIError{
        Code: "MalformedPolicy",
@@ -200,31 +102,10 @@ func getMalformedPolicyError(err error) error {
    }
}

// ParsePolicyDocument parses raw bytes to 'BucketPolicy'
func ParsePolicyDocument(data []byte) (*BucketPolicy, error) {
    var policy BucketPolicy
    if err := json.Unmarshal(data, &policy); err != nil {
        var pe policyErr
        if errors.As(err, &pe) {
            return nil, getMalformedPolicyError(err)
        }
        return nil, getMalformedPolicyError(policyErrInvalidPolicy)
    }

    return &policy, nil
}

func ValidatePolicyDocument(policyBin []byte, bucket string, iam IAMService) error {
    if len(policyBin) == 0 || policyBin[0] != '{' {
        return getMalformedPolicyError(policyErrInvalidFirstChar)
    }
    policy, err := ParsePolicyDocument(policyBin)
    if err != nil {
        return err
    }

    if len(policy.Statement) == 0 {
        return getMalformedPolicyError(policyErrEmptyStatement)
    var policy BucketPolicy
    if err := json.Unmarshal(policyBin, &policy); err != nil {
        return getMalformedPolicyError(err)
    }

    if err := policy.Validate(bucket, iam); err != nil {
@@ -234,26 +115,12 @@ func ValidatePolicyDocument(policyBin []byte, bucket string, iam IAMService) err
    return nil
}

func VerifyBucketPolicy(policy []byte, access, bucket, object string, action Action) error {
    var bucketPolicy BucketPolicy
    if err := json.Unmarshal(policy, &bucketPolicy); err != nil {
        return fmt.Errorf("failed to parse the bucket policy: %w", err)
func verifyBucketPolicy(policy []byte, access, bucket, object string, action Action) error {
    // If bucket policy is not set
    if len(policy) == 0 {
        return nil
    }

    resource := bucket
    if object != "" {
        resource += "/" + object
    }

    if !bucketPolicy.isAllowed(access, action, resource) {
        return s3err.GetAPIError(s3err.ErrAccessDenied)
    }

    return nil
}

// Checks if the bucket policy grants public access
func VerifyPublicBucketPolicy(policy []byte, bucket, object string, action Action) error {
    var bucketPolicy BucketPolicy
    if err := json.Unmarshal(policy, &bucketPolicy); err != nil {
        return err
@@ -264,40 +131,10 @@ func VerifyPublicBucketPolicy(policy []byte, bucket, object string, action Actio
        resource += "/" + object
    }

    if !bucketPolicy.isPublicFor(resource, action) {
        return ErrAccessDenied
    fmt.Println(access, action, resource)
    if !bucketPolicy.isAllowed(access, action, resource) {
        return s3err.GetAPIError(s3err.ErrAccessDenied)
    }

    return nil
}

// matchPattern checks if the input string matches the given pattern with wildcard(`*`) and any character(`?`).
// - `?` matches exactly one occurrence of any character.
// - `*` matches arbitrarily many (including zero) occurrences of any character.
func matchPattern(pattern, input string) bool {
    pIdx, sIdx := 0, 0
    starIdx, matchIdx := -1, 0

    for sIdx < len(input) {
        if pIdx < len(pattern) && (pattern[pIdx] == '?' || pattern[pIdx] == input[sIdx]) {
            sIdx++
            pIdx++
        } else if pIdx < len(pattern) && pattern[pIdx] == '*' {
            starIdx = pIdx
            matchIdx = sIdx
            pIdx++
        } else if starIdx != -1 {
            pIdx = starIdx + 1
            matchIdx++
            sIdx = matchIdx
        } else {
            return false
        }
    }

    for pIdx < len(pattern) && pattern[pIdx] == '*' {
        pIdx++
    }

    return pIdx == len(pattern)
}

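matchPattern extends the same backtracking loop used by wildcardMatch in cors.go with `?` for exactly one character, which is the shape bucket-policy resource patterns need. A short sketch of the expected results; the helper name and resource strings are invented for illustration:

```
package auth

import "fmt"

// Sketch only: expected behavior of the unexported matchPattern above.
func demoMatchPattern() {
    fmt.Println(matchPattern("my-bucket/?.txt", "my-bucket/a.txt")) // true: '?' matches one char
    fmt.Println(matchPattern("my-bucket/*", "my-bucket/logs/2024")) // true: '*' matches the rest
    fmt.Println(matchPattern("my-bucket/?", "my-bucket/ab"))        // false: '?' matches exactly one
}
```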
@@ -16,247 +16,139 @@ package auth
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Action string
|
||||
|
||||
const (
|
||||
GetBucketAclAction Action = "s3:GetBucketAcl"
|
||||
CreateBucketAction Action = "s3:CreateBucket"
|
||||
PutBucketAclAction Action = "s3:PutBucketAcl"
|
||||
DeleteBucketAction Action = "s3:DeleteBucket"
|
||||
PutBucketVersioningAction Action = "s3:PutBucketVersioning"
|
||||
GetBucketVersioningAction Action = "s3:GetBucketVersioning"
|
||||
PutBucketPolicyAction Action = "s3:PutBucketPolicy"
|
||||
GetBucketPolicyAction Action = "s3:GetBucketPolicy"
|
||||
DeleteBucketPolicyAction Action = "s3:DeleteBucketPolicy"
|
||||
AbortMultipartUploadAction Action = "s3:AbortMultipartUpload"
|
||||
ListMultipartUploadPartsAction Action = "s3:ListMultipartUploadParts"
|
||||
ListBucketMultipartUploadsAction Action = "s3:ListBucketMultipartUploads"
|
||||
PutObjectAction Action = "s3:PutObject"
|
||||
GetObjectAction Action = "s3:GetObject"
|
||||
GetObjectVersionAction Action = "s3:GetObjectVersion"
|
||||
DeleteObjectAction Action = "s3:DeleteObject"
|
||||
DeleteObjectVersionAction Action = "s3:DeleteObjectVersion"
|
||||
GetObjectAclAction Action = "s3:GetObjectAcl"
|
||||
GetObjectAttributesAction Action = "s3:GetObjectAttributes"
|
||||
GetObjectVersionAttributesAction Action = "s3:GetObjectVersionAttributes"
|
||||
PutObjectAclAction Action = "s3:PutObjectAcl"
|
||||
RestoreObjectAction Action = "s3:RestoreObject"
|
||||
GetBucketTaggingAction Action = "s3:GetBucketTagging"
|
||||
PutBucketTaggingAction Action = "s3:PutBucketTagging"
|
||||
GetObjectTaggingAction Action = "s3:GetObjectTagging"
|
||||
GetObjectVersionTaggingAction Action = "s3:GetObjectVersionTagging"
|
||||
PutObjectTaggingAction Action = "s3:PutObjectTagging"
|
||||
PutObjectVersionTaggingAction Action = "s3:PutObjectVersionTagging"
|
||||
DeleteObjectTaggingAction Action = "s3:DeleteObjectTagging"
|
||||
DeleteObjectVersionTaggingAction Action = "s3:DeleteObjectVersionTagging"
|
||||
ListBucketVersionsAction Action = "s3:ListBucketVersions"
|
||||
ListBucketAction Action = "s3:ListBucket"
|
||||
GetBucketObjectLockConfigurationAction Action = "s3:GetBucketObjectLockConfiguration"
|
||||
	PutBucketObjectLockConfigurationAction Action = "s3:PutBucketObjectLockConfiguration"
	GetObjectLegalHoldAction Action = "s3:GetObjectLegalHold"
	PutObjectLegalHoldAction Action = "s3:PutObjectLegalHold"
	GetObjectRetentionAction Action = "s3:GetObjectRetention"
	PutObjectRetentionAction Action = "s3:PutObjectRetention"
	BypassGovernanceRetentionAction Action = "s3:BypassGovernanceRetention"
	PutBucketOwnershipControlsAction Action = "s3:PutBucketOwnershipControls"
	GetBucketOwnershipControlsAction Action = "s3:GetBucketOwnershipControls"
	PutBucketCorsAction Action = "s3:PutBucketCORS"
	GetBucketCorsAction Action = "s3:GetBucketCORS"
	PutAnalyticsConfigurationAction Action = "s3:PutAnalyticsConfiguration"
	GetAnalyticsConfigurationAction Action = "s3:GetAnalyticsConfiguration"
	PutEncryptionConfigurationAction Action = "s3:PutEncryptionConfiguration"
	GetEncryptionConfigurationAction Action = "s3:GetEncryptionConfiguration"
	PutIntelligentTieringConfigurationAction Action = "s3:PutIntelligentTieringConfiguration"
	GetIntelligentTieringConfigurationAction Action = "s3:GetIntelligentTieringConfiguration"
	PutInventoryConfigurationAction Action = "s3:PutInventoryConfiguration"
	GetInventoryConfigurationAction Action = "s3:GetInventoryConfiguration"
	PutLifecycleConfigurationAction Action = "s3:PutLifecycleConfiguration"
	GetLifecycleConfigurationAction Action = "s3:GetLifecycleConfiguration"
	PutBucketLoggingAction Action = "s3:PutBucketLogging"
	GetBucketLoggingAction Action = "s3:GetBucketLogging"
	PutBucketRequestPaymentAction Action = "s3:PutBucketRequestPayment"
	GetBucketRequestPaymentAction Action = "s3:GetBucketRequestPayment"
	PutMetricsConfigurationAction Action = "s3:PutMetricsConfiguration"
	GetMetricsConfigurationAction Action = "s3:GetMetricsConfiguration"
	PutReplicationConfigurationAction Action = "s3:PutReplicationConfiguration"
	GetReplicationConfigurationAction Action = "s3:GetReplicationConfiguration"
	PutBucketPublicAccessBlockAction Action = "s3:PutBucketPublicAccessBlock"
	GetBucketPublicAccessBlockAction Action = "s3:GetBucketPublicAccessBlock"
	PutBucketNotificationAction Action = "s3:PutBucketNotification"
	GetBucketNotificationAction Action = "s3:GetBucketNotification"
	PutAccelerateConfigurationAction Action = "s3:PutAccelerateConfiguration"
	GetAccelerateConfigurationAction Action = "s3:GetAccelerateConfiguration"
	PutBucketWebsiteAction Action = "s3:PutBucketWebsite"
	GetBucketWebsiteAction Action = "s3:GetBucketWebsite"
	GetBucketPolicyStatusAction Action = "s3:GetBucketPolicyStatus"
	GetBucketLocationAction Action = "s3:GetBucketLocation"

	AllActions Action = "s3:*"
	GetBucketAclAction Action = "s3:GetBucketAcl"
	CreateBucketAction Action = "s3:CreateBucket"
	PutBucketAclAction Action = "s3:PutBucketAcl"
	DeleteBucketAction Action = "s3:DeleteBucket"
	PutBucketVersioningAction Action = "s3:PutBucketVersioning"
	GetBucketVersioningAction Action = "s3:GetBucketVersioning"
	PutBucketPolicyAction Action = "s3:PutBucketPolicy"
	GetBucketPolicyAction Action = "s3:GetBucketPolicy"
	DeleteBucketPolicyAction Action = "s3:DeleteBucketPolicy"
	AbortMultipartUploadAction Action = "s3:AbortMultipartUpload"
	ListMultipartUploadPartsAction Action = "s3:ListMultipartUploadParts"
	ListBucketMultipartUploadsAction Action = "s3:ListBucketMultipartUploads"
	PutObjectAction Action = "s3:PutObject"
	GetObjectAction Action = "s3:GetObject"
	DeleteObjectAction Action = "s3:DeleteObject"
	GetObjectAclAction Action = "s3:GetObjectAcl"
	GetObjectAttributesAction Action = "s3:GetObjectAttributes"
	PutObjectAclAction Action = "s3:PutObjectAcl"
	RestoreObjectAction Action = "s3:RestoreObject"
	GetBucketTaggingAction Action = "s3:GetBucketTagging"
	PutBucketTaggingAction Action = "s3:PutBucketTagging"
	GetObjectTaggingAction Action = "s3:GetObjectTagging"
	PutObjectTaggingAction Action = "s3:PutObjectTagging"
	DeleteObjectTaggingAction Action = "s3:DeleteObjectTagging"
	ListBucketVersionsAction Action = "s3:ListBucketVersions"
	ListBucketAction Action = "s3:ListBucket"
	AllActions Action = "s3:*"
)

var supportedActionList = map[Action]struct{}{
	GetBucketAclAction: {},
	CreateBucketAction: {},
	PutBucketAclAction: {},
	DeleteBucketAction: {},
	PutBucketVersioningAction: {},
	GetBucketVersioningAction: {},
	PutBucketPolicyAction: {},
	GetBucketPolicyAction: {},
	DeleteBucketPolicyAction: {},
	AbortMultipartUploadAction: {},
	ListMultipartUploadPartsAction: {},
	ListBucketMultipartUploadsAction: {},
	PutObjectAction: {},
	GetObjectAction: {},
	GetObjectVersionAction: {},
	DeleteObjectAction: {},
	DeleteObjectVersionAction: {},
	GetObjectAclAction: {},
	GetObjectAttributesAction: {},
	GetObjectVersionAttributesAction: {},
	PutObjectAclAction: {},
	RestoreObjectAction: {},
	GetBucketTaggingAction: {},
	PutBucketTaggingAction: {},
	GetObjectTaggingAction: {},
	GetObjectVersionTaggingAction: {},
	PutObjectTaggingAction: {},
	PutObjectVersionTaggingAction: {},
	DeleteObjectTaggingAction: {},
	DeleteObjectVersionTaggingAction: {},
	ListBucketVersionsAction: {},
	ListBucketAction: {},
	GetBucketObjectLockConfigurationAction: {},
	PutBucketObjectLockConfigurationAction: {},
	GetObjectLegalHoldAction: {},
	PutObjectLegalHoldAction: {},
	GetObjectRetentionAction: {},
	PutObjectRetentionAction: {},
	BypassGovernanceRetentionAction: {},
	PutBucketOwnershipControlsAction: {},
	GetBucketOwnershipControlsAction: {},
	PutBucketCorsAction: {},
	GetBucketCorsAction: {},
	PutAnalyticsConfigurationAction: {},
	GetAnalyticsConfigurationAction: {},
	PutEncryptionConfigurationAction: {},
	GetEncryptionConfigurationAction: {},
	PutIntelligentTieringConfigurationAction: {},
	GetIntelligentTieringConfigurationAction: {},
	PutInventoryConfigurationAction: {},
	GetInventoryConfigurationAction: {},
	PutLifecycleConfigurationAction: {},
	GetLifecycleConfigurationAction: {},
	PutBucketLoggingAction: {},
	GetBucketLoggingAction: {},
	PutBucketRequestPaymentAction: {},
	GetBucketRequestPaymentAction: {},
	PutMetricsConfigurationAction: {},
	GetMetricsConfigurationAction: {},
	PutReplicationConfigurationAction: {},
	GetReplicationConfigurationAction: {},
	PutBucketPublicAccessBlockAction: {},
	GetBucketPublicAccessBlockAction: {},
	PutBucketNotificationAction: {},
	GetBucketNotificationAction: {},
	PutAccelerateConfigurationAction: {},
	GetAccelerateConfigurationAction: {},
	PutBucketWebsiteAction: {},
	GetBucketWebsiteAction: {},
	GetBucketPolicyStatusAction: {},
	GetBucketLocationAction: {},
	AllActions: {},
	GetBucketAclAction: {},
	CreateBucketAction: {},
	PutBucketAclAction: {},
	DeleteBucketAction: {},
	PutBucketVersioningAction: {},
	GetBucketVersioningAction: {},
	PutBucketPolicyAction: {},
	GetBucketPolicyAction: {},
	DeleteBucketPolicyAction: {},
	AbortMultipartUploadAction: {},
	ListMultipartUploadPartsAction: {},
	ListBucketMultipartUploadsAction: {},
	PutObjectAction: {},
	GetObjectAction: {},
	DeleteObjectAction: {},
	GetObjectAclAction: {},
	GetObjectAttributesAction: {},
	PutObjectAclAction: {},
	RestoreObjectAction: {},
	GetBucketTaggingAction: {},
	PutBucketTaggingAction: {},
	GetObjectTaggingAction: {},
	PutObjectTaggingAction: {},
	DeleteObjectTaggingAction: {},
	ListBucketVersionsAction: {},
	ListBucketAction: {},
	AllActions: {},
}

var supportedObjectActionList = map[Action]struct{}{
	AbortMultipartUploadAction: {},
	ListMultipartUploadPartsAction: {},
	PutObjectAction: {},
	GetObjectAction: {},
	GetObjectVersionAction: {},
	DeleteObjectAction: {},
	DeleteObjectVersionAction: {},
	GetObjectAclAction: {},
	GetObjectAttributesAction: {},
	GetObjectVersionAttributesAction: {},
	PutObjectAclAction: {},
	RestoreObjectAction: {},
	GetObjectTaggingAction: {},
	GetObjectVersionTaggingAction: {},
	PutObjectTaggingAction: {},
	PutObjectVersionTaggingAction: {},
	DeleteObjectTaggingAction: {},
	DeleteObjectVersionTaggingAction: {},
	GetObjectLegalHoldAction: {},
	PutObjectLegalHoldAction: {},
	GetObjectRetentionAction: {},
	PutObjectRetentionAction: {},
	BypassGovernanceRetentionAction: {},
	AllActions: {},
	AbortMultipartUploadAction: {},
	ListMultipartUploadPartsAction: {},
	PutObjectAction: {},
	GetObjectAction: {},
	DeleteObjectAction: {},
	GetObjectAclAction: {},
	GetObjectAttributesAction: {},
	PutObjectAclAction: {},
	RestoreObjectAction: {},
	GetObjectTaggingAction: {},
	PutObjectTaggingAction: {},
	DeleteObjectTaggingAction: {},
	AllActions: {},
}

// IsValid validates the Action: it must either appear in the supported
// actions list or wildcard-match an entry in it
func (a Action) IsValid() error {
	if !strings.HasPrefix(string(a), "s3:") {
		return policyErrInvalidAction
		return fmt.Errorf("invalid action: %v", a)
	}

	if a == AllActions {
		return nil
	}

	// first check for an exact match
	if _, ok := supportedActionList[a]; ok {
		return nil
	}

	// walk through the supported actions and try a wildcard match
	for action := range supportedActionList {
		if action.Match(a) {
			return nil
	if a[len(a)-1] == '*' {
		pattern := strings.TrimSuffix(string(a), "*")
		for act := range supportedActionList {
			if strings.HasPrefix(string(act), pattern) {
				return nil
			}
		}

		return fmt.Errorf("invalid wildcard usage: %v prefix is not in the supported actions list", pattern)
	}

	return policyErrInvalidAction
}
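For reference, the `*`/`?` glob semantics that `IsValid` and `Match` rely on can be sketched as a small standalone program. The `matchPattern` helper below is an assumption about the gateway's internal helper of the same name, inferred from the test cases further down; it is a sketch, not the gateway's own code.

```
package main

import "fmt"

// matchPattern reports whether input matches pattern, where '*' matches any
// (possibly empty) run of characters and '?' matches exactly one character.
func matchPattern(pattern, input string) bool {
	if pattern == "" {
		return input == ""
	}
	switch pattern[0] {
	case '*':
		// try consuming zero or more input characters
		for i := 0; i <= len(input); i++ {
			if matchPattern(pattern[1:], input[i:]) {
				return true
			}
		}
		return false
	case '?':
		return input != "" && matchPattern(pattern[1:], input[1:])
	default:
		return input != "" && input[0] == pattern[0] && matchPattern(pattern[1:], input[1:])
	}
}

func main() {
	fmt.Println(matchPattern("s3:Get*", "s3:GetObject"))      // true
	fmt.Println(matchPattern("s3:Get?bject", "s3:GetObject")) // true
	fmt.Println(matchPattern("s3:???", "s3:GetObject"))       // false
}
```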

func getBoolPtr(bl bool) *bool {
	return &bl
}

// String converts the action to string
func (a Action) String() string {
	return string(a)
}

// Match wildcard matches the given pattern to the action
func (a Action) Match(pattern Action) bool {
	return matchPattern(pattern.String(), a.String())
	_, found := supportedActionList[a]
	if !found {
		return fmt.Errorf("unsupported action: %v", a)
	}
	return nil
}

// IsObjectAction checks whether the action is an object action.
// A nil result corresponds to 's3:*'.
func (a Action) IsObjectAction() *bool {
	if a == AllActions {
		return nil
	}

	// first find an exact match
	if _, ok := supportedObjectActionList[a]; ok {
		return &ok
	}

	for action := range supportedObjectActionList {
		if action.Match(a) {
			return getBoolPtr(true)
func (a Action) IsObjectAction() bool {
	if a[len(a)-1] == '*' {
		pattern := strings.TrimSuffix(string(a), "*")
		for act := range supportedObjectActionList {
			if strings.HasPrefix(string(act), pattern) {
				return true
			}
		}

		return false
	}

	return getBoolPtr(false)
	_, found := supportedObjectActionList[a]
	return found
}

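The `*bool` return is effectively a tri-state: nil for `s3:*` (which applies to both bucket and object resources), otherwise a pointer to true or false. A minimal, runnable sketch of consuming that contract, with a simplified exact-match-only lookup standing in for the full list:

```
package main

import "fmt"

type Action string

const AllActions Action = "s3:*"

// simplified stand-in for supportedObjectActionList above
var objectActions = map[Action]struct{}{
	"s3:GetObject": {},
	"s3:PutObject": {},
}

// isObjectAction mirrors the tri-state contract: nil for "s3:*",
// otherwise a pointer to the lookup result (exact matches only here).
func isObjectAction(a Action) *bool {
	if a == AllActions {
		return nil
	}
	_, ok := objectActions[a]
	return &ok
}

func main() {
	for _, a := range []Action{"s3:*", "s3:GetObject", "s3:CreateBucket"} {
		switch v := isObjectAction(a); {
		case v == nil:
			fmt.Println(a, "=> applies to buckets and objects")
		case *v:
			fmt.Println(a, "=> object action")
		default:
			fmt.Println(a, "=> not an object action")
		}
	}
}
```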
func (a Action) WildCardMatch(act Action) bool {
	if strings.HasSuffix(string(a), "*") {
		pattern := strings.TrimSuffix(string(a), "*")
		return strings.HasPrefix(string(act), pattern)
	}
	return false
}

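Note that `WildCardMatch` only handles a trailing `*` as a prefix match; without the trailing star it never matches, even on identical strings. A runnable sketch of that behavior (method renamed to avoid suggesting it is the exported original):

```
package main

import (
	"fmt"
	"strings"
)

type Action string

// wildCardMatch reproduces the prefix-only semantics above: a match
// requires the receiver to end in '*'; exact names always return false.
func (a Action) wildCardMatch(act Action) bool {
	if strings.HasSuffix(string(a), "*") {
		return strings.HasPrefix(string(act), strings.TrimSuffix(string(a), "*"))
	}
	return false
}

func main() {
	fmt.Println(Action("s3:Get*").wildCardMatch("s3:GetObject"))      // true
	fmt.Println(Action("s3:Get*").wildCardMatch("s3:PutObject"))      // false
	fmt.Println(Action("s3:GetObject").wildCardMatch("s3:GetObject")) // false
}
```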
type Actions map[Action]struct{}

@@ -267,7 +159,7 @@ func (a *Actions) UnmarshalJSON(data []byte) error {
	var err error
	if err = json.Unmarshal(data, &ss); err == nil {
		if len(ss) == 0 {
			return policyErrInvalidAction
			return fmt.Errorf("actions can't be empty")
		}
		*a = make(Actions)
		for _, s := range ss {
@@ -280,7 +172,7 @@ func (a *Actions) UnmarshalJSON(data []byte) error {
	var s string
	if err = json.Unmarshal(data, &s); err == nil {
		if s == "" {
			return policyErrInvalidAction
			return fmt.Errorf("actions can't be empty")
		}
		*a = make(Actions)
		err = a.Add(s)
@@ -305,7 +197,6 @@ func (a Actions) Add(str string) error {
	return nil
}

// FindMatch tries to match the given action to the actions list
func (a Actions) FindMatch(action Action) bool {
	_, ok := a[AllActions]
	if ok {
@@ -317,9 +208,8 @@ func (a Actions) FindMatch(action Action) bool {
		return true
	}

	// search for a wildcard match
	for act := range a {
		if action.Match(act) {
		if strings.HasSuffix(string(act), "*") && act.WildCardMatch(action) {
			return true
		}
	}

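Putting `FindMatch` together, the lookup order is: `s3:*` grants everything, then an exact hit, then a wildcard pass over the stored patterns. A self-contained sketch of that evaluation order, using prefix-style wildcards as in the older variant above:

```
package main

import (
	"fmt"
	"strings"
)

type Action string

type Actions map[Action]struct{}

// findMatch sketches the lookup order: "s3:*" first, then an exact hit,
// then a trailing-'*' wildcard pass over the stored patterns.
func (a Actions) findMatch(action Action) bool {
	if _, ok := a["s3:*"]; ok {
		return true
	}
	if _, ok := a[action]; ok {
		return true
	}
	for act := range a {
		if strings.HasSuffix(string(act), "*") &&
			strings.HasPrefix(string(action), strings.TrimSuffix(string(act), "*")) {
			return true
		}
	}
	return false
}

func main() {
	policy := Actions{"s3:Get*": {}, "s3:PutObject": {}}
	fmt.Println(policy.findMatch("s3:GetObjectTagging")) // true (wildcard)
	fmt.Println(policy.findMatch("s3:PutObject"))        // true (exact)
	fmt.Println(policy.findMatch("s3:DeleteObject"))     // false
}
```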
@@ -1,175 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
	"encoding/json"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestAction_IsValid(t *testing.T) {
	tests := []struct {
		name string
		action Action
		wantErr bool
	}{
		{"valid exact action", GetObjectAction, false},
		{"valid all actions", AllActions, false},
		{"invalid prefix", "invalid:Action", true},
		{"unsupported action 1", "s3:Unsupported", true},
		{"unsupported action 2", "s3:HeadObject", true},
		{"valid wildcard match 1", "s3:Get*", false},
		{"valid wildcard match 2", "s3:*Object*", false},
		{"valid wildcard match 3", "s3:*Multipart*", false},
		{"any char match 1", "s3:Get?bject", false},
		{"any char match 2", "s3:Get??bject", true},
		{"any char match 3", "s3:???", true},
		{"mixed match 1", "s3:Get?*", false},
		{"mixed match 2", "s3:*Object?????", true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := tt.action.IsValid()
			if tt.wantErr {
				assert.EqualValues(t, policyErrInvalidAction, err)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}

func TestAction_String(t *testing.T) {
	a := Action("s3:TestAction")
	assert.Equal(t, "s3:TestAction", a.String())
}

func TestAction_Match(t *testing.T) {
	tests := []struct {
		name string
		action Action
		pattern Action
		want bool
	}{
		{"exact match", "s3:GetObject", "s3:GetObject", true},
		{"wildcard match", "s3:GetObject", "s3:Get*", true},
		{"wildcard mismatch", "s3:PutObject", "s3:Get*", false},
		{"any character match", "s3:Get1", "s3:Get?", true},
		{"any character mismatch", "s3:Get12", "s3:Get?", false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := tt.action.Match(tt.pattern)
			assert.Equal(t, tt.want, got)
		})
	}
}

func TestAction_IsObjectAction(t *testing.T) {
	tests := []struct {
		name string
		action Action
		want *bool
	}{
		{"all actions", AllActions, nil},
		{"object action exact", GetObjectAction, getBoolPtr(true)},
		{"object action wildcard", "s3:Get*", getBoolPtr(true)},
		{"non object action", GetBucketAclAction, getBoolPtr(false)},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := tt.action.IsObjectAction()
			if tt.want == nil {
				assert.Nil(t, got)
			} else {
				assert.NotNil(t, got)
				assert.Equal(t, *tt.want, *got)
			}
		})
	}
}

func TestActions_UnmarshalJSON(t *testing.T) {
	tests := []struct {
		name string
		input string
		wantErr bool
	}{
		{"valid slice", `["s3:GetObject","s3:PutObject"]`, false},
		{"empty slice", `[]`, true},
		{"invalid action in slice", `["s3:Invalid"]`, true},
		{"valid string", `"s3:GetObject"`, false},
		{"empty string", `""`, true},
		{"invalid string", `"s3:Invalid"`, true},
		{"invalid json", `{}`, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var a Actions
			err := json.Unmarshal([]byte(tt.input), &a)
			if tt.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}

func TestActions_Add(t *testing.T) {
	tests := []struct {
		name string
		action string
		wantErr bool
	}{
		{"valid add", "s3:GetObject", false},
		{"invalid add", "s3:InvalidAction", true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			a := make(Actions)
			err := a.Add(tt.action)
			if tt.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
				_, ok := a[Action(tt.action)]
				assert.True(t, ok)
			}
		})
	}
}

func TestActions_FindMatch(t *testing.T) {
	tests := []struct {
		name string
		actions Actions
		check Action
		want bool
	}{
		{"all actions present", Actions{AllActions: {}}, GetObjectAction, true},
		{"exact match", Actions{GetObjectAction: {}}, GetObjectAction, true},
		{"wildcard match", Actions{"s3:Get*": {}}, GetObjectAction, true},
		{"no match", Actions{"s3:Put*": {}}, GetObjectAction, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := tt.actions.FindMatch(tt.check)
			assert.Equal(t, tt.want, got)
		})
	}
}
@@ -30,6 +30,5 @@ func (bpat BucketPolicyAccessType) Validate() error {
		return nil
	}

	//lint:ignore ST1005 Reason: This error message is intended for end-user clarity and follows their expectations
	return fmt.Errorf("Invalid effect: %v", bpat)
	return fmt.Errorf("invalid effect: %v", bpat)
}

@@ -1,57 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestBucketPolicyAccessType_Validate(t *testing.T) {
	tests := []struct {
		name string
		input BucketPolicyAccessType
		wantErr bool
		errMsg string
	}{
		{
			name: "valid allow",
			input: BucketPolicyAccessTypeAllow,
			wantErr: false,
		},
		{
			name: "valid deny",
			input: BucketPolicyAccessTypeDeny,
			wantErr: false,
		},
		{
			name: "invalid type",
			input: BucketPolicyAccessType("InvalidValue"),
			wantErr: true,
			errMsg: "Invalid effect: InvalidValue",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := tt.input.Validate()
			if tt.wantErr {
				assert.EqualError(t, err, tt.errMsg)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
@@ -16,6 +16,7 @@ package auth

import (
	"encoding/json"
	"fmt"
)

type Principals map[string]struct{}

@@ -27,50 +28,23 @@ func (p Principals) Add(key string) {
// UnmarshalJSON is overridden to decode both []string and string properties
func (p *Principals) UnmarshalJSON(data []byte) error {
	ss := []string{}
	var s string
	var k struct {
		AWS string
	}

	var err error

	if err = json.Unmarshal(data, &ss); err == nil {
		if len(ss) == 0 {
			return policyErrInvalidPrincipal
			return fmt.Errorf("principals can't be empty")
		}
		*p = make(Principals)
		for _, s := range ss {
			p.Add(s)
		}
		return nil
	} else if err = json.Unmarshal(data, &s); err == nil {
		if s == "" {
			return policyErrInvalidPrincipal
		}
		*p = make(Principals)
		p.Add(s)

		return nil
	} else if err = json.Unmarshal(data, &k); err == nil {
		if k.AWS == "" {
			return policyErrInvalidPrincipal
		}
		*p = make(Principals)
		p.Add(k.AWS)

		return nil
	} else {
		var sk struct {
			AWS []string
		}
		if err = json.Unmarshal(data, &sk); err == nil {
			if len(sk.AWS) == 0 {
				return policyErrInvalidPrincipal
		var s string
		if err = json.Unmarshal(data, &s); err == nil {
			if s == "" {
				return fmt.Errorf("principals can't be empty")
			}
			*p = make(Principals)
			for _, s := range sk.AWS {
				p.Add(s)
			}
			p.Add(s)
		}
	}

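The decoder above accepts four JSON shapes for a policy Principal: a bare string, a string array, `{"AWS": "..."}`, and `{"AWS": [...]}`. A simplified, runnable sketch of that decoding cascade (the emptiness checks are omitted for brevity, so this is not a drop-in replacement):

```
package main

import (
	"encoding/json"
	"fmt"
)

type Principals map[string]struct{}

// UnmarshalJSON tries each accepted shape in turn, mirroring the
// cascade above: []string, string, {"AWS": string}, {"AWS": []string}.
func (p *Principals) UnmarshalJSON(data []byte) error {
	*p = make(Principals)
	var ss []string
	if json.Unmarshal(data, &ss) == nil {
		for _, s := range ss {
			(*p)[s] = struct{}{}
		}
		return nil
	}
	var s string
	if json.Unmarshal(data, &s) == nil {
		(*p)[s] = struct{}{}
		return nil
	}
	var k struct{ AWS string }
	if json.Unmarshal(data, &k) == nil && k.AWS != "" {
		(*p)[k.AWS] = struct{}{}
		return nil
	}
	var sk struct{ AWS []string }
	if err := json.Unmarshal(data, &sk); err != nil {
		return err
	}
	for _, s := range sk.AWS {
		(*p)[s] = struct{}{}
	}
	return nil
}

func main() {
	inputs := []string{`"user1"`, `["user1","user2"]`, `{"AWS":"user1"}`, `{"AWS":["user1","user2"]}`}
	for _, in := range inputs {
		var p Principals
		if err := json.Unmarshal([]byte(in), &p); err != nil {
			fmt.Println(in, "=> error:", err)
			continue
		}
		fmt.Println(in, "=>", p)
	}
}
```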
@@ -97,7 +71,7 @@ func (p Principals) Validate(iam IAMService) error {
		if len(p) == 1 {
			return nil
		}
		return policyErrInvalidPrincipal
		return fmt.Errorf("principals should either contain * or user access keys")
	}

	accs, err := CheckIfAccountsExist(p.ToSlice(), iam)
@@ -105,7 +79,7 @@ func (p Principals) Validate(iam IAMService) error {
		return err
	}
	if len(accs) > 0 {
		return policyErrInvalidPrincipal
		return fmt.Errorf("user accounts don't exist: %v", accs)
	}

	return nil
@@ -121,10 +95,3 @@ func (p Principals) Contains(userAccess string) bool {
	_, found := p[userAccess]
	return found
}

// A bucket policy grants public access if it contains
// a wildcard match for all users
func (p Principals) isPublic() bool {
	_, ok := p["*"]
	return ok
}

@@ -1,106 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
	"encoding/json"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestPrincipals_Add(t *testing.T) {
	p := make(Principals)
	p.Add("user1")
	_, ok := p["user1"]
	assert.True(t, ok)
}

func TestPrincipals_UnmarshalJSON(t *testing.T) {
	tests := []struct {
		name string
		input string
		want Principals
		wantErr bool
	}{
		{"valid slice", `["user1","user2"]`, Principals{"user1": {}, "user2": {}}, false},
		{"empty slice", `[]`, nil, true},
		{"valid string", `"user1"`, Principals{"user1": {}}, false},
		{"empty string", `""`, nil, true},
		{"valid AWS object", `{"AWS":"user1"}`, Principals{"user1": {}}, false},
		{"empty AWS object", `{"AWS":""}`, nil, true},
		{"valid AWS array", `{"AWS":["user1","user2"]}`, Principals{"user1": {}, "user2": {}}, false},
		{"empty AWS array", `{"AWS":[]}`, nil, true},
		{"invalid json", `{invalid}`, nil, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var p Principals
			err := json.Unmarshal([]byte(tt.input), &p)
			if tt.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tt.want, p)
			}
		})
	}
}

func TestPrincipals_ToSlice(t *testing.T) {
	p := Principals{"user1": {}, "user2": {}, "*": {}}
	got := p.ToSlice()
	assert.Contains(t, got, "user1")
	assert.Contains(t, got, "user2")
	assert.NotContains(t, got, "*")
}

func TestPrincipals_Validate(t *testing.T) {
	iamSingle := NewIAMServiceSingle(Account{
		Access: "user1",
	})
	tests := []struct {
		name string
		principals Principals
		mockIAM IAMService
		err error
	}{
		{"only wildcard", Principals{"*": {}}, iamSingle, nil},
		{"wildcard and user", Principals{"*": {}, "user1": {}}, iamSingle, policyErrInvalidPrincipal},
		{"accounts exist returns err", Principals{"user2": {}, "user3": {}}, iamSingle, policyErrInvalidPrincipal},
		{"accounts exist non-empty", Principals{"user1": {}}, iamSingle, nil},
		{"accounts valid", Principals{"user1": {}}, iamSingle, nil},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := tt.principals.Validate(tt.mockIAM)
			assert.EqualValues(t, tt.err, err)
		})
	}
}

func TestPrincipals_Contains(t *testing.T) {
	p := Principals{"user1": {}}
	assert.True(t, p.Contains("user1"))
	assert.False(t, p.Contains("user2"))

	p = Principals{"*": {}}
	assert.True(t, p.Contains("anyuser"))
}

func TestPrincipals_isPublic(t *testing.T) {
	assert.True(t, Principals{"*": {}}.isPublic())
	assert.False(t, Principals{"user1": {}}.isPublic())
}
@@ -16,6 +16,7 @@ package auth

import (
	"encoding/json"
	"fmt"
	"strings"
)

@@ -29,7 +30,7 @@ func (r *Resources) UnmarshalJSON(data []byte) error {
	var err error
	if err = json.Unmarshal(data, &ss); err == nil {
		if len(ss) == 0 {
			return policyErrInvalidResource
			return fmt.Errorf("resources can't be empty")
		}
		*r = make(Resources)
		for _, s := range ss {
@@ -42,7 +43,7 @@ func (r *Resources) UnmarshalJSON(data []byte) error {
	var s string
	if err = json.Unmarshal(data, &s); err == nil {
		if s == "" {
			return policyErrInvalidResource
			return fmt.Errorf("resources can't be empty")
		}
		*r = make(Resources)
		err = r.Add(s)
@@ -59,7 +60,12 @@ func (r *Resources) UnmarshalJSON(data []byte) error {
func (r Resources) Add(rc string) error {
	ok, pattern := isValidResource(rc)
	if !ok {
		return policyErrInvalidResource
		return fmt.Errorf("invalid resource: %v", rc)
	}

	_, found := r[pattern]
	if found {
		return fmt.Errorf("duplicate resource: %v", rc)
	}

	r[pattern] = struct{}{}
@@ -93,7 +99,7 @@ func (r Resources) ContainsBucketPattern() bool {
func (r Resources) Validate(bucket string) error {
	for resource := range r {
		if !strings.HasPrefix(resource, bucket) {
			return policyErrInvalidResource
			return fmt.Errorf("incorrect bucket name in %v", resource)
		}
	}

@@ -102,19 +108,21 @@ func (r Resources) Validate(bucket string) error {

func (r Resources) FindMatch(resource string) bool {
	for res := range r {
		if r.Match(res, resource) {
			return true
		if strings.HasSuffix(res, "*") {
			pattern := strings.TrimSuffix(res, "*")
			if strings.HasPrefix(resource, pattern) {
				return true
			}
		} else {
			if res == resource {
				return true
			}
		}
	}

	return false
}

// Match matches the given input resource against the pattern
func (r Resources) Match(pattern, input string) bool {
	return matchPattern(pattern, input)
}

// Checks that the resource has the arn prefix and does not start with /
func isValidResource(rc string) (isValid bool, pattern string) {
	if !strings.HasPrefix(rc, ResourceArnPrefix) {

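`isValidResource` is cut off above; the following is a plausible completion under the stated rules (the resource must carry the ARN prefix, and the remainder must be non-empty and must not start with `/`). `ResourceArnPrefix` is assumed to be the standard S3 ARN prefix; this is a sketch, not the gateway's actual body.

```
package main

import (
	"fmt"
	"strings"
)

const ResourceArnPrefix = "arn:aws:s3:::" // assumed value

// isValidResource requires the ARN prefix and a non-empty remainder that
// does not start with '/'; it returns the stripped bucket[/object] pattern.
func isValidResource(rc string) (isValid bool, pattern string) {
	if !strings.HasPrefix(rc, ResourceArnPrefix) {
		return false, ""
	}
	pattern = strings.TrimPrefix(rc, ResourceArnPrefix)
	if pattern == "" || strings.HasPrefix(pattern, "/") {
		return false, ""
	}
	return true, pattern
}

func main() {
	for _, rc := range []string{
		"arn:aws:s3:::my-bucket/*",
		"arn:aws:s3:::/invalid-start",
		"my-bucket/*",
	} {
		ok, pattern := isValidResource(rc)
		fmt.Println(rc, "=>", ok, pattern)
	}
}
```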
@@ -1,182 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
	"encoding/json"
	"testing"
)

func TestUnmarshalJSON(t *testing.T) {
	var r Resources

	cases := []struct {
		input string
		expected int
		wantErr bool
	}{
		{`"arn:aws:s3:::my-bucket/*"`, 1, false},
		{`["arn:aws:s3:::my-bucket/*", "arn:aws:s3:::other-bucket"]`, 2, false},
		{`""`, 0, true},
		{`[]`, 0, true},
		{`["invalid-bucket"]`, 0, true},
	}

	for _, tc := range cases {
		r = Resources{}
		err := json.Unmarshal([]byte(tc.input), &r)
		if (err != nil) != tc.wantErr {
			t.Errorf("Unexpected error status for input %s: %v", tc.input, err)
		}
		if len(r) != tc.expected {
			t.Errorf("Expected %d resources, got %d", tc.expected, len(r))
		}
	}
}

func TestAdd(t *testing.T) {
	r := Resources{}

	cases := []struct {
		input string
		wantErr bool
	}{
		{"arn:aws:s3:::valid-bucket/*", false},
		{"arn:aws:s3:::valid-bucket/object", false},
		{"invalid-bucket/*", true},
		{"/invalid-start", true},
	}

	for _, tc := range cases {
		err := r.Add(tc.input)
		if (err != nil) != tc.wantErr {
			t.Errorf("Unexpected error status for input %s: %v", tc.input, err)
		}
	}
}

func TestContainsObjectPattern(t *testing.T) {
	cases := []struct {
		resources []string
		expected bool
	}{
		{[]string{"arn:aws:s3:::my-bucket/my-object"}, true},
		{[]string{"arn:aws:s3:::my-bucket/*"}, true},
		{[]string{"arn:aws:s3:::my-bucket"}, false},
	}

	for _, tc := range cases {
		r := Resources{}
		for _, res := range tc.resources {
			r.Add(res)
		}
		if r.ContainsObjectPattern() != tc.expected {
			t.Errorf("Expected object pattern to be %v for %v", tc.expected, tc.resources)
		}
	}
}

func TestContainsBucketPattern(t *testing.T) {
	cases := []struct {
		resources []string
		expected bool
	}{
		{[]string{"arn:aws:s3:::my-bucket"}, true},
		{[]string{"arn:aws:s3:::my-bucket/*"}, false},
		{[]string{"arn:aws:s3:::my-bucket/object"}, false},
	}

	for _, tc := range cases {
		r := Resources{}
		for _, res := range tc.resources {
			r.Add(res)
		}
		if r.ContainsBucketPattern() != tc.expected {
			t.Errorf("Expected bucket pattern to be %v for %v", tc.expected, tc.resources)
		}
	}
}

func TestValidate(t *testing.T) {
	cases := []struct {
		resources []string
		bucket string
		expected bool
	}{
		{[]string{"arn:aws:s3:::valid-bucket/*"}, "valid-bucket", true},
		{[]string{"arn:aws:s3:::wrong-bucket/*"}, "valid-bucket", false},
		{[]string{"arn:aws:s3:::valid-bucket/*", "arn:aws:s3:::valid-bucket/object/*"}, "valid-bucket", true},
	}

	for _, tc := range cases {
		r := Resources{}
		for _, res := range tc.resources {
			r.Add(res)
		}
		if (r.Validate(tc.bucket) == nil) != tc.expected {
			t.Errorf("Expected validation to be %v for bucket %s", tc.expected, tc.bucket)
		}
	}
}

func TestFindMatch(t *testing.T) {
	cases := []struct {
		resources []string
		input string
		expected bool
	}{
		{[]string{"arn:aws:s3:::my-bucket/*"}, "my-bucket/my-object", true},
		{[]string{"arn:aws:s3:::my-bucket/object"}, "other-bucket/my-object", false},
		{[]string{"arn:aws:s3:::my-bucket/object"}, "my-bucket/object", true},
		{[]string{"arn:aws:s3:::my-bucket/*", "arn:aws:s3:::other-bucket/*"}, "other-bucket/something", true},
	}

	for _, tc := range cases {
		r := Resources{}
		for _, res := range tc.resources {
			r.Add(res)
		}
		if r.FindMatch(tc.input) != tc.expected {
			t.Errorf("Expected FindMatch to be %v for input %s", tc.expected, tc.input)
		}
	}
}

func TestMatch(t *testing.T) {
	r := Resources{}
	cases := []struct {
		pattern string
		input string
		expected bool
	}{
		{"my-bucket/*", "my-bucket/object", true},
		{"my-bucket/?bject", "my-bucket/object", true},
		{"my-bucket/*", "other-bucket/object", false},
		{"*", "any-bucket/object", true},
		{"my-bucket/*", "my-bucket/subdir/object", true},
		{"my-bucket/*", "other-bucket", false},
		{"my-bucket/*/*", "my-bucket/hello", false},
		{"my-bucket/*/*", "my-bucket/hello/world", true},
		{"foo/???/bar", "foo/qux/bar", true},
		{"foo/???/bar", "foo/quxx/bar", false},
		{"foo/???/bar/*/?", "foo/qux/bar/hello/g", true},
		{"foo/???/bar/*/?", "foo/qux/bar/hello/smth", false},
	}
	for _, tc := range cases {
		if r.Match(tc.pattern, tc.input) != tc.expected {
			t.Errorf("Match(%s, %s) failed, expected %v", tc.pattern, tc.input, tc.expected)
		}
	}
}
@@ -1,32 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

type PolicyVersion string

const (
	PolicyVersion2008 PolicyVersion = "2008-10-17"
	PolicyVersion2012 PolicyVersion = "2012-10-17"
)

// isValid checks if the policy version is valid or not
func (pv PolicyVersion) isValid() bool {
	switch pv {
	case PolicyVersion2008, PolicyVersion2012:
		return true
	default:
		return false
	}
}
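Only the two IAM policy-language dates are accepted; anything else, including the empty string, fails validation. A small runnable illustration:

```
package main

import "fmt"

type PolicyVersion string

// isValid mirrors the switch above: only the 2008-10-17 and
// 2012-10-17 policy language versions are recognized.
func (pv PolicyVersion) isValid() bool {
	switch pv {
	case "2008-10-17", "2012-10-17":
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(PolicyVersion("2012-10-17").isValid()) // true
	fmt.Println(PolicyVersion("").isValid())           // false
	fmt.Println(PolicyVersion("2010-10-17").isValid()) // false
}
```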
@@ -1,54 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestPolicyVersion_isValid(t *testing.T) {
	tests := []struct {
		name string // description of this test case
		value string
		want bool
	}{
		{"valid 2008", "2008-10-17", true},
		{"valid 2012", "2012-10-17", true},
		{"invalid empty", "", false},
		{"invalid 1", "invalid", false},
		{"invalid 2", "2010-10-17", false},
		{"invalid 3", "2006-00-12", false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := PolicyVersion(tt.value).isValid()
			assert.Equal(t, tt.want, got)
		})
	}
}
144 auth/iam.go
@@ -18,8 +18,6 @@ import (
	"errors"
	"fmt"
	"time"

	"github.com/versity/versitygw/s3err"
)

type Role string

@@ -30,19 +28,6 @@ const (
	RoleUserPlus Role = "userplus"
)

func (r Role) IsValid() bool {
	switch r {
	case RoleAdmin:
		return true
	case RoleUser:
		return true
	case RoleUserPlus:
		return true
	default:
		return false
	}
}

// Account is a gateway IAM account
type Account struct {
	Access string `json:"access"`
@@ -53,106 +38,39 @@ type Account struct {
	ProjectID int `json:"projectID"`
}

type ListUserAccountsResult struct {
	Accounts []Account
}

// MutableProps are the account properties that can be changed when updating an IAM account
type MutableProps struct {
	Secret *string `json:"secret"`
	Role Role `json:"role"`
	UserID *int `json:"userID"`
	GroupID *int `json:"groupID"`
	ProjectID *int `json:"projectID"`
}

func (m MutableProps) Validate() error {
	if m.Role != "" && !m.Role.IsValid() {
		return s3err.GetAPIError(s3err.ErrAdminInvalidUserRole)
	}

	return nil
}

func updateAcc(acc *Account, props MutableProps) {
	if props.Secret != nil {
		acc.Secret = *props.Secret
	}
	if props.GroupID != nil {
		acc.GroupID = *props.GroupID
	}
	if props.UserID != nil {
		acc.UserID = *props.UserID
	}
	if props.ProjectID != nil {
		acc.ProjectID = *props.ProjectID
	}
	if props.Role != "" {
		acc.Role = props.Role
	}
}

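The pointer fields in MutableProps are what make partial updates possible: nil means "leave unchanged", while a non-nil pointer overwrites the field, even with a zero value. A trimmed-down, runnable illustration (only a subset of the fields above, and Role simplified to a plain string):

```
package main

import "fmt"

type Account struct {
	Access string
	Secret string
	Role   string
	UserID int
}

// MutableProps uses pointers so "not provided" (nil) can be told apart
// from an explicit zero value; Role uses "" as its "not provided" marker.
type MutableProps struct {
	Secret *string
	Role   string
	UserID *int
}

func updateAcc(acc *Account, props MutableProps) {
	if props.Secret != nil {
		acc.Secret = *props.Secret
	}
	if props.UserID != nil {
		acc.UserID = *props.UserID
	}
	if props.Role != "" {
		acc.Role = props.Role
	}
}

func main() {
	acc := Account{Access: "user1", Secret: "old", Role: "user", UserID: 100}
	newSecret := "new"
	updateAcc(&acc, MutableProps{Secret: &newSecret}) // only the secret changes
	fmt.Printf("%+v\n", acc)
}
```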
// IAMService is the interface for all IAM service implementations
//
//go:generate moq -out ../s3api/controllers/iam_moq_test.go -pkg controllers . IAMService
type IAMService interface {
	CreateAccount(account Account) error
	GetUserAccount(access string) (Account, error)
	UpdateUserAccount(access string, props MutableProps) error
	DeleteUserAccount(access string) error
	ListUserAccounts() ([]Account, error)
	Shutdown() error
}

var (
	// ErrUserExists is returned when the user already exists
	ErrUserExists = errors.New("user already exists")
	// ErrNoSuchUser is returned when the user does not exist
	ErrNoSuchUser = errors.New("user not found")
)
var ErrNoSuchUser = errors.New("user not found")

type Opts struct {
	RootAccount Account
	Dir string
	LDAPServerURL string
	LDAPBindDN string
	LDAPPassword string
	LDAPQueryBase string
	LDAPObjClasses string
	LDAPAccessAtr string
	LDAPSecretAtr string
	LDAPRoleAtr string
	LDAPUserIdAtr string
	LDAPGroupIdAtr string
	LDAPProjectIdAtr string
	LDAPTLSSkipVerify bool
	VaultEndpointURL string
	VaultNamespace string
	VaultSecretStoragePath string
	VaultSecretStorageNamespace string
	VaultAuthMethod string
	VaultAuthNamespace string
	VaultMountPath string
	VaultRootToken string
	VaultRoleId string
	VaultRoleSecret string
	VaultServerCert string
	VaultClientCert string
	VaultClientCertKey string
	S3Access string
	S3Secret string
	S3Region string
	S3Bucket string
	S3Endpoint string
	S3DisableSSlVerfiy bool
	CacheDisable bool
	CacheTTL int
	CachePrune int
	IpaHost string
	IpaVaultName string
	IpaUser string
	IpaPassword string
	IpaInsecure bool
	Dir string
	LDAPServerURL string
	LDAPBindDN string
	LDAPPassword string
	LDAPQueryBase string
	LDAPObjClasses string
	LDAPAccessAtr string
	LDAPSecretAtr string
	LDAPRoleAtr string
	S3Access string
	S3Secret string
	S3Region string
	S3Bucket string
	S3Endpoint string
	S3DisableSSlVerfiy bool
	S3Debug bool
	CacheDisable bool
	CacheTTL int
	CachePrune int
}

func New(o *Opts) (IAMService, error) {
@@ -161,30 +79,22 @@ func New(o *Opts) (IAMService, error) {

	switch {
	case o.Dir != "":
		svc, err = NewInternal(o.RootAccount, o.Dir)
		svc, err = NewInternal(o.Dir)
		fmt.Printf("initializing internal IAM with %q\n", o.Dir)
	case o.LDAPServerURL != "":
		svc, err = NewLDAPService(o.RootAccount, o.LDAPServerURL, o.LDAPBindDN, o.LDAPPassword,
			o.LDAPQueryBase, o.LDAPAccessAtr, o.LDAPSecretAtr, o.LDAPRoleAtr, o.LDAPUserIdAtr,
			o.LDAPGroupIdAtr, o.LDAPProjectIdAtr, o.LDAPObjClasses, o.LDAPTLSSkipVerify)
		svc, err = NewLDAPService(o.LDAPServerURL, o.LDAPBindDN, o.LDAPPassword,
			o.LDAPQueryBase, o.LDAPAccessAtr, o.LDAPSecretAtr, o.LDAPRoleAtr,
			o.LDAPObjClasses)
		fmt.Printf("initializing LDAP IAM with %q\n", o.LDAPServerURL)
	case o.S3Endpoint != "":
		svc, err = NewS3(o.RootAccount, o.S3Access, o.S3Secret, o.S3Region, o.S3Bucket,
			o.S3Endpoint, o.S3DisableSSlVerfiy)
		svc, err = NewS3(o.S3Access, o.S3Secret, o.S3Region, o.S3Bucket,
			o.S3Endpoint, o.S3DisableSSlVerfiy, o.S3Debug)
		fmt.Printf("initializing S3 IAM with '%v/%v'\n",
			o.S3Endpoint, o.S3Bucket)
	case o.VaultEndpointURL != "":
		svc, err = NewVaultIAMService(o.RootAccount, o.VaultEndpointURL, o.VaultNamespace, o.VaultSecretStoragePath, o.VaultSecretStorageNamespace,
			o.VaultAuthMethod, o.VaultAuthNamespace, o.VaultMountPath, o.VaultRootToken, o.VaultRoleId, o.VaultRoleSecret,
			o.VaultServerCert, o.VaultClientCert, o.VaultClientCertKey)
		fmt.Printf("initializing Vault IAM with %q\n", o.VaultEndpointURL)
	case o.IpaHost != "":
		svc, err = NewIpaIAMService(o.RootAccount, o.IpaHost, o.IpaVaultName, o.IpaUser, o.IpaPassword, o.IpaInsecure)
		fmt.Printf("initializing IPA IAM with %q\n", o.IpaHost)
	default:
		// if no iam options selected, default to the single user mode
		fmt.Println("No IAM service configured, enabling single account mode")
		return NewIAMServiceSingle(o.RootAccount), nil
		return IAMServiceSingle{}, nil
	}

	if err != nil {

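A sketch of wiring this up from a caller's side, assuming the auth package above. A non-empty Dir selects the internal file-backed IAM service, and an all-zero Opts falls back to single-account mode; the directory path below is a placeholder, not a documented default.

```
package main

import (
	"log"

	"github.com/versity/versitygw/auth"
)

func main() {
	// Non-empty Dir => internal file-backed IAM; all-zero Opts => single-account mode.
	svc, err := auth.New(&auth.Opts{Dir: "/var/lib/versitygw/iam"})
	if err != nil {
		log.Fatalf("init IAM: %v", err)
	}
	defer svc.Shutdown()

	accs, err := svc.ListUserAccounts()
	if err != nil {
		log.Fatalf("list accounts: %v", err)
	}
	log.Printf("loaded %d IAM accounts", len(accs))
}
```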
@@ -66,21 +66,6 @@ func (i *icache) get(k string) (Account, bool) {
	return v.value, true
}

func (i *icache) update(k string, props MutableProps) {
	i.Lock()
	defer i.Unlock()

	item, found := i.items[k]
	if found {
		updateAcc(&item.value, props)

		// refresh the expiration date
		item.exp = time.Now().Add(i.expire)

		i.items[k] = item
	}
}

func (i *icache) Delete(k string) {
	i.Lock()
	delete(i.items, k)
@@ -181,16 +166,6 @@ func (c *IAMCache) DeleteUserAccount(access string) error {
	return nil
}

func (c *IAMCache) UpdateUserAccount(access string, props MutableProps) error {
	err := c.service.UpdateUserAccount(access, props)
	if err != nil {
		return err
	}

	c.iamcache.update(access, props)
	return nil
}

// ListUserAccounts is a passthrough to the underlying service and
// does not make use of the cache
func (c *IAMCache) ListUserAccounts() ([]Account, error) {

@@ -22,7 +22,6 @@ import (
	"os"
	"path/filepath"
	"sort"
	"sync"
	"time"
)

@@ -33,15 +32,7 @@ const (

// IAMServiceInternal manages the internal IAM service
type IAMServiceInternal struct {
	// This mutex will help with racing updates to the IAM data
	// from multiple requests to this gateway instance, but
	// will not help with racing updates to multiple load balanced
	// gateway instances. This is a limitation of the internal
	// IAM service. All account updates should be sent to a single
	// gateway instance if possible.
	sync.RWMutex
	dir string
	rootAcc Account
	dir string
}

// UpdateAcctFunc accepts the current data and returns the new data to be stored
@@ -55,10 +46,9 @@ type iAMConfig struct {
var _ IAMService = &IAMServiceInternal{}

// NewInternal creates a new instance for the Internal IAM service
func NewInternal(rootAcc Account, dir string) (*IAMServiceInternal, error) {
func NewInternal(dir string) (*IAMServiceInternal, error) {
	i := &IAMServiceInternal{
		dir: dir,
		rootAcc: rootAcc,
		dir: dir,
	}

	err := i.initIAM()
@@ -72,13 +62,6 @@ func NewInternal(rootAcc Account, dir string) (*IAMServiceInternal, error) {
// CreateAccount creates a new IAM account. Returns an error if the account
// already exists.
func (s *IAMServiceInternal) CreateAccount(account Account) error {
	if account.Access == s.rootAcc.Access {
		return ErrUserExists
	}

	s.Lock()
	defer s.Unlock()

	return s.storeIAM(func(data []byte) ([]byte, error) {
		conf, err := parseIAM(data)
		if err != nil {
@@ -87,7 +70,7 @@ func (s *IAMServiceInternal) CreateAccount(account Account) error {

		_, ok := conf.AccessAccounts[account.Access]
		if ok {
			return nil, ErrUserExists
			return nil, fmt.Errorf("account already exists")
		}
		conf.AccessAccounts[account.Access] = account

@@ -103,13 +86,6 @@ func (s *IAMServiceInternal) CreateAccount(account Account) error {
// GetUserAccount retrieves account info for the requested user. Returns
// ErrNoSuchUser if the account does not exist.
func (s *IAMServiceInternal) GetUserAccount(access string) (Account, error) {
	if access == s.rootAcc.Access {
		return s.rootAcc, nil
	}

	s.RLock()
	defer s.RUnlock()

	conf, err := s.getIAM()
	if err != nil {
		return Account{}, fmt.Errorf("get iam data: %w", err)
@@ -123,41 +99,9 @@ func (s *IAMServiceInternal) GetUserAccount(access string) (Account, error) {
	return acct, nil
}

// UpdateUserAccount updates the specified user account fields. Returns
// ErrNoSuchUser if the account does not exist.
func (s *IAMServiceInternal) UpdateUserAccount(access string, props MutableProps) error {
	s.Lock()
	defer s.Unlock()

	return s.storeIAM(func(data []byte) ([]byte, error) {
		conf, err := parseIAM(data)
		if err != nil {
			return nil, fmt.Errorf("get iam data: %w", err)
		}

		acc, found := conf.AccessAccounts[access]
		if !found {
			return nil, ErrNoSuchUser
		}

		updateAcc(&acc, props)
		conf.AccessAccounts[access] = acc

		b, err := json.Marshal(conf)
		if err != nil {
			return nil, fmt.Errorf("failed to serialize iam: %w", err)
		}

		return b, nil
	})
}

// DeleteUserAccount deletes the specified user account. Does not check if
// account exists.
func (s *IAMServiceInternal) DeleteUserAccount(access string) error {
	s.Lock()
	defer s.Unlock()

	return s.storeIAM(func(data []byte) ([]byte, error) {
		conf, err := parseIAM(data)
		if err != nil {
@@ -177,9 +121,6 @@ func (s *IAMServiceInternal) DeleteUserAccount(access string) error {

// ListUserAccounts lists all the user accounts stored.
func (s *IAMServiceInternal) ListUserAccounts() ([]Account, error) {
	s.RLock()
	defer s.RUnlock()

	conf, err := s.getIAM()
	if err != nil {
		return []Account{}, fmt.Errorf("get iam data: %w", err)
@@ -248,10 +189,6 @@ func parseIAM(b []byte) (iAMConfig, error) {
		return iAMConfig{}, fmt.Errorf("failed to parse the config file: %w", err)
	}

	if conf.AccessAccounts == nil {
		conf.AccessAccounts = make(map[string]Account)
	}

	return conf, nil
}

@@ -291,49 +228,93 @@ func (s *IAMServiceInternal) readIAMData() ([]byte, error) {

func (s *IAMServiceInternal) storeIAM(update UpdateAcctFunc) error {
	// We are going to be racing with other running gateways without any
	// coordination. So the strategy here is to read the current file data,
	// update the data, write back out to a temp file, then rename the
	// temp file to the original file. This rename will replace the
	// original file with the new file. This is atomic and should always
	// allow for a consistent view of the data. There is a small
	// window where the file could be read and then updated by
	// another process. In this case any updates the other process did
	// will be lost. This is a limitation of the internal IAM service.
	// This should be rare, and even when it does happen should result
	// in a valid IAM file, just without the other process's updates.
	// coordination. So the strategy here is to read the current file data.
	// If the file doesn't exist, then we assume someone else is currently
	// updating the file. So we just need to keep retrying. We also need
	// to make sure the data is consistent within a single update. So racing
	// writes to a file would possibly leave this in some invalid state.
	// We can get atomic updates with rename. If we read the data, update
	// the data, write to a temp file, then rename the tempfile back to the
	// data file. This should always result in a complete data image.

	iamFname := filepath.Join(s.dir, iamFile)
	backupFname := filepath.Join(s.dir, iamBackupFile)
	// There is at least one unsolved failure mode here.
	// If a gateway removes the data file and then crashes, all other
	// gateways will retry forever thinking that the original will eventually
	// write the file.

	b, err := os.ReadFile(iamFname)
	if err != nil && !errors.Is(err, fs.ErrNotExist) {
		return fmt.Errorf("read iam file: %w", err)
	}
	retries := 0
	fname := filepath.Join(s.dir, iamFile)

	// save copy of data
	datacopy := make([]byte, len(b))
	copy(datacopy, b)
	for {
		b, err := os.ReadFile(fname)
		if errors.Is(err, fs.ErrNotExist) {
			// racing with someone else updating
			// keep retrying after backoff
			retries++
			if retries < maxretry {
				time.Sleep(backoff)
				continue
			}

	// make a backup copy in case something happens
	err = s.writeUsingTempFile(b, backupFname)
	if err != nil {
		return fmt.Errorf("write backup iam file: %w", err)
	}
			// we have been unsuccessful trying to read the iam file
			// so this must be the case where something happened and
			// the file did not get updated successfully, and probably
			// isn't going to be. The recovery procedure would be to
			// copy the backup file into place of the original.
			return fmt.Errorf("no iam file, needs backup recovery")
		}
		if err != nil && !errors.Is(err, fs.ErrNotExist) {
			return fmt.Errorf("read iam file: %w", err)
		}

	b, err = update(b)
	if err != nil {
		return fmt.Errorf("update iam data: %w", err)
	}
		// reset retries on successful read
		retries = 0

	err = s.writeUsingTempFile(b, iamFname)
	if err != nil {
		return fmt.Errorf("write iam file: %w", err)
		err = os.Remove(fname)
		if errors.Is(err, fs.ErrNotExist) {
			// racing with someone else updating
			// keep retrying after backoff
			time.Sleep(backoff)
			continue
		}
		if err != nil && !errors.Is(err, fs.ErrNotExist) {
			return fmt.Errorf("remove old iam file: %w", err)
		}

		// save copy of data
		datacopy := make([]byte, len(b))
		copy(datacopy, b)

		// make a backup copy in case we crash before update
		// this is after remove, so there is a small window something
		// can go wrong, but the remove should barrier other gateways
		// from trying to write backup at the same time. Only one
		// gateway will successfully remove the file.
		os.WriteFile(filepath.Join(s.dir, iamBackupFile), b, iamMode)

		b, err = update(b)
		if err != nil {
			// update failed, try to write old data back out
			os.WriteFile(fname, datacopy, iamMode)
			return fmt.Errorf("update iam data: %w", err)
		}

		err = s.writeTempFile(b)
		if err != nil {
			// update failed, try to write old data back out
			os.WriteFile(fname, datacopy, iamMode)
			return err
		}

		break
	}

	return nil
}
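The write-temp-then-rename idea described in the comments above generalizes beyond IAM data. A standalone sketch of the pattern (the file name here is just an example):

```
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// atomicWrite illustrates the temp-file-then-rename pattern: readers always
// observe either the old or the new file contents, never a partial write.
func atomicWrite(path string, data []byte) error {
	dir := filepath.Dir(path)
	f, err := os.CreateTemp(dir, filepath.Base(path))
	if err != nil {
		return fmt.Errorf("create temp file: %w", err)
	}
	// best-effort cleanup; fails harmlessly after a successful rename
	defer os.Remove(f.Name())

	if _, err := f.Write(data); err != nil {
		f.Close()
		return fmt.Errorf("write temp file: %w", err)
	}
	if err := f.Close(); err != nil {
		return fmt.Errorf("close temp file: %w", err)
	}
	// Rename is atomic when source and target are on the same filesystem,
	// which CreateTemp(dir, ...) guarantees here.
	return os.Rename(f.Name(), path)
}

func main() {
	if err := atomicWrite("users.json", []byte(`{"accessAccounts":{}}`)); err != nil {
		fmt.Println("atomic write failed:", err)
	}
}
```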

func (s *IAMServiceInternal) writeUsingTempFile(b []byte, fname string) error {
func (s *IAMServiceInternal) writeTempFile(b []byte) error {
	fname := filepath.Join(s.dir, iamFile)

	f, err := os.CreateTemp(s.dir, iamFile)
	if err != nil {
		return fmt.Errorf("create temp file: %w", err)
@@ -341,7 +322,6 @@ func (s *IAMServiceInternal) writeUsingTempFile(b []byte, fname string) error {
	defer os.Remove(f.Name())

	_, err = f.Write(b)
	f.Close()
	if err != nil {
		return fmt.Errorf("write temp file: %w", err)
	}

519 auth/iam_ipa.go
@@ -1,519 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
    "crypto/aes"
    "crypto/cipher"
    "crypto/rand"
    "crypto/rsa"
    "crypto/tls"
    "crypto/x509"
    "encoding/base64"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "net"
    "net/http"
    "net/http/cookiejar"
    "net/url"
    "slices"
    "strconv"
    "strings"
    "syscall"
    "time"

    "github.com/versity/versitygw/debuglogger"
)

const IpaVersion = "2.254"

type IpaIAMService struct {
    client          http.Client
    id              int
    version         string
    host            string
    vaultName       string
    username        string
    password        string
    kraTransportKey *rsa.PublicKey
    rootAcc         Account
}

var _ IAMService = &IpaIAMService{}

func NewIpaIAMService(rootAcc Account, host, vaultName, username, password string, isInsecure bool) (*IpaIAMService, error) {
    ipa := IpaIAMService{
        id:        0,
        version:   IpaVersion,
        host:      host,
        vaultName: vaultName,
        username:  username,
        password:  password,
        rootAcc:   rootAcc,
    }
    jar, err := cookiejar.New(nil)
    if err != nil {
        // this should never happen
        return nil, fmt.Errorf("cookie jar creation: %w", err)
    }

    mTLSConfig := &tls.Config{InsecureSkipVerify: isInsecure}
    tr := &http.Transport{
        TLSClientConfig: mTLSConfig,
        Proxy:           http.ProxyFromEnvironment,
    }
    ipa.client = http.Client{Jar: jar, Transport: tr}

    err = ipa.login()
    if err != nil {
        return nil, fmt.Errorf("ipa login failed: %w", err)
    }

    req, err := ipa.newRequest("vaultconfig_show/1", []string{}, map[string]any{"all": true})
    if err != nil {
        return nil, fmt.Errorf("ipa vaultconfig_show: %w", err)
    }
    vaultConfig := struct {
        Kra_Server_Server             []string
        Transport_Cert                Base64EncodedWrapped
        Wrapping_default_algorithm    string
        Wrapping_supported_algorithms []string
    }{}
    err = ipa.rpc(req, &vaultConfig)
    if err != nil {
        return nil, fmt.Errorf("ipa vault config: %w", err)
    }

    cert, err := x509.ParseCertificate(vaultConfig.Transport_Cert)
    if err != nil {
        return nil, fmt.Errorf("ipa cannot parse vault certificate: %w", err)
    }

    ipa.kraTransportKey = cert.PublicKey.(*rsa.PublicKey)

    isSupported := slices.Contains(vaultConfig.Wrapping_supported_algorithms, "aes-128-cbc")

    if !isSupported {
        return nil,
            fmt.Errorf("IPA vault does not support aes-128-cbc. Only %v supported",
                vaultConfig.Wrapping_supported_algorithms)
    }
    return &ipa, nil
}

func (ipa *IpaIAMService) CreateAccount(account Account) error {
    return fmt.Errorf("not implemented")
}

func (ipa *IpaIAMService) GetUserAccount(access string) (Account, error) {
    if access == ipa.rootAcc.Access {
        return ipa.rootAcc, nil
    }

    req, err := ipa.newRequest("user_show/1", []string{access}, map[string]any{})
    if err != nil {
        return Account{}, fmt.Errorf("ipa user_show: %w", err)
    }

    userResult := struct {
        Gidnumber []string
        Uidnumber []string
        PidNumber []string
    }{}

    err = ipa.rpc(req, &userResult)
    if err != nil {
        return Account{}, err
    }

    uid, err := parseToInt(userResult.Uidnumber, "userID")
    if err != nil {
        return Account{}, err
    }
    gid, err := parseToInt(userResult.Gidnumber, "groupID")
    if err != nil {
        return Account{}, err
    }
    pId, err := parseToInt(userResult.PidNumber, "projectID")
    if err != nil {
        return Account{}, err
    }

    account := Account{
        Access:    access,
        Role:      RoleUser,
        UserID:    uid,
        GroupID:   gid,
        ProjectID: pId,
    }

    session_key := make([]byte, 16)

    _, err = rand.Read(session_key)
    if err != nil {
        return account, fmt.Errorf("ipa cannot generate session key: %w", err)
    }

    encryptedKey, err := rsa.EncryptPKCS1v15(rand.Reader, ipa.kraTransportKey, session_key)
    if err != nil {
        return account, fmt.Errorf("ipa vault secret retrieval: %w", err)
    }

    req, err = ipa.newRequest("vault_retrieve_internal/1", []string{ipa.vaultName},
        map[string]any{"username": access,
            "session_key":   Base64EncodedWrapped(encryptedKey),
            "wrapping_algo": "aes-128-cbc"})
    if err != nil {
        return Account{}, fmt.Errorf("ipa vault_retrieve_internal: %w", err)
    }

    data := struct {
        Vault_data Base64EncodedWrapped
        Nonce      Base64EncodedWrapped
    }{}

    err = ipa.rpc(req, &data)
    if err != nil {
        return account, err
    }

    aes, err := aes.NewCipher(session_key)
    if err != nil {
        return account, fmt.Errorf("ipa cannot create AES cipher: %w", err)
    }
    cbc := cipher.NewCBCDecrypter(aes, data.Nonce)
    cbc.CryptBlocks(data.Vault_data, data.Vault_data)
    secretUnpaddedJson, err := pkcs7Unpad(data.Vault_data, 16)
    if err != nil {
        return account, fmt.Errorf("ipa cannot unpad decrypted result: %w", err)
    }

    secret := struct {
        Data Base64Encoded
    }{}
    json.Unmarshal(secretUnpaddedJson, &secret)
    account.Secret = string(secret.Data)

    return account, nil
}
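
The retrieval above follows FreeIPA's vault transport scheme: a fresh 16-byte session key is wrapped for the KRA with RSA PKCS#1 v1.5, and the vault data comes back AES-128-CBC encrypted under that key, with the nonce as IV. A minimal, self-contained sketch of the same round trip (local only; the KRA key pair is a generated stand-in, and `pad`/`trimPad` are hypothetical helpers, see `pkcs7Unpad` below for the validating version):

```go
package main

import (
    "bytes"
    "crypto/aes"
    "crypto/cipher"
    "crypto/rand"
    "crypto/rsa"
    "fmt"
)

func main() {
    // Stand-in for the KRA transport key pair; the gateway only ever
    // sees the public half (parsed from the transport certificate).
    kraKey, _ := rsa.GenerateKey(rand.Reader, 2048)

    // Fresh 16-byte session key, as in GetUserAccount.
    sessionKey := make([]byte, 16)
    rand.Read(sessionKey)

    // Client side: wrap the session key for the KRA with PKCS#1 v1.5.
    wrapped, _ := rsa.EncryptPKCS1v15(rand.Reader, &kraKey.PublicKey, sessionKey)

    // Server side (simulated): unwrap the session key and return the
    // secret encrypted with AES-128-CBC under it.
    unwrapped, _ := rsa.DecryptPKCS1v15(nil, kraKey, wrapped)
    secret := pad([]byte(`{"data": "aGVsbG93b3JsZAo="}`), aes.BlockSize)
    iv := make([]byte, aes.BlockSize)
    rand.Read(iv)
    blk, _ := aes.NewCipher(unwrapped)
    ct := make([]byte, len(secret))
    cipher.NewCBCEncrypter(blk, iv).CryptBlocks(ct, secret)

    // Client side again: decrypt with the session key, as above.
    blk2, _ := aes.NewCipher(sessionKey)
    cipher.NewCBCDecrypter(blk2, iv).CryptBlocks(ct, ct)
    fmt.Println(string(trimPad(ct)))
}

// pad appends PKCS#7 padding up to the given block size.
func pad(b []byte, size int) []byte {
    n := size - len(b)%size
    return append(b, bytes.Repeat([]byte{byte(n)}, n)...)
}

// trimPad strips PKCS#7 padding without validation (demo only).
func trimPad(b []byte) []byte {
    n := int(b[len(b)-1])
    return b[:len(b)-n]
}
```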

func (ipa *IpaIAMService) UpdateUserAccount(access string, props MutableProps) error {
    return fmt.Errorf("not implemented")
}

func (ipa *IpaIAMService) DeleteUserAccount(access string) error {
    return fmt.Errorf("not implemented")
}

func (ipa *IpaIAMService) ListUserAccounts() ([]Account, error) {
    return []Account{}, fmt.Errorf("not implemented")
}

func (ipa *IpaIAMService) Shutdown() error {
    return nil
}

// Implementation

const requestRetries = 3

func (ipa *IpaIAMService) login() error {
    form := url.Values{}
    form.Set("user", ipa.username)
    form.Set("password", ipa.password)

    req, err := http.NewRequest(
        "POST",
        fmt.Sprintf("%s/ipa/session/login_password", ipa.host),
        strings.NewReader(form.Encode()))
    if err != nil {
        return err
    }

    req.Header.Set("referer", fmt.Sprintf("%s/ipa", ipa.host))
    req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

    var resp *http.Response
    for i := range requestRetries {
        resp, err = ipa.client.Do(req)
        if err == nil {
            break
        }
        // Check for transient network errors
        if isRetryable(err) {
            time.Sleep(time.Second * time.Duration(i+1))
            continue
        }
        return fmt.Errorf("login POST to %s failed: %w", req.URL, err)
    }
    if err != nil {
        return fmt.Errorf("login POST to %s failed after retries: %w",
            req.URL, err)
    }

    defer resp.Body.Close()

    if resp.StatusCode == 401 {
        return errors.New("cannot login to FreeIPA: invalid credentials")
    }

    if resp.StatusCode != 200 {
        return fmt.Errorf("cannot login to FreeIPA: status code %d",
            resp.StatusCode)
    }

    return nil
}

type rpcRequest = string

type rpcResponse struct {
    Result    json.RawMessage
    Principal string
    Id        int
    Version   string
}

func (p rpcResponse) String() string {
    return string(p.Result)
}

var errRpc = errors.New("IPA RPC error")

func (ipa *IpaIAMService) rpc(req rpcRequest, value any) error {
    err := ipa.login()
    if err != nil {
        return err
    }

    res, err := ipa.rpcInternal(req)
    if err != nil {
        return err
    }

    return json.Unmarshal(res.Result, value)
}

func (ipa *IpaIAMService) rpcInternal(req rpcRequest) (rpcResponse, error) {
    httpReq, err := http.NewRequest("POST",
        fmt.Sprintf("%s/ipa/session/json", ipa.host),
        strings.NewReader(req))
    if err != nil {
        return rpcResponse{}, err
    }

    debuglogger.IAMLogf("IPA request: %v", req)
    httpReq.Header.Set("referer", fmt.Sprintf("%s/ipa", ipa.host))
    httpReq.Header.Set("Content-Type", "application/json")

    var httpResp *http.Response
    for i := range requestRetries {
        httpResp, err = ipa.client.Do(httpReq)
        if err == nil {
            break
        }
        // Check for transient network errors
        if isRetryable(err) {
            time.Sleep(time.Second * time.Duration(i+1))
            continue
        }
        return rpcResponse{}, fmt.Errorf("ipa request to %s failed: %w",
            httpReq.URL, err)
    }
    if err != nil {
        return rpcResponse{},
            fmt.Errorf("ipa request to %s failed after retries: %w",
                httpReq.URL, err)
    }

    defer httpResp.Body.Close()

    bytes, err := io.ReadAll(httpResp.Body)
    debuglogger.IAMLogf("IPA response (%v): %v", err, string(bytes))
    if err != nil {
        return rpcResponse{}, err
    }

    result := struct {
        Result struct {
            Json    json.RawMessage `json:"result"`
            Value   string          `json:"value"`
            Summary any             `json:"summary"`
        } `json:"result"`
        Error     json.RawMessage `json:"error"`
        Id        int             `json:"id"`
        Principal string          `json:"principal"`
        Version   string          `json:"version"`
    }{}

    err = json.Unmarshal(bytes, &result)
    if err != nil {
        return rpcResponse{}, err
    }
    if string(result.Error) != "null" {
        return rpcResponse{}, fmt.Errorf("%s: %w", string(result.Error), errRpc)
    }

    return rpcResponse{
        Result:    result.Result.Json,
        Principal: result.Principal,
        Id:        result.Id,
        Version:   result.Version,
    }, nil
}

func isRetryable(err error) bool {
    if err == nil {
        return false
    }

    if errors.Is(err, io.EOF) {
        return true
    }

    if err, ok := err.(net.Error); ok && err.Timeout() {
        return true
    }

    if opErr, ok := err.(*net.OpError); ok {
        if sysErr, ok := opErr.Err.(*syscall.Errno); ok {
            if *sysErr == syscall.ECONNRESET {
                return true
            }
        }
    }

    return false
}

func (ipa *IpaIAMService) newRequest(method string, args []string, dict map[string]any) (rpcRequest, error) {

    id := ipa.id
    ipa.id++

    dict["version"] = ipa.version

    jmethod, errMethod := json.Marshal(method)
    jargs, errArgs := json.Marshal(args)
    jdict, errDict := json.Marshal(dict)

    err := errors.Join(errMethod, errArgs, errDict)
    if err != nil {
        return "", fmt.Errorf("ipa request invalid: %w", err)
    }

    request := map[string]interface{}{
        "id":     id,
        "method": json.RawMessage(jmethod),
        "params": []json.RawMessage{json.RawMessage(jargs), json.RawMessage(jdict)},
    }

    requestJSON, err := json.Marshal(request)
    if err != nil {
        return "", fmt.Errorf("failed to marshal request: %w", err)
    }

    return string(requestJSON), nil
}
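
For reference, the envelope newRequest produces for a call like user_show marshals to a standard FreeIPA JSON-RPC request along these lines (the access name is a hypothetical example; Go map keys serialize alphabetically):

```json
{"id":0,"method":"user_show/1","params":[["alice"],{"version":"2.254"}]}
```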

// pkcs7Unpad validates and unpads data from the given bytes slice.
// The returned value will be 1 to n bytes smaller depending on the
// amount of padding, where n is the block size.
func pkcs7Unpad(b []byte, blocksize int) ([]byte, error) {
    if blocksize <= 0 {
        return nil, errors.New("invalid blocksize")
    }
    if len(b) == 0 {
        return nil, errors.New("invalid PKCS7 data (empty or not padded)")
    }
    if len(b)%blocksize != 0 {
        return nil, errors.New("invalid padding on input")
    }
    c := b[len(b)-1]
    n := int(c)
    if n == 0 || n > len(b) {
        return nil, errors.New("invalid padding on input")
    }
    for i := 0; i < n; i++ {
        if b[len(b)-n+i] != c {
            return nil, errors.New("invalid padding on input")
        }
    }
    return b[:len(b)-n], nil
}
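
A quick hypothetical check of the unpadding behavior, written as an in-package example (assuming bytes and fmt are imported): 4 payload bytes in a 16-byte block leave 12 padding bytes, each with value 0x0c.

```go
func ExamplePkcs7Unpad() {
    padded := append([]byte("data"), bytes.Repeat([]byte{12}, 12)...)
    out, err := pkcs7Unpad(padded, 16)
    fmt.Printf("%q %v", out, err)
    // Output: "data" <nil>
}
```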

/*
e.g.

	"value": {
		"__base64__": "aGVsbG93b3JsZAo="
	}
*/
type Base64EncodedWrapped []byte

func (b *Base64EncodedWrapped) UnmarshalJSON(data []byte) error {
    intermediate := struct {
        Base64 string `json:"__base64__"`
    }{}
    err := json.Unmarshal(data, &intermediate)
    if err != nil {
        return err
    }
    *b, err = base64.StdEncoding.DecodeString(intermediate.Base64)
    return err
}

func (b *Base64EncodedWrapped) MarshalJSON() ([]byte, error) {
    intermediate := struct {
        Base64 string `json:"__base64__"`
    }{Base64: base64.StdEncoding.EncodeToString(*b)}
    return json.Marshal(intermediate)
}
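
The wrapper type exists because FreeIPA returns binary values as a {"__base64__": ...} object rather than a bare string. A hypothetical in-package round trip (assuming encoding/json and fmt are imported):

```go
func ExampleBase64EncodedWrapped() {
    var v struct {
        Value Base64EncodedWrapped `json:"value"`
    }
    in := []byte(`{"value": {"__base64__": "aGVsbG93b3JsZAo="}}`)
    if err := json.Unmarshal(in, &v); err != nil {
        panic(err)
    }
    fmt.Print(string(v.Value))
    // Output: helloworld
}
```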

/*
e.g.

	"value": "aGVsbG93b3JsZAo="
*/
type Base64Encoded []byte

func (b *Base64Encoded) UnmarshalJSON(data []byte) error {
    var intermediate string
    err := json.Unmarshal(data, &intermediate)
    if err != nil {
        return err
    }
    *b, err = base64.StdEncoding.DecodeString(intermediate)
    return err
}

// parseToInt parses the first element of the input string slice
// to an integer. If the slice is empty, it defaults to 0.
func parseToInt(input []string, argName string) (int, error) {
    if len(input) == 0 {
        debuglogger.IAMLogf("empty %s slice: defaulting to 0", argName)
        return 0, nil
    }

    id, err := strconv.Atoi(input[0])
    if err != nil {
        debuglogger.IAMLogf("failed to parse %s: %v", argName, err)
        return 0, fmt.Errorf("invalid %s: %w", argName, err)
    }

    return id, nil
}
auth/iam_ldap.go (273 changes)
@@ -1,154 +1,54 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
    "crypto/tls"
    "fmt"
    "net/url"
    "strconv"
    "strings"
    "sync"

    "github.com/davecgh/go-spew/spew"
    "github.com/go-ldap/ldap/v3"
    "github.com/versity/versitygw/debuglogger"
)

type LdapIAMService struct {
    conn          *ldap.Conn
    queryBase     string
    objClasses    []string
    accessAtr     string
    secretAtr     string
    roleAtr       string
    groupIdAtr    string
    userIdAtr     string
    projectIdAtr  string
    rootAcc       Account
    url           string
    bindDN        string
    pass          string
    tlsSkipVerify bool
    mu            sync.Mutex
    conn       *ldap.Conn
    queryBase  string
    objClasses []string
    accessAtr  string
    secretAtr  string
    roleAtr    string
}

var _ IAMService = &LdapIAMService{}

func NewLDAPService(rootAcc Account, ldapURL, bindDN, pass, queryBase, accAtr, secAtr, roleAtr, userIdAtr, groupIdAtr, projectIdAtr, objClasses string, tlsSkipVerify bool) (IAMService, error) {
    if ldapURL == "" || bindDN == "" || pass == "" || queryBase == "" || accAtr == "" ||
        secAtr == "" || roleAtr == "" || userIdAtr == "" || groupIdAtr == "" || projectIdAtr == "" || objClasses == "" {
func NewLDAPService(url, bindDN, pass, queryBase, accAtr, secAtr, roleAtr, objClasses string) (IAMService, error) {
    if url == "" || bindDN == "" || pass == "" || queryBase == "" || accAtr == "" || secAtr == "" || roleAtr == "" || objClasses == "" {
        return nil, fmt.Errorf("required parameters list not fully provided")
    }

    conn, err := dialLDAP(ldapURL, tlsSkipVerify)
    conn, err := ldap.Dial("tcp", url)
    if err != nil {
        return nil, fmt.Errorf("failed to connect to LDAP server: %w", err)
    }

    err = conn.Bind(bindDN, pass)
    if err != nil {
        conn.Close()
        return nil, fmt.Errorf("failed to bind to LDAP server %w", err)
    }
    return &LdapIAMService{
        conn:          conn,
        queryBase:     queryBase,
        objClasses:    strings.Split(objClasses, ","),
        accessAtr:     accAtr,
        secretAtr:     secAtr,
        roleAtr:       roleAtr,
        userIdAtr:     userIdAtr,
        groupIdAtr:    groupIdAtr,
        projectIdAtr:  projectIdAtr,
        rootAcc:       rootAcc,
        url:           ldapURL,
        bindDN:        bindDN,
        pass:          pass,
        tlsSkipVerify: tlsSkipVerify,
        conn:       conn,
        queryBase:  queryBase,
        objClasses: strings.Split(objClasses, ","),
        accessAtr:  accAtr,
        secretAtr:  secAtr,
        roleAtr:    roleAtr,
    }, nil
}

// dialLDAP establishes an LDAP connection with optional TLS configuration
func dialLDAP(ldapURL string, tlsSkipVerify bool) (*ldap.Conn, error) {
    u, err := url.Parse(ldapURL)
    if err != nil {
        return nil, fmt.Errorf("invalid LDAP URL: %w", err)
    }

    // For ldaps:// URLs, use DialURL with custom TLS config if needed
    if u.Scheme == "ldaps" && tlsSkipVerify {
        tlsConfig := &tls.Config{
            InsecureSkipVerify: tlsSkipVerify,
        }
        return ldap.DialURL(ldapURL, ldap.DialWithTLSConfig(tlsConfig))
    }

    // For ldap:// or when TLS verification is enabled, use standard DialURL
    return ldap.DialURL(ldapURL)
}

func (ld *LdapIAMService) reconnect() error {
    ld.conn.Close()

    conn, err := dialLDAP(ld.url, ld.tlsSkipVerify)
    if err != nil {
        return fmt.Errorf("failed to reconnect to LDAP server: %w", err)
    }

    err = conn.Bind(ld.bindDN, ld.pass)
    if err != nil {
        conn.Close()
        return fmt.Errorf("failed to bind to LDAP server on reconnect: %w", err)
    }
    ld.conn = conn
    return nil
}

func (ld *LdapIAMService) execute(f func(*ldap.Conn) error) error {
    ld.mu.Lock()
    defer ld.mu.Unlock()

    err := f(ld.conn)
    if err != nil {
        if e, ok := err.(*ldap.Error); ok && e.ResultCode == ldap.ErrorNetwork {
            if reconnErr := ld.reconnect(); reconnErr != nil {
                return reconnErr
            }
            return f(ld.conn)
        }
    }
    return err
}
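
The execute wrapper serializes all LDAP traffic behind the mutex and retries exactly once after a transparent reconnect on ldap.ErrorNetwork; callers capture results through closures, as the methods below do. A hypothetical standalone caller would look like:

```go
var res *ldap.SearchResult
err := ld.execute(func(c *ldap.Conn) error {
    var err error
    res, err = c.Search(searchRequest) // searchRequest built elsewhere
    return err
})
```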

func (ld *LdapIAMService) CreateAccount(account Account) error {
    if ld.rootAcc.Access == account.Access {
        return ErrUserExists
    }
    userEntry := ldap.NewAddRequest(fmt.Sprintf("%v=%v,%v", ld.accessAtr, account.Access, ld.queryBase), nil)
    userEntry := ldap.NewAddRequest(fmt.Sprintf("%v=%v, %v", ld.accessAtr, account.Access, ld.queryBase), nil)
    userEntry.Attribute("objectClass", ld.objClasses)
    userEntry.Attribute(ld.accessAtr, []string{account.Access})
    userEntry.Attribute(ld.secretAtr, []string{account.Secret})
    userEntry.Attribute(ld.roleAtr, []string{string(account.Role)})
    userEntry.Attribute(ld.groupIdAtr, []string{fmt.Sprint(account.GroupID)})
    userEntry.Attribute(ld.userIdAtr, []string{fmt.Sprint(account.UserID)})
    userEntry.Attribute(ld.projectIdAtr, []string{fmt.Sprint(account.ProjectID)})

    err := ld.execute(func(c *ldap.Conn) error {
        return c.Add(userEntry)
    })
    err := ld.conn.Add(userEntry)
    if err != nil {
        return fmt.Errorf("error adding an entry: %w", err)
    }
@@ -156,22 +56,7 @@ func (ld *LdapIAMService) CreateAccount(account Account) error {
    return nil
}

func (ld *LdapIAMService) buildSearchFilter(access string) string {
    var searchFilter strings.Builder
    for _, el := range ld.objClasses {
        searchFilter.WriteString(fmt.Sprintf("(objectClass=%v)", el))
    }
    if access != "" {
        searchFilter.WriteString(fmt.Sprintf("(%v=%v)", ld.accessAtr, access))
    }
    return fmt.Sprintf("(&%v)", searchFilter.String())
}

func (ld *LdapIAMService) GetUserAccount(access string) (Account, error) {
    if access == ld.rootAcc.Access {
        return ld.rootAcc, nil
    }
    var result *ldap.SearchResult
    searchRequest := ldap.NewSearchRequest(
        ld.queryBase,
        ldap.ScopeWholeSubtree,
@@ -179,96 +64,28 @@ func (ld *LdapIAMService) GetUserAccount(access string) (Account, error) {
        0,
        0,
        false,
        ld.buildSearchFilter(access),
        []string{ld.accessAtr, ld.secretAtr, ld.roleAtr, ld.userIdAtr, ld.groupIdAtr, ld.projectIdAtr},
        fmt.Sprintf("(%v=%v)", ld.accessAtr, access),
        []string{ld.accessAtr, ld.secretAtr, ld.roleAtr},
        nil,
    )

    if debuglogger.IsIAMDebugEnabled() {
        debuglogger.IAMLogf("LDAP Search Request")
        debuglogger.IAMLogf(spew.Sdump(searchRequest))
    }

    err := ld.execute(func(c *ldap.Conn) error {
        var err error
        result, err = c.Search(searchRequest)
        return err
    })

    if debuglogger.IsIAMDebugEnabled() {
        debuglogger.IAMLogf("LDAP Search Result")
        debuglogger.IAMLogf(spew.Sdump(result))
    }

    result, err := ld.conn.Search(searchRequest)
    if err != nil {
        return Account{}, err
    }

    if len(result.Entries) == 0 {
        return Account{}, ErrNoSuchUser
    }

    entry := result.Entries[0]
    groupId, err := strconv.Atoi(entry.GetAttributeValue(ld.groupIdAtr))
    if err != nil {
        return Account{}, fmt.Errorf("invalid entry value for group-id %q: %w",
            entry.GetAttributeValue(ld.groupIdAtr), err)
    }
    userId, err := strconv.Atoi(entry.GetAttributeValue(ld.userIdAtr))
    if err != nil {
        return Account{}, fmt.Errorf("invalid entry value for user-id %q: %w",
            entry.GetAttributeValue(ld.userIdAtr), err)
    }
    projectID, err := strconv.Atoi(entry.GetAttributeValue(ld.projectIdAtr))
    if err != nil {
        return Account{}, fmt.Errorf("invalid entry value for project-id %q: %w",
            entry.GetAttributeValue(ld.projectIdAtr), err)
    }

    return Account{
        Access:    entry.GetAttributeValue(ld.accessAtr),
        Secret:    entry.GetAttributeValue(ld.secretAtr),
        Role:      Role(entry.GetAttributeValue(ld.roleAtr)),
        GroupID:   groupId,
        UserID:    userId,
        ProjectID: projectID,
        Access: entry.GetAttributeValue(ld.accessAtr),
        Secret: entry.GetAttributeValue(ld.secretAtr),
        Role:   Role(entry.GetAttributeValue(ld.roleAtr)),
    }, nil
}

func (ld *LdapIAMService) UpdateUserAccount(access string, props MutableProps) error {
    req := ldap.NewModifyRequest(fmt.Sprintf("%v=%v, %v", ld.accessAtr, access, ld.queryBase), nil)
    if props.Secret != nil {
        req.Replace(ld.secretAtr, []string{*props.Secret})
    }
    if props.GroupID != nil {
        req.Replace(ld.groupIdAtr, []string{fmt.Sprint(*props.GroupID)})
    }
    if props.UserID != nil {
        req.Replace(ld.userIdAtr, []string{fmt.Sprint(*props.UserID)})
    }
    if props.ProjectID != nil {
        req.Replace(ld.projectIdAtr, []string{fmt.Sprint(*props.ProjectID)})
    }
    if props.Role != "" {
        req.Replace(ld.roleAtr, []string{string(props.Role)})
    }

    err := ld.execute(func(c *ldap.Conn) error {
        return c.Modify(req)
    })
    //TODO: Handle non existing user case
    if err != nil {
        return err
    }
    return nil
}

func (ld *LdapIAMService) DeleteUserAccount(access string) error {
    delReq := ldap.NewDelRequest(fmt.Sprintf("%v=%v, %v", ld.accessAtr, access, ld.queryBase), nil)

    err := ld.execute(func(c *ldap.Conn) error {
        return c.Del(delReq)
    })
    err := ld.conn.Del(delReq)
    if err != nil {
        return err
    }
@@ -277,7 +94,10 @@ func (ld *LdapIAMService) DeleteUserAccount(access string) error {
}

func (ld *LdapIAMService) ListUserAccounts() ([]Account, error) {
    var resp *ldap.SearchResult
    searchFilter := ""
    for _, el := range ld.objClasses {
        searchFilter += fmt.Sprintf("(objectClass=%v)", el)
    }
    searchRequest := ldap.NewSearchRequest(
        ld.queryBase,
        ldap.ScopeWholeSubtree,
@@ -285,45 +105,22 @@ func (ld *LdapIAMService) ListUserAccounts() ([]Account, error) {
        0,
        0,
        false,
        ld.buildSearchFilter(""),
        []string{ld.accessAtr, ld.secretAtr, ld.roleAtr, ld.groupIdAtr, ld.projectIdAtr, ld.userIdAtr},
        fmt.Sprintf("(&%v)", searchFilter),
        []string{ld.accessAtr, ld.secretAtr, ld.roleAtr},
        nil,
    )

    err := ld.execute(func(c *ldap.Conn) error {
        var err error
        resp, err = c.Search(searchRequest)
        return err
    })
    resp, err := ld.conn.Search(searchRequest)
    if err != nil {
        return nil, err
    }

    result := []Account{}
    for _, el := range resp.Entries {
        groupId, err := strconv.Atoi(el.GetAttributeValue(ld.groupIdAtr))
        if err != nil {
            return nil, fmt.Errorf("invalid entry value for group-id %q: %w",
                el.GetAttributeValue(ld.groupIdAtr), err)
        }
        userId, err := strconv.Atoi(el.GetAttributeValue(ld.userIdAtr))
        if err != nil {
            return nil, fmt.Errorf("invalid entry value for user-id %q: %w",
                el.GetAttributeValue(ld.userIdAtr), err)
        }
        projectID, err := strconv.Atoi(el.GetAttributeValue(ld.projectIdAtr))
        if err != nil {
            return nil, fmt.Errorf("invalid entry value for project-id %q: %w",
                el.GetAttributeValue(ld.projectIdAtr), err)
        }

        result = append(result, Account{
            Access:    el.GetAttributeValue(ld.accessAtr),
            Secret:    el.GetAttributeValue(ld.secretAtr),
            Role:      Role(el.GetAttributeValue(ld.roleAtr)),
            GroupID:   groupId,
            ProjectID: projectID,
            UserID:    userId,
            Access: el.GetAttributeValue(ld.accessAtr),
            Secret: el.GetAttributeValue(ld.secretAtr),
            Role:   Role(el.GetAttributeValue(ld.roleAtr)),
        })
    }

@@ -332,7 +129,5 @@ func (ld *LdapIAMService) ListUserAccounts() ([]Account, error) {

// Shutdown graceful termination of service
func (ld *LdapIAMService) Shutdown() error {
    ld.mu.Lock()
    defer ld.mu.Unlock()
    return ld.conn.Close()
}

@@ -1,56 +0,0 @@
package auth

import "testing"

func TestLdapIAMService_BuildSearchFilter(t *testing.T) {
    tests := []struct {
        name       string
        objClasses []string
        accessAtr  string
        access     string
        expected   string
    }{
        {
            name:       "single object class with access",
            objClasses: []string{"inetOrgPerson"},
            accessAtr:  "uid",
            access:     "testuser",
            expected:   "(&(objectClass=inetOrgPerson)(uid=testuser))",
        },
        {
            name:       "single object class without access",
            objClasses: []string{"inetOrgPerson"},
            accessAtr:  "uid",
            access:     "",
            expected:   "(&(objectClass=inetOrgPerson))",
        },
        {
            name:       "multiple object classes with access",
            objClasses: []string{"inetOrgPerson", "organizationalPerson"},
            accessAtr:  "cn",
            access:     "john.doe",
            expected:   "(&(objectClass=inetOrgPerson)(objectClass=organizationalPerson)(cn=john.doe))",
        },
        {
            name:       "multiple object classes without access",
            objClasses: []string{"inetOrgPerson", "organizationalPerson", "person"},
            accessAtr:  "cn",
            access:     "",
            expected:   "(&(objectClass=inetOrgPerson)(objectClass=organizationalPerson)(objectClass=person))",
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            ld := &LdapIAMService{
                objClasses: tt.objClasses,
                accessAtr:  tt.accessAtr,
            }

            result := ld.buildSearchFilter(tt.access)
            if result != tt.expected {
                t.Errorf("BuildSearchFilter() = %v, want %v", result, tt.expected)
            }
        })
    }
}
@@ -24,7 +24,6 @@ import (
    "io"
    "net/http"
    "sort"
    "sync"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
@@ -33,7 +32,6 @@ import (
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
    "github.com/aws/smithy-go"
    "github.com/versity/versitygw/debuglogger"
)

// IAMServiceS3 stores user accounts in an S3 object
@@ -43,27 +41,19 @@ import (
// coming from iAMConfig and iamFile in iam_internal.

type IAMServiceS3 struct {
    // This mutex will help with racing updates to the IAM data
    // from multiple requests to this gateway instance, but
    // will not help with racing updates to multiple load balanced
    // gateway instances. This is a limitation of the internal
    // IAM service. All account updates should be sent to a single
    // gateway instance if possible.
    sync.RWMutex

    access        string
    secret        string
    region        string
    bucket        string
    endpoint      string
    sslSkipVerify bool
    rootAcc       Account
    debug         bool
    client        *s3.Client
}

var _ IAMService = &IAMServiceS3{}

func NewS3(rootAcc Account, access, secret, region, bucket, endpoint string, sslSkipVerify bool) (*IAMServiceS3, error) {
func NewS3(access, secret, region, bucket, endpoint string, sslSkipVerify, debug bool) (*IAMServiceS3, error) {
    if access == "" {
        return nil, fmt.Errorf("must provide s3 IAM service access key")
    }
@@ -87,7 +77,7 @@ func NewS3(rootAcc Account, access, secret, region, bucket, endpoint string, ssl
        bucket:        bucket,
        endpoint:      endpoint,
        sslSkipVerify: sslSkipVerify,
        rootAcc:       rootAcc,
        debug:         debug,
    }

    cfg, err := i.getConfig()
@@ -95,25 +85,11 @@ func NewS3(rootAcc Account, access, secret, region, bucket, endpoint string, ssl
        return nil, fmt.Errorf("init s3 IAM: %v", err)
    }

    if endpoint != "" {
        i.client = s3.NewFromConfig(cfg, func(o *s3.Options) {
            o.BaseEndpoint = &endpoint
        })
        return i, nil
    }

    i.client = s3.NewFromConfig(cfg)
    return i, nil
}

func (s *IAMServiceS3) CreateAccount(account Account) error {
    if s.rootAcc.Access == account.Access {
        return ErrUserExists
    }

    s.Lock()
    defer s.Unlock()

    conf, err := s.getAccounts()
    if err != nil {
        return err
@@ -121,7 +97,7 @@ func (s *IAMServiceS3) CreateAccount(account Account) error {

    _, ok := conf.AccessAccounts[account.Access]
    if ok {
        return ErrUserExists
        return fmt.Errorf("account already exists")
    }
    conf.AccessAccounts[account.Access] = account

@@ -129,13 +105,6 @@ func (s *IAMServiceS3) CreateAccount(account Account) error {
}

func (s *IAMServiceS3) GetUserAccount(access string) (Account, error) {
    if access == s.rootAcc.Access {
        return s.rootAcc, nil
    }

    s.RLock()
    defer s.RUnlock()

    conf, err := s.getAccounts()
    if err != nil {
        return Account{}, err
@@ -149,30 +118,7 @@ func (s *IAMServiceS3) GetUserAccount(access string) (Account, error) {
    return acct, nil
}

func (s *IAMServiceS3) UpdateUserAccount(access string, props MutableProps) error {
    s.Lock()
    defer s.Unlock()

    conf, err := s.getAccounts()
    if err != nil {
        return err
    }

    acc, ok := conf.AccessAccounts[access]
    if !ok {
        return ErrNoSuchUser
    }

    updateAcc(&acc, props)
    conf.AccessAccounts[access] = acc

    return s.storeAccts(conf)
}

func (s *IAMServiceS3) DeleteUserAccount(access string) error {
    s.Lock()
    defer s.Unlock()

    conf, err := s.getAccounts()
    if err != nil {
        return err
@@ -188,9 +134,6 @@ func (s *IAMServiceS3) DeleteUserAccount(access string) error {
}

func (s *IAMServiceS3) ListUserAccounts() ([]Account, error) {
    s.RLock()
    defer s.RUnlock()

    conf, err := s.getAccounts()
    if err != nil {
        return nil, err
@@ -217,6 +160,16 @@ func (s *IAMServiceS3) ListUserAccounts() ([]Account, error) {
    return accs, nil
}

// ResolveEndpoint is used for on prem or non-aws endpoints
func (s *IAMServiceS3) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) {
    return aws.Endpoint{
        PartitionID:       "aws",
        URL:               s.endpoint,
        SigningRegion:     s.region,
        HostnameImmutable: true,
    }, nil
}

func (s *IAMServiceS3) Shutdown() error {
    return nil
}
@@ -235,7 +188,12 @@ func (s *IAMServiceS3) getConfig() (aws.Config, error) {
        config.WithHTTPClient(client),
    }

    if debuglogger.IsIAMDebugEnabled() {
    if s.endpoint != "" {
        opts = append(opts,
            config.WithEndpointResolverWithOptions(s))
    }

    if s.debug {
        opts = append(opts,
            config.WithClientLogMode(aws.LogSigning|aws.LogRetries|aws.LogRequest|aws.LogResponse|aws.LogRequestEventMessage|aws.LogResponseEventMessage))
    }
@@ -252,15 +210,15 @@ func (s *IAMServiceS3) getAccounts() (iAMConfig, error) {
    })
    if err != nil {
        // if the error is object not exists,
        // init empty accounts struct and return that
        // init empty accounts stuct and return that
        var nsk *types.NoSuchKey
        if errors.As(err, &nsk) {
            return iAMConfig{AccessAccounts: map[string]Account{}}, nil
            return iAMConfig{}, nil
        }
        var apiErr smithy.APIError
        if errors.As(err, &apiErr) {
            if apiErr.ErrorCode() == "NotFound" {
                return iAMConfig{AccessAccounts: map[string]Account{}}, nil
                return iAMConfig{}, nil
            }
        }

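Worth noting on the getAccounts hunk: the newer side returns iAMConfig with an initialized AccessAccounts map rather than a zero value, because CreateAccount writes into that map and assignment to an entry in a nil map panics. An illustrative fragment (hypothetical names):

```go
var m map[string]Account // nil map
_, ok := m["root"]       // reads are safe: ok == false
m["root"] = Account{}    // panics: assignment to entry in nil map
```
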
@@ -15,49 +15,34 @@
package auth

import (
    "github.com/versity/versitygw/s3err"
    "errors"
)

// IAMServiceSingle manages the single tenant (root-only) IAM service
type IAMServiceSingle struct {
    root Account
}
type IAMServiceSingle struct{}

var _ IAMService = &IAMServiceSingle{}

func NewIAMServiceSingle(r Account) IAMService {
    return &IAMServiceSingle{
        root: r,
    }
}
var ErrNotSupported = errors.New("method is not supported")

// CreateAccount not valid in single tenant mode
func (IAMServiceSingle) CreateAccount(account Account) error {
    return s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
    return ErrNotSupported
}

// GetUserAccount returns the root account if the root access key
// is provided, and "ErrAdminUserNotFound" otherwise
func (s IAMServiceSingle) GetUserAccount(access string) (Account, error) {
    if access == s.root.Access {
        return s.root, nil
    }
    return Account{}, s3err.GetAPIError(s3err.ErrAdminUserNotFound)
}

// UpdateUserAccount no accounts in single tenant mode
func (IAMServiceSingle) UpdateUserAccount(access string, props MutableProps) error {
    return s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
// GetUserAccount no accounts in single tenant mode
func (IAMServiceSingle) GetUserAccount(access string) (Account, error) {
    return Account{}, ErrNotSupported
}

// DeleteUserAccount no accounts in single tenant mode
func (IAMServiceSingle) DeleteUserAccount(access string) error {
    return s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
    return ErrNotSupported
}

// ListUserAccounts no accounts in single tenant mode
func (IAMServiceSingle) ListUserAccounts() ([]Account, error) {
    return []Account{}, s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
    return []Account{}, nil
}

// Shutdown graceful termination of service

@@ -1,389 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "net/http"
    "strings"
    "time"

    vault "github.com/hashicorp/vault-client-go"
    "github.com/hashicorp/vault-client-go/schema"
)

const requestTimeout = 10 * time.Second

type VaultIAMService struct {
    client            *vault.Client
    authReqOpts       []vault.RequestOption
    kvReqOpts         []vault.RequestOption
    secretStoragePath string
    rootAcc           Account
    creds             schema.AppRoleLoginRequest
}

type VaultIAMNamespace struct {
    Auth          string
    SecretStorage string
}

// Resolve empty specific namespaces to the fallback.
// Empty result means root namespace.
func resolveVaultNamespaces(authNamespace, secretStorageNamespace, fallback string) VaultIAMNamespace {
    ns := VaultIAMNamespace{
        Auth:          authNamespace,
        SecretStorage: secretStorageNamespace,
    }

    if ns.Auth == "" {
        ns.Auth = fallback
    }
    if ns.SecretStorage == "" {
        ns.SecretStorage = fallback
    }

    return ns
}
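
Resolution is per field, so one side can inherit the fallback while the other keeps its explicit value. A hypothetical call:

```go
ns := resolveVaultNamespaces("", "team-a", "shared")
// ns.Auth == "shared" (fallback applied), ns.SecretStorage == "team-a"
```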

var _ IAMService = &VaultIAMService{}

func NewVaultIAMService(rootAcc Account, endpoint, namespace, secretStoragePath, secretStorageNamespace,
    authMethod, authNamespace, mountPath, rootToken, roleID, roleSecret, serverCert,
    clientCert, clientCertKey string) (IAMService, error) {
    opts := []vault.ClientOption{
        vault.WithAddress(endpoint),
        vault.WithRequestTimeout(requestTimeout),
    }

    if serverCert != "" {
        tls := vault.TLSConfiguration{}

        tls.ServerCertificate.FromBytes = []byte(serverCert)
        if clientCert != "" {
            if clientCertKey == "" {
                return nil, fmt.Errorf("client certificate and client certificate key should both be specified")
            }

            tls.ClientCertificate.FromBytes = []byte(clientCert)
            tls.ClientCertificateKey.FromBytes = []byte(clientCertKey)
        }

        opts = append(opts, vault.WithTLS(tls))
    }

    client, err := vault.New(opts...)
    if err != nil {
        return nil, fmt.Errorf("init vault client: %w", err)
    }

    authReqOpts := []vault.RequestOption{}
    // if auth method path is not specified, it defaults to "approle"
    if authMethod != "" {
        authReqOpts = append(authReqOpts, vault.WithMountPath(authMethod))
    }

    kvReqOpts := []vault.RequestOption{}
    // if mount path is not specified, it defaults to "kv-v2"
    if mountPath != "" {
        kvReqOpts = append(kvReqOpts, vault.WithMountPath(mountPath))
    }

    // Resolve namespaces using optional generic fallback "namespace"
    ns := resolveVaultNamespaces(authNamespace, secretStorageNamespace, namespace)

    // Guard: AppRole tokens are namespace scoped. If using AppRole and namespaces differ, error early.
    // Root token can span namespaces because each request carries X-Vault-Namespace.
    if rootToken == "" && ns.Auth != "" && ns.SecretStorage != "" && ns.Auth != ns.SecretStorage {
        return nil, fmt.Errorf(
            "approle tokens are namespace scoped. auth namespace %q and secret storage namespace %q differ. "+
                "use the same namespace or authenticate with a root token",
            ns.Auth, ns.SecretStorage,
        )
    }

    // Apply namespaces to the correct request option sets.
    // For root token we do not need an auth namespace since we are not logging in via auth.
    if rootToken == "" && ns.Auth != "" {
        authReqOpts = append(authReqOpts, vault.WithNamespace(ns.Auth))
    }
    if ns.SecretStorage != "" {
        kvReqOpts = append(kvReqOpts, vault.WithNamespace(ns.SecretStorage))
    }

    creds := schema.AppRoleLoginRequest{
        RoleId:   roleID,
        SecretId: roleSecret,
    }

    // Authentication
    switch {
    case rootToken != "":
        err := client.SetToken(rootToken)
        if err != nil {
            return nil, fmt.Errorf("root token authentication failure: %w", err)
        }
    case roleID != "":
        if roleSecret == "" {
            return nil, fmt.Errorf("role id and role secret must both be specified")
        }

        resp, err := client.Auth.AppRoleLogin(context.Background(),
            creds, authReqOpts...)
        if err != nil {
            return nil, fmt.Errorf("approle authentication failure: %w", err)
        }

        if err := client.SetToken(resp.Auth.ClientToken); err != nil {
            return nil, fmt.Errorf("approle authentication set token failure: %w", err)
        }
    default:
        return nil, fmt.Errorf("vault authentication requires either roleid/rolesecret or root token")
    }

    return &VaultIAMService{
        client:            client,
        authReqOpts:       authReqOpts,
        kvReqOpts:         kvReqOpts,
        secretStoragePath: secretStoragePath,
        rootAcc:           rootAcc,
        creds:             creds,
    }, nil
}

func (vt *VaultIAMService) reAuthIfNeeded(err error) error {
    if err == nil {
        return nil
    }

    // Vault returns 403 for expired/revoked tokens
    // pass all other errors back unchanged
    if !vault.IsErrorStatus(err, http.StatusForbidden) {
        return err
    }

    resp, authErr := vt.client.Auth.AppRoleLogin(context.Background(),
        vt.creds, vt.authReqOpts...)
    if authErr != nil {
        return fmt.Errorf("vault re-authentication failure: %w", authErr)
    }
    if err := vt.client.SetToken(resp.Auth.ClientToken); err != nil {
        return fmt.Errorf("vault re-authentication set token failure: %w", err)
    }

    return nil
}

func (vt *VaultIAMService) CreateAccount(account Account) error {
    if vt.rootAcc.Access == account.Access {
        return ErrUserExists
    }
    _, err := vt.client.Secrets.KvV2Write(context.Background(),
        vt.secretStoragePath+"/"+account.Access, schema.KvV2WriteRequest{
            Data: map[string]any{
                account.Access: account,
            },
            Options: map[string]any{
                "cas": 0,
            },
        }, vt.kvReqOpts...)
    if err != nil {
        if strings.Contains(err.Error(), "check-and-set") {
            return ErrUserExists
        }

        reauthErr := vt.reAuthIfNeeded(err)
        if reauthErr != nil {
            return reauthErr
        }
        // retry once after re-auth
        _, err = vt.client.Secrets.KvV2Write(context.Background(),
            vt.secretStoragePath+"/"+account.Access, schema.KvV2WriteRequest{
                Data: map[string]any{
                    account.Access: account,
                },
                Options: map[string]any{
                    "cas": 0,
                },
            }, vt.kvReqOpts...)
        if err != nil {
            if strings.Contains(err.Error(), "check-and-set") {
                return ErrUserExists
            }
            if vault.IsErrorStatus(err, http.StatusForbidden) {
                return fmt.Errorf("vault 403 permission denied on path %q. check KV mount path and policy. original: %w",
                    vt.secretStoragePath+"/"+account.Access, err)
            }
            return err
        }
        return nil
    }
    return nil
}

func (vt *VaultIAMService) GetUserAccount(access string) (Account, error) {
    if vt.rootAcc.Access == access {
        return vt.rootAcc, nil
    }
    resp, err := vt.client.Secrets.KvV2Read(context.Background(),
        vt.secretStoragePath+"/"+access, vt.kvReqOpts...)
    if err != nil {
        reauthErr := vt.reAuthIfNeeded(err)
        if reauthErr != nil {
            return Account{}, reauthErr
        }
        // retry once after re-auth
        resp, err = vt.client.Secrets.KvV2Read(context.Background(),
            vt.secretStoragePath+"/"+access, vt.kvReqOpts...)
        if err != nil {
            return Account{}, err
        }
    }
    acc, err := parseVaultUserAccount(resp.Data.Data, access)
    if err != nil {
        return Account{}, err
    }
    return acc, nil
}

func (vt *VaultIAMService) UpdateUserAccount(access string, props MutableProps) error {
    acc, err := vt.GetUserAccount(access)
    if err != nil {
        return err
    }
    updateAcc(&acc, props)
    err = vt.DeleteUserAccount(access)
    if err != nil {
        return err
    }
    err = vt.CreateAccount(acc)
    if err != nil {
        return err
    }
    return nil
}

func (vt *VaultIAMService) DeleteUserAccount(access string) error {
    _, err := vt.client.Secrets.KvV2DeleteMetadataAndAllVersions(context.Background(),
        vt.secretStoragePath+"/"+access, vt.kvReqOpts...)
    if err != nil {
        reauthErr := vt.reAuthIfNeeded(err)
        if reauthErr != nil {
            return reauthErr
        }
        // retry once after re-auth
        _, err = vt.client.Secrets.KvV2DeleteMetadataAndAllVersions(context.Background(),
            vt.secretStoragePath+"/"+access, vt.kvReqOpts...)
        if err != nil {
            return err
        }
    }
    return nil
}

func (vt *VaultIAMService) ListUserAccounts() ([]Account, error) {
    resp, err := vt.client.Secrets.KvV2List(context.Background(),
        vt.secretStoragePath, vt.kvReqOpts...)
    if err != nil {
        reauthErr := vt.reAuthIfNeeded(err)
        if reauthErr != nil {
            if vault.IsErrorStatus(err, http.StatusNotFound) {
                return []Account{}, nil
            }
            return nil, reauthErr
        }
        // retry once after re-auth
        resp, err = vt.client.Secrets.KvV2List(context.Background(),
            vt.secretStoragePath, vt.kvReqOpts...)
        if err != nil {
            if vault.IsErrorStatus(err, http.StatusNotFound) {
                return []Account{}, nil
            }
            return nil, err
        }
    }
    accs := []Account{}
    for _, acss := range resp.Data.Keys {
        acc, err := vt.GetUserAccount(acss)
        if err != nil {
            return nil, err
        }
        accs = append(accs, acc)
    }
    return accs, nil
}

// the client doesn't have explicit shutdown, as it uses http.Client
func (vt *VaultIAMService) Shutdown() error {
    return nil
}

var errInvalidUser error = errors.New("invalid user account entry in secrets engine")

func parseVaultUserAccount(data map[string]any, access string) (acc Account, err error) {
    usrAcc, ok := data[access].(map[string]any)
    if !ok {
        return acc, errInvalidUser
    }

    acss, ok := usrAcc["access"].(string)
    if !ok {
        return acc, errInvalidUser
    }
    secret, ok := usrAcc["secret"].(string)
    if !ok {
        return acc, errInvalidUser
    }
    role, ok := usrAcc["role"].(string)
    if !ok {
        return acc, errInvalidUser
    }
    userIdJson, ok := usrAcc["userID"].(json.Number)
    if !ok {
        return acc, errInvalidUser
    }
    userId, err := userIdJson.Int64()
    if err != nil {
        return acc, errInvalidUser
    }
    groupIdJson, ok := usrAcc["groupID"].(json.Number)
    if !ok {
        return acc, errInvalidUser
    }
    groupId, err := groupIdJson.Int64()
    if err != nil {
        return acc, errInvalidUser
    }
    projectIdJson, ok := usrAcc["projectID"].(json.Number)
    if !ok {
        return acc, errInvalidUser
    }
    projectID, err := projectIdJson.Int64()
    if err != nil {
        return acc, errInvalidUser
    }

    return Account{
        Access:    acss,
        Secret:    secret,
        Role:      Role(role),
        UserID:    int(userId),
        GroupID:   int(groupId),
        ProjectID: int(projectID),
    }, nil
}
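parseVaultUserAccount expects the KV v2 payload that CreateAccount writes: a map keyed by the access ID whose value carries the marshaled Account fields. In JSON terms the stored data looks roughly like this (hypothetical values; the field names are those read by the parser above, and numbers must decode as json.Number, hence the type assertions):

```json
{
  "alice": {
    "access": "alice",
    "secret": "s3cr3t",
    "role": "user",
    "userID": 1000,
    "groupID": 1000,
    "projectID": 0
  }
}
```
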
@@ -1,380 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
    "context"
    "encoding/json"
    "encoding/xml"
    "errors"
    "fmt"
    "time"

    "github.com/aws/aws-sdk-go-v2/service/s3/types"
    "github.com/versity/versitygw/backend"
    "github.com/versity/versitygw/debuglogger"
    "github.com/versity/versitygw/s3err"
    "github.com/versity/versitygw/s3response"
)

type BucketLockConfig struct {
    Enabled          bool
    DefaultRetention *types.DefaultRetention
    CreatedAt        *time.Time
}

func ParseBucketLockConfigurationInput(input []byte) ([]byte, error) {
    var lockConfig types.ObjectLockConfiguration
    if err := xml.Unmarshal(input, &lockConfig); err != nil {
        return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
    }

    if lockConfig.ObjectLockEnabled != types.ObjectLockEnabledEnabled {
        return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
    }

    config := BucketLockConfig{
        Enabled: lockConfig.ObjectLockEnabled == types.ObjectLockEnabledEnabled,
    }

    if lockConfig.Rule != nil && lockConfig.Rule.DefaultRetention != nil {
        retention := lockConfig.Rule.DefaultRetention

        if retention.Mode != types.ObjectLockRetentionModeCompliance && retention.Mode != types.ObjectLockRetentionModeGovernance {
            return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
        }
        if retention.Years != nil && retention.Days != nil {
            return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
        }

        if retention.Days != nil && *retention.Days <= 0 {
            return nil, s3err.GetAPIError(s3err.ErrObjectLockInvalidRetentionPeriod)
        }
        if retention.Years != nil && *retention.Years <= 0 {
            return nil, s3err.GetAPIError(s3err.ErrObjectLockInvalidRetentionPeriod)
        }

        config.DefaultRetention = retention
        now := time.Now()
        config.CreatedAt = &now
    }

    return json.Marshal(config)
}
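
The validation above accepts the standard S3 ObjectLockConfiguration body: ObjectLockEnabled must be "Enabled", the default retention mode must be GOVERNANCE or COMPLIANCE, and Days and Years are mutually exclusive and must be positive. For example:

```xml
<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <ObjectLockEnabled>Enabled</ObjectLockEnabled>
  <Rule>
    <DefaultRetention>
      <Mode>GOVERNANCE</Mode>
      <Days>30</Days>
    </DefaultRetention>
  </Rule>
</ObjectLockConfiguration>
```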

func ParseBucketLockConfigurationOutput(input []byte) (*types.ObjectLockConfiguration, error) {
    var config BucketLockConfig
    if err := json.Unmarshal(input, &config); err != nil {
        return nil, fmt.Errorf("parse object lock config: %w", err)
    }

    result := &types.ObjectLockConfiguration{
        Rule: &types.ObjectLockRule{
            DefaultRetention: config.DefaultRetention,
        },
    }

    if config.Enabled {
        result.ObjectLockEnabled = types.ObjectLockEnabledEnabled
    }

    return result, nil
}

func ParseObjectLockRetentionInput(input []byte) (*s3response.PutObjectRetentionInput, error) {
    var retention s3response.PutObjectRetentionInput
    if err := xml.Unmarshal(input, &retention); err != nil {
        debuglogger.Logf("invalid object lock retention request body: %v", err)
        return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
    }

    if retention.RetainUntilDate.Before(time.Now()) {
        debuglogger.Logf("object lock retain until date must be in the future")
        return nil, s3err.GetAPIError(s3err.ErrPastObjectLockRetainDate)
    }
    switch retention.Mode {
    case types.ObjectLockRetentionModeCompliance:
    case types.ObjectLockRetentionModeGovernance:
    default:
        debuglogger.Logf("invalid object lock retention mode: %s", retention.Mode)
        return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
    }

    return &retention, nil
}
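
Correspondingly, a retention body that passes these checks must carry a future RetainUntilDate and one of the two modes, e.g.:

```xml
<Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Mode>COMPLIANCE</Mode>
  <RetainUntilDate>2030-01-01T00:00:00Z</RetainUntilDate>
</Retention>
```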
|
||||
|
||||
func ParseObjectLockRetentionInputToJSON(input *s3response.PutObjectRetentionInput) ([]byte, error) {
|
||||
data, err := json.Marshal(input)
|
||||
if err != nil {
|
||||
debuglogger.Logf("parse object lock retention to JSON: %v", err)
|
||||
return nil, fmt.Errorf("parse object lock retention: %w", err)
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// IsObjectLockRetentionPutAllowed checks if the object lock retention PUT request
|
||||
// is allowed against the current state of the object lock
|
||||
func IsObjectLockRetentionPutAllowed(ctx context.Context, be backend.Backend, bucket, object, versionId, userAccess string, input *s3response.PutObjectRetentionInput, bypass bool) error {
|
||||
ret, err := be.GetObjectRetention(ctx, bucket, object, versionId)
|
||||
if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchObjectLockConfiguration)) {
|
||||
// if object lock configuration is not set
|
||||
// allow the retention modification without any checks
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
debuglogger.Logf("failed to get object retention: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
retention, err := ParseObjectLockRetentionOutput(ret)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if retention.Mode == input.Mode {
|
||||
// if retention mode is the same
|
||||
// the operation is allowed
|
||||
return nil
|
||||
}
|
||||
|
||||
if retention.Mode == types.ObjectLockRetentionModeCompliance {
|
||||
// COMPLIANCE mode is by definition not allowed to modify
|
||||
debuglogger.Logf("object lock retention change request from 'COMPLIANCE' to 'GOVERNANCE' is not allowed")
|
||||
return s3err.GetAPIError(s3err.ErrObjectLocked)
|
||||
}
|
||||
|
||||
if !bypass {
|
||||
// if x-amz-bypass-governance-retention is not provided
|
||||
// return error: object is locked
|
||||
debuglogger.Logf("object lock retention mode change is not allowed and bypass governence is not forced")
|
||||
return s3err.GetAPIError(s3err.ErrObjectLocked)
|
||||
}
|
||||
|
||||
// the last case left, when user tries to chenge
|
||||
// from 'GOVERNANCE' to 'COMPLIANCE' with
|
||||
// 'x-amz-bypass-governance-retention' header
|
||||
// first we need to check if user has 's3:BypassGovernanceRetention'
|
||||
policy, err := be.GetBucketPolicy(ctx, bucket)
|
||||
if err != nil {
|
||||
// if it fails to get the policy, return object is locked
|
||||
debuglogger.Logf("failed to get the bucket policy: %v", err)
|
||||
return s3err.GetAPIError(s3err.ErrObjectLocked)
|
||||
}
|
||||
err = VerifyBucketPolicy(policy, userAccess, bucket, object, BypassGovernanceRetentionAction)
|
||||
if err != nil {
|
||||
// if user doesn't have "s3:BypassGovernanceRetention" permission
|
||||
// return object is locked
|
||||
debuglogger.Logf("the user is missing 's3:BypassGovernanceRetention' permission")
|
||||
return s3err.GetAPIError(s3err.ErrObjectLocked)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func ParseObjectLockRetentionOutput(input []byte) (*types.ObjectLockRetention, error) {
|
||||
var retention types.ObjectLockRetention
|
||||
if err := json.Unmarshal(input, &retention); err != nil {
|
||||
debuglogger.Logf("parse object lock retention output: %v", err)
|
||||
return nil, fmt.Errorf("parse object lock retention: %w", err)
|
||||
}
|
||||
|
||||
return &retention, nil
|
||||
}

func ParseObjectLegalHoldOutput(status *bool) *s3response.GetObjectLegalHoldResult {
	if status == nil {
		return nil
	}

	if *status {
		return &s3response.GetObjectLegalHoldResult{
			Status: types.ObjectLockLegalHoldStatusOn,
		}
	}

	return &s3response.GetObjectLegalHoldResult{
		Status: types.ObjectLockLegalHoldStatusOff,
	}
}

func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects []types.ObjectIdentifier, bypass, isBucketPublic bool, be backend.Backend, isOverwrite bool) error {
	if isOverwrite {
		// if bucket versioning is enabled, any overwrite request
		// should be allowed, as it leads to a new object version
		// creation
		res, err := be.GetBucketVersioning(ctx, bucket)
		if err == nil && res.Status != nil && *res.Status == types.BucketVersioningStatusEnabled {
			return nil
		}
	}
	data, err := be.GetObjectLockConfiguration(ctx, bucket)
	if err != nil {
		if errors.Is(err, s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound)) {
			return nil
		}

		return err
	}

	var bucketLockConfig BucketLockConfig
	if err := json.Unmarshal(data, &bucketLockConfig); err != nil {
		return fmt.Errorf("parse object lock config: %w", err)
	}

	if !bucketLockConfig.Enabled {
		return nil
	}

	checkDefaultRetention := false

	if bucketLockConfig.DefaultRetention != nil && bucketLockConfig.CreatedAt != nil {
		expirationDate := *bucketLockConfig.CreatedAt
		if bucketLockConfig.DefaultRetention.Days != nil {
			expirationDate = expirationDate.AddDate(0, 0, int(*bucketLockConfig.DefaultRetention.Days))
		}
		if bucketLockConfig.DefaultRetention.Years != nil {
			expirationDate = expirationDate.AddDate(int(*bucketLockConfig.DefaultRetention.Years), 0, 0)
		}

		if expirationDate.After(time.Now()) {
			checkDefaultRetention = true
		}
	}

	var versioningEnabled bool
	vers, err := be.GetBucketVersioning(ctx, bucket)
	if err == nil && vers.Status != nil {
		versioningEnabled = *vers.Status == types.BucketVersioningStatusEnabled
	}

	for _, obj := range objects {
		var key, versionId string
		if obj.Key != nil {
			key = *obj.Key
		}
		if obj.VersionId != nil {
			versionId = *obj.VersionId
		}
		// if bucket versioning is enabled and versionId isn't provided
		// no lock check is needed, as it leads to a new delete marker creation
		if versioningEnabled && versionId == "" {
			continue
		}
		checkRetention := true
		retentionData, err := be.GetObjectRetention(ctx, bucket, key, versionId)
		if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
			continue
		}
		// if a `MethodNotAllowed` error is returned, the object is a delete marker
		// and no object lock check is needed
		if errors.Is(err, s3err.GetAPIError(s3err.ErrMethodNotAllowed)) {
			continue
		}
		if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchObjectLockConfiguration)) {
			checkRetention = false
		}
		if err != nil && checkRetention {
			return err
		}

		if checkRetention {
			retention, err := ParseObjectLockRetentionOutput(retentionData)
			if err != nil {
				return err
			}

			if retention.Mode != "" && retention.RetainUntilDate != nil {
				if retention.RetainUntilDate.Before(time.Now()) {
					// if the object retention is expired, the object
					// is allowed for write operations (delete, modify)
					return nil
				}

				switch retention.Mode {
				case types.ObjectLockRetentionModeGovernance:
					if !bypass {
						return s3err.GetAPIError(s3err.ErrObjectLocked)
					} else {
						policy, err := be.GetBucketPolicy(ctx, bucket)
						if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
							return s3err.GetAPIError(s3err.ErrObjectLocked)
						}
						if err != nil {
							return err
						}
						if isBucketPublic {
							err = VerifyPublicBucketPolicy(policy, bucket, key, BypassGovernanceRetentionAction)
						} else {
							err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
						}
						if err != nil {
							return s3err.GetAPIError(s3err.ErrObjectLocked)
						}
					}
				case types.ObjectLockRetentionModeCompliance:
					return s3err.GetAPIError(s3err.ErrObjectLocked)
				}
			}
		}

		checkLegalHold := true

		status, err := be.GetObjectLegalHold(ctx, bucket, key, versionId)
		if err != nil {
			if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
				continue
			}
			if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchObjectLockConfiguration)) {
				checkLegalHold = false
			} else {
				return err
			}
		}

		if checkLegalHold && *status {
			return s3err.GetAPIError(s3err.ErrObjectLocked)
		}

		if checkDefaultRetention {
			switch bucketLockConfig.DefaultRetention.Mode {
			case types.ObjectLockRetentionModeGovernance:
				if !bypass {
					return s3err.GetAPIError(s3err.ErrObjectLocked)
				} else {
					policy, err := be.GetBucketPolicy(ctx, bucket)
					if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
						return s3err.GetAPIError(s3err.ErrObjectLocked)
					}
					if err != nil {
						return err
					}
					if isBucketPublic {
						err = VerifyPublicBucketPolicy(policy, bucket, key, BypassGovernanceRetentionAction)
					} else {
						err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
					}
					if err != nil {
						return s3err.GetAPIError(s3err.ErrObjectLocked)
					}
				}
			case types.ObjectLockRetentionModeCompliance:
				return s3err.GetAPIError(s3err.ErrObjectLocked)
			}
		}
	}

	return nil
}
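As a usage sketch (the bucket, keys, and surrounding handler are hypothetical, and this assumes the function lives alongside the policy helpers above; aws.String is the SDK pointer helper), a DeleteObjects handler could gate the whole batch before deleting anything:

```
objs := []types.ObjectIdentifier{
	{Key: aws.String("docs/report.pdf")},
	{Key: aws.String("docs/old.pdf"), VersionId: aws.String("v1")},
}
// bypass mirrors the x-amz-bypass-governance-retention request header.
err := CheckObjectAccess(ctx, "my-bucket", userAccess, objs, bypass, false, be, false)
if err != nil {
	return err // s3err.ErrObjectLocked if any object is protected
}
```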
@@ -8,8 +8,7 @@ var IgnoredHeaders = Rules{
			// some clients use user-agent in signed headers
			// "User-Agent": struct{}{},
			"X-Amzn-Trace-Id": struct{}{},
			// Expect might appear in signed headers
			// "Expect": struct{}{},
			"Expect": struct{}{},
		},
	},
}

@@ -41,7 +41,7 @@ func TestIgnoredHeaders(t *testing.T) {
	}{
		"expect": {
			Header:        "Expect",
			ExpectIgnored: false,
			ExpectIgnored: true,
		},
		"authorization": {
			Header: "Authorization",

@@ -87,13 +87,13 @@ func TestStandaloneSign(t *testing.T) {

		actual := req.Header.Get("Authorization")
		if e, a := c.ExpSig, actual; e != a {
			t.Errorf("expected %v, but received %v", e, a)
			t.Errorf("expected %v, but recieved %v", e, a)
		}
		if e, a := c.OrigURI, req.URL.Path; e != a {
			t.Errorf("expected %v, but received %v", e, a)
			t.Errorf("expected %v, but recieved %v", e, a)
		}
		if e, a := c.EscapedURI, req.URL.EscapedPath(); e != a {
			t.Errorf("expected %v, but received %v", e, a)
			t.Errorf("expected %v, but recieved %v", e, a)
		}
	}
}
@@ -127,13 +127,13 @@ func TestStandaloneSign_RawPath(t *testing.T) {

		actual := req.Header.Get("Authorization")
		if e, a := c.ExpSig, actual; e != a {
			t.Errorf("expected %v, but received %v", e, a)
			t.Errorf("expected %v, but recieved %v", e, a)
		}
		if e, a := c.OrigURI, req.URL.Path; e != a {
			t.Errorf("expected %v, but received %v", e, a)
			t.Errorf("expected %v, but recieved %v", e, a)
		}
		if e, a := c.EscapedURI, req.URL.EscapedPath(); e != a {
			t.Errorf("expected %v, but received %v", e, a)
			t.Errorf("expected %v, but recieved %v", e, a)
		}
	}
}

File diff suppressed because it is too large
@@ -40,7 +40,7 @@ func azErrToS3err(azErr *azcore.ResponseError) s3err.APIError {
	case "BlobNotFound":
		return s3err.GetAPIError(s3err.ErrNoSuchKey)
	case "TagsTooLarge":
		return s3err.GetAPIError(s3err.ErrInvalidTagValue)
		return s3err.GetAPIError(s3err.ErrInvalidTag)
	case "Requested Range Not Satisfiable":
		return s3err.GetAPIError(s3err.ErrInvalidRange)
	}
@@ -18,9 +18,9 @@ import (
	"bufio"
	"context"
	"fmt"
	"io"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/versity/versitygw/s3err"
	"github.com/versity/versitygw/s3response"
	"github.com/versity/versitygw/s3select"
@@ -32,46 +32,40 @@ type Backend interface {
	Shutdown()

	// bucket operations
	ListBuckets(context.Context, s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error)
	ListBuckets(_ context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error)
	HeadBucket(context.Context, *s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
	GetBucketAcl(context.Context, *s3.GetBucketAclInput) ([]byte, error)
	CreateBucket(_ context.Context, _ *s3.CreateBucketInput, defaultACL []byte) error
	PutBucketAcl(_ context.Context, bucket string, data []byte) error
	DeleteBucket(_ context.Context, bucket string) error
	PutBucketVersioning(_ context.Context, bucket string, status types.BucketVersioningStatus) error
	GetBucketVersioning(_ context.Context, bucket string) (s3response.GetBucketVersioningOutput, error)
	DeleteBucket(context.Context, *s3.DeleteBucketInput) error
	PutBucketVersioning(context.Context, *s3.PutBucketVersioningInput) error
	GetBucketVersioning(_ context.Context, bucket string) (*s3.GetBucketVersioningOutput, error)
	PutBucketPolicy(_ context.Context, bucket string, policy []byte) error
	GetBucketPolicy(_ context.Context, bucket string) ([]byte, error)
	DeleteBucketPolicy(_ context.Context, bucket string) error
	PutBucketOwnershipControls(_ context.Context, bucket string, ownership types.ObjectOwnership) error
	GetBucketOwnershipControls(_ context.Context, bucket string) (types.ObjectOwnership, error)
	DeleteBucketOwnershipControls(_ context.Context, bucket string) error
	PutBucketCors(_ context.Context, bucket string, cors []byte) error
	GetBucketCors(_ context.Context, bucket string) ([]byte, error)
	DeleteBucketCors(_ context.Context, bucket string) error

	// multipart operations
	CreateMultipartUpload(context.Context, s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error)
	CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (_ s3response.CompleteMultipartUploadResult, versionid string, _ error)
	CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
	CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
	AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput) error
	ListMultipartUploads(context.Context, *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error)
	ListParts(context.Context, *s3.ListPartsInput) (s3response.ListPartsResult, error)
	UploadPart(context.Context, *s3.UploadPartInput) (*s3.UploadPartOutput, error)
	UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyPartResult, error)
	UploadPart(context.Context, *s3.UploadPartInput) (etag string, err error)
	UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error)

	// standard object operations
	PutObject(context.Context, s3response.PutObjectInput) (s3response.PutObjectOutput, error)
	PutObject(context.Context, *s3.PutObjectInput) (string, error)
	HeadObject(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
	GetObject(context.Context, *s3.GetObjectInput) (*s3.GetObjectOutput, error)
	GetObject(context.Context, *s3.GetObjectInput, io.Writer) (*s3.GetObjectOutput, error)
	GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
	GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error)
	CopyObject(context.Context, s3response.CopyObjectInput) (s3response.CopyObjectOutput, error)
	ListObjects(context.Context, *s3.ListObjectsInput) (s3response.ListObjectsResult, error)
	ListObjectsV2(context.Context, *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error)
	DeleteObject(context.Context, *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
	GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error)
	CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
	ListObjects(context.Context, *s3.ListObjectsInput) (*s3.ListObjectsOutput, error)
	ListObjectsV2(context.Context, *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error)
	DeleteObject(context.Context, *s3.DeleteObjectInput) error
	DeleteObjects(context.Context, *s3.DeleteObjectsInput) (s3response.DeleteResult, error)
	PutObjectAcl(context.Context, *s3.PutObjectAclInput) error
	ListObjectVersions(context.Context, *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error)
	ListObjectVersions(context.Context, *s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error)

	// special case object operations
	RestoreObject(context.Context, *s3.RestoreObjectInput) error
@@ -83,20 +77,12 @@ type Backend interface {
	DeleteBucketTagging(_ context.Context, bucket string) error

	// object tagging operations
	GetObjectTagging(_ context.Context, bucket, object, versionId string) (map[string]string, error)
	PutObjectTagging(_ context.Context, bucket, object, versionId string, tags map[string]string) error
	DeleteObjectTagging(_ context.Context, bucket, object, versionId string) error

	// object lock operations
	PutObjectLockConfiguration(_ context.Context, bucket string, config []byte) error
	GetObjectLockConfiguration(_ context.Context, bucket string) ([]byte, error)
	PutObjectRetention(_ context.Context, bucket, object, versionId string, retention []byte) error
	GetObjectRetention(_ context.Context, bucket, object, versionId string) ([]byte, error)
	PutObjectLegalHold(_ context.Context, bucket, object, versionId string, status bool) error
	GetObjectLegalHold(_ context.Context, bucket, object, versionId string) (*bool, error)
	GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error)
	PutObjectTagging(_ context.Context, bucket, object string, tags map[string]string) error
	DeleteObjectTagging(_ context.Context, bucket, object string) error

	// non AWS actions
	ChangeBucketOwner(_ context.Context, bucket, owner string) error
	ChangeBucketOwner(_ context.Context, bucket, newOwner string) error
	ListBucketsAndOwners(context.Context) ([]s3response.Bucket, error)
}

@@ -111,7 +97,7 @@ func (BackendUnsupported) Shutdown() {}
func (BackendUnsupported) String() string {
	return "Unsupported"
}
func (BackendUnsupported) ListBuckets(context.Context, s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
func (BackendUnsupported) ListBuckets(context.Context, string, bool) (s3response.ListAllMyBucketsResult, error) {
	return s3response.ListAllMyBucketsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) HeadBucket(context.Context, *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
@@ -126,14 +112,14 @@ func (BackendUnsupported) CreateBucket(context.Context, *s3.CreateBucketInput, [
func (BackendUnsupported) PutBucketAcl(_ context.Context, bucket string, data []byte) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteBucket(_ context.Context, bucket string) error {
func (BackendUnsupported) DeleteBucket(context.Context, *s3.DeleteBucketInput) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucketVersioning(_ context.Context, bucket string, status types.BucketVersioningStatus) error {
func (BackendUnsupported) PutBucketVersioning(context.Context, *s3.PutBucketVersioningInput) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetBucketVersioning(_ context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
	return s3response.GetBucketVersioningOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) GetBucketVersioning(_ context.Context, bucket string) (*s3.GetBucketVersioningOutput, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucketPolicy(_ context.Context, bucket string, policy []byte) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
@@ -144,30 +130,12 @@ func (BackendUnsupported) GetBucketPolicy(_ context.Context, bucket string) ([]b
func (BackendUnsupported) DeleteBucketPolicy(_ context.Context, bucket string) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucketOwnershipControls(_ context.Context, bucket string, ownership types.ObjectOwnership) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetBucketOwnershipControls(_ context.Context, bucket string) (types.ObjectOwnership, error) {
	return types.ObjectOwnershipBucketOwnerEnforced, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteBucketOwnershipControls(_ context.Context, bucket string) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucketCors(context.Context, string, []byte) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetBucketCors(_ context.Context, bucket string) ([]byte, error) {

func (BackendUnsupported) CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteBucketCors(_ context.Context, bucket string) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (BackendUnsupported) CreateMultipartUpload(context.Context, s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
	return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
	return s3response.CompleteMultipartUploadResult{}, "", s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
@@ -178,40 +146,40 @@ func (BackendUnsupported) ListMultipartUploads(context.Context, *s3.ListMultipar
func (BackendUnsupported) ListParts(context.Context, *s3.ListPartsInput) (s3response.ListPartsResult, error) {
	return s3response.ListPartsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) UploadPart(context.Context, *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) UploadPart(context.Context, *s3.UploadPartInput) (etag string, err error) {
	return "", s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
	return s3response.CopyPartResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
	return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (BackendUnsupported) PutObject(context.Context, s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
	return s3response.PutObjectOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) PutObject(context.Context, *s3.PutObjectInput) (string, error) {
	return "", s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) HeadObject(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObject(context.Context, *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
func (BackendUnsupported) GetObject(context.Context, *s3.GetObjectInput, io.Writer) (*s3.GetObjectOutput, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
	return s3response.GetObjectAttributesResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CopyObject(context.Context, s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
	return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjects(context.Context, *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
	return s3response.ListObjectsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjectsV2(context.Context, *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error) {
	return s3response.ListObjectsV2Result{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteObject(context.Context, *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
func (BackendUnsupported) GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjects(context.Context, *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjectsV2(context.Context, *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteObject(context.Context, *s3.DeleteObjectInput) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteObjects(context.Context, *s3.DeleteObjectsInput) (s3response.DeleteResult, error) {
	return s3response.DeleteResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
@@ -237,8 +205,8 @@ func (BackendUnsupported) SelectObjectContent(ctx context.Context, input *s3.Sel
	}
}

func (BackendUnsupported) ListObjectVersions(context.Context, *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error) {
	return s3response.ListVersionsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) ListObjectVersions(context.Context, *s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (BackendUnsupported) GetBucketTagging(_ context.Context, bucket string) (map[string]string, error) {
@@ -251,36 +219,17 @@ func (BackendUnsupported) DeleteBucketTagging(_ context.Context, bucket string)
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (BackendUnsupported) GetObjectTagging(_ context.Context, bucket, object, versionId string) (map[string]string, error) {
func (BackendUnsupported) GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutObjectTagging(_ context.Context, bucket, object, versionId string, tags map[string]string) error {
func (BackendUnsupported) PutObjectTagging(_ context.Context, bucket, object string, tags map[string]string) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteObjectTagging(_ context.Context, bucket, object, versionId string) error {
func (BackendUnsupported) DeleteObjectTagging(_ context.Context, bucket, object string) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (BackendUnsupported) PutObjectLockConfiguration(_ context.Context, bucket string, config []byte) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectLockConfiguration(_ context.Context, bucket string) ([]byte, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutObjectRetention(_ context.Context, bucket, object, versionId string, retention []byte) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectRetention(_ context.Context, bucket, object, versionId string) ([]byte, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutObjectLegalHold(_ context.Context, bucket, object, versionId string, status bool) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectLegalHold(_ context.Context, bucket, object, versionId string) (*bool, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (BackendUnsupported) ChangeBucketOwner(_ context.Context, bucket, owner string) error {
func (BackendUnsupported) ChangeBucketOwner(_ context.Context, bucket, newOwner string) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListBucketsAndOwners(context.Context) ([]s3response.Bucket, error) {

@@ -17,18 +17,10 @@ package backend
import (
	"crypto/md5"
	"encoding/hex"
	"errors"
	"fmt"
	"hash"
	"io"
	"io/fs"
	"math"
	"net/url"
	"os"
	"regexp"
	"strconv"
	"strings"
	"syscall"
	"time"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
@@ -36,13 +28,9 @@ import (
	"github.com/versity/versitygw/s3response"
)

const (
	// this is the media type for directories in AWS and Nextcloud
	DirContentType     = "application/x-directory"
	DefaultContentType = "binary/octet-stream"

	// this is the minimum allowed size for mp parts
	MinPartSize = 5 * 1024 * 1024
var (
	// RFC3339TimeFormat RFC3339 time format
	RFC3339TimeFormat = "2006-01-02T15:04:05.999Z"
)

func IsValidBucketName(name string) bool { return true }
@@ -59,328 +47,65 @@ func (d ByObjectName) Len() int { return len(d) }
func (d ByObjectName) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d ByObjectName) Less(i, j int) bool { return *d[i].Key < *d[j].Key }

func GetPtrFromString(str string) *string {
	if str == "" {
		return nil
	}
	return &str
}

func GetStringFromPtr(str *string) string {
	if str == nil {
		return ""
	}
	return *str
func GetStringPtr(s string) *string {
	return &s
}

func GetTimePtr(t time.Time) *time.Time {
	return &t
}

func TrimEtag(etag *string) *string {
	if etag == nil {
		return nil
	}

	return GetPtrFromString(strings.Trim(*etag, "\""))
}

var (
	errInvalidRange           = s3err.GetAPIError(s3err.ErrInvalidRange)
	errInvalidCopySourceRange = s3err.GetAPIError(s3err.ErrInvalidCopySourceRange)
	errPreconditionFailed     = s3err.GetAPIError(s3err.ErrPreconditionFailed)
	errNotModified            = s3err.GetAPIError(s3err.ErrNotModified)
	errInvalidRange = s3err.GetAPIError(s3err.ErrInvalidRange)
)

// ParseObjectRange parses the input range header and returns startoffset, length,
// isValid, and error. If no end offset is specified, length is set to the object size.
// For invalid inputs it returns no error, but isValid=false.
// An `InvalidRange` error is returned only if startoffset is greater than the object size.
func ParseObjectRange(size int64, acceptRange string) (int64, int64, bool, error) {
	// Return full object (invalid range, no error) if header empty
// ParseRange parses input range header and returns startoffset, length, and
// error. If no endoffset specified, then length is set to -1.
func ParseRange(fi fs.FileInfo, acceptRange string) (int64, int64, error) {
	if acceptRange == "" {
		return 0, size, false, nil
	}

	rangeKv := strings.Split(acceptRange, "=")
	if len(rangeKv) != 2 {
		return 0, size, false, nil
	}
	if rangeKv[0] != "bytes" { // unsupported unit -> ignore
		return 0, size, false, nil
	}

	bRange := strings.Split(rangeKv[1], "-")
	if len(bRange) != 2 { // malformed / multi-range
		return 0, size, false, nil
	}

	// Parse start; empty start indicates a suffix-byte-range-spec (e.g. bytes=-100)
	startOffset, err := strconv.ParseInt(bRange[0], 10, strconv.IntSize)
	if startOffset > int64(math.MaxInt) || startOffset < int64(math.MinInt) {
		return 0, size, false, errInvalidRange
	}
	if err != nil && bRange[0] != "" { // invalid numeric start (non-empty) -> ignore range
		return 0, size, false, nil
	}

	// If end part missing (e.g. bytes=100-)
	if bRange[1] == "" {
		if bRange[0] == "" { // bytes=- (meaningless) -> ignore
			return 0, size, false, nil
		}
		// start beyond or at size is unsatisfiable -> error (RequestedRangeNotSatisfiable)
		if startOffset >= size {
			return 0, 0, false, errInvalidRange
		}
		// bytes=100- => from start to end
		return startOffset, size - startOffset, true, nil
	}

	endOffset, err := strconv.ParseInt(bRange[1], 10, strconv.IntSize)
	if endOffset > int64(math.MaxInt) {
		return 0, size, false, errInvalidRange
	}
	if err != nil { // invalid numeric end -> ignore range
		return 0, size, false, nil
	}

	// Suffix range handling (bRange[0] == "")
	if bRange[0] == "" {
		// Disallow -0 (always unsatisfiable)
		if endOffset == 0 {
			return 0, 0, false, errInvalidRange
		}
		// For zero-sized objects any positive suffix is treated as invalid (ignored, no error)
		if size == 0 {
			return 0, size, false, nil
		}
		// Clamp to object size (request more bytes than exist -> entire object)
		endOffset = min(endOffset, size)
		return size - endOffset, endOffset, true, nil
	}

	// Normal range (start-end)
	if startOffset > endOffset { // start > end -> ignore
		return 0, size, false, nil
	}
	// Start beyond or at end of object -> error
	if startOffset >= size {
		return 0, 0, false, errInvalidRange
	}
	// Adjust end beyond object size (trim)
	if endOffset >= size {
		endOffset = size - 1
	}
	return startOffset, endOffset - startOffset + 1, true, nil
}
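A few concrete cases for a 1000-byte object may help pin the rules down (return order: start, length, isValid, err; values hypothetical):

```
backend.ParseObjectRange(1000, "bytes=0-99")  // 0, 100, true, nil
backend.ParseObjectRange(1000, "bytes=900-")  // 900, 100, true, nil
backend.ParseObjectRange(1000, "bytes=-100")  // 900, 100, true, nil (suffix range)
backend.ParseObjectRange(1000, "bytes=2000-") // 0, 0, false, InvalidRange
backend.ParseObjectRange(1000, "pages=1-2")   // 0, 1000, false, nil (unit ignored)
```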

// ParseCopySourceRange parses input range header and returns startoffset, length
// and error. If no endoffset specified, then length is set to the object size
func ParseCopySourceRange(size int64, acceptRange string) (int64, int64, error) {
	if acceptRange == "" {
		return 0, size, nil
		return 0, fi.Size(), nil
	}

	rangeKv := strings.Split(acceptRange, "=")

	if len(rangeKv) != 2 {
		return 0, 0, errInvalidCopySourceRange
	}

	if rangeKv[0] != "bytes" {
		return 0, 0, errInvalidCopySourceRange
	if len(rangeKv) < 2 {
		return 0, 0, errInvalidRange
	}

	bRange := strings.Split(rangeKv[1], "-")
	if len(bRange) != 2 {
		return 0, 0, errInvalidCopySourceRange
	if len(bRange) < 1 || len(bRange) > 2 {
		return 0, 0, errInvalidRange
	}

	startOffset, err := strconv.ParseInt(bRange[0], 10, 64)
	if err != nil {
		return 0, 0, errInvalidCopySourceRange
		return 0, 0, errInvalidRange
	}

	if startOffset >= size {
		return 0, 0, s3err.CreateExceedingRangeErr(size)
	endOffset := int64(-1)
	if len(bRange) == 1 || bRange[1] == "" {
		return startOffset, endOffset, nil
	}

	if bRange[1] == "" {
		return startOffset, size - startOffset + 1, nil
	}

	endOffset, err := strconv.ParseInt(bRange[1], 10, 64)
	endOffset, err = strconv.ParseInt(bRange[1], 10, 64)
	if err != nil {
		return 0, 0, errInvalidCopySourceRange
		return 0, 0, errInvalidRange
	}

	if endOffset < startOffset {
		return 0, 0, errInvalidCopySourceRange
	}

	if endOffset >= size {
		return 0, 0, s3err.CreateExceedingRangeErr(size)
		return 0, 0, errInvalidRange
	}

	return startOffset, endOffset - startOffset + 1, nil
}

// ParseCopySource parses x-amz-copy-source header and returns source bucket,
// source object, versionId, error respectively
func ParseCopySource(copySourceHeader string) (string, string, string, error) {
	if copySourceHeader[0] == '/' {
		copySourceHeader = copySourceHeader[1:]
	}

	var copySource, versionId string
	i := strings.LastIndex(copySourceHeader, "?versionId=")
	if i == -1 {
		copySource = copySourceHeader
	} else {
		copySource = copySourceHeader[:i]
		versionId = copySourceHeader[i+11:]
	}

	srcBucket, srcObject, ok := strings.Cut(copySource, "/")
	if !ok {
		return "", "", "", s3err.GetAPIError(s3err.ErrInvalidCopySourceBucket)
	}

	return srcBucket, srcObject, versionId, nil
}
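For example (values hypothetical):

```
bucket, object, versionId, err := backend.ParseCopySource("/srcbucket/dir/key.txt?versionId=3HL4kqtJ")
// bucket == "srcbucket", object == "dir/key.txt", versionId == "3HL4kqtJ", err == nil
```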

// ParseObjectTags parses the url encoded input string into
// map[string]string with unescaped key/value pair
func ParseObjectTags(tagging string) (map[string]string, error) {
	if tagging == "" {
		return nil, nil
	}

	tagSet := make(map[string]string)

	for tagging != "" {
		var tag string
		tag, tagging, _ = strings.Cut(tagging, "&")
		// if 'tag' before the first appearance of '&' is empty, continue
		if tag == "" {
			continue
		}

		key, value, found := strings.Cut(tag, "=")
		// if key is empty, but "=" is present, return invalid url encoding err
		if found && key == "" {
			return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
		}

		// return invalid tag key, if the key is longer than 128
		if len(key) > 128 {
			return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
		}

		// return invalid tag value, if tag value is longer than 256
		if len(value) > 256 {
			return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
		}

		// query unescape tag key
		key, err := url.QueryUnescape(key)
		if err != nil {
			return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
		}

		// query unescape tag value
		value, err = url.QueryUnescape(value)
		if err != nil {
			return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
		}

		// check tag key to be valid
		if !isValidTagComponent(key) {
			return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
		}

		// check tag value to be valid
		if !isValidTagComponent(value) {
			return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
		}

		// duplicate keys are not allowed: return invalid url encoding err
		_, ok := tagSet[key]
		if ok {
			return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
		}

		tagSet[key] = value
	}

	return tagSet, nil
}
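A quick sketch of the accepted input shape (values hypothetical):

```
tags, err := backend.ParseObjectTags("env=prod&team=storage%20eng")
// tags == map[string]string{"env": "prod", "team": "storage eng"}, err == nil

_, err = backend.ParseObjectTags("env=prod&env=dev")
// err == ErrInvalidURLEncodedTagging (duplicate key)
```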

// ParseCreateBucketTags parses and validates the bucket
// tagging from CreateBucket input
func ParseCreateBucketTags(tagging []types.Tag) (map[string]string, error) {
	if len(tagging) == 0 {
		return nil, nil
	}

	tagset := make(map[string]string, len(tagging))

	if len(tagging) > 50 {
		return nil, s3err.GetAPIError(s3err.ErrBucketTaggingLimited)
	}

	for _, tag := range tagging {
		// validate tag key length
		key := GetStringFromPtr(tag.Key)
		if len(key) == 0 || len(key) > 128 {
			return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
		}

		// validate tag key string chars
		if !isValidTagComponent(key) {
			return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
		}

		// validate tag value length
		value := GetStringFromPtr(tag.Value)
		if len(value) > 256 {
			return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
		}

		// validate tag value string chars
		if !isValidTagComponent(value) {
			return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
		}

		// make sure there are no duplicate keys
		_, ok := tagset[key]
		if ok {
			return nil, s3err.GetAPIError(s3err.ErrDuplicateTagKey)
		}

		tagset[key] = value
	}

	return tagset, nil
}

// tag component (key/value) name rule regexp
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_Tag.html
var validTagComponent = regexp.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`)

// isValidTagComponent validates the tag component (key/value) name
func isValidTagComponent(str string) bool {
	return validTagComponent.Match([]byte(str))
}

func GetMultipartMD5(parts []types.CompletedPart) string {
	var partsEtagBytes []byte
	for _, part := range parts {
		partsEtagBytes = append(partsEtagBytes, getEtagBytes(*part.ETag)...)
	}

	return fmt.Sprintf("\"%s-%d\"", md5String(partsEtagBytes), len(parts))
	s3MD5 := fmt.Sprintf("%s-%d", md5String(partsEtagBytes), len(parts))
	return s3MD5
}
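For illustration (part ETags hypothetical): the multipart ETag is the MD5 of the concatenated binary part digests, suffixed with the part count, not a digest of the object data itself.

```
parts := []types.CompletedPart{
	{ETag: aws.String("\"5d41402abc4b2a76b9719d911017c592\"")},
	{ETag: aws.String("\"7d793037a0760186574b0282f2f435e7\"")},
}
etag := backend.GetMultipartMD5(parts)
// etag == "\"<md5 of the two raw digests>-2\""
```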

func getEtagBytes(etag string) []byte {
@@ -395,275 +120,3 @@ func md5String(data []byte) string {
	sum := md5.Sum(data)
	return hex.EncodeToString(sum[:])
}

type FileSectionReadCloser struct {
	R io.Reader
	F *os.File
}

func (f *FileSectionReadCloser) Read(p []byte) (int, error) {
	return f.R.Read(p)
}

func (f *FileSectionReadCloser) Close() error {
	return f.F.Close()
}

// MoveFile moves a file from source to destination.
func MoveFile(source, destination string, perm os.FileMode) error {
	// We use Rename as the atomic operation for object puts. The upload is
	// written to a temp file to not conflict with any other simultaneous
	// uploads. The final operation is to move the temp file into place for
	// the object. This ensures the object semantics of last upload completed
	// wins and is not some combination of writes from simultaneous uploads.
	err := os.Rename(source, destination)
	if err == nil || !errors.Is(err, syscall.EXDEV) {
		return err
	}

	// Rename can fail if the source and destination are not on the same
	// filesystem. The fallback is to copy the file and then remove the source.
	// We need to be careful that the destination does not exist before copying
	// to prevent any other simultaneous writes to the file.
	sourceFile, err := os.Open(source)
	if err != nil {
		return fmt.Errorf("open source: %w", err)
	}
	defer sourceFile.Close()

	var destFile *os.File
	for {
		destFile, err = os.OpenFile(destination, os.O_CREATE|os.O_EXCL|os.O_WRONLY, perm)
		if err != nil {
			if errors.Is(err, fs.ErrExist) {
				if removeErr := os.Remove(destination); removeErr != nil {
					return fmt.Errorf("remove existing destination: %w", removeErr)
				}
				continue
			}
			return fmt.Errorf("create destination: %w", err)
		}
		break
	}
	defer destFile.Close()

	_, err = io.Copy(destFile, sourceFile)
	if err != nil {
		return fmt.Errorf("copy data: %w", err)
	}

	err = os.Remove(source)
	if err != nil {
		return fmt.Errorf("remove source: %w", err)
	}

	return nil
}
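A minimal usage sketch (paths hypothetical). The copy fallback only triggers when Rename fails with EXDEV, that is, when the temp file and the destination live on different filesystems:

```
// Stage the upload in a temp file, then publish it atomically.
if err := backend.MoveFile("/tmp/upload-1234.tmp", "/data/mybucket/key.obj", 0644); err != nil {
	return fmt.Errorf("finalize object: %w", err)
}
```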

// GenerateEtag generates a new quoted etag from the provided hash.Hash
func GenerateEtag(h hash.Hash) string {
	dataSum := h.Sum(nil)
	return fmt.Sprintf("\"%s\"", hex.EncodeToString(dataSum[:]))
}

// AreEtagsSame compares 2 etags by ignoring quotes
func AreEtagsSame(e1, e2 string) bool {
	return strings.Trim(e1, `"`) == strings.Trim(e2, `"`)
}

func getBoolPtr(b bool) *bool {
	return &b
}

type PreConditions struct {
	IfMatch       *string
	IfNoneMatch   *string
	IfModSince    *time.Time
	IfUnmodeSince *time.Time
}

// EvaluatePreconditions takes the object ETag, the last modified time and
// evaluates the read preconditions:
// - if-match,
// - if-none-match
// - if-modified-since
// - if-unmodified-since
// if-match and if-none-match are ETag comparisons;
// if-modified-since and if-unmodified-since are last modified time comparisons
func EvaluatePreconditions(etag string, modTime time.Time, preconditions PreConditions) error {
	if preconditions.IfMatch == nil && preconditions.IfNoneMatch == nil && preconditions.IfModSince == nil && preconditions.IfUnmodeSince == nil {
		return nil
	}

	etag = strings.Trim(etag, `"`)

	// convert all conditions to *bool to evaluate the conditions
	var ifMatch, ifNoneMatch, ifModSince, ifUnmodeSince *bool
	if preconditions.IfMatch != nil {
		ifMatch = getBoolPtr(*preconditions.IfMatch == etag)
	}
	if preconditions.IfNoneMatch != nil {
		ifNoneMatch = getBoolPtr(*preconditions.IfNoneMatch != etag)
	}
	if preconditions.IfModSince != nil {
		ifModSince = getBoolPtr(preconditions.IfModSince.UTC().Before(modTime.UTC()))
	}
	if preconditions.IfUnmodeSince != nil {
		ifUnmodeSince = getBoolPtr(preconditions.IfUnmodeSince.UTC().After(modTime.UTC()))
	}

	if ifMatch != nil {
		// if `if-match` doesn't match, return PreconditionFailed
		if !*ifMatch {
			return errPreconditionFailed
		}

		// if-match matches
		if *ifMatch {
			if ifNoneMatch != nil {
				// if `if-none-match` doesn't match return NotModified
				if !*ifNoneMatch {
					return errNotModified
				}

				// if both `if-match` and `if-none-match` match, return no error
				return nil
			}

			// if `if-match` matches but `if-modified-since` is false return NotModified
			if ifModSince != nil && !*ifModSince {
				return errNotModified
			}

			// ignore `if-unmodified-since` as `if-match` is true
			return nil
		}
	}

	if ifNoneMatch != nil {
		if *ifNoneMatch {
			// if `if-none-match` is true, but `if-unmodified-since` is false
			// return PreconditionFailed
			if ifUnmodeSince != nil && !*ifUnmodeSince {
				return errPreconditionFailed
			}

			// ignore `if-modified-since` as `if-none-match` is true
			return nil
		} else {
			// if `if-none-match` is false and `if-unmodified-since` is false
			// return PreconditionFailed
			if ifUnmodeSince != nil && !*ifUnmodeSince {
				return errPreconditionFailed
			}

			// in all other cases when `if-none-match` is false return NotModified
			return errNotModified
		}
	}

	if ifModSince != nil && !*ifModSince {
		// if both `if-modified-since` and `if-unmodified-since` are false
		// return PreconditionFailed
		if ifUnmodeSince != nil && !*ifUnmodeSince {
			return errPreconditionFailed
		}

		// if only `if-modified-since` is false, return NotModified
		return errNotModified
	}

	// if `if-unmodified-since` is false return PreconditionFailed
	if ifUnmodeSince != nil && !*ifUnmodeSince {
		return errPreconditionFailed
	}

	return nil
}
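A sketch of the common conditional-GET case (ETag and time values hypothetical); a matching If-None-Match should surface as NotModified so the handler can answer 304:

```
objModTime := time.Now() // the object's last modified time
pre := backend.PreConditions{IfNoneMatch: aws.String("686897696a7c876b7e")}
err := backend.EvaluatePreconditions("\"686897696a7c876b7e\"", objModTime, pre)
// err is the NotModified API error: the stored ETag matches If-None-Match
```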

// EvaluateMatchPreconditions evaluates if-match and if-none-match preconditions
func EvaluateMatchPreconditions(etag string, ifMatch, ifNoneMatch *string) error {
	etag = strings.Trim(etag, `"`)
	if ifMatch != nil && *ifMatch != etag {
		return errPreconditionFailed
	}
	if ifNoneMatch != nil && *ifNoneMatch == etag {
		return errPreconditionFailed
	}

	return nil
}

// EvaluateObjectPutPreconditions evaluates if-match and if-none-match preconditions
// for object PUT (PutObject, CompleteMultipartUpload) actions
func EvaluateObjectPutPreconditions(etag string, ifMatch, ifNoneMatch *string, objExists bool) error {
	if ifMatch == nil && ifNoneMatch == nil {
		return nil
	}

	if ifNoneMatch != nil && *ifNoneMatch != "*" {
		return s3err.GetAPIError(s3err.ErrNotImplemented)
	}

	if ifNoneMatch != nil && ifMatch != nil {
		return s3err.GetAPIError(s3err.ErrNotImplemented)
	}

	if ifNoneMatch != nil && objExists {
		return s3err.GetAPIError(s3err.ErrPreconditionFailed)
	}

	if ifMatch != nil && !objExists {
		return s3err.GetAPIError(s3err.ErrNoSuchKey)
	}

	etag = strings.Trim(etag, `"`)

	if ifMatch != nil && *ifMatch != etag {
		return s3err.GetAPIError(s3err.ErrPreconditionFailed)
	}

	return nil
}

type ObjectDeletePreconditions struct {
	IfMatch            *string
	IfMatchLastModTime *time.Time
	IfMatchSize        *int64
}

// EvaluateObjectDeletePreconditions evaluates preconditions for DeleteObject
func EvaluateObjectDeletePreconditions(etag string, modTime time.Time, size int64, preconditions ObjectDeletePreconditions) error {
	ifMatch := preconditions.IfMatch
	if ifMatch != nil && *ifMatch != etag {
		return errPreconditionFailed
	}

	ifMatchTime := preconditions.IfMatchLastModTime
	if ifMatchTime != nil && ifMatchTime.Unix() != modTime.Unix() {
		return errPreconditionFailed
	}

	ifMatchSize := preconditions.IfMatchSize
	if ifMatchSize != nil && *ifMatchSize != size {
		return errPreconditionFailed
	}

	return nil
}

// IsValidDirectoryName returns true if the string is a valid name
// for a directory
func IsValidDirectoryName(name string) bool {
	// directories may not contain a path separator
	if strings.ContainsRune(name, '/') {
		return false
	}

	// directories may not contain null character
	if strings.ContainsRune(name, 0) {
		return false
	}

	return true
}

@@ -1,42 +0,0 @@
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package meta

import "os"

// MetadataStorer defines the interface for managing metadata.
// When object == "", the operation is on the bucket.
type MetadataStorer interface {
	// RetrieveAttribute retrieves the value of a specific attribute for an object or a bucket.
	// Returns the value of the attribute, or an error if the attribute does not exist.
	RetrieveAttribute(f *os.File, bucket, object, attribute string) ([]byte, error)

	// StoreAttribute stores the value of a specific attribute for an object or a bucket.
	// If attribute already exists, new attribute should replace existing.
	// Returns an error if the operation fails.
	StoreAttribute(f *os.File, bucket, object, attribute string, value []byte) error

	// DeleteAttribute removes the value of a specific attribute for an object or a bucket.
	// Returns an error if the operation fails.
	DeleteAttribute(bucket, object, attribute string) error

	// ListAttributes lists all attributes for an object or a bucket.
	// Returns list of attribute names, or an error if the operation fails.
	ListAttributes(bucket, object string) ([]string, error)

	// DeleteAttributes removes all attributes for an object or a bucket.
	// Returns an error if the operation fails.
	DeleteAttributes(bucket, object string) error
}
@@ -1,54 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package meta

import (
	"os"
)

// NoMeta is a metadata storer that does not store metadata.
// This can be useful for read only mounts where attempting to store metadata
// would fail.
type NoMeta struct{}

// RetrieveAttribute retrieves the value of a specific attribute for an object or a bucket.
// always returns ErrNoSuchKey
func (NoMeta) RetrieveAttribute(_ *os.File, _, _, _ string) ([]byte, error) {
	return nil, ErrNoSuchKey
}

// StoreAttribute stores the value of a specific attribute for an object or a bucket.
// always returns nil without storing the attribute
func (NoMeta) StoreAttribute(_ *os.File, _, _, _ string, _ []byte) error {
	return nil
}

// DeleteAttribute removes the value of a specific attribute for an object or a bucket.
// always returns nil without deleting the attribute
func (NoMeta) DeleteAttribute(_, _, _ string) error {
	return nil
}

// ListAttributes lists all attributes for an object or a bucket.
// always returns an empty list of attributes
func (NoMeta) ListAttributes(_, _ string) ([]string, error) {
	return []string{}, nil
}

// DeleteAttributes removes all attributes for an object or a bucket.
// always returns nil without deleting any attributes
func (NoMeta) DeleteAttributes(bucket, object string) error {
	return nil
}
@@ -1,197 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package meta

import (
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// SideCar is a metadata storer that uses sidecar files to store metadata.
type SideCar struct {
	dir string
}

const (
	sidecarmeta = "meta"
)

// NewSideCar creates a new SideCar metadata storer.
func NewSideCar(dir string) (SideCar, error) {
	fi, err := os.Lstat(dir)
	if err != nil {
		return SideCar{}, fmt.Errorf("failed to stat directory: %v", err)
	}
	if !fi.IsDir() {
		return SideCar{}, fmt.Errorf("not a directory")
	}

	return SideCar{dir: dir}, nil
}

// RetrieveAttribute retrieves the value of a specific attribute for an object or a bucket.
func (s SideCar) RetrieveAttribute(_ *os.File, bucket, object, attribute string) ([]byte, error) {
	metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
	if object == "" {
		metadir = filepath.Join(s.dir, bucket, sidecarmeta)
	}
	attr := filepath.Join(metadir, attribute)

	value, err := os.ReadFile(attr)
	if errors.Is(err, os.ErrNotExist) {
		return nil, ErrNoSuchKey
	}
	if err != nil {
		return nil, fmt.Errorf("failed to read attribute: %v", err)
	}

	return value, nil
}

// StoreAttribute stores the value of a specific attribute for an object or a bucket.
func (s SideCar) StoreAttribute(_ *os.File, bucket, object, attribute string, value []byte) error {
	metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
	if object == "" {
		metadir = filepath.Join(s.dir, bucket, sidecarmeta)
	}
	err := os.MkdirAll(metadir, 0777)
	if err != nil {
		return fmt.Errorf("failed to create metadata directory: %v", err)
	}

	attr := filepath.Join(metadir, attribute)
	err = os.WriteFile(attr, value, 0666)
	if err != nil {
		return fmt.Errorf("failed to write attribute: %v", err)
	}

	return nil
}

// DeleteAttribute removes the value of a specific attribute for an object or a bucket.
func (s SideCar) DeleteAttribute(bucket, object, attribute string) error {
	metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
	if object == "" {
		metadir = filepath.Join(s.dir, bucket, sidecarmeta)
	}
	attr := filepath.Join(metadir, attribute)

	err := os.Remove(attr)
	if errors.Is(err, os.ErrNotExist) {
		return ErrNoSuchKey
	}
	if err != nil {
		return fmt.Errorf("failed to remove attribute: %v", err)
	}

	s.cleanupEmptyDirs(metadir, bucket, object)

	return nil
}

// ListAttributes lists all attributes for an object or a bucket.
func (s SideCar) ListAttributes(bucket, object string) ([]string, error) {
	metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
	if object == "" {
		metadir = filepath.Join(s.dir, bucket, sidecarmeta)
	}

	ents, err := os.ReadDir(metadir)
	if errors.Is(err, os.ErrNotExist) {
		return []string{}, nil
	}
	if err != nil {
		return nil, fmt.Errorf("failed to list attributes: %v", err)
	}

	var attrs []string
	for _, ent := range ents {
		attrs = append(attrs, ent.Name())
	}

	return attrs, nil
}

// DeleteAttributes removes all attributes for an object or a bucket.
func (s SideCar) DeleteAttributes(bucket, object string) error {
	metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
	if object == "" {
		metadir = filepath.Join(s.dir, bucket, sidecarmeta)
	}

	err := os.RemoveAll(metadir)
	if err != nil && !errors.Is(err, os.ErrNotExist) {
		return fmt.Errorf("failed to remove attributes: %v", err)
	}
	s.cleanupEmptyDirs(metadir, bucket, object)
	return nil
}

func (s SideCar) cleanupEmptyDirs(metadir, bucket, object string) {
	removeIfEmpty(metadir)
	if bucket == "" {
		return
	}
	bucketDir := filepath.Join(s.dir, bucket)
	if object != "" {
		removeEmptyParents(filepath.Dir(metadir), bucketDir)
	}
	removeIfEmpty(bucketDir)
}

func removeIfEmpty(dir string) {
	empty, err := isDirEmpty(dir)
	if err != nil || !empty {
		return
	}
	_ = os.Remove(dir)
}

func removeEmptyParents(dir, stopDir string) {
	for {
		if dir == stopDir || dir == "." || dir == string(filepath.Separator) {
			return
		}
		empty, err := isDirEmpty(dir)
		if err != nil || !empty {
			return
		}
		err = os.Remove(dir)
		if err != nil {
			return
		}
		dir = filepath.Dir(dir)
	}
}

func isDirEmpty(dir string) (bool, error) {
	f, err := os.Open(dir)
	if err != nil {
		return false, err
	}
	defer f.Close()

	ents, err := f.Readdirnames(1)
	if err == io.EOF {
		return true, nil
	}
	if err != nil {
		return false, err
	}
	return len(ents) == 0, nil
}
|
||||
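The sidecar layout keeps each attribute as its own file under `<dir>/<bucket>[/<object>]/meta/<attribute>`, so a round trip is ordinary file I/O. A minimal usage sketch, assuming the import path `github.com/versity/versitygw/backend/meta` for the package above; bucket and object names are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/versity/versitygw/backend/meta" // assumed import path
)

func main() {
	// Scratch directory standing in for the gateway metadata root.
	dir, err := os.MkdirTemp("", "sidecar")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	sc, err := meta.NewSideCar(dir)
	if err != nil {
		log.Fatal(err)
	}

	// Attributes are stored one file per attribute under
	// <dir>/<bucket>/<object>/meta/<attribute>.
	if err := sc.StoreAttribute(nil, "mybucket", "myobject", "etag", []byte("abc123")); err != nil {
		log.Fatal(err)
	}

	v, err := sc.RetrieveAttribute(nil, "mybucket", "myobject", "etag")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("etag = %s\n", v)

	// Deleting the last attribute also prunes the now-empty meta dirs.
	if err := sc.DeleteAttribute("mybucket", "myobject", "etag"); err != nil {
		log.Fatal(err)
	}
}
```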
@@ -1,122 +0,0 @@

// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package meta

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/pkg/xattr"
	"github.com/versity/versitygw/s3err"
)

var (
	// ErrNoSuchKey is returned when the key does not exist.
	ErrNoSuchKey = errors.New("no such key")
)

type XattrMeta struct{}

// RetrieveAttribute retrieves the value of a specific attribute for an object in a bucket.
func (x XattrMeta) RetrieveAttribute(f *os.File, bucket, object, attribute string) ([]byte, error) {
	if f != nil {
		b, err := xattr.FGet(f, xattrPrefix+attribute)
		if errors.Is(err, xattr.ENOATTR) {
			return nil, ErrNoSuchKey
		}
		return b, err
	}

	b, err := xattr.Get(filepath.Join(bucket, object), xattrPrefix+attribute)
	if errors.Is(err, xattr.ENOATTR) {
		return nil, ErrNoSuchKey
	}
	return b, err
}

// StoreAttribute stores the value of a specific attribute for an object in a bucket.
func (x XattrMeta) StoreAttribute(f *os.File, bucket, object, attribute string, value []byte) error {
	if f != nil {
		err := xattr.FSet(f, xattrPrefix+attribute, value)
		if errors.Is(err, syscall.EROFS) {
			return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
		}
		return err
	}

	err := xattr.Set(filepath.Join(bucket, object), xattrPrefix+attribute, value)
	if errors.Is(err, syscall.EROFS) {
		return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
	}
	return err
}

// DeleteAttribute removes the value of a specific attribute for an object in a bucket.
func (x XattrMeta) DeleteAttribute(bucket, object, attribute string) error {
	err := xattr.Remove(filepath.Join(bucket, object), xattrPrefix+attribute)
	if errors.Is(err, xattr.ENOATTR) {
		return ErrNoSuchKey
	}
	if errors.Is(err, syscall.EROFS) {
		return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
	}
	return err
}

// DeleteAttributes is not implemented for xattr since xattrs
// are automatically removed when the file is deleted.
func (x XattrMeta) DeleteAttributes(bucket, object string) error {
	return nil
}

// ListAttributes lists all attributes for an object in a bucket.
func (x XattrMeta) ListAttributes(bucket, object string) ([]string, error) {
	attrs, err := xattr.List(filepath.Join(bucket, object))
	if err != nil {
		return nil, err
	}
	attributes := make([]string, 0, len(attrs))
	for _, attr := range attrs {
		if !isUserAttr(attr) {
			continue
		}
		attributes = append(attributes, strings.TrimPrefix(attr, xattrPrefix))
	}
	return attributes, nil
}

func isUserAttr(attr string) bool {
	return strings.HasPrefix(attr, xattrPrefix)
}

// Test is a helper function to test if xattrs are supported.
func (x XattrMeta) Test(path string) error {
	// check for platform support
	if !xattr.XATTR_SUPPORTED {
		return fmt.Errorf("xattrs are not supported on this platform")
	}

	// check if the filesystem supports xattrs
	_, err := xattr.Get(path, "user.test")
	if errors.Is(err, syscall.ENOTSUP) {
		return fmt.Errorf("xattrs are not supported on this filesystem")
	}

	return nil
}
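Both storers expose the same attribute surface, so a caller can probe xattr support at startup and fall back to sidecar files. A hedged sketch of that selection; the interface name and fallback policy here are illustrative, not necessarily how the gateway wires it:

```go
package main

import (
	"os"

	"github.com/versity/versitygw/backend/meta" // assumed import path
)

// AttributeStorer captures the shared method set of XattrMeta and SideCar
// as shown in this diff; the repo's own interface name may differ.
type AttributeStorer interface {
	RetrieveAttribute(f *os.File, bucket, object, attribute string) ([]byte, error)
	StoreAttribute(f *os.File, bucket, object, attribute string, value []byte) error
	DeleteAttribute(bucket, object, attribute string) error
	DeleteAttributes(bucket, object string) error
	ListAttributes(bucket, object string) ([]string, error)
}

// chooseStorer probes xattr support on the root path and falls back to
// sidecar files when xattrs are unavailable.
func chooseStorer(root string) (AttributeStorer, error) {
	x := meta.XattrMeta{}
	if err := x.Test(root); err == nil {
		return x, nil
	}
	sc, err := meta.NewSideCar(root)
	if err != nil {
		return nil, err
	}
	return sc, nil
}
```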
@@ -1,7 +1,6 @@

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Copyright 2024 Versity Software

// MkdirAll borrowed from stdlib to add ability to set ownership
// as directories are created

@@ -15,6 +14,11 @@ import (

	"github.com/versity/versitygw/s3err"
)

var (
	// TODO: make this configurable
	defaultDirPerm fs.FileMode = 0755
)

// MkdirAll is similar to os.MkdirAll but it will return
// ErrObjectParentIsFile when appropriate
// MkdirAll creates a directory named path,

@@ -25,9 +29,9 @@ import (

// Any newly created directory is set to provided uid/gid ownership.
// If path is already a directory, MkdirAll does nothing
// and returns nil.
// Any directory created will be set to provided uid/gid ownership
// if doChown is true.
func MkdirAll(path string, uid, gid int, doChown bool, dirPerm fs.FileMode) error {
func MkdirAll(path string, uid, gid int, doChown bool) error {
	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
	dir, err := os.Stat(path)
	if err == nil {

@@ -50,14 +54,14 @@ func MkdirAll(path string, uid, gid int, doChown bool, dirPerm fs.FileMode) erro

	if j > 1 {
		// Create parent.
		err = MkdirAll(path[:j-1], uid, gid, doChown, dirPerm)
		err = MkdirAll(path[:j-1], uid, gid, doChown)
		if err != nil {
			return err
		}
	}

	// Parent now exists; invoke Mkdir and use its result.
	err = os.Mkdir(path, dirPerm)
	err = os.Mkdir(path, defaultDirPerm)
	if err != nil {
		// Handle arguments like "foo/." by
		// double-checking that directory doesn't exist.
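The hunk above swaps a per-call dirPerm for a package default, but the core idea is unchanged: a stdlib-style recursive mkdir that chowns only the directories it actually creates. A compact standalone sketch of that technique, not the gateway's exact code (its version also maps errors such as ErrObjectParentIsFile):

```go
package main

import (
	"os"
	"path/filepath"
)

// mkdirAllChown creates path and any missing parents, chowning only the
// directories this call itself creates; pre-existing ones are left alone.
func mkdirAllChown(path string, uid, gid int, perm os.FileMode) error {
	if fi, err := os.Stat(path); err == nil {
		if fi.IsDir() {
			return nil // already there, nothing to chown
		}
		return &os.PathError{Op: "mkdir", Path: path, Err: os.ErrExist}
	}

	parent := filepath.Dir(path)
	if parent != path {
		if err := mkdirAllChown(parent, uid, gid, perm); err != nil {
			return err
		}
	}

	if err := os.Mkdir(path, perm); err != nil {
		if fi, serr := os.Stat(path); serr == nil && fi.IsDir() {
			return nil // lost a race to another creator; fine
		}
		return err
	}
	// Only directories created by this call get the new ownership.
	return os.Chown(path, uid, gid)
}
```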
@@ -1,46 +0,0 @@

// Copyright 2026 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

//go:build windows

package posix

import (
	"os"
	"path/filepath"

	"github.com/versity/versitygw/s3err"
)

func handleParentDirError(name string) error {
	dir := filepath.Dir(name)

	// Walk up the directory hierarchy
	for dir != "." && dir != "/" {
		d, statErr := os.Stat(dir)
		if statErr == nil {
			// Path component exists
			if !d.IsDir() {
				// Found a file in the ancestor path
				return s3err.GetAPIError(s3err.ErrObjectParentIsFile)
			}
			// Found a valid directory ancestor, parent truly doesn't exist
			break
		}
		// Continue checking parent directories
		dir = filepath.Dir(dir)
	}
	// Parent doesn't exist or is a directory, treat as ENOENT
	return nil
}
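On Windows there is no ENOTDIR-style errno to lean on, so the helper above re-stats the ancestors after a failed create to decide whether the real cause is a regular file sitting in the object's parent path. A self-contained sketch of the same walk plus a hypothetical call site (the error sentinel and caller are illustrative stand-ins, not repo code):

```go
package main

import (
	"errors"
	"os"
	"path/filepath"
)

// Stand-in for the gateway's s3err.ErrObjectParentIsFile API error.
var errObjectParentIsFile = errors.New("object parent is a file")

// parentIsFile mirrors handleParentDirError: walk up the ancestors and
// report whether the first existing component is a regular file.
func parentIsFile(name string) bool {
	dir := filepath.Dir(name)
	for dir != "." && dir != "/" {
		if d, err := os.Stat(dir); err == nil {
			return !d.IsDir()
		}
		dir = filepath.Dir(dir)
	}
	return false
}

// createObjectFile is a hypothetical caller showing the intended use.
func createObjectFile(name string) (*os.File, error) {
	f, err := os.Create(name)
	if err != nil {
		if parentIsFile(name) {
			return nil, errObjectParentIsFile
		}
		return nil, err // genuine ENOENT or other error
	}
	return f, nil
}
```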
File diff suppressed because it is too large
@@ -1,4 +1,4 @@

// Copyright 2025 Versity Software
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at

@@ -12,13 +12,13 @@

// specific language governing permissions and limitations
// under the License.

package utils
//go:build !freebsd && !openbsd && !netbsd
// +build !freebsd,!openbsd,!netbsd

func IsObjectNameValid(name string) bool {
	switch clean(name) {
	case "", ".", "..", "/":
		return false
	}
package posix

	return isObjectLocal(name)
}
import "syscall"

var (
	errNoData = syscall.ENODATA
)
@@ -26,11 +26,9 @@ import (

	"path/filepath"
	"strconv"
	"syscall"
	"time"

	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/s3err"
	"golang.org/x/sys/unix"
)

@@ -45,7 +43,6 @@ type tmpfile struct {

	needsChown bool
	uid        int
	gid        int
	newDirPerm fs.FileMode
}

var (

@@ -53,13 +50,9 @@ var (

	defaultFilePerm uint32 = 0644
)

func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account, dofalloc bool, forceNoTmpFile bool) (*tmpfile, error) {
func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account) (*tmpfile, error) {
	uid, gid, doChown := p.getChownIDs(acct)

	if forceNoTmpFile {
		return p.openMkTemp(dir, bucket, obj, size, dofalloc, uid, gid, doChown)
	}

	// O_TMPFILE allows for a file handle to an unnamed file in the filesystem.
	// This can help reduce contention within the namespace (parent directories),
	// etc. And will auto cleanup the inode on close if we never link this

@@ -68,12 +61,38 @@ func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Accou

	// this is not supported.
	fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, defaultFilePerm)
	if err != nil {
		if errors.Is(err, syscall.EROFS) {
			return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
		// O_TMPFILE not supported, try fallback
		err = backend.MkdirAll(dir, uid, gid, doChown)
		if err != nil {
			return nil, fmt.Errorf("make temp dir: %w", err)
		}
		f, err := os.CreateTemp(dir,
			fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
		if err != nil {
			return nil, err
		}
		tmp := &tmpfile{
			f:          f,
			bucket:     bucket,
			objname:    obj,
			size:       size,
			needsChown: doChown,
			uid:        uid,
			gid:        gid,
		}
		// falloc is best effort, it's fine if this fails
		if size > 0 {
			tmp.falloc()
		}

		// O_TMPFILE not supported, try fallback
		return p.openMkTemp(dir, bucket, obj, size, dofalloc, uid, gid, doChown)
		if doChown {
			err := f.Chown(uid, gid)
			if err != nil {
				return nil, fmt.Errorf("set temp file ownership: %w", err)
			}
		}

		return tmp, nil
	}

	// for O_TMPFILE, filename is /proc/self/fd/<fd> to be used

@@ -89,51 +108,10 @@ func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Accou

		needsChown: doChown,
		uid:        uid,
		gid:        gid,
		newDirPerm: p.newDirPerm,
	}

	// falloc is best effort, it's fine if this fails
	if size > 0 && dofalloc {
		tmp.falloc()
	}

	if doChown {
		err := f.Chown(uid, gid)
		if err != nil {
			return nil, fmt.Errorf("set temp file ownership: %w", err)
		}
	}

	return tmp, nil
}

func (p *Posix) openMkTemp(dir, bucket, obj string, size int64, dofalloc bool, uid, gid int, doChown bool) (*tmpfile, error) {
	err := backend.MkdirAll(dir, uid, gid, doChown, p.newDirPerm)
	if err != nil {
		if errors.Is(err, syscall.EROFS) {
			return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
		}
		return nil, fmt.Errorf("make temp dir: %w", err)
	}
	f, err := os.CreateTemp(dir,
		fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
	if err != nil {
		if errors.Is(err, syscall.EROFS) {
			return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
		}
		return nil, err
	}
	tmp := &tmpfile{
		f:          f,
		bucket:     bucket,
		objname:    obj,
		size:       size,
		needsChown: doChown,
		uid:        uid,
		gid:        gid,
	}
	// falloc is best effort, it's fine if this fails
	if size > 0 && dofalloc {
	if size > 0 {
		tmp.falloc()
	}

@@ -156,9 +134,6 @@ func (tmp *tmpfile) falloc() error {

}

func (tmp *tmpfile) link() error {
	// make sure this is cleaned up in all error cases
	defer tmp.f.Close()

	// We use Linkat/Rename as the atomic operation for object puts. The
	// upload is written to a temp (or unnamed/O_TMPFILE) file to not conflict
	// with any other simultaneous uploads. The final operation is to move the

@@ -166,10 +141,14 @@ func (tmp *tmpfile) link() error {

	// of last upload completed wins and is not some combination of writes
	// from simultaneous uploads.
	objPath := filepath.Join(tmp.bucket, tmp.objname)
	err := os.Remove(objPath)
	if err != nil && !errors.Is(err, fs.ErrNotExist) {
		return fmt.Errorf("remove stale path: %w", err)
	}

	dir := filepath.Dir(objPath)

	err := backend.MkdirAll(dir, tmp.uid, tmp.gid, tmp.needsChown, tmp.newDirPerm)
	err = backend.MkdirAll(dir, tmp.uid, tmp.gid, tmp.needsChown)
	if err != nil {
		return fmt.Errorf("make parent dir: %w", err)
	}

@@ -193,31 +172,9 @@ func (tmp *tmpfile) link() error {

	err = unix.Linkat(int(procdir.Fd()), filepath.Base(tmp.f.Name()),
		int(dirf.Fd()), filepath.Base(objPath), unix.AT_SYMLINK_FOLLOW)
	if errors.Is(err, syscall.EEXIST) {
		// Linkat cannot overwrite files; we will allocate a temporary file, Linkat to it and then Renameat it
		// to avoid potential race condition
		retries := 1
		for {
			tmpName := fmt.Sprintf(".%s.sgwtmp.%d", filepath.Base(objPath), time.Now().UnixNano())
			err := unix.Linkat(int(procdir.Fd()), filepath.Base(tmp.f.Name()),
				int(dirf.Fd()), tmpName, unix.AT_SYMLINK_FOLLOW)
			if errors.Is(err, syscall.EEXIST) && retries < 3 {
				retries += 1
				continue
			}
			if err != nil {
				return fmt.Errorf("cannot find free temporary file: %w", err)
			}

			err = unix.Renameat(int(dirf.Fd()), tmpName, int(dirf.Fd()), filepath.Base(objPath))
			if err != nil {
				return fmt.Errorf("overwriting renameat failed: %w", err)
			}
			break
		}
	} else if err != nil {
		return fmt.Errorf("link tmpfile (fd %q as %q): %w",
			filepath.Base(tmp.f.Name()), objPath, err)
	if err != nil {
		return fmt.Errorf("link tmpfile (%q in %q): %w",
			filepath.Dir(objPath), filepath.Base(tmp.f.Name()), err)
	}

	err = tmp.f.Close()

@@ -245,9 +202,7 @@ func (tmp *tmpfile) fallbackLink() error {

	objPath := filepath.Join(tmp.bucket, tmp.objname)
	err = os.Rename(tempname, objPath)
	if err != nil {
		// rename only works for files within the same filesystem
		// if this fails fallback to copy
		return backend.MoveFile(tempname, objPath, fs.FileMode(defaultFilePerm))
		return fmt.Errorf("rename tmpfile: %w", err)
	}

	return nil

@@ -266,7 +221,3 @@ func (tmp *tmpfile) Write(b []byte) (int, error) {

func (tmp *tmpfile) cleanup() {
	tmp.f.Close()
}

func (tmp *tmpfile) File() *os.File {
	return tmp.f
}
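The pattern the hunk above revolves around is write to an unnamed O_TMPFILE handle, then linkat(2) it into the namespace via /proc/self/fd, which is what makes the final put atomic on Linux. A self-contained sketch of just that mechanism (Linux-only; names and the simplified error handling are illustrative):

```go
//go:build linux

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"

	"golang.org/x/sys/unix"
)

// atomicWrite writes data to an unnamed temp file in dir and then links it
// into place as name. If the process dies early, the unnamed inode is
// reclaimed automatically on close.
func atomicWrite(dir, name string, data []byte) error {
	fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, 0644)
	if err != nil {
		return fmt.Errorf("open O_TMPFILE: %w", err)
	}
	f := os.NewFile(uintptr(fd), filepath.Join("/proc/self/fd", strconv.Itoa(fd)))
	defer f.Close()

	if _, err := f.Write(data); err != nil {
		return err
	}

	procdir, err := os.Open("/proc/self/fd")
	if err != nil {
		return err
	}
	defer procdir.Close()

	dirf, err := os.Open(dir)
	if err != nil {
		return err
	}
	defer dirf.Close()

	// AT_SYMLINK_FOLLOW makes linkat resolve the /proc/self/fd symlink to
	// the underlying inode. Note linkat fails with EEXIST if name already
	// exists, which is why the gateway's link() adds a rename dance.
	return unix.Linkat(int(procdir.Fd()), strconv.Itoa(fd),
		int(dirf.Fd()), name, unix.AT_SYMLINK_FOLLOW)
}
```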
@@ -1,4 +1,4 @@

// Copyright 2026 Versity Software
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at

@@ -12,8 +12,13 @@

// specific language governing permissions and limitations
// under the License.

//go:build freebsd
//go:build freebsd || openbsd || netbsd
// +build freebsd openbsd netbsd

package meta
package posix

const xattrPrefix = ""
import "syscall"

var (
	errNoData = syscall.ENOATTR
)

@@ -24,11 +24,9 @@ import (

	"io/fs"
	"os"
	"path/filepath"
	"syscall"

	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/s3err"
)

type tmpfile struct {

@@ -38,24 +36,18 @@ type tmpfile struct {

	size int64
}

func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account, _ bool, _ bool) (*tmpfile, error) {
func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account) (*tmpfile, error) {
	uid, gid, doChown := p.getChownIDs(acct)

	// Create a temp file for upload while in progress (see link comments below).
	var err error
	err = backend.MkdirAll(dir, uid, gid, doChown, p.newDirPerm)
	err = backend.MkdirAll(dir, uid, gid, doChown)
	if err != nil {
		if errors.Is(err, syscall.EROFS) {
			return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
		}
		return nil, fmt.Errorf("make temp dir: %w", err)
	}
	f, err := os.CreateTemp(dir,
		fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
	if err != nil {
		if errors.Is(err, syscall.EROFS) {
			return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
		}
		return nil, fmt.Errorf("create temp file: %w", err)
	}

@@ -80,17 +72,31 @@ func (tmp *tmpfile) link() error {

	// this will no longer exist
	defer os.Remove(tempname)

	// We use Rename as the atomic operation for object puts. The upload is
	// written to a temp file to not conflict with any other simultaneous
	// uploads. The final operation is to move the temp file into place for
	// the object. This ensures the object semantics of last upload completed
	// wins and is not some combination of writes from simultaneous uploads.
	objPath := filepath.Join(tmp.bucket, tmp.objname)
	err := os.Remove(objPath)
	if err != nil && !errors.Is(err, fs.ErrNotExist) {
		return fmt.Errorf("remove stale path: %w", err)
	}

	// reset default file mode because CreateTemp uses 0600
	tmp.f.Chmod(defaultFilePerm)

	err := tmp.f.Close()
	err = tmp.f.Close()
	if err != nil {
		return fmt.Errorf("close tmpfile: %w", err)
	}

	return backend.MoveFile(tempname, objPath, defaultFilePerm)
	err = os.Rename(tempname, objPath)
	if err != nil {
		return fmt.Errorf("rename tmpfile: %w", err)
	}

	return nil
}

func (tmp *tmpfile) Write(b []byte) (int, error) {

@@ -106,7 +112,3 @@ func (tmp *tmpfile) Write(b []byte) (int, error) {

func (tmp *tmpfile) cleanup() {
	tmp.f.Close()
}

func (tmp *tmpfile) File() *os.File {
	return tmp.f
}
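The BSDs have no O_TMPFILE, so this variant falls back to the portable CreateTemp-then-rename dance shown above. A minimal sketch of that pattern, assuming source and destination share a filesystem so rename(2) is atomic:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// atomicPut writes data to a temp file and renames it into place:
// last completed rename wins, and readers never see a partial object.
func atomicPut(dir, name string, data []byte) error {
	f, err := os.CreateTemp(dir, ".upload.")
	if err != nil {
		return err
	}
	tmpname := f.Name()
	defer os.Remove(tmpname) // no-op once the rename succeeds

	if _, err := f.Write(data); err != nil {
		f.Close()
		return err
	}
	// CreateTemp opens files 0600; widen to the intended object mode.
	if err := f.Chmod(0644); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return fmt.Errorf("close tmpfile: %w", err)
	}

	// rename(2) atomically replaces an existing destination on POSIX
	// systems when both paths live on the same filesystem.
	return os.Rename(tmpname, filepath.Join(dir, name))
}
```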
@@ -33,17 +33,6 @@ func (s *S3Proxy) getClientWithCtx(ctx context.Context) (*s3.Client, error) {

		return nil, err
	}

	if s.endpoint != "" {
		return s3.NewFromConfig(cfg, func(o *s3.Options) {
			o.BaseEndpoint = &s.endpoint
			o.UsePathStyle = s.usePathStyle
			// The http body stream is not seekable, so most operations cannot
			// be retried. The error returned to the original client may be
			// retried by the client.
			o.Retryer = aws.NopRetryer{}
		}), nil
	}

	return s3.NewFromConfig(cfg), nil
}

@@ -61,6 +50,11 @@ func (s *S3Proxy) getConfig(ctx context.Context, access, secret string) (aws.Con

		config.WithHTTPClient(client),
	}

	if s.endpoint != "" {
		opts = append(opts,
			config.WithEndpointResolverWithOptions(s))
	}

	if s.disableChecksum {
		opts = append(opts,
			config.WithAPIOptions([]func(*middleware.Stack) error{v4.SwapComputePayloadSHA256ForUnsignedPayloadMiddleware}))

@@ -73,3 +67,13 @@ func (s *S3Proxy) getConfig(ctx context.Context, access, secret string) (aws.Con

	return config.LoadDefaultConfig(ctx, opts...)
}

// ResolveEndpoint is used for on-prem or non-AWS endpoints
func (s *S3Proxy) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) {
	return aws.Endpoint{
		PartitionID:       "aws",
		URL:               s.endpoint,
		SigningRegion:     s.awsRegion,
		HostnameImmutable: true,
	}, nil
}
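This hunk shows two generations of custom-endpoint wiring in aws-sdk-go-v2: the newer per-client s3.Options.BaseEndpoint and the older config.WithEndpointResolverWithOptions hook. A hedged sketch of the newer form; the endpoint URL and region are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}

	endpoint := "http://127.0.0.1:7070" // placeholder gateway endpoint

	// Point the client at a non-AWS endpoint; most such endpoints want
	// path-style bucket addressing rather than virtual-host style.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.BaseEndpoint = aws.String(endpoint)
		o.UsePathStyle = true
	})
	_ = client
}
```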
File diff suppressed because it is too large
@@ -15,33 +15,827 @@

package scoutfs

import (
	"context"
	"crypto/sha256"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/pkg/xattr"
	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/backend/posix"
	"github.com/versity/versitygw/s3err"
)

// ScoutfsOpts are the options for the ScoutFS backend
type ScoutfsOpts struct {
	// ChownUID sets the UID of the object to the UID of the user on PUT
	ChownUID bool
	// ChownGID sets the GID of the object to the GID of the user on PUT
	ChownGID bool
	// SetProjectID sets the Project ID of the bucket/object to the project ID of the user on PUT
	SetProjectID bool
	// BucketLinks enables symlinks to directories to be treated as buckets
	BucketLinks bool
	// VersioningDir sets the version directory to enable object versioning
	VersioningDir string
	// NewDirPerm specifies the permission to set on newly created directories
	NewDirPerm fs.FileMode
	// GlacierMode enables glacier emulation for offline files
	ChownUID    bool
	ChownGID    bool
	ReadOnly    bool
	GlacierMode bool
	// DisableNoArchive prevents setting noarchive on temporary files
	DisableNoArchive bool
	// ValidateBucketNames enables minimal bucket name validation to prevent
	// incorrect access to the filesystem. This is only needed if the
	// frontend is not already validating bucket names.
	ValidateBucketNames bool
}

type ScoutFS struct {
	*posix.Posix
	rootfd  *os.File
	rootdir string

	// glaciermode enables the following behavior:
	// GET object:  if file offline, return invalid object state
	// HEAD object: if file offline, set obj storage class to GLACIER
	//              if file offline and staging, x-amz-restore: ongoing-request="true"
	//              if file offline and not staging, x-amz-restore: ongoing-request="false"
	//              if file online, x-amz-restore: ongoing-request="false", expiry-date="Fri, 2 Dec 2050 00:00:00 GMT"
	//              note: this expiry-date is not used but provided for client glacier compatibility
	// ListObjects: if file offline, set obj storage class to GLACIER
	// RestoreObject: add batch stage request to file
	glaciermode bool

	// chownuid/gid enable chowning of files to the account uid/gid
	// when objects are uploaded
	chownuid bool
	chowngid bool

	// read only mode prevents any backend modifications
	readonly bool

	// euid/egid are the effective uid/gid of the running versitygw process
	// used to determine if chowning is needed
	euid int
	egid int
}

var _ backend.Backend = &ScoutFS{}

const (
	metaTmpDir          = ".sgwtmp"
	metaTmpMultipartDir = metaTmpDir + "/multipart"
	tagHdr              = "X-Amz-Tagging"
	emptyMD5            = "d41d8cd98f00b204e9800998ecf8427e"
	etagkey             = "user.etag"
)

var (
	stageComplete      = "ongoing-request=\"false\", expiry-date=\"Fri, 2 Dec 2050 00:00:00 GMT\""
	stageInProgress    = "true"
	stageNotInProgress = "false"
)

const (
	// ScoutFS special xattr types

	systemPrefix = "scoutfs.hide."
	onameAttr    = systemPrefix + "objname"
	flagskey     = systemPrefix + "sam_flags"
	stagecopykey = systemPrefix + "sam_stagereq"
)

const (
	// ScoutAM Flags

	// Staging - file requested stage
	Staging uint64 = 1 << iota
	// StageFail - all copies failed to stage
	StageFail
	// NoArchive - no archive copies of file should be made
	NoArchive
	// ExtCacheRequested means file policy requests Ext Cache
	ExtCacheRequested
	// ExtCacheDone means this file ext cache copy has been
	// created already (and possibly pruned, so may not exist)
	ExtCacheDone
)
func (s *ScoutFS) Shutdown() {
	s.Posix.Shutdown()
	s.rootfd.Close()
	_ = s.rootdir
}

func (*ScoutFS) String() string {
	return "ScoutFS Gateway"
}

// getChownIDs returns the uid and gid that should be used for chowning
// the object to the account uid/gid. It also returns a boolean indicating
// if chowning is needed.
func (s *ScoutFS) getChownIDs(acct auth.Account) (int, int, bool) {
	uid := s.euid
	gid := s.egid
	var needsChown bool
	if s.chownuid && acct.UserID != s.euid {
		uid = acct.UserID
		needsChown = true
	}
	if s.chowngid && acct.GroupID != s.egid {
		gid = acct.GroupID
		needsChown = true
	}

	return uid, gid, needsChown
}

// CompleteMultipartUpload scoutfs complete upload uses scoutfs move blocks
// ioctl to not have to read and copy the part data to the final object. This
// saves a read and write cycle for all multipart uploads.
func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
	if s.readonly {
		return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
	}

	acct, ok := ctx.Value("account").(auth.Account)
	if !ok {
		acct = auth.Account{}
	}

	if input.Bucket == nil {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	if input.Key == nil {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	if input.UploadId == nil {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchUpload)
	}
	if input.MultipartUpload == nil {
		return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
	}

	bucket := *input.Bucket
	object := *input.Key
	uploadID := *input.UploadId
	parts := input.MultipartUpload.Parts

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
	}
	if err != nil {
		return nil, fmt.Errorf("stat bucket: %w", err)
	}

	sum, err := s.checkUploadIDExists(bucket, object, uploadID)
	if err != nil {
		return nil, err
	}

	objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))

	// check all parts ok
	last := len(parts) - 1
	partsize := int64(0)
	var totalsize int64
	for i, p := range parts {
		if p.PartNumber == nil {
			return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
		}
		partPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *p.PartNumber))
		fi, err := os.Lstat(partPath)
		if err != nil {
			return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
		}

		if i == 0 {
			partsize = fi.Size()
		}
		totalsize += fi.Size()
		// all parts except the last need to be the same size
		if i < last && partsize != fi.Size() {
			return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
		}
		// non-last part sizes need to be multiples of 4k for move blocks
		// TODO: fallback to no move blocks if not 4k aligned?
		if i == 0 && i < last && fi.Size()%4096 != 0 {
			return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
		}

		b, err := xattr.Get(partPath, "user.etag")
		etag := string(b)
		if err != nil {
			etag = ""
		}
		if etag != *parts[i].ETag {
			return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
		}
	}

	// use totalsize=0 because we won't be writing to the file, only moving
	// extents around, so we don't want to fallocate this.
	f, err := s.openTmpFile(filepath.Join(bucket, metaTmpDir), bucket, object, 0, acct)
	if err != nil {
		if errors.Is(err, syscall.EDQUOT) {
			return nil, s3err.GetAPIError(s3err.ErrQuotaExceeded)
		}
		return nil, fmt.Errorf("open temp file: %w", err)
	}
	defer f.cleanup()

	for _, p := range parts {
		pf, err := os.Open(filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *p.PartNumber)))
		if err != nil {
			return nil, fmt.Errorf("open part %v: %v", *p.PartNumber, err)
		}

		// scoutfs move data is a metadata only operation that moves the data
		// extent references from the source, appending to the destination.
		// this needs to be 4k aligned.
		err = moveData(pf, f.f)
		pf.Close()
		if err != nil {
			return nil, fmt.Errorf("move blocks part %v: %v", *p.PartNumber, err)
		}
	}

	userMetaData := make(map[string]string)
	upiddir := filepath.Join(objdir, uploadID)
	loadUserMetaData(upiddir, userMetaData)

	objname := filepath.Join(bucket, object)
	dir := filepath.Dir(objname)
	if dir != "" {
		uid, gid, doChown := s.getChownIDs(acct)
		err = backend.MkdirAll(dir, uid, gid, doChown)
		if err != nil {
			return nil, err
		}
	}
	err = f.link()
	if err != nil {
		return nil, fmt.Errorf("link object in namespace: %w", err)
	}

	for k, v := range userMetaData {
		err = xattr.Set(objname, "user."+k, []byte(v))
		if err != nil {
			// cleanup object if returning error
			os.Remove(objname)
			return nil, fmt.Errorf("set user attr %q: %w", k, err)
		}
	}

	// Calculate s3 compatible md5sum for complete multipart.
	s3MD5 := backend.GetMultipartMD5(parts)

	err = xattr.Set(objname, "user.etag", []byte(s3MD5))
	if err != nil {
		// cleanup object if returning error
		os.Remove(objname)
		return nil, fmt.Errorf("set etag attr: %w", err)
	}

	// cleanup tmp dirs
	os.RemoveAll(upiddir)
	// use Remove for objdir in case there are still other uploads
	// for same object name outstanding
	os.Remove(objdir)

	return &s3.CompleteMultipartUploadOutput{
		Bucket: &bucket,
		ETag:   &s3MD5,
		Key:    &object,
	}, nil
}
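backend.GetMultipartMD5 above produces the S3-style multipart ETag: the MD5 of the concatenated binary part MD5s, suffixed with the part count. A standalone sketch of that well-known convention (my reconstruction, not the repo's exact helper):

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"strings"
)

// multipartETag reproduces the S3 multipart ETag convention:
// md5(concat(md5(part1), md5(part2), ...)) + "-" + <part count>.
func multipartETag(partETags []string) string {
	var concat []byte
	for _, e := range partETags {
		// part ETags are hex MD5 strings, possibly quoted
		b, err := hex.DecodeString(strings.Trim(e, `"`))
		if err != nil {
			continue // skip malformed etags in this sketch
		}
		concat = append(concat, b...)
	}
	sum := md5.Sum(concat)
	return fmt.Sprintf("%s-%d", hex.EncodeToString(sum[:]), len(partETags))
}

func main() {
	fmt.Println(multipartETag([]string{
		"5d41402abc4b2a76b9719d911017c592", // md5("hello")
		"7d793037a0760186574b0282f2f435e7", // md5("world")
	}))
}
```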
func (s *ScoutFS) checkUploadIDExists(bucket, object, uploadID string) ([32]byte, error) {
	sum := sha256.Sum256([]byte(object))
	objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))

	_, err := os.Stat(filepath.Join(objdir, uploadID))
	if errors.Is(err, fs.ErrNotExist) {
		return [32]byte{}, s3err.GetAPIError(s3err.ErrNoSuchUpload)
	}
	if err != nil {
		return [32]byte{}, fmt.Errorf("stat upload: %w", err)
	}
	return sum, nil
}

func loadUserMetaData(path string, m map[string]string) (contentType, contentEncoding string) {
	ents, err := xattr.List(path)
	if err != nil || len(ents) == 0 {
		return
	}
	for _, e := range ents {
		if !isValidMeta(e) {
			continue
		}
		b, err := xattr.Get(path, e)
		if err == errNoData {
			m[strings.TrimPrefix(e, "user.")] = ""
			continue
		}
		if err != nil {
			continue
		}
		m[strings.TrimPrefix(e, "user.")] = string(b)
	}

	b, err := xattr.Get(path, "user.content-type")
	contentType = string(b)
	if err != nil {
		contentType = ""
	}
	if contentType != "" {
		m["content-type"] = contentType
	}

	b, err = xattr.Get(path, "user.content-encoding")
	contentEncoding = string(b)
	if err != nil {
		contentEncoding = ""
	}
	if contentEncoding != "" {
		m["content-encoding"] = contentEncoding
	}

	return
}

func isValidMeta(val string) bool {
	if strings.HasPrefix(val, "user.X-Amz-Meta") {
		return true
	}
	if strings.EqualFold(val, "user.Expires") {
		return true
	}
	return false
}
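User metadata rides on the object as user.X-Amz-Meta-* xattrs, which loadUserMetaData strips back down to plain S3 metadata keys. A small round-trip sketch with github.com/pkg/xattr, the same library used above; the file path is a placeholder and must already exist:

```go
package main

import (
	"fmt"
	"log"

	"github.com/pkg/xattr"
)

func main() {
	path := "/tmp/example-object" // placeholder; must be an existing file

	// Store a piece of S3 user metadata the way the gateway does:
	// prefixed under the "user." xattr namespace.
	if err := xattr.Set(path, "user.X-Amz-Meta-Color", []byte("blue")); err != nil {
		log.Fatal(err)
	}

	// Listing returns fully-qualified attribute names; trimming the
	// prefix recovers the S3 metadata key.
	attrs, err := xattr.List(path)
	if err != nil {
		log.Fatal(err)
	}
	for _, a := range attrs {
		v, _ := xattr.Get(path, a)
		fmt.Printf("%s = %s\n", a, v)
	}
}
```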
func (s *ScoutFS) HeadObject(_ context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
	bucket := *input.Bucket
	object := *input.Key

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
	}
	if err != nil {
		return nil, fmt.Errorf("stat bucket: %w", err)
	}

	objPath := filepath.Join(bucket, object)
	fi, err := os.Stat(objPath)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	if err != nil {
		return nil, fmt.Errorf("stat object: %w", err)
	}

	userMetaData := make(map[string]string)
	contentType, contentEncoding := loadUserMetaData(objPath, userMetaData)

	b, err := xattr.Get(objPath, etagkey)
	etag := string(b)
	if err != nil {
		etag = ""
	}

	stclass := types.StorageClassStandard
	requestOngoing := ""
	if s.glaciermode {
		requestOngoing = stageComplete

		// Check if there are any offline extents associated with this file.
		// If so, we will set storage class to glacier.
		st, err := statMore(objPath)
		if errors.Is(err, fs.ErrNotExist) {
			return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
		}
		if err != nil {
			return nil, fmt.Errorf("stat more: %w", err)
		}
		if st.Offline_blocks != 0 {
			stclass = types.StorageClassGlacier
			requestOngoing = stageNotInProgress

			ok, err := isStaging(objPath)
			if errors.Is(err, fs.ErrNotExist) {
				return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
			}
			if err != nil {
				return nil, fmt.Errorf("check stage status: %w", err)
			}
			if ok {
				requestOngoing = stageInProgress
			}
		}
	}

	contentLength := fi.Size()

	return &s3.HeadObjectOutput{
		ContentLength:   &contentLength,
		ContentType:     &contentType,
		ContentEncoding: &contentEncoding,
		ETag:            &etag,
		LastModified:    backend.GetTimePtr(fi.ModTime()),
		Metadata:        userMetaData,
		StorageClass:    stclass,
		Restore:         &requestOngoing,
	}, nil
}
func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
	bucket := *input.Bucket
	object := *input.Key
	acceptRange := *input.Range

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
	}
	if err != nil {
		return nil, fmt.Errorf("stat bucket: %w", err)
	}

	objPath := filepath.Join(bucket, object)
	fi, err := os.Stat(objPath)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	if err != nil {
		return nil, fmt.Errorf("stat object: %w", err)
	}

	startOffset, length, err := backend.ParseRange(fi, acceptRange)
	if err != nil {
		return nil, err
	}

	objSize := fi.Size()
	if fi.IsDir() {
		// directory objects are always 0 len
		objSize = 0
		length = 0
	}

	if length == -1 {
		length = fi.Size() - startOffset + 1
	}

	if startOffset+length > fi.Size() {
		return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
	}

	var contentRange string
	if acceptRange != "" {
		contentRange = fmt.Sprintf("bytes %v-%v/%v", startOffset, startOffset+length-1, objSize)
	}

	if s.glaciermode {
		// Check if there are any offline extents associated with this file.
		// If so, we will return the InvalidObjectState error.
		st, err := statMore(objPath)
		if errors.Is(err, fs.ErrNotExist) {
			return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
		}
		if err != nil {
			return nil, fmt.Errorf("stat more: %w", err)
		}
		if st.Offline_blocks != 0 {
			return nil, s3err.GetAPIError(s3err.ErrInvalidObjectState)
		}
	}

	f, err := os.Open(objPath)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	if err != nil {
		return nil, fmt.Errorf("open object: %w", err)
	}
	defer f.Close()

	rdr := io.NewSectionReader(f, startOffset, length)
	_, err = io.Copy(writer, rdr)
	if err != nil {
		return nil, fmt.Errorf("copy data: %w", err)
	}

	userMetaData := make(map[string]string)

	contentType, contentEncoding := loadUserMetaData(objPath, userMetaData)

	b, err := xattr.Get(objPath, etagkey)
	etag := string(b)
	if err != nil {
		etag = ""
	}

	tags, err := s.getXattrTags(bucket, object)
	if err != nil {
		return nil, fmt.Errorf("get object tags: %w", err)
	}

	tagCount := int32(len(tags))

	return &s3.GetObjectOutput{
		AcceptRanges:    &acceptRange,
		ContentLength:   &length,
		ContentEncoding: &contentEncoding,
		ContentType:     &contentType,
		ETag:            &etag,
		LastModified:    backend.GetTimePtr(fi.ModTime()),
		Metadata:        userMetaData,
		TagCount:        &tagCount,
		StorageClass:    types.StorageClassStandard,
		ContentRange:    &contentRange,
	}, nil
}

func (s *ScoutFS) getXattrTags(bucket, object string) (map[string]string, error) {
	tags := make(map[string]string)
	b, err := xattr.Get(filepath.Join(bucket, object), "user."+tagHdr)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	if isNoAttr(err) {
		return tags, nil
	}
	if err != nil {
		return nil, fmt.Errorf("get tags: %w", err)
	}

	err = json.Unmarshal(b, &tags)
	if err != nil {
		return nil, fmt.Errorf("unmarshal tags: %w", err)
	}

	return tags, nil
}
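Ranged GETs above are served by wrapping the object file in io.NewSectionReader, which bounds reads to [offset, offset+length) without copying the whole file. A tiny illustration of the same mechanism on an in-memory reader:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// Any io.ReaderAt works; an *os.File is the real-world case.
	src := strings.NewReader("hello, ranged world")

	// Serve bytes 7..12 ("ranged"), analogous to "Range: bytes=7-12".
	sec := io.NewSectionReader(src, 7, 6)

	var out strings.Builder
	if _, err := io.Copy(&out, sec); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // ranged
}
```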
func (s *ScoutFS) ListObjects(_ context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
	if input.Bucket == nil {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	bucket := *input.Bucket
	prefix := ""
	if input.Prefix != nil {
		prefix = *input.Prefix
	}
	marker := ""
	if input.Marker != nil {
		marker = *input.Marker
	}
	delim := ""
	if input.Delimiter != nil {
		delim = *input.Delimiter
	}
	maxkeys := int32(0)
	if input.MaxKeys != nil {
		maxkeys = *input.MaxKeys
	}

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
	}
	if err != nil {
		return nil, fmt.Errorf("stat bucket: %w", err)
	}

	fileSystem := os.DirFS(bucket)
	results, err := backend.Walk(fileSystem, prefix, delim, marker, maxkeys,
		s.fileToObj(bucket), []string{metaTmpDir})
	if err != nil {
		return nil, fmt.Errorf("walk %v: %w", bucket, err)
	}

	return &s3.ListObjectsOutput{
		CommonPrefixes: results.CommonPrefixes,
		Contents:       results.Objects,
		Delimiter:      &delim,
		IsTruncated:    &results.Truncated,
		Marker:         &marker,
		MaxKeys:        &maxkeys,
		Name:           &bucket,
		NextMarker:     &results.NextMarker,
		Prefix:         &prefix,
	}, nil
}

func (s *ScoutFS) ListObjectsV2(_ context.Context, input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
	if input.Bucket == nil {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	bucket := *input.Bucket
	prefix := ""
	if input.Prefix != nil {
		prefix = *input.Prefix
	}
	marker := ""
	if input.ContinuationToken != nil {
		marker = *input.ContinuationToken
	}
	delim := ""
	if input.Delimiter != nil {
		delim = *input.Delimiter
	}
	maxkeys := int32(0)
	if input.MaxKeys != nil {
		maxkeys = *input.MaxKeys
	}

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
	}
	if err != nil {
		return nil, fmt.Errorf("stat bucket: %w", err)
	}

	fileSystem := os.DirFS(bucket)
	results, err := backend.Walk(fileSystem, prefix, delim, marker, int32(maxkeys),
		s.fileToObj(bucket), []string{metaTmpDir})
	if err != nil {
		return nil, fmt.Errorf("walk %v: %w", bucket, err)
	}

	return &s3.ListObjectsV2Output{
		CommonPrefixes:        results.CommonPrefixes,
		Contents:              results.Objects,
		Delimiter:             &delim,
		IsTruncated:           &results.Truncated,
		ContinuationToken:     &marker,
		MaxKeys:               &maxkeys,
		Name:                  &bucket,
		NextContinuationToken: &results.NextMarker,
		Prefix:                &prefix,
	}, nil
}
func (s *ScoutFS) fileToObj(bucket string) backend.GetObjFunc {
	return func(path string, d fs.DirEntry) (types.Object, error) {
		objPath := filepath.Join(bucket, path)
		if d.IsDir() {
			// directory object only happens if directory empty
			// check to see if this is a directory object by checking etag
			etagBytes, err := xattr.Get(objPath, etagkey)
			if isNoAttr(err) || errors.Is(err, fs.ErrNotExist) {
				return types.Object{}, backend.ErrSkipObj
			}
			if err != nil {
				return types.Object{}, fmt.Errorf("get etag: %w", err)
			}
			etag := string(etagBytes)

			fi, err := d.Info()
			if errors.Is(err, fs.ErrNotExist) {
				return types.Object{}, backend.ErrSkipObj
			}
			if err != nil {
				return types.Object{}, fmt.Errorf("get fileinfo: %w", err)
			}

			key := path + "/"

			return types.Object{
				ETag:         &etag,
				Key:          &key,
				LastModified: backend.GetTimePtr(fi.ModTime()),
			}, nil
		}

		// file object, get object info and fill out object data
		etagBytes, err := xattr.Get(objPath, etagkey)
		if errors.Is(err, fs.ErrNotExist) {
			return types.Object{}, backend.ErrSkipObj
		}
		if err != nil && !isNoAttr(err) {
			return types.Object{}, fmt.Errorf("get etag: %w", err)
		}
		etag := string(etagBytes)

		fi, err := d.Info()
		if errors.Is(err, fs.ErrNotExist) {
			return types.Object{}, backend.ErrSkipObj
		}
		if err != nil {
			return types.Object{}, fmt.Errorf("get fileinfo: %w", err)
		}

		sc := types.ObjectStorageClassStandard
		if s.glaciermode {
			// Check if there are any offline extents associated with this file.
			// If so, we will set the storage class to glacier.
			st, err := statMore(objPath)
			if errors.Is(err, fs.ErrNotExist) {
				return types.Object{}, backend.ErrSkipObj
			}
			if err != nil {
				return types.Object{}, fmt.Errorf("stat more: %w", err)
			}
			if st.Offline_blocks != 0 {
				sc = types.ObjectStorageClassGlacier
			}
		}

		size := fi.Size()

		return types.Object{
			ETag:         &etag,
			Key:          &path,
			LastModified: backend.GetTimePtr(fi.ModTime()),
			Size:         &size,
			StorageClass: sc,
		}, nil
	}
}
// RestoreObject will set stage request on file if offline and do nothing if
// file is online
func (s *ScoutFS) RestoreObject(_ context.Context, input *s3.RestoreObjectInput) error {
	bucket := *input.Bucket
	object := *input.Key

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchBucket)
	}
	if err != nil {
		return fmt.Errorf("stat bucket: %w", err)
	}

	err = setStaging(filepath.Join(bucket, object))
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	if err != nil {
		return fmt.Errorf("stage object: %w", err)
	}

	return nil
}

func setStaging(objname string) error {
	b, err := xattr.Get(objname, flagskey)
	if err != nil && !isNoAttr(err) {
		return err
	}

	var oldflags uint64
	if !isNoAttr(err) {
		err = json.Unmarshal(b, &oldflags)
		if err != nil {
			return err
		}
	}

	newflags := oldflags | Staging

	if newflags == oldflags {
		// no flags change, just return
		return nil
	}

	return fSetNewGlobalFlags(objname, newflags)
}

func isStaging(objname string) (bool, error) {
	b, err := xattr.Get(objname, flagskey)
	if err != nil && !isNoAttr(err) {
		return false, err
	}

	var flags uint64
	if !isNoAttr(err) {
		err = json.Unmarshal(b, &flags)
		if err != nil {
			return false, err
		}
	}

	return flags&Staging == Staging, nil
}

func fSetNewGlobalFlags(objname string, flags uint64) error {
	b, err := json.Marshal(&flags)
	if err != nil {
		return err
	}

	return xattr.Set(objname, flagskey, b)
}

func isNoAttr(err error) bool {
	if err == nil {
		return false
	}
	xerr, ok := err.(*xattr.Error)
	if ok && xerr.Err == xattr.ENOATTR {
		return true
	}
	if err == errNoData {
		return true
	}
	return false
}
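The staging state is a plain uint64 bitmask, JSON-encoded into the hidden sam_flags xattr: setStaging ORs in the Staging bit and writes back only on change, and isStaging tests it. A minimal bitmask sketch of the same idea; the flag names mirror the constants above, and persistence is elided:

```go
package main

import "fmt"

const (
	Staging uint64 = 1 << iota
	StageFail
	NoArchive
)

func main() {
	var flags uint64

	// setStaging: OR the bit in, writing back only when it changed.
	if flags&Staging == 0 {
		flags |= Staging
		// ... persist the JSON-encoded flags to the sam_flags xattr here
	}

	// isStaging: test the bit.
	fmt.Println("staging:", flags&Staging == Staging)    // true
	fmt.Println("failed: ", flags&StageFail == StageFail) // false
}
```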
@@ -17,70 +17,27 @@
|
||||
package scoutfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/pkg/xattr"
|
||||
"golang.org/x/sys/unix"
|
||||
|
||||
"github.com/versity/scoutfs-go"
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/backend/meta"
|
||||
"github.com/versity/versitygw/backend/posix"
|
||||
"github.com/versity/versitygw/debuglogger"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
"github.com/versity/versitygw/s3response"
|
||||
)
|
||||
|
||||
type ScoutFS struct {
|
||||
*posix.Posix
|
||||
rootfd *os.File
|
||||
rootdir string
|
||||
|
||||
// glaciermode enables the following behavior:
|
||||
// GET object: if file offline, return invalid object state
|
||||
// HEAD object: if file offline, set obj storage class to GLACIER
|
||||
// if file offline and staging, x-amz-restore: ongoing-request="true"
|
||||
// if file offline and not staging, x-amz-restore: ongoing-request="false"
|
||||
// if file online, x-amz-restore: ongoing-request="false", expiry-date="Fri, 2 Dec 2050 00:00:00 GMT"
|
||||
// note: this expiry-date is not used but provided for client glacier compatibility
|
||||
// ListObjects: if file offline, set obj storage class to GLACIER
|
||||
// RestoreObject: add batch stage request to file
|
||||
glaciermode bool
|
||||
|
||||
// disableNoArchive is used to disable setting scoutam noarchive flag
|
||||
// on multipart parts. This is enabled by default to prevent archive
|
||||
// copies of temporary multipart parts.
|
||||
disableNoArchive bool
|
||||
|
||||
// enable posix level bucket name validations, not needed if the
|
||||
// frontend handlers are already validating bucket names
|
||||
validateBucketName bool
|
||||
|
||||
// projectIDEnabled enables setting projectid of new buckets and objects
|
||||
// to the account project id when non-0
|
||||
projectIDEnabled bool
|
||||
}
|
||||
|
||||
func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
|
||||
metastore := meta.XattrMeta{}
|
||||
|
||||
p, err := posix.New(rootdir, metastore, posix.PosixOpts{
|
||||
ChownUID: opts.ChownUID,
|
||||
ChownGID: opts.ChownGID,
|
||||
BucketLinks: opts.BucketLinks,
|
||||
NewDirPerm: opts.NewDirPerm,
|
||||
VersioningDir: opts.VersioningDir,
|
||||
ValidateBucketNames: opts.ValidateBucketNames,
|
||||
p, err := posix.New(rootdir, posix.PosixOpts{
|
||||
ChownUID: opts.ChownUID,
|
||||
ChownGID: opts.ChownGID,
|
||||
ReadOnly: opts.ReadOnly,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -91,491 +48,160 @@ func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
|
||||
return nil, fmt.Errorf("open %v: %w", rootdir, err)
|
||||
}
|
||||
|
||||
setProjectID := opts.SetProjectID
|
||||
if opts.SetProjectID {
|
||||
setProjectID = fGetFormatVersion(f).AtLeast(versionScoutFsV2)
|
||||
if !setProjectID {
|
||||
fmt.Println("WARNING:")
|
||||
fmt.Println("Disabling ProjectIDs for unsupported FS format version")
|
||||
fmt.Println("See documentation for format version upgrades")
|
||||
}
|
||||
}
|
||||
|
||||
return &ScoutFS{
|
||||
Posix: p,
|
||||
rootfd: f,
|
||||
rootdir: rootdir,
|
||||
glaciermode: opts.GlacierMode,
|
||||
disableNoArchive: opts.DisableNoArchive,
|
||||
projectIDEnabled: setProjectID,
|
||||
Posix: p,
|
||||
rootfd: f,
|
||||
rootdir: rootdir,
|
||||
chownuid: opts.ChownUID,
|
||||
chowngid: opts.ChownGID,
|
||||
readonly: opts.ReadOnly,
|
||||
}, nil
|
||||
}
|
||||
|
||||
const (
|
||||
stageComplete = "ongoing-request=\"false\", expiry-date=\"Fri, 2 Dec 2050 00:00:00 GMT\""
|
||||
stageInProgress = "true"
|
||||
stageNotInProgress = "false"
|
||||
)
|
||||
const procfddir = "/proc/self/fd"
|
||||
|
||||
const (
|
||||
// ScoutFS special xattr types
|
||||
systemPrefix = "scoutfs.hide."
|
||||
flagskey = systemPrefix + "sam_flags"
|
||||
)
|
||||
|
||||
const (
|
||||
// ScoutAM Flags
|
||||
|
||||
// Staging - file requested stage
|
||||
Staging uint64 = 1 << iota
|
||||
// StageFail - all copies failed to stage
|
||||
StageFail
|
||||
// NoArchive - no archive copies of file should be made
|
||||
NoArchive
|
||||
// ExtCacheRequested means file policy requests Ext Cache
|
||||
ExtCacheRequested
|
||||
// ExtCacheDone means this file ext cache copy has been
|
||||
// created already (and possibly pruned, so may not exist)
|
||||
ExtCacheDone
|
||||
)

func (s *ScoutFS) Shutdown() {
	s.Posix.Shutdown()
	s.rootfd.Close()
type tmpfile struct {
	f          *os.File
	bucket     string
	objname    string
	size       int64
	needsChown bool
	uid        int
	gid        int
}

func (*ScoutFS) String() string {
	return "ScoutFS Gateway"
}
var (
	// TODO: make this configurable
	defaultFilePerm uint32 = 0644
)

func (s *ScoutFS) CreateBucket(ctx context.Context, input *s3.CreateBucketInput, acl []byte) error {
	err := s.Posix.CreateBucket(ctx, input, acl)
	if err != nil {
		return err
	}
func (s *ScoutFS) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account) (*tmpfile, error) {
	uid, gid, doChown := s.getChownIDs(acct)

	if s.projectIDEnabled {
		acct, ok := ctx.Value("account").(auth.Account)
		if !ok {
			acct = auth.Account{}
		}

		if !isValidProjectID(acct.ProjectID) {
			// early return to avoid the open if we don't have a valid
			// project id
			return nil
		}

		f, err := os.Open(*input.Bucket)
		if err != nil {
			debuglogger.InternalError(fmt.Errorf("create bucket %q set project id - open: %v",
				*input.Bucket, err))
			return nil
		}

		err = s.setProjectID(f, acct.ProjectID)
		f.Close()
		if err != nil {
			debuglogger.InternalError(fmt.Errorf("create bucket %q set project id: %v",
				*input.Bucket, err))
		}
	}

	return nil
}

func (s *ScoutFS) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
	res, err := s.Posix.HeadObject(ctx, input)
	// O_TMPFILE allows for a file handle to an unnamed file in the filesystem.
	// This can help reduce contention within the namespace (parent directories),
	// etc. And will auto cleanup the inode on close if we never link this
	// file descriptor into the namespace.
	fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, defaultFilePerm)
	if err != nil {
		return nil, err
	}

	if s.glaciermode {
		objPath := filepath.Join(*input.Bucket, *input.Key)
		// for O_TMPFILE, filename is /proc/self/fd/<fd> to be used
		// later to link file into namespace
		f := os.NewFile(uintptr(fd), filepath.Join(procfddir, strconv.Itoa(fd)))

		stclass := types.StorageClassStandard
		requestOngoing := ""
		tmp := &tmpfile{
			f:          f,
			bucket:     bucket,
			objname:    obj,
			size:       size,
			needsChown: doChown,
			uid:        uid,
			gid:        gid,
		}

		requestOngoing = stageComplete
		// falloc is best effort, it's fine if this fails
		if size > 0 {
			tmp.falloc()
		}

		// Check if there are any offline extents associated with this file.
		// If so, we will set storage class to glacier.
		st, err := scoutfs.StatMore(objPath)
		if errors.Is(err, fs.ErrNotExist) {
			return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
		}
		if doChown {
			err := f.Chown(uid, gid)
			if err != nil {
				return nil, fmt.Errorf("stat more: %w", err)
				return nil, fmt.Errorf("set temp file ownership: %w", err)
			}
		if st.Offline_blocks != 0 {
			stclass = types.StorageClassGlacier
			requestOngoing = stageNotInProgress

			ok, err := isStaging(objPath)
			if errors.Is(err, fs.ErrNotExist) {
				return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
			}
			if err != nil {
				return nil, fmt.Errorf("check stage status: %w", err)
			}
			if ok {
				requestOngoing = stageInProgress
			}
		}

		res.Restore = &requestOngoing
		res.StorageClass = stclass
	}

	return res, nil
	return tmp, nil
}

func (s *ScoutFS) PutObject(ctx context.Context, po s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
	acct, ok := ctx.Value("account").(auth.Account)
	if !ok {
		acct = auth.Account{}
	}

	return s.Posix.PutObjectWithPostFunc(ctx, po, func(f *os.File) error {
		err := s.setProjectID(f, acct.ProjectID)
		if err != nil {
			debuglogger.InternalError(fmt.Errorf("put object %v/%v set project id: %v",
				filepath.Join(*po.Bucket, *po.Key), acct.ProjectID, err))
		}

		return nil
	})
}

func (s *ScoutFS) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
	acct, ok := ctx.Value("account").(auth.Account)
	if !ok {
		acct = auth.Account{}
	}

	return s.Posix.UploadPartWithPostFunc(ctx, input,
		func(f *os.File) error {
			if !s.disableNoArchive {
				err := setNoArchive(f)
				if err != nil {
					return fmt.Errorf("set noarchive: %w", err)
				}
			}

			err := s.setProjectID(f, acct.ProjectID)
			if err != nil {
				return fmt.Errorf("set project id %v: %w", acct.ProjectID, err)
			}

			return nil
		})
}

// CompleteMultipartUpload uses the scoutfs move-blocks ioctl so the part
// data does not have to be read and copied into the final object. This
// saves a read and write cycle for all multipart uploads.
func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
	acct, ok := ctx.Value("account").(auth.Account)
	if !ok {
		acct = auth.Account{}
	}

	return s.Posix.CompleteMultipartUploadWithCopy(ctx, input,
		func(from *os.File, to *os.File) error {
			// May fail if the files are not 4K aligned; check for alignment
			ffi, err := from.Stat()
			if err != nil {
				return fmt.Errorf("complete-mpu stat from: %w", err)
			}
			tfi, err := to.Stat()
			if err != nil {
				return fmt.Errorf("complete-mpu stat to: %w", err)
			}
			if ffi.Size()%4096 != 0 || tfi.Size()%4096 != 0 {
				return os.ErrInvalid
			}

			err = s.setProjectID(to, acct.ProjectID)
			if err != nil {
				debuglogger.InternalError(fmt.Errorf("complete-mpu %q/%q set project id %v: %v",
					*input.Bucket, *input.Key, acct.ProjectID, err))
			}

			err = scoutfs.MoveData(from, to)
			if err != nil {
				return fmt.Errorf("complete-mpu movedata: %w", err)
			}

			return nil
		})
}
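MoveData only succeeds when the file sizes involved are multiples of the 4 KiB block size, which is why the callback above rejects unaligned files with os.ErrInvalid rather than attempting the ioctl. A small illustrative helper (hypothetical, not in the codebase) applying the same precondition to a list of part sizes, on the assumption that only the final part may be unaligned:

```go
package main

import "fmt"

// alignedForMoveBlocks is a hypothetical check: every part except the
// last must be a multiple of the 4 KiB filesystem block size for the
// move-blocks fast path to apply cleanly.
func alignedForMoveBlocks(partSizes []int64) bool {
	const blockSize = 4096
	for i, size := range partSizes {
		if i < len(partSizes)-1 && size%blockSize != 0 {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(alignedForMoveBlocks([]int64{8 << 20, 8 << 20, 123}))   // true
	fmt.Println(alignedForMoveBlocks([]int64{8<<20 + 1, 8 << 20, 123})) // false
}
```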

func (s *ScoutFS) isBucketValid(bucket string) bool {
	if !s.validateBucketName {
		return true
	}

	return backend.IsValidDirectoryName(bucket)
}

func (s *ScoutFS) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
	bucket := *input.Bucket
	object := *input.Key

	if !s.isBucketValid(bucket) {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
	}
func (tmp *tmpfile) falloc() error {
	err := syscall.Fallocate(int(tmp.f.Fd()), 0, 0, tmp.size)
	if err != nil {
		return nil, fmt.Errorf("stat bucket: %w", err)
		return fmt.Errorf("fallocate: %v", err)
	}
	return nil
}

func (tmp *tmpfile) link() error {
	// We use Linkat/Rename as the atomic operation for object puts. The
	// upload is written to a temp (or unnamed/O_TMPFILE) file to not conflict
	// with any other simultaneous uploads. The final operation is to move the
	// temp file into place for the object. This ensures the object semantics
	// of last upload completed wins and is not some combination of writes
	// from simultaneous uploads.
	objPath := filepath.Join(tmp.bucket, tmp.objname)
	err := os.Remove(objPath)
	if err != nil && !errors.Is(err, fs.ErrNotExist) {
		return fmt.Errorf("remove stale path: %w", err)
	}

	objPath := filepath.Join(bucket, object)
	dir := filepath.Dir(objPath)

	fi, err := os.Stat(objPath)
	if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	if errors.Is(err, syscall.ENAMETOOLONG) {
		return nil, s3err.GetAPIError(s3err.ErrKeyTooLong)
	}
	err = backend.MkdirAll(dir, tmp.uid, tmp.gid, tmp.needsChown)
	if err != nil {
		return nil, fmt.Errorf("stat object: %w", err)
		return fmt.Errorf("make parent dir: %w", err)
	}

	if strings.HasSuffix(object, "/") && !fi.IsDir() {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}

	if s.glaciermode {
		// Check if there are any offline extents associated with this file.
		// If so, we will return the InvalidObjectState error.
		st, err := scoutfs.StatMore(objPath)
		if errors.Is(err, fs.ErrNotExist) {
			return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
		}
		if err != nil {
			return nil, fmt.Errorf("stat more: %w", err)
		}
		if st.Offline_blocks != 0 {
			return nil, s3err.GetAPIError(s3err.ErrInvalidObjectState)
		}
	}

	return s.Posix.GetObject(ctx, input)
}

func (s *ScoutFS) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
	if s.glaciermode {
		return s.Posix.ListObjectsParametrized(ctx, input, s.glacierFileToObj)
	} else {
		return s.Posix.ListObjects(ctx, input)
	}
}

func (s *ScoutFS) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error) {
	if s.glaciermode {
		return s.Posix.ListObjectsV2Parametrized(ctx, input, s.glacierFileToObj)
	} else {
		return s.Posix.ListObjectsV2(ctx, input)
	}
}

// glacierFileToObj is a FileToObj function for ListObject calls that adds a
// Glacier storage class if the file is offline
func (s *ScoutFS) glacierFileToObj(bucket string, fetchOwner bool) backend.GetObjFunc {
	posixFileToObj := s.Posix.FileToObj(bucket, fetchOwner)

	return func(path string, d fs.DirEntry) (s3response.Object, error) {
		res, err := posixFileToObj(path, d)
		if err != nil || d.IsDir() {
			return res, err
		}
		objPath := filepath.Join(bucket, path)
		// Check if there are any offline extents associated with this file.
		// If so, we will return the Glacier storage class
		st, err := scoutfs.StatMore(objPath)
		if errors.Is(err, fs.ErrNotExist) {
			return s3response.Object{}, backend.ErrSkipObj
		}
		if err != nil {
			return s3response.Object{}, fmt.Errorf("stat more: %w", err)
		}
		if st.Offline_blocks != 0 {
			res.StorageClass = types.ObjectStorageClassGlacier
		}
		return res, nil
	}
}

// RestoreObject will set a stage request on the file if it is offline, and
// do nothing if the file is online
func (s *ScoutFS) RestoreObject(_ context.Context, input *s3.RestoreObjectInput) error {
	bucket := *input.Bucket
	object := *input.Key

	if !s.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchBucket)
	}
	procdir, err := os.Open(procfddir)
	if err != nil {
		return fmt.Errorf("stat bucket: %w", err)
		return fmt.Errorf("open proc dir: %w", err)
	}
	defer procdir.Close()

	dirf, err := os.Open(dir)
	if err != nil {
		return fmt.Errorf("open parent dir: %w", err)
	}
	defer dirf.Close()

	err = unix.Linkat(int(procdir.Fd()), filepath.Base(tmp.f.Name()),
		int(dirf.Fd()), filepath.Base(objPath), unix.AT_SYMLINK_FOLLOW)
	if err != nil {
		return fmt.Errorf("link tmpfile: %w", err)
	}

	err = setStaging(filepath.Join(bucket, object))
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	err = tmp.f.Close()
	if err != nil {
		return fmt.Errorf("stage object: %w", err)
		return fmt.Errorf("close tmpfile: %w", err)
	}

	return nil
}
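The interleaved hunks above make the anonymous-file pattern hard to follow in one piece. A minimal, self-contained sketch of the same O_TMPFILE-then-Linkat technique (Linux-only, error handling trimmed, paths are illustrative):

```go
//go:build linux

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"

	"golang.org/x/sys/unix"
)

func main() {
	dir := os.TempDir()

	// Open an unnamed file in dir; the inode is freed on close
	// unless it is linked into the namespace first.
	fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, 0644)
	if err != nil {
		panic(err)
	}
	f := os.NewFile(uintptr(fd), filepath.Join("/proc/self/fd", strconv.Itoa(fd)))
	defer f.Close()

	f.WriteString("object data")

	// Give the anonymous file a name atomically via linkat(2) on its
	// /proc/self/fd entry (requires /proc to be mounted).
	dst := filepath.Join(dir, "example-object")
	err = unix.Linkat(unix.AT_FDCWD, f.Name(), unix.AT_FDCWD, dst, unix.AT_SYMLINK_FOLLOW)
	if err != nil {
		panic(err)
	}
	fmt.Println("linked to", dst)
}
```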

func isStaging(objname string) (bool, error) {
	b, err := xattr.Get(objname, flagskey)
	if err != nil && !isNoAttr(err) {
		return false, err
func (tmp *tmpfile) Write(b []byte) (int, error) {
	if int64(len(b)) > tmp.size {
		return 0, fmt.Errorf("write exceeds content length %v", tmp.size)
	}

	var flags uint64
	if !isNoAttr(err) {
		err = json.Unmarshal(b, &flags)
		if err != nil {
			return false, err
		}
	}

	return flags&Staging == Staging, nil
	n, err := tmp.f.Write(b)
	tmp.size -= int64(n)
	return n, err
}

func setFlag(objname string, flag uint64) error {
	f, err := os.Open(objname)
func (tmp *tmpfile) cleanup() {
	tmp.f.Close()
}

func moveData(from *os.File, to *os.File) error {
	return scoutfs.MoveData(from, to)
}

func statMore(path string) (stat, error) {
	st, err := scoutfs.StatMore(path)
	if err != nil {
		return err
		return stat{}, err
	}
	defer f.Close()
	var s stat

	return fsetFlag(f, flag)
}

func fsetFlag(f *os.File, flag uint64) error {
	b, err := xattr.FGet(f, flagskey)
	if err != nil && !isNoAttr(err) {
		return err
	}

	var oldflags uint64
	if !isNoAttr(err) {
		err = json.Unmarshal(b, &oldflags)
		if err != nil {
			return err
		}
	}

	newflags := oldflags | flag

	if newflags == oldflags {
		// no flags change, just return
		return nil
	}

	b, err = json.Marshal(&newflags)
	if err != nil {
		return err
	}

	return xattr.FSet(f, flagskey, b)
}

func setStaging(objname string) error {
	return setFlag(objname, Staging)
}

func setNoArchive(f *os.File) error {
	return fsetFlag(f, NoArchive)
}

func isNoAttr(err error) bool {
	xerr, ok := err.(*xattr.Error)
	if ok && xerr.Err == xattr.ENOATTR {
		return true
	}
	return false
}
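The flag word lives in a single xattr as a JSON-encoded uint64, so a read-modify-write is three steps: FGet, OR in the new bit, FSet. A minimal sketch of the same round trip using github.com/pkg/xattr; the key name here is made up (the real one is `scoutfs.hide.sam_flags`) and the target file must sit on a filesystem with user xattr support:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"

	"github.com/pkg/xattr"
)

func main() {
	f, err := os.CreateTemp(".", "flags")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())

	const key = "user.example_flags" // stand-in for scoutfs.hide.sam_flags
	const staging uint64 = 1

	// read-modify-write: a missing attr means zero flags
	var flags uint64
	if b, err := xattr.FGet(f, key); err == nil {
		json.Unmarshal(b, &flags)
	}
	flags |= staging
	b, _ := json.Marshal(flags)
	if err := xattr.FSet(f, key, b); err != nil {
		log.Fatal(err)
	}

	// read back and test the bit
	b, _ = xattr.FGet(f, key)
	json.Unmarshal(b, &flags)
	fmt.Println(flags&staging == staging) // true
}
```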

func (s *ScoutFS) setProjectID(f *os.File, proj int) error {
	if s.projectIDEnabled && isValidProjectID(proj) {
		err := scoutfs.SetProjectID(f, uint64(proj))
		if err != nil {
			return fmt.Errorf("set project id: %w", err)
		}
	}
	return nil
}

func isValidProjectID(proj int) bool {
	return proj > 0
}

const (
	sysscoutfs    = "/sys/fs/scoutfs/"
	formatversion = "format_version"
)

// fGetFormatVersion returns the ScoutFS format version reported by sysfs
func fGetFormatVersion(f *os.File) scoutFsVersion {
	fsid, err := scoutfs.GetIDs(f)
	if err != nil {
		return versionScoutFsNotScoutFS
	}

	path := filepath.Join(sysscoutfs, fsid.ShortID, formatversion)
	buf, err := os.ReadFile(path)
	if err != nil {
		return versionScoutFsUnknown
	}

	str := strings.TrimSpace(string(buf))
	vers, err := strconv.Atoi(str)
	if err != nil {
		return versionScoutFsUnknown
	}

	return scoutFsVersion(vers)
}

const (
	// versionScoutFsUnknown is unknown version
	versionScoutFsUnknown scoutFsVersion = iota
	// versionScoutFsV1 is version 1
	versionScoutFsV1
	// versionScoutFsV2 is version 2
	versionScoutFsV2
	// versionScoutFsMin is minimum scoutfs version
	versionScoutFsMin = versionScoutFsV1
	// versionScoutFsMax is maximum scoutfs version
	versionScoutFsMax = versionScoutFsV2
	// versionScoutFsNotScoutFS means the target FS is not scoutfs
	versionScoutFsNotScoutFS = versionScoutFsMax + 1
)

// scoutFsVersion is a scoutfs format version
type scoutFsVersion int

// AtLeast returns true if version is valid and at least b
func (a scoutFsVersion) AtLeast(b scoutFsVersion) bool {
	return a.IsValid() && a >= b
}

func (a scoutFsVersion) IsValid() bool {
	return a >= versionScoutFsMin && a <= versionScoutFsMax
	s.Meta_seq = st.Meta_seq
	s.Data_seq = st.Data_seq
	s.Data_version = st.Data_version
	s.Online_blocks = st.Online_blocks
	s.Offline_blocks = st.Offline_blocks
	s.Crtime_sec = st.Crtime_sec
	s.Crtime_nsec = st.Crtime_nsec

	return s, nil
}

@@ -17,15 +17,50 @@
package scoutfs

import (
	"errors"
	"fmt"
	"os"

	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/auth"
)

type ScoutFS struct {
	backend.BackendUnsupported
}

func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
	return nil, fmt.Errorf("scoutfs only available on linux")
}

type tmpfile struct {
	f *os.File
}

var (
	errNotSupported = errors.New("not supported")
)

func (s *ScoutFS) openTmpFile(_, _, _ string, _ int64, _ auth.Account) (*tmpfile, error) {
	// make these look used for static check
	_ = s.chownuid
	_ = s.chowngid
	_ = s.euid
	_ = s.egid
	_ = s.readonly
	return nil, errNotSupported
}

func (tmp *tmpfile) link() error {
	return errNotSupported
}

func (tmp *tmpfile) Write(b []byte) (int, error) {
	return 0, errNotSupported
}

func (tmp *tmpfile) cleanup() {
}

func moveData(_, _ *os.File) error {
	return errNotSupported
}

func statMore(_ string) (stat, error) {
	return stat{}, errNotSupported
}

backend/scoutfs/stat.go (new file, 25 lines)
@@ -0,0 +1,25 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package scoutfs

type stat struct {
	Meta_seq       uint64
	Data_seq       uint64
	Data_version   uint64
	Online_blocks  uint64
	Offline_blocks uint64
	Crtime_sec     uint64
	Crtime_nsec    uint32
}
@@ -1,4 +1,4 @@
// Copyright 2026 Versity Software
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
@@ -12,14 +12,13 @@
// specific language governing permissions and limitations
// under the License.

//go:build !windows
//go:build !freebsd && !openbsd && !netbsd
// +build !freebsd,!openbsd,!netbsd

package posix
package scoutfs

import (
	"github.com/versity/versitygw/s3err"
import "syscall"

var (
	errNoData = syscall.ENODATA
)

func handleParentDirError(_ string) error {
	return s3err.GetAPIError(s3err.ErrObjectParentIsFile)
}
@@ -1,4 +1,4 @@
// Copyright 2026 Versity Software
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
@@ -12,8 +12,13 @@
// specific language governing permissions and limitations
// under the License.

//go:build !freebsd
//go:build freebsd || openbsd || netbsd
// +build freebsd openbsd netbsd

package meta
package scoutfs

const xattrPrefix = "user."
import "syscall"

var (
	errNoData = syscall.ENOATTR
)

backend/walk.go (1107 lines): file diff suppressed because it is too large
@@ -17,23 +17,17 @@ package main
import (
	"bytes"
	"crypto/sha256"
	"crypto/tls"
	"encoding/hex"
	"encoding/json"
	"encoding/xml"
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"
	"strings"
	"text/tabwriter"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/aws/smithy-go"
	"github.com/urfave/cli/v2"
	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/s3response"
@@ -42,9 +36,7 @@ import (
var (
	adminAccess   string
	adminSecret   string
	adminRegion   string
	adminEndpoint string
	allowInsecure bool
)

func adminCommand() *cli.Command {
@@ -93,44 +85,6 @@ func adminCommand() *cli.Command {
				},
			},
		},
		{
			Name:   "update-user",
			Usage:  "Updates a user account",
			Action: updateUser,
			Flags: []cli.Flag{
				&cli.StringFlag{
					Name:     "access",
					Usage:    "user access key id to be updated",
					Required: true,
					Aliases:  []string{"a"},
				},
				&cli.StringFlag{
					Name:    "secret",
					Usage:   "secret access key for the new user",
					Aliases: []string{"s"},
				},
				&cli.StringFlag{
					Name:    "role",
					Usage:   "the new user role",
					Aliases: []string{"r"},
				},
				&cli.IntFlag{
					Name:    "user-id",
					Usage:   "userID for the new user",
					Aliases: []string{"ui"},
				},
				&cli.IntFlag{
					Name:    "group-id",
					Usage:   "groupID for the new user",
					Aliases: []string{"gi"},
				},
				&cli.IntFlag{
					Name:    "project-id",
					Usage:   "projectID for the new user",
					Aliases: []string{"pi"},
				},
			},
		},
		{
			Name:  "delete-user",
			Usage: "Delete a user",
@@ -173,66 +127,6 @@ func adminCommand() *cli.Command {
			Usage:  "Lists all the gateway buckets and owners.",
			Action: listBuckets,
		},
		{
			Name:   "create-bucket",
			Usage:  "Create a new bucket with owner",
			Action: createBucket,
			Flags: []cli.Flag{
				&cli.StringFlag{
					Name:     "owner",
					Usage:    "access key id of the bucket owner",
					Required: true,
					Aliases:  []string{"o"},
				},
				&cli.StringFlag{
					Name:     "bucket",
					Usage:    "bucket name",
					Required: true,
				},
				&cli.StringFlag{
					Name:  "acl",
					Usage: "canned ACL to apply to the bucket",
				},
				&cli.StringFlag{
					Name:  "grant-full-control",
					Usage: "Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.",
				},
				&cli.StringFlag{
					Name:  "grant-read",
					Usage: "Allows grantee to list the objects in the bucket.",
				},
				&cli.StringFlag{
					Name:  "grant-read-acp",
					Usage: "Allows grantee to read the bucket ACL.",
				},
				&cli.StringFlag{
					Name: "grant-write",
					Usage: `Allows grantee to create new objects in the bucket.
For the bucket and object owners of existing objects, also allows deletions and overwrites of those objects.`,
				},
				&cli.StringFlag{
					Name:  "grant-write-acp",
					Usage: "Allows grantee to write the ACL for the applicable bucket.",
				},
				&cli.StringFlag{
					Name:  "create-bucket-configuration",
					Usage: "bucket configuration (LocationConstraint, Tags)",
				},
				&cli.BoolFlag{
					Name:  "object-lock-enabled-for-bucket",
					Usage: "enable object lock for the bucket",
				},
				&cli.BoolFlag{
					Name:  "no-object-lock-enabled-for-bucket",
					Usage: "disable object lock for the bucket",
				},
				&cli.StringFlag{
					Name:  "object-ownership",
					Usage: "bucket object ownership setting",
					Value: "",
				},
			},
		},
	},
	Flags: []cli.Flag{
		// TODO: create a configuration file for this
@@ -241,6 +135,7 @@ func adminCommand() *cli.Command {
			Usage:       "admin access key id",
			EnvVars:     []string{"ADMIN_ACCESS_KEY_ID", "ADMIN_ACCESS_KEY"},
			Aliases:     []string{"a"},
			Required:    true,
			Destination: &adminAccess,
		},
		&cli.StringFlag{
@@ -248,16 +143,9 @@
			Usage:       "admin secret access key",
			EnvVars:     []string{"ADMIN_SECRET_ACCESS_KEY", "ADMIN_SECRET_KEY"},
			Aliases:     []string{"s"},
			Required:    true,
			Destination: &adminSecret,
		},
		&cli.StringFlag{
			Name:        "region",
			Usage:       "admin s3 region string",
			EnvVars:     []string{"ADMIN_REGION"},
			Value:       "us-east-1",
			Destination: &adminRegion,
			Aliases:     []string{"r"},
		},
		&cli.StringFlag{
			Name:  "endpoint-url",
			Usage: "admin apis endpoint url",
@@ -266,59 +154,15 @@
			Required:    true,
			Destination: &adminEndpoint,
		},
		&cli.BoolFlag{
			Name:        "allow-insecure",
			Usage:       "disable tls certificate verification for the admin endpoint",
			EnvVars:     []string{"ADMIN_ALLOW_INSECURE"},
			Aliases:     []string{"ai"},
			Destination: &allowInsecure,
		},
	},
	}
}

// getAdminCreds returns the effective admin access key ID and secret key.
// If admin-specific credentials are not provided, it falls back to the
// root user credentials. Both resulting values must be non-empty;
// otherwise, an error is returned.
func getAdminCreds() (string, string, error) {
	access := adminAccess
	secret := adminSecret

	// Fall back to root user credentials
	if access == "" {
		access = rootUserAccess
	}
	if secret == "" {
		secret = rootUserSecret
	}

	if access == "" {
		return "", "", errors.New("subcommand admin access key id is not set")
	}
	if secret == "" {
		return "", "", errors.New("subcommand admin secret access key is not set")
	}

	return access, secret, nil
}

func initHTTPClient() *http.Client {
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: allowInsecure},
	}
	return &http.Client{Transport: tr}
}

func createUser(ctx *cli.Context) error {
	adminAccess, adminSecret, err := getAdminCreds()
	if err != nil {
		return err
	}
	access, secret, role := ctx.String("access"), ctx.String("secret"), ctx.String("role")
	userID, groupID, projectID := ctx.Int("user-id"), ctx.Int("group-id"), ctx.Int("project-id")
	userID, groupID, projectID := ctx.Int("user-id"), ctx.Int("group-id"), ctx.Int("projectID")
	if access == "" || secret == "" {
		return fmt.Errorf("invalid input parameters for the new user access/secret keys")
		return fmt.Errorf("invalid input parameters for the new user")
	}
	if role != string(auth.RoleAdmin) && role != string(auth.RoleUser) && role != string(auth.RoleUserPlus) {
		return fmt.Errorf("invalid input parameter for role: %v", role)
@@ -333,56 +177,50 @@ func createUser(ctx *cli.Context) error {
		ProjectID: projectID,
	}

	accxml, err := xml.Marshal(acc)
	accJson, err := json.Marshal(acc)
	if err != nil {
		return fmt.Errorf("failed to parse user data: %w", err)
	}

	req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/create-user", adminEndpoint), bytes.NewBuffer(accxml))
	req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/create-user", adminEndpoint), bytes.NewBuffer(accJson))
	if err != nil {
		return fmt.Errorf("failed to send the request: %w", err)
	}

	signer := v4.NewSigner()

	hashedPayload := sha256.Sum256(accxml)
	hashedPayload := sha256.Sum256(accJson)
	hexPayload := hex.EncodeToString(hashedPayload[:])

	req.Header.Set("X-Amz-Content-Sha256", hexPayload)

	signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
	signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
	if signErr != nil {
		return fmt.Errorf("failed to sign the request: %w", err)
	}

	client := initHTTPClient()
	client := http.Client{}

	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send the request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 400 {
		return parseApiError(body)
	}
	fmt.Printf("%s\n", body)

	return nil
}
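Each admin subcommand repeats the same SigV4 dance: hash the payload, set X-Amz-Content-Sha256, and sign with the admin credentials. A sketch of how that boilerplate could be factored into one helper; signAdminRequest is hypothetical, not part of the codebase, and reuses only SDK calls already imported by this file:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

// signAdminRequest is a hypothetical helper applying the SigV4 signing
// boilerplate shared by the admin subcommands above.
func signAdminRequest(req *http.Request, payload []byte, access, secret, region string) error {
	hashed := sha256.Sum256(payload)
	hexPayload := hex.EncodeToString(hashed[:])
	req.Header.Set("X-Amz-Content-Sha256", hexPayload)

	signer := v4.NewSigner()
	return signer.SignHTTP(req.Context(),
		aws.Credentials{AccessKeyID: access, SecretAccessKey: secret},
		req, hexPayload, "s3", region, time.Now())
}
```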

func deleteUser(ctx *cli.Context) error {
	adminAccess, adminSecret, err := getAdminCreds()
	if err != nil {
		return err
	}
	access := ctx.String("access")
	if access == "" {
		return fmt.Errorf("invalid input parameter for the user access key")
		return fmt.Errorf("invalid input parameter for the new user")
	}

	req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/delete-user?access=%v", adminEndpoint, access), nil)
@@ -397,113 +235,30 @@ func deleteUser(ctx *cli.Context) error {

	req.Header.Set("X-Amz-Content-Sha256", hexPayload)

	signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
	signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
	if signErr != nil {
		return fmt.Errorf("failed to sign the request: %w", err)
	}

	client := initHTTPClient()
	client := http.Client{}

	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send the request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	if resp.StatusCode >= 400 {
		return parseApiError(body)
	}

	return nil
}

func updateUser(ctx *cli.Context) error {
	adminAccess, adminSecret, err := getAdminCreds()
	if err != nil {
		return err
	}

	access, secret, userId, groupId, projectID, role :=
		ctx.String("access"),
		ctx.String("secret"),
		ctx.Int("user-id"),
		ctx.Int("group-id"),
		ctx.Int("projectID"),
		auth.Role(ctx.String("role"))

	props := auth.MutableProps{}
	if ctx.IsSet("role") {
		if !role.IsValid() {
			return fmt.Errorf("invalid user role: %v", role)
		}
		props.Role = role
	}
	if ctx.IsSet("secret") {
		props.Secret = &secret
	}
	if ctx.IsSet("user-id") {
		props.UserID = &userId
	}
	if ctx.IsSet("group-id") {
		props.GroupID = &groupId
	}
	if ctx.IsSet("project-id") {
		props.ProjectID = &projectID
	}

	propsxml, err := xml.Marshal(props)
	if err != nil {
		return fmt.Errorf("failed to parse user attributes: %w", err)
	}

	req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/update-user?access=%v", adminEndpoint, access), bytes.NewBuffer(propsxml))
	if err != nil {
		return fmt.Errorf("failed to send the request: %w", err)
	}

	signer := v4.NewSigner()

	hashedPayload := sha256.Sum256(propsxml)
	hexPayload := hex.EncodeToString(hashedPayload[:])

	req.Header.Set("X-Amz-Content-Sha256", hexPayload)

	signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
	if signErr != nil {
		return fmt.Errorf("failed to sign the request: %w", err)
	}

	client := initHTTPClient()

	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send the request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	if resp.StatusCode >= 400 {
		return parseApiError(body)
	}
	fmt.Printf("%s\n", body)

	return nil
}

func listUsers(ctx *cli.Context) error {
	adminAccess, adminSecret, err := getAdminCreds()
	if err != nil {
		return err
	}

	req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/list-users", adminEndpoint), nil)
	if err != nil {
		return fmt.Errorf("failed to send the request: %w", err)
@@ -516,279 +271,34 @@ func listUsers(ctx *cli.Context) error {

	req.Header.Set("X-Amz-Content-Sha256", hexPayload)

	signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
	signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
	if signErr != nil {
		return fmt.Errorf("failed to sign the request: %w", err)
	}

	client := initHTTPClient()
	client := http.Client{}

	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send the request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	if resp.StatusCode >= 400 {
		return parseApiError(body)
	}

	var accs auth.ListUserAccountsResult
	if err := xml.Unmarshal(body, &accs); err != nil {
		return err
	}

	printAcctTable(accs.Accounts)

	return nil
}

type createBucketInput struct {
	LocationConstraint *string
	Tags               []types.Tag
}

// parseCreateBucketPayload parses the create-bucket configuration input,
// given as either JSON or shorthand syntax, into an XML payload.
func parseCreateBucketPayload(input string) ([]byte, error) {
	input = strings.TrimSpace(input)
	if input == "" {
		return []byte{}, nil
	}

	// try to parse as json, if the input starts with '{'
	if input[0] == '{' {
		var raw createBucketInput
		err := json.Unmarshal([]byte(input), &raw)
		if err != nil {
			return nil, fmt.Errorf("invalid JSON input: %w", err)
		}

		return xml.Marshal(s3response.CreateBucketConfiguration{
			LocationConstraint: raw.LocationConstraint,
			TagSet:             raw.Tags,
		})
	}

	var config s3response.CreateBucketConfiguration

	// parse as string - shorthand syntax
	inputParts, err := splitTopLevel(input)
	if err != nil {
		return nil, err
	}
	for _, part := range inputParts {
		part = strings.TrimSpace(part)
		if strings.HasPrefix(part, "LocationConstraint=") {
			locConstraint := strings.TrimPrefix(part, "LocationConstraint=")
			config.LocationConstraint = &locConstraint
		} else if strings.HasPrefix(part, "Tags=") {
			tags, err := parseTagging(strings.TrimPrefix(part, "Tags="))
			if err != nil {
				return nil, err
			}

			config.TagSet = tags
		} else {
			return nil, fmt.Errorf("invalid component: %v", part)
		}
	}

	return xml.Marshal(config)
}

var errInvalidTagsSyntax = errors.New("invalid tags syntax")

// splitTopLevel splits a shorthand configuration string into top-level components.
// The function splits only on commas that are not nested inside '{}' or '[]'.
func splitTopLevel(s string) ([]string, error) {
	var parts []string
	start := 0
	depth := 0

	for i, r := range s {
		switch r {
		case '{', '[':
			depth++
		case '}', ']':
			depth--
		case ',':
			if depth == 0 {
				parts = append(parts, s[start:i])
				start = i + 1
			}
		}
	}

	if depth != 0 {
		return nil, errors.New("invalid string format")
	}

	// add last segment
	if start < len(s) {
		parts = append(parts, s[start:])
	}

	return parts, nil
}
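For example, the splitter keeps bracketed groups intact, so a shorthand configuration with nested tags divides into exactly two components (illustrative values):

```go
parts, err := splitTopLevel("LocationConstraint=us-west-2,Tags=[{Key=a,Value=b},{Key=c,Value=d}]")
// err == nil
// parts[0] == "LocationConstraint=us-west-2"
// parts[1] == "Tags=[{Key=a,Value=b},{Key=c,Value=d}]"
```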

// parseTagging parses a tag set expressed in shorthand syntax into AWS CLI tags.
// Expected format:
//
//	[{Key=string,Value=string},{Key=string,Value=string}]
//
// The function validates bracket structure, splits tag objects at the top level,
// and delegates individual tag parsing to parseTag. It returns an error if the
// syntax is invalid or if any tag entry cannot be parsed.
func parseTagging(input string) ([]types.Tag, error) {
	if len(input) < 2 {
		return nil, errInvalidTagsSyntax
	}

	if input[0] != '[' || input[len(input)-1] != ']' {
		return nil, errInvalidTagsSyntax
	}
	// strip []
	input = input[1 : len(input)-1]

	tagComponents, err := splitTopLevel(input)
	if err != nil {
		return nil, errInvalidTagsSyntax
	}
	result := make([]types.Tag, 0, len(tagComponents))
	for _, tagComponent := range tagComponents {
		tagComponent = strings.TrimSpace(tagComponent)
		tag, err := parseTag(tagComponent)
		if err != nil {
			return nil, err
		}

		result = append(result, tag)
	}

	return result, nil
}

// parseTag parses a single tag definition in shorthand form.
// Expected format:
//
//	{Key=string,Value=string}
func parseTag(input string) (types.Tag, error) {
	input = strings.TrimSpace(input)

	if len(input) < 2 {
		return types.Tag{}, errInvalidTagsSyntax
	}

	if input[0] != '{' || input[len(input)-1] != '}' {
		return types.Tag{}, errInvalidTagsSyntax
	}

	// strip {}
	input = input[1 : len(input)-1]

	components := strings.Split(input, ",")
	if len(components) != 2 {
		return types.Tag{}, errInvalidTagsSyntax
	}

	var key, value string

	for _, c := range components {
		c = strings.TrimSpace(c)

		switch {
		case strings.HasPrefix(c, "Key="):
			key = strings.TrimPrefix(c, "Key=")
		case strings.HasPrefix(c, "Value="):
			value = strings.TrimPrefix(c, "Value=")
		default:
			return types.Tag{}, errInvalidTagsSyntax
		}
	}

	if key == "" {
		return types.Tag{}, errInvalidTagsSyntax
	}

	return types.Tag{
		Key:   &key,
		Value: &value,
	}, nil
}
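Put together, the shorthand and JSON input forms should yield the same XML configuration. An illustrative call (values are made up):

```go
// Shorthand and JSON inputs below describe the same configuration:
p1, _ := parseCreateBucketPayload(`LocationConstraint=us-west-2,Tags=[{Key=env,Value=dev}]`)
p2, _ := parseCreateBucketPayload(`{"LocationConstraint":"us-west-2","Tags":[{"Key":"env","Value":"dev"}]}`)
// bytes.Equal(p1, p2) == true
```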
|
||||
|
||||
func createBucket(ctx *cli.Context) error {
|
||||
adminAccess, adminSecret, err := getAdminCreds()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bucket, owner := ctx.String("bucket"), ctx.String("owner")
|
||||
|
||||
payload, err := parseCreateBucketPayload(ctx.String("create-bucket-configuration"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid create bucket configuration: %w", err)
|
||||
}
|
||||
|
||||
hashedPayload := sha256.Sum256(payload)
|
||||
hexPayload := hex.EncodeToString(hashedPayload[:])
|
||||
|
||||
headers := map[string]string{
|
||||
"x-amz-content-sha256": hexPayload,
|
||||
"x-vgw-owner": owner,
|
||||
"x-amz-acl": ctx.String("acl"),
|
||||
"x-amz-grant-full-control": ctx.String("grant-full-control"),
|
||||
"x-amz-grant-read": ctx.String("grant-read"),
|
||||
"x-amz-grant-read-acp": ctx.String("grant-read-acp"),
|
||||
"x-amz-grant-write": ctx.String("grant-write"),
|
||||
"x-amz-grant-write-acp": ctx.String("grant-write-acp"),
|
||||
"x-amz-object-ownership": ctx.String("object-ownership"),
|
||||
}
|
||||
|
||||
if ctx.Bool("object-lock-enabled-for-bucket") {
|
||||
headers["x-amz-bucket-object-lock-enabled"] = "true"
|
||||
}
|
||||
if ctx.Bool("no-object-lock-enabled-for-bucket") {
|
||||
headers["x-amz-bucket-object-lock-enabled"] = "false"
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx.Context, http.MethodPatch, fmt.Sprintf("%s/%s/create", adminEndpoint, bucket), bytes.NewReader(payload))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for key, value := range headers {
|
||||
if value != "" {
|
||||
req.Header.Set(key, value)
|
||||
}
|
||||
}
|
||||
|
||||
signer := v4.NewSigner()
|
||||
err = signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to sign the request: %w", err)
|
||||
}
|
||||
|
||||
client := initHTTPClient()
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
if resp.StatusCode >= 400 {
|
||||
return fmt.Errorf("%s", body)
|
||||
}
|
||||
|
||||
var accs []auth.Account
|
||||
if err := json.Unmarshal(body, &accs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
return parseApiError(body)
|
||||
}
|
||||
printAcctTable(accs)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -815,11 +325,6 @@ func printAcctTable(accs []auth.Account) {
|
||||
}
|
||||
|
||||
func changeBucketOwner(ctx *cli.Context) error {
|
||||
adminAccess, adminSecret, err := getAdminCreds()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bucket, owner := ctx.String("bucket"), ctx.String("owner")
|
||||
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/change-bucket-owner/?bucket=%v&owner=%v", adminEndpoint, bucket, owner), nil)
|
||||
if err != nil {
|
||||
@@ -833,27 +338,25 @@ func changeBucketOwner(ctx *cli.Context) error {
|
||||
|
||||
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
|
||||
|
||||
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
|
||||
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
|
||||
if signErr != nil {
|
||||
return fmt.Errorf("failed to sign the request: %w", err)
|
||||
}
|
||||
|
||||
client := initHTTPClient()
|
||||
client := http.Client{}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
return parseApiError(body)
|
||||
}
|
||||
fmt.Println(string(body))
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -871,11 +374,6 @@ func printBuckets(buckets []s3response.Bucket) {
|
||||
}
|
||||
|
||||
func listBuckets(ctx *cli.Context) error {
|
||||
adminAccess, adminSecret, err := getAdminCreds()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/list-buckets", adminEndpoint), nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
@@ -888,45 +386,34 @@ func listBuckets(ctx *cli.Context) error {
|
||||
|
||||
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
|
||||
|
||||
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
|
||||
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
|
||||
if signErr != nil {
|
||||
return fmt.Errorf("failed to sign the request: %w", err)
|
||||
}
|
||||
|
||||
client := initHTTPClient()
|
||||
client := http.Client{}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
return parseApiError(body)
|
||||
return fmt.Errorf("%s", body)
|
||||
}
|
||||
|
||||
var result s3response.ListBucketsResult
|
||||
if err := xml.Unmarshal(body, &result); err != nil {
|
||||
var buckets []s3response.Bucket
|
||||
if err := json.Unmarshal(body, &buckets); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
printBuckets(result.Buckets)
|
||||
printBuckets(buckets)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseApiError(body []byte) error {
|
||||
var apiErr smithy.GenericAPIError
|
||||
err := xml.Unmarshal(body, &apiErr)
|
||||
if err != nil {
|
||||
apiErr.Code = "InternalServerError"
|
||||
apiErr.Message = err.Error()
|
||||
}
|
||||
|
||||
return &apiErr
|
||||
}
|
||||
|
||||
@@ -7,9 +7,7 @@ import (
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/versity/versitygw/backend/meta"
|
||||
"github.com/versity/versitygw/backend/posix"
|
||||
"github.com/versity/versitygw/tests/integration"
|
||||
)
|
||||
@@ -58,9 +56,7 @@ func initPosix(ctx context.Context) {
|
||||
log.Fatalf("make temp directory: %v", err)
|
||||
}
|
||||
|
||||
be, err := posix.New(tempdir, meta.XattrMeta{}, posix.PosixOpts{
|
||||
NewDirPerm: 0755,
|
||||
})
|
||||
be, err := posix.New(tempdir, posix.PosixOpts{})
|
||||
if err != nil {
|
||||
log.Fatalf("init posix: %v", err)
|
||||
}
|
||||
@@ -78,9 +74,6 @@ func initPosix(ctx context.Context) {
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
// wait for server to start
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
func TestIntegration(t *testing.T) {
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,75 +0,0 @@
|
||||
// Copyright 2025 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"plugin"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
"github.com/versity/versitygw/plugins"
|
||||
)
|
||||
|
||||
func pluginCommand() *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "plugin",
|
||||
Usage: "load a backend from a plugin",
|
||||
Description: "Runs a s3 gateway and redirects the requests to the backend defined in the plugin",
|
||||
Action: runPluginBackend,
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "config",
|
||||
Usage: "location of the plugin config file",
|
||||
Aliases: []string{"c"},
|
||||
EnvVars: []string{"VGW_PLUGIN_CONFIG"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func runPluginBackend(ctx *cli.Context) error {
|
||||
if ctx.NArg() == 0 {
|
||||
return fmt.Errorf("no plugin file provided to be loaded")
|
||||
}
|
||||
|
||||
pluginPath := ctx.Args().Get(0)
|
||||
config := ctx.String("config")
|
||||
|
||||
p, err := plugin.Open(pluginPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
backendSymbol, err := p.Lookup("Backend")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
backendPluginPtr, ok := backendSymbol.(*plugins.BackendPlugin)
|
||||
if !ok {
|
||||
return errors.New("plugin is not of type *plugins.BackendPlugin")
|
||||
}
|
||||
|
||||
if backendPluginPtr == nil {
|
||||
return errors.New("variable Backend is nil")
|
||||
}
|
||||
|
||||
be, err := (*backendPluginPtr).New(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return runGateway(ctx.Context, be)
|
||||
}
|
||||
@@ -16,22 +16,14 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"math"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
"github.com/versity/versitygw/backend/meta"
|
||||
"github.com/versity/versitygw/backend/posix"
|
||||
)
|
||||
|
||||
var (
|
||||
chownuid, chowngid bool
|
||||
bucketlinks bool
|
||||
versioningDir string
|
||||
dirPerms uint
|
||||
sidecar string
|
||||
nometa bool
|
||||
forceNoTmpFile bool
|
||||
readonly bool
|
||||
)
|
||||
|
||||
func posixCommand() *cli.Command {
|
||||
@@ -63,42 +55,10 @@ will be translated into the file /mnt/fs/gwroot/mybucket/a/b/c/myobject`,
|
||||
Destination: &chowngid,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "bucketlinks",
|
||||
Usage: "allow symlinked directories at bucket level to be treated as buckets",
|
||||
EnvVars: []string{"VGW_BUCKET_LINKS"},
|
||||
Destination: &bucketlinks,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "versioning-dir",
|
||||
Usage: "the directory path to enable bucket versioning",
|
||||
EnvVars: []string{"VGW_VERSIONING_DIR"},
|
||||
Destination: &versioningDir,
|
||||
},
|
||||
&cli.UintFlag{
|
||||
Name: "dir-perms",
|
||||
Usage: "default directory permissions for new directories",
|
||||
EnvVars: []string{"VGW_DIR_PERMS"},
|
||||
Destination: &dirPerms,
|
||||
DefaultText: "0755",
|
||||
Value: 0755,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "sidecar",
|
||||
Usage: "use provided sidecar directory to store metadata",
|
||||
EnvVars: []string{"VGW_META_SIDECAR"},
|
||||
Destination: &sidecar,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "nometa",
|
||||
Usage: "disable metadata storage",
|
||||
EnvVars: []string{"VGW_META_NONE"},
|
||||
Destination: &nometa,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "disableotmp",
|
||||
Usage: "disable O_TMPFILE support for new objects",
|
||||
EnvVars: []string{"VGW_DISABLE_OTMP"},
|
||||
Destination: &forceNoTmpFile,
|
||||
Name: "readonly",
|
||||
Usage: "allow only read operations to backend",
|
||||
EnvVars: []string{"VGW_READ_ONLY"},
|
||||
Destination: &readonly,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -109,48 +69,13 @@ func runPosix(ctx *cli.Context) error {
|
||||
return fmt.Errorf("no directory provided for operation")
|
||||
}
|
||||
|
||||
gwroot := (ctx.Args().Get(0))
|
||||
|
||||
if dirPerms > math.MaxUint32 {
|
||||
return fmt.Errorf("invalid directory permissions: %d", dirPerms)
|
||||
}
|
||||
|
||||
if nometa && sidecar != "" {
|
||||
return fmt.Errorf("cannot use both nometa and sidecar metadata")
|
||||
}
|
||||
|
||||
opts := posix.PosixOpts{
|
||||
ChownUID: chownuid,
|
||||
ChownGID: chowngid,
|
||||
BucketLinks: bucketlinks,
|
||||
VersioningDir: versioningDir,
|
||||
NewDirPerm: fs.FileMode(dirPerms),
|
||||
ForceNoTmpFile: forceNoTmpFile,
|
||||
ValidateBucketNames: disableStrictBucketNames,
|
||||
}
|
||||
|
||||
var ms meta.MetadataStorer
|
||||
switch {
|
||||
case sidecar != "":
|
||||
sc, err := meta.NewSideCar(sidecar)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to init sidecar metadata: %w", err)
|
||||
}
|
||||
ms = sc
|
||||
opts.SideCarDir = sidecar
|
||||
case nometa:
|
||||
ms = meta.NoMeta{}
|
||||
default:
|
||||
ms = meta.XattrMeta{}
|
||||
err := meta.XattrMeta{}.Test(gwroot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("xattr check failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
be, err := posix.New(gwroot, ms, opts)
|
||||
be, err := posix.New(ctx.Args().Get(0), posix.PosixOpts{
|
||||
ChownUID: chownuid,
|
||||
ChownGID: chowngid,
|
||||
ReadOnly: readonly,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to init posix backend: %w", err)
|
||||
return fmt.Errorf("init posix: %v", err)
|
||||
}
|
||||
|
||||
return runGateway(ctx.Context, be)
|
||||
|
||||
@@ -26,10 +26,8 @@ var (
|
||||
s3proxySecret string
|
||||
s3proxyEndpoint string
|
||||
s3proxyRegion string
|
||||
s3proxyMetaBucket string
|
||||
s3proxyDisableChecksum bool
|
||||
s3proxySslSkipVerify bool
|
||||
s3proxyUsePathStyle bool
|
||||
s3proxyDebug bool
|
||||
)
|
||||
|
||||
@@ -73,12 +71,6 @@ to an s3 storage backend service.`,
|
||||
EnvVars: []string{"VGW_S3_REGION"},
|
||||
Destination: &s3proxyRegion,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "meta-bucket",
|
||||
Usage: "s3 service meta bucket to store buckets acl/policy",
|
||||
EnvVars: []string{"VGW_S3_META_BUCKET"},
|
||||
Destination: &s3proxyMetaBucket,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "disable-checksum",
|
||||
Usage: "disable gateway to server object checksums",
|
||||
@@ -93,13 +85,6 @@ to an s3 storage backend service.`,
|
||||
Value: false,
|
||||
Destination: &s3proxySslSkipVerify,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "use-path-style",
|
||||
Usage: "use path style addressing for s3 proxy",
|
||||
EnvVars: []string{"VGW_S3_USE_PATH_STYLE"},
|
||||
Value: false,
|
||||
Destination: &s3proxyUsePathStyle,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "output extra debug tracing",
|
||||
@@ -112,8 +97,8 @@ to an s3 storage backend service.`,
|
||||
}
|
||||
|
||||
func runS3(ctx *cli.Context) error {
|
||||
be, err := s3proxy.New(ctx.Context, s3proxyAccess, s3proxySecret, s3proxyEndpoint, s3proxyRegion,
|
||||
s3proxyMetaBucket, s3proxyDisableChecksum, s3proxySslSkipVerify, s3proxyUsePathStyle, s3proxyDebug)
|
||||
be, err := s3proxy.New(s3proxyAccess, s3proxySecret, s3proxyEndpoint, s3proxyRegion,
|
||||
s3proxyDisableChecksum, s3proxySslSkipVerify, s3proxyDebug)
|
||||
if err != nil {
|
||||
return fmt.Errorf("init s3 backend: %w", err)
|
||||
}
|
||||
|
@@ -16,17 +16,17 @@ package main

import (
	"fmt"
	"io/fs"
	"math"

	"github.com/urfave/cli/v2"
	"github.com/versity/versitygw/backend/scoutfs"
)

var (
	glacier          bool
	disableNoArchive bool
	setProjectID     bool
	glacier bool

	// defined in posix.go:
	// chownuid, chowngid bool
	// readonly bool
)

func scoutfsCommand() *cli.Command {
@@ -68,36 +68,10 @@ move interfaces as well as support for tiered filesystems.`,
			Destination: &chowngid,
		},
		&cli.BoolFlag{
			Name:        "projectid",
			Usage:       "set project id on newly created buckets, files, and directories to client account ProjectID",
			EnvVars:     []string{"VGW_SET_PROJECT_ID"},
			Destination: &setProjectID,
		},
		&cli.BoolFlag{
			Name:        "bucketlinks",
			Usage:       "allow symlinked directories at bucket level to be treated as buckets",
			EnvVars:     []string{"VGW_BUCKET_LINKS"},
			Destination: &bucketlinks,
		},
		&cli.StringFlag{
			Name:        "versioning-dir",
			Usage:       "the directory path to enable bucket versioning",
			EnvVars:     []string{"VGW_VERSIONING_DIR"},
			Destination: &versioningDir,
		},
		&cli.UintFlag{
			Name:        "dir-perms",
			Usage:       "default directory permissions for new directories",
			EnvVars:     []string{"VGW_DIR_PERMS"},
			Destination: &dirPerms,
			DefaultText: "0755",
			Value:       0755,
		},
		&cli.BoolFlag{
			Name:        "disable-noarchive",
			Usage:       "disable setting noarchive for multipart part uploads",
			EnvVars:     []string{"VGW_DISABLE_NOARCHIVE"},
			Destination: &disableNoArchive,
			Name:        "readonly",
			Usage:       "allow only read operations to backend",
			EnvVars:     []string{"VGW_READ_ONLY"},
			Destination: &readonly,
		},
	},
}
@@ -108,20 +82,11 @@ func runScoutfs(ctx *cli.Context) error {
		return fmt.Errorf("no directory provided for operation")
	}

	if dirPerms > math.MaxUint32 {
		return fmt.Errorf("invalid directory permissions: %d", dirPerms)
	}

	var opts scoutfs.ScoutfsOpts
	opts.GlacierMode = glacier
	opts.ChownUID = chownuid
	opts.ChownGID = chowngid
	opts.BucketLinks = bucketlinks
	opts.NewDirPerm = fs.FileMode(dirPerms)
	opts.DisableNoArchive = disableNoArchive
	opts.VersioningDir = versioningDir
	opts.ValidateBucketNames = disableStrictBucketNames
	opts.SetProjectID = setProjectID
	opts.ReadOnly = readonly

	be, err := scoutfs.New(ctx.Args().Get(0), opts)
	if err != nil {
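The `runScoutfs` hunk range-checks the `dir-perms` flag before converting it to `fs.FileMode`. A standalone sketch of that check, assuming nothing beyond the standard library (the function name is hypothetical):

```go
package main

import (
	"fmt"
	"io/fs"
	"math"
)

// dirPermFromFlag mirrors the bounds check above: a cli.UintFlag yields a
// platform-sized uint, while fs.FileMode is 32 bits wide, so the value is
// range-checked before conversion.
func dirPermFromFlag(dirPerms uint) (fs.FileMode, error) {
	if uint64(dirPerms) > math.MaxUint32 {
		return 0, fmt.Errorf("invalid directory permissions: %d", dirPerms)
	}
	// Only the permission bits are meaningful here; 0o755 is the typical
	// default for new directories.
	return fs.FileMode(dirPerms) & fs.ModePerm, nil
}

func main() {
	mode, err := dirPermFromFlag(0o755)
	if err != nil {
		panic(err)
	}
	fmt.Println(mode) // -rwxr-xr-x
}
```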
@@ -22,24 +22,20 @@ import (
)

var (
	awsID             string
	awsSecret         string
	endpoint          string
	prefix            string
	dstBucket         string
	partSize          int64
	objSize           int64
	concurrency       int
	files             int
	totalReqs         int
	upload            bool
	download          bool
	hostStyle         bool
	checksumDisable   bool
	versioningEnabled bool
	azureTests        bool
	tlsStatus         bool
	parallel          bool
	awsID           string
	awsSecret       string
	endpoint        string
	prefix          string
	dstBucket       string
	partSize        int64
	objSize         int64
	concurrency     int
	files           int
	totalReqs       int
	upload          bool
	download        bool
	pathStyle       bool
	checksumDisable bool
)

func testCommand() *cli.Command {
@@ -75,24 +71,12 @@ func initTestFlags() []cli.Flag {
			Destination: &endpoint,
			Aliases:     []string{"e"},
		},
		&cli.BoolFlag{
			Name:        "host-style",
			Usage:       "Use host-style bucket addressing",
			Value:       false,
			Destination: &hostStyle,
		},
		&cli.BoolFlag{
			Name:        "debug",
			Usage:       "enable debug mode",
			Aliases:     []string{"d"},
			Destination: &debug,
		},
		&cli.BoolFlag{
			Name:        "allow-insecure",
			Usage:       "skip tls verification",
			Aliases:     []string{"ai"},
			Destination: &tlsStatus,
		},
	}
}

@@ -103,44 +87,11 @@ func initTestCommands() []*cli.Command {
			Usage:       "Tests the full flow of gateway.",
			Description: `Runs all the available tests to test the full flow of the gateway.`,
			Action:      getAction(integration.TestFullFlow),
			Flags: []cli.Flag{
				&cli.BoolFlag{
					Name:        "versioning-enabled",
					Usage:       "Test the bucket object versioning, if the versioning is enabled",
					Destination: &versioningEnabled,
					Aliases:     []string{"vs"},
				},
				&cli.BoolFlag{
					Name:        "azure-test-mode",
					Usage:       "Skips tests that are not supported by Azure",
					Destination: &azureTests,
					Aliases:     []string{"azure"},
				},
				&cli.BoolFlag{
					Name:        "parallel",
					Usage:       "executes the tests concurrently",
					Destination: &parallel,
					Aliases:     []string{"p"},
				},
			},
		},
		{
			Name:   "posix",
			Usage:  "Tests posix specific features",
			Action: getAction(integration.TestPosix),
			Flags: []cli.Flag{
				&cli.BoolFlag{
					Name:        "versioning-enabled",
					Usage:       "Test posix when versioning is enabled",
					Destination: &versioningEnabled,
					Aliases:     []string{"vs"},
				},
			},
		},
		{
			Name:   "scoutfs",
			Usage:  "Tests scoutfs full flow",
			Action: getAction(integration.TestScoutfs),
		},
		{
			Name: "iam",
@@ -204,6 +155,12 @@ func initTestCommands() []*cli.Command {
				Value:       1,
				Destination: &concurrency,
			},
			&cli.BoolFlag{
				Name:        "pathStyle",
				Usage:       "Use Pathstyle bucket addressing",
				Value:       false,
				Destination: &pathStyle,
			},
			&cli.BoolFlag{
				Name:  "checksumDis",
				Usage: "Disable server checksum",
@@ -230,13 +187,12 @@ func initTestCommands() []*cli.Command {
				integration.WithEndpoint(endpoint),
				integration.WithConcurrency(concurrency),
				integration.WithPartSize(partSize),
				integration.WithTLSStatus(tlsStatus),
			}
			if debug {
				opts = append(opts, integration.WithDebug())
			}
			if hostStyle {
				opts = append(opts, integration.WithHostStyle())
			if pathStyle {
				opts = append(opts, integration.WithPathStyle())
			}
			if checksumDisable {
				opts = append(opts, integration.WithDisableChecksum())
@@ -291,7 +247,6 @@ func initTestCommands() []*cli.Command {
				integration.WithRegion(region),
				integration.WithEndpoint(endpoint),
				integration.WithConcurrency(concurrency),
				integration.WithTLSStatus(tlsStatus),
			}
			if debug {
				opts = append(opts, integration.WithDebug())
@@ -299,9 +254,6 @@ func initTestCommands() []*cli.Command {
			if checksumDisable {
				opts = append(opts, integration.WithDisableChecksum())
			}
			if hostStyle {
				opts = append(opts, integration.WithHostStyle())
			}

			s3conf := integration.NewS3Conf(opts...)

@@ -311,39 +263,27 @@ func initTestCommands() []*cli.Command {
		}, extractIntTests()...)
}

type testFunc func(*integration.TestState)
type testFunc func(*integration.S3Conf)

func getAction(tf testFunc) func(ctx *cli.Context) error {
func getAction(tf testFunc) func(*cli.Context) error {
	return func(ctx *cli.Context) error {
		opts := []integration.Option{
			integration.WithAccess(awsID),
			integration.WithSecret(awsSecret),
			integration.WithRegion(region),
			integration.WithEndpoint(endpoint),
			integration.WithTLSStatus(tlsStatus),
		}
		if debug {
			opts = append(opts, integration.WithDebug())
		}
		if versioningEnabled {
			opts = append(opts, integration.WithVersioningEnabled())
		}
		if azureTests {
			opts = append(opts, integration.WithAzureMode())
		}
		if hostStyle {
			opts = append(opts, integration.WithHostStyle())
		}

		s := integration.NewS3Conf(opts...)
		ts := integration.NewTestState(ctx.Context, s, parallel)
		tf(ts)
		ts.Wait()
		tf(s)

		fmt.Println()
		fmt.Println("RAN:", integration.RunCount.Load(), "PASS:", integration.PassCount.Load(), "FAIL:", integration.FailCount.Load())
		if integration.FailCount.Load() > 0 {
			return fmt.Errorf("test failed with %v errors", integration.FailCount.Load())
		fmt.Println("RAN:", integration.RunCount, "PASS:", integration.PassCount, "FAIL:", integration.FailCount)
		if integration.FailCount > 0 {
			return fmt.Errorf("test failed with %v errors", integration.FailCount)
		}
		return nil
	}
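One side of the `getAction` hunk reads `integration.RunCount.Load()` and friends, which implies the counters are atomics so parallel tests can update them safely. A minimal sketch of that counter pattern (the names mirror the diff, but this package is self-contained):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// Package-level atomic counters that parallel test goroutines can bump
// without a mutex.
var (
	RunCount  atomic.Int64
	PassCount atomic.Int64
	FailCount atomic.Int64
)

func runOne(pass bool) {
	RunCount.Add(1)
	if pass {
		PassCount.Add(1)
	} else {
		FailCount.Add(1)
	}
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			runOne(i%3 != 0) // fail every third run, just for the demo
		}()
	}
	wg.Wait()
	fmt.Println("RAN:", RunCount.Load(), "PASS:", PassCount.Load(), "FAIL:", FailCount.Load())
}
```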
@@ -363,30 +303,15 @@ func extractIntTests() (commands []*cli.Command) {
					integration.WithSecret(awsSecret),
					integration.WithRegion(region),
					integration.WithEndpoint(endpoint),
					integration.WithTLSStatus(tlsStatus),
				}
				if debug {
					opts = append(opts, integration.WithDebug())
				}
				if versioningEnabled {
					opts = append(opts, integration.WithVersioningEnabled())
				}
				if hostStyle {
					opts = append(opts, integration.WithHostStyle())
				}

				s := integration.NewS3Conf(opts...)
				err := testFunc(s)
				return err
			},
			Flags: []cli.Flag{
				&cli.BoolFlag{
					Name:        "versioning-enabled",
					Usage:       "Test the bucket object versioning, if the versioning is enabled",
					Destination: &versioningEnabled,
					Aliases:     []string{"vs"},
				},
			},
		})
	}
	return
@@ -54,24 +54,22 @@ func generateEventFiltersConfig(ctx *cli.Context) error {
	}

	config := s3event.EventFilter{
		s3event.EventObjectCreated:              true,
		s3event.EventObjectCreatedPut:           true,
		s3event.EventObjectCreatedPost:          true,
		s3event.EventObjectCreatedCopy:          true,
		s3event.EventCompleteMultipartUpload:    true,
		s3event.EventObjectRemoved:              true,
		s3event.EventObjectRemovedDelete:        true,
		s3event.EventObjectRemovedDeleteObjects: true,
		s3event.EventObjectTagging:              true,
		s3event.EventObjectTaggingPut:           true,
		s3event.EventObjectTaggingDelete:        true,
		s3event.EventObjectAclPut:               true,
		s3event.EventObjectRestore:              true,
		s3event.EventObjectRestorePost:          true,
		s3event.EventObjectRestoreCompleted:     true,
		s3event.EventObjectCreated:           true,
		s3event.EventObjectCreatedPut:        true,
		s3event.EventObjectCreatedPost:       true,
		s3event.EventObjectCreatedCopy:       true,
		s3event.EventCompleteMultipartUpload: true,
		s3event.EventObjectDeleted:           true,
		s3event.EventObjectTagging:           true,
		s3event.EventObjectTaggingPut:        true,
		s3event.EventObjectTaggingDelete:     true,
		s3event.EventObjectAclPut:            true,
		s3event.EventObjectRestore:           true,
		s3event.EventObjectRestorePost:       true,
		s3event.EventObjectRestoreCompleted:  true,
	}

	configBytes, err := json.MarshalIndent(config, "", " ")
	configBytes, err := json.Marshal(config)
	if err != nil {
		return fmt.Errorf("parse event config: %w", err)
	}
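The hunk above switches between `json.MarshalIndent` and `json.Marshal` for the generated filter file. The difference is purely cosmetic but matters for a hand-editable config, as this sketch with a stand-in `EventFilter` map shows (the real type lives in the `s3event` package, and the keys here are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// EventFilter stands in for the s3event.EventFilter map type.
type EventFilter map[string]bool

func main() {
	config := EventFilter{
		"s3:ObjectCreated:*":   true,
		"s3:ObjectRemoved:*":   true,
		"s3:ObjectTagging:Put": true,
	}

	compact, _ := json.Marshal(config)
	fmt.Println(string(compact)) // one line, hard to hand-edit

	pretty, _ := json.MarshalIndent(config, "", "   ")
	fmt.Println(string(pretty)) // indented, friendlier as a config file on disk
}
```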
@@ -1,275 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package debuglogger

import (
	"fmt"
	"log"
	"net/http"
	"os"
	"strings"
	"sync/atomic"

	"github.com/gofiber/fiber/v2"
)

type Color string
type prefix string

const (
	green  Color = "\033[32m"
	yellow Color = "\033[33m"
	blue   Color = "\033[34m"
	red    Color = "\033[31m"
	Purple Color = "\033[0;35m"

	prefixPanic         prefix = "[PANIC]: "
	prefixInternalError prefix = "[INTERNAL ERROR]: "
	prefixInfo          prefix = "[INFO]: "
	prefixDebug         prefix = "[DEBUG]: "

	reset      = "\033[0m"
	borderChar = "─"
	boxWidth   = 120
)

// Panic prints the panics out in the console
func Panic(er error) {
	printError(prefixPanic, er)
}

// InternalError prints the internal error out in the console
func InternalError(er error) {
	printError(prefixInternalError, er)
}

func printError(prefix prefix, er error) {
	fmt.Fprintf(os.Stderr, string(red)+string(prefix)+"%v"+reset+"\n", er)
}

// Logs http request details: headers, body, params, query args
func LogFiberRequestDetails(ctx *fiber.Ctx) {
	// Log the full request url
	fullURL := ctx.Protocol() + "://" + ctx.Hostname() + ctx.OriginalURL()
	fmt.Printf("%s[URL]: %s%s\n", green, fullURL, reset)

	// log request headers
	wrapInBox(green, "REQUEST HEADERS", boxWidth, func() {
		for key, value := range ctx.Request().Header.All() {
			printWrappedLine(yellow, string(key), string(value))
		}
	})
	// skip request body log for PutObject and UploadPart
	skipBodyLog := isLargeDataAction(ctx)
	if !skipBodyLog {
		body := ctx.Request().Body()
		if len(body) != 0 {
			printBoxTitleLine(blue, "REQUEST BODY", boxWidth, false)
			fmt.Printf("%s%s%s\n", blue, body, reset)
			printHorizontalBorder(blue, boxWidth, false)
		}
	}

	if ctx.Request().URI().QueryArgs().Len() != 0 {
		for key, value := range ctx.Request().URI().QueryArgs().All() {
			log.Printf("%s: %s", key, value)
		}
	}
}

// Logs http response details: body, headers
func LogFiberResponseDetails(ctx *fiber.Ctx) {
	wrapInBox(green, "RESPONSE HEADERS", boxWidth, func() {
		for key, value := range ctx.Response().Header.All() {
			printWrappedLine(yellow, string(key), string(value))
		}
	})

	_, ok := ctx.Locals("skip-res-body-log").(bool)
	if !ok {
		body := ctx.Response().Body()
		if len(body) != 0 {
			PrintInsideHorizontalBorders(blue, "RESPONSE BODY", string(body), boxWidth)
		}
	}
}

var debugEnabled atomic.Bool

// SetDebugEnabled sets the debug mode
func SetDebugEnabled() {
	debugEnabled.Store(true)
}

// IsDebugEnabled returns true if debugging is enabled
func IsDebugEnabled() bool {
	return debugEnabled.Load()
}

// Logf is the same as 'fmt.Printf' with debug prefix,
// a color added and '\n' at the end
func Logf(format string, v ...any) {
	if !debugEnabled.Load() {
		return
	}

	fmt.Printf(string(yellow)+string(prefixDebug)+format+reset+"\n", v...)
}

// Infof prints out green info block with [INFO]: prefix
func Infof(format string, v ...any) {
	if !debugEnabled.Load() {
		return
	}

	fmt.Printf(string(green)+string(prefixInfo)+format+reset+"\n", v...)
}

var debugIAMEnabled atomic.Bool

// SetIAMDebugEnabled sets the IAM debug mode
func SetIAMDebugEnabled() {
	debugIAMEnabled.Store(true)
}

// IsIAMDebugEnabled returns true if IAM debugging is enabled
func IsIAMDebugEnabled() bool {
	return debugIAMEnabled.Load()
}

// IAMLogf is the same as 'fmt.Printf' with debug prefix,
// a color added and '\n' at the end
func IAMLogf(format string, v ...any) {
	if !debugIAMEnabled.Load() {
		return
	}

	fmt.Printf(string(yellow)+string(prefixDebug)+format+reset+"\n", v...)
}

// PrintInsideHorizontalBorders prints the text inside horizontal
// border and title in the center of upper border
func PrintInsideHorizontalBorders(color Color, title, text string, width int) {
	if !debugEnabled.Load() {
		return
	}
	printBoxTitleLine(color, title, width, false)
	fmt.Printf("%s%s%s\n", color, text, reset)
	printHorizontalBorder(color, width, false)
}

// Prints out box title either with closing characters or not: "┌", "┐"
// e.g. ┌────────────────[ RESPONSE HEADERS ]────────────────┐
func printBoxTitleLine(color Color, title string, length int, closing bool) {
	leftCorner, rightCorner := "┌", "┐"

	if !closing {
		leftCorner, rightCorner = borderChar, borderChar
	}

	// Calculate how many border characters are needed
	titleFormatted := fmt.Sprintf("[ %s ]", title)
	borderSpace := length - len(titleFormatted) - 2 // 2 for corners
	leftLen := borderSpace / 2
	rightLen := borderSpace - leftLen

	// Build the line
	line := leftCorner +
		strings.Repeat(borderChar, leftLen) +
		titleFormatted +
		strings.Repeat(borderChar, rightLen) +
		rightCorner

	fmt.Println(string(color) + line + reset)
}

// Prints out a horizontal line either with closing characters or not: "└", "┘"
func printHorizontalBorder(color Color, length int, closing bool) {
	leftCorner, rightCorner := "└", "┘"
	if !closing {
		leftCorner, rightCorner = borderChar, borderChar
	}

	line := leftCorner + strings.Repeat(borderChar, length-2) + rightCorner + reset
	fmt.Println(string(color) + line)
}

// wrapInBox wraps the output of a function call (fn) inside a styled box with a title.
func wrapInBox(color Color, title string, length int, fn func()) {
	printBoxTitleLine(color, title, length, true)
	fn()
	printHorizontalBorder(color, length, true)
}

// returns the provided string length,
// defaulting to a minimum of 13 for shorter strings
func getLen(str string) int {
	if len(str) < 13 {
		return 13
	}

	return len(str)
}

// prints a formatted key-value pair within a box layout,
// wrapping the value text if it exceeds the allowed width.
func printWrappedLine(keyColor Color, key, value string) {
	prefix := fmt.Sprintf("%s│%s %s%-13s%s : ", green, reset, keyColor, key, reset)
	prefixLen := len(prefix) - len(green) - len(reset) - len(keyColor) - len(reset)
	// the actual prefix size without colors
	actualPrefixLen := getLen(key) + 5

	lineWidth := boxWidth - prefixLen
	valueLines := wrapText(value, lineWidth)

	for i, line := range valueLines {
		if i == 0 {
			if len(line) < lineWidth {
				line += strings.Repeat(" ", lineWidth-len(line))
			}
			fmt.Printf("%s%s%s %s│%s\n", prefix, reset, line, green, reset)
		} else {
			line = strings.Repeat(" ", actualPrefixLen-2) + line
			if len(line) < boxWidth-4 {
				line += strings.Repeat(" ", boxWidth-len(line)-4)
			}
			fmt.Printf("%s│ %s%s %s│%s\n", green, reset, line, green, reset)
		}
	}
}

// wrapText splits the input text into lines of at most `width` characters each.
func wrapText(text string, width int) []string {
	var lines []string
	for len(text) > width {
		lines = append(lines, text[:width])
		text = text[width:]
	}
	if text != "" {
		lines = append(lines, text)
	}
	return lines
}

// TODO: remove this and use utils.IsBigDataAction after refactoring
// and creating 'internal' package
func isLargeDataAction(ctx *fiber.Ctx) bool {
	if ctx.Method() == http.MethodPut && len(strings.Split(ctx.Path(), "/")) >= 3 {
		if !ctx.Request().URI().QueryArgs().Has("tagging") && ctx.Get("X-Amz-Copy-Source") == "" && !ctx.Request().URI().QueryArgs().Has("acl") {
			return true
		}
	}
	return false
}
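For reference, a quick usage sketch of the `wrapText` helper from the deleted file above — it hard-wraps at the byte level, so multi-byte characters can be split; that is fine for the ASCII header values it is applied to:

```go
package main

import "fmt"

// wrapText is copied from the deleted debuglogger file so this demo is
// self-contained.
func wrapText(text string, width int) []string {
	var lines []string
	for len(text) > width {
		lines = append(lines, text[:width])
		text = text[width:]
	}
	if text != "" {
		lines = append(lines, text)
	}
	return lines
}

func main() {
	for _, line := range wrapText("abcdefghijklmnopqrstuvwxy", 10) {
		fmt.Println(line)
	}
	// Output:
	// abcdefghij
	// klmnopqrst
	// uvwxy
}
```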
@@ -1,8 +1,9 @@
version: "3"
services:
  posix:
    build:
      context: .
      dockerfile: tests/Dockerfile.dev
      dockerfile: ./Dockerfile.dev
      args:
        - IAM_DIR=${IAM_DIR}
        - SETUP_DIR=${SETUP_DIR}
@@ -14,7 +15,7 @@ services:
  proxy:
    build:
      context: .
      dockerfile: tests/Dockerfile.dev
      dockerfile: ./Dockerfile.dev
    volumes:
      - ./:/app
    ports:
@@ -28,15 +29,15 @@ services:
      - "10002:10002"
    restart: always
    hostname: azurite
    command: "azurite --oauth basic --cert /tests/certs/azurite.pem --key /tests/certs/azurite-key.pem --blobHost 0.0.0.0 --skipApiVersionCheck"
    command: "azurite --oauth basic --cert /tests/certs/azurite.pem --key /tests/certs/azurite-key.pem --blobHost 0.0.0.0"
    volumes:
      - ./tests/certs:/tests/certs
      - ./certs:/certs
  azuritegw:
    build:
      context: .
      dockerfile: tests/Dockerfile.dev
      dockerfile: ./Dockerfile.dev
    volumes:
      - ./:/app
    ports:
      - 7070:7070
    command: ["sh", "-c", CompileDaemon -build="go build -C ./cmd/versitygw -buildvcs=false -o versitygw" -command="./cmd/versitygw/versitygw -a $ACCESS_KEY_ID -s $SECRET_ACCESS_KEY --iam-dir $IAM_DIR azure -a $AZ_ACCOUNT_NAME -k $AZ_ACCOUNT_KEY --url https://azurite:10000/$AZ_ACCOUNT_NAME"]
    command: ["sh", "-c", CompileDaemon -build="go build -C ./cmd/versitygw -o versitygw" -command="./cmd/versitygw/versitygw -a $ACCESS_KEY_ID -s $SECRET_ACCESS_KEY --iam-dir $IAM_DIR azure -a $AZ_ACCOUNT_NAME -k $AZ_ACCOUNT_KEY --url https://azurite:10000/$AZ_ACCOUNT_NAME"]
@@ -1,51 +0,0 @@
#!/bin/sh
set -e

BIN="${VGW_BINARY:-/usr/local/bin/versitygw}"

if [ ! -x "$BIN" ]; then
	echo "Entrypoint error: versitygw binary not found at $BIN" >&2
	exit 1
fi

# If arguments were provided, run them directly for backward compatibility.
if [ "$#" -gt 0 ]; then
	exec "$BIN" "$@"
fi

backend="${VGW_BACKEND:-}"
if [ -z "$backend" ]; then
	cat >&2 <<'EOF'
No command arguments were provided and VGW_BACKEND is unset.
Set VGW_BACKEND to one of: posix, scoutfs, s3, azure, plugin
or pass explicit arguments to the container to run the versitygw command directly.
EOF
	exit 1
fi

case "$backend" in
	posix|scoutfs|s3|azure|plugin)
		;;
	*)
		echo "VGW_BACKEND invalid backend (was '$backend')." >&2
		exit 1
		;;
esac

set -- "$backend"

if [ -n "${VGW_BACKEND_ARG:-}" ]; then
	set -- "$@" "$VGW_BACKEND_ARG"
fi

if [ -n "${VGW_BACKEND_ARGS:-}" ]; then
	# shellcheck disable=SC2086
	set -- "$@" ${VGW_BACKEND_ARGS}
fi

if [ -n "${VGW_ARGS:-}" ]; then
	# shellcheck disable=SC2086
	set -- "$@" ${VGW_ARGS}
fi

exec "$BIN" "$@"
@@ -1,19 +0,0 @@
# Versity Gateway Dashboard

This project is a dashboard that visualizes data in the six metrics emitted by the Versity Gateway, displayed in Grafana.

The Versity Gateway emits metrics in the statsd format. We used Telegraf as the bridge from statsd to InfluxDB.

This implementation uses the InfluxQL query language.

## Usage

From the root of this repository, run `docker compose -f docker-compose-metrics.yml up` to start the stack.

To shut it down, run `docker compose -f docker-compose-metrics.yml down -v`.

The Grafana database is explicitly not destroyed when shutting down containers. The InfluxDB one, however, is.

The dashboard is automatically provisioned at container bring-up and is visible at http://localhost:3000 with username: `admin` and password: `admin`.

To use the gateway and generate metrics, `source metrics-exploration/aws_env_setup.sh` and start using your aws cli as usual.
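The pipeline described above hinges on versitygw speaking the statsd line protocol to Telegraf. A minimal standard-library sketch of what such an emission looks like on the wire — the metric name and tag are invented for the demo, and versitygw's real metric names may differ:

```go
package main

import (
	"fmt"
	"log"
	"net"
)

func main() {
	// Telegraf's statsd input in this stack listens on UDP :8125.
	conn, err := net.Dial("udp", "127.0.0.1:8125")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// InfluxDB-flavor statsd counter: <name>[,tag=value]:<value>|c
	if _, err := fmt.Fprintf(conn, "demo_requests,service=versitygw:1|c\n"); err != nil {
		log.Fatal(err)
	}
}
```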
@@ -1,4 +0,0 @@
export AWS_SECRET_ACCESS_KEY=password
export AWS_ACCESS_KEY_ID=user
export AWS_ENDPOINT_URL=http://127.0.0.1:7070
export AWS_REGION=us-east-1
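With these variables exported, here is a hedged sketch of pointing an aws-sdk-go-v2 client at the local gateway — recent SDK releases honor `AWS_ENDPOINT_URL` automatically, and path-style addressing is forced since the gateway defaults to path-style buckets:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Reads AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION and, in
	// recent SDK versions, AWS_ENDPOINT_URL from the environment set above.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.UsePathStyle = true // the gateway defaults to path-style buckets
	})

	out, err := client.ListBuckets(ctx, &s3.ListBucketsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range out.Buckets {
		fmt.Println(*b.Name)
	}
}
```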
@@ -1,64 +0,0 @@
services:
  telegraf:
    image: telegraf
    container_name: telegraf
    restart: always
    volumes:
      - ./metrics-exploration/telegraf.conf:/etc/telegraf/telegraf.conf:ro
    depends_on:
      - influxdb
    links:
      - influxdb
    ports:
      - '8125:8125/udp'

  influxdb:
    image: influxdb
    container_name: influxdb
    restart: always
    environment:
      - DOCKER_INFLUXDB_INIT_MODE=setup
      - DOCKER_INFLUXDB_INIT_USERNAME=admin
      - DOCKER_INFLUXDB_INIT_PASSWORD=adminpass
      - DOCKER_INFLUXDB_INIT_ORG=myorg
      - DOCKER_INFLUXDB_INIT_BUCKET=metrics
      - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-super-secret-auth-token
    ports:
      - '8086:8086'
    volumes:
      - influxdb_data:/var/lib/influxdb

  grafana:
    image: grafana/grafana
    container_name: grafana-server
    restart: always
    depends_on:
      - influxdb
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - GF_INSTALL_PLUGINS=
    links:
      - influxdb
    ports:
      - '3000:3000'
    volumes:
      - ./metrics-exploration/grafana_data:/var/lib/grafana
      - ./metrics-exploration/provisioning:/etc/grafana/provisioning

  versitygw:
    image: versity/versitygw:latest
    container_name: versitygw
    ports:
      - "7070:7070"
    environment:
      - ROOT_ACCESS_KEY=user
      - ROOT_SECRET_KEY=password
      - VGW_METRICS_STATSD_SERVERS=telegraf:8125
    depends_on:
      - telegraf
    command: >
      posix /tmp/vgw

volumes:
  influxdb_data: {}
@@ -1,64 +0,0 @@
services:
  telegraf:
    image: telegraf
    container_name: telegraf
    restart: always
    volumes:
      - ./telegraf.conf:/etc/telegraf/telegraf.conf:ro
    depends_on:
      - influxdb
    links:
      - influxdb
    ports:
      - '8125:8125/udp'

  influxdb:
    image: influxdb
    container_name: influxdb
    restart: always
    environment:
      - DOCKER_INFLUXDB_INIT_MODE=setup
      - DOCKER_INFLUXDB_INIT_USERNAME=admin
      - DOCKER_INFLUXDB_INIT_PASSWORD=adminpass
      - DOCKER_INFLUXDB_INIT_ORG=myorg
      - DOCKER_INFLUXDB_INIT_BUCKET=metrics
      - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-super-secret-auth-token
    ports:
      - '8086:8086'
    volumes:
      - influxdb_data:/var/lib/influxdb

  grafana:
    image: grafana/grafana
    container_name: grafana-server
    restart: always
    depends_on:
      - influxdb
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - GF_INSTALL_PLUGINS=
    links:
      - influxdb
    ports:
      - '3000:3000'
    volumes:
      - ./grafana_data:/var/lib/grafana
      - ./provisioning:/etc/grafana/provisioning

  versitygw:
    image: versity/versitygw:latest
    container_name: versitygw
    ports:
      - "7070:7070"
    environment:
      - ROOT_ACCESS_KEY=user
      - ROOT_SECRET_KEY=password
      - VGW_METRICS_STATSD_SERVERS=telegraf:8125
    depends_on:
      - telegraf
    command: >
      posix /tmp/vgw

volumes:
  influxdb_data: {}
@@ -1,25 +0,0 @@
apiVersion: 1

providers:
  # <string> an unique provider name. Required
  - name: 'influxql'
    # <int> Org id. Default to 1
    orgId: 1
    # <string> name of the dashboard folder.
    folder: 'influxql'
    # <string> folder UID. will be automatically generated if not specified
    folderUid: ''
    # <string> provider type. Default to 'file'
    type: file
    # <bool> disable dashboard deletion
    disableDeletion: false
    # <int> how often Grafana will scan for changed dashboards
    updateIntervalSeconds: 10
    # <bool> allow updating provisioned dashboards from the UI
    allowUiUpdates: true
    options:
      # <string, required> path to dashboard files on disk. Required when using the 'file' type
      path: /etc/grafana/provisioning/dashboards/influxql
      # <bool> use folder names from filesystem to create folders in Grafana
      foldersFromFilesStructure: true

File diff suppressed because it is too large
@@ -1,13 +0,0 @@
apiVersion: 1

datasources:
  - name: influxdb
    type: influxdb
    isDefault: true
    access: proxy
    url: http://influxdb:8086
    jsonData:
      dbName: 'metrics'
      httpHeaderName1: 'Authorization'
    secureJsonData:
      httpHeaderValue1: 'Token my-super-secret-auth-token'
@@ -1,34 +0,0 @@
[global_tags]

[agent]
  debug = true
  quiet = false
  interval = "60s"
  round_interval = true
  metric_batch_size = 1000
  metric_buffer_limit = 10000
  collection_jitter = "0s"
  flush_interval = "10s"
  flush_jitter = "0s"
  precision = ""
  hostname = "versitygw"
  omit_hostname = false

[[outputs.file]]
  files = ["stdout"]

[[outputs.influxdb_v2]]
  urls = ["http://influxdb:8086"]
  timeout = "5s"
  token = "my-super-secret-auth-token"
  organization = "myorg"
  bucket = "metrics"

[[inputs.statsd]]
  protocol = "udp4"
  service_address = ":8125"
  percentiles = [90]
  metric_separator = "_"
  datadog_extensions = false
  allowed_pending_messages = 10000
  percentile_limit = 1000
@@ -1,6 +0,0 @@
#!/usr/bin/env bash

. ./aws_env_setup.sh

aws s3 mb s3://test
aws s3 cp docker-compose.yml s3://test/test.yaml
@@ -23,8 +23,7 @@
# VersityGW Required Options #
##############################

# VGW_BACKEND must be defined, and must be one of: posix, scoutfs, s3, azure,
# or plugin
# VGW_BACKEND must be defined, and must be one of: posix, scoutfs, or s3
# This defines the backend that the VGW will use for data access.
VGW_BACKEND=posix

@@ -100,32 +99,6 @@ ROOT_SECRET_ACCESS_KEY=
# endpoint is unauthenticated, and returns a 200 status for GET.
#VGW_HEALTH=

# Enable VGW_READ_ONLY to only allow read operations to the S3 server. No write
# operations will be allowed.
#VGW_READ_ONLY=false

# The VGW_VIRTUAL_DOMAIN option enables the virtual host style bucket
# addressing. The path style addressing is the default, and remains enabled
# even when virtual host style is enabled. The VGW_VIRTUAL_DOMAIN option
# specifies the domain name that will be used for the virtual host style
# addressing. For virtual addressing, access to a bucket is in the request
# form:
# https://<bucket>.<VGW_VIRTUAL_DOMAIN>/
# for example: https://mybucket.example.com/ where
# VGW_VIRTUAL_DOMAIN=example.com
# and all subdomains of VGW_VIRTUAL_DOMAIN should be reserved for buckets.
# This means that virtual host addressing will generally require a DNS
# entry for each bucket that needs to be accessed.
# The default path style request is of the form:
# https://<VGW_ENDPOINT>/<bucket>
#VGW_VIRTUAL_DOMAIN=

# By default, versitygw will enforce similar bucket naming rules as described
# in https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
# Set to true to allow legacy or non-DNS-compliant bucket names by skipping
# strict validation checks.
#VGW_DISABLE_STRICT_BUCKET_NAMES=false
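To make the two request forms concrete, a small sketch that builds both URL styles for a bucket — plain string construction, no gateway API involved, and the host names are examples:

```go
package main

import "fmt"

// virtualHostURL builds the virtual-host form: https://<bucket>.<domain>/
func virtualHostURL(bucket, domain string) string {
	return fmt.Sprintf("https://%s.%s/", bucket, domain)
}

// pathStyleURL builds the default path-style form: https://<endpoint>/<bucket>
func pathStyleURL(endpoint, bucket string) string {
	return fmt.Sprintf("https://%s/%s", endpoint, bucket)
}

func main() {
	fmt.Println(virtualHostURL("mybucket", "example.com")) // https://mybucket.example.com/
	fmt.Println(pathStyleURL("gw.example.com", "mybucket")) // https://gw.example.com/mybucket
}
```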

###############
# Access Logs #
###############
@@ -176,67 +149,10 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_EVENT_NATS_URL=
#VGW_EVENT_NATS_TOPIC=

# Bucket events can be sent to a RabbitMQ messaging service. When
# VGW_EVENT_RABBITMQ_URL is specified, events will be published to the specified
# exchange (VGW_EVENT_RABBITMQ_EXCHANGE) using the routing key
# (VGW_EVENT_RABBITMQ_ROUTING_KEY). If exchange is blank the default exchange is
# used. If routing key is blank, it will be left empty (the server can bind a
# queue with an empty binding key or you can set an explicit key).
# Example URL formats:
# amqp://user:pass@rabbitmq:5672/
# amqps://user:pass@rabbitmq:5671/vhost
#VGW_EVENT_RABBITMQ_URL=
#VGW_EVENT_RABBITMQ_EXCHANGE=
#VGW_EVENT_RABBITMQ_ROUTING_KEY=
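For illustration, a hedged sketch of the publishing side these settings configure, assuming the github.com/rabbitmq/amqp091-go client; the routing key and event body are invented, and an empty exchange name selects the default exchange as described above:

```go
package main

import (
	"context"
	"log"

	amqp "github.com/rabbitmq/amqp091-go"
)

func main() {
	conn, err := amqp.Dial("amqp://user:pass@rabbitmq:5672/")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ch, err := conn.Channel()
	if err != nil {
		log.Fatal(err)
	}
	defer ch.Close()

	err = ch.PublishWithContext(context.Background(),
		"",              // exchange: "" is the default exchange
		"bucket-events", // routing key (illustrative name)
		false,           // mandatory
		false,           // immediate
		amqp.Publishing{
			ContentType: "application/json",
			Body:        []byte(`{"eventName":"s3:ObjectCreated:Put"}`),
		})
	if err != nil {
		log.Fatal(err)
	}
}
```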

# Bucket events can be sent to a webhook. When VGW_EVENT_WEBHOOK_URL is
# specified, all configured bucket events will be sent to the webhook.
#VGW_EVENT_WEBHOOK_URL=

# Bucket events can be filtered for any of the above event types. The
# VGW_EVENT_FILTER option specifies a config file that contains the event
# filter rules. The event filter rules are used to determine which events are
# sent to the configured event services. Run:
# versitygw utils gen-event-filter-config --path .
# to generate a default rules file "event_config.json" in the current directory.
#VGW_EVENT_FILTER=

###########
# Web GUI #
###########

# The VGW_WEBUI_PORT option enables the Web GUI server on the specified
# listening address. The Web GUI provides a browser-based interface for managing
# users, buckets and objects. The format can be either ':port' to listen on all
# interfaces (e.g., ':7071') or 'host:port' to listen on a specific interface
# (e.g., '127.0.0.1:7071' or 'localhost:7071'). When omitted, the Web GUI is
# disabled.
#VGW_WEBUI_PORT=

# The VGW_WEBUI_CERT and VGW_WEBUI_KEY options specify the TLS certificate and
# private key for the Web GUI server. If these are not specified and TLS is
# configured for the gateway (VGW_CERT and VGW_KEY), the Web GUI will use the
# same certificates as the gateway. If neither are specified, the Web GUI will
# run without TLS (HTTP only). These options allow the Web GUI to use different
# certificates than the main S3 gateway.
#VGW_WEBUI_CERT=
#VGW_WEBUI_KEY=

# The VGW_WEBUI_NO_TLS option disables TLS for the Web GUI even if TLS
# certificates are configured for the gateway. Set to true to force the Web GUI
# to use HTTP instead of HTTPS. This can be useful when running the Web GUI
# behind a reverse proxy that handles TLS termination.
#VGW_WEBUI_NO_TLS=false

# The VGW_CORS_ALLOW_ORIGIN option sets the default CORS (Cross-Origin Resource
# Sharing) Access-Control-Allow-Origin header value. This header is applied to
# responses when no bucket-specific CORS configuration exists, and for all admin
# API responses. When the Web GUI is enabled and this option is not set, it
# defaults to '*' (allow all origins) for usability. For production environments,
# it is recommended to set this to a specific origin (e.g.,
# 'https://webui.example.com') to improve security.
#VGW_CORS_ALLOW_ORIGIN=

#######################
# Debug / Diagnostics #
#######################
@@ -266,40 +182,7 @@ ROOT_SECRET_ACCESS_KEY=
# as a dedicated IAM service.
#VGW_IAM_DIR=

# The Vault options will enable the Vault IAM service with accounts stored in
# the HashiCorp Vault service. The Vault URL is the address and port of the
# Vault server with the format <IP/host>:<port>. A root token can be used for
# testing, but it is recommended to use the role based authentication in
# production. The Vault server certificate, client certificate, and client
# certificate key are optional, and will default to not verifying the server
# certificate and not using client certificates. The Vault server certificate
# is used to verify the Vault server, and the client certificate and key are
# used to authenticate the gateway to the Vault server. See wiki documentation
# for an example of using Vault in dev mode with the gateway.
#VGW_IAM_VAULT_ENDPOINT_URL=
#VGW_IAM_VAULT_SECRET_STORAGE_PATH=
#VGW_IAM_VAULT_MOUNT_PATH=
#VGW_IAM_VAULT_ROOT_TOKEN=
#VGW_IAM_VAULT_ROLE_ID=
#VGW_IAM_VAULT_ROLE_SECRET=
#VGW_IAM_VAULT_SERVER_CERT=
#VGW_IAM_VAULT_CLIENT_CERT=
#VGW_IAM_VAULT_CLIENT_CERT_KEY=

# The VGW_S3 IAM service is similar to the internal IAM service, but instead
# stores the account information JSON encoded in an S3 object. This should use
# a bucket that is not accessible to general users when using s3 backend to
# prevent access to account credentials. This IAM service is added for
# convenience, but is not considered as secure or scalable as a dedicated IAM
# service.
#VGW_S3_IAM_ACCESS_KEY=
#VGW_S3_IAM_SECRET_KEY=
#VGW_S3_IAM_REGION=
#VGW_S3_IAM_ENDPOINT=
#VGW_S3_IAM_BUCKET=
#VGW_S3_IAM_NO_VERIFY=

# The LDAP options will enable the LDAP IAM service with accounts stored in an
# The ldap options will enable the LDAP IAM service with accounts stored in an
# external LDAP service. The VGW_IAM_LDAP_ACCESS_ATR, VGW_IAM_LDAP_SECRET_ATR,
# and VGW_IAM_LDAP_ROLE_ATR define the LDAP attributes that map to access,
# secret credentials and role respectively. The other options are used to
@@ -313,31 +196,19 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_IAM_LDAP_ACCESS_ATR=
#VGW_IAM_LDAP_SECRET_ATR=
#VGW_IAM_LDAP_ROLE_ATR=
#VGW_IAM_LDAP_USER_ID_ATR=
#VGW_IAM_LDAP_GROUP_ID_ATR=
# Disable TLS certificate verification for LDAP connections (insecure, allows
# self-signed certificates). This should only be used in testing environments
# or when using self-signed certificates. The default is false (verification
# enabled).
#VGW_IAM_LDAP_TLS_SKIP_VERIFY=false

# The FreeIPA options will enable the FreeIPA IAM service with accounts stored
# in an external FreeIPA service. Currently the FreeIPA IAM service only
# supports account retrieval. Creating and modifying accounts must be done
# outside of the versitygw service.
# FreeIPA server url e.g. https://ipa.example.test
#VGW_IPA_HOST=
# A name of the user vault containing their secret
#VGW_IPA_VAULT_NAME=
# Username used to connect to FreeIPA (requires permissions to read user vault
# contents)
#VGW_IPA_USER=
# Password of the user used to connect to FreeIPA
#VGW_IPA_PASSWORD=
# Disable TLS certificate verification of the FreeIPA server
#VGW_IPA_INSECURE=false
# FreeIPA IAM debug output
#VGW_IPA_DEBUG=false
# The VGW_S3 IAM service is similar to the internal IAM service, but instead
# stores the account information JSON encoded in an S3 object. This should use
# a bucket that is not accessible to general users when using s3 backend to
# prevent access to account credentials. This IAM service is added for
# convenience, but is not considered as secure or scalable as a dedicated IAM
# service.
#VGW_S3_IAM_ACCESS_KEY=
#VGW_S3_IAM_SECRET_KEY=
#VGW_S3_IAM_REGION=
#VGW_S3_IAM_ENDPOINT=
#VGW_S3_IAM_BUCKET=
#VGW_S3_IAM_NO_VERIFY=

###############
# IAM caching #
@@ -357,29 +228,6 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_IAM_CACHE_TTL=120
#VGW_IAM_CACHE_PRUNE=3600
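A minimal sketch of the TTL-plus-prune idea behind these two settings: entries expire after a TTL, and a background sweep evicts them at the prune interval. This is illustrative only, not the gateway's actual IAM cache implementation:

```go
package main

import (
	"sync"
	"time"
)

type entry struct {
	value   any
	expires time.Time
}

// TTLCache holds values for a fixed TTL and prunes expired entries
// periodically, mirroring VGW_IAM_CACHE_TTL / VGW_IAM_CACHE_PRUNE.
type TTLCache struct {
	mu  sync.Mutex
	ttl time.Duration
	m   map[string]entry
}

func NewTTLCache(ttl, prune time.Duration) *TTLCache {
	c := &TTLCache{ttl: ttl, m: make(map[string]entry)}
	go func() {
		// time.Tick is fine for a process-lifetime sweeper in a sketch.
		for range time.Tick(prune) {
			now := time.Now()
			c.mu.Lock()
			for k, e := range c.m {
				if now.After(e.expires) {
					delete(c.m, k)
				}
			}
			c.mu.Unlock()
		}
	}()
	return c
}

func (c *TTLCache) Set(k string, v any) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m[k] = entry{value: v, expires: time.Now().Add(c.ttl)}
}

func (c *TTLCache) Get(k string) (any, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	e, ok := c.m[k]
	if !ok || time.Now().After(e.expires) {
		return nil, false
	}
	return e.value, true
}

func main() {
	c := NewTTLCache(120*time.Second, 3600*time.Second)
	c.Set("access-key", "account-record")
	if v, ok := c.Get("access-key"); ok {
		_ = v // cache hit within the TTL
	}
}
```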

###########
# Metrics #
###########

# The metrics service name is a tag that is added to all metrics to help
# identify the source of the metrics. This is especially useful when multiple
# gateways are running. The default is the hostname of the system.
#VGW_METRICS_SERVICE_NAME=$HOSTNAME

# The metrics service will send metrics to the configured statsd servers. The
# servers are specified as a comma separated list of host:port pairs. The
# default is to not send metrics to any statsd servers. The gateway uses
# InfluxDB flavor of statsd metrics tags for the StatsD metrics type.
#VGW_METRICS_STATSD_SERVERS=

# The metrics service will send metrics to the configured dogstatsd servers.
# The servers are specified as a comma separated list of host:port pairs. The
# default is to not send metrics to any dogstatsd servers. Generally
# DataDog recommends installing a local agent to collect metrics and forward
# them to the DataDog service. In this case the option value would be the
# local agent address: 127.0.0.1:8125.
#VGW_METRICS_DOGSTATS_SERVERS=

######################################
# VersityGW Backend Specific Options #
######################################
@@ -406,49 +254,11 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_CHOWN_UID=false
#VGW_CHOWN_GID=false

# The VGW_BUCKET_LINKS option will enable the gateway to treat symbolic links
# to directories at the top level gateway directory as buckets.
#VGW_BUCKET_LINKS=false

# The default permissions mode when creating new directories is 0755. Use
# VGW_DIR_PERMS option to set a different mode for any new directory that the
# gateway creates. This applies to buckets created through the gateway as well
# as any parent directories automatically created with object uploads.
#VGW_DIR_PERMS=0755

# To enable object versions, the VGW_VERSIONING_DIR option must be set to the
# directory that will be used to store the object versions. The version
# directory must NOT be a subdirectory of the VGW_BACKEND_ARG directory.
#VGW_VERSIONING_DIR=

# The gateway uses xattrs to store metadata for objects by default. For systems
# that do not support xattrs, the VGW_META_SIDECAR option can be set to a
# directory that will be used to store the metadata for objects. This is
# currently experimental, and may have issues for some edge cases.
#VGW_META_SIDECAR=

# The VGW_META_NONE option will disable the metadata functionality for the
# gateway. This will cause the gateway to not store any metadata for objects
# or buckets. This includes bucket ACLs and Policy. This may be useful for
# read only access to pre-existing data where the gateway should not modify
# the data. It is recommended to enable VGW_READ_ONLY (Global Options) along
# with this.
#VGW_META_NONE=false

# The gateway will use O_TMPFILE for writing objects while uploading and
# link the file to the final object name when the upload is complete if the
# filesystem supports O_TMPFILE. This creates an atomic object creation
# that is not visible to other clients or racing uploads until the upload
# is complete. This will not work if there is a different filesystem mounted
# below the bucket level than where the bucket resides. The VGW_DISABLE_OTMP
# option can be set to true to disable this functionality and force the fallback
# mode when O_TMPFILE is not available. This fallback will create a temporary
# file in the bucket directory and rename it to the final object name when
# the upload is complete if the final location is in the same filesystem, or
# copy the file to the final location if the final location is in a different
# filesystem. This fallback mode is still atomic, but may be less efficient
# than O_TMPFILE when the data needs to be copied into the final location.
#VGW_DISABLE_OTMP=false
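A hedged, Linux-only sketch of the O_TMPFILE flow described above, using golang.org/x/sys/unix: write into an unnamed file in the target directory, then linkat() it into place via /proc/self/fd so the name appears atomically. The paths are illustrative and error handling is minimal; this is not the gateway's actual upload path:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func atomicWrite(dir, name string, data []byte) error {
	// O_TMPFILE creates an unnamed file inside dir; other clients cannot
	// see it until it is linked into the namespace below.
	fd, err := unix.Open(dir, unix.O_TMPFILE|unix.O_WRONLY, 0644)
	if err != nil {
		return fmt.Errorf("open tmpfile: %w", err)
	}
	defer unix.Close(fd)

	if _, err := unix.Write(fd, data); err != nil {
		return fmt.Errorf("write: %w", err)
	}

	// Give the anonymous file its final name. AT_SYMLINK_FOLLOW with the
	// /proc path is the unprivileged way to link an O_TMPFILE fd.
	proc := fmt.Sprintf("/proc/self/fd/%d", fd)
	if err := unix.Linkat(unix.AT_FDCWD, proc, unix.AT_FDCWD, dir+"/"+name, unix.AT_SYMLINK_FOLLOW); err != nil {
		return fmt.Errorf("linkat: %w", err)
	}
	return nil
}

func main() {
	if err := atomicWrite("/tmp", "demo-object", []byte("hello\n")); err != nil {
		log.Fatal(err)
	}
}
```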
# The VGW_READ_ONLY option will disable all write operations to the backend
# filesystem. This is useful for creating a read-only gateway for clients.
# This will prevent any PUT, POST, DELETE, and multipart upload operations
# for objects and buckets as well as preventing updating metadata for objects.
#VGW_READ_ONLY=false

###########
# scoutfs #
@@ -481,35 +291,11 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_CHOWN_UID=false
#VGW_CHOWN_GID=false

# The VGW_SET_PROJECT_ID option will enable setting account defined ProjectID
# for newly created buckets, files, and directories if the account ProjectID
# is greater than 0 and the filesystem format version supports project IDs.
#VGW_SET_PROJECT_ID=false

# The VGW_BUCKET_LINKS option will enable the gateway to treat symbolic links
# to directories at the top level gateway directory as buckets.
#VGW_BUCKET_LINKS=false

# The default permissions mode when creating new directories is 0755. Use
# VGW_DIR_PERMS option to set a different mode for any new directory that the
# gateway creates. This applies to buckets created through the gateway as well
# as any parent directories automatically created with object uploads.
#VGW_DIR_PERMS=0755

# To enable object versions, the VGW_VERSIONING_DIR option must be set to the
# directory that will be used to store the object versions. The version
# directory must NOT be a subdirectory of the VGW_BACKEND_ARG directory.
# There may be implications for archive policy updates to include version
# directory as well. It is recommended to discuss archive implications of
# versioning with Versity support before enabling on an archiving filesystem.
#VGW_VERSIONING_DIR=

# The default behavior of the gateway is to automatically set the noarchive
# flag on the multipart upload parts while the multipart upload is in progress.
# This is to prevent the parts from being archived since they are temporary
# and will be deleted after the multipart upload is completed or aborted. The
# VGW_DISABLE_NOARCHIVE option can be set to true to disable this behavior.
#VGW_DISABLE_NOARCHIVE=false
# The VGW_READ_ONLY option will disable all write operations to the backend
# filesystem. This is useful for creating a read-only gateway for clients.
# This will prevent any PUT, POST, DELETE, and multipart upload operations
# for objects and buckets as well as preventing updating metadata for objects.
#VGW_READ_ONLY=false

######
# s3 #
@@ -533,48 +319,3 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_S3_DISABLE_CHECKSUM=false
#VGW_S3_SSL_SKIP_VERIFY=false
#VGW_S3_DEBUG=false

#########
# azure #
#########

# The azure backend allows the gateway to store objects in Azure Blob Storage.
# Buckets created through the gateway map to blob containers within the
# configured storage account. This backend is useful when existing workflows
# expect an S3-compatible interface while data resides in Azure.

# When the azure backend is selected, configure credentials with one of the
# following approaches:
# - Shared key: Define AZ_ACCOUNT_NAME with the storage account name and
#   AZ_ACCESS_KEY with the corresponding account key.
# - SAS token: Set AZ_SAS_TOKEN to an account or container scoped SAS token.
#   Provide AZ_ENDPOINT if the token does not implicitly define the endpoint.
# - Default Azure credentials: Leave AZ_ACCOUNT_NAME and AZ_ACCESS_KEY blank
#   and configure the standard Azure identity environment variables supported
#   by the DefaultAzureCredential chain (e.g. AZURE_CLIENT_ID, AZURE_TENANT_ID,
#   AZURE_CLIENT_SECRET, managed identity, etc.).
# Use AZ_ENDPOINT to override the service URL (for example when targeting
# Azurite or a sovereign cloud). If unset, it defaults to
# https://<account>.blob.core.windows.net/ when an account name is provided.
#AZ_ACCOUNT_NAME=
#AZ_ACCESS_KEY=
#AZ_SAS_TOKEN=
#AZ_ENDPOINT=

##########
# plugin #
##########

# The plugin backend loads a Go plugin shared object that exposes a variable
# named "Backend" of type *plugins.BackendPlugin. The gateway uses the
# exported constructor to create the backend implementation at runtime.

# Set VGW_BACKEND_ARG to the absolute path of the compiled plugin (.so) file.
# The path must be readable by the gateway service account and remain stable
# across restarts.
#VGW_BACKEND_ARG=/usr/lib/versitygw/plugins/example.so

# Provide the plugin-specific configuration file path via VGW_PLUGIN_CONFIG.
# The gateway automatically forwards this value to the plugin backend when it
# starts up.
#VGW_PLUGIN_CONFIG=/etc/versitygw.d/example-plugin.conf
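A hedged sketch of how such a plugin is typically loaded with the standard library plugin package — the concrete *plugins.BackendPlugin type belongs to versitygw, so the looked-up symbol is only printed here rather than asserted:

```go
package main

import (
	"fmt"
	"log"
	"plugin"
)

func main() {
	// Path matches the example VGW_BACKEND_ARG above; adjust as needed.
	p, err := plugin.Open("/usr/lib/versitygw/plugins/example.so")
	if err != nil {
		log.Fatal(err)
	}

	// The gateway expects an exported variable named "Backend".
	sym, err := p.Lookup("Backend")
	if err != nil {
		log.Fatal(err)
	}

	// In the real gateway this would be type-asserted to
	// *plugins.BackendPlugin and its constructor invoked.
	fmt.Printf("loaded symbol of type %T\n", sym)
}
```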
@@ -17,7 +17,7 @@ Group=root

EnvironmentFile=/etc/versitygw.d/%i.conf

ExecStart=/bin/bash -c 'if [[ ! ("${VGW_BACKEND}" == "posix" || "${VGW_BACKEND}" == "scoutfs" || "${VGW_BACKEND}" == "s3" || "${VGW_BACKEND}" == "azure" || "${VGW_BACKEND}" == "plugin") ]]; then echo "VGW_BACKEND environment variable ${VGW_BACKEND} not set to valid backend type"; exit 1; fi && exec /usr/bin/versitygw "$VGW_BACKEND" "$VGW_BACKEND_ARG"'
ExecStart=/bin/bash -c 'if [[ ! ("${VGW_BACKEND}" == "posix" || "${VGW_BACKEND}" == "scoutfs" || "${VGW_BACKEND}" == "s3") ]]; then echo "VGW_BACKEND environment variable not set to one of posix, scoutfs, or s3"; exit 1; fi && exec /usr/bin/versitygw "$VGW_BACKEND" "$VGW_BACKEND_ARG"'

# Let systemd restart this service always
Restart=always
Some files were not shown because too many files have changed in this diff