Files
versitygw/s3api/utils/utils_test.go
niksis02 21a636b3b5 fix: add request headers and metadata headers limit
Fixes #1606

According to AWS documentation:
> *“The PUT request header is limited to 8 KB in size. Within the PUT request header, the user-defined metadata is limited to 2 KB in size. The size of user-defined metadata is measured by taking the sum of the number of bytes in the UTF-8 encoding of each key and value.”*

Based on this, object metadata size is now limited to **2 KB** for all object upload operations (`PutObject`, `CopyObject`, and `CreateMultipartUpload`).

Fixes handling of metadata HTTP headers when the same header appears multiple times, whether with different casing or with identical names. According to S3 behavior, these headers must be merged into a single lower-cased metadata key, with their values concatenated using commas.

Example:

```
x-amz-meta-Key: value1
x-amz-meta-kEy: value2
x-amz-meta-keY: value3
```

Translated to:

```
key: value1,value2,value3
```

This PR also introduces an **8 KB limit for request headers**. Although the S3 documentation explicitly mentions the 8 KB limit only for **PUT requests**, in practice this limit applies to **all requests**.

To enforce the header size limit, the Fiber configuration option `ReadBufferSize` is used. This parameter defines the maximum number of bytes read when parsing an incoming request. Note that this limit does not apply strictly to request headers only, since request parsing also includes other parts of the request line (e.g., the HTTP method, protocol string, and version such as `HTTP/1.1`). So `ReadBufferSize` is effectively a limit for request headers size, but not the exact limit.
2026-03-06 23:25:49 +04:00

1189 lines
27 KiB
Go

// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
import (
"bufio"
"bytes"
"encoding/xml"
"errors"
"math/rand"
"net/http"
"reflect"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/gofiber/fiber/v2"
"github.com/stretchr/testify/assert"
"github.com/valyala/fasthttp"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)
// TestCreateHttpRequestFromCtx verifies that createHttpRequestFromCtx turns a
// fiber context into an equivalent *http.Request, copying only the headers
// listed in the signed-headers slice.
func TestCreateHttpRequestFromCtx(t *testing.T) {
	app := fiber.New()

	// Case 1: plain context, no extra headers expected on the result.
	fctx := app.AcquireCtx(&fasthttp.RequestCtx{})
	freq := fctx.Request()
	wantReq, _ := http.NewRequest(string(freq.Header.Method()), freq.URI().String(), bytes.NewReader(freq.Body()))

	// Case 2: context carrying an X-Amz-Mfa header that must be copied over.
	fctx2 := app.AcquireCtx(&fasthttp.RequestCtx{})
	freq2 := fctx2.Request()
	freq2.Header.Add("X-Amz-Mfa", "Some valid Mfa")
	wantReq2, _ := http.NewRequest(string(freq2.Header.Method()), freq2.URI().String(), bytes.NewReader(freq2.Body()))
	wantReq2.Header.Add("X-Amz-Mfa", "Some valid Mfa")

	cases := []struct {
		name    string
		ctx     *fiber.Ctx
		want    *http.Request
		wantErr bool
		hdrs    []string
	}{
		{
			name:    "Success-response",
			ctx:     fctx,
			want:    wantReq,
			wantErr: false,
			hdrs:    []string{},
		},
		{
			name:    "Success-response-With-Headers",
			ctx:     fctx2,
			want:    wantReq2,
			wantErr: false,
			hdrs:    []string{"X-Amz-Mfa"},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := createHttpRequestFromCtx(tc.ctx, tc.hdrs, 0, true)
			if (err != nil) != tc.wantErr {
				t.Errorf("CreateHttpRequestFromCtx() error = %v, wantErr %v", err, tc.wantErr)
				return
			}
			// only the headers are compared; body/URL are derived from the same inputs
			if !reflect.DeepEqual(got.Header, tc.want.Header) {
				t.Errorf("CreateHttpRequestFromCtx() got = %v, want %v", got, tc.want)
			}
		})
	}
}
// createHeadersFromRawRequest builds a raw HTTP/1.1 PUT request carrying the
// given header key/value pairs, parses it via fasthttp.Request.Read, and
// returns the parsed request headers for use in tests.
func createHeadersFromRawRequest(t *testing.T, hdrs [][2]string) *fasthttp.RequestHeader {
	t.Helper()

	var raw bytes.Buffer
	raw.WriteString("PUT / HTTP/1.1\r\n")
	raw.WriteString("Host: example.com\r\n")
	for _, kv := range hdrs {
		raw.WriteString(kv[0] + ": " + kv[1] + "\r\n")
	}
	// blank line terminates the header section
	raw.WriteString("\r\n")

	var req fasthttp.Request
	if err := req.Read(bufio.NewReader(&raw)); err != nil {
		t.Fatalf("failed to parse raw request: %v", err)
	}
	return &req.Header
}
// TestGetUserMetaData checks that GetUserMetaData extracts x-amz-meta-*
// headers into a map with lower-cased keys, merges duplicate keys by joining
// values with commas, and enforces the cumulative metadata size limit.
func TestGetUserMetaData(t *testing.T) {
	cases := []struct {
		name        string
		headers     [][2]string
		expected    map[string]string
		expectedErr error
	}{
		{
			name: "no metadata headers",
			headers: [][2]string{
				{"Content-Type", "application/json"},
			},
			expected: map[string]string{},
		},
		{
			name: "single metadata header",
			headers: [][2]string{
				{"x-amz-meta-foo", "bar"},
			},
			expected: map[string]string{"foo": "bar"},
		},
		{
			name: "multiple metadata headers",
			headers: [][2]string{
				{"x-amz-meta-foo", "bar"},
				{"x-amz-meta-baz", "qux"},
			},
			expected: map[string]string{"foo": "bar", "baz": "qux"},
		},
		{
			name: "case-insensitive prefix and key lowercasing",
			headers: [][2]string{
				{"X-Amz-Meta-TestKey", "Value"},
			},
			expected: map[string]string{"testkey": "Value"},
		},
		{
			name: "ignores non-metadata headers",
			headers: [][2]string{
				{"authorization", "token"},
				{"x-amz-meta-foo", "bar"},
			},
			expected: map[string]string{"foo": "bar"},
		},
		{
			name: "metadata size exceeds limit (single header)",
			headers: [][2]string{
				{"x-amz-meta-big", strings.Repeat("a", maxMetadataSize+1)},
			},
			expectedErr: s3err.GetAPIError(s3err.ErrMetadataTooLarge),
		},
		{
			name: "metadata cumulative size exceeds limit (multiple headers)",
			headers: [][2]string{
				{"x-amz-meta-a", strings.Repeat("a", maxMetadataSize/2)},
				{"x-amz-meta-b", strings.Repeat("b", maxMetadataSize/2+10)},
			},
			expectedErr: s3err.GetAPIError(s3err.ErrMetadataTooLarge),
		},
		{
			name: "duplicate keys combined",
			headers: [][2]string{
				{"x-amz-meta-Foo", "first"},
				{"x-amz-meta-foo", "second"},
			},
			expected: map[string]string{"foo": "first,second"},
		},
		{
			name: "duplicate same value keys combined",
			headers: [][2]string{
				{"x-amz-meta-Foo", "value"},
				{"x-amz-meta-foo", "value"},
			},
			expected: map[string]string{"foo": "value,value"},
		},
		{
			name: "mixed keys",
			headers: [][2]string{
				{"x-amz-meta-Foo", "value2"},
				{"x-amz-meta-fOo", "value1"},
				{"x-amz-meta-foO", "value3"},
				{"x-amz-meta-bar", "baz"},
				{"x-amz-meta-quxx", "efg"},
				{"x-amz-meta-abc", "value"},
				{"x-amz-meta-Abc", "value"},
				{"x-amz-meta-aBc", "value"},
				{"x-amz-meta-abC", "value"},
				{"x-amz-meta-ABC", "value"},
			},
			expected: map[string]string{
				"foo":  "value2,value1,value3",
				"bar":  "baz",
				"quxx": "efg",
				"abc":  "value,value,value,value,value",
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			hdrs := createHeadersFromRawRequest(t, tc.headers)
			got, err := GetUserMetaData(hdrs)
			assert.Equal(t, tc.expectedErr, err)
			assert.Equal(t, tc.expected, got)
		})
	}
}
// Test_includeHeader verifies that includeHeader reports whether a header
// name is present in the list of signed headers.
func Test_includeHeader(t *testing.T) {
	type args struct {
		hdr        string
		signedHdrs []string
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "include-header-falsy-case",
			args: args{
				hdr:        "Content-Type",
				signedHdrs: []string{"X-Amz-Acl", "Content-Encoding"},
			},
			want: false,
		},
		{
			// the header IS in signedHdrs here, so this is the truthy case
			// (was mislabeled "include-header-falsy-case", duplicating the name above)
			name: "include-header-truthy-case",
			args: args{
				hdr:        "Content-Type",
				signedHdrs: []string{"X-Amz-Acl", "Content-Type"},
			},
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := includeHeader(tt.args.hdr, tt.args.signedHdrs); got != tt.want {
				t.Errorf("includeHeader() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestIsValidBucketName checks bucket name validation against S3 naming
// rules: minimum length, allowed characters, and leading/trailing symbols.
func TestIsValidBucketName(t *testing.T) {
	cases := []struct {
		name   string
		bucket string
		want   bool
	}{
		{name: "IsValidBucketName-short-name", bucket: "a", want: false},
		{name: "IsValidBucketName-start-with-hyphen", bucket: "-bucket", want: false},
		{name: "IsValidBucketName-start-with-dot", bucket: ".bucket", want: false},
		{name: "IsValidBucketName-contain-invalid-character", bucket: "my@bucket", want: false},
		{name: "IsValidBucketName-end-with-hyphen", bucket: "bucket-", want: false},
		{name: "IsValidBucketName-end-with-dot", bucket: "bucket.", want: false},
		{name: "IsValidBucketName-valid-bucket-name", bucket: "my-bucket", want: true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := IsValidBucketName(tc.bucket); got != tc.want {
				t.Errorf("IsValidBucketName() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestSetBucketNameValidationStrict toggles the strict bucket-name validation
// mode and verifies IsValidBucketName honors the current setting.
func TestSetBucketNameValidationStrict(t *testing.T) {
	SetBucketNameValidationStrict(true)
	// restore strict mode no matter how the test exits
	t.Cleanup(func() {
		SetBucketNameValidationStrict(true)
	})

	const invalidBucket = "Invalid_Bucket"

	if IsValidBucketName(invalidBucket) {
		t.Fatalf("expected %q to be invalid with strict validation", invalidBucket)
	}

	SetBucketNameValidationStrict(false)
	if !IsValidBucketName(invalidBucket) {
		t.Fatalf("expected %q to be accepted when strict validation disabled", invalidBucket)
	}

	SetBucketNameValidationStrict(true)
	if IsValidBucketName(invalidBucket) {
		t.Fatalf("expected %q to be invalid after re-enabling strict validation", invalidBucket)
	}
}
// TestParseMaxLimiter exercises ParseMaxLimiter across all limiter types:
// defaults for empty input, non-numeric and negative values, per-type error
// variants, and clamping of over-limit values.
func TestParseMaxLimiter(t *testing.T) {
	cases := []struct {
		name    string
		value   string
		limiter LimiterType
		wantErr error
		wantRes int32
	}{
		{name: "empty_string", value: "", limiter: LimiterTypeMaxKeys, wantErr: nil, wantRes: 1000},
		{name: "empty_max-buckets", value: "", limiter: LimiterTypeMaxBuckets, wantErr: nil, wantRes: 10000},
		{name: "invalid_max-parts", value: "bla", limiter: LimiterTypeMaxParts, wantErr: s3err.GetInvalidMaxLimiterErr(string(LimiterTypeMaxParts)), wantRes: 0},
		{name: "invalid_max-uploads", value: "invalid", limiter: LimiterTypeMaxUploads, wantErr: s3err.GetInvalidMaxLimiterErr(string(LimiterTypeMaxUploads)), wantRes: 0},
		{name: "invalid_max-buckets", value: "invalid", limiter: LimiterTypeMaxBuckets, wantErr: s3err.GetInvalidMaxLimiterErr(string(LimiterTypeMaxBuckets)), wantRes: 0},
		{name: "invalid_versions_max-keys", value: "invalid", limiter: LimiterTypeMaxKeys, wantErr: s3err.GetInvalidMaxLimiterErr(string(LimiterTypeMaxKeys)), wantRes: 0},
		{name: "negative_max-keys", value: "-5", limiter: LimiterTypeMaxKeys, wantErr: s3err.GetNegativeMaxLimiterErr(string(LimiterTypeMaxKeys)), wantRes: 0},
		{name: "negative_part-number-marker", value: "-5", limiter: LimiterTypePartNumberMarker, wantErr: s3err.GetNegativeMaxLimiterErr(string(LimiterTypePartNumberMarker)), wantRes: 0},
		{name: "negative_max-buckets", value: "-12", limiter: LimiterTypeMaxBuckets, wantErr: s3err.GetAPIError(s3err.ErrInvalidMaxBuckets), wantRes: 0},
		{name: "negative_versions_max-keys", value: "-12", limiter: LimiterTypeVersionsMaxKeys, wantErr: s3err.GetAPIError(s3err.ErrNegativeMaxKeys), wantRes: 0},
		{name: "greater_than_10000_max-buckets", value: "25000", limiter: LimiterTypeMaxBuckets, wantErr: s3err.GetAPIError(s3err.ErrInvalidMaxBuckets), wantRes: 0},
		{name: "greater_than_1000_max-buckets", value: "1300", limiter: LimiterTypeMaxBuckets, wantErr: nil, wantRes: 1300},
		// values above 1000 are clamped to 1000 for max-parts
		{name: "greater_than_1000", value: "25000", limiter: LimiterTypeMaxParts, wantErr: nil, wantRes: 1000},
		{name: "success", value: "23", limiter: LimiterTypeMaxUploads, wantErr: nil, wantRes: 23},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := ParseMaxLimiter(tc.value, tc.limiter)
			assert.Equal(t, tc.wantErr, err)
			assert.Equal(t, tc.wantRes, got)
		})
	}
}
// TestFilterObjectAttributes verifies that FilterObjectAttributes keeps only
// the requested attributes in the response, and that LastModified,
// DeleteMarker and VersionId are always stripped.
func TestFilterObjectAttributes(t *testing.T) {
	etag := "etag"
	objSize := int64(3222)
	delMarker := true

	cases := []struct {
		name     string
		attrs    map[s3response.ObjectAttributes]struct{}
		response s3response.GetObjectAttributesResponse
		want     s3response.GetObjectAttributesResponse
	}{
		{
			name: "keep only ETag",
			attrs: map[s3response.ObjectAttributes]struct{}{
				s3response.ObjectAttributesEtag: {},
			},
			response: s3response.GetObjectAttributesResponse{
				ObjectSize: &objSize,
				ETag:       &etag,
			},
			want: s3response.GetObjectAttributesResponse{ETag: &etag},
		},
		{
			name: "keep multiple props",
			attrs: map[s3response.ObjectAttributes]struct{}{
				s3response.ObjectAttributesEtag:         {},
				s3response.ObjectAttributesObjectSize:   {},
				s3response.ObjectAttributesStorageClass: {},
			},
			response: s3response.GetObjectAttributesResponse{
				ObjectSize:  &objSize,
				ETag:        &etag,
				ObjectParts: &s3response.ObjectParts{},
				VersionId:   &etag,
			},
			want: s3response.GetObjectAttributesResponse{
				ETag:       &etag,
				ObjectSize: &objSize,
			},
		},
		{
			name: "make sure LastModified, DeleteMarker and VersionId are removed",
			attrs: map[s3response.ObjectAttributes]struct{}{
				s3response.ObjectAttributesEtag: {},
			},
			response: s3response.GetObjectAttributesResponse{
				ObjectSize:   &objSize,
				ETag:         &etag,
				ObjectParts:  &s3response.ObjectParts{},
				VersionId:    &etag,
				LastModified: backend.GetTimePtr(time.Now()),
				DeleteMarker: &delMarker,
			},
			want: s3response.GetObjectAttributesResponse{
				ETag: &etag,
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := FilterObjectAttributes(tc.attrs, tc.response)
			if !reflect.DeepEqual(got, tc.want) {
				t.Errorf("FilterObjectAttributes() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestIsValidOwnership checks that IsValidOwnership accepts the three
// defined object-ownership values and rejects anything else.
func TestIsValidOwnership(t *testing.T) {
	cases := []struct {
		name      string
		ownership types.ObjectOwnership
		want      bool
	}{
		{name: "valid-BucketOwnerEnforced", ownership: types.ObjectOwnershipBucketOwnerEnforced, want: true},
		{name: "valid-BucketOwnerPreferred", ownership: types.ObjectOwnershipBucketOwnerPreferred, want: true},
		{name: "valid-ObjectWriter", ownership: types.ObjectOwnershipObjectWriter, want: true},
		{name: "invalid_value", ownership: types.ObjectOwnership("invalid_value"), want: false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := IsValidOwnership(tc.ownership); got != tc.want {
				t.Errorf("IsValidOwnership() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestIsChecksumAlgorithmValid checks that all supported checksum algorithms
// (and the empty value) validate, while unknown algorithms return an error.
func TestIsChecksumAlgorithmValid(t *testing.T) {
	cases := []struct {
		name    string
		alg     types.ChecksumAlgorithm
		wantErr bool
	}{
		{name: "empty", alg: "", wantErr: false},
		{name: "crc32", alg: types.ChecksumAlgorithmCrc32, wantErr: false},
		{name: "crc32c", alg: types.ChecksumAlgorithmCrc32c, wantErr: false},
		{name: "sha1", alg: types.ChecksumAlgorithmSha1, wantErr: false},
		{name: "sha256", alg: types.ChecksumAlgorithmSha256, wantErr: false},
		{name: "crc64nvme", alg: types.ChecksumAlgorithmCrc64nvme, wantErr: false},
		{name: "invalid", alg: types.ChecksumAlgorithm("invalid_checksum_algorithm"), wantErr: true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if err := IsChecksumAlgorithmValid(tc.alg); (err != nil) != tc.wantErr {
				t.Errorf("IsChecksumAlgorithmValid() error = %v, wantErr %v", err, tc.wantErr)
			}
		})
	}
}
// TestIsValidChecksum verifies that IsValidChecksum accepts only well-formed
// base64 checksums whose decoded length matches the given algorithm.
func TestIsValidChecksum(t *testing.T) {
	cases := []struct {
		name      string
		checksum  string
		algorithm types.ChecksumAlgorithm
		want      bool
	}{
		{name: "invalid-base64", checksum: "invalid_base64_string", algorithm: types.ChecksumAlgorithmCrc32, want: false},
		{name: "invalid-crc32", checksum: "YXNkZmFzZGZhc2Rm", algorithm: types.ChecksumAlgorithmCrc32, want: false},
		{name: "valid-crc32", checksum: "ww2FVQ==", algorithm: types.ChecksumAlgorithmCrc32, want: true},
		{name: "invalid-crc32c", checksum: "Zmdoa2doZmtnZmhr", algorithm: types.ChecksumAlgorithmCrc32c, want: false},
		{name: "valid-crc32c", checksum: "DOsb4w==", algorithm: types.ChecksumAlgorithmCrc32c, want: true},
		{name: "invalid-sha1", checksum: "YXNkZmFzZGZhc2RmYXNkZnNhZGZzYWRm", algorithm: types.ChecksumAlgorithmSha1, want: false},
		{name: "valid-sha1", checksum: "L4q6V59Zcwn12wyLIytoE2c1ugk=", algorithm: types.ChecksumAlgorithmSha1, want: true},
		{name: "invalid-sha256", checksum: "Zmdoa2doZmtnZmhrYXNkZmFzZGZhc2RmZHNmYXNkZg==", algorithm: types.ChecksumAlgorithmSha256, want: false},
		{name: "valid-sha256", checksum: "d1SPCd/kZ2rAzbbLUC0n/bEaOSx70FNbXbIqoIxKuPY=", algorithm: types.ChecksumAlgorithmSha256, want: true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := IsValidChecksum(tc.checksum, tc.algorithm); got != tc.want {
				t.Errorf("IsValidChecksum() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestIsChecksumTypeValid checks that only FULL_OBJECT and COMPOSITE are
// accepted as checksum types.
func TestIsChecksumTypeValid(t *testing.T) {
	cases := []struct {
		name    string
		typ     types.ChecksumType
		wantErr bool
	}{
		{name: "valid_FULL_OBJECT", typ: types.ChecksumTypeFullObject, wantErr: false},
		{name: "valid_COMPOSITE", typ: types.ChecksumTypeComposite, wantErr: false},
		{name: "invalid", typ: types.ChecksumType("invalid_checksum_type"), wantErr: true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if err := IsChecksumTypeValid(tc.typ); (err != nil) != tc.wantErr {
				t.Errorf("IsChecksumTypeValid() error = %v, wantErr %v", err, tc.wantErr)
			}
		})
	}
}
// Test_checkChecksumTypeAndAlgo verifies which checksum algorithm / checksum
// type combinations are accepted: FULL_OBJECT pairs with the CRC family,
// COMPOSITE pairs with CRC32/CRC32C/SHA1/SHA256, and an empty algorithm is
// rejected with either type.
func Test_checkChecksumTypeAndAlgo(t *testing.T) {
	type args struct {
		algo types.ChecksumAlgorithm
		t    types.ChecksumType
	}
	tests := []struct {
		name string
		args args
		want bool
		// want is unused; kept fields below mirror original semantics
		wantErr bool
	}{
		{
			name: "full_object-crc32",
			args: args{
				algo: types.ChecksumAlgorithmCrc32,
				t:    types.ChecksumTypeFullObject,
			},
			wantErr: false,
		},
		{
			name: "full_object-crc32c",
			args: args{
				algo: types.ChecksumAlgorithmCrc32c,
				t:    types.ChecksumTypeFullObject,
			},
			wantErr: false,
		},
		{
			name: "full_object-sha1",
			args: args{
				algo: types.ChecksumAlgorithmSha1,
				t:    types.ChecksumTypeFullObject,
			},
			wantErr: true,
		},
		{
			// was passing Sha1 by mistake; this case must exercise Sha256
			name: "full_object-sha256",
			args: args{
				algo: types.ChecksumAlgorithmSha256,
				t:    types.ChecksumTypeFullObject,
			},
			wantErr: true,
		},
		{
			name: "full_object-crc64nvme",
			args: args{
				algo: types.ChecksumAlgorithmCrc64nvme,
				t:    types.ChecksumTypeFullObject,
			},
			wantErr: false,
		},
		{
			name: "composite-crc32",
			args: args{
				algo: types.ChecksumAlgorithmCrc32,
				t:    types.ChecksumTypeComposite,
			},
			wantErr: false,
		},
		{
			name: "composite-crc32c",
			args: args{
				algo: types.ChecksumAlgorithmCrc32c,
				t:    types.ChecksumTypeComposite,
			},
			wantErr: false,
		},
		{
			name: "composite-sha1",
			args: args{
				algo: types.ChecksumAlgorithmSha1,
				t:    types.ChecksumTypeComposite,
			},
			wantErr: false,
		},
		{
			name: "composite-sha256",
			args: args{
				algo: types.ChecksumAlgorithmSha256,
				t:    types.ChecksumTypeComposite,
			},
			wantErr: false,
		},
		{
			name: "composite-crc64nvme",
			args: args{
				algo: types.ChecksumAlgorithmCrc64nvme,
				t:    types.ChecksumTypeComposite,
			},
			wantErr: true,
		},
		{
			name: "composite-empty",
			args: args{
				t: types.ChecksumTypeComposite,
			},
			wantErr: true,
		},
		{
			name: "full_object-empty",
			args: args{
				t: types.ChecksumTypeFullObject,
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := checkChecksumTypeAndAlgo(tt.args.algo, tt.args.t); (err != nil) != tt.wantErr {
				t.Errorf("checkChecksumTypeAndAlgo() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
// TestParseTagging exercises ParseTagging: XML decoding, per-tag key/value
// validation, duplicate-key detection, and the bucket/object tag-count limits.
func TestParseTagging(t *testing.T) {
	// randomString returns a random string of the given length built from
	// printable ASCII characters (32..126).
	randomString := func(length int) string {
		buf := make([]byte, length)
		for i := range buf {
			buf[i] = byte(rand.Intn(95) + 32) // 126 - 32 + 1 = 95 printable characters
		}
		return string(buf)
	}
	// buildTagSet produces a TaggingInput holding `count` random tags.
	buildTagSet := func(count int) s3response.TaggingInput {
		input := s3response.TaggingInput{
			TagSet: s3response.TagSet{
				Tags: []s3response.Tag{},
			},
		}
		for i := 0; i < count; i++ {
			input.TagSet.Tags = append(input.TagSet.Tags, s3response.Tag{
				Key:   randomString(10),
				Value: randomString(20),
			})
		}
		return input
	}

	cases := []struct {
		name    string
		input   s3response.TaggingInput
		rawXML  []byte // when set, used verbatim instead of marshalling input
		limit   TagLimit
		want    map[string]string
		wantErr error
	}{
		{
			name: "valid tags within limit",
			input: s3response.TaggingInput{
				TagSet: s3response.TagSet{
					Tags: []s3response.Tag{
						{Key: "key1", Value: "value1"},
						{Key: "key2", Value: "value2"},
					},
				},
			},
			limit:   TagLimitObject,
			want:    map[string]string{"key1": "value1", "key2": "value2"},
			wantErr: nil,
		},
		{
			name:    "malformed XML",
			rawXML:  []byte("invalid xml"),
			limit:   TagLimitObject,
			want:    nil,
			wantErr: s3err.GetAPIError(s3err.ErrMalformedXML),
		},
		{
			name:    "exceeds bucket tag limit",
			input:   buildTagSet(51),
			limit:   TagLimitBucket,
			want:    nil,
			wantErr: s3err.GetAPIError(s3err.ErrBucketTaggingLimited),
		},
		{
			name:    "exceeds object tag limit",
			input:   buildTagSet(11),
			limit:   TagLimitObject,
			want:    nil,
			wantErr: s3err.GetAPIError(s3err.ErrObjectTaggingLimited),
		},
		{
			name: "invalid 0 length tag key",
			input: s3response.TaggingInput{
				TagSet: s3response.TagSet{
					Tags: []s3response.Tag{{Key: "", Value: "value1"}},
				},
			},
			limit:   TagLimitObject,
			want:    nil,
			wantErr: s3err.GetAPIError(s3err.ErrInvalidTagKey),
		},
		{
			name: "invalid long tag key",
			input: s3response.TaggingInput{
				TagSet: s3response.TagSet{
					Tags: []s3response.Tag{{Key: randomString(130), Value: "value1"}},
				},
			},
			limit:   TagLimitObject,
			want:    nil,
			wantErr: s3err.GetAPIError(s3err.ErrInvalidTagKey),
		},
		{
			name: "invalid long tag value",
			input: s3response.TaggingInput{
				TagSet: s3response.TagSet{
					Tags: []s3response.Tag{{Key: "key", Value: randomString(257)}},
				},
			},
			limit:   TagLimitBucket,
			want:    nil,
			wantErr: s3err.GetAPIError(s3err.ErrInvalidTagValue),
		},
		{
			name: "duplicate tag key",
			input: s3response.TaggingInput{
				TagSet: s3response.TagSet{
					Tags: []s3response.Tag{
						{Key: "key", Value: "value1"},
						{Key: "key", Value: "value2"},
					},
				},
			},
			limit:   TagLimitObject,
			want:    nil,
			wantErr: s3err.GetAPIError(s3err.ErrDuplicateTagKey),
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			body := tc.rawXML
			if body == nil {
				marshalled, err := xml.Marshal(tc.input)
				if err != nil {
					t.Fatalf("error marshalling input: %v", err)
				}
				body = marshalled
			}

			got, err := ParseTagging(body, tc.limit)
			if !errors.Is(err, tc.wantErr) {
				t.Errorf("expected error %v, got %v", tc.wantErr, err)
			}
			if err == nil && !reflect.DeepEqual(got, tc.want) {
				t.Errorf("expected result %v, got %v", tc.want, got)
			}
		})
	}
}
// TestValidateCopySource covers ValidateCopySource error paths — percent
// encoding, bucket name, object name and versionId validation — plus a set
// of valid copy sources.
func TestValidateCopySource(t *testing.T) {
	cases := []struct {
		name    string
		source  string
		wantErr error
	}{
		// invalid encoding
		{"invalid encoding 1", "%", s3err.GetAPIError(s3err.ErrInvalidCopySourceEncoding)},
		{"invalid encoding 2", "%2", s3err.GetAPIError(s3err.ErrInvalidCopySourceEncoding)},
		{"invalid encoding 3", "%G1", s3err.GetAPIError(s3err.ErrInvalidCopySourceEncoding)},
		{"invalid encoding 4", "%1Z", s3err.GetAPIError(s3err.ErrInvalidCopySourceEncoding)},
		{"invalid encoding 5", "%0H", s3err.GetAPIError(s3err.ErrInvalidCopySourceEncoding)},
		{"invalid encoding 6", "%XY", s3err.GetAPIError(s3err.ErrInvalidCopySourceEncoding)},
		{"invalid encoding 7", "%E", s3err.GetAPIError(s3err.ErrInvalidCopySourceEncoding)},
		{"invalid encoding 8", "hello%", s3err.GetAPIError(s3err.ErrInvalidCopySourceEncoding)},
		{"invalid encoding 9", "%%", s3err.GetAPIError(s3err.ErrInvalidCopySourceEncoding)},
		{"invalid encoding 10", "%2Gmore", s3err.GetAPIError(s3err.ErrInvalidCopySourceEncoding)},
		{"invalid encoding 11", "100%%sure", s3err.GetAPIError(s3err.ErrInvalidCopySourceEncoding)},
		{"invalid encoding 12", "%#00", s3err.GetAPIError(s3err.ErrInvalidCopySourceEncoding)},
		{"invalid encoding 13", "%0%0", s3err.GetAPIError(s3err.ErrInvalidCopySourceEncoding)},
		{"invalid encoding 14", "%?versionId=id", s3err.GetAPIError(s3err.ErrInvalidCopySourceEncoding)},
		// invalid bucket name
		{"invalid bucket name 1", "168.200.1.255/obj/foo", s3err.GetAPIError(s3err.ErrInvalidCopySourceBucket)},
		{"invalid bucket name 2", "/0000:0db8:85a3:0000:0000:8a2e:0370:7224/smth", s3err.GetAPIError(s3err.ErrInvalidCopySourceBucket)},
		{"invalid bucket name 3", "", s3err.GetAPIError(s3err.ErrInvalidCopySourceBucket)},
		{"invalid bucket name 4", "//obj/foo", s3err.GetAPIError(s3err.ErrInvalidCopySourceBucket)},
		{"invalid bucket name 5", "//obj/foo?versionId=id", s3err.GetAPIError(s3err.ErrInvalidCopySourceBucket)},
		// invalid object name
		{"invalid object name 1", "bucket/../foo", s3err.GetAPIError(s3err.ErrInvalidCopySourceObject)},
		{"invalid object name 2", "bucket/", s3err.GetAPIError(s3err.ErrInvalidCopySourceObject)},
		{"invalid object name 3", "bucket", s3err.GetAPIError(s3err.ErrInvalidCopySourceObject)},
		{"invalid object name 4", "bucket/../foo/dir/../../../", s3err.GetAPIError(s3err.ErrInvalidCopySourceObject)},
		{"invalid object name 5", "bucket/.?versionId=smth", s3err.GetAPIError(s3err.ErrInvalidCopySourceObject)},
		// invalid versionId
		{"invalid versionId 1", "bucket/object?versionId=invalid", s3err.GetAPIError(s3err.ErrInvalidVersionId)},
		{"invalid versionId 2", "bucket/object?versionId=01BX5ZZKBKACTAV9WEVGEMMV", s3err.GetAPIError(s3err.ErrInvalidVersionId)},
		// success
		{"no error 1", "bucket/object", nil},
		{"no error 2", "bucket/object/key", nil},
		{"no error 3", "bucket/4*&(*&(89765))", nil},
		{"no error 4", "bucket/foo/../bar", nil},
		{"no error 5", "bucket/foo/bar/baz?versionId=01BX5ZZKBKACTAV9WEVGEMMVRZ", nil},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.wantErr, ValidateCopySource(tc.source))
		})
	}
}