feat: clean up debuglogger calls with a managed debug setting

Author: Ben McClelland
Date: 2025-05-02 16:59:58 -07:00
parent d19c446f72
commit a9fcf63063
7 changed files with 91 additions and 127 deletions
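In short: the validation helpers stop taking a debug bool parameter, and the debug guard moves into the debuglogger package itself, which keeps a process-wide flag set once at startup. A condensed sketch of that package as it looks after this commit (it mirrors the debuglogger hunk further down; the color constants are omitted here):

package debuglogger

import (
	"fmt"
	"sync/atomic"
)

// Process-wide debug flag; atomic so concurrent request handlers can read it safely.
var debugEnabled atomic.Bool

// SetDebugEnabled turns on debug logging for the whole process.
func SetDebugEnabled() {
	debugEnabled.Store(true)
}

// Logf prints a prefixed debug line and is a no-op while debug is off,
// so call sites no longer need their own `if debug { ... }` guard.
func Logf(format string, v ...any) {
	if !debugEnabled.Load() {
		return
	}
	fmt.Printf("[DEBUG]: "+format+"\n", v...)
}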


@@ -1763,7 +1763,7 @@ func validatePartChecksum(checksum s3response.Checksum, part types.CompletedPart
continue
}
if !utils.IsValidChecksum(*cs.checksum, cs.algo, false) {
if !utils.IsValidChecksum(*cs.checksum, cs.algo) {
return s3err.GetAPIError(s3err.ErrInvalidChecksumPart)
}


@@ -505,7 +505,7 @@ func validatePartChecksum(checksum s3response.Checksum, part types.CompletedPart
continue
}
if !utils.IsValidChecksum(*cs.checksum, cs.algo, false) {
if !utils.IsValidChecksum(*cs.checksum, cs.algo) {
return s3err.GetAPIError(s3err.ErrInvalidChecksumPart)
}


@@ -57,6 +57,10 @@ const (
)
func New(be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, evs s3event.S3EventSender, mm *metrics.Manager, debug bool, readonly bool) S3ApiController {
if debug {
debuglogger.SetDebugEnabled()
}
return S3ApiController{
be: be,
iam: iam,
@@ -267,7 +271,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
}
}
mxParts := ctx.Query("max-parts")
maxParts, err := utils.ParseUint(mxParts, c.debug)
maxParts, err := utils.ParseUint(mxParts)
if err != nil {
if c.debug {
debuglogger.Logf("error parsing max parts %q: %v",
@@ -374,7 +378,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
}
maxParts := ctx.Get("X-Amz-Max-Parts")
partNumberMarker := ctx.Get("X-Amz-Part-Number-Marker")
maxPartsParsed, err := utils.ParseUint(maxParts, c.debug)
maxPartsParsed, err := utils.ParseUint(maxParts)
if err != nil {
if c.debug {
debuglogger.Logf("error parsing max parts %q: %v",
@@ -388,7 +392,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
BucketOwner: parsedAcl.Owner,
})
}
attrs, err := utils.ParseObjectAttributes(ctx, c.debug)
attrs, err := utils.ParseObjectAttributes(ctx)
if err != nil {
return SendXMLResponse(ctx, nil, err,
&MetaOpts{
@@ -922,7 +926,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
})
}
maxkeys, err := utils.ParseUint(maxkeysStr, c.debug)
maxkeys, err := utils.ParseUint(maxkeysStr)
if err != nil {
if c.debug {
debuglogger.Logf("error parsing max keys %q: %v",
@@ -1055,7 +1059,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
BucketOwner: parsedAcl.Owner,
})
}
maxUploads, err := utils.ParseUint(maxUploadsStr, c.debug)
maxUploads, err := utils.ParseUint(maxUploadsStr)
if err != nil {
if c.debug {
debuglogger.Logf("error parsing max uploads %q: %v",
@@ -1106,7 +1110,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
BucketOwner: parsedAcl.Owner,
})
}
maxkeys, err := utils.ParseUint(maxkeysStr, c.debug)
maxkeys, err := utils.ParseUint(maxkeysStr)
if err != nil {
if c.debug {
debuglogger.Logf("error parsing max keys %q: %v",
@@ -1160,7 +1164,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
})
}
maxkeys, err := utils.ParseUint(maxkeysStr, c.debug)
maxkeys, err := utils.ParseUint(maxkeysStr)
if err != nil {
if c.debug {
debuglogger.Logf("error parsing max keys %q: %v",
@@ -1211,7 +1215,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
if ctx.Request().URI().QueryArgs().Has("tagging") {
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
tagging, err := utils.ParseTagging(ctx.Body(), utils.TagLimitBucket, c.debug)
tagging, err := utils.ParseTagging(ctx.Body(), utils.TagLimitBucket)
if err != nil {
return SendResponse(ctx, err,
&MetaOpts{
@@ -1269,7 +1273,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
}
rulesCount := len(ownershipControls.Rules)
isValidOwnership := utils.IsValidOwnership(ownershipControls.Rules[0].ObjectOwnership, c.debug)
isValidOwnership := utils.IsValidOwnership(ownershipControls.Rules[0].ObjectOwnership)
if rulesCount != 1 || !isValidOwnership {
if c.debug && rulesCount != 1 {
debuglogger.Logf("ownership control rules should be 1, got %v", rulesCount)
@@ -1679,7 +1683,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
})
}
if ok := utils.IsValidBucketName(bucket, c.debug); !ok {
if ok := utils.IsValidBucketName(bucket); !ok {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidBucketName),
&MetaOpts{
Logger: c.logger,
@@ -1687,7 +1691,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
Action: metrics.ActionCreateBucket,
})
}
if ok := utils.IsValidOwnership(objectOwnership, c.debug); !ok {
if ok := utils.IsValidOwnership(objectOwnership); !ok {
return SendResponse(ctx, s3err.APIError{
Code: "InvalidArgument",
Description: fmt.Sprintf("Invalid x-amz-object-ownership header: %v", objectOwnership),
@@ -1838,7 +1842,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
}
if ctx.Request().URI().QueryArgs().Has("tagging") {
tagging, err := utils.ParseTagging(ctx.Body(), utils.TagLimitObject, c.debug)
tagging, err := utils.ParseTagging(ctx.Body(), utils.TagLimitObject)
if err != nil {
return SendResponse(ctx, err,
&MetaOpts{
@@ -2127,7 +2131,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
})
}
algorithm, checksums, err := utils.ParseChecksumHeaders(ctx, c.debug)
algorithm, checksums, err := utils.ParseChecksumHeaders(ctx)
if err != nil {
if c.debug {
debuglogger.Logf("err parsing checksum headers: %v", err)
@@ -2456,7 +2460,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
}
checksumAlgorithm := types.ChecksumAlgorithm(ctx.Get("x-amz-checksum-algorithm"))
err = utils.IsChecksumAlgorithmValid(checksumAlgorithm, c.debug)
err = utils.IsChecksumAlgorithmValid(checksumAlgorithm)
if err != nil {
return SendXMLResponse(ctx, nil, err,
&MetaOpts{
@@ -2467,7 +2471,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
})
}
objLock, err := utils.ParsObjectLockHdrs(ctx, c.debug)
objLock, err := utils.ParsObjectLockHdrs(ctx)
if err != nil {
return SendResponse(ctx, err,
&MetaOpts{
@@ -2591,7 +2595,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
})
}
objLock, err := utils.ParsObjectLockHdrs(ctx, c.debug)
objLock, err := utils.ParsObjectLockHdrs(ctx)
if err != nil {
return SendResponse(ctx, err,
&MetaOpts{
@@ -2602,7 +2606,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
})
}
algorithm, checksums, err := utils.ParseChecksumHeaders(ctx, c.debug)
algorithm, checksums, err := utils.ParseChecksumHeaders(ctx)
if err != nil {
return SendResponse(ctx, err,
&MetaOpts{
@@ -3662,7 +3666,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
})
}
_, checksums, err := utils.ParseChecksumHeaders(ctx, c.debug)
_, checksums, err := utils.ParseChecksumHeaders(ctx)
if err != nil {
return SendXMLResponse(ctx, nil, err,
&MetaOpts{
@@ -3674,7 +3678,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
}
checksumType := types.ChecksumType(ctx.Get("x-amz-checksum-type"))
err = utils.IsChecksumTypeValid(checksumType, c.debug)
err = utils.IsChecksumTypeValid(checksumType)
if err != nil {
return SendXMLResponse(ctx, nil, err,
&MetaOpts{
@@ -3752,7 +3756,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
})
}
objLockState, err := utils.ParsObjectLockHdrs(ctx, c.debug)
objLockState, err := utils.ParsObjectLockHdrs(ctx)
if err != nil {
return SendXMLResponse(ctx, nil, err,
&MetaOpts{
@@ -3765,7 +3769,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
metadata := utils.GetUserMetaData(&ctx.Request().Header)
checksumAlgorithm, checksumType, err := utils.ParseCreateMpChecksumHeaders(ctx, c.debug)
checksumAlgorithm, checksumType, err := utils.ParseCreateMpChecksumHeaders(ctx)
if err != nil {
return SendXMLResponse(ctx, nil, err,
&MetaOpts{


@@ -19,6 +19,7 @@ import (
"log"
"net/http"
"strings"
"sync/atomic"
"github.com/gofiber/fiber/v2"
)
@@ -81,9 +82,19 @@ func LogFiberResponseDetails(ctx *fiber.Ctx) {
}
}
var debugEnabled atomic.Bool
// SetDebugEnabled sets the debug mode
func SetDebugEnabled() {
debugEnabled.Store(true)
}
// Logf is the same as 'fmt.Printf' with debug prefix,
// a color added and '\n' at the end
func Logf(format string, v ...any) {
if !debugEnabled.Load() {
return
}
debugPrefix := "[DEBUG]: "
fmt.Printf(yellow+debugPrefix+format+reset+"\n", v...)
}
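A caller-side sketch of the gate above: the flag is enabled once when the controller is constructed, and any later Logf call prints nothing unless that happened. The import path and the sample values are assumptions, not taken from this diff:

package main

import (
	"errors"

	"github.com/versity/versitygw/s3api/debuglogger" // assumed import path
)

func main() {
	debug := true // e.g. wired from the gateway's debug setting (assumed)

	// Mirrors the S3ApiController New() hunk above: enable once at startup.
	if debug {
		debuglogger.SetDebugEnabled()
	}

	// Elsewhere, code logs unconditionally; this line prints nothing
	// if SetDebugEnabled was never called.
	debuglogger.Logf("error parsing max parts %q: %v", "abc", errors.New("invalid int"))
}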


@@ -390,7 +390,7 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
return cr.handleRdrErr(err, header)
}
if !IsValidChecksum(checksum, algo, cr.debug) {
if !IsValidChecksum(checksum, algo) {
return 0, "", 0, s3err.GetInvalidTrailingChecksumHeaderErr(trailer)
}


@@ -177,21 +177,17 @@ func SetMetaHeaders(ctx *fiber.Ctx, meta map[string]string) {
ctx.Response().Header.EnableNormalizing()
}
func ParseUint(str string, debug bool) (int32, error) {
func ParseUint(str string) (int32, error) {
if str == "" {
return 1000, nil
}
num, err := strconv.ParseInt(str, 10, 32)
if err != nil {
if debug {
debuglogger.Logf("invalid intager provided: %v\n", err)
}
debuglogger.Logf("invalid intager provided: %v\n", err)
return 1000, fmt.Errorf("invalid int: %w", err)
}
if num < 0 {
if debug {
debuglogger.Logf("negative intager provided: %v\n", num)
}
debuglogger.Logf("negative intager provided: %v\n", num)
return 1000, fmt.Errorf("negative uint: %v", num)
}
if num > 1000 {
@@ -218,7 +214,7 @@ func StreamResponseBody(ctx *fiber.Ctx, rdr io.ReadCloser, bodysize int) {
ctx.Context().SetBodyStream(rdr, bodysize)
}
func IsValidBucketName(bucket string, debug bool) bool {
func IsValidBucketName(bucket string) bool {
if len(bucket) < 3 || len(bucket) > 63 {
debuglogger.Logf("bucket name length should be in 3-63 range, got: %v\n", len(bucket))
return false
@@ -304,7 +300,7 @@ func FilterObjectAttributes(attrs map[s3response.ObjectAttributes]struct{}, outp
return output
}
func ParseObjectAttributes(ctx *fiber.Ctx, debug bool) (map[s3response.ObjectAttributes]struct{}, error) {
func ParseObjectAttributes(ctx *fiber.Ctx) (map[s3response.ObjectAttributes]struct{}, error) {
attrs := map[s3response.ObjectAttributes]struct{}{}
var err error
ctx.Request().Header.VisitAll(func(key, value []byte) {
@@ -316,9 +312,7 @@ func ParseObjectAttributes(ctx *fiber.Ctx, debug bool) (map[s3response.ObjectAtt
for _, a := range oattrs {
attr := s3response.ObjectAttributes(a)
if !attr.IsValid() {
if debug {
debuglogger.Logf("invalid object attribute: %v\n", attr)
}
debuglogger.Logf("invalid object attribute: %v\n", attr)
err = s3err.GetAPIError(s3err.ErrInvalidObjectAttributes)
break
}
@@ -332,9 +326,7 @@ func ParseObjectAttributes(ctx *fiber.Ctx, debug bool) (map[s3response.ObjectAtt
}
if len(attrs) == 0 {
if debug {
debuglogger.Logf("empty get object attributes")
}
debuglogger.Logf("empty get object attributes")
return nil, s3err.GetAPIError(s3err.ErrObjectAttributesInvalidHeader)
}
@@ -347,15 +339,13 @@ type objLockCfg struct {
LegalHoldStatus types.ObjectLockLegalHoldStatus
}
func ParsObjectLockHdrs(ctx *fiber.Ctx, debug bool) (*objLockCfg, error) {
func ParsObjectLockHdrs(ctx *fiber.Ctx) (*objLockCfg, error) {
legalHoldHdr := ctx.Get("X-Amz-Object-Lock-Legal-Hold")
objLockModeHdr := ctx.Get("X-Amz-Object-Lock-Mode")
objLockDate := ctx.Get("X-Amz-Object-Lock-Retain-Until-Date")
if (objLockDate != "" && objLockModeHdr == "") || (objLockDate == "" && objLockModeHdr != "") {
if debug {
debuglogger.Logf("one of 2 required params is missing: (lock date): %v, (lock mode): %v\n", objLockDate, objLockModeHdr)
}
debuglogger.Logf("one of 2 required params is missing: (lock date): %v, (lock mode): %v\n", objLockDate, objLockModeHdr)
return nil, s3err.GetAPIError(s3err.ErrObjectLockInvalidHeaders)
}
@@ -363,15 +353,11 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx, debug bool) (*objLockCfg, error) {
if objLockDate != "" {
rDate, err := time.Parse(time.RFC3339, objLockDate)
if err != nil {
if debug {
debuglogger.Logf("failed to parse retain until date: %v\n", err)
}
debuglogger.Logf("failed to parse retain until date: %v\n", err)
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
if rDate.Before(time.Now()) {
if debug {
debuglogger.Logf("expired retain until date: %v\n", rDate.Format(time.RFC3339))
}
debuglogger.Logf("expired retain until date: %v\n", rDate.Format(time.RFC3339))
return nil, s3err.GetAPIError(s3err.ErrPastObjectLockRetainDate)
}
retainUntilDate = rDate
@@ -382,18 +368,14 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx, debug bool) (*objLockCfg, error) {
if objLockMode != "" &&
objLockMode != types.ObjectLockModeCompliance &&
objLockMode != types.ObjectLockModeGovernance {
if debug {
debuglogger.Logf("invalid object lock mode: %v\n", objLockMode)
}
debuglogger.Logf("invalid object lock mode: %v\n", objLockMode)
return nil, s3err.GetAPIError(s3err.ErrInvalidObjectLockMode)
}
legalHold := types.ObjectLockLegalHoldStatus(legalHoldHdr)
if legalHold != "" && legalHold != types.ObjectLockLegalHoldStatusOff && legalHold != types.ObjectLockLegalHoldStatusOn {
if debug {
debuglogger.Logf("invalid object lock legal hold status: %v\n", legalHold)
}
debuglogger.Logf("invalid object lock legal hold status: %v\n", legalHold)
return nil, s3err.GetAPIError(s3err.ErrInvalidLegalHoldStatus)
}
@@ -404,7 +386,7 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx, debug bool) (*objLockCfg, error) {
}, nil
}
func IsValidOwnership(val types.ObjectOwnership, debug bool) bool {
func IsValidOwnership(val types.ObjectOwnership) bool {
switch val {
case types.ObjectOwnershipBucketOwnerEnforced:
return true
@@ -413,9 +395,7 @@ func IsValidOwnership(val types.ObjectOwnership, debug bool) bool {
case types.ObjectOwnershipObjectWriter:
return true
default:
if debug {
debuglogger.Logf("invalid object ownership: %v\n", val)
}
debuglogger.Logf("invalid object ownership: %v\n", val)
return false
}
}
@@ -510,14 +490,12 @@ func (cv ChecksumValues) Headers() string {
return result
}
func ParseChecksumHeaders(ctx *fiber.Ctx, debug bool) (types.ChecksumAlgorithm, ChecksumValues, error) {
func ParseChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, ChecksumValues, error) {
sdkAlgorithm := types.ChecksumAlgorithm(strings.ToUpper(ctx.Get("X-Amz-Sdk-Checksum-Algorithm")))
err := IsChecksumAlgorithmValid(sdkAlgorithm, debug)
err := IsChecksumAlgorithmValid(sdkAlgorithm)
if err != nil {
if debug {
debuglogger.Logf("invalid checksum algorithm: %v\n", sdkAlgorithm)
}
debuglogger.Logf("invalid checksum algorithm: %v\n", sdkAlgorithm)
return "", nil, err
}
@@ -532,11 +510,9 @@ func ParseChecksumHeaders(ctx *fiber.Ctx, debug bool) (types.ChecksumAlgorithm,
}
algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(string(key), "X-Amz-Checksum-")))
err := IsChecksumAlgorithmValid(algo, debug)
err := IsChecksumAlgorithmValid(algo)
if err != nil {
if debug {
debuglogger.Logf("invalid checksum header: %s\n", key)
}
debuglogger.Logf("invalid checksum header: %s\n", key)
hdrErr = s3err.GetAPIError(s3err.ErrInvalidChecksumHeader)
return
}
@@ -549,14 +525,12 @@ func ParseChecksumHeaders(ctx *fiber.Ctx, debug bool) (types.ChecksumAlgorithm,
}
if len(checksums) > 1 {
if debug {
debuglogger.Logf("multiple checksum headers provided: %v\n", checksums.Headers())
}
debuglogger.Logf("multiple checksum headers provided: %v\n", checksums.Headers())
return sdkAlgorithm, checksums, s3err.GetAPIError(s3err.ErrMultipleChecksumHeaders)
}
for al, val := range checksums {
if !IsValidChecksum(val, al, debug) {
if !IsValidChecksum(val, al) {
return sdkAlgorithm, checksums, s3err.GetInvalidChecksumHeaderErr(fmt.Sprintf("x-amz-checksum-%v", strings.ToLower(string(al))))
}
// If any other checksum value is provided,
@@ -578,32 +552,28 @@ var checksumLengths = map[types.ChecksumAlgorithm]int{
types.ChecksumAlgorithmSha256: 32,
}
func IsValidChecksum(checksum string, algorithm types.ChecksumAlgorithm, debug bool) bool {
func IsValidChecksum(checksum string, algorithm types.ChecksumAlgorithm) bool {
decoded, err := base64.StdEncoding.DecodeString(checksum)
if err != nil {
if debug {
debuglogger.Logf("failed to parse checksum base64: %v\n", err)
}
debuglogger.Logf("failed to parse checksum base64: %v\n", err)
return false
}
expectedLength, exists := checksumLengths[algorithm]
if !exists {
if debug {
debuglogger.Logf("unknown checksum algorithm: %v\n", algorithm)
}
debuglogger.Logf("unknown checksum algorithm: %v\n", algorithm)
return false
}
isValid := len(decoded) == expectedLength
if !isValid && debug {
if !isValid {
debuglogger.Logf("decoded checksum length: (expected): %v, (got): %v\n", expectedLength, len(decoded))
}
return isValid
}
func IsChecksumAlgorithmValid(alg types.ChecksumAlgorithm, debug bool) error {
func IsChecksumAlgorithmValid(alg types.ChecksumAlgorithm) error {
alg = types.ChecksumAlgorithm(strings.ToUpper(string(alg)))
if alg != "" &&
alg != types.ChecksumAlgorithmCrc32 &&
@@ -611,9 +581,7 @@ func IsChecksumAlgorithmValid(alg types.ChecksumAlgorithm, debug bool) error {
alg != types.ChecksumAlgorithmSha1 &&
alg != types.ChecksumAlgorithmSha256 &&
alg != types.ChecksumAlgorithmCrc64nvme {
if debug {
debuglogger.Logf("invalid checksum algorithm: %v\n", alg)
}
debuglogger.Logf("invalid checksum algorithm: %v\n", alg)
return s3err.GetAPIError(s3err.ErrInvalidChecksumAlgorithm)
}
@@ -621,13 +589,11 @@ func IsChecksumAlgorithmValid(alg types.ChecksumAlgorithm, debug bool) error {
}
// Validates the provided checksum type
func IsChecksumTypeValid(t types.ChecksumType, debug bool) error {
func IsChecksumTypeValid(t types.ChecksumType) error {
if t != "" &&
t != types.ChecksumTypeComposite &&
t != types.ChecksumTypeFullObject {
if debug {
debuglogger.Logf("invalid checksum type: %v\n", t)
}
debuglogger.Logf("invalid checksum type: %v\n", t)
return s3err.GetInvalidChecksumHeaderErr("x-amz-checksum-type")
}
return nil
@@ -667,13 +633,11 @@ var checksumMap checksumSchema = checksumSchema{
}
// Checks if checksum type and algorithm are supported together
func checkChecksumTypeAndAlgo(algo types.ChecksumAlgorithm, t types.ChecksumType, debug bool) error {
func checkChecksumTypeAndAlgo(algo types.ChecksumAlgorithm, t types.ChecksumType) error {
typeSchema := checksumMap[algo]
_, ok := typeSchema[t]
if !ok {
if debug {
debuglogger.Logf("checksum type and algorithm mismatch: (type): %v, (algorithm): %v\n", t, algo)
}
debuglogger.Logf("checksum type and algorithm mismatch: (type): %v, (algorithm): %v\n", t, algo)
return s3err.GetChecksumSchemaMismatchErr(algo, t)
}
@@ -681,29 +645,27 @@ func checkChecksumTypeAndAlgo(algo types.ChecksumAlgorithm, t types.ChecksumType
}
// Parses and validates the x-amz-checksum-algorithm and x-amz-checksum-type headers
func ParseCreateMpChecksumHeaders(ctx *fiber.Ctx, debug bool) (types.ChecksumAlgorithm, types.ChecksumType, error) {
func ParseCreateMpChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, types.ChecksumType, error) {
algo := types.ChecksumAlgorithm(ctx.Get("x-amz-checksum-algorithm"))
if err := IsChecksumAlgorithmValid(algo, debug); err != nil {
if err := IsChecksumAlgorithmValid(algo); err != nil {
return "", "", err
}
chType := types.ChecksumType(ctx.Get("x-amz-checksum-type"))
if err := IsChecksumTypeValid(chType, debug); err != nil {
if err := IsChecksumTypeValid(chType); err != nil {
return "", "", err
}
// Verify if checksum algorithm is provided, if
// checksum type is specified
if chType != "" && algo == "" {
if debug {
debuglogger.Logf("checksum type can only be used with checksum algorithm: (type): %v\n", chType)
}
debuglogger.Logf("checksum type can only be used with checksum algorithm: (type): %v\n", chType)
return algo, chType, s3err.GetAPIError(s3err.ErrChecksumTypeWithAlgo)
}
// Verify if the checksum type is supported for
// the provided checksum algorithm
if err := checkChecksumTypeAndAlgo(algo, chType, debug); err != nil {
if err := checkChecksumTypeAndAlgo(algo, chType); err != nil {
return algo, chType, err
}
@@ -732,13 +694,11 @@ const (
)
// Parses and validates tagging
func ParseTagging(data []byte, limit TagLimit, debug bool) (map[string]string, error) {
func ParseTagging(data []byte, limit TagLimit) (map[string]string, error) {
var tagging s3response.TaggingInput
err := xml.Unmarshal(data, &tagging)
if err != nil {
if debug {
debuglogger.Logf("invalid taggging: %s", data)
}
debuglogger.Logf("invalid taggging: %s", data)
return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
}
@@ -746,14 +706,10 @@ func ParseTagging(data []byte, limit TagLimit, debug bool) (map[string]string, e
if tLen > int(limit) {
switch limit {
case TagLimitObject:
if debug {
debuglogger.Logf("bucket tagging length exceeds %v: %v", limit, tLen)
}
debuglogger.Logf("bucket tagging length exceeds %v: %v", limit, tLen)
return nil, s3err.GetAPIError(s3err.ErrObjectTaggingLimited)
case TagLimitBucket:
if debug {
debuglogger.Logf("object tagging length exceeds %v: %v", limit, tLen)
}
debuglogger.Logf("object tagging length exceeds %v: %v", limit, tLen)
return nil, s3err.GetAPIError(s3err.ErrBucketTaggingLimited)
}
}
@@ -763,26 +719,20 @@ func ParseTagging(data []byte, limit TagLimit, debug bool) (map[string]string, e
for _, tag := range tagging.TagSet.Tags {
// validate tag key
if len(tag.Key) == 0 || len(tag.Key) > 128 {
if debug {
debuglogger.Logf("tag key should 0 < tag.Key <= 128, key: %v", tag.Key)
}
debuglogger.Logf("tag key should 0 < tag.Key <= 128, key: %v", tag.Key)
return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
}
// validate tag value
if len(tag.Value) > 256 {
if debug {
debuglogger.Logf("invalid long tag value: (length): %v, (value): %v", len(tag.Value), tag.Value)
}
debuglogger.Logf("invalid long tag value: (length): %v, (value): %v", len(tag.Value), tag.Value)
return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
}
// make sure there are no duplicate keys
_, ok := tagSet[tag.Key]
if ok {
if debug {
debuglogger.Logf("duplicate tag key: %v", tag.Key)
}
debuglogger.Logf("duplicate tag key: %v", tag.Key)
return nil, s3err.GetAPIError(s3err.ErrDuplicateTagKey)
}
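For quick reference, the ParseUint contract visible in this hunk: an empty string falls back to the default of 1000, while non-numeric or negative input returns an error along with the 1000 fallback. A small sketch assuming it sits inside the same utils package; the function name is illustrative:

package utils

import "fmt"

// illustrateParseUint is a sketch of the behavior above, not part of this commit.
func illustrateParseUint() {
	n, err := ParseUint("") // empty input: default of 1000, no error
	fmt.Println(n, err)     // 1000 <nil>

	n, err = ParseUint("250")
	fmt.Println(n, err) // 250 <nil>

	_, err = ParseUint("-5") // negative: rejected with an error
	fmt.Println(err)

	_, err = ParseUint("abc") // not an integer: rejected with an error
	fmt.Println(err)
}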


@@ -223,7 +223,7 @@ func TestIsValidBucketName(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := IsValidBucketName(tt.args.bucket, false); got != tt.want {
if got := IsValidBucketName(tt.args.bucket); got != tt.want {
t.Errorf("IsValidBucketName() = %v, want %v", got, tt.want)
}
})
@@ -283,7 +283,7 @@ func TestParseUint(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := ParseUint(tt.args.str, false)
got, err := ParseUint(tt.args.str)
if (err != nil) != tt.wantErr {
t.Errorf("ParseMaxKeys() error = %v, wantErr %v", err, tt.wantErr)
return
@@ -411,7 +411,7 @@ func TestIsValidOwnership(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := IsValidOwnership(tt.args.val, false); got != tt.want {
if got := IsValidOwnership(tt.args.val); got != tt.want {
t.Errorf("IsValidOwnership() = %v, want %v", got, tt.want)
}
})
@@ -601,7 +601,7 @@ func TestIsChecksumAlgorithmValid(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := IsChecksumAlgorithmValid(tt.args.alg, false); (err != nil) != tt.wantErr {
if err := IsChecksumAlgorithmValid(tt.args.alg); (err != nil) != tt.wantErr {
t.Errorf("IsChecksumAlgorithmValid() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -693,7 +693,7 @@ func TestIsValidChecksum(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := IsValidChecksum(tt.args.checksum, tt.args.algorithm, false); got != tt.want {
if got := IsValidChecksum(tt.args.checksum, tt.args.algorithm); got != tt.want {
t.Errorf("IsValidChecksum() = %v, want %v", got, tt.want)
}
})
@@ -733,7 +733,7 @@ func TestIsChecksumTypeValid(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := IsChecksumTypeValid(tt.args.t, false); (err != nil) != tt.wantErr {
if err := IsChecksumTypeValid(tt.args.t); (err != nil) != tt.wantErr {
t.Errorf("IsChecksumTypeValid() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -855,7 +855,7 @@ func Test_checkChecksumTypeAndAlgo(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := checkChecksumTypeAndAlgo(tt.args.algo, tt.args.t, false); (err != nil) != tt.wantErr {
if err := checkChecksumTypeAndAlgo(tt.args.algo, tt.args.t); (err != nil) != tt.wantErr {
t.Errorf("checkChecksumTypeAndAlgo() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -890,7 +890,6 @@ func TestParseTagging(t *testing.T) {
data s3response.TaggingInput
overrideXML []byte
limit TagLimit
debug bool
}
tests := []struct {
name string
@@ -1010,7 +1009,7 @@ func TestParseTagging(t *testing.T) {
t.Fatalf("error marshalling input: %v", err)
}
}
got, err := ParseTagging(data, tt.args.limit, tt.args.debug)
got, err := ParseTagging(data, tt.args.limit)
if !errors.Is(err, tt.wantErr) {
t.Errorf("expected error %v, got %v", tt.wantErr, err)