Mirror of https://github.com/versity/versitygw.git (synced 2026-01-04 11:03:57 +00:00)

feat: makes debug logging prettier, adds missing logs in front-end and utility functions

Added missing debug logs in the front-end and utility functions. Enhanced debug logging with the following improvements:
- Each debug message is now prefixed with [DEBUG] and appears in color.
- The full request URL is printed at the beginning of each debug log block.
- Request/response details are wrapped in framed sections for better readability.
- Headers are displayed in a colored box.
- XML request/response bodies are pretty-printed with indentation and color.
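In practice the hand-rolled, debug-guarded log.Printf calls in the handlers give way to the new debuglogger.Logf helper. A minimal before/after sketch of the pattern that repeats throughout the diff below (identifiers taken from the first ListBuckets hunk):

	// before: plain stdlib logging, formatted ad hoc at every call site
	if c.debug {
		log.Printf("error parsing max-buckets %q: %v\n", maxBucketsStr, err)
	}

	// after: the shared helper adds the [DEBUG] prefix, color, and trailing newline itself
	if c.debug {
		debuglogger.Logf("error parsing max-buckets %q: %v", maxBucketsStr, err)
	}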
@@ -1744,7 +1744,7 @@ func validatePartChecksum(checksum s3response.Checksum, part types.CompletedPart
continue
}

if !utils.IsValidChecksum(*cs.checksum, cs.algo) {
if !utils.IsValidChecksum(*cs.checksum, cs.algo, false) {
return s3err.GetAPIError(s3err.ErrInvalidChecksumPart)
}

@@ -503,7 +503,7 @@ func validatePartChecksum(checksum s3response.Checksum, part types.CompletedPart
continue
}

if !utils.IsValidChecksum(*cs.checksum, cs.algo) {
if !utils.IsValidChecksum(*cs.checksum, cs.algo, false) {
return s3err.GetAPIError(s3err.ErrInvalidChecksumPart)
}
@@ -20,7 +20,6 @@ import (
"errors"
"fmt"
"io"
"log"
"net/http"
"net/url"
"strconv"

@@ -33,6 +32,7 @@ import (
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/metrics"
"github.com/versity/versitygw/s3api/debuglogger"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3event"
@@ -79,7 +79,7 @@ func (c S3ApiController) ListBuckets(ctx *fiber.Ctx) error {
maxBucketsParsed, err := strconv.ParseInt(maxBucketsStr, 10, 32)
if err != nil || maxBucketsParsed < 0 || maxBucketsParsed > 10000 {
if c.debug {
log.Printf("error parsing max-buckets %q: %v\n", maxBucketsStr, err)
debuglogger.Logf("error parsing max-buckets %q: %v", maxBucketsStr, err)
}
return SendXMLResponse(ctx, nil, s3err.GetAPIError(s3err.ErrInvalidMaxBuckets),
&MetaOpts{

@@ -253,7 +253,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
n, err := strconv.Atoi(partNumberMarker)
if err != nil || n < 0 {
if err != nil && c.debug {
log.Printf("error parsing part number marker %q: %v",
debuglogger.Logf("error parsing part number marker %q: %v",
partNumberMarker, err)
}
return SendResponse(ctx,

@@ -267,10 +267,10 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
}
}
mxParts := ctx.Query("max-parts")
maxParts, err := utils.ParseUint(mxParts)
maxParts, err := utils.ParseUint(mxParts, c.debug)
if err != nil {
if c.debug {
log.Printf("error parsing max parts %q: %v",
debuglogger.Logf("error parsing max parts %q: %v",
mxParts, err)
}
return SendResponse(ctx,

@@ -374,8 +374,12 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
}
maxParts := ctx.Get("X-Amz-Max-Parts")
partNumberMarker := ctx.Get("X-Amz-Part-Number-Marker")
maxPartsParsed, err := utils.ParseUint(maxParts)
maxPartsParsed, err := utils.ParseUint(maxParts, c.debug)
if err != nil {
if c.debug {
debuglogger.Logf("error parsing max parts %q: %v",
maxParts, err)
}
return SendXMLResponse(ctx, nil, s3err.GetAPIError(s3err.ErrInvalidMaxParts),
&MetaOpts{
Logger: c.logger,

@@ -384,11 +388,8 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
BucketOwner: parsedAcl.Owner,
})
}
attrs, err := utils.ParseObjectAttributes(ctx)
attrs, err := utils.ParseObjectAttributes(ctx, c.debug)
if err != nil {
if c.debug {
log.Printf("error parsing object attributes: %v", err)
}
return SendXMLResponse(ctx, nil, err,
&MetaOpts{
Logger: c.logger,

@@ -492,7 +493,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
checksumMode := types.ChecksumMode(ctx.Get("x-amz-checksum-mode"))
if checksumMode != "" && checksumMode != types.ChecksumModeEnabled {
if c.debug {
log.Printf("invalid x-amz-checksum-mode header value: %v\n", checksumMode)
debuglogger.Logf("invalid x-amz-checksum-mode header value: %v", checksumMode)
}
return SendResponse(ctx, s3err.GetInvalidChecksumHeaderErr("x-amz-checksum-mode"),
&MetaOpts{

@@ -503,7 +504,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
})
}

ctx.Locals("logResBody", false)
ctx.Locals("skip-res-body-log", true)
res, err := c.be.GetObject(ctx.Context(), &s3.GetObjectInput{
Bucket: &bucket,
Key:    &key,
@@ -921,10 +922,10 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
})
}

maxkeys, err := utils.ParseUint(maxkeysStr)
maxkeys, err := utils.ParseUint(maxkeysStr, c.debug)
if err != nil {
if c.debug {
log.Printf("error parsing max keys %q: %v",
debuglogger.Logf("error parsing max keys %q: %v",
maxkeysStr, err)
}
return SendXMLResponse(ctx, nil, s3err.GetAPIError(s3err.ErrInvalidMaxKeys),

@@ -1054,10 +1055,10 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
BucketOwner: parsedAcl.Owner,
})
}
maxUploads, err := utils.ParseUint(maxUploadsStr)
maxUploads, err := utils.ParseUint(maxUploadsStr, c.debug)
if err != nil {
if c.debug {
log.Printf("error parsing max uploads %q: %v",
debuglogger.Logf("error parsing max uploads %q: %v",
maxUploadsStr, err)
}
return SendXMLResponse(ctx, nil, s3err.GetAPIError(s3err.ErrInvalidMaxUploads),

@@ -1105,10 +1106,10 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
BucketOwner: parsedAcl.Owner,
})
}
maxkeys, err := utils.ParseUint(maxkeysStr)
maxkeys, err := utils.ParseUint(maxkeysStr, c.debug)
if err != nil {
if c.debug {
log.Printf("error parsing max keys %q: %v",
debuglogger.Logf("error parsing max keys %q: %v",
maxkeysStr, err)
}
return SendXMLResponse(ctx, nil, s3err.GetAPIError(s3err.ErrInvalidMaxKeys),

@@ -1159,10 +1160,10 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
})
}

maxkeys, err := utils.ParseUint(maxkeysStr)
maxkeys, err := utils.ParseUint(maxkeysStr, c.debug)
if err != nil {
if c.debug {
log.Printf("error parsing max keys %q: %v",
debuglogger.Logf("error parsing max keys %q: %v",
maxkeysStr, err)
}
return SendXMLResponse(ctx, nil, s3err.GetAPIError(s3err.ErrInvalidMaxKeys),
@@ -1214,7 +1215,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
err := xml.Unmarshal(ctx.Body(), &bucketTagging)
if err != nil {
if c.debug {
log.Printf("error unmarshalling bucket tagging: %v", err)
debuglogger.Logf("error unmarshalling bucket tagging: %v", err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest),
&MetaOpts{

@@ -1227,7 +1228,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {

if len(bucketTagging.TagSet.Tags) > 50 {
if c.debug {
log.Printf("bucket tagging length exceeds 50: %v\n", len(bucketTagging.TagSet.Tags))
debuglogger.Logf("bucket tagging length exceeds 50: %v", len(bucketTagging.TagSet.Tags))
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrBucketTaggingLimited),
&MetaOpts{

@@ -1242,6 +1243,9 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {

for _, tag := range bucketTagging.TagSet.Tags {
if len(tag.Key) > 128 || len(tag.Value) > 256 {
if c.debug {
debuglogger.Logf("invalid long bucket tagging key/value")
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidTag),
&MetaOpts{
Logger: c.logger,

@@ -1287,6 +1291,9 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
var ownershipControls s3response.OwnershipControls
if err := xml.Unmarshal(ctx.Body(), &ownershipControls); err != nil {
if c.debug {
debuglogger.Logf("failed to unmarshal request body: %v", err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
&MetaOpts{
Logger: c.logger,

@@ -1296,7 +1303,12 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
})
}

if len(ownershipControls.Rules) != 1 || !utils.IsValidOwnership(ownershipControls.Rules[0].ObjectOwnership) {
rulesCount := len(ownershipControls.Rules)
isValidOwnership := utils.IsValidOwnership(ownershipControls.Rules[0].ObjectOwnership, c.debug)
if rulesCount != 1 || !isValidOwnership {
if c.debug && rulesCount != 1 {
debuglogger.Logf("ownership control rules should be 1, got %v", rulesCount)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
&MetaOpts{
Logger: c.logger,

@@ -1359,7 +1371,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
err = xml.Unmarshal(ctx.Body(), &versioningConf)
if err != nil {
if c.debug {
log.Printf("error unmarshalling versioning configuration: %v",
debuglogger.Logf("error unmarshalling versioning configuration: %v",
err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest),

@@ -1374,7 +1386,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
if versioningConf.Status != types.BucketVersioningStatusEnabled &&
versioningConf.Status != types.BucketVersioningStatusSuspended {
if c.debug {
log.Printf("invalid versioning configuration status: %v\n", versioningConf.Status)
debuglogger.Logf("invalid versioning configuration status: %v", versioningConf.Status)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
&MetaOpts{

@@ -1531,7 +1543,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
}
if ownership == types.ObjectOwnershipBucketOwnerEnforced {
if c.debug {
log.Println("bucket acls are disabled")
debuglogger.Logf("bucket acls are disabled")
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrAclNotSupported),
&MetaOpts{

@@ -1567,7 +1579,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
err := xml.Unmarshal(ctx.Body(), &accessControlPolicy)
if err != nil {
if c.debug {
log.Printf("error unmarshalling access control policy: %v", err)
debuglogger.Logf("error unmarshalling access control policy: %v", err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedACL),
&MetaOpts{

@@ -1580,7 +1592,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
err = accessControlPolicy.Validate()
if err != nil {
if c.debug {
log.Printf("invalid access control policy: %v\n", err)
debuglogger.Logf("invalid access control policy: %v", err)
}
return SendResponse(ctx, err,
&MetaOpts{

@@ -1592,7 +1604,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {

if *accessControlPolicy.Owner.ID != parsedAcl.Owner {
if c.debug {
log.Printf("invalid access control policy owner id: %v, expected %v", *accessControlPolicy.Owner.ID, parsedAcl.Owner)
debuglogger.Logf("invalid access control policy owner id: %v, expected %v", *accessControlPolicy.Owner.ID, parsedAcl.Owner)
}
return SendResponse(ctx, s3err.APIError{
Code: "InvalidArgument",

@@ -1608,7 +1620,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {

if grants+acl != "" {
if c.debug {
log.Printf("invalid request: %q (grants) %q (acl)",
debuglogger.Logf("invalid request: %q (grants) %q (acl)",
grants, acl)
}
return SendResponse(ctx,

@@ -1628,7 +1640,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
} else if acl != "" {
if acl != "private" && acl != "public-read" && acl != "public-read-write" {
if c.debug {
log.Printf("invalid acl: %q", acl)
debuglogger.Logf("invalid acl: %q", acl)
}
return SendResponse(ctx,
s3err.GetAPIError(s3err.ErrInvalidRequest),

@@ -1641,7 +1653,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
}
if grants != "" {
if c.debug {
log.Printf("invalid request: %q (grants) %q (acl)",
debuglogger.Logf("invalid request: %q (grants) %q (acl)",
grants, acl)
}
return SendResponse(ctx,

@@ -1669,7 +1681,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
}
} else {
if c.debug {
log.Println("none of the bucket acl options has been specified: canned, req headers, req body")
debuglogger.Logf("none of the bucket acl options has been specified: canned, req headers, req body")
}
return SendResponse(ctx,
s3err.GetAPIError(s3err.ErrMissingSecurityHeader),

@@ -1702,7 +1714,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
})
}

if ok := utils.IsValidBucketName(bucket); !ok {
if ok := utils.IsValidBucketName(bucket, c.debug); !ok {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidBucketName),
&MetaOpts{
Logger: c.logger,

@@ -1710,10 +1722,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
Action: metrics.ActionCreateBucket,
})
}
if ok := utils.IsValidOwnership(objectOwnership); !ok {
if c.debug {
log.Printf("invalid bucket object ownership: %v", objectOwnership)
}
if ok := utils.IsValidOwnership(objectOwnership, c.debug); !ok {
return SendResponse(ctx, s3err.APIError{
Code: "InvalidArgument",
Description: fmt.Sprintf("Invalid x-amz-object-ownership header: %v", objectOwnership),

@@ -1729,7 +1738,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {

if acl+grants != "" && objectOwnership == types.ObjectOwnershipBucketOwnerEnforced {
if c.debug {
log.Printf("bucket acls are disabled for %v object ownership", objectOwnership)
debuglogger.Logf("bucket acls are disabled for %v object ownership", objectOwnership)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidBucketAclWithObjectOwnership),
&MetaOpts{

@@ -1742,7 +1751,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {

if acl != "" && grants != "" {
if c.debug {
log.Printf("invalid request: %q (grants) %q (acl)", grants, acl)
debuglogger.Logf("invalid request: %q (grants) %q (acl)", grants, acl)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrBothCannedAndHeaderGrants),
&MetaOpts{

@@ -1770,6 +1779,9 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
ACL: types.BucketCannedACL(acl),
}, defACL, c.iam, acct.Role == auth.RoleAdmin)
if err != nil {
if c.debug {
debuglogger.Logf("failed to update bucket acl: %v", err)
}
return SendResponse(ctx, err,
&MetaOpts{
Logger: c.logger,
@@ -1865,7 +1877,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
err := xml.Unmarshal(ctx.Body(), &objTagging)
if err != nil {
if c.debug {
log.Printf("error unmarshalling object tagging: %v", err)
debuglogger.Logf("error unmarshalling object tagging: %v", err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest),
&MetaOpts{

@@ -1878,7 +1890,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {

if len(objTagging.TagSet.Tags) > 10 {
if c.debug {
log.Printf("bucket tagging length exceeds 10: %v\n", len(objTagging.TagSet.Tags))
debuglogger.Logf("bucket tagging length exceeds 10: %v", len(objTagging.TagSet.Tags))
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrObjectTaggingLimited),
&MetaOpts{

@@ -1894,7 +1906,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
for _, tag := range objTagging.TagSet.Tags {
if len(tag.Key) > 128 || len(tag.Value) > 256 {
if c.debug {
log.Printf("invalid tag key/value len: %q %q",
debuglogger.Logf("invalid tag key/value len: %q %q",
tag.Key, tag.Value)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidTag),

@@ -1975,6 +1987,9 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {

retention, err := auth.ParseObjectLockRetentionInput(ctx.Body())
if err != nil {
if c.debug {
debuglogger.Logf("failed to parse object lock configuration input: %v", err)
}
return SendResponse(ctx, err, &MetaOpts{
Logger: c.logger,
MetricsMng: c.mm,

@@ -1995,6 +2010,9 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
if ctx.Request().URI().QueryArgs().Has("legal-hold") {
var legalHold types.ObjectLockLegalHold
if err := xml.Unmarshal(ctx.Body(), &legalHold); err != nil {
if c.debug {
debuglogger.Logf("failed to parse request body: %v", err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
&MetaOpts{
Logger: c.logger,

@@ -2005,6 +2023,9 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
}

if legalHold.Status != types.ObjectLockLegalHoldStatusOff && legalHold.Status != types.ObjectLockLegalHoldStatusOn {
if c.debug {
debuglogger.Logf("invalid legal hold status: %v", legalHold.Status)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
&MetaOpts{
Logger: c.logger,

@@ -2050,7 +2071,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
copySource, err := url.QueryUnescape(copySource)
if err != nil {
if c.debug {
log.Printf("error unescaping copy source %q: %v",
debuglogger.Logf("error unescaping copy source %q: %v",
cs, err)
}
return SendXMLResponse(ctx, nil,

@@ -2066,7 +2087,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
partNumber := int32(ctx.QueryInt("partNumber", -1))
if partNumber < 1 || partNumber > 10000 {
if c.debug {
log.Printf("invalid part number: %d", partNumber)
debuglogger.Logf("invalid part number: %d", partNumber)
}
return SendXMLResponse(ctx, nil,
s3err.GetAPIError(s3err.ErrInvalidPartNumber),

@@ -2130,7 +2151,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
partNumber := int32(ctx.QueryInt("partNumber", -1))
if partNumber < 1 || partNumber > 10000 {
if c.debug {
log.Printf("invalid part number: %d", partNumber)
debuglogger.Logf("invalid part number: %d", partNumber)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidPartNumber),
&MetaOpts{

@@ -2165,7 +2186,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
contentLength, err := strconv.ParseInt(contentLengthStr, 10, 64)
if err != nil {
if c.debug {
log.Printf("error parsing content length %q: %v",
debuglogger.Logf("error parsing content length %q: %v",
contentLengthStr, err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest),

@@ -2177,8 +2198,11 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
})
}

algorithm, checksums, err := utils.ParseChecksumHeaders(ctx)
algorithm, checksums, err := utils.ParseChecksumHeaders(ctx, c.debug)
if err != nil {
if c.debug {
debuglogger.Logf("err parsing checksum headers: %v", err)
}
return SendResponse(ctx, err,
&MetaOpts{
Logger: c.logger,

@@ -2196,7 +2220,6 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
body = bytes.NewReader([]byte{})
}

ctx.Locals("logReqBody", false)
res, err := c.be.UploadPart(ctx.Context(),
&s3.UploadPartInput{
Bucket: &bucket,

@@ -2266,7 +2289,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
if len(ctx.Body()) > 0 {
if grants+acl != "" {
if c.debug {
log.Printf("invalid request: %q (grants) %q (acl)",
debuglogger.Logf("invalid request: %q (grants) %q (acl)",
grants, acl)
}
return SendResponse(ctx,

@@ -2283,7 +2306,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
err := xml.Unmarshal(ctx.Body(), &accessControlPolicy)
if err != nil {
if c.debug {
log.Printf("error unmarshalling access control policy: %v",
debuglogger.Logf("error unmarshalling access control policy: %v",
err)
}
return SendResponse(ctx,

@@ -2322,7 +2345,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
if acl != "" {
if acl != "private" && acl != "public-read" && acl != "public-read-write" {
if c.debug {
log.Printf("invalid acl: %q", acl)
debuglogger.Logf("invalid acl: %q", acl)
}
return SendResponse(ctx,
s3err.GetAPIError(s3err.ErrInvalidRequest),

@@ -2335,7 +2358,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
}
if len(ctx.Body()) > 0 || grants != "" {
if c.debug {
log.Printf("invalid request: %q (grants) %q (acl) %v (body len)",
debuglogger.Logf("invalid request: %q (grants) %q (acl) %v (body len)",
grants, acl, len(ctx.Body()))
}
return SendResponse(ctx,

@@ -2390,7 +2413,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
copySource, err := url.QueryUnescape(copySource)
if err != nil {
if c.debug {
log.Printf("error unescaping copy source %q: %v",
debuglogger.Logf("error unescaping copy source %q: %v",
cs, err)
}
return SendXMLResponse(ctx, nil,

@@ -2429,7 +2452,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
tm, err := time.Parse(iso8601Format, copySrcModifSince)
if err != nil {
if c.debug {
log.Printf("error parsing copy source modified since %q: %v",
debuglogger.Logf("error parsing copy source modified since %q: %v",
copySrcModifSince, err)
}
return SendXMLResponse(ctx, nil,

@@ -2447,7 +2470,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
tm, err := time.Parse(iso8601Format, copySrcUnmodifSince)
if err != nil {
if c.debug {
log.Printf("error parsing copy source unmodified since %q: %v",
debuglogger.Logf("error parsing copy source unmodified since %q: %v",
copySrcUnmodifSince, err)
}
return SendXMLResponse(ctx, nil,

@@ -2465,6 +2488,9 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
metadata := utils.GetUserMetaData(&ctx.Request().Header)

if directive != "" && directive != "COPY" && directive != "REPLACE" {
if c.debug {
debuglogger.Logf("invalid metadata directive: %v", directive)
}
return SendXMLResponse(ctx, nil,
s3err.GetAPIError(s3err.ErrInvalidMetadataDirective),
&MetaOpts{
@@ -2482,6 +2508,9 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {

tDirective := types.TaggingDirective(ctx.Get("X-Amz-Tagging-Directive"))
if tDirective != "" && tDirective != types.TaggingDirectiveCopy && tDirective != types.TaggingDirectiveReplace {
if c.debug {
debuglogger.Logf("invalid tagging directive: %v", tDirective)
}
return SendXMLResponse(ctx, nil,
s3err.GetAPIError(s3err.ErrInvalidTaggingDirective),
&MetaOpts{
@@ -2498,11 +2527,8 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
}

checksumAlgorithm := types.ChecksumAlgorithm(ctx.Get("x-amz-checksum-algorithm"))
err = utils.IsChecksumAlgorithmValid(checksumAlgorithm)
err = utils.IsChecksumAlgorithmValid(checksumAlgorithm, c.debug)
if err != nil {
if c.debug {
log.Printf("invalid checksum algorithm: %v", checksumAlgorithm)
}
return SendXMLResponse(ctx, nil, err,
&MetaOpts{
Logger: c.logger,

@@ -2512,7 +2538,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
})
}

objLock, err := utils.ParsObjectLockHdrs(ctx)
objLock, err := utils.ParsObjectLockHdrs(ctx, c.debug)
if err != nil {
return SendResponse(ctx, err,
&MetaOpts{

@@ -2624,7 +2650,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
contentLength, err := strconv.ParseInt(contentLengthStr, 10, 64)
if err != nil {
if c.debug {
log.Printf("error parsing content length %q: %v",
debuglogger.Logf("error parsing content length %q: %v",
contentLengthStr, err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest),

@@ -2636,7 +2662,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
})
}

objLock, err := utils.ParsObjectLockHdrs(ctx)
objLock, err := utils.ParsObjectLockHdrs(ctx, c.debug)
if err != nil {
return SendResponse(ctx, err,
&MetaOpts{

@@ -2647,7 +2673,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
})
}

algorithm, checksums, err := utils.ParseChecksumHeaders(ctx)
algorithm, checksums, err := utils.ParseChecksumHeaders(ctx, c.debug)
if err != nil {
return SendResponse(ctx, err,
&MetaOpts{

@@ -2666,7 +2692,6 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
body = bytes.NewReader([]byte{})
}

ctx.Locals("logReqBody", false)
res, err := c.be.PutObject(ctx.Context(),
s3response.PutObjectInput{
Bucket: &bucket,
@@ -2942,7 +2967,7 @@ func (c S3ApiController) DeleteObjects(ctx *fiber.Ctx) error {
err := xml.Unmarshal(ctx.Body(), &dObj)
if err != nil {
if c.debug {
log.Printf("error unmarshalling delete objects: %v", err)
debuglogger.Logf("error unmarshalling delete objects: %v", err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest),
&MetaOpts{

@@ -3263,7 +3288,7 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
if ctx.Request().URI().QueryArgs().Has("partNumber") {
if partNumberQuery < 1 || partNumberQuery > 10000 {
if c.debug {
log.Printf("invalid part number: %d", partNumberQuery)
debuglogger.Logf("invalid part number: %d", partNumberQuery)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidPartNumber),
&MetaOpts{

@@ -3301,7 +3326,7 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
checksumMode := types.ChecksumMode(ctx.Get("x-amz-checksum-mode"))
if checksumMode != "" && checksumMode != types.ChecksumModeEnabled {
if c.debug {
log.Printf("invalid x-amz-checksum-mode header value: %v\n", checksumMode)
debuglogger.Logf("invalid x-amz-checksum-mode header value: %v", checksumMode)
}
return SendResponse(ctx, s3err.GetInvalidChecksumHeaderErr("x-amz-checksum-mode"),
&MetaOpts{
@@ -3514,6 +3539,9 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
var restoreRequest types.RestoreRequest
if err := xml.Unmarshal(ctx.Body(), &restoreRequest); err != nil {
if !errors.Is(err, io.EOF) {
if c.debug {
debuglogger.Logf("failed to parse the request body: %v", err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
&MetaOpts{
Logger: c.logger,

@@ -3566,7 +3594,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
err := xml.Unmarshal(ctx.Body(), &payload)
if err != nil {
if c.debug {
log.Printf("error unmarshalling select object content: %v", err)
debuglogger.Logf("error unmarshalling select object content: %v", err)
}
return SendXMLResponse(ctx, nil,
s3err.GetAPIError(s3err.ErrMalformedXML),

@@ -3624,7 +3652,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
err := xml.Unmarshal(ctx.Body(), &data)
if err != nil {
if c.debug {
log.Printf("error unmarshalling complete multipart upload: %v", err)
debuglogger.Logf("error unmarshalling complete multipart upload: %v", err)
}
return SendXMLResponse(ctx, nil,
s3err.GetAPIError(s3err.ErrMalformedXML),

@@ -3638,7 +3666,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {

if len(data.Parts) == 0 {
if c.debug {
log.Println("empty parts provided for complete multipart upload")
debuglogger.Logf("empty parts provided for complete multipart upload")
}
return SendXMLResponse(ctx, nil,
s3err.GetAPIError(s3err.ErrEmptyParts),

@@ -3656,6 +3684,9 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
val, err := strconv.ParseInt(mpuObjSizeHdr, 10, 64)
//TODO: Not sure if invalid request should be returned
if err != nil {
if c.debug {
debuglogger.Logf("invalid value for 'x-amz-mp-objects-size' header: %v", err)
}
return SendXMLResponse(ctx, nil,
s3err.GetAPIError(s3err.ErrInvalidRequest),
&MetaOpts{

@@ -3667,6 +3698,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
}

if val < 0 {
debuglogger.Logf("value for 'x-amz-mp-objects-size' header is less than 0: %v", val)
return SendXMLResponse(ctx, nil,
s3err.GetInvalidMpObjectSizeErr(val),
&MetaOpts{

@@ -3701,11 +3733,8 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
})
}

_, checksums, err := utils.ParseChecksumHeaders(ctx)
_, checksums, err := utils.ParseChecksumHeaders(ctx, c.debug)
if err != nil {
if c.debug {
log.Printf("err parsing checksum headers: %v", err)
}
return SendXMLResponse(ctx, nil, err,
&MetaOpts{
Logger: c.logger,

@@ -3716,11 +3745,8 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
}

checksumType := types.ChecksumType(ctx.Get("x-amz-checksum-type"))
err = utils.IsChecksumTypeValid(checksumType)
err = utils.IsChecksumTypeValid(checksumType, c.debug)
if err != nil {
if c.debug {
log.Printf("invalid checksum type: %v", err)
}
return SendXMLResponse(ctx, nil, err,
&MetaOpts{
Logger: c.logger,

@@ -3797,7 +3823,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
})
}

objLockState, err := utils.ParsObjectLockHdrs(ctx)
objLockState, err := utils.ParsObjectLockHdrs(ctx, c.debug)
if err != nil {
return SendXMLResponse(ctx, nil, err,
&MetaOpts{

@@ -3810,11 +3836,8 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {

metadata := utils.GetUserMetaData(&ctx.Request().Header)

checksumAlgorithm, checksumType, err := utils.ParseCreateMpChecksumHeaders(ctx)
checksumAlgorithm, checksumType, err := utils.ParseCreateMpChecksumHeaders(ctx, c.debug)
if err != nil {
if c.debug {
log.Printf("err parsing checksum headers: %v", err)
}
return SendXMLResponse(ctx, nil, err,
&MetaOpts{
Logger: c.logger,
@@ -3900,7 +3923,7 @@ func SendResponse(ctx *fiber.Ctx, err error, l *MetaOpts) error {
return ctx.Send(s3err.GetAPIErrorResponse(apierr, "", "", ""))
}

log.Printf("Internal Error, %v", err)
debuglogger.Logf("Internal Error, %v", err)
ctx.Status(http.StatusInternalServerError)
return ctx.Send(s3err.GetAPIErrorResponse(
s3err.GetAPIError(s3err.ErrInternalError), "", "", ""))

@@ -3915,8 +3938,6 @@ func SendResponse(ctx *fiber.Ctx, err error, l *MetaOpts) error {
})
}

utils.LogCtxDetails(ctx, []byte{})

if l.Status == 0 {
l.Status = http.StatusOK
}

@@ -3956,7 +3977,7 @@ func SendXMLResponse(ctx *fiber.Ctx, resp any, err error, l *MetaOpts) error {
return ctx.Send(s3err.GetAPIErrorResponse(serr, "", "", ""))
}

log.Printf("Internal Error, %v", err)
debuglogger.Logf("Internal Error, %v", err)
ctx.Status(http.StatusInternalServerError)

return ctx.Send(s3err.GetAPIErrorResponse(

@@ -3982,7 +4003,6 @@ func SendXMLResponse(ctx *fiber.Ctx, resp any, err error, l *MetaOpts) error {
}
}

utils.LogCtxDetails(ctx, b)
if l.Logger != nil {
l.Logger.Log(ctx, nil, b, s3log.LogMeta{
Action: l.Action,

@@ -4011,7 +4031,7 @@ func SendXMLResponse(ctx *fiber.Ctx, resp any, err error, l *MetaOpts) error {

msglen := len(xmlhdr) + len(b)
if msglen > maxXMLBodyLen {
log.Printf("XML encoded body len %v exceeds max len %v",
debuglogger.Logf("XML encoded body len %v exceeds max len %v",
msglen, maxXMLBodyLen)
ctx.Status(http.StatusInternalServerError)
s3api/debuglogger/logger.go (new file, 251 lines)
@@ -0,0 +1,251 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package debuglogger

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"io"
	"log"
	"net/http"
	"strings"

	"github.com/gofiber/fiber/v2"
)

const (
	reset      = "\033[0m"
	green      = "\033[32m"
	yellow     = "\033[33m"
	blue       = "\033[34m"
	borderChar = "─"
	boxWidth   = 120
)

// Logs http request details: headers, body, params, query args
func LogFiberRequestDetails(ctx *fiber.Ctx) {
	// Log the full request url
	fullURL := ctx.Protocol() + "://" + ctx.Hostname() + ctx.OriginalURL()
	fmt.Printf("%s[URL]: %s%s\n", green, fullURL, reset)

	// log request headers
	wrapInBox(green, "REQUEST HEADERS", boxWidth, func() {
		ctx.Request().Header.VisitAll(func(key, value []byte) {
			printWrappedLine(yellow, string(key), string(value))
		})
	})
	// skip request body log for PutObject and UploadPart
	skipBodyLog := isLargeDataAction(ctx)
	if !skipBodyLog {
		body := ctx.Request().Body()
		if len(body) != 0 {
			printBoxTitleLine(blue, "REQUEST BODY", boxWidth, false)
			prettyBody := prettyPrintXML(body)
			fmt.Printf("%s%s%s\n", blue, prettyBody, reset)
			printHorizontalBorder(blue, boxWidth, false)
		}
	}

	if ctx.Request().URI().QueryArgs().Len() != 0 {
		ctx.Request().URI().QueryArgs().VisitAll(func(key, val []byte) {
			log.Printf("%s: %s", key, val)
		})
	}
}

// Logs http response details: body, headers
func LogFiberResponseDetails(ctx *fiber.Ctx) {
	wrapInBox(green, "RESPONSE HEADERS", boxWidth, func() {
		ctx.Response().Header.VisitAll(func(key, value []byte) {
			printWrappedLine(yellow, string(key), string(value))
		})
	})

	_, ok := ctx.Locals("skip-res-body-log").(bool)
	if !ok {
		body := ctx.Response().Body()
		if len(body) != 0 {
			printBoxTitleLine(blue, "RESPONSE BODY", boxWidth, false)
			prettyBody := prettyPrintXML(body)
			fmt.Printf("%s%s%s\n", blue, prettyBody, reset)
			printHorizontalBorder(blue, boxWidth, false)
		}
	}
}

// Logf is the same as 'fmt.Printf' with a debug prefix,
// a color added and '\n' at the end
func Logf(format string, v ...any) {
	debugPrefix := "[DEBUG]: "
	fmt.Printf(yellow+debugPrefix+format+reset+"\n", v...)
}

// prettyPrintXML takes raw XML input and returns a formatted (pretty-printed) version.
func prettyPrintXML(input []byte) string {
	b := &bytes.Buffer{}
	decoder := xml.NewDecoder(bytes.NewReader(input))
	encoder := xml.NewEncoder(b)
	encoder.Indent("", "  ")

	var depth int
	for {
		token, err := decoder.Token()
		if err == io.EOF {
			encoder.Flush()
			return b.String()
		}
		if err != nil {
			// Return the raw input if decoding fails
			return string(input)
		}

		switch t := token.(type) {
		case xml.StartElement:
			if depth > 0 {
				// Strip namespace from tag name
				t.Name.Space = ""

				// Filter out xmlns attributes to make it more readable
				newAttrs := make([]xml.Attr, 0, len(t.Attr))
				for _, attr := range t.Attr {
					if !(attr.Name.Space == "" && attr.Name.Local == "xmlns") {
						newAttrs = append(newAttrs, attr)
					}
				}
				t.Attr = newAttrs
			}
			depth++
			err = encoder.EncodeToken(t)
		case xml.EndElement:
			if depth > 1 {
				t.Name.Space = ""
			}
			depth--
			err = encoder.EncodeToken(t)
		default:
			err = encoder.EncodeToken(t)
		}

		if err != nil {
			// Return the raw input if encoding fails
			return string(input)
		}
	}
}

// Prints out a box title either with closing characters or not: "┌", "┐"
// e.g. ┌────────────────[ RESPONSE HEADERS ]────────────────┐
func printBoxTitleLine(color, title string, length int, closing bool) {
	leftCorner, rightCorner := "┌", "┐"

	if !closing {
		leftCorner, rightCorner = borderChar, borderChar
	}

	// Calculate how many border characters are needed
	titleFormatted := fmt.Sprintf("[ %s ]", title)
	borderSpace := length - len(titleFormatted) - 2 // 2 for corners
	leftLen := borderSpace / 2
	rightLen := borderSpace - leftLen

	// Build the line
	line := leftCorner +
		strings.Repeat(borderChar, leftLen) +
		titleFormatted +
		strings.Repeat(borderChar, rightLen) +
		rightCorner

	fmt.Println(color + line + reset)
}

// Prints out a horizontal line either with closing characters or not: "└", "┘"
func printHorizontalBorder(color string, length int, closing bool) {
	leftCorner, rightCorner := "└", "┘"
	if !closing {
		leftCorner, rightCorner = borderChar, borderChar
	}

	line := leftCorner + strings.Repeat(borderChar, length-2) + rightCorner + reset
	fmt.Println(color + line)
}

// wrapInBox wraps the output of a function call (fn) inside a styled box with a title.
func wrapInBox(color, title string, length int, fn func()) {
	printBoxTitleLine(color, title, length, true)
	fn()
	printHorizontalBorder(color, length, true)
}

// getLen returns the provided string's length,
// with a minimum of 13 for shorter strings
func getLen(str string) int {
	if len(str) < 13 {
		return 13
	}

	return len(str)
}

// prints a formatted key-value pair within a box layout,
// wrapping the value text if it exceeds the allowed width.
func printWrappedLine(keyColor, key, value string) {
	prefix := fmt.Sprintf("%s│%s %s%-13s%s : ", green, reset, keyColor, key, reset)
	prefixLen := len(prefix) - len(green) - len(reset) - len(keyColor) - len(reset)
	// the actual prefix size without colors
	actualPrefixLen := getLen(key) + 5

	lineWidth := boxWidth - prefixLen
	valueLines := wrapText(value, lineWidth)

	for i, line := range valueLines {
		if i == 0 {
			if len(line) < lineWidth {
				line += strings.Repeat(" ", lineWidth-len(line))
			}
			fmt.Printf("%s%s%s %s│%s\n", prefix, reset, line, green, reset)
		} else {
			line = strings.Repeat(" ", actualPrefixLen-2) + line
			if len(line) < boxWidth-4 {
				line += strings.Repeat(" ", boxWidth-len(line)-4)
			}
			fmt.Printf("%s│ %s%s %s│%s\n", green, reset, line, green, reset)
		}
	}
}

// wrapText splits the input text into lines of at most `width` characters each.
func wrapText(text string, width int) []string {
	var lines []string
	for len(text) > width {
		lines = append(lines, text[:width])
		text = text[width:]
	}
	if text != "" {
		lines = append(lines, text)
	}
	return lines
}

// TODO: remove this and use utils.IsBidDataAction after refactoring
// and creating an 'internal' package
func isLargeDataAction(ctx *fiber.Ctx) bool {
	if ctx.Method() == http.MethodPut && len(strings.Split(ctx.Path(), "/")) >= 3 {
		if !ctx.Request().URI().QueryArgs().Has("tagging") && ctx.Get("X-Amz-Copy-Source") == "" && !ctx.Request().URI().QueryArgs().Has("acl") {
			return true
		}
	}
	return false
}
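For a sense of what the package prints: Logf produces one yellow line per call, prefixed with "[DEBUG]: ", while LogFiberRequestDetails/LogFiberResponseDetails frame the headers in a box and run any XML body through prettyPrintXML, which re-indents it and drops xmlns attributes. A tiny sketch (the error value is hypothetical, and the output is shown without the ANSI color escapes):

	debuglogger.Logf("error parsing max keys %q: %v", "abc", err)
	// stdout (roughly): [DEBUG]: error parsing max keys "abc": ...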
@@ -120,7 +120,7 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
var err error
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
var cr io.Reader
cr, err = utils.NewChunkReader(ctx, r, authData, region, account.Secret, tdate)
cr, err = utils.NewChunkReader(ctx, r, authData, region, account.Secret, tdate, debug)
return cr
})
if err != nil {
@@ -15,30 +15,15 @@
package middlewares

import (
"fmt"
"log"

"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/s3api/debuglogger"
)

func RequestLogger(isDebug bool) fiber.Handler {
func DebugLogger() fiber.Handler {
return func(ctx *fiber.Ctx) error {
ctx.Locals("isDebug", isDebug)
if isDebug {
log.Println("Request headers: ")
ctx.Request().Header.VisitAll(func(key, val []byte) {
log.Printf("%s: %s", key, val)
})

if ctx.Request().URI().QueryArgs().Len() != 0 {
fmt.Println()
log.Println("Request query arguments: ")
ctx.Request().URI().QueryArgs().VisitAll(func(key, val []byte) {
log.Printf("%s: %s", key, val)
})
}
}

return ctx.Next()
debuglogger.LogFiberRequestDetails(ctx)
err := ctx.Next()
debuglogger.LogFiberResponseDetails(ctx)
return err
}
}
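The rewritten middleware dumps the request before ctx.Next() and the response after the handler returns, so it has to sit in front of the routes it should observe; the next hunk shows the gateway registering it only when debug mode is on. A self-contained sketch of the same wiring in a bare Fiber app (the import path, route, and port are assumed for illustration and are not taken from this diff):

	package main

	import (
		"log"

		"github.com/gofiber/fiber/v2"
		"github.com/versity/versitygw/s3api/middlewares"
	)

	func main() {
		app := fiber.New()
		// framed request/response dumps for every route registered below
		app.Use(middlewares.DebugLogger())
		app.Get("/healthz", func(ctx *fiber.Ctx) error {
			return ctx.SendString("ok")
		})
		log.Fatal(app.Listen(":8080"))
	}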
@@ -76,7 +76,9 @@ func New(
})
}
app.Use(middlewares.DecodeURL(l, mm))
app.Use(middlewares.RequestLogger(server.debug))
if server.debug {
app.Use(middlewares.DebugLogger())
}

// Authentication middlewares
app.Use(middlewares.VerifyPresignedV4Signature(root, iam, l, mm, region, server.debug))
@@ -95,7 +95,7 @@ func IsStreamingPayload(str string) bool {
pt == payloadTypeStreamingSignedTrailer
}

func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time) (io.Reader, error) {
func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time, debug bool) (io.Reader, error) {
decContLength := ctx.Get("X-Amz-Decoded-Content-Length")
if decContLength == "" {
return nil, s3err.GetAPIError(s3err.ErrMissingDecodedContentLength)

@@ -113,11 +113,11 @@ func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secr

switch contentSha256 {
case payloadTypeStreamingUnsignedTrailer:
return NewUnsignedChunkReader(r, checksumType)
return NewUnsignedChunkReader(r, checksumType, debug)
case payloadTypeStreamingSignedTrailer:
return NewSignedChunkReader(r, authdata, region, secret, date, checksumType)
return NewSignedChunkReader(r, authdata, region, secret, date, checksumType, debug)
case payloadTypeStreamingSigned:
return NewSignedChunkReader(r, authdata, region, secret, date, "")
return NewSignedChunkReader(r, authdata, region, secret, date, "", debug)
// return not supported for:
// - STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD
// - STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER
@@ -1,55 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package utils

import (
	"fmt"
	"log"

	"github.com/gofiber/fiber/v2"
)

func LogCtxDetails(ctx *fiber.Ctx, respBody []byte) {
	isDebug, ok := ctx.Locals("isDebug").(bool)
	_, notLogReqBody := ctx.Locals("logReqBody").(bool)
	_, notLogResBody := ctx.Locals("logResBody").(bool)
	if isDebug && ok {
		// Log request body
		if !notLogReqBody {
			fmt.Println()
			log.Printf("Request Body: %s", ctx.Request().Body())
		}

		// Log path parameters
		fmt.Println()
		log.Println("Path parameters: ")
		for key, val := range ctx.AllParams() {
			log.Printf("%s: %s", key, val)
		}

		// Log response headers
		fmt.Println()
		log.Println("Response Headers: ")
		ctx.Response().Header.VisitAll(func(key, val []byte) {
			log.Printf("%s: %s", key, val)
		})

		// Log response body
		if !notLogResBody && len(respBody) > 0 {
			fmt.Println()
			log.Printf("Response body %s", ctx.Response().Body())
		}
	}
}
@@ -64,15 +64,17 @@ type ChunkReader struct {
checksumHash hash.Hash
isEOF bool
isFirstHeader bool
region string
date time.Time
//TODO: Add debug logging for the reader
debug bool
region string
date   time.Time
}

// NewChunkReader reads from request body io.Reader and parses out the
// chunk metadata in stream. The headers are validated for proper signatures.
// Reading from the chunk reader will read only the object data stream
// without the chunk headers/trailers.
func NewSignedChunkReader(r io.Reader, authdata AuthData, region, secret string, date time.Time, chType checksumType) (io.Reader, error) {
func NewSignedChunkReader(r io.Reader, authdata AuthData, region, secret string, date time.Time, chType checksumType, debug bool) (io.Reader, error) {
chRdr := &ChunkReader{
r: r,
signingKey: getSigningKey(secret, region, date),

@@ -84,6 +86,7 @@ func NewSignedChunkReader(r io.Reader, authdata AuthData, region, secret string,
date: date,
region: region,
trailer: chType,
debug: debug,
}

if chType != "" {

@@ -390,7 +393,7 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte, l *int) (int64, stri
return cr.handleRdrErr(err, header)
}

if !IsValidChecksum(checksum, algo) {
if !IsValidChecksum(checksum, algo, cr.debug) {
return 0, "", 0, s3err.GetInvalidTrailingChecksumHeaderErr(trailer)
}
@@ -44,9 +44,11 @@ type UnsignedChunkReader struct {
stash []byte
chunkCounter int
offset int
//TODO: Add debug logging for the reader
debug bool
}

func NewUnsignedChunkReader(r io.Reader, ct checksumType) (*UnsignedChunkReader, error) {
func NewUnsignedChunkReader(r io.Reader, ct checksumType, debug bool) (*UnsignedChunkReader, error) {
hasher, err := getHasher(ct)
if err != nil {
return nil, err

@@ -57,6 +59,7 @@ func NewUnsignedChunkReader(r io.Reader, ct checksumType) (*UnsignedChunkReader,
stash: make([]byte, 0),
hasher: hasher,
chunkCounter: 1,
debug: debug,
}, nil
}
@@ -31,6 +31,7 @@ import (
"github.com/aws/smithy-go/encoding/httpbinding"
"github.com/gofiber/fiber/v2"
"github.com/valyala/fasthttp"
"github.com/versity/versitygw/s3api/debuglogger"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)

@@ -175,15 +176,21 @@ func SetMetaHeaders(ctx *fiber.Ctx, meta map[string]string) {
ctx.Response().Header.EnableNormalizing()
}

func ParseUint(str string) (int32, error) {
func ParseUint(str string, debug bool) (int32, error) {
if str == "" {
return 1000, nil
}
num, err := strconv.ParseInt(str, 10, 32)
if err != nil {
if debug {
debuglogger.Logf("invalid integer provided: %v\n", err)
}
return 1000, fmt.Errorf("invalid int: %w", err)
}
if num < 0 {
if debug {
debuglogger.Logf("negative integer provided: %v\n", num)
}
return 1000, fmt.Errorf("negative uint: %v", num)
}
if num > 1000 {
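The reworked ParseUint keeps its previous contract: an empty value falls back to the S3 default of 1000, and bad input returns that default alongside an error, with the new flag only controlling whether the failure is echoed through debuglogger. A small sketch of the expected returns (a hypothetical helper; the handling of values above 1000 is cut off by the hunk context above):

	func exampleParseUint() {
		n, err := utils.ParseUint("", false) // n=1000, err=nil: default when the parameter is absent
		fmt.Println(n, err)
		n, err = utils.ParseUint("250", false) // n=250, err=nil
		fmt.Println(n, err)
		n, err = utils.ParseUint("-5", true) // n=1000, err!=nil; also logs "[DEBUG]: negative integer provided: -5"
		fmt.Println(n, err)
	}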
@@ -210,17 +217,20 @@ func StreamResponseBody(ctx *fiber.Ctx, rdr io.ReadCloser, bodysize int) {
ctx.Context().SetBodyStream(rdr, bodysize)
}

func IsValidBucketName(bucket string) bool {
func IsValidBucketName(bucket string, debug bool) bool {
if len(bucket) < 3 || len(bucket) > 63 {
debuglogger.Logf("bucket name length should be in 3-63 range, got: %v\n", len(bucket))
return false
}
// Checks to contain only digits, lowercase letters, dot, hyphen.
// Checks to start and end with only digits and lowercase letters.
if !bucketNameRegexp.MatchString(bucket) {
debuglogger.Logf("invalid bucket name: %v\n", bucket)
return false
}
// Checks not to be a valid IP address
if bucketNameIpRegexp.MatchString(bucket) {
debuglogger.Logf("bucket name is an ip address: %v\n", bucket)
return false
}
return true

@@ -293,7 +303,7 @@ func FilterObjectAttributes(attrs map[s3response.ObjectAttributes]struct{}, outp
return output
}

func ParseObjectAttributes(ctx *fiber.Ctx) (map[s3response.ObjectAttributes]struct{}, error) {
func ParseObjectAttributes(ctx *fiber.Ctx, debug bool) (map[s3response.ObjectAttributes]struct{}, error) {
attrs := map[s3response.ObjectAttributes]struct{}{}
var err error
ctx.Request().Header.VisitAll(func(key, value []byte) {

@@ -305,6 +315,9 @@ func ParseObjectAttributes(ctx *fiber.Ctx) (map[s3response.ObjectAttributes]stru
for _, a := range oattrs {
attr := s3response.ObjectAttributes(a)
if !attr.IsValid() {
if debug {
debuglogger.Logf("invalid object attribute: %v\n", attr)
}
err = s3err.GetAPIError(s3err.ErrInvalidObjectAttributes)
break
}

@@ -318,6 +331,9 @@ func ParseObjectAttributes(ctx *fiber.Ctx) (map[s3response.ObjectAttributes]stru
}

if len(attrs) == 0 {
if debug {
debuglogger.Logf("empty get object attributes")
}
return nil, s3err.GetAPIError(s3err.ErrObjectAttributesInvalidHeader)
}
@@ -330,12 +346,15 @@ type objLockCfg struct {
|
||||
LegalHoldStatus types.ObjectLockLegalHoldStatus
|
||||
}
|
||||
|
||||
func ParsObjectLockHdrs(ctx *fiber.Ctx) (*objLockCfg, error) {
|
||||
func ParsObjectLockHdrs(ctx *fiber.Ctx, debug bool) (*objLockCfg, error) {
|
||||
legalHoldHdr := ctx.Get("X-Amz-Object-Lock-Legal-Hold")
|
||||
objLockModeHdr := ctx.Get("X-Amz-Object-Lock-Mode")
|
||||
objLockDate := ctx.Get("X-Amz-Object-Lock-Retain-Until-Date")
|
||||
|
||||
if (objLockDate != "" && objLockModeHdr == "") || (objLockDate == "" && objLockModeHdr != "") {
|
||||
if debug {
|
||||
debuglogger.Logf("one of 2 required params is missing: (lock date): %v, (lock mode): %v\n", objLockDate, objLockModeHdr)
|
||||
}
|
||||
return nil, s3err.GetAPIError(s3err.ErrObjectLockInvalidHeaders)
|
||||
}
|
||||
|
||||
@@ -343,9 +362,15 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx) (*objLockCfg, error) {
|
||||
if objLockDate != "" {
|
||||
rDate, err := time.Parse(time.RFC3339, objLockDate)
|
||||
if err != nil {
|
||||
if debug {
|
||||
debuglogger.Logf("failed to parse retain until date: %v\n", err)
|
||||
}
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
|
||||
}
|
||||
if rDate.Before(time.Now()) {
|
||||
if debug {
|
||||
debuglogger.Logf("expired retain until date: %v\n", rDate.Format(time.RFC3339))
|
||||
}
|
||||
return nil, s3err.GetAPIError(s3err.ErrPastObjectLockRetainDate)
|
||||
}
|
||||
retainUntilDate = rDate
|
||||
@@ -356,12 +381,18 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx) (*objLockCfg, error) {
|
||||
if objLockMode != "" &&
|
||||
objLockMode != types.ObjectLockModeCompliance &&
|
||||
objLockMode != types.ObjectLockModeGovernance {
|
||||
if debug {
|
||||
debuglogger.Logf("invalid object lock mode: %v\n", objLockMode)
|
||||
}
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidObjectLockMode)
|
||||
}
|
||||
|
||||
legalHold := types.ObjectLockLegalHoldStatus(legalHoldHdr)
|
||||
|
||||
if legalHold != "" && legalHold != types.ObjectLockLegalHoldStatusOff && legalHold != types.ObjectLockLegalHoldStatusOn {
|
||||
if debug {
|
||||
debuglogger.Logf("invalid object lock legal hold status: %v\n", legalHold)
|
||||
}
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidLegalHoldStatus)
|
||||
}
|
||||
|
||||
@@ -372,7 +403,7 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx) (*objLockCfg, error) {
 	}, nil
 }
 
-func IsValidOwnership(val types.ObjectOwnership) bool {
+func IsValidOwnership(val types.ObjectOwnership, debug bool) bool {
 	switch val {
 	case types.ObjectOwnershipBucketOwnerEnforced:
 		return true
@@ -381,6 +412,9 @@ func IsValidOwnership(val types.ObjectOwnership) bool {
 	case types.ObjectOwnershipObjectWriter:
 		return true
 	default:
+		if debug {
+			debuglogger.Logf("invalid object ownership: %v\n", val)
+		}
 		return false
 	}
 }
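The accepted values mirror the AWS SDK's `types.ObjectOwnership` enum; anything else now logs (when debug is on) and returns false. A small sketch exercising the new two-argument signature, assuming the utils package from this repository:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/versity/versitygw/s3api/utils"
)

func main() {
	// The three accepted ownership values plus one invalid value; with
	// debug=true the invalid case is also reported through debuglogger.
	for _, o := range []types.ObjectOwnership{
		types.ObjectOwnershipBucketOwnerEnforced,
		types.ObjectOwnershipBucketOwnerPreferred,
		types.ObjectOwnershipObjectWriter,
		types.ObjectOwnership("NotAnOwnership"),
	} {
		fmt.Printf("%-22s valid=%v\n", o, utils.IsValidOwnership(o, true))
	}
}
```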
@@ -456,15 +490,37 @@ func shouldEscape(c byte) bool {
 	return true
 }
 
-func ParseChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, map[types.ChecksumAlgorithm]string, error) {
+type ChecksumValues map[types.ChecksumAlgorithm]string
+
+// Headers concatinates checksum algorithm by prefixing each
+// with 'x-amz-checksum-'
+// e.g.
+// "x-amz-checksum-crc64nvme, x-amz-checksum-sha1"
+func (cv ChecksumValues) Headers() string {
+	result := ""
+	isFirst := false
+
+	for key := range cv {
+		if !isFirst {
+			result += ", "
+		}
+		result += fmt.Sprintf("x-amz-checksum-%v", strings.ToLower(string(key)))
+	}
+	return result
+}
+
+func ParseChecksumHeaders(ctx *fiber.Ctx, debug bool) (types.ChecksumAlgorithm, ChecksumValues, error) {
 	sdkAlgorithm := types.ChecksumAlgorithm(strings.ToUpper(ctx.Get("X-Amz-Sdk-Checksum-Algorithm")))
 
-	err := IsChecksumAlgorithmValid(sdkAlgorithm)
+	err := IsChecksumAlgorithmValid(sdkAlgorithm, debug)
 	if err != nil {
+		if debug {
+			debuglogger.Logf("invalid checksum algorithm: %v\n", sdkAlgorithm)
+		}
 		return "", nil, err
 	}
 
-	checksums := map[types.ChecksumAlgorithm]string{}
+	checksums := ChecksumValues{}
 
 	var hdrErr error
 	// Parse and validate checksum headers
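The new `ChecksumValues` type is just a map keyed by checksum algorithm, and `Headers()` renders its key set as a comma-separated list of `x-amz-checksum-*` header names for the error log used further down. An equivalent illustration-only sketch built on `strings.Join` (not the committed helper; the checksum values are placeholders):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// headerNames renders a set of checksum algorithms as the corresponding
// x-amz-checksum-* header names, e.g. "x-amz-checksum-sha1, x-amz-checksum-crc32".
func headerNames(cv map[types.ChecksumAlgorithm]string) string {
	names := make([]string, 0, len(cv))
	for algo := range cv {
		names = append(names, "x-amz-checksum-"+strings.ToLower(string(algo)))
	}
	return strings.Join(names, ", ")
}

func main() {
	cv := map[types.ChecksumAlgorithm]string{
		types.ChecksumAlgorithmSha1:  "<base64 sha1>",
		types.ChecksumAlgorithmCrc32: "<base64 crc32>",
	}
	fmt.Println(headerNames(cv))
}
```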
@@ -475,8 +531,11 @@ func ParseChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, map[types.Ch
 		}
 
 		algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(string(key), "X-Amz-Checksum-")))
-		err := IsChecksumAlgorithmValid(algo)
+		err := IsChecksumAlgorithmValid(algo, debug)
 		if err != nil {
+			if debug {
+				debuglogger.Logf("invalid checksum header: %s\n", key)
+			}
 			hdrErr = s3err.GetAPIError(s3err.ErrInvalidChecksumHeader)
 			return
 		}
@@ -489,11 +548,14 @@ func ParseChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, map[types.Ch
 	}
 
 	if len(checksums) > 1 {
+		if debug {
+			debuglogger.Logf("multiple checksum headers provided: %v\n", checksums.Headers())
+		}
 		return sdkAlgorithm, checksums, s3err.GetAPIError(s3err.ErrMultipleChecksumHeaders)
 	}
 
 	for al, val := range checksums {
-		if !IsValidChecksum(val, al) {
+		if !IsValidChecksum(val, al, debug) {
 			return sdkAlgorithm, checksums, s3err.GetInvalidChecksumHeaderErr(fmt.Sprintf("x-amz-checksum-%v", strings.ToLower(string(al))))
 		}
 		// If any other checksum value is provided,
@@ -515,21 +577,32 @@ var checksumLengths = map[types.ChecksumAlgorithm]int{
 	types.ChecksumAlgorithmSha256: 32,
 }
 
-func IsValidChecksum(checksum string, algorithm types.ChecksumAlgorithm) bool {
+func IsValidChecksum(checksum string, algorithm types.ChecksumAlgorithm, debug bool) bool {
 	decoded, err := base64.StdEncoding.DecodeString(checksum)
 	if err != nil {
+		if debug {
+			debuglogger.Logf("failed to parse checksum base64: %v\n", err)
+		}
 		return false
 	}
 
 	expectedLength, exists := checksumLengths[algorithm]
 	if !exists {
+		if debug {
+			debuglogger.Logf("unknown checksum algorithm: %v\n", algorithm)
+		}
 		return false
 	}
 
-	return len(decoded) == expectedLength
+	isValid := len(decoded) == expectedLength
+	if !isValid && debug {
+		debuglogger.Logf("decoded checksum length: (expected): %v, (got): %v\n", expectedLength, len(decoded))
+	}
+
+	return isValid
 }
 
-func IsChecksumAlgorithmValid(alg types.ChecksumAlgorithm) error {
+func IsChecksumAlgorithmValid(alg types.ChecksumAlgorithm, debug bool) error {
 	alg = types.ChecksumAlgorithm(strings.ToUpper(string(alg)))
 	if alg != "" &&
 		alg != types.ChecksumAlgorithmCrc32 &&
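`IsValidChecksum` only verifies that the header value is valid base64 and that the decoded payload has the expected byte length for the algorithm (the visible table entry shows 32 for SHA-256; CRC32 and CRC32C checksums are 4 bytes, CRC64NVME 8, SHA-1 20). A short sketch of how an AWS-style `x-amz-checksum-crc32` value is produced on the client side, which is why the validator expects 4 decoded bytes for CRC32:

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	body := []byte("hello world")

	// The header carries the base64 of the raw 4-byte (big-endian) CRC32
	// of the payload, not a hex string.
	sum := crc32.ChecksumIEEE(body)
	raw := make([]byte, 4)
	binary.BigEndian.PutUint32(raw, sum)

	value := base64.StdEncoding.EncodeToString(raw)
	fmt.Println("x-amz-checksum-crc32:", value)
	fmt.Println("decoded length:", len(raw)) // 4
}
```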
@@ -537,6 +610,9 @@ func IsChecksumAlgorithmValid(alg types.ChecksumAlgorithm) error {
 		alg != types.ChecksumAlgorithmSha1 &&
 		alg != types.ChecksumAlgorithmSha256 &&
 		alg != types.ChecksumAlgorithmCrc64nvme {
+		if debug {
+			debuglogger.Logf("invalid checksum algorithm: %v\n", alg)
+		}
 		return s3err.GetAPIError(s3err.ErrInvalidChecksumAlgorithm)
 	}
 
@@ -544,10 +620,13 @@ func IsChecksumAlgorithmValid(alg types.ChecksumAlgorithm) error {
 }
 
 // Validates the provided checksum type
-func IsChecksumTypeValid(t types.ChecksumType) error {
+func IsChecksumTypeValid(t types.ChecksumType, debug bool) error {
 	if t != "" &&
 		t != types.ChecksumTypeComposite &&
 		t != types.ChecksumTypeFullObject {
+		if debug {
+			debuglogger.Logf("invalid checksum type: %v\n", t)
+		}
 		return s3err.GetInvalidChecksumHeaderErr("x-amz-checksum-type")
 	}
 	return nil
@@ -587,10 +666,13 @@ var checksumMap checksumSchema = checksumSchema{
 }
 
 // Checks if checksum type and algorithm are supported together
-func checkChecksumTypeAndAlgo(algo types.ChecksumAlgorithm, t types.ChecksumType) error {
+func checkChecksumTypeAndAlgo(algo types.ChecksumAlgorithm, t types.ChecksumType, debug bool) error {
 	typeSchema := checksumMap[algo]
 	_, ok := typeSchema[t]
 	if !ok {
+		if debug {
+			debuglogger.Logf("checksum type and algorithm mismatch: (type): %v, (algorithm): %v\n", t, algo)
+		}
 		return s3err.GetChecksumSchemaMismatchErr(algo, t)
 	}
 
@@ -598,26 +680,29 @@ func checkChecksumTypeAndAlgo(algo types.ChecksumAlgorithm, t types.ChecksumType
 }
 
 // Parses and validates the x-amz-checksum-algorithm and x-amz-checksum-type headers
-func ParseCreateMpChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, types.ChecksumType, error) {
+func ParseCreateMpChecksumHeaders(ctx *fiber.Ctx, debug bool) (types.ChecksumAlgorithm, types.ChecksumType, error) {
 	algo := types.ChecksumAlgorithm(ctx.Get("x-amz-checksum-algorithm"))
-	if err := IsChecksumAlgorithmValid(algo); err != nil {
+	if err := IsChecksumAlgorithmValid(algo, debug); err != nil {
 		return "", "", err
 	}
 
 	chType := types.ChecksumType(ctx.Get("x-amz-checksum-type"))
-	if err := IsChecksumTypeValid(chType); err != nil {
+	if err := IsChecksumTypeValid(chType, debug); err != nil {
 		return "", "", err
 	}
 
 	// Verify if checksum algorithm is provided, if
 	// checksum type is specified
 	if chType != "" && algo == "" {
+		if debug {
+			debuglogger.Logf("checksum type can only be used with checksum algorithm: (type): %v\n", chType)
+		}
 		return algo, chType, s3err.GetAPIError(s3err.ErrChecksumTypeWithAlgo)
 	}
 
 	// Verify if the checksum type is supported for
 	// the provided checksum algorithm
-	if err := checkChecksumTypeAndAlgo(algo, chType); err != nil {
+	if err := checkChecksumTypeAndAlgo(algo, chType, debug); err != nil {
 		return algo, chType, err
 	}
 
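`ParseCreateMpChecksumHeaders` ties the two CreateMultipartUpload headers together: `x-amz-checksum-type` is only accepted alongside `x-amz-checksum-algorithm`, and the pair must be compatible per `checksumMap`. A client-side sketch of headers that would pass, assuming CRC32 supports FULL_OBJECT as it does in AWS S3 (endpoint is a placeholder and the request is unsigned):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Placeholder endpoint; a real CreateMultipartUpload is a signed
	// POST /bucket/key?uploads request.
	req, err := http.NewRequest(http.MethodPost,
		"http://localhost:7070/my-bucket/my-key?uploads", nil)
	if err != nil {
		panic(err)
	}

	// Algorithm alone is fine; a type without an algorithm fails with
	// ErrChecksumTypeWithAlgo, and the pair must also be compatible.
	req.Header.Set("x-amz-checksum-algorithm", "CRC32")
	req.Header.Set("x-amz-checksum-type", "FULL_OBJECT")

	fmt.Println(req.Header)
}
```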
@@ -219,7 +219,7 @@ func TestIsValidBucketName(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if got := IsValidBucketName(tt.args.bucket); got != tt.want {
+			if got := IsValidBucketName(tt.args.bucket, false); got != tt.want {
 				t.Errorf("IsValidBucketName() = %v, want %v", got, tt.want)
 			}
 		})
@@ -279,7 +279,7 @@ func TestParseUint(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got, err := ParseUint(tt.args.str)
+			got, err := ParseUint(tt.args.str, false)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("ParseMaxKeys() error = %v, wantErr %v", err, tt.wantErr)
 				return
@@ -407,7 +407,7 @@ func TestIsValidOwnership(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if got := IsValidOwnership(tt.args.val); got != tt.want {
+			if got := IsValidOwnership(tt.args.val, false); got != tt.want {
 				t.Errorf("IsValidOwnership() = %v, want %v", got, tt.want)
 			}
 		})
@@ -597,7 +597,7 @@ func TestIsChecksumAlgorithmValid(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if err := IsChecksumAlgorithmValid(tt.args.alg); (err != nil) != tt.wantErr {
+			if err := IsChecksumAlgorithmValid(tt.args.alg, false); (err != nil) != tt.wantErr {
 				t.Errorf("IsChecksumAlgorithmValid() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -689,7 +689,7 @@ func TestIsValidChecksum(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if got := IsValidChecksum(tt.args.checksum, tt.args.algorithm); got != tt.want {
+			if got := IsValidChecksum(tt.args.checksum, tt.args.algorithm, false); got != tt.want {
 				t.Errorf("IsValidChecksum() = %v, want %v", got, tt.want)
 			}
 		})
@@ -729,7 +729,7 @@ func TestIsChecksumTypeValid(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if err := IsChecksumTypeValid(tt.args.t); (err != nil) != tt.wantErr {
+			if err := IsChecksumTypeValid(tt.args.t, false); (err != nil) != tt.wantErr {
 				t.Errorf("IsChecksumTypeValid() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -851,7 +851,7 @@ func Test_checkChecksumTypeAndAlgo(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if err := checkChecksumTypeAndAlgo(tt.args.algo, tt.args.t); (err != nil) != tt.wantErr {
+			if err := checkChecksumTypeAndAlgo(tt.args.algo, tt.args.t, false); (err != nil) != tt.wantErr {
				t.Errorf("checkChecksumTypeAndAlgo() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})