fix: error handling and checksum fixes for signed streaming requests

Fixes #1683
Fixes #1684
Fixes #1685
Fixes #1690
Fixes #1691
Fixes #1692
Fixes #1694
Fixes #1695

This PR primarily focuses on error handling and checksum calculation for signed streaming requests of type `STREAMING-AWS4-HMAC-SHA256-PAYLOAD` and `STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER`.

It fixes the error type returned when the payload encoding is incorrect: the correct `IncompleteBody` error is now returned.

Chunk size validation has been added, enforcing the rule that only the final chunk may be smaller than 8192 bytes.

The `x-amz-trailer` header value is now validated against the checksum trailer present in the payload. For `STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER` requests, if no checksum trailer is provided in the payload, the gateway now returns an `IncompleteBody` error.

If there is a mismatch between the `x-amz-trailer` header and the checksum trailer in the payload, or if the checksum header key in the payload is invalid, a `MalformedTrailer` error is returned.

The `x-amz-decoded-content-length` header value is now compared against the actual decoded payload length, and an `IncompleteBody` error is returned if there is a mismatch.

Finally, the double checksum calculation issue has been fixed. For `STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER` requests, the trailing checksum is now parsed from the request payload and stored in the backend, instead of being recalculated by the backend.
This commit is contained in:
niksis02
2025-12-16 23:15:34 +04:00
parent 30acb4b152
commit 9eaaeedd28
4 changed files with 104 additions and 41 deletions

View File

@@ -134,7 +134,7 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string,
var err error
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
var cr io.Reader
cr, err = utils.NewChunkReader(ctx, r, authData, region, account.Secret, tdate)
cr, err = utils.NewChunkReader(ctx, r, authData, account.Secret, tdate)
return cr
})
if err != nil {

View File

@@ -182,7 +182,7 @@ func ParseDecodedContentLength(ctx *fiber.Ctx) (int64, error) {
return decContLength, nil
}
func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time) (io.Reader, error) {
func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, secret string, date time.Time) (io.Reader, error) {
cLength, err := ParseDecodedContentLength(ctx)
if err != nil {
return nil, err
@@ -204,9 +204,9 @@ func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secr
case payloadTypeStreamingUnsignedTrailer:
return NewUnsignedChunkReader(r, checksumType, cLength)
case payloadTypeStreamingSignedTrailer:
return NewSignedChunkReader(r, authdata, region, secret, date, checksumType)
return NewSignedChunkReader(r, authdata, secret, date, checksumType, true, cLength)
case payloadTypeStreamingSigned:
return NewSignedChunkReader(r, authdata, region, secret, date, "")
return NewSignedChunkReader(r, authdata, secret, date, "", false, cLength)
// return not supported for:
// - STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD
// - STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER

View File

@@ -46,6 +46,12 @@ const (
trailerSignatureHeader = "x-amz-trailer-signature"
streamPayloadAlgo = "AWS4-HMAC-SHA256-PAYLOAD"
streamPayloadTrailerAlgo = "AWS4-HMAC-SHA256-TRAILER"
maxHeaderSize = 1024
)
var (
errskipHeader = errors.New("skip to next header")
)
// ChunkReader reads from chunked upload request body, and returns
@@ -66,24 +72,31 @@ type ChunkReader struct {
isFirstHeader bool
region string
date time.Time
requireTrailer bool
chunkSizes []int64
cLength int64
dataRead int64
}
// NewChunkReader reads from request body io.Reader and parses out the
// chunk metadata in stream. The headers are validated for proper signatures.
// Reading from the chunk reader will read only the object data stream
// without the chunk headers/trailers.
func NewSignedChunkReader(r io.Reader, authdata AuthData, region, secret string, date time.Time, chType checksumType) (io.Reader, error) {
func NewSignedChunkReader(r io.Reader, authdata AuthData, secret string, date time.Time, chType checksumType, requireTrailer bool, cLength int64) (io.Reader, error) {
chRdr := &ChunkReader{
r: r,
signingKey: getSigningKey(secret, region, date),
signingKey: getSigningKey(secret, authdata.Region, date),
// the authdata.Signature is validated in the auth-reader,
// so we can use that here without any other checks
prevSig: authdata.Signature,
chunkHash: sha256.New(),
isFirstHeader: true,
date: date,
region: region,
trailer: chType,
prevSig: authdata.Signature,
chunkHash: sha256.New(),
isFirstHeader: true,
date: date,
region: authdata.Region,
trailer: chType,
requireTrailer: requireTrailer,
chunkSizes: []int64{},
cLength: cLength,
}
if chType != "" {
@@ -95,7 +108,7 @@ func NewSignedChunkReader(r io.Reader, authdata AuthData, region, secret string,
chRdr.checksumHash = checksumHasher
}
if chType == "" {
if !requireTrailer {
debuglogger.Infof("initializing signed chunk reader")
} else {
debuglogger.Infof("initializing signed chunk reader with '%v' trailing checksum", chType)
@@ -122,6 +135,13 @@ func (cr *ChunkReader) Read(p []byte) (int, error) {
}
n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
n += int(chunkSize)
cr.dataRead += int64(n)
if cr.isEOF {
if cr.cLength != cr.dataRead {
debuglogger.Logf("number of bytes expected: (%v), number of bytes read: (%v)", cr.cLength, cr.dataRead)
return n, s3err.GetAPIError(s3err.ErrContentLengthMismatch)
}
}
return n, err
}
@@ -130,6 +150,13 @@ func (cr *ChunkReader) Read(p []byte) (int, error) {
if cr.checksumHash != nil {
cr.checksumHash.Write(p[:n])
}
cr.dataRead += int64(n)
if cr.isEOF {
if cr.cLength != cr.dataRead {
debuglogger.Logf("number of bytes expected: (%v), number of bytes read: (%v)", cr.cLength, cr.dataRead)
return n, s3err.GetAPIError(s3err.ErrContentLengthMismatch)
}
}
return n, err
}
@@ -328,15 +355,6 @@ func hmac256(key []byte, data []byte) []byte {
return hash.Sum(nil)
}
var (
errInvalidChunkFormat = errors.New("invalid chunk header format")
errskipHeader = errors.New("skip to next header")
)
const (
maxHeaderSize = 1024
)
// This returns the chunk payload size, signature, data start offset, and
// error if any. See the AWS documentation for the chunk header format. The
// header[0] byte is expected to be the first byte of the chunk size here.
@@ -344,7 +362,7 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
stashLen := len(cr.stash)
if stashLen > maxHeaderSize {
debuglogger.Logf("the stash length exceeds the maximum allowed chunk header size: (stash len): %v, (header limit): %v", stashLen, maxHeaderSize)
return 0, "", 0, errInvalidChunkFormat
return 0, "", 0, s3err.GetAPIError(s3err.ErrIncompleteBody)
}
if cr.stash != nil {
debuglogger.Logf("recovering the stash: (stash len): %v", stashLen)
@@ -367,16 +385,9 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
}
}
// read and parse the chunk size
chunkSizeStr, err := readAndTrim(rdr, ';')
chunkSize, err := cr.parseChunkSize(rdr, header)
if err != nil {
debuglogger.Logf("failed to read chunk size: %v", err)
return cr.handleRdrErr(err, header)
}
chunkSize, err := strconv.ParseInt(chunkSizeStr, 16, 64)
if err != nil {
debuglogger.Logf("failed to parse chunk size: (size): %v, (err): %v", chunkSizeStr, err)
return 0, "", 0, errInvalidChunkFormat
return 0, "", 0, err
}
// read the chunk signature
@@ -393,7 +404,7 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
// read and parse the final chunk trailer and checksum
if chunkSize == 0 {
if cr.trailer != "" {
if cr.requireTrailer {
err = readAndSkip(rdr, '\n')
if err != nil {
debuglogger.Logf("failed to read \\n before the trailer: %v", err)
@@ -407,7 +418,7 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
}
if trailer != string(cr.trailer) {
debuglogger.Logf("incorrect trailer prefix: (expected): %v, (got): %v", cr.trailer, trailer)
return 0, "", 0, errInvalidChunkFormat
return 0, "", 0, s3err.GetAPIError(s3err.ErrMalformedTrailer)
}
algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(trailer, "x-amz-checksum-")))
@@ -439,7 +450,7 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
if trailerSigPrefix != trailerSignatureHeader {
debuglogger.Logf("invalid trailing signature prefix: (expected): %v, (got): %v", trailerSignatureHeader, trailerSigPrefix)
return 0, "", 0, errInvalidChunkFormat
return 0, "", 0, s3err.GetAPIError(s3err.ErrIncompleteBody)
}
trailerSig, err := readAndTrim(rdr, '\r')
@@ -498,11 +509,64 @@ func (cr *ChunkReader) handleRdrErr(err error, header []byte) (int64, string, in
if err == io.EOF {
if cr.isEOF {
debuglogger.Logf("incomplete chunk encoding, EOF reached")
return 0, "", 0, errInvalidChunkFormat
return 0, "", 0, s3err.GetAPIError(s3err.ErrIncompleteBody)
}
return cr.stashAndSkipHeader(header)
}
return 0, "", 0, err
return 0, "", 0, s3err.GetAPIError(s3err.ErrIncompleteBody)
}
// parseChunkSize reads the hex-encoded chunk size from rdr, validates it
// against the chunk-size rules, and records the accepted value in
// cr.chunkSizes.
//
// A read failure is routed through cr.handleRdrErr so that a recoverable
// short read can stash the header bytes; only the resulting error is
// propagated. A size that fails hex parsing is reported as IncompleteBody,
// and one violating the minimum-size rule as InvalidChunkSize.
func (cr *ChunkReader) parseChunkSize(rdr *bufio.Reader, header []byte) (int64, error) {
	// read the raw hex size up to the ';' delimiter
	hexSize, err := readAndTrim(rdr, ';')
	if err != nil {
		debuglogger.Logf("failed to read chunk size: %v", err)
		_, _, _, rdrErr := cr.handleRdrErr(err, header)
		return 0, rdrErr
	}

	size, parseErr := strconv.ParseInt(hexSize, 16, 64)
	if parseErr != nil {
		debuglogger.Logf("failed to parse chunk size: (size): %v, (err): %v", hexSize, parseErr)
		return 0, s3err.GetAPIError(s3err.ErrIncompleteBody)
	}

	if !cr.isValidChunkSize(size) {
		return 0, s3err.GetAPIError(s3err.ErrInvalidChunkSize)
	}

	cr.chunkSizes = append(cr.chunkSizes, size)
	return size, nil
}
// isValidChunkSize reports whether accepting the incoming chunk size keeps
// the stream valid. The rule: every chunk except the final one must be at
// least minChunkSize (8192) bytes, so a previously recorded chunk smaller
// than that is only legal when the incoming size is 0 (the terminating
// chunk, which marks the previous one as the last data chunk).
func (cr *ChunkReader) isValidChunkSize(size int64) bool {
	n := len(cr.chunkSizes)
	if n == 0 {
		// nothing recorded yet: the first size carries no constraint here
		return true
	}
	prev := cr.chunkSizes[n-1]
	if size == 0 || prev >= minChunkSize {
		return true
	}
	debuglogger.Logf("invalid chunk size %v", prev)
	return false
}
// Algorithm returns the checksum algorithm name derived from the trailer
// header key, e.g. "crc32" for "x-amz-checksum-crc32".
func (cr *ChunkReader) Algorithm() string {
	const checksumPrefix = "x-amz-checksum-"
	return strings.TrimPrefix(string(cr.trailer), checksumPrefix)
}
// Checksum returns the trailing checksum value parsed from the payload's
// x-amz-checksum-* trailer. NOTE(review): presumably empty until the final
// chunk trailer has been parsed — confirm where parsedChecksum is assigned
// in the full file.
func (cr *ChunkReader) Checksum() string {
	return cr.parsedChecksum
}
// reads data from the "rdr" and validates the passed data bytes
@@ -514,7 +578,7 @@ func readAndSkip(rdr *bufio.Reader, data ...byte) error {
}
if b != d {
return errMalformedEncoding
return s3err.GetAPIError(s3err.ErrIncompleteBody)
}
}

View File

@@ -35,9 +35,8 @@ import (
)
var (
trailerDelim = []byte{'\n', '\r', '\n'}
minChunkSize int64 = 8192
errMalformedEncoding = errors.New("malformed chunk encoding")
trailerDelim = []byte{'\n', '\r', '\n'}
minChunkSize int64 = 8192
)
type UnsignedChunkReader struct {