diff --git a/s3api/middlewares/authentication.go b/s3api/middlewares/authentication.go index 4208879..91ed779 100644 --- a/s3api/middlewares/authentication.go +++ b/s3api/middlewares/authentication.go @@ -134,7 +134,7 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string, var err error wrapBodyReader(ctx, func(r io.Reader) io.Reader { var cr io.Reader - cr, err = utils.NewChunkReader(ctx, r, authData, region, account.Secret, tdate) + cr, err = utils.NewChunkReader(ctx, r, authData, account.Secret, tdate) return cr }) if err != nil { diff --git a/s3api/utils/chunk-reader.go b/s3api/utils/chunk-reader.go index 19773f5..05ee98c 100644 --- a/s3api/utils/chunk-reader.go +++ b/s3api/utils/chunk-reader.go @@ -182,7 +182,7 @@ func ParseDecodedContentLength(ctx *fiber.Ctx) (int64, error) { return decContLength, nil } -func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time) (io.Reader, error) { +func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, secret string, date time.Time) (io.Reader, error) { cLength, err := ParseDecodedContentLength(ctx) if err != nil { return nil, err @@ -204,9 +204,9 @@ func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secr case payloadTypeStreamingUnsignedTrailer: return NewUnsignedChunkReader(r, checksumType, cLength) case payloadTypeStreamingSignedTrailer: - return NewSignedChunkReader(r, authdata, region, secret, date, checksumType) + return NewSignedChunkReader(r, authdata, secret, date, checksumType, true, cLength) case payloadTypeStreamingSigned: - return NewSignedChunkReader(r, authdata, region, secret, date, "") + return NewSignedChunkReader(r, authdata, secret, date, "", false, cLength) // return not supported for: // - STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD // - STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER diff --git a/s3api/utils/signed-chunk-reader.go b/s3api/utils/signed-chunk-reader.go index 58d8ade..dc60b2e 
100644 --- a/s3api/utils/signed-chunk-reader.go +++ b/s3api/utils/signed-chunk-reader.go @@ -46,6 +46,12 @@ const ( trailerSignatureHeader = "x-amz-trailer-signature" streamPayloadAlgo = "AWS4-HMAC-SHA256-PAYLOAD" streamPayloadTrailerAlgo = "AWS4-HMAC-SHA256-TRAILER" + + maxHeaderSize = 1024 +) + +var ( + errskipHeader = errors.New("skip to next header") ) // ChunkReader reads from chunked upload request body, and returns @@ -66,24 +72,31 @@ type ChunkReader struct { isFirstHeader bool region string date time.Time + requireTrailer bool + chunkSizes []int64 + cLength int64 + dataRead int64 } // NewChunkReader reads from request body io.Reader and parses out the // chunk metadata in stream. The headers are validated for proper signatures. // Reading from the chunk reader will read only the object data stream // without the chunk headers/trailers. -func NewSignedChunkReader(r io.Reader, authdata AuthData, region, secret string, date time.Time, chType checksumType) (io.Reader, error) { +func NewSignedChunkReader(r io.Reader, authdata AuthData, secret string, date time.Time, chType checksumType, requireTrailer bool, cLength int64) (io.Reader, error) { chRdr := &ChunkReader{ r: r, - signingKey: getSigningKey(secret, region, date), + signingKey: getSigningKey(secret, authdata.Region, date), // the authdata.Signature is validated in the auth-reader, // so we can use that here without any other checks - prevSig: authdata.Signature, - chunkHash: sha256.New(), - isFirstHeader: true, - date: date, - region: region, - trailer: chType, + prevSig: authdata.Signature, + chunkHash: sha256.New(), + isFirstHeader: true, + date: date, + region: authdata.Region, + trailer: chType, + requireTrailer: requireTrailer, + chunkSizes: []int64{}, + cLength: cLength, } if chType != "" { @@ -95,7 +108,7 @@ func NewSignedChunkReader(r io.Reader, authdata AuthData, region, secret string, chRdr.checksumHash = checksumHasher } - if chType == "" { + if !requireTrailer { 
debuglogger.Infof("initializing signed chunk reader") } else { debuglogger.Infof("initializing signed chunk reader with '%v' trailing checksum", chType) @@ -122,6 +135,13 @@ func (cr *ChunkReader) Read(p []byte) (int, error) { } n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n]) n += int(chunkSize) + cr.dataRead += int64(n) + if cr.isEOF { + if cr.cLength != cr.dataRead { + debuglogger.Logf("number of bytes expected: (%v), number of bytes read: (%v)", cr.cLength, cr.dataRead) + return n, s3err.GetAPIError(s3err.ErrContentLengthMismatch) + } + } return n, err } @@ -130,6 +150,13 @@ func (cr *ChunkReader) Read(p []byte) (int, error) { if cr.checksumHash != nil { cr.checksumHash.Write(p[:n]) } + cr.dataRead += int64(n) + if cr.isEOF { + if cr.cLength != cr.dataRead { + debuglogger.Logf("number of bytes expected: (%v), number of bytes read: (%v)", cr.cLength, cr.dataRead) + return n, s3err.GetAPIError(s3err.ErrContentLengthMismatch) + } + } return n, err } @@ -328,15 +355,6 @@ func hmac256(key []byte, data []byte) []byte { return hash.Sum(nil) } -var ( - errInvalidChunkFormat = errors.New("invalid chunk header format") - errskipHeader = errors.New("skip to next header") -) - -const ( - maxHeaderSize = 1024 -) - // This returns the chunk payload size, signature, data start offset, and // error if any. See the AWS documentation for the chunk header format. The // header[0] byte is expected to be the first byte of the chunk size here. 
@@ -344,7 +362,7 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, stashLen := len(cr.stash) if stashLen > maxHeaderSize { debuglogger.Logf("the stash length exceeds the maximum allowed chunk header size: (stash len): %v, (header limit): %v", stashLen, maxHeaderSize) - return 0, "", 0, errInvalidChunkFormat + return 0, "", 0, s3err.GetAPIError(s3err.ErrIncompleteBody) } if cr.stash != nil { debuglogger.Logf("recovering the stash: (stash len): %v", stashLen) @@ -367,16 +385,9 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, } } - // read and parse the chunk size - chunkSizeStr, err := readAndTrim(rdr, ';') + chunkSize, err := cr.parseChunkSize(rdr, header) if err != nil { - debuglogger.Logf("failed to read chunk size: %v", err) - return cr.handleRdrErr(err, header) - } - chunkSize, err := strconv.ParseInt(chunkSizeStr, 16, 64) - if err != nil { - debuglogger.Logf("failed to parse chunk size: (size): %v, (err): %v", chunkSizeStr, err) - return 0, "", 0, errInvalidChunkFormat + return 0, "", 0, err } // read the chunk signature @@ -393,7 +404,7 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, // read and parse the final chunk trailer and checksum if chunkSize == 0 { - if cr.trailer != "" { + if cr.requireTrailer { err = readAndSkip(rdr, '\n') if err != nil { debuglogger.Logf("failed to read \\n before the trailer: %v", err) @@ -407,7 +418,7 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, } if trailer != string(cr.trailer) { debuglogger.Logf("incorrect trailer prefix: (expected): %v, (got): %v", cr.trailer, trailer) - return 0, "", 0, errInvalidChunkFormat + return 0, "", 0, s3err.GetAPIError(s3err.ErrMalformedTrailer) } algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(trailer, "x-amz-checksum-"))) @@ -439,7 +450,7 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, if 
trailerSigPrefix != trailerSignatureHeader { debuglogger.Logf("invalid trailing signature prefix: (expected): %v, (got): %v", trailerSignatureHeader, trailerSigPrefix) - return 0, "", 0, errInvalidChunkFormat + return 0, "", 0, s3err.GetAPIError(s3err.ErrIncompleteBody) } trailerSig, err := readAndTrim(rdr, '\r') @@ -498,11 +509,64 @@ func (cr *ChunkReader) handleRdrErr(err error, header []byte) (int64, string, in if err == io.EOF { if cr.isEOF { debuglogger.Logf("incomplete chunk encoding, EOF reached") - return 0, "", 0, errInvalidChunkFormat + return 0, "", 0, s3err.GetAPIError(s3err.ErrIncompleteBody) } return cr.stashAndSkipHeader(header) } - return 0, "", 0, err + return 0, "", 0, s3err.GetAPIError(s3err.ErrIncompleteBody) +} + +// parseChunkSize parses and validates the chunk size +func (cr *ChunkReader) parseChunkSize(rdr *bufio.Reader, header []byte) (int64, error) { + // read and parse the chunk size + chunkSizeStr, err := readAndTrim(rdr, ';') + if err != nil { + debuglogger.Logf("failed to read chunk size: %v", err) + _, _, _, err := cr.handleRdrErr(err, header) + return 0, err + } + chunkSize, err := strconv.ParseInt(chunkSizeStr, 16, 64) + if err != nil { + debuglogger.Logf("failed to parse chunk size: (size): %v, (err): %v", chunkSizeStr, err) + return 0, s3err.GetAPIError(s3err.ErrIncompleteBody) + } + + if !cr.isValidChunkSize(chunkSize) { + return 0, s3err.GetAPIError(s3err.ErrInvalidChunkSize) + } + + cr.chunkSizes = append(cr.chunkSizes, chunkSize) + + return chunkSize, nil + } + +// isValidChunkSize checks if the parsed chunk size is valid +// they follow one rule: all chunk sizes except for the last one +// should be at least 8192 (minChunkSize) +func (cr *ChunkReader) isValidChunkSize(size int64) bool { + if len(cr.chunkSizes) == 0 { + // any valid number is valid as a first chunk size + return true + } + + lastChunkSize := cr.chunkSizes[len(cr.chunkSizes)-1] + // any chunk size, except the last one should be at least 8192 + if size != 0 && 
lastChunkSize < minChunkSize { + debuglogger.Logf("invalid chunk size %v", lastChunkSize) + return false + } + + return true +} + +// Algorithm returns the checksum algorithm +func (cr *ChunkReader) Algorithm() string { + return strings.TrimPrefix(string(cr.trailer), "x-amz-checksum-") +} + +// Checksum returns the parsed trailing checksum +func (cr *ChunkReader) Checksum() string { + return cr.parsedChecksum } // reads data from the "rdr" and validates the passed data bytes @@ -514,7 +578,7 @@ func readAndSkip(rdr *bufio.Reader, data ...byte) error { } if b != d { - return errMalformedEncoding + return s3err.GetAPIError(s3err.ErrIncompleteBody) } } diff --git a/s3api/utils/unsigned-chunk-reader.go b/s3api/utils/unsigned-chunk-reader.go index 8e77a4d..be7b6d3 100644 --- a/s3api/utils/unsigned-chunk-reader.go +++ b/s3api/utils/unsigned-chunk-reader.go @@ -35,9 +35,8 @@ import ( ) var ( - trailerDelim = []byte{'\n', '\r', '\n'} - minChunkSize int64 = 8192 - errMalformedEncoding = errors.New("malformed chunk encoding") + trailerDelim = []byte{'\n', '\r', '\n'} + minChunkSize int64 = 8192 ) type UnsignedChunkReader struct {