fix: chunkreader invalid signature when header crossed read buffers

Fixes #512. For chunked uploads, we parse the chunk headers in place
and then move the data payload up in the buffer, overwriting the chunk
headers so the caller sees only the real data stream.
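
As a rough illustration of the in-place approach (an invented helper, not
the gateway code), removing a parsed header amounts to copying the payload
down over the header bytes:

package main

import "fmt"

// stripHeader shifts the payload left over hdrLen header bytes in buf[:n]
// and returns how many payload bytes now sit at the front of buf.
func stripHeader(buf []byte, n, hdrLen int) int {
	copy(buf, buf[hdrLen:n])
	return n - hdrLen
}

func main() {
	// Shortened header for illustration; real chunk signatures are longer.
	buf := []byte("5;chunk-signature=abcd\r\nhello")
	hdr := len("5;chunk-signature=abcd\r\n")
	n := stripHeader(buf, len(buf), hdr)
	fmt.Printf("%q\n", buf[:n]) // "hello"
}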

For the special case where the chunk header is truncated at the end of
the current read buffer, the partial header is stashed in a temporary
byte slice. The following read contains the remainder of the header,
which is stitched back onto the stash and parsed.
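
A minimal sketch of that stash-and-reassemble step, with invented names
and a shortened header:

package main

import (
	"bytes"
	"fmt"
)

const chunkHdrDelim = "\r\n" // chunk headers are terminated by CRLF

// reassemble prepends the stashed partial header to the next read and
// reports whether a complete header (ending in CRLF) is now available.
func reassemble(stash, next []byte) ([]byte, bool) {
	combined := append(append([]byte{}, stash...), next...)
	return combined, bytes.Contains(combined, []byte(chunkHdrDelim))
}

func main() {
	stash := []byte("f;chunk-sig")                         // previous read ended mid-header
	next := []byte("nature=0123abcd\r\npayload goes here") // rest of header, then data

	combined, ok := reassemble(stash, next)
	end := bytes.Index(combined, []byte(chunkHdrDelim))
	fmt.Println(ok, string(combined[:end])) // true f;chunk-signature=0123abcd
}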

We were parsing the reassembled header correctly, but the data offset
is calculated from the start of the header. In the special case where
part of the header came from the stash, that offset counts the stashed
bytes as well, so the offset into the read buffer was too large.

The fix is simply to subtract the stash size from the returned data
offset.
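
A worked example of the off-by-stash arithmetic, using made-up lengths
rather than the real parser: the header is parsed from stash + current
read, so the returned offset counts stashed bytes that are not in the
read buffer.

package main

import "fmt"

func main() {
	stash := []byte("f;chunk-si")        // partial header left over from the previous read
	headerRest := "gnature=0123abcd\r\n" // remainder of the header in the current read
	payload := "this is the chunk payload"

	read := []byte(headerRest + payload) // current read buffer

	// The offset is measured from the first stashed byte, not from the
	// start of the read buffer, so it is too large by len(stash).
	dataStartOffset := len(stash) + len(headerRest)

	fmt.Printf("buggy: %q\n", read[dataStartOffset:])            // drops the first len(stash) payload bytes
	fmt.Printf("fixed: %q\n", read[dataStartOffset-len(stash):]) // the full payload
}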
Author: Ben McClelland
Date:   2024-04-16 23:01:55 -07:00
Parent: 6f35a5fbaf
Commit: 07b01a738a


@@ -193,11 +193,11 @@ func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
 		cr.chunkHash.Write(p[:chunkSize])
 		n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
 		return n + int(chunkSize), err
-	} else {
-		cr.chunkDataLeft = chunkSize - int64(n)
-		cr.chunkHash.Write(p[:n])
 	}
+	cr.chunkDataLeft = chunkSize - int64(n)
+	cr.chunkHash.Write(p[:n])
 	return n, nil
 }
@@ -231,6 +231,7 @@ const (
 // error if any. See the AWS documentation for the chunk header format. The
 // header[0] byte is expected to be the first byte of the chunk size here.
 func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, error) {
+	stashLen := len(cr.stash)
 	if cr.stash != nil {
 		tmp := make([]byte, maxHeaderSize)
 		copy(tmp, cr.stash)
@@ -265,5 +266,5 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, error) {
 	signature := string(header[sigIndex:(sigIndex + sigEndIndex)])
 	dataStartOffset := sigIndex + sigEndIndex + len(chunkHdrDelim)
-	return chunkSize, signature, dataStartOffset, nil
+	return chunkSize, signature, dataStartOffset - stashLen, nil
 }