s3_client: add tests for calc_part_size
Introduce tests that validate the corrected multipart part-size
calculation, including boundary conditions and error cases.
(cherry picked from commit 6280cb91ca)
Committed by: GitHub Action
Parent: 137233b1e6
Commit: fa1ca8096b
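The test file itself is not shown in this excerpt. As a rough illustration only, a boundary-condition case against the calc_part_size() declaration added below could look like the sketch here; the Boost.Test harness, the "utils/s3/client.hh" include path, and the 5 MiB / 10,000-part S3 limits assumed for the aws_* constants are assumptions, not taken from this diff.

// Illustrative sketch -- not the test file added by this commit.
#define BOOST_TEST_MODULE calc_part_size_sketch
#include <boost/test/included/unit_test.hpp>
#include "utils/s3/client.hh"   // assumed location of the s3::calc_part_size declaration

static constexpr size_t MiB = 1024 * 1024;

// Explicit part size: 100 MiB split into 10 MiB parts gives exactly 10 parts.
BOOST_AUTO_TEST_CASE(explicit_part_size_is_respected) {
    auto [parts, size] = s3::calc_part_size(100 * MiB, 10 * MiB);
    BOOST_CHECK_EQUAL(parts, 10u);
    BOOST_CHECK_EQUAL(size, 10 * MiB);
}

// Default part size: a 1 TiB object cannot fit in 10,000 parts of 50 MiB each,
// so the returned part size must grow while the part count stays in range.
BOOST_AUTO_TEST_CASE(default_part_size_grows_for_huge_objects) {
    auto [parts, size] = s3::calc_part_size(size_t(1) << 40, 0);
    BOOST_CHECK_LE(parts, 10'000u);
    BOOST_CHECK_GE(size, 50 * MiB);
}

The error cases mentioned in the message (a part size below the minimum, or a part count above the limit) go through on_internal_error(), so how they surface in a test depends on the abort-on-internal-error setting; they are omitted from this sketch.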
@@ -1522,30 +1522,6 @@ class client::do_upload_file : private multipart_upload {
         }).finally([gh = std::move(gh)] {});
     }
 
-    // returns pair<num_of_parts, part_size>
-    static std::pair<unsigned, size_t> calc_part_size(size_t total_size, size_t part_size) {
-        if (part_size > 0) {
-            if (part_size < aws_minimum_part_size) {
-                on_internal_error(s3l, fmt::format("part_size too small: {} is smaller than minimum part size: {}", part_size, aws_minimum_part_size));
-            }
-            const size_t num_parts = div_ceil(total_size, part_size);
-            if (num_parts > aws_maximum_parts_in_piece) {
-                on_internal_error(s3l, fmt::format("too many parts: {} > {}", num_parts, aws_maximum_parts_in_piece));
-            }
-            return {num_parts, part_size};
-        }
-        // if part_size is 0, this means the caller leaves it to us to decide the part_size. The default part size for multipart upload is set to 50MiB. This
-        // value was determined empirically by running `perf_s3_client` with various part sizes to find the optimal one.
-        static constexpr size_t default_part_size = 50_MiB;
-        const size_t num_parts = div_ceil(total_size, default_part_size);
-        if (num_parts <= aws_maximum_parts_in_piece) {
-            return {num_parts, default_part_size};
-        }
-
-        part_size = align_up(div_ceil(total_size, aws_maximum_parts_in_piece), 1_MiB);
-        return {div_ceil(total_size, part_size), part_size};
-    }
-
     future<> multi_part_upload(file&& f, uint64_t total_size, size_t part_size) {
         co_await start_upload();
 
@@ -1911,4 +1887,28 @@ future<> client::bucket_lister::close() noexcept {
     }
 }
 
+// returns pair<num_of_parts, part_size>
+std::pair<unsigned, size_t> calc_part_size(size_t total_size, size_t part_size) {
+    if (part_size > 0) {
+        if (part_size < aws_minimum_part_size) {
+            on_internal_error(s3l, fmt::format("part_size too small: {} is smaller than minimum part size: {}", part_size, aws_minimum_part_size));
+        }
+        const size_t num_parts = div_ceil(total_size, part_size);
+        if (num_parts > aws_maximum_parts_in_piece) {
+            on_internal_error(s3l, fmt::format("too many parts: {} > {}", num_parts, aws_maximum_parts_in_piece));
+        }
+        return {num_parts, part_size};
+    }
+    // if part_size is 0, this means the caller leaves it to us to decide the part_size. The default part size for multipart upload is set to 50MiB. This
+    // value was determined empirically by running `perf_s3_client` with various part sizes to find the optimal one.
+    static constexpr size_t default_part_size = 50_MiB;
+    const size_t num_parts = div_ceil(total_size, default_part_size);
+    if (num_parts <= aws_maximum_parts_in_piece) {
+        return {num_parts, default_part_size};
+    }
+
+    part_size = align_up(div_ceil(total_size, aws_maximum_parts_in_piece), 1_MiB);
+    return {div_ceil(total_size, part_size), part_size};
+}
+
 } // s3 namespace
 
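To make the fallback branch above concrete: assuming aws_maximum_parts_in_piece is the S3 limit of 10,000 parts (its actual value is not shown in this diff), a 1 TiB object would need 20,972 parts at the 50 MiB default, so the part size is bumped to the next whole MiB that brings the count back under the limit. A self-contained sketch of that arithmetic, with local stand-ins for div_ceil and align_up:

#include <cstdint>
#include <cstdio>

static constexpr uint64_t MiB = 1024 * 1024;
static constexpr uint64_t max_parts = 10'000;            // assumed S3 part limit
static constexpr uint64_t default_part_size = 50 * MiB;  // default used by the code above

static uint64_t div_ceil(uint64_t a, uint64_t b) { return (a + b - 1) / b; }
static uint64_t align_up(uint64_t v, uint64_t a) { return div_ceil(v, a) * a; }

int main() {
    const uint64_t total_size = uint64_t(1) << 40;        // 1 TiB object
    // 1 TiB / 50 MiB = 20972 parts > 10000, so grow the part size instead.
    const uint64_t part_size = align_up(div_ceil(total_size, max_parts), 1 * MiB);
    std::printf("part_size = %llu MiB, parts = %llu\n",
                (unsigned long long)(part_size / MiB),
                (unsigned long long)div_ceil(total_size, part_size));
    // Prints: part_size = 105 MiB, parts = 9987
}
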
@@ -231,6 +231,8 @@ public:
     future<> close();
 };
 
+std::pair<unsigned, size_t> calc_part_size(size_t total_size, size_t part_size);
+
 } // namespace s3
 
 template <>