mirror of
https://github.com/scylladb/scylladb.git
synced 2026-05-12 19:02:12 +00:00
streamed_mutation_freezer: use chunked_vector instead of std::deque for clustering rows
The streamed_mutation_freezer class uses a deque to avoid large allocations but, as seen in the referenced issue, it fails when the vector backing the deque grows too large. This may be a problem in itself, but the issue doesn't provide enough information to tell. Fix the immediate problem by switching to chunked_vector, which is better at avoiding large allocations. We do lose some early freeing in serialize_mutation_fragments(), but since most of the memory should be in the clustering rows themselves, not in the deque/chunked_vector holding them, it should not be a problem. Fixes #28275 Closes scylladb/scylladb#28281
This commit is contained in:
@@ -178,7 +178,7 @@ class fragmenting_mutation_freezer {
     tombstone _partition_tombstone;
     std::optional<static_row> _sr;
-    std::deque<clustering_row> _crs;
+    utils::chunked_vector<clustering_row> _crs;
     range_tombstone_list _rts;
 
     frozen_mutation_consumer_fn _consumer;
 
@@ -242,7 +242,7 @@ class streamed_mutation_freezer {
 
     tombstone _partition_tombstone;
     std::optional<static_row> _sr;
-    std::deque<clustering_row> _crs;
+    utils::chunked_vector<clustering_row> _crs;
     range_tombstone_list _rts;
 public:
     streamed_mutation_freezer(const schema& s, const partition_key& key)
 
@@ -226,7 +226,7 @@ future<> mutation_partition_serializer::write_gently(ser::writer_of_mutation_par
 
 void serialize_mutation_fragments(const schema& s, tombstone partition_tombstone,
         std::optional<static_row> sr, range_tombstone_list rts,
-        std::deque<clustering_row> crs, ser::writer_of_mutation_partition<bytes_ostream>&& wr)
+        utils::chunked_vector<clustering_row> crs, ser::writer_of_mutation_partition<bytes_ostream>&& wr)
 {
     auto srow_writer = std::move(wr).write_tomb(partition_tombstone).start_static_row();
     auto row_tombstones = [&] {
@@ -242,10 +242,9 @@ void serialize_mutation_fragments(const schema& s, tombstone partition_tombstone
     rts.clear();
 
     auto clustering_rows = std::move(row_tombstones).end_range_tombstones().start_rows();
-    while (!crs.empty()) {
-        auto& cr = crs.front();
+    for (auto& cr : crs) {
         write_row(clustering_rows.add(), s, cr.key(), cr.cells(), cr.marker(), cr.tomb()).end_deletable_row();
-        crs.pop_front();
+        cr = clustering_row(clustering_key_prefix{});
     }
     std::move(clustering_rows).end_rows().end_mutation_partition();
 }
 
@@ -41,4 +41,4 @@ public:
 
 void serialize_mutation_fragments(const schema& s, tombstone partition_tombstone,
         std::optional<static_row> sr, range_tombstone_list range_tombstones,
-        std::deque<clustering_row> clustering_rows, ser::writer_of_mutation_partition<bytes_ostream>&&);
+        utils::chunked_vector<clustering_row> clustering_rows, ser::writer_of_mutation_partition<bytes_ostream>&&);
Reference in New Issue
Block a user