From 1bf643d4fd2dbcf5e7b154b0b4e5c5ec0777db5c Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Fri, 2 Jul 2021 16:50:35 +0300 Subject: [PATCH] mutation_partition: Pin mutable access to range tombstones Some callers of mutation_partition::row_tombstones() don't want (and shouldn't) modify the list itself, while they may want to modify the tombstones. This patch explicitly locates those that need to modify the collection, because the next patch will return an immutable collection for the others. Signed-off-by: Pavel Emelyanov --- cache_flat_mutation_reader.hh | 2 +- flat_mutation_reader.cc | 4 ++-- mutation_partition.cc | 2 +- mutation_partition.hh | 1 + partition_version.cc | 4 ++-- test/boost/cache_flat_mutation_reader_test.cc | 2 +- test/boost/row_cache_test.cc | 2 +- 7 files changed, 9 insertions(+), 8 deletions(-) diff --git a/cache_flat_mutation_reader.hh b/cache_flat_mutation_reader.hh index abc1ee7b0e..401ee73581 100644 --- a/cache_flat_mutation_reader.hh +++ b/cache_flat_mutation_reader.hh @@ -732,7 +732,7 @@ void cache_flat_mutation_reader::maybe_add_to_cache(const range_tombstone& rt) { if (can_populate()) { clogger.trace("csm {}: maybe_add_to_cache({})", fmt::ptr(this), rt); _lsa_manager.run_in_update_section_with_allocator([&] { - _snp->version()->partition().row_tombstones().apply_monotonically(*_schema, rt); + _snp->version()->partition().mutable_row_tombstones().apply_monotonically(*_schema, rt); }); } else { _read_context.cache().on_mispopulate(); diff --git a/flat_mutation_reader.cc b/flat_mutation_reader.cc index 6cf1efd60e..d5888c94c4 100644 --- a/flat_mutation_reader.cc +++ b/flat_mutation_reader.cc @@ -466,7 +466,7 @@ flat_mutation_reader_from_mutations(reader_permit permit, std::vector } } void prepare_next_range_tombstone() { - auto& rts = _cur->partition().row_tombstones(); + auto& rts = _cur->partition().mutable_row_tombstones(); auto rt = rts.pop_front_and_lock(); if (rt) { auto rt_deleter = defer([rt] { current_deleter()(rt); }); @@ -521,7 
+521,7 @@ flat_mutation_reader_from_mutations(reader_permit permit, std::vector auto deleter = current_deleter(); crs.clear_and_dispose(deleter); - auto &rts = _cur->partition().row_tombstones(); + auto &rts = _cur->partition().mutable_row_tombstones(); auto rt = rts.pop_front_and_lock(); while (rt) { current_deleter()(rt); diff --git a/mutation_partition.cc b/mutation_partition.cc index 7c0357c10e..22fcdbb2b0 100644 --- a/mutation_partition.cc +++ b/mutation_partition.cc @@ -1136,7 +1136,7 @@ bool mutation_partition::equal_continuity(const schema& s, const mutation_partit mutation_partition mutation_partition::sliced(const schema& s, const query::clustering_row_ranges& ranges) const { auto p = mutation_partition(*this, s, ranges); - p.row_tombstones().trim(s, ranges); + p._row_tombstones.trim(s, ranges); return p; } diff --git a/mutation_partition.hh b/mutation_partition.hh index 42be1d91e4..c1516f8bd6 100644 --- a/mutation_partition.hh +++ b/mutation_partition.hh @@ -1314,6 +1314,7 @@ public: const range_tombstone_list& row_tombstones() const noexcept { return _row_tombstones; } range_tombstone_list& row_tombstones() noexcept { return _row_tombstones; } + range_tombstone_list& mutable_row_tombstones() noexcept { return _row_tombstones; } const row* find_row(const schema& s, const clustering_key& key) const; tombstone range_tombstone_for_row(const schema& schema, const clustering_key& key) const; diff --git a/partition_version.cc b/partition_version.cc index 40a10fe7d7..3cc5656e5b 100644 --- a/partition_version.cc +++ b/partition_version.cc @@ -437,10 +437,10 @@ utils::coroutine partition_entry::apply_to_incomplete(const schema& s, } } dirty_size += current->partition().row_tombstones().external_memory_usage(s); - range_tombstone_list& tombstones = dst.partition().row_tombstones(); + range_tombstone_list& tombstones = dst.partition().mutable_row_tombstones(); // FIXME: defer while applying range tombstones if (can_move) { - tombstones.apply_monotonically(s, 
std::move(current->partition().row_tombstones())); + tombstones.apply_monotonically(s, std::move(current->partition().mutable_row_tombstones())); } else { tombstones.apply_monotonically(s, current->partition().row_tombstones()); } diff --git a/test/boost/cache_flat_mutation_reader_test.cc b/test/boost/cache_flat_mutation_reader_test.cc index df73725668..da617a31a2 100644 --- a/test/boost/cache_flat_mutation_reader_test.cc +++ b/test/boost/cache_flat_mutation_reader_test.cc @@ -177,7 +177,7 @@ struct expected_tombstone { }; static void assert_cached_tombstones(partition_snapshot_ptr snp, std::deque expected, const query::clustering_row_ranges& ck_ranges) { - range_tombstone_list rts = snp->version()->partition().row_tombstones(); + range_tombstone_list rts = snp->version()->partition().mutable_row_tombstones(); rts.trim(*SCHEMA, ck_ranges); range_tombstone_list expected_list(*SCHEMA); diff --git a/test/boost/row_cache_test.cc b/test/boost/row_cache_test.cc index ec49f57a00..b06d7c8113 100644 --- a/test/boost/row_cache_test.cc +++ b/test/boost/row_cache_test.cc @@ -3249,7 +3249,7 @@ SEASTAR_TEST_CASE(test_concurrent_reads_and_eviction) { auto actual = *actual_opt; auto&& ranges = slice.row_ranges(*s, actual.key()); - actual.partition().row_tombstones().trim(*s, ranges); + actual.partition().mutable_row_tombstones().trim(*s, ranges); auto n_to_consider = last_generation - oldest_generation + 1; auto possible_versions = boost::make_iterator_range(versions.end() - n_to_consider, versions.end());