mutation_partition: Pin mutable access to range tombstones

Some callers of mutation_partition::row_tombstones() don't want
(and shouldn't) modify the list itself, while they may want to
modify the tombstones. This patch explicitly marks those that
need to modify the collection, because the next patch will
return an immutable collection for the others.

Signed-off-by: Pavel Emelyanov <xemul@scylladb.com>
This commit is contained in:
Pavel Emelyanov
2021-07-02 16:50:35 +03:00
parent 05b8cdfd24
commit 1bf643d4fd
7 changed files with 9 additions and 8 deletions

View File

@@ -732,7 +732,7 @@ void cache_flat_mutation_reader::maybe_add_to_cache(const range_tombstone& rt) {
if (can_populate()) {
clogger.trace("csm {}: maybe_add_to_cache({})", fmt::ptr(this), rt);
_lsa_manager.run_in_update_section_with_allocator([&] {
_snp->version()->partition().row_tombstones().apply_monotonically(*_schema, rt);
_snp->version()->partition().mutable_row_tombstones().apply_monotonically(*_schema, rt);
});
} else {
_read_context.cache().on_mispopulate();

View File

@@ -466,7 +466,7 @@ flat_mutation_reader_from_mutations(reader_permit permit, std::vector<mutation>
}
}
void prepare_next_range_tombstone() {
auto& rts = _cur->partition().row_tombstones();
auto& rts = _cur->partition().mutable_row_tombstones();
auto rt = rts.pop_front_and_lock();
if (rt) {
auto rt_deleter = defer([rt] { current_deleter<range_tombstone>()(rt); });
@@ -521,7 +521,7 @@ flat_mutation_reader_from_mutations(reader_permit permit, std::vector<mutation>
auto deleter = current_deleter<rows_entry>();
crs.clear_and_dispose(deleter);
auto &rts = _cur->partition().row_tombstones();
auto &rts = _cur->partition().mutable_row_tombstones();
auto rt = rts.pop_front_and_lock();
while (rt) {
current_deleter<range_tombstone>()(rt);

View File

@@ -1136,7 +1136,7 @@ bool mutation_partition::equal_continuity(const schema& s, const mutation_partit
mutation_partition mutation_partition::sliced(const schema& s, const query::clustering_row_ranges& ranges) const {
auto p = mutation_partition(*this, s, ranges);
p.row_tombstones().trim(s, ranges);
p._row_tombstones.trim(s, ranges);
return p;
}

View File

@@ -1314,6 +1314,7 @@ public:
const range_tombstone_list& row_tombstones() const noexcept { return _row_tombstones; }
range_tombstone_list& row_tombstones() noexcept { return _row_tombstones; }
range_tombstone_list& mutable_row_tombstones() noexcept { return _row_tombstones; }
const row* find_row(const schema& s, const clustering_key& key) const;
tombstone range_tombstone_for_row(const schema& schema, const clustering_key& key) const;

View File

@@ -437,10 +437,10 @@ utils::coroutine partition_entry::apply_to_incomplete(const schema& s,
}
}
dirty_size += current->partition().row_tombstones().external_memory_usage(s);
range_tombstone_list& tombstones = dst.partition().row_tombstones();
range_tombstone_list& tombstones = dst.partition().mutable_row_tombstones();
// FIXME: defer while applying range tombstones
if (can_move) {
tombstones.apply_monotonically(s, std::move(current->partition().row_tombstones()));
tombstones.apply_monotonically(s, std::move(current->partition().mutable_row_tombstones()));
} else {
tombstones.apply_monotonically(s, current->partition().row_tombstones());
}

View File

@@ -177,7 +177,7 @@ struct expected_tombstone {
};
static void assert_cached_tombstones(partition_snapshot_ptr snp, std::deque<range_tombstone> expected, const query::clustering_row_ranges& ck_ranges) {
range_tombstone_list rts = snp->version()->partition().row_tombstones();
range_tombstone_list rts = snp->version()->partition().mutable_row_tombstones();
rts.trim(*SCHEMA, ck_ranges);
range_tombstone_list expected_list(*SCHEMA);

View File

@@ -3249,7 +3249,7 @@ SEASTAR_TEST_CASE(test_concurrent_reads_and_eviction) {
auto actual = *actual_opt;
auto&& ranges = slice.row_ranges(*s, actual.key());
actual.partition().row_tombstones().trim(*s, ranges);
actual.partition().mutable_row_tombstones().trim(*s, ranges);
auto n_to_consider = last_generation - oldest_generation + 1;
auto possible_versions = boost::make_iterator_range(versions.end() - n_to_consider, versions.end());