Files
scylladb/test/lib/memtable_snapshot_source.hh
Avi Kivity fdc1449392 treewide: rename flat_mutation_reader_v2 to mutation_reader
flat_mutation_reader_v2 was introduced in a pair of commits in 2021:

  e3309322c3 "Clone flat_mutation_reader related classes into v2 variants"
  08b5773c12 "Adapt flat_mutation_reader_v2 to the new version of the API"

as a replacement for flat_mutation_reader, using range_tombstone_change
instead of range_tombstone to represent range tombstones. See
those commits for more information.

The transition was incremental; the last use of the original
flat_mutation_reader was removed in 2022 in commit

  026f8cc1e7 "db: Use mutation_partition_v2 in mvcc"

In turn, flat_mutation_reader was introduced in 2017 in commit

  748205ca75 "Introduce flat_mutation_reader"

To transition from a mutation_reader that nested rows within
a partition in a separate stream, to a flat reader that streamed
partitions and rows in the same stream.

Here, we reclaim the original name and rename the awkward
flat_mutation_reader_v2 to mutation_reader.

Note that mutation_fragment_v2 remains since we still use the original
for compatibility, sometimes.

Some notes about the transition:

 - files were also renamed. In one case (flat_mutation_reader_test.cc), the
   rename target already existed, so we rename to
    mutation_reader_another_test.cc.

 - a namespace 'mutation_reader' with two definitions existed (in
   mutation_reader_fwd.hh). Its contents was folded into the mutation_reader
   class. As a result, a few #includes had to be adjusted.

Closes scylladb/scylladb#19356
2024-06-21 07:12:06 +03:00

153 lines
5.1 KiB
C++

/*
* Copyright (C) 2017-present ScyllaDB
*/
/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/
#pragma once
#include "readers/combined.hh"
#include "readers/mutation_reader.hh"
#include "replica/memtable.hh"
#include "utils/phased_barrier.hh"
#include "test/lib/reader_concurrency_semaphore.hh"
#include <seastar/core/circular_buffer.hh>
#include <seastar/core/thread.hh>
#include <seastar/core/condition-variable.hh>
#include <seastar/util/closeable.hh>
// In-memory snapshottable mutation source backed by a set of replica::memtable
// instances. Writes go to the newest ("pending") memtable; operator()() freezes
// the current set as a snapshot. A background fiber merges memtables once
// enough of them accumulate, keeping snapshots cheap.
// Must be destroyed in a seastar thread (the destructor blocks on the
// background compactor's future).
class memtable_snapshot_source {
schema_ptr _s;
// All memtables, oldest first; the last one is the active write target.
circular_buffer<lw_shared_ptr<replica::memtable>> _memtables;
// Tracks in-flight operations (compactions / bulk applies) so clear() can
// wait for them to drain.
utils::phased_barrier _apply;
bool _closed = false;
// Signalled whenever enough memtables have accumulated to warrant compaction.
seastar::condition_variable _should_compact;
// Future of the background compaction fiber started in the constructor.
future<> _compactor;
private:
// Compaction is warranted once 3 or more memtables have piled up
// (and we are not shutting down).
bool should_compact() const {
return !_closed && _memtables.size() >= 3;
}
// Creates an empty memtable with the current schema.
lw_shared_ptr<replica::memtable> new_memtable() {
return make_lw_shared<replica::memtable>(_s);
}
// Returns the active write target, creating it lazily on first use.
lw_shared_ptr<replica::memtable> pending() {
if (_memtables.empty()) {
_memtables.push_back(new_memtable());
on_new_memtable();
}
return _memtables.back();
}
// Wakes the compactor fiber if the memtable count crossed the threshold.
void on_new_memtable() {
if (should_compact()) {
_should_compact.signal();
}
}
// Merges all currently existing memtables into a single new one.
// Runs inside the compactor fiber (a seastar thread), so blocking
// .get() calls are allowed here.
void compact() {
if (_memtables.empty()) {
return;
}
auto count = _memtables.size();
auto op = _apply.start(); // keep clear() from completing while we run
auto new_mt = make_lw_shared<replica::memtable>(_s);
tests::reader_concurrency_semaphore_wrapper semaphore;
auto permit = semaphore.make_permit();
std::vector<mutation_reader> readers;
for (auto&& mt : _memtables) {
readers.push_back(mt->make_flat_reader(new_mt->schema(),
permit,
query::full_partition_range,
new_mt->schema()->full_slice(),
nullptr,
streamed_mutation::forwarding::no,
mutation_reader::forwarding::yes));
}
// Writes arriving while we consume the readers below must not go into
// the memtables being compacted, so redirect them to a fresh memtable.
_memtables.push_back(new_memtable());
auto&& rd = make_combined_reader(new_mt->schema(), permit, std::move(readers));
auto close_rd = deferred_close(rd);
consume_partitions(rd, [&] (mutation&& m) {
new_mt->apply(std::move(m));
return stop_iteration::no;
}).get();
// Drop the source memtables (the first `count` entries) and publish the
// merged result. The fresh write target pushed above stays in place.
_memtables.erase(_memtables.begin(), _memtables.begin() + count);
_memtables.push_back(new_mt);
}
public:
memtable_snapshot_source(schema_ptr s)
: _s(s)
, _compactor(seastar::async([this] () noexcept {
// Background fiber: sleep until signalled, then compact until the
// memtable count drops back below the threshold.
while (!_closed) {
std::optional<future<>> f;
{
memory::scoped_critical_alloc_section dfg;
// condition_variable::wait() also allocates memory
f = _should_compact.wait();
}
// Waiting on the future should not be covered by critical section.
f->get();
memory::scoped_critical_alloc_section dfg;
while (should_compact()) {
compact();
}
}
}))
{ }
memtable_snapshot_source(memtable_snapshot_source&&) = delete; // 'this' captured.
~memtable_snapshot_source() {
// Stop the compactor fiber and synchronously wait for it to exit.
// NOTE(review): assumes there is no yield point between the fiber's
// _closed check and its wait() call, so the broadcast below cannot be
// missed — confirm against seastar::condition_variable semantics.
_closed = true;
_should_compact.broadcast();
_compactor.get();
}
// Will cause subsequent apply() calls to accept writes conforming to given schema (or older).
// Without this, the writes will be upgraded to the old schema and snapshots will not reflect
// parts of writes which depend on the new schema.
void set_schema(schema_ptr s) {
pending()->set_schema(s);
_s = s;
}
// Drops all contents. Must run in a seastar thread.
void clear() {
_memtables.erase(_memtables.begin(), _memtables.end());
// Wait for in-flight compactions/applies; they may re-add memtables,
// hence the second erase below.
_apply.advance_and_await().get();
_memtables.erase(_memtables.begin(), _memtables.end());
}
// Total memory used by all memtables' regions.
size_t used_space() const {
size_t space = 0;
for (auto&& mt : _memtables) {
space += mt->region().occupancy().used_space();
}
return space;
}
// Applies a single mutation to the pending memtable.
void apply(const mutation& mt) {
pending()->apply(mt);
}
// Copies the contents of mt into a new memtable. Must run in a seastar thread.
// NOTE(review): unlike the other overloads, this does not call
// on_new_memtable() after pushing, so it will not wake the compactor by
// itself — confirm whether that is intentional.
void apply(replica::memtable& mt) {
auto op = _apply.start();
auto new_mt = new_memtable();
tests::reader_concurrency_semaphore_wrapper semaphore;
new_mt->apply(mt, semaphore.make_permit()).get();
_memtables.push_back(new_mt);
}
// Adopts mt as-is into the set. mt must not change from now on.
void apply(lw_shared_ptr<replica::memtable> mt) {
auto op = _apply.start();
_memtables.push_back(std::move(mt));
on_new_memtable();
}
// Returns a snapshot of the current contents as a combined mutation_source.
mutation_source operator()() {
std::vector<mutation_source> src;
for (auto&& mt : _memtables) {
src.push_back(mt->as_data_source());
}
_memtables.push_back(new_memtable()); // so that src won't change any more.
on_new_memtable();
return make_combined_mutation_source(std::move(src));
}
};