Files
scylladb/test/lib/reader_lifecycle_policy.hh
Avi Kivity fdc1449392 treewide: rename flat_mutation_reader_v2 to mutation_reader
flat_mutation_reader_v2 was introduced in a pair of commits in 2021:

  e3309322c3 "Clone flat_mutation_reader related classes into v2 variants"
  08b5773c12 "Adapt flat_mutation_reader_v2 to the new version of the API"

as a replacement for flat_mutation_reader, using range_tombstone_change
instead of range_tombstone to represent range tombstones. See
those commits for more information.

The transition was incremental; the last use of the original
flat_mutation_reader was removed in 2022 in commit

  026f8cc1e7 "db: Use mutation_partition_v2 in mvcc"

In turn, flat_mutation_reader was introduced in 2017 in commit

  748205ca75 "Introduce flat_mutation_reader"

To transition from a mutation_reader that nested rows within
a partition in a separate stream, to a flat reader that streamed
partitions and rows in the same stream.

Here, we reclaim the original name and rename the awkward
flat_mutation_reader_v2 to mutation_reader.

Note that mutation_fragment_v2 remains since we still use the original
for compatibility, sometimes.

Some notes about the transition:

 - files were also renamed. In one case (flat_mutation_reader_test.cc), the
   rename target already existed, so we rename to
    mutation_reader_another_test.cc.

 - a namespace 'mutation_reader' with two definitions existed (in
mutation_reader_fwd.hh). Its contents were folded into the mutation_reader
   class. As a result, a few #includes had to be adjusted.

Closes scylladb/scylladb#19356
2024-06-21 07:12:06 +03:00

106 lines
4.5 KiB
C++

/*
* Copyright (C) 2020-present ScyllaDB
*/
/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/
#pragma once
#include "readers/multishard.hh"
#include <seastar/core/gate.hh>
// Test implementation of reader_lifecycle_policy_v2 for exercising multishard
// readers. Reader construction is delegated to a user-supplied factory
// function; this class only manages the per-shard state each shard reader
// needs: the partition range and slice the reader references, plus a
// per-shard reader_concurrency_semaphore.
class test_reader_lifecycle_policy
: public reader_lifecycle_policy_v2
, public enable_shared_from_this<test_reader_lifecycle_policy> {
// Callback that builds the actual shard reader. It receives references to
// the range/slice stored in this shard's reader_context (see
// create_reader()), so they remain valid for the reader's lifetime.
using factory_function = std::function<mutation_reader(
schema_ptr,
reader_permit,
const dht::partition_range&,
const query::partition_slice&,
tracing::trace_state_ptr,
mutation_reader::forwarding)>;
// Shard-local state backing one shard's reader. Owned by the shard it was
// created on (held through a foreign_ptr in _contexts).
struct reader_context {
// Created lazily in semaphore(); stopped and torn down in destroy_reader().
std::optional<reader_concurrency_semaphore> semaphore;
// lw_shared_ptr so update_read_range() can swap in a new range while
// readers still hold references to the old one.
lw_shared_ptr<const dht::partition_range> range;
std::optional<const query::partition_slice> slice;
reader_context() = default;
reader_context(dht::partition_range range, query::partition_slice slice)
: range(make_lw_shared<const dht::partition_range>(std::move(range))), slice(std::move(slice)) {
}
};
factory_function _factory_function;
// One (possibly empty) context per shard, indexed by shard id.
std::vector<foreign_ptr<std::unique_ptr<reader_context>>> _contexts;
std::vector<future<>> _destroy_futures;
// When set, semaphores are created with zero memory so that every inactive
// read is evicted as soon as it is registered (see semaphore()).
bool _evict_paused_readers = false;
public:
explicit test_reader_lifecycle_policy(factory_function f, bool evict_paused_readers = false)
: _factory_function(std::move(f))
, _contexts(smp::count)
, _evict_paused_readers(evict_paused_readers) {
}
// Creates the reader for the current shard. The range and slice are copied
// into the shard's reader_context first, and the factory is invoked with
// references to those copies, so the reader never depends on the caller's
// (possibly cross-shard) arguments staying alive.
virtual mutation_reader create_reader(
schema_ptr schema,
reader_permit permit,
const dht::partition_range& range,
const query::partition_slice& slice,
tracing::trace_state_ptr trace_state,
mutation_reader::forwarding fwd_mr) override {
const auto shard = this_shard_id();
if (_contexts[shard]) {
// Context already exists (e.g. semaphore() ran first); just refresh
// the stored range and slice.
_contexts[shard]->range = make_lw_shared<const dht::partition_range>(range);
_contexts[shard]->slice.emplace(slice);
} else {
_contexts[shard] = make_foreign(std::make_unique<reader_context>(range, slice));
}
return _factory_function(std::move(schema), std::move(permit), *_contexts[shard]->range, *_contexts[shard]->slice, std::move(trace_state), fwd_mr);
}
// Returns the range the current shard's reader was created with (or last
// updated to). Requires that a context exists on this shard.
virtual const dht::partition_range* get_read_range() const override {
const auto shard = this_shard_id();
assert(_contexts[shard]);
return _contexts[shard]->range.get();
}
// Replaces the stored range for the current shard. Old range stays alive
// as long as any reader still holds its lw_shared_ptr.
void update_read_range(lw_shared_ptr<const dht::partition_range> range) override {
const auto shard = this_shard_id();
assert(_contexts[shard]);
_contexts[shard]->range = std::move(range);
}
// Tears down the current shard's reader and its context, in order:
// 1) pull the reader out of the inactive-read registry (it may have been
//    evicted already, in which case there is nothing to close);
// 2) close the reader if we got it back;
// 3) stop the shard's semaphore;
// 4) release the per-shard context.
// NOTE(review): ctx is captured by reference across continuations — safe
// only because the context outlives this chain until release() in step 4.
virtual future<> destroy_reader(stopped_reader reader) noexcept override {
auto& ctx = _contexts[this_shard_id()];
auto reader_opt = ctx->semaphore->unregister_inactive_read(std::move(reader.handle));
auto ret = reader_opt ? reader_opt->close() : make_ready_future<>();
return ret.finally([&ctx] {
return ctx->semaphore->stop().finally([&ctx] {
ctx.release();
});
});
}
// Returns the current shard's semaphore, creating the context and/or the
// semaphore lazily on first use.
virtual reader_concurrency_semaphore& semaphore() override {
const auto shard = this_shard_id();
if (!_contexts[shard]) {
_contexts[shard] = make_foreign(std::make_unique<reader_context>());
} else if (_contexts[shard]->semaphore) {
return *_contexts[shard]->semaphore;
}
// To support multiple reader life-cycle instances alive at the same
// time, incorporate `this` into the name, to make their names unique.
auto name = format("tests::reader_lifecycle_policy@{}@shard_id={}", fmt::ptr(this), shard);
if (_evict_paused_readers) {
// Create with no memory, so all inactive reads are immediately evicted.
_contexts[shard]->semaphore.emplace(reader_concurrency_semaphore::for_tests{}, std::move(name), 1, 0);
} else {
_contexts[shard]->semaphore.emplace(reader_concurrency_semaphore::no_limits{}, std::move(name), reader_concurrency_semaphore::register_metrics::no);
}
return *_contexts[shard]->semaphore;
}
// Obtains a permit from this shard's semaphore with a fixed nominal memory
// cost of 128 KiB per reader.
virtual future<reader_permit> obtain_reader_permit(schema_ptr schema, const char* const description, db::timeout_clock::time_point timeout, tracing::trace_state_ptr trace_ptr) override {
return semaphore().obtain_permit(schema, description, 128 * 1024, timeout, std::move(trace_ptr));
}
};