Merge "flat_mutation_reader: keep timeout in permit" from Benny

"
This series moves the timeout parameter, which is passed to most
flat_mutation_reader (f_m_r) methods, into the reader_permit.  This
eliminates the need to pass the timeout around explicitly, as it is
taken from the permit whenever it is needed.

The permit timeout is updated in certain cases
where the permit/reader is paused and later
retrieved for reuse.

The following perf_simple_query results show a ~1%
reduction in insns/op and a corresponding increase in tps.

$ build/release/test/perf/perf_simple_query -c 1 --operations-per-shard 1000000 --task-quota-ms 10

Before:
102500.38 tps ( 75.1 allocs/op,  12.1 tasks/op,   45620 insns/op)

After:
103957.53 tps ( 75.1 allocs/op,  12.1 tasks/op,   45372 insns/op)

Test: unit(dev)
DTest:
    repair_additional_test.py:RepairAdditionalTest.repair_abort_test (release)
    materialized_views_test.py:TestMaterializedViews.remove_node_during_mv_insert_3_nodes_test (release)
    materialized_views_test.py:InterruptBuildProcess.interrupt_build_process_with_resharding_half_to_max_test (release)
    migration_test.py:TTLWithMigrate.big_table_with_ttls_test (release)
"

* tag 'reader_permit-timeout-v6' of github.com:bhalevy/scylla:
  flat_mutation_reader: get rid of timeout parameter
  reader_concurrency_semaphore: use permit timeout for admission
  reader_concurrency_semaphore: adjust reactivated reader timeout
  multishard_mutation_query: create_reader: validate saved reader permit
  repair: row_level: read_mutation_fragment: set reader timeout
  flat_mutation_reader: maybe_timed_out: use permit timeout
  test: sstable_datafile_test: add sstable_reader_with_timeout
  reader_permit: add timeout member
Avi Kivity
2021-08-25 17:51:10 +03:00
85 changed files with 1090 additions and 1000 deletions
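
For orientation, here is a minimal, self-contained sketch of the shape this series moves to, using plain C++ and std::chrono stand-ins rather than the actual Scylla reader_permit, flat_mutation_reader, or Seastar future types: the permit carries the timeout, readers consult it through the permit instead of taking a time_point parameter, and the caller refreshes the timeout when a paused reader is retrieved for reuse.

// Minimal sketch only: simplified stand-ins for reader_permit and a reader,
// using std::chrono instead of Scylla's db::timeout_clock and Seastar futures.
#include <chrono>
#include <iostream>
#include <stdexcept>
#include <utility>

using timeout_clock = std::chrono::steady_clock;

class reader_permit {
    timeout_clock::time_point _timeout;
public:
    explicit reader_permit(timeout_clock::time_point timeout) : _timeout(timeout) {}
    timeout_clock::time_point timeout() const noexcept { return _timeout; }
    // Called e.g. when a paused permit/reader is retrieved for reuse.
    void set_timeout(timeout_clock::time_point timeout) noexcept { _timeout = timeout; }
};

class reader {
    reader_permit _permit;
public:
    explicit reader(reader_permit permit) : _permit(std::move(permit)) {}

    // No timeout parameter any more: the deadline comes from the permit.
    void fill_buffer() {
        maybe_timed_out();
        // ... produce fragments ...
    }

    void maybe_timed_out() const {
        if (timeout_clock::now() >= _permit.timeout()) {
            throw std::runtime_error("timed out");
        }
    }

    reader_permit& permit() noexcept { return _permit; }
};

int main() {
    using namespace std::chrono_literals;
    reader r(reader_permit(timeout_clock::now() + 100ms));
    r.fill_buffer();                                      // uses the permit's timeout
    // Reader paused (e.g. saved for a later page), then reactivated:
    r.permit().set_timeout(timeout_clock::now() + 100ms); // refresh deadline for the new request
    r.fill_buffer();
    std::cout << "done\n";
}
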


@@ -114,10 +114,10 @@ class cache_flat_mutation_reader final : public flat_mutation_reader::impl {
flat_mutation_reader* _underlying = nullptr;
flat_mutation_reader_opt _underlying_holder;
future<> do_fill_buffer(db::timeout_clock::time_point);
future<> ensure_underlying(db::timeout_clock::time_point);
future<> do_fill_buffer();
future<> ensure_underlying();
void copy_from_cache_to_buffer();
future<> process_static_row(db::timeout_clock::time_point);
future<> process_static_row();
void move_to_end();
void move_to_next_range();
void move_to_range(query::clustering_row_ranges::const_iterator);
@@ -128,7 +128,7 @@ class cache_flat_mutation_reader final : public flat_mutation_reader::impl {
void add_to_buffer(range_tombstone&&);
void add_range_tombstone_to_buffer(range_tombstone&&);
void add_to_buffer(mutation_fragment&&);
future<> read_from_underlying(db::timeout_clock::time_point);
future<> read_from_underlying();
void start_reading_from_underlying();
bool after_current_range(position_in_partition_view position);
bool can_populate() const;
@@ -187,7 +187,7 @@ public:
}
cache_flat_mutation_reader(const cache_flat_mutation_reader&) = delete;
cache_flat_mutation_reader(cache_flat_mutation_reader&&) = delete;
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override;
virtual future<> fill_buffer() override;
virtual future<> next_partition() override {
clear_buffer_to_next_partition();
if (is_buffer_empty()) {
@@ -195,12 +195,12 @@ public:
}
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range&, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range&) override {
clear_buffer();
_end_of_stream = true;
return make_ready_future<>();
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> close() noexcept {
@@ -211,7 +211,7 @@ public:
};
inline
future<> cache_flat_mutation_reader::process_static_row(db::timeout_clock::time_point timeout) {
future<> cache_flat_mutation_reader::process_static_row() {
if (_snp->static_row_continuous()) {
_read_context.cache().on_row_hit();
static_row sr = _lsa_manager.run_in_read_section([this] {
@@ -223,8 +223,8 @@ future<> cache_flat_mutation_reader::process_static_row(db::timeout_clock::time_
return make_ready_future<>();
} else {
_read_context.cache().on_row_miss();
return ensure_underlying(timeout).then([this, timeout] {
return (*_underlying)(timeout).then([this] (mutation_fragment_opt&& sr) {
return ensure_underlying().then([this] {
return (*_underlying)().then([this] (mutation_fragment_opt&& sr) {
if (sr) {
assert(sr->is_static_row());
maybe_add_to_cache(sr->as_static_row());
@@ -242,10 +242,10 @@ void cache_flat_mutation_reader::touch_partition() {
}
inline
future<> cache_flat_mutation_reader::fill_buffer(db::timeout_clock::time_point timeout) {
future<> cache_flat_mutation_reader::fill_buffer() {
if (_state == state::before_static_row) {
touch_partition();
auto after_static_row = [this, timeout] {
auto after_static_row = [this] {
if (_ck_ranges_curr == _ck_ranges_end) {
finish_reader();
return make_ready_future<>();
@@ -254,26 +254,26 @@ future<> cache_flat_mutation_reader::fill_buffer(db::timeout_clock::time_point t
_lsa_manager.run_in_read_section([this] {
move_to_range(_ck_ranges_curr);
});
return fill_buffer(timeout);
return fill_buffer();
};
if (_schema->has_static_columns()) {
return process_static_row(timeout).then(std::move(after_static_row));
return process_static_row().then(std::move(after_static_row));
} else {
return after_static_row();
}
}
clogger.trace("csm {}: fill_buffer(), range={}, lb={}", fmt::ptr(this), *_ck_ranges_curr, _lower_bound);
return do_until([this] { return _end_of_stream || is_buffer_full(); }, [this, timeout] {
return do_fill_buffer(timeout);
return do_until([this] { return _end_of_stream || is_buffer_full(); }, [this] {
return do_fill_buffer();
});
}
inline
future<> cache_flat_mutation_reader::ensure_underlying(db::timeout_clock::time_point timeout) {
future<> cache_flat_mutation_reader::ensure_underlying() {
if (_underlying) {
return make_ready_future<>();
}
return _read_context.ensure_underlying(timeout).then([this] {
return _read_context.ensure_underlying().then([this] {
flat_mutation_reader& ctx_underlying = _read_context.underlying().underlying();
if (ctx_underlying.schema() != _schema) {
_underlying_holder = make_delegating_reader(ctx_underlying);
@@ -286,26 +286,26 @@ future<> cache_flat_mutation_reader::ensure_underlying(db::timeout_clock::time_p
}
inline
future<> cache_flat_mutation_reader::do_fill_buffer(db::timeout_clock::time_point timeout) {
future<> cache_flat_mutation_reader::do_fill_buffer() {
if (_state == state::move_to_underlying) {
if (!_underlying) {
return ensure_underlying(timeout).then([this, timeout] {
return do_fill_buffer(timeout);
return ensure_underlying().then([this] {
return do_fill_buffer();
});
}
_state = state::reading_from_underlying;
_population_range_starts_before_all_rows = _lower_bound.is_before_all_clustered_rows(*_schema);
if (!_read_context.partition_exists()) {
return read_from_underlying(timeout);
return read_from_underlying();
}
auto end = _next_row_in_range ? position_in_partition(_next_row.position())
: position_in_partition(_upper_bound);
return _underlying->fast_forward_to(position_range{_lower_bound, std::move(end)}, timeout).then([this, timeout] {
return read_from_underlying(timeout);
return _underlying->fast_forward_to(position_range{_lower_bound, std::move(end)}).then([this] {
return read_from_underlying();
});
}
if (_state == state::reading_from_underlying) {
return read_from_underlying(timeout);
return read_from_underlying();
}
// assert(_state == state::reading_from_cache)
return _lsa_manager.run_in_read_section([this] {
@@ -340,7 +340,7 @@ future<> cache_flat_mutation_reader::do_fill_buffer(db::timeout_clock::time_poin
}
inline
future<> cache_flat_mutation_reader::read_from_underlying(db::timeout_clock::time_point timeout) {
future<> cache_flat_mutation_reader::read_from_underlying() {
return consume_mutation_fragments_until(*_underlying,
[this] { return _state != state::reading_from_underlying || is_buffer_full(); },
[this] (mutation_fragment mf) {
@@ -415,7 +415,7 @@ future<> cache_flat_mutation_reader::read_from_underlying(db::timeout_clock::tim
}
});
return make_ready_future<>();
}, timeout);
});
}
inline


@@ -551,7 +551,7 @@ protected:
: _cf(cf)
, _sstable_creator(std::move(descriptor.creator))
, _schema(cf.schema())
, _permit(_cf.compaction_concurrency_semaphore().make_tracking_only_permit(_cf.schema().get(), "compaction"))
, _permit(_cf.compaction_concurrency_semaphore().make_tracking_only_permit(_cf.schema().get(), "compaction", db::no_timeout))
, _sstables(std::move(descriptor.sstables))
, _max_sstable_size(descriptor.max_sstable_bytes)
, _sstable_level(descriptor.level)
@@ -715,7 +715,7 @@ private:
get_compacting_sstable_writer(),
std::move(gc_consumer));
reader.consume_in_thread(std::move(cfc), db::no_timeout);
reader.consume_in_thread(std::move(cfc));
});
});
return consumer(make_sstable_reader());
@@ -1397,12 +1397,12 @@ private:
, _reader(std::move(underlying))
, _validator(*_schema)
{ }
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
virtual future<> fill_buffer() override {
if (_end_of_stream) {
return make_ready_future<>();
}
return repeat([this, timeout] {
return _reader.fill_buffer(timeout).then([this] {
return repeat([this] {
return _reader.fill_buffer().then([this] {
fill_buffer_from_underlying();
return stop_iteration(is_buffer_full() || _end_of_stream);
});
@@ -1428,10 +1428,10 @@ private:
virtual future<> next_partition() override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> close() noexcept override {
@@ -1650,7 +1650,7 @@ future<bool> scrub_validate_mode_validate_reader(flat_mutation_reader reader, co
try {
auto validator = mutation_fragment_stream_validator(*schema);
while (auto mf_opt = co_await reader(db::no_timeout)) {
while (auto mf_opt = co_await reader()) {
if (info.is_stop_requested()) [[unlikely]] {
// Compaction manager will catch this exception and re-schedule the compaction.
co_return coroutine::make_exception(compaction_stop_exception(info.ks_name, info.cf_name, info.stop_requested));
@@ -1718,7 +1718,7 @@ static future<compaction_info> scrub_sstables_validate_mode(sstables::compaction
clogger.info("Scrubbing in validate mode {}", sstables_list_msg);
auto permit = cf.compaction_concurrency_semaphore().make_tracking_only_permit(schema.get(), "scrub:validate");
auto permit = cf.compaction_concurrency_semaphore().make_tracking_only_permit(schema.get(), "scrub:validate", db::no_timeout);
auto reader = sstables->make_local_shard_sstable_reader(schema, permit, query::full_partition_range, schema->full_slice(), descriptor.io_priority,
tracing::trace_state_ptr(), ::streamed_mutation::forwarding::no, ::mutation_reader::forwarding::no, default_read_monitor_generator());


@@ -1390,7 +1390,7 @@ database::query(schema_ptr s, const query::read_command& cmd, query::result_opti
std::exception_ptr ex;
if (cmd.query_uuid != utils::UUID{} && !cmd.is_first_page) {
querier_opt = _querier_cache.lookup_data_querier(cmd.query_uuid, *s, ranges.front(), cmd.slice, trace_state);
querier_opt = _querier_cache.lookup_data_querier(cmd.query_uuid, *s, ranges.front(), cmd.slice, trace_state, timeout);
}
auto read_func = [&, this] (reader_permit permit) {
@@ -1445,7 +1445,7 @@ database::query_mutations(schema_ptr s, const query::read_command& cmd, const dh
std::exception_ptr ex;
if (cmd.query_uuid != utils::UUID{} && !cmd.is_first_page) {
querier_opt = _querier_cache.lookup_mutation_querier(cmd.query_uuid, *s, range, cmd.slice, trace_state);
querier_opt = _querier_cache.lookup_mutation_querier(cmd.query_uuid, *s, range, cmd.slice, trace_state, timeout);
}
auto read_func = [&, this] (reader_permit permit) {
@@ -1649,8 +1649,8 @@ future<mutation> database::do_apply_counter_update(column_family& cf, const froz
// counter state for each modified cell...
tracing::trace(trace_state, "Reading counter values from the CF");
auto permit = get_reader_concurrency_semaphore().make_tracking_only_permit(m_schema.get(), "counter-read-before-write");
return counter_write_query(m_schema, cf.as_mutation_source(), std::move(permit), m.decorated_key(), slice, trace_state, timeout)
auto permit = get_reader_concurrency_semaphore().make_tracking_only_permit(m_schema.get(), "counter-read-before-write", timeout);
return counter_write_query(m_schema, cf.as_mutation_source(), std::move(permit), m.decorated_key(), slice, trace_state)
.then([this, &cf, &m, m_schema, timeout, trace_state] (auto mopt) {
// ...now, that we got existing state of all affected counter
// cells we can look for our shard in each of them, increment


@@ -29,11 +29,11 @@
// All calls will first wait for a future to resolve, then forward to a given underlying reader.
class chained_delegating_reader : public flat_mutation_reader::impl {
std::unique_ptr<flat_mutation_reader> _underlying;
std::function<future<flat_mutation_reader>(db::timeout_clock::time_point)> _populate_reader;
std::function<future<flat_mutation_reader>()> _populate_reader;
std::function<void()> _on_destroyed;
public:
chained_delegating_reader(schema_ptr s, std::function<future<flat_mutation_reader>(db::timeout_clock::time_point)>&& populate, reader_permit permit, std::function<void()> on_destroyed = []{})
chained_delegating_reader(schema_ptr s, std::function<future<flat_mutation_reader>()>&& populate, reader_permit permit, std::function<void()> on_destroyed = []{})
: impl(s, std::move(permit))
, _populate_reader(std::move(populate))
, _on_destroyed(std::move(on_destroyed))
@@ -45,11 +45,11 @@ public:
_on_destroyed();
}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
virtual future<> fill_buffer() override {
if (!_underlying) {
return _populate_reader(timeout).then([this, timeout] (flat_mutation_reader&& rd) {
return _populate_reader().then([this] (flat_mutation_reader&& rd) {
_underlying = std::make_unique<flat_mutation_reader>(std::move(rd));
return fill_buffer(timeout);
return fill_buffer();
});
}
@@ -57,23 +57,23 @@ public:
return make_ready_future<>();
}
return _underlying->fill_buffer(timeout).then([this] {
return _underlying->fill_buffer().then([this] {
_end_of_stream = _underlying->is_end_of_stream();
_underlying->move_buffer_content_to(*this);
});
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
if (!_underlying) {
return _populate_reader(timeout).then([this, timeout, pr = std::move(pr)] (flat_mutation_reader&& rd) mutable {
return _populate_reader().then([this, pr = std::move(pr)] (flat_mutation_reader&& rd) mutable {
_underlying = std::make_unique<flat_mutation_reader>(std::move(rd));
return fast_forward_to(pr, timeout);
return fast_forward_to(pr);
});
}
_end_of_stream = false;
forward_buffer_to(pr.start());
return _underlying->fast_forward_to(std::move(pr), timeout);
return _underlying->fast_forward_to(std::move(pr));
}
virtual future<> next_partition() override {
@@ -91,17 +91,17 @@ public:
return f;
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
if (!_underlying) {
return _populate_reader(timeout).then([this, timeout, &pr] (flat_mutation_reader&& rd) mutable {
return _populate_reader().then([this, &pr] (flat_mutation_reader&& rd) mutable {
_underlying = std::make_unique<flat_mutation_reader>(std::move(rd));
return fast_forward_to(pr, timeout);
return fast_forward_to(pr);
});
}
_end_of_stream = false;
clear_buffer();
return _underlying->fast_forward_to(pr, timeout);
return _underlying->fast_forward_to(pr);
}
virtual future<> close() noexcept override {


@@ -35,7 +35,6 @@
#include "range.hh"
#include "mutation_fragment.hh"
#include "sstables/sstables.hh"
#include "db/timeout_clock.hh"
#include "database.hh"
#include "db/size_estimates_virtual_reader.hh"
@@ -270,15 +269,15 @@ future<> size_estimates_mutation_reader::close_partition_reader() noexcept {
return _partition_reader ? _partition_reader->close() : make_ready_future<>();
}
future<> size_estimates_mutation_reader::fill_buffer(db::timeout_clock::time_point timeout) {
return do_until([this, timeout] { return is_end_of_stream() || is_buffer_full(); }, [this, timeout] {
future<> size_estimates_mutation_reader::fill_buffer() {
return do_until([this] { return is_end_of_stream() || is_buffer_full(); }, [this] {
if (!_partition_reader) {
return get_next_partition();
}
return _partition_reader->consume_pausable([this] (mutation_fragment mf) {
push_mutation_fragment(std::move(mf));
return stop_iteration(is_buffer_full());
}, timeout).then([this] {
}).then([this] {
if (_partition_reader->is_end_of_stream() && _partition_reader->is_buffer_empty()) {
return _partition_reader->close();
}
@@ -295,7 +294,7 @@ future<> size_estimates_mutation_reader::next_partition() {
return make_ready_future<>();
}
future<> size_estimates_mutation_reader::fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) {
future<> size_estimates_mutation_reader::fast_forward_to(const dht::partition_range& pr) {
clear_buffer();
_prange = &pr;
_keyspaces = std::nullopt;
@@ -303,11 +302,11 @@ future<> size_estimates_mutation_reader::fast_forward_to(const dht::partition_ra
return close_partition_reader();
}
future<> size_estimates_mutation_reader::fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) {
future<> size_estimates_mutation_reader::fast_forward_to(position_range pr) {
forward_buffer_to(pr.start());
_end_of_stream = false;
if (_partition_reader) {
return _partition_reader->fast_forward_to(std::move(pr), timeout);
return _partition_reader->fast_forward_to(std::move(pr));
}
return make_ready_future<>();
}


@@ -47,10 +47,10 @@ class size_estimates_mutation_reader final : public flat_mutation_reader::impl {
public:
size_estimates_mutation_reader(database& db, schema_ptr, reader_permit, const dht::partition_range&, const query::partition_slice&, streamed_mutation::forwarding);
virtual future<> fill_buffer(db::timeout_clock::time_point) override;
virtual future<> fill_buffer() override;
virtual future<> next_partition() override;
virtual future<> fast_forward_to(const dht::partition_range&, db::timeout_clock::time_point) override;
virtual future<> fast_forward_to(position_range, db::timeout_clock::time_point) override;
virtual future<> fast_forward_to(const dht::partition_range&) override;
virtual future<> fast_forward_to(position_range) override;
virtual future<> close() noexcept override;
private:
future<> get_next_partition();


@@ -1808,7 +1808,7 @@ public:
.build();
}
future<> execute(std::function<void(mutation)> mutation_sink, db::timeout_clock::time_point timeout) override {
future<> execute(std::function<void(mutation)> mutation_sink) override {
return _ss.get_ownership().then([&, mutation_sink] (std::map<gms::inet_address, float> ownership) {
const locator::token_metadata& tm = _ss.get_token_metadata();
gms::gossiper& gs = gms::get_local_gossiper();


@@ -120,8 +120,8 @@ class build_progress_virtual_reader {
return clustering_key_prefix::from_exploded(r);
}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
return _underlying.fill_buffer(timeout).then([this] {
virtual future<> fill_buffer() override {
return _underlying.fill_buffer().then([this] {
_end_of_stream = _underlying.is_end_of_stream();
while (!_underlying.is_buffer_empty()) {
auto mf = _underlying.pop_mutation_fragment();
@@ -171,16 +171,16 @@ class build_progress_virtual_reader {
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
clear_buffer();
_end_of_stream = false;
return _underlying.fast_forward_to(pr, timeout);
return _underlying.fast_forward_to(pr);
}
virtual future<> fast_forward_to(position_range range, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range range) override {
forward_buffer_to(range.start());
_end_of_stream = false;
return _underlying.fast_forward_to(std::move(range), timeout);
return _underlying.fast_forward_to(std::move(range));
}
virtual future<> close() noexcept override {


@@ -926,8 +926,8 @@ future<> view_update_builder::close() noexcept {
}
future<stop_iteration> view_update_builder::advance_all() {
auto existings_f = _existings ? (*_existings)(db::no_timeout) : make_ready_future<optimized_optional<mutation_fragment>>();
return when_all(_updates(db::no_timeout), std::move(existings_f)).then([this] (auto&& fragments) mutable {
auto existings_f = _existings ? (*_existings)() : make_ready_future<optimized_optional<mutation_fragment>>();
return when_all(_updates(), std::move(existings_f)).then([this] (auto&& fragments) mutable {
_update = std::move(std::get<0>(fragments).get0());
_existing = std::move(std::get<1>(fragments).get0());
return stop_iteration::no;
@@ -935,7 +935,7 @@ future<stop_iteration> view_update_builder::advance_all() {
}
future<stop_iteration> view_update_builder::advance_updates() {
return _updates(db::no_timeout).then([this] (auto&& update) mutable {
return _updates().then([this] (auto&& update) mutable {
_update = std::move(update);
return stop_iteration::no;
});
@@ -945,7 +945,7 @@ future<stop_iteration> view_update_builder::advance_existings() {
if (!_existings) {
return make_ready_future<stop_iteration>(stop_iteration::no);
}
return (*_existings)(db::no_timeout).then([this] (auto&& existing) mutable {
return (*_existings)().then([this] (auto&& existing) mutable {
_existing = std::move(existing);
return stop_iteration::no;
});
@@ -1387,7 +1387,7 @@ view_builder::view_builder(database& db, db::system_distributed_keyspace& sys_di
: _db(db)
, _sys_dist_ks(sys_dist_ks)
, _mnotifier(mn)
, _permit(_db.get_reader_concurrency_semaphore().make_tracking_only_permit(nullptr, "view_builder")) {
, _permit(_db.get_reader_concurrency_semaphore().make_tracking_only_permit(nullptr, "view_builder", db::no_timeout)) {
setup_metrics();
}
@@ -2040,7 +2040,7 @@ void view_builder::execute(build_step& step, exponential_backoff_retry r) {
query::max_partitions,
view_builder::consumer{*this, step, now});
consumer.consume_new_partition(step.current_key); // Initialize the state in case we're resuming a partition
auto built = step.reader.consume_in_thread(std::move(consumer), db::no_timeout);
auto built = step.reader.consume_in_thread(std::move(consumer));
_as.check();


@@ -97,7 +97,7 @@ future<> view_update_generator::start() {
::mutation_reader::forwarding::no);
inject_failure("view_update_generator_consume_staging_sstable");
auto result = staging_sstable_reader.consume_in_thread(view_updating_consumer(s, std::move(permit), *t, sstables, _as, staging_sstable_reader_handle), db::no_timeout);
auto result = staging_sstable_reader.consume_in_thread(view_updating_consumer(s, std::move(permit), *t, sstables, _as, staging_sstable_reader_handle));
staging_sstable_reader.close().get();
if (result == stop_iteration::yes) {
break;


@@ -81,14 +81,14 @@ mutation_source memtable_filling_virtual_table::as_mutation_source() {
auto units = make_lw_shared<my_units>(permit.consume_memory(0));
auto populate = [this, mt = make_lw_shared<memtable>(schema()), s, units, range, slice, pc, trace_state, fwd, fwd_mr] (db::timeout_clock::time_point timeout) mutable {
auto populate = [this, mt = make_lw_shared<memtable>(schema()), s, units, range, slice, pc, trace_state, fwd, fwd_mr] () mutable {
auto mutation_sink = [units, mt] (mutation m) mutable {
mt->apply(m);
units->units.add(units->units.permit().consume_memory(mt->occupancy().used_space() - units->memory_used));
units->memory_used = mt->occupancy().used_space();
};
return execute(mutation_sink, timeout).then([this, mt, s, units, &range, &slice, &pc, &trace_state, &fwd, &fwd_mr] () {
return execute(mutation_sink).then([this, mt, s, units, &range, &slice, &pc, &trace_state, &fwd, &fwd_mr] () {
auto rd = mt->as_data_source().make_reader(s, units->units.permit(), range, slice, pc, trace_state, fwd, fwd_mr);
if (!_shard_aware) {


@@ -69,8 +69,8 @@ public:
// Override one of these execute() overloads.
// The handler is always allowed to produce more data than implied by the query_restrictions.
virtual future<> execute(std::function<void(mutation)> mutation_sink, db::timeout_clock::time_point timeout) { return make_ready_future<>(); }
virtual future<> execute(std::function<void(mutation)> mutation_sink, db::timeout_clock::time_point timeout, const query_restrictions&) { return execute(mutation_sink, timeout); }
virtual future<> execute(std::function<void(mutation)> mutation_sink) { return make_ready_future<>(); }
virtual future<> execute(std::function<void(mutation)> mutation_sink, const query_restrictions&) { return execute(mutation_sink); }
mutation_source as_mutation_source() override;
};


@@ -130,13 +130,13 @@ flat_mutation_reader make_reversing_reader(flat_mutation_reader& original, query
push_mutation_fragment(std::move(*std::exchange(_partition_end, std::nullopt)));
return stop_iteration::no;
}
future<stop_iteration> consume_partition_from_source(db::timeout_clock::time_point timeout) {
future<stop_iteration> consume_partition_from_source() {
if (_source->is_buffer_empty()) {
if (_source->is_end_of_stream()) {
_end_of_stream = true;
return make_ready_future<stop_iteration>(stop_iteration::yes);
}
return _source->fill_buffer(timeout).then([] { return stop_iteration::no; });
return _source->fill_buffer().then([] { return stop_iteration::no; });
}
while (!_source->is_buffer_empty() && !is_buffer_full()) {
auto mf = _source->pop_mutation_fragment();
@@ -188,8 +188,8 @@ flat_mutation_reader make_reversing_reader(flat_mutation_reader& original, query
, _max_size(max_size)
{ }
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
return repeat([&, timeout] {
virtual future<> fill_buffer() override {
return repeat([&] {
if (_partition_end) {
// We have consumed full partition from source, now it is
// time to emit it.
@@ -198,7 +198,7 @@ flat_mutation_reader make_reversing_reader(flat_mutation_reader& original, query
return make_ready_future<stop_iteration>(stop_iteration::yes);
}
}
return consume_partition_from_source(timeout);
return consume_partition_from_source();
});
}
@@ -216,11 +216,11 @@ flat_mutation_reader make_reversing_reader(flat_mutation_reader& original, query
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range&, db::timeout_clock::time_point) override {
virtual future<> fast_forward_to(const dht::partition_range&) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> fast_forward_to(position_range, db::timeout_clock::time_point) override {
virtual future<> fast_forward_to(position_range) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
@@ -234,13 +234,13 @@ flat_mutation_reader make_reversing_reader(flat_mutation_reader& original, query
}
template<typename Source>
future<bool> flat_mutation_reader::impl::fill_buffer_from(Source& source, db::timeout_clock::time_point timeout) {
future<bool> flat_mutation_reader::impl::fill_buffer_from(Source& source) {
if (source.is_buffer_empty()) {
if (source.is_end_of_stream()) {
return make_ready_future<bool>(true);
}
return source.fill_buffer(timeout).then([this, &source, timeout] {
return fill_buffer_from(source, timeout);
return source.fill_buffer().then([this, &source] {
return fill_buffer_from(source);
});
} else {
while (!source.is_buffer_empty() && !is_buffer_full()) {
@@ -250,7 +250,7 @@ future<bool> flat_mutation_reader::impl::fill_buffer_from(Source& source, db::ti
}
}
template future<bool> flat_mutation_reader::impl::fill_buffer_from<flat_mutation_reader>(flat_mutation_reader&, db::timeout_clock::time_point);
template future<bool> flat_mutation_reader::impl::fill_buffer_from<flat_mutation_reader>(flat_mutation_reader&);
flat_mutation_reader make_delegating_reader(flat_mutation_reader& r) {
return make_flat_mutation_reader<delegating_reader>(r);
@@ -262,11 +262,11 @@ flat_mutation_reader make_forwardable(flat_mutation_reader m) {
position_range _current;
mutation_fragment_opt _next;
// When resolves, _next is engaged or _end_of_stream is set.
future<> ensure_next(db::timeout_clock::time_point timeout) {
future<> ensure_next() {
if (_next) {
return make_ready_future<>();
}
return _underlying(timeout).then([this] (auto&& mfo) {
return _underlying().then([this] (auto&& mfo) {
_next = std::move(mfo);
if (!_next) {
_end_of_stream = true;
@@ -278,12 +278,12 @@ flat_mutation_reader make_forwardable(flat_mutation_reader m) {
position_in_partition(position_in_partition::partition_start_tag_t()),
position_in_partition(position_in_partition::after_static_row_tag_t())
}) { }
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
return repeat([this, timeout] {
virtual future<> fill_buffer() override {
return repeat([this] {
if (is_buffer_full()) {
return make_ready_future<stop_iteration>(stop_iteration::yes);
}
return ensure_next(timeout).then([this] {
return ensure_next().then([this] {
if (is_end_of_stream()) {
return stop_iteration::yes;
}
@@ -301,7 +301,7 @@ flat_mutation_reader make_forwardable(flat_mutation_reader m) {
});
});
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
_current = std::move(pr);
_end_of_stream = false;
forward_buffer_to(_current.start());
@@ -323,7 +323,7 @@ flat_mutation_reader make_forwardable(flat_mutation_reader m) {
};
});
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
_end_of_stream = false;
clear_buffer();
_next = {};
@@ -331,7 +331,7 @@ flat_mutation_reader make_forwardable(flat_mutation_reader m) {
position_in_partition(position_in_partition::partition_start_tag_t()),
position_in_partition(position_in_partition::after_static_row_tag_t())
};
return _underlying.fast_forward_to(pr, timeout);
return _underlying.fast_forward_to(pr);
}
virtual future<> close() noexcept override {
return _underlying.close();
@@ -348,19 +348,19 @@ flat_mutation_reader make_nonforwardable(flat_mutation_reader r, bool single_par
bool is_end_end_of_underlying_stream() const {
return _underlying.is_buffer_empty() && _underlying.is_end_of_stream();
}
future<> on_end_of_underlying_stream(db::timeout_clock::time_point timeout) {
future<> on_end_of_underlying_stream() {
if (!_static_row_done) {
_static_row_done = true;
return _underlying.fast_forward_to(position_range::all_clustered_rows(), timeout);
return _underlying.fast_forward_to(position_range::all_clustered_rows());
}
push_mutation_fragment(*_schema, _permit, partition_end());
if (_single_partition) {
_end_of_stream = true;
return make_ready_future<>();
}
return _underlying.next_partition().then([this, timeout] {
return _underlying.next_partition().then([this] {
_static_row_done = false;
return _underlying.fill_buffer(timeout).then([this] {
return _underlying.fill_buffer().then([this] {
_end_of_stream = is_end_end_of_underlying_stream();
});
});
@@ -371,17 +371,17 @@ flat_mutation_reader make_nonforwardable(flat_mutation_reader r, bool single_par
, _underlying(std::move(r))
, _single_partition(single_partition)
{ }
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
return do_until([this] { return is_end_of_stream() || is_buffer_full(); }, [this, timeout] {
return fill_buffer_from(_underlying, timeout).then([this, timeout] (bool underlying_finished) {
virtual future<> fill_buffer() override {
return do_until([this] { return is_end_of_stream() || is_buffer_full(); }, [this] {
return fill_buffer_from(_underlying).then([this] (bool underlying_finished) {
if (underlying_finished) {
return on_end_of_underlying_stream(timeout);
return on_end_of_underlying_stream();
}
return make_ready_future<>();
});
});
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> next_partition() override {
@@ -394,10 +394,10 @@ flat_mutation_reader make_nonforwardable(flat_mutation_reader r, bool single_par
_end_of_stream = is_end_end_of_underlying_stream();
});
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
_end_of_stream = false;
clear_buffer();
return _underlying.fast_forward_to(pr, timeout);
return _underlying.fast_forward_to(pr);
}
virtual future<> close() noexcept override {
return _underlying.close();
@@ -409,10 +409,10 @@ flat_mutation_reader make_nonforwardable(flat_mutation_reader r, bool single_par
class empty_flat_reader final : public flat_mutation_reader::impl {
public:
empty_flat_reader(schema_ptr s, reader_permit permit) : impl(std::move(s), std::move(permit)) { _end_of_stream = true; }
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override { return make_ready_future<>(); }
virtual future<> fill_buffer() override { return make_ready_future<>(); }
virtual future<> next_partition() override { return make_ready_future<>(); }
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override { return make_ready_future<>(); };
virtual future<> fast_forward_to(position_range cr, db::timeout_clock::time_point timeout) override { return make_ready_future<>(); };
virtual future<> fast_forward_to(const dht::partition_range& pr) override { return make_ready_future<>(); };
virtual future<> fast_forward_to(position_range cr) override { return make_ready_future<>(); };
virtual future<> close() noexcept override { return make_ready_future<>(); }
};
@@ -486,7 +486,7 @@ flat_mutation_reader_from_mutations(reader_permit permit, std::vector<mutation>
return { };
}
private:
void do_fill_buffer(db::timeout_clock::time_point timeout) {
void do_fill_buffer() {
while (!is_end_of_stream() && !is_buffer_full()) {
if (!_static_row_done) {
_static_row_done = true;
@@ -577,7 +577,7 @@ flat_mutation_reader_from_mutations(reader_permit permit, std::vector<mutation>
auto mutation_destroyer = defer([this] () noexcept { destroy_mutations(); });
start_new_partition();
do_fill_buffer(db::no_timeout);
do_fill_buffer();
mutation_destroyer.cancel();
}
@@ -597,8 +597,8 @@ flat_mutation_reader_from_mutations(reader_permit permit, std::vector<mutation>
~reader() {
destroy_mutations();
}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
do_fill_buffer(timeout);
virtual future<> fill_buffer() override {
do_fill_buffer();
return make_ready_future<>();
}
virtual future<> next_partition() override {
@@ -614,7 +614,7 @@ flat_mutation_reader_from_mutations(reader_permit permit, std::vector<mutation>
}
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
clear_buffer();
_cur = find_first_partition(_mutations, pr);
_end = find_last_partition(_mutations, pr);
@@ -627,7 +627,7 @@ flat_mutation_reader_from_mutations(reader_permit permit, std::vector<mutation>
}
return make_ready_future<>();
};
virtual future<> fast_forward_to(position_range cr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range cr) override {
return make_exception_future<>(std::runtime_error("This reader can't be fast forwarded to another position."));
};
virtual future<> close() noexcept override {
@@ -669,7 +669,7 @@ public:
, _trace_state(std::move(trace_state)) {
_end_of_stream = true;
}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
virtual future<> fill_buffer() override {
if (!_reader) {
return make_ready_future<>();
}
@@ -678,13 +678,13 @@ public:
_end_of_stream = true;
return make_ready_future<>();
} else {
return _reader->fill_buffer(timeout).then([this, timeout] { return fill_buffer(timeout); });
return _reader->fill_buffer().then([this] { return fill_buffer(); });
}
}
_reader->move_buffer_content_to(*this);
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
if (!_reader) {
_reader = _source.make_reader(_schema, _permit, pr, _slice, _pc, std::move(_trace_state), streamed_mutation::forwarding::no,
mutation_reader::forwarding::yes);
@@ -694,9 +694,9 @@ public:
clear_buffer();
_end_of_stream = false;
return _reader->fast_forward_to(pr, timeout);
return _reader->fast_forward_to(pr);
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> next_partition() override {
@@ -742,9 +742,9 @@ public:
{
}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
return do_until([this] { return is_end_of_stream() || !is_buffer_empty(); }, [this, timeout] {
return _reader.fill_buffer(timeout).then([this, timeout] () {
virtual future<> fill_buffer() override {
return do_until([this] { return is_end_of_stream() || !is_buffer_empty(); }, [this] {
return _reader.fill_buffer().then([this] () {
while (!_reader.is_buffer_empty()) {
push_mutation_fragment(_reader.pop_mutation_fragment());
}
@@ -752,7 +752,7 @@ public:
return make_ready_future<>();
}
if (auto r = next()) {
return _reader.fast_forward_to(*r, timeout);
return _reader.fast_forward_to(*r);
} else {
_end_of_stream = true;
return make_ready_future<>();
@@ -761,15 +761,15 @@ public:
});
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
clear_buffer();
_end_of_stream = false;
return _reader.fast_forward_to(pr, timeout).then([this] {
return _reader.fast_forward_to(pr).then([this] {
_generator.reset();
});
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
@@ -903,7 +903,7 @@ make_flat_mutation_reader_from_fragments(schema_ptr schema, reader_permit permit
, _cmp(*_schema) {
do_fast_forward_to(*_pr);
}
virtual future<> fill_buffer(db::timeout_clock::time_point) override {
virtual future<> fill_buffer() override {
while (!(_end_of_stream = end_of_range()) && !is_buffer_full()) {
push_mutation_fragment(std::move(_fragments.front()));
_fragments.pop_front();
@@ -919,10 +919,10 @@ make_flat_mutation_reader_from_fragments(schema_ptr schema, reader_permit permit
}
return make_ready_future<>();
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
throw std::runtime_error("This reader can't be fast forwarded to another range.");
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
do_fast_forward_to(pr);
return make_ready_future<>();
}
@@ -992,13 +992,13 @@ make_slicing_filtering_reader(flat_mutation_reader rd, const dht::partition_rang
, _cmp(*_schema) {
}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
virtual future<> fill_buffer() override {
const auto consume_fn = [this] (mutation_fragment mf) {
push_mutation_fragment(std::move(mf));
};
while (!is_buffer_full() && !is_end_of_stream()) {
co_await _rd.fill_buffer(timeout);
co_await _rd.fill_buffer();
while (!_rd.is_buffer_empty()) {
auto mf = _rd.pop_mutation_fragment();
switch (mf.mutation_fragment_kind()) {
@@ -1051,16 +1051,16 @@ make_slicing_filtering_reader(flat_mutation_reader rd, const dht::partition_rang
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
clear_buffer();
_end_of_stream = false;
return _rd.fast_forward_to(pr, timeout);
return _rd.fast_forward_to(pr);
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
forward_buffer_to(pr.start());
_end_of_stream = false;
return _rd.fast_forward_to(std::move(pr), timeout);
return _rd.fast_forward_to(std::move(pr));
}
virtual future<> close() noexcept override {
@@ -1082,7 +1082,7 @@ public:
generating_reader(schema_ptr s, reader_permit permit, std::function<future<mutation_fragment_opt> ()> get_next_fragment)
: impl(std::move(s), std::move(permit)), _get_next_fragment(std::move(get_next_fragment))
{ }
virtual future<> fill_buffer(db::timeout_clock::time_point) override {
virtual future<> fill_buffer() override {
return do_until([this] { return is_end_of_stream() || is_buffer_full(); }, [this] {
return _get_next_fragment().then([this] (mutation_fragment_opt mopt) {
if (!mopt) {
@@ -1096,10 +1096,10 @@ public:
virtual future<> next_partition() override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> fast_forward_to(const dht::partition_range&, db::timeout_clock::time_point) override {
virtual future<> fast_forward_to(const dht::partition_range&) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> fast_forward_to(position_range, db::timeout_clock::time_point) override {
virtual future<> fast_forward_to(position_range) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> close() noexcept override {
@@ -1359,13 +1359,13 @@ void flat_mutation_reader_v2::impl::clear_buffer_to_next_partition() {
}
template<typename Source>
future<bool> flat_mutation_reader_v2::impl::fill_buffer_from(Source& source, db::timeout_clock::time_point timeout) {
future<bool> flat_mutation_reader_v2::impl::fill_buffer_from(Source& source) {
if (source.is_buffer_empty()) {
if (source.is_end_of_stream()) {
return make_ready_future<bool>(true);
}
return source.fill_buffer(timeout).then([this, &source, timeout] {
return fill_buffer_from(source, timeout);
return source.fill_buffer().then([this, &source] {
return fill_buffer_from(source);
});
} else {
while (!source.is_buffer_empty() && !is_buffer_full()) {
@@ -1375,14 +1375,14 @@ future<bool> flat_mutation_reader_v2::impl::fill_buffer_from(Source& source, db:
}
}
template future<bool> flat_mutation_reader_v2::impl::fill_buffer_from<flat_mutation_reader_v2>(flat_mutation_reader_v2&, db::timeout_clock::time_point);
template future<bool> flat_mutation_reader_v2::impl::fill_buffer_from<flat_mutation_reader_v2>(flat_mutation_reader_v2&);
void flat_mutation_reader_v2::do_upgrade_schema(const schema_ptr& s) {
*this = transform(std::move(*this), schema_upgrader_v2(s));
}
future<mutation_opt> read_mutation_from_flat_mutation_reader(flat_mutation_reader_v2& r, db::timeout_clock::time_point timeout) {
return r.consume(mutation_rebuilder_v2(r.schema()), timeout);
future<mutation_opt> read_mutation_from_flat_mutation_reader(flat_mutation_reader_v2& r) {
return r.consume(mutation_rebuilder_v2(r.schema()));
}
void flat_mutation_reader_v2::on_close_error(std::unique_ptr<impl> i, std::exception_ptr ep) noexcept {
@@ -1431,11 +1431,11 @@ flat_mutation_reader downgrade_to_v1(flat_mutation_reader_v2 r) {
: impl(r.schema(), r.permit())
, _reader(std::move(r))
{}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
virtual future<> fill_buffer() override {
if (_end_of_stream) {
return make_ready_future<>();
}
return _reader.consume_pausable(consumer{this}, timeout).then([this] {
return _reader.consume_pausable(consumer{this}).then([this] {
if (_reader.is_end_of_stream()) {
_rt_assembler.on_end_of_stream();
_end_of_stream = true;
@@ -1458,16 +1458,16 @@ flat_mutation_reader downgrade_to_v1(flat_mutation_reader_v2 r) {
}
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
clear_buffer();
// As in next_partition(), current partitions' state of having an active range tombstone is irrelevant for the
// partition range that we are forwarding to. Here, it is guaranteed that is_buffer_empty().
_rt_assembler.reset();
_end_of_stream = false;
return _reader.fast_forward_to(pr, timeout);
return _reader.fast_forward_to(pr);
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
clear_buffer();
// It is guaranteed that at the beginning of `pr`, all the `range_tombstone`s active at the beginning of `pr`
@@ -1475,7 +1475,7 @@ flat_mutation_reader downgrade_to_v1(flat_mutation_reader_v2 r) {
// Here, it is guaranteed that is_buffer_empty().
_rt_assembler.reset();
_end_of_stream = false;
return _reader.fast_forward_to(std::move(pr), timeout);
return _reader.fast_forward_to(std::move(pr));
}
virtual future<> close() noexcept override {
return _reader.close();
@@ -1540,11 +1540,11 @@ flat_mutation_reader_v2 upgrade_to_v2(flat_mutation_reader r) {
, _reader(std::move(r))
, _rt_gen(*_schema)
{}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
virtual future<> fill_buffer() override {
if (_end_of_stream) {
return make_ready_future<>();
}
return _reader.consume_pausable(consumer{this}, timeout).then([this] {
return _reader.consume_pausable(consumer{this}).then([this] {
if (_reader.is_end_of_stream() && _reader.is_buffer_empty()) {
if (_pr) {
// If !_pr we should flush on partition_end
@@ -1565,12 +1565,12 @@ flat_mutation_reader_v2 upgrade_to_v2(flat_mutation_reader r) {
}
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
clear_buffer();
_end_of_stream = false;
return _reader.fast_forward_to(pr, timeout);
return _reader.fast_forward_to(pr);
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
clear_buffer();
// r is used to trim range tombstones and range_tombstone:s can be trimmed only to positions
// which are !is_clustering_row(). Replace with equivalent ranges.
@@ -1585,7 +1585,7 @@ flat_mutation_reader_v2 upgrade_to_v2(flat_mutation_reader r) {
_current_rt = {};
_pr = pr;
_end_of_stream = false;
return _reader.fast_forward_to(std::move(pr), timeout);
return _reader.fast_forward_to(std::move(pr));
}
virtual future<> close() noexcept override {
return _reader.close();


@@ -32,7 +32,6 @@
#include <seastar/core/thread.hh>
#include <seastar/core/file.hh>
#include "db/timeout_clock.hh"
#include "reader_permit.hh"
#include <deque>
@@ -163,7 +162,7 @@ public:
void forward_buffer_to(const position_in_partition& pos);
void clear_buffer_to_next_partition();
template<typename Source>
future<bool> fill_buffer_from(Source&, db::timeout_clock::time_point);
future<bool> fill_buffer_from(Source&);
// When succeeds, makes sure that the next push_mutation_fragment() will not fail.
void reserve_one() {
if (_buffer.capacity() == _buffer.size()) {
@@ -176,7 +175,7 @@ public:
public:
impl(schema_ptr s, reader_permit permit) : _buffer(permit), _schema(std::move(s)), _permit(std::move(permit)) { }
virtual ~impl() {}
virtual future<> fill_buffer(db::timeout_clock::time_point) = 0;
virtual future<> fill_buffer() = 0;
virtual future<> next_partition() = 0;
bool is_end_of_stream() const { return _end_of_stream; }
@@ -197,12 +196,12 @@ public:
_buffer_size += memory_usage;
}
future<mutation_fragment_opt> operator()(db::timeout_clock::time_point timeout) {
future<mutation_fragment_opt> operator()() {
if (is_buffer_empty()) {
if (is_end_of_stream()) {
return make_ready_future<mutation_fragment_opt>();
}
return fill_buffer(timeout).then([this, timeout] { return operator()(timeout); });
return fill_buffer().then([this] { return operator()(); });
}
return make_ready_future<mutation_fragment_opt>(pop_mutation_fragment());
}
@@ -211,13 +210,13 @@ public:
requires FlatMutationReaderConsumer<Consumer>
// Stops when consumer returns stop_iteration::yes or end of stream is reached.
// Next call will start from the next mutation_fragment in the stream.
future<> consume_pausable(Consumer consumer, db::timeout_clock::time_point timeout) {
return repeat([this, consumer = std::move(consumer), timeout] () mutable {
future<> consume_pausable(Consumer consumer) {
return repeat([this, consumer = std::move(consumer)] () mutable {
if (is_buffer_empty()) {
if (is_end_of_stream()) {
return make_ready_future<stop_iteration>(stop_iteration::yes);
}
return fill_buffer(timeout).then([] {
return fill_buffer().then([] {
return make_ready_future<stop_iteration>(stop_iteration::no);
});
}
@@ -238,7 +237,7 @@ public:
// a seastar::thread.
// Partitions for which filter(decorated_key) returns false are skipped
// entirely and never reach the consumer.
void consume_pausable_in_thread(Consumer consumer, Filter filter, db::timeout_clock::time_point timeout) {
void consume_pausable_in_thread(Consumer consumer, Filter filter) {
while (true) {
if (need_preempt()) {
seastar::thread::yield();
@@ -247,7 +246,7 @@ public:
if (is_end_of_stream()) {
return;
}
fill_buffer(timeout).get();
fill_buffer().get();
continue;
}
auto mf = pop_mutation_fragment();
@@ -330,9 +329,9 @@ public:
//
//
// This method returns whatever is returned from Consumer::consume_end_of_stream().S
auto consume(Consumer consumer, db::timeout_clock::time_point timeout) {
return do_with(consumer_adapter<Consumer>(*this, std::move(consumer)), [this, timeout] (consumer_adapter<Consumer>& adapter) {
return consume_pausable(std::ref(adapter), timeout).then([this, &adapter] {
auto consume(Consumer consumer) {
return do_with(consumer_adapter<Consumer>(*this, std::move(consumer)), [this] (consumer_adapter<Consumer>& adapter) {
return consume_pausable(std::ref(adapter)).then([this, &adapter] {
return adapter._consumer.consume_end_of_stream();
});
});
@@ -343,9 +342,9 @@ public:
// A variant of consumee() that expects to be run in a seastar::thread.
// Partitions for which filter(decorated_key) returns false are skipped
// entirely and never reach the consumer.
auto consume_in_thread(Consumer consumer, Filter filter, db::timeout_clock::time_point timeout) {
auto consume_in_thread(Consumer consumer, Filter filter) {
auto adapter = consumer_adapter<Consumer>(*this, std::move(consumer));
consume_pausable_in_thread(std::ref(adapter), std::move(filter), timeout);
consume_pausable_in_thread(std::ref(adapter), std::move(filter));
filter.on_end_of_stream();
return adapter._consumer.consume_end_of_stream();
};
@@ -353,8 +352,8 @@ public:
/*
* fast_forward_to is forbidden on flat_mutation_reader created for a single partition.
*/
virtual future<> fast_forward_to(const dht::partition_range&, db::timeout_clock::time_point timeout) = 0;
virtual future<> fast_forward_to(position_range, db::timeout_clock::time_point timeout) = 0;
virtual future<> fast_forward_to(const dht::partition_range&) = 0;
virtual future<> fast_forward_to(position_range) = 0;
// close should cancel any outstanding background operations,
// if possible, and wait on them to complete.
@@ -388,11 +387,19 @@ public:
}
}
static void maybe_timed_out(db::timeout_clock::time_point timeout) {
if (db::timeout_clock::now() >= timeout) {
void maybe_timed_out() {
if (db::timeout_clock::now() >= timeout()) {
throw timed_out_error();
}
}
db::timeout_clock::time_point timeout() const noexcept {
return _permit.timeout();
}
void set_timeout(db::timeout_clock::time_point timeout) noexcept {
_permit.set_timeout(timeout);
}
};
private:
std::unique_ptr<impl> _impl;
@@ -416,20 +423,20 @@ public:
~flat_mutation_reader();
future<mutation_fragment_opt> operator()(db::timeout_clock::time_point timeout) {
return _impl->operator()(timeout);
future<mutation_fragment_opt> operator()() {
return _impl->operator()();
}
template <typename Consumer>
requires FlatMutationReaderConsumer<Consumer>
auto consume_pausable(Consumer consumer, db::timeout_clock::time_point timeout) {
return _impl->consume_pausable(std::move(consumer), timeout);
auto consume_pausable(Consumer consumer) {
return _impl->consume_pausable(std::move(consumer));
}
template <typename Consumer>
requires FlattenedConsumer<Consumer>
auto consume(Consumer consumer, db::timeout_clock::time_point timeout) {
return _impl->consume(std::move(consumer), timeout);
auto consume(Consumer consumer) {
return _impl->consume(std::move(consumer));
}
class filter {
@@ -479,14 +486,14 @@ public:
template<typename Consumer, typename Filter>
requires FlattenedConsumer<Consumer> && FlattenedConsumerFilter<Filter>
auto consume_in_thread(Consumer consumer, Filter filter, db::timeout_clock::time_point timeout) {
return _impl->consume_in_thread(std::move(consumer), std::move(filter), timeout);
auto consume_in_thread(Consumer consumer, Filter filter) {
return _impl->consume_in_thread(std::move(consumer), std::move(filter));
}
template<typename Consumer>
requires FlattenedConsumer<Consumer>
auto consume_in_thread(Consumer consumer, db::timeout_clock::time_point timeout) {
return consume_in_thread(std::move(consumer), no_filter{}, timeout);
auto consume_in_thread(Consumer consumer) {
return consume_in_thread(std::move(consumer), no_filter{});
}
// Skips to the next partition.
@@ -507,7 +514,7 @@ public:
// `operator()()` calls.
future<> next_partition() { return _impl->next_partition(); }
future<> fill_buffer(db::timeout_clock::time_point timeout) { return _impl->fill_buffer(timeout); }
future<> fill_buffer() { return _impl->fill_buffer(); }
// Changes the range of partitions to pr. The range can only be moved
// forwards. pr.begin() needs to be larger than pr.end() of the previousl
@@ -515,8 +522,8 @@ public:
// previous fast forward target).
// pr needs to be valid until the reader is destroyed or fast_forward_to()
// is called again.
future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) {
return _impl->fast_forward_to(pr, timeout);
future<> fast_forward_to(const dht::partition_range& pr) {
return _impl->fast_forward_to(pr);
}
// Skips to a later range of rows.
// The new range must not overlap with the current range.
@@ -544,8 +551,8 @@ public:
// and it affects the set of fragments returned from that partition.
// In particular one must first enter a partition by fetching a `partition_start`
// fragment before calling `fast_forward_to`.
future<> fast_forward_to(position_range cr, db::timeout_clock::time_point timeout) {
return _impl->fast_forward_to(std::move(cr), timeout);
future<> fast_forward_to(position_range cr) {
return _impl->fast_forward_to(std::move(cr));
}
// Closes the reader.
//
@@ -577,21 +584,23 @@ public:
void unpop_mutation_fragment(mutation_fragment mf) { _impl->unpop_mutation_fragment(std::move(mf)); }
const schema_ptr& schema() const { return _impl->_schema; }
const reader_permit& permit() const { return _impl->_permit; }
db::timeout_clock::time_point timeout() const noexcept { return _impl->timeout(); }
void set_timeout(db::timeout_clock::time_point timeout) noexcept { _impl->set_timeout(timeout); }
void set_max_buffer_size(size_t size) {
_impl->max_buffer_size_in_bytes = size;
}
// Resolves with a pointer to the next fragment in the stream without consuming it from the stream,
// or nullptr if there are no more fragments.
// The returned pointer is invalidated by any other non-const call to this object.
future<mutation_fragment*> peek(db::timeout_clock::time_point timeout) {
future<mutation_fragment*> peek() {
if (!is_buffer_empty()) {
return make_ready_future<mutation_fragment*>(&_impl->_buffer.front());
}
if (is_end_of_stream()) {
return make_ready_future<mutation_fragment*>(nullptr);
}
return fill_buffer(timeout).then([this, timeout] {
return peek(timeout);
return fill_buffer().then([this] {
return peek();
});
}
// A peek at the next fragment in the buffer.
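// Illustrative peek() usage (sketch; the partition-start check is made up). The
// returned pointer must be used before any further non-const call on the reader.
auto f = rd.peek().then([&rd] (mutation_fragment* mf) {
    if (mf && mf->is_partition_start()) {
        rd.pop_mutation_fragment();   // consume the fragment we just looked at
    }
});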
@@ -670,9 +679,9 @@ future<> consume_mutation_fragments_until(
flat_mutation_reader& r,
StopCondition&& stop,
ConsumeMutationFragment&& consume_mf,
ConsumeEndOfStream&& consume_eos,
db::timeout_clock::time_point timeout) {
return do_until([stop] { return stop(); }, [&r, stop, consume_mf, consume_eos, timeout] {
ConsumeEndOfStream&& consume_eos)
{
return do_until([stop] { return stop(); }, [&r, stop, consume_mf, consume_eos] {
while (!r.is_buffer_empty()) {
consume_mf(r.pop_mutation_fragment());
if (stop() || need_preempt()) {
@@ -682,7 +691,7 @@ future<> consume_mutation_fragments_until(
if (r.is_end_of_stream()) {
return consume_eos();
}
return r.fill_buffer(timeout);
return r.fill_buffer();
});
}
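// Illustrative call of consume_mutation_fragments_until() (sketch; the budget
// counter and the no-op end-of-stream handler are made up).
size_t budget = 1000;
auto f = consume_mutation_fragments_until(r,
        [&budget] { return budget == 0; },
        [&budget] (mutation_fragment mf) { --budget; /* process mf */ },
        [] { return make_ready_future<>(); });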
@@ -706,11 +715,11 @@ flat_mutation_reader transform(flat_mutation_reader r, T t) {
, _reader(std::move(r))
, _t(std::move(t))
{}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
virtual future<> fill_buffer() override {
if (_end_of_stream) {
return make_ready_future<>();
}
return _reader.consume_pausable(consumer{this}, timeout).then([this] {
return _reader.consume_pausable(consumer{this}).then([this] {
if (_reader.is_end_of_stream() && _reader.is_buffer_empty()) {
_end_of_stream = true;
}
@@ -723,15 +732,15 @@ flat_mutation_reader transform(flat_mutation_reader r, T t) {
}
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
clear_buffer();
_end_of_stream = false;
return _reader.fast_forward_to(pr, timeout);
return _reader.fast_forward_to(pr);
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
forward_buffer_to(pr.start());
_end_of_stream = false;
return _reader.fast_forward_to(std::move(pr), timeout);
return _reader.fast_forward_to(std::move(pr));
}
virtual future<> close() noexcept override {
return _reader.close();
@@ -760,19 +769,19 @@ public:
, _underlying_holder(std::move(r))
, _underlying(&*_underlying_holder)
{ }
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
virtual future<> fill_buffer() override {
if (is_buffer_full()) {
return make_ready_future<>();
}
return _underlying->fill_buffer(timeout).then([this] {
return _underlying->fill_buffer().then([this] {
_end_of_stream = _underlying->is_end_of_stream();
_underlying->move_buffer_content_to(*this);
});
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
_end_of_stream = false;
forward_buffer_to(pr.start());
return _underlying->fast_forward_to(std::move(pr), timeout);
return _underlying->fast_forward_to(std::move(pr));
}
virtual future<> next_partition() override {
clear_buffer_to_next_partition();
@@ -784,10 +793,10 @@ public:
_end_of_stream = _underlying->is_end_of_stream() && _underlying->is_buffer_empty();
});
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
_end_of_stream = false;
clear_buffer();
return _underlying->fast_forward_to(pr, timeout);
return _underlying->fast_forward_to(pr);
}
virtual future<> close() noexcept override {
return _underlying_holder ? _underlying_holder->close() : make_ready_future<>();
@@ -863,12 +872,12 @@ make_flat_mutation_reader_from_fragments(schema_ptr, reader_permit, std::deque<m
// The returned future<> resolves when consumption ends.
template <typename Consumer>
inline
future<> consume_partitions(flat_mutation_reader& reader, Consumer consumer, db::timeout_clock::time_point timeout) {
future<> consume_partitions(flat_mutation_reader& reader, Consumer consumer) {
static_assert(std::is_same<future<stop_iteration>, futurize_t<std::result_of_t<Consumer(mutation&&)>>>::value, "bad Consumer signature");
return do_with(std::move(consumer), [&reader, timeout] (Consumer& c) -> future<> {
return repeat([&reader, &c, timeout] () {
return read_mutation_from_flat_mutation_reader(reader, timeout).then([&c] (mutation_opt&& mo) -> future<stop_iteration> {
return do_with(std::move(consumer), [&reader] (Consumer& c) -> future<> {
return repeat([&reader, &c] () {
return read_mutation_from_flat_mutation_reader(reader).then([&c] (mutation_opt&& mo) -> future<stop_iteration> {
if (!mo) {
return make_ready_future<stop_iteration>(stop_iteration::yes);
}

View File

@@ -35,7 +35,6 @@
#include <seastar/core/thread.hh>
#include <seastar/core/file.hh>
#include "db/timeout_clock.hh"
#include "reader_permit.hh"
#include <deque>
@@ -204,7 +203,7 @@ public:
void forward_buffer_to(const position_in_partition& pos);
void clear_buffer_to_next_partition();
template<typename Source>
future<bool> fill_buffer_from(Source&, db::timeout_clock::time_point);
future<bool> fill_buffer_from(Source&);
// When it succeeds, makes sure that the next push_mutation_fragment() will not fail.
void reserve_one() {
if (_buffer.capacity() == _buffer.size()) {
@@ -217,7 +216,7 @@ public:
public:
impl(schema_ptr s, reader_permit permit) : _buffer(permit), _schema(std::move(s)), _permit(std::move(permit)) { }
virtual ~impl() {}
virtual future<> fill_buffer(db::timeout_clock::time_point) = 0;
virtual future<> fill_buffer() = 0;
virtual future<> next_partition() = 0;
bool is_end_of_stream() const { return _end_of_stream; }
@@ -238,12 +237,12 @@ public:
_buffer_size += memory_usage;
}
future<mutation_fragment_v2_opt> operator()(db::timeout_clock::time_point timeout) {
future<mutation_fragment_v2_opt> operator()() {
if (is_buffer_empty()) {
if (is_end_of_stream()) {
return make_ready_future<mutation_fragment_v2_opt>();
}
return fill_buffer(timeout).then([this, timeout] { return operator()(timeout); });
return fill_buffer().then([this] { return operator()(); });
}
return make_ready_future<mutation_fragment_v2_opt>(pop_mutation_fragment());
}
@@ -252,13 +251,13 @@ public:
requires FlatMutationReaderConsumerV2<Consumer>
// Stops when consumer returns stop_iteration::yes or end of stream is reached.
// Next call will start from the next mutation_fragment_v2 in the stream.
future<> consume_pausable(Consumer consumer, db::timeout_clock::time_point timeout) {
return repeat([this, consumer = std::move(consumer), timeout] () mutable {
future<> consume_pausable(Consumer consumer) {
return repeat([this, consumer = std::move(consumer)] () mutable {
if (is_buffer_empty()) {
if (is_end_of_stream()) {
return make_ready_future<stop_iteration>(stop_iteration::yes);
}
return fill_buffer(timeout).then([] {
return fill_buffer().then([] {
return make_ready_future<stop_iteration>(stop_iteration::no);
});
}
@@ -279,7 +278,7 @@ public:
// a seastar::thread.
// Partitions for which filter(decorated_key) returns false are skipped
// entirely and never reach the consumer.
void consume_pausable_in_thread(Consumer consumer, Filter filter, db::timeout_clock::time_point timeout) {
void consume_pausable_in_thread(Consumer consumer, Filter filter) {
while (true) {
if (need_preempt()) {
seastar::thread::yield();
@@ -288,7 +287,7 @@ public:
if (is_end_of_stream()) {
return;
}
fill_buffer(timeout).get();
fill_buffer().get();
continue;
}
auto mf = pop_mutation_fragment();
@@ -371,9 +370,9 @@ public:
//
//
// This method returns whatever is returned from Consumer::consume_end_of_stream().
auto consume(Consumer consumer, db::timeout_clock::time_point timeout) {
return do_with(consumer_adapter<Consumer>(*this, std::move(consumer)), [this, timeout] (consumer_adapter<Consumer>& adapter) {
return consume_pausable(std::ref(adapter), timeout).then([this, &adapter] {
auto consume(Consumer consumer) {
return do_with(consumer_adapter<Consumer>(*this, std::move(consumer)), [this] (consumer_adapter<Consumer>& adapter) {
return consume_pausable(std::ref(adapter)).then([this, &adapter] {
return adapter._consumer.consume_end_of_stream();
});
});
@@ -384,9 +383,9 @@ public:
// A variant of consume() that expects to be run in a seastar::thread.
// Partitions for which filter(decorated_key) returns false are skipped
// entirely and never reach the consumer.
auto consume_in_thread(Consumer consumer, Filter filter, db::timeout_clock::time_point timeout) {
auto consume_in_thread(Consumer consumer, Filter filter) {
auto adapter = consumer_adapter<Consumer>(*this, std::move(consumer));
consume_pausable_in_thread(std::ref(adapter), std::move(filter), timeout);
consume_pausable_in_thread(std::ref(adapter), std::move(filter));
filter.on_end_of_stream();
return adapter._consumer.consume_end_of_stream();
};
@@ -394,8 +393,8 @@ public:
/*
* fast_forward_to is forbidden on flat_mutation_reader_v2 created for a single partition.
*/
virtual future<> fast_forward_to(const dht::partition_range&, db::timeout_clock::time_point timeout) = 0;
virtual future<> fast_forward_to(position_range, db::timeout_clock::time_point timeout) = 0;
virtual future<> fast_forward_to(const dht::partition_range&) = 0;
virtual future<> fast_forward_to(position_range) = 0;
// close should cancel any outstanding background operations,
// if possible, and wait on them to complete.
@@ -429,11 +428,19 @@ public:
}
}
static void maybe_timed_out(db::timeout_clock::time_point timeout) {
if (db::timeout_clock::now() >= timeout) {
void maybe_timed_out() {
if (db::timeout_clock::now() >= timeout()) {
throw timed_out_error();
}
}
db::timeout_clock::time_point timeout() const noexcept {
return _permit.timeout();
}
void set_timeout(db::timeout_clock::time_point timeout) noexcept {
_permit.set_timeout(timeout);
}
};
private:
std::unique_ptr<impl> _impl;
@@ -457,20 +464,20 @@ public:
~flat_mutation_reader_v2();
future<mutation_fragment_v2_opt> operator()(db::timeout_clock::time_point timeout) {
return _impl->operator()(timeout);
future<mutation_fragment_v2_opt> operator()() {
return _impl->operator()();
}
template <typename Consumer>
requires FlatMutationReaderConsumerV2<Consumer>
auto consume_pausable(Consumer consumer, db::timeout_clock::time_point timeout) {
return _impl->consume_pausable(std::move(consumer), timeout);
auto consume_pausable(Consumer consumer) {
return _impl->consume_pausable(std::move(consumer));
}
template <typename Consumer>
requires FlattenedConsumerV2<Consumer>
auto consume(Consumer consumer, db::timeout_clock::time_point timeout) {
return _impl->consume(std::move(consumer), timeout);
auto consume(Consumer consumer) {
return _impl->consume(std::move(consumer));
}
class filter {
@@ -520,14 +527,14 @@ public:
template<typename Consumer, typename Filter>
requires FlattenedConsumerV2<Consumer> && FlattenedConsumerFilterV2<Filter>
auto consume_in_thread(Consumer consumer, Filter filter, db::timeout_clock::time_point timeout) {
return _impl->consume_in_thread(std::move(consumer), std::move(filter), timeout);
auto consume_in_thread(Consumer consumer, Filter filter) {
return _impl->consume_in_thread(std::move(consumer), std::move(filter));
}
template<typename Consumer>
requires FlattenedConsumerV2<Consumer>
auto consume_in_thread(Consumer consumer, db::timeout_clock::time_point timeout) {
return consume_in_thread(std::move(consumer), no_filter{}, timeout);
auto consume_in_thread(Consumer consumer) {
return consume_in_thread(std::move(consumer), no_filter{});
}
// Skips to the next partition.
@@ -548,7 +555,7 @@ public:
// `operator()()` calls.
future<> next_partition() { return _impl->next_partition(); }
future<> fill_buffer(db::timeout_clock::time_point timeout) { return _impl->fill_buffer(timeout); }
future<> fill_buffer() { return _impl->fill_buffer(); }
// Changes the range of partitions to pr. The range can only be moved
// forwards. pr.begin() needs to be larger than pr.end() of the previously
@@ -556,8 +563,8 @@ public:
// previous fast forward target).
// pr needs to be valid until the reader is destroyed or fast_forward_to()
// is called again.
future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) {
return _impl->fast_forward_to(pr, timeout);
future<> fast_forward_to(const dht::partition_range& pr) {
return _impl->fast_forward_to(pr);
}
// Skips to a later range of rows.
// The new range must not overlap with the current range.
@@ -585,8 +592,8 @@ public:
// and it affects the set of fragments returned from that partition.
// In particular one must first enter a partition by fetching a `partition_start`
// fragment before calling `fast_forward_to`.
future<> fast_forward_to(position_range cr, db::timeout_clock::time_point timeout) {
return _impl->fast_forward_to(std::move(cr), timeout);
future<> fast_forward_to(position_range cr) {
return _impl->fast_forward_to(std::move(cr));
}
// Closes the reader.
//
@@ -621,21 +628,23 @@ public:
void unpop_mutation_fragment(mutation_fragment_v2 mf) { _impl->unpop_mutation_fragment(std::move(mf)); }
const schema_ptr& schema() const { return _impl->_schema; }
const reader_permit& permit() const { return _impl->_permit; }
db::timeout_clock::time_point timeout() const noexcept { return _impl->timeout(); }
void set_timeout(db::timeout_clock::time_point timeout) noexcept { _impl->set_timeout(timeout); }
void set_max_buffer_size(size_t size) {
_impl->max_buffer_size_in_bytes = size;
}
// Resolves with a pointer to the next fragment in the stream without consuming it from the stream,
// or nullptr if there are no more fragments.
// The returned pointer is invalidated by any other non-const call to this object.
future<mutation_fragment_v2*> peek(db::timeout_clock::time_point timeout) {
future<mutation_fragment_v2*> peek() {
if (!is_buffer_empty()) {
return make_ready_future<mutation_fragment_v2*>(&_impl->_buffer.front());
}
if (is_end_of_stream()) {
return make_ready_future<mutation_fragment_v2*>(nullptr);
}
return fill_buffer(timeout).then([this, timeout] {
return peek(timeout);
return fill_buffer().then([this] {
return peek();
});
}
// A peek at the next fragment in the buffer.
@@ -699,9 +708,9 @@ future<> consume_mutation_fragments_until(
flat_mutation_reader_v2& r,
StopCondition&& stop,
ConsumeMutationFragment&& consume_mf,
ConsumeEndOfStream&& consume_eos,
db::timeout_clock::time_point timeout) {
return do_until([stop] { return stop(); }, [&r, stop, consume_mf, consume_eos, timeout] {
ConsumeEndOfStream&& consume_eos)
{
return do_until([stop] { return stop(); }, [&r, stop, consume_mf, consume_eos] {
while (!r.is_buffer_empty()) {
consume_mf(r.pop_mutation_fragment());
if (stop() || need_preempt()) {
@@ -711,7 +720,7 @@ future<> consume_mutation_fragments_until(
if (r.is_end_of_stream()) {
return consume_eos();
}
return r.fill_buffer(timeout);
return r.fill_buffer();
});
}
@@ -735,11 +744,11 @@ flat_mutation_reader_v2 transform(flat_mutation_reader_v2 r, T t) {
, _reader(std::move(r))
, _t(std::move(t))
{}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
virtual future<> fill_buffer() override {
if (_end_of_stream) {
return make_ready_future<>();
}
return _reader.consume_pausable(consumer{this}, timeout).then([this] {
return _reader.consume_pausable(consumer{this}).then([this] {
if (_reader.is_end_of_stream()) {
_end_of_stream = true;
}
@@ -752,15 +761,15 @@ flat_mutation_reader_v2 transform(flat_mutation_reader_v2 r, T t) {
}
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
clear_buffer();
_end_of_stream = false;
return _reader.fast_forward_to(pr, timeout);
return _reader.fast_forward_to(pr);
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
forward_buffer_to(pr.start());
_end_of_stream = false;
return _reader.fast_forward_to(std::move(pr), timeout);
return _reader.fast_forward_to(std::move(pr));
}
virtual future<> close() noexcept override {
return _reader.close();
@@ -776,4 +785,4 @@ flat_mutation_reader downgrade_to_v1(flat_mutation_reader_v2);
flat_mutation_reader_v2 upgrade_to_v2(flat_mutation_reader);
// Reads a single partition from a reader. Returns empty optional if there are no more partitions to be read.
future<mutation_opt> read_mutation_from_flat_mutation_reader(flat_mutation_reader_v2&, db::timeout_clock::time_point timeout);
future<mutation_opt> read_mutation_from_flat_mutation_reader(flat_mutation_reader_v2&);

View File

@@ -274,7 +274,7 @@ future<> fragment_and_freeze(flat_mutation_reader mr, frozen_mutation_consumer_f
fragmenting_mutation_freezer freezer(*mr.schema(), c, fragment_size);
return do_with(std::move(freezer), [&mr] (auto& freezer) {
return repeat([&] {
return mr(db::no_timeout).then([&] (auto mfopt) {
return mr().then([&] (auto mfopt) {
if (!mfopt) {
return make_ready_future<stop_iteration>(stop_iteration::yes);
}

View File

@@ -21,7 +21,6 @@
#include "database.hh"
#include "db/system_keyspace.hh"
#include "db/timeout_clock.hh"
#include "flat_mutation_reader.hh"
#include "mutation_fragment.hh"
#include "mutation_reader.hh"
@@ -147,8 +146,8 @@ class built_indexes_virtual_reader {
}
}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
return _underlying.fill_buffer(timeout).then([this] {
virtual future<> fill_buffer() override {
return _underlying.fill_buffer().then([this] {
_end_of_stream = _underlying.is_end_of_stream();
while (!_underlying.is_buffer_empty()) {
auto mf = _underlying.pop_mutation_fragment();
@@ -183,13 +182,13 @@ class built_indexes_virtual_reader {
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
clear_buffer();
_end_of_stream = false;
return _underlying.fast_forward_to(pr, timeout);
return _underlying.fast_forward_to(pr);
}
virtual future<> fast_forward_to(position_range range, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range range) override {
forward_buffer_to(range.start());
_end_of_stream = false;
// range contains index names (e.g., xyz) but the underlying table
@@ -214,7 +213,7 @@ class built_indexes_virtual_reader {
std::move(ck));
}
range = position_range(std::move(start), std::move(end));
return _underlying.fast_forward_to(std::move(range), timeout);
return _underlying.fast_forward_to(std::move(range));
}
virtual future<> close() noexcept override {

View File

@@ -366,7 +366,7 @@ protected:
_last = {};
return ret;
}
future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) {
future<> fast_forward_to(const dht::partition_range& pr) {
_range = &pr;
_last = { };
return make_ready_future<>();
@@ -407,8 +407,8 @@ class scanning_reader final : public flat_mutation_reader::impl, private iterato
}
};
future<> fill_buffer_from_delegate(db::timeout_clock::time_point timeout) {
return _delegate->consume_pausable(consumer(this), timeout).then([this] {
future<> fill_buffer_from_delegate() {
return _delegate->consume_pausable(consumer(this)).then([this] {
if (_delegate->is_end_of_stream() && _delegate->is_buffer_empty()) {
if (_delegate_range) {
_end_of_stream = true;
@@ -439,8 +439,8 @@ public:
, _fwd_mr(fwd_mr)
{ }
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
return do_until([this] { return is_end_of_stream() || is_buffer_full(); }, [this, timeout] {
virtual future<> fill_buffer() override {
return do_until([this] { return is_end_of_stream() || is_buffer_full(); }, [this] {
if (!_delegate) {
_delegate_range = get_delegate_range();
if (_delegate_range) {
@@ -475,7 +475,7 @@ public:
}
}
return is_end_of_stream() ? make_ready_future<>() : fill_buffer_from_delegate(timeout);
return is_end_of_stream() ? make_ready_future<>() : fill_buffer_from_delegate();
});
}
virtual future<> next_partition() override {
@@ -489,18 +489,18 @@ public:
}
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
_end_of_stream = false;
clear_buffer();
if (_delegate_range) {
return _delegate->fast_forward_to(pr, timeout);
return _delegate->fast_forward_to(pr);
} else {
return close_delegate().then([this, &pr, timeout] {
return iterator_reader::fast_forward_to(pr, timeout);
return close_delegate().then([this, &pr] {
return iterator_reader::fast_forward_to(pr);
});
}
}
virtual future<> fast_forward_to(position_range cr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range cr) override {
throw std::runtime_error("This reader can't be fast forwarded to another partition.");
};
virtual future<> close() noexcept override {
@@ -628,8 +628,8 @@ private:
return _partition_reader ? _partition_reader->close() : make_ready_future<>();
}
public:
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
return do_until([this] { return is_end_of_stream() || is_buffer_full(); }, [this, timeout] {
virtual future<> fill_buffer() override {
return do_until([this] { return is_end_of_stream() || is_buffer_full(); }, [this] {
if (!_partition_reader) {
get_next_partition();
if (!_partition_reader) {
@@ -640,7 +640,7 @@ public:
return _partition_reader->consume_pausable([this] (mutation_fragment mf) {
push_mutation_fragment(std::move(mf));
return stop_iteration(is_buffer_full());
}, timeout).then([this] {
}).then([this] {
if (_partition_reader->is_end_of_stream() && _partition_reader->is_buffer_empty()) {
return _partition_reader->close();
}
@@ -655,10 +655,10 @@ public:
}
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range&, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range&) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> fast_forward_to(position_range, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> close() noexcept override {
@@ -737,7 +737,7 @@ memtable::apply(memtable& mt, reader_permit permit) {
return consume_partitions(rd, [self = this->shared_from_this(), &rd] (mutation&& m) {
self->apply(m);
return stop_iteration::no;
}, db::no_timeout);
});
});
}

View File

@@ -220,10 +220,10 @@ class read_context : public reader_lifecycle_policy {
public:
read_context(distributed<database>& db, schema_ptr s, const query::read_command& cmd, const dht::partition_range_vector& ranges,
tracing::trace_state_ptr trace_state)
tracing::trace_state_ptr trace_state, db::timeout_clock::time_point timeout)
: _db(db)
, _schema(std::move(s))
, _permit(_db.local().get_reader_concurrency_semaphore().make_tracking_only_permit(_schema.get(), "multishard-mutation-query"))
, _permit(_db.local().get_reader_concurrency_semaphore().make_tracking_only_permit(_schema.get(), "multishard-mutation-query", timeout))
, _cmd(cmd)
, _ranges(ranges)
, _trace_state(std::move(trace_state))
@@ -273,7 +273,7 @@ public:
return _db.local().obtain_reader_permit(std::move(schema), description, timeout);
}
future<> lookup_readers();
future<> lookup_readers(db::timeout_clock::time_point timeout);
future<> save_readers(flat_mutation_reader::tracked_buffer unconsumed_buffer, detached_compaction_state compaction_state,
std::optional<clustering_key_prefix> last_ckey);
@@ -320,6 +320,11 @@ flat_mutation_reader read_context::create_reader(
if (rm.state == reader_state::successful_lookup) {
if (auto reader_opt = semaphore().unregister_inactive_read(std::move(*rm.rparts->handle))) {
rm.state = reader_state::used;
// The saved reader permit is expected to be the same one passed to create_reader,
// as returned from obtain_reader_permit()
if (reader_opt->permit() != permit) {
on_internal_error(mmq_log, "read_context::create_reader(): passed-in permit is different than saved reader's permit");
}
return std::move(*reader_opt);
}
}
@@ -531,16 +536,16 @@ future<> read_context::save_reader(shard_id shard, const dht::decorated_key& las
});
}
future<> read_context::lookup_readers() {
future<> read_context::lookup_readers(db::timeout_clock::time_point timeout) {
if (_cmd.query_uuid == utils::UUID{} || _cmd.is_first_page) {
return make_ready_future<>();
}
return parallel_for_each(boost::irange(0u, smp::count), [this] (shard_id shard) {
return parallel_for_each(boost::irange(0u, smp::count), [this, timeout] (shard_id shard) {
return _db.invoke_on(shard, [this, shard, cmd = &_cmd, ranges = &_ranges, gs = global_schema_ptr(_schema),
gts = tracing::global_trace_state_ptr(_trace_state)] (database& db) mutable {
gts = tracing::global_trace_state_ptr(_trace_state), timeout] (database& db) mutable {
auto schema = gs.get();
auto querier_opt = db.get_querier_cache().lookup_shard_mutation_querier(cmd->query_uuid, *schema, *ranges, cmd->slice, gts.get());
auto querier_opt = db.get_querier_cache().lookup_shard_mutation_querier(cmd->query_uuid, *schema, *ranges, cmd->slice, gts.get(), timeout);
auto& table = db.find_column_family(schema);
auto& semaphore = this->semaphore();
@@ -628,7 +633,6 @@ future<page_consume_result<ResultBuilder>> read_page(
const query::read_command& cmd,
const dht::partition_range_vector& ranges,
tracing::trace_state_ptr trace_state,
db::timeout_clock::time_point timeout,
ResultBuilder&& result_builder) {
auto ms = mutation_source([&] (schema_ptr s,
reader_permit permit,
@@ -649,7 +653,7 @@ future<page_consume_result<ResultBuilder>> read_page(
std::exception_ptr ex;
try {
auto [ckey, result] = co_await query::consume_page(reader, compaction_state, cmd.slice, std::move(result_builder), cmd.get_row_limit(),
cmd.partition_limit, cmd.timestamp, timeout, *cmd.max_result_size);
cmd.partition_limit, cmd.timestamp, *cmd.max_result_size);
auto buffer = reader.detach_buffer();
co_await reader.close();
// page_consume_result cannot fail so there's no risk of double-closing reader.
@@ -670,14 +674,14 @@ future<typename ResultBuilder::result_type> do_query(
tracing::trace_state_ptr trace_state,
db::timeout_clock::time_point timeout,
ResultBuilder&& result_builder) {
auto ctx = seastar::make_shared<read_context>(db, s, cmd, ranges, trace_state);
auto ctx = seastar::make_shared<read_context>(db, s, cmd, ranges, trace_state, timeout);
co_await ctx->lookup_readers();
co_await ctx->lookup_readers(timeout);
std::exception_ptr ex;
try {
auto [last_ckey, result, unconsumed_buffer, compaction_state] = co_await read_page<ResultBuilder>(ctx, s, cmd, ranges, trace_state, timeout,
auto [last_ckey, result, unconsumed_buffer, compaction_state] = co_await read_page<ResultBuilder>(ctx, s, cmd, ranges, trace_state,
std::move(result_builder));
if (compaction_state->are_limits_reached() || result.is_short_read()) {

View File

@@ -186,13 +186,13 @@ mutation mutation::sliced(const query::clustering_row_ranges& ranges) const {
return mutation(schema(), decorated_key(), partition().sliced(*schema(), ranges));
}
future<mutation_opt> read_mutation_from_flat_mutation_reader(flat_mutation_reader& r, db::timeout_clock::time_point timeout) {
future<mutation_opt> read_mutation_from_flat_mutation_reader(flat_mutation_reader& r) {
if (r.is_buffer_empty()) {
if (r.is_end_of_stream()) {
return make_ready_future<mutation_opt>();
}
return r.fill_buffer(timeout).then([&r, timeout] {
return read_mutation_from_flat_mutation_reader(r, timeout);
return r.fill_buffer().then([&r] {
return read_mutation_from_flat_mutation_reader(r);
});
}
// r.is_buffer_empty() is always false at this point
@@ -238,7 +238,7 @@ future<mutation_opt> read_mutation_from_flat_mutation_reader(flat_mutation_reade
return _builder->consume_end_of_stream();
}
};
return r.consume(adapter(r.schema()), timeout);
return r.consume(adapter(r.schema()));
}
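// Illustrative coroutine-style use of the function above (sketch; handle() is a
// hypothetical per-partition consumer).
while (mutation_opt mo = co_await read_mutation_from_flat_mutation_reader(rd)) {
    handle(std::move(*mo));
}
co_await rd.close();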
std::ostream& operator<<(std::ostream& os, const mutation& m) {

View File

@@ -291,4 +291,4 @@ boost::iterator_range<std::vector<mutation>::const_iterator> slice(
class flat_mutation_reader;
// Reads a single partition from a reader. Returns empty optional if there are no more partitions to be read.
future<mutation_opt> read_mutation_from_flat_mutation_reader(flat_mutation_reader& reader, db::timeout_clock::time_point timeout);
future<mutation_opt> read_mutation_from_flat_mutation_reader(flat_mutation_reader& reader);

View File

@@ -2264,8 +2264,7 @@ mutation_partition::fully_discontinuous(const schema& s, const position_range& r
future<mutation_opt> counter_write_query(schema_ptr s, const mutation_source& source, reader_permit permit,
const dht::decorated_key& dk,
const query::partition_slice& slice,
tracing::trace_state_ptr trace_ptr,
db::timeout_clock::time_point timeout)
tracing::trace_state_ptr trace_ptr)
{
struct range_and_reader {
dht::partition_range range;
@@ -2290,7 +2289,7 @@ future<mutation_opt> counter_write_query(schema_ptr s, const mutation_source& so
auto cwqrb = counter_write_query_result_builder(*s);
auto cfq = make_stable_flattened_mutations_consumer<compact_for_query<emit_only_live_rows::yes, counter_write_query_result_builder>>(
*s, gc_clock::now(), slice, query::max_rows, query::max_partitions, std::move(cwqrb));
auto f = r_a_r->reader.consume(std::move(cfq), timeout);
auto f = r_a_r->reader.consume(std::move(cfq));
return f.finally([r_a_r = std::move(r_a_r)] {
return r_a_r->reader.close();
});

View File

@@ -186,6 +186,5 @@ query::result query_mutation(
future<mutation_opt> counter_write_query(schema_ptr, const mutation_source&, reader_permit permit,
const dht::decorated_key& dk,
const query::partition_slice& slice,
tracing::trace_state_ptr trace_ptr,
db::timeout_clock::time_point timeout);
tracing::trace_state_ptr trace_ptr);

View File

@@ -44,18 +44,17 @@ using merger_vector = utils::small_vector<T, merger_small_vector_size>;
using mutation_fragment_batch = boost::iterator_range<merger_vector<mutation_fragment>::iterator>;
template<typename Producer>
concept FragmentProducer = requires(Producer p, dht::partition_range part_range, position_range pos_range,
db::timeout_clock::time_point timeout) {
concept FragmentProducer = requires(Producer p, dht::partition_range part_range, position_range pos_range) {
// The returned fragments are expected to have the same
// position_in_partition. Iterators and references are expected
// to be valid until the next call to operator()().
{ p(timeout) } -> std::same_as<future<mutation_fragment_batch>>;
{ p() } -> std::same_as<future<mutation_fragment_batch>>;
// The following functions have the same semantics as their
// flat_mutation_reader counterparts.
{ p.next_partition() } -> std::same_as<future<>>;
{ p.fast_forward_to(part_range, timeout) } -> std::same_as<future<>>;
{ p.fast_forward_to(pos_range, timeout) } -> std::same_as<future<>>;
{ p.fast_forward_to(part_range) } -> std::same_as<future<>>;
{ p.fast_forward_to(pos_range) } -> std::same_as<future<>>;
};
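// Illustrative shape of a type satisfying the updated concept (sketch only; it
// covers just the requirements listed above and omits member bodies).
struct example_producer {
    future<mutation_fragment_batch> operator()();
    future<> next_partition();
    future<> fast_forward_to(const dht::partition_range&);
    future<> fast_forward_to(position_range);
};
static_assert(FragmentProducer<example_producer>);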
/**
@@ -93,12 +92,12 @@ class mutation_fragment_merger {
iterator _it{};
iterator _end{};
future<> fetch(db::timeout_clock::time_point timeout) {
future<> fetch() {
if (!empty()) {
return make_ready_future<>();
}
return _producer(timeout).then([this] (mutation_fragment_batch fragments) {
return _producer().then([this] (mutation_fragment_batch fragments) {
_it = fragments.begin();
_end = fragments.end();
});
@@ -122,8 +121,8 @@ public:
, _producer(std::move(producer)) {
}
future<mutation_fragment_opt> operator()(db::timeout_clock::time_point timeout) {
return fetch(timeout).then([this] () -> mutation_fragment_opt {
future<mutation_fragment_opt> operator()() {
return fetch().then([this] () -> mutation_fragment_opt {
if (empty()) {
return mutation_fragment_opt();
}
@@ -139,12 +138,12 @@ public:
return _producer.next_partition();
}
future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) {
return _producer.fast_forward_to(pr, timeout);
future<> fast_forward_to(const dht::partition_range& pr) {
return _producer.fast_forward_to(pr);
}
future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) {
return _producer.fast_forward_to(std::move(pr), timeout);
future<> fast_forward_to(position_range pr) {
return _producer.fast_forward_to(std::move(pr));
}
future<> close() noexcept {
@@ -234,9 +233,9 @@ private:
void maybe_add_readers(const std::optional<dht::ring_position_view>& pos);
void add_readers(std::vector<flat_mutation_reader> new_readers);
bool in_gallop_mode() const;
future<needs_merge> prepare_one(db::timeout_clock::time_point timeout, reader_and_last_fragment_kind rk, reader_galloping reader_galloping);
future<needs_merge> advance_galloping_reader(db::timeout_clock::time_point timeout);
future<> prepare_next(db::timeout_clock::time_point timeout);
future<needs_merge> prepare_one(reader_and_last_fragment_kind rk, reader_galloping reader_galloping);
future<needs_merge> advance_galloping_reader();
future<> prepare_next();
// Collect all forwardable readers into _next, and remove them from
// their previous containers (_halted_readers and _fragment_heap).
void prepare_forwardable_readers();
@@ -247,10 +246,10 @@ public:
mutation_reader::forwarding fwd_mr);
// Produces the next batch of mutation-fragments of the same
// position.
future<mutation_fragment_batch> operator()(db::timeout_clock::time_point timeout);
future<mutation_fragment_batch> operator()();
future<> next_partition();
future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout);
future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout);
future<> fast_forward_to(const dht::partition_range& pr);
future<> fast_forward_to(position_range pr);
future<> close() noexcept;
};
@@ -275,10 +274,10 @@ public:
, _merger(_schema, std::move(producer))
, _fwd_sm(fwd_sm) {}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override;
virtual future<> fill_buffer() override;
virtual future<> next_partition() override;
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override;
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override;
virtual future<> fast_forward_to(const dht::partition_range& pr) override;
virtual future<> fast_forward_to(position_range pr) override;
virtual future<> close() noexcept override;
};
@@ -304,7 +303,7 @@ public:
return std::exchange(_readers, {});
}
virtual std::vector<flat_mutation_reader> fast_forward_to(const dht::partition_range&, db::timeout_clock::time_point timeout) override {
virtual std::vector<flat_mutation_reader> fast_forward_to(const dht::partition_range&) override {
return {};
}
};
@@ -365,25 +364,25 @@ void mutation_reader_merger::maybe_add_readers_at_partition_boundary() {
}
}
future<mutation_reader_merger::needs_merge> mutation_reader_merger::advance_galloping_reader(db::timeout_clock::time_point timeout) {
return prepare_one(timeout, _galloping_reader, reader_galloping::yes).then([this] (needs_merge needs_merge) {
future<mutation_reader_merger::needs_merge> mutation_reader_merger::advance_galloping_reader() {
return prepare_one(_galloping_reader, reader_galloping::yes).then([this] (needs_merge needs_merge) {
maybe_add_readers_at_partition_boundary();
return needs_merge;
});
}
future<> mutation_reader_merger::prepare_next(db::timeout_clock::time_point timeout) {
return parallel_for_each(_next, [this, timeout] (reader_and_last_fragment_kind rk) {
return prepare_one(timeout, rk, reader_galloping::no).discard_result();
future<> mutation_reader_merger::prepare_next() {
return parallel_for_each(_next, [this] (reader_and_last_fragment_kind rk) {
return prepare_one(rk, reader_galloping::no).discard_result();
}).then([this] {
_next.clear();
maybe_add_readers_at_partition_boundary();
});
}
future<mutation_reader_merger::needs_merge> mutation_reader_merger::prepare_one(db::timeout_clock::time_point timeout,
future<mutation_reader_merger::needs_merge> mutation_reader_merger::prepare_one(
reader_and_last_fragment_kind rk, reader_galloping reader_galloping) {
return (*rk.reader)(timeout).then([this, rk, reader_galloping] (mutation_fragment_opt mfo) {
return (*rk.reader)().then([this, rk, reader_galloping] (mutation_fragment_opt mfo) {
auto to_close = make_ready_future<>();
if (mfo) {
if (mfo->is_partition_start()) {
@@ -471,7 +470,7 @@ mutation_reader_merger::mutation_reader_merger(schema_ptr schema,
maybe_add_readers(std::nullopt);
}
future<mutation_fragment_batch> mutation_reader_merger::operator()(db::timeout_clock::time_point timeout) {
future<mutation_fragment_batch> mutation_reader_merger::operator()() {
// Avoid merging-related logic if we know that only a single reader owns
// current partition.
if (_single_reader.reader != reader_iterator{}) {
@@ -480,7 +479,7 @@ future<mutation_fragment_batch> mutation_reader_merger::operator()(db::timeout_c
_current.clear();
return make_ready_future<mutation_fragment_batch>(_current);
}
return _single_reader.reader->fill_buffer(timeout).then([this, timeout] { return operator()(timeout); });
return _single_reader.reader->fill_buffer().then([this] { return operator()(); });
}
_current.clear();
_current.emplace_back(_single_reader.reader->pop_mutation_fragment());
@@ -492,18 +491,18 @@ future<mutation_fragment_batch> mutation_reader_merger::operator()(db::timeout_c
}
if (in_gallop_mode()) {
return advance_galloping_reader(timeout).then([this, timeout] (needs_merge needs_merge) {
return advance_galloping_reader().then([this] (needs_merge needs_merge) {
if (!needs_merge) {
return make_ready_future<mutation_fragment_batch>(_current);
}
// Galloping reader may have lost to some other reader. In that case, we should proceed
// with standard merging logic.
return (*this)(timeout);
return (*this)();
});
}
if (!_next.empty()) {
return prepare_next(timeout).then([this, timeout] { return (*this)(timeout); });
return prepare_next().then([this] { return (*this)(); });
}
_current.clear();
@@ -581,7 +580,7 @@ future<> mutation_reader_merger::next_partition() {
}
}
future<> mutation_reader_merger::fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) {
future<> mutation_reader_merger::fast_forward_to(const dht::partition_range& pr) {
_single_reader = { };
_gallop_mode_hits = 0;
_next.clear();
@@ -592,17 +591,17 @@ future<> mutation_reader_merger::fast_forward_to(const dht::partition_range& pr,
for (auto it = _all_readers.begin(); it != _all_readers.end(); ++it) {
_next.emplace_back(it, mutation_fragment::kind::partition_end);
}
return parallel_for_each(_all_readers, [this, &pr, timeout] (flat_mutation_reader& mr) {
return mr.fast_forward_to(pr, timeout);
}).then([this, &pr, timeout] {
add_readers(_selector->fast_forward_to(pr, timeout));
return parallel_for_each(_all_readers, [this, &pr] (flat_mutation_reader& mr) {
return mr.fast_forward_to(pr);
}).then([this, &pr] {
add_readers(_selector->fast_forward_to(pr));
});
}
future<> mutation_reader_merger::fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) {
future<> mutation_reader_merger::fast_forward_to(position_range pr) {
prepare_forwardable_readers();
return parallel_for_each(_next, [this, pr = std::move(pr), timeout] (reader_and_last_fragment_kind rk) {
return rk.reader->fast_forward_to(pr, timeout);
return parallel_for_each(_next, [this, pr = std::move(pr)] (reader_and_last_fragment_kind rk) {
return rk.reader->fast_forward_to(pr);
});
}
@@ -617,9 +616,9 @@ future<> mutation_reader_merger::close() noexcept {
}
template <FragmentProducer P>
future<> merging_reader<P>::fill_buffer(db::timeout_clock::time_point timeout) {
return repeat([this, timeout] {
return _merger(timeout).then([this] (mutation_fragment_opt mfo) {
future<> merging_reader<P>::fill_buffer() {
return repeat([this] {
return _merger().then([this] (mutation_fragment_opt mfo) {
if (!mfo) {
_end_of_stream = true;
return stop_iteration::yes;
@@ -655,17 +654,17 @@ future<> merging_reader<P>::next_partition() {
}
template <FragmentProducer P>
future<> merging_reader<P>::fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) {
future<> merging_reader<P>::fast_forward_to(const dht::partition_range& pr) {
clear_buffer();
_end_of_stream = false;
return _merger.fast_forward_to(pr, timeout);
return _merger.fast_forward_to(pr);
}
template <FragmentProducer P>
future<> merging_reader<P>::fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) {
future<> merging_reader<P>::fast_forward_to(position_range pr) {
forward_buffer_to(pr.start());
_end_of_stream = false;
return _merger.fast_forward_to(std::move(pr), timeout);
return _merger.fast_forward_to(std::move(pr));
}
template <FragmentProducer P>
@@ -789,16 +788,15 @@ class foreign_reader : public flat_mutation_reader::impl {
// now). If all works well read-aheads complete by the next operation and
// we don't have to wait on the remote reader filling its buffer.
template <typename Operation, typename Result = futurize_t<std::result_of_t<Operation()>>>
Result forward_operation(db::timeout_clock::time_point timeout, Operation op) {
Result forward_operation(Operation op) {
reader_permit::blocked_guard bg{_permit};
return smp::submit_to(_reader.get_owner_shard(), [reader = _reader.get(),
read_ahead_future = std::exchange(_read_ahead_future, nullptr),
timeout,
op = std::move(op)] () mutable {
auto exec_op_and_read_ahead = [=] () mutable {
// Not really variadic, we expect 0 (void) or 1 parameter.
return op().then([=] (auto... result) {
auto f = reader->is_end_of_stream() ? nullptr : std::make_unique<future<>>(reader->fill_buffer(timeout));
auto f = reader->is_end_of_stream() ? nullptr : std::make_unique<future<>>(reader->fill_buffer());
return make_ready_future<std::tuple<foreign_unique_ptr<future<>>, decltype(result)...>>(
std::tuple(make_foreign(std::move(f)), std::move(result)...));
});
@@ -831,10 +829,10 @@ public:
foreign_reader(foreign_reader&&) = delete;
foreign_reader& operator=(foreign_reader&&) = delete;
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override;
virtual future<> fill_buffer() override;
virtual future<> next_partition() override;
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override;
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override;
virtual future<> fast_forward_to(const dht::partition_range& pr) override;
virtual future<> fast_forward_to(position_range pr) override;
virtual future<> close() noexcept override;
};
@@ -847,13 +845,13 @@ foreign_reader::foreign_reader(schema_ptr schema,
, _fwd_sm(fwd_sm) {
}
future<> foreign_reader::fill_buffer(db::timeout_clock::time_point timeout) {
future<> foreign_reader::fill_buffer() {
if (_end_of_stream || is_buffer_full()) {
return make_ready_future();
}
return forward_operation(timeout, [reader = _reader.get(), timeout] () {
auto f = reader->is_buffer_empty() ? reader->fill_buffer(timeout) : make_ready_future<>();
return forward_operation([reader = _reader.get()] () {
auto f = reader->is_buffer_empty() ? reader->fill_buffer() : make_ready_future<>();
return f.then([=] {
return make_ready_future<remote_fill_buffer_result>(remote_fill_buffer_result(reader->detach_buffer(), reader->is_end_of_stream()));
});
@@ -877,24 +875,24 @@ future<> foreign_reader::next_partition() {
}
_end_of_stream = false;
}
co_await forward_operation(db::no_timeout, [reader = _reader.get()] () {
co_await forward_operation([reader = _reader.get()] () {
return reader->next_partition();
});
}
future<> foreign_reader::fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) {
future<> foreign_reader::fast_forward_to(const dht::partition_range& pr) {
clear_buffer();
_end_of_stream = false;
return forward_operation(timeout, [reader = _reader.get(), &pr, timeout] () {
return reader->fast_forward_to(pr, timeout);
return forward_operation([reader = _reader.get(), &pr] () {
return reader->fast_forward_to(pr);
});
}
future<> foreign_reader::fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) {
future<> foreign_reader::fast_forward_to(position_range pr) {
forward_buffer_to(pr.start());
_end_of_stream = false;
return forward_operation(timeout, [reader = _reader.get(), pr = std::move(pr), timeout] () {
return reader->fast_forward_to(std::move(pr), timeout);
return forward_operation([reader = _reader.get(), pr = std::move(pr)] () {
return reader->fast_forward_to(std::move(pr));
});
}
@@ -971,13 +969,13 @@ private:
void update_next_position(flat_mutation_reader& reader);
void adjust_partition_slice();
flat_mutation_reader recreate_reader();
future<flat_mutation_reader> resume_or_create_reader(db::timeout_clock::time_point timeout);
future<flat_mutation_reader> resume_or_create_reader();
void maybe_validate_partition_start(const flat_mutation_reader::tracked_buffer& buffer);
void validate_position_in_partition(position_in_partition_view pos) const;
bool should_drop_fragment(const mutation_fragment& mf);
bool maybe_trim_range_tombstone(mutation_fragment& mf) const;
future<> do_fill_buffer(flat_mutation_reader& reader, db::timeout_clock::time_point timeout);
future<> fill_buffer(flat_mutation_reader& reader, db::timeout_clock::time_point timeout);
future<> do_fill_buffer(flat_mutation_reader& reader);
future<> fill_buffer(flat_mutation_reader& reader);
public:
evictable_reader(
@@ -990,10 +988,10 @@ public:
const io_priority_class& pc,
tracing::trace_state_ptr trace_state,
mutation_reader::forwarding fwd_mr);
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override;
virtual future<> fill_buffer() override;
virtual future<> next_partition() override;
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override;
virtual future<> fast_forward_to(position_range, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override;
virtual future<> fast_forward_to(position_range) override {
throw_with_backtrace<std::bad_function_call>();
}
virtual future<> close() noexcept override {
@@ -1140,14 +1138,14 @@ flat_mutation_reader evictable_reader::recreate_reader() {
_fwd_mr);
}
future<flat_mutation_reader> evictable_reader::resume_or_create_reader(db::timeout_clock::time_point timeout) {
future<flat_mutation_reader> evictable_reader::resume_or_create_reader() {
if (_reader) {
co_return std::move(*_reader);
}
if (auto reader_opt = try_resume()) {
co_return std::move(*reader_opt);
}
co_await _permit.maybe_wait_readmission(timeout);
co_await _permit.maybe_wait_readmission();
co_return recreate_reader();
}
@@ -1285,9 +1283,9 @@ bool evictable_reader::maybe_trim_range_tombstone(mutation_fragment& mf) const {
return true;
}
future<> evictable_reader::do_fill_buffer(flat_mutation_reader& reader, db::timeout_clock::time_point timeout) {
future<> evictable_reader::do_fill_buffer(flat_mutation_reader& reader) {
if (!_drop_partition_start && !_drop_static_row) {
auto fill_buf_fut = reader.fill_buffer(timeout);
auto fill_buf_fut = reader.fill_buffer();
if (_validate_partition_key) {
fill_buf_fut = fill_buf_fut.then([this, &reader] {
maybe_validate_partition_start(reader.buffer());
@@ -1295,8 +1293,8 @@ future<> evictable_reader::do_fill_buffer(flat_mutation_reader& reader, db::time
}
return fill_buf_fut;
}
return repeat([this, &reader, timeout] {
return reader.fill_buffer(timeout).then([this, &reader] {
return repeat([this, &reader] {
return reader.fill_buffer().then([this, &reader] {
maybe_validate_partition_start(reader.buffer());
while (!reader.is_buffer_empty() && should_drop_fragment(reader.peek_buffer())) {
reader.pop_mutation_fragment();
@@ -1306,8 +1304,8 @@ future<> evictable_reader::do_fill_buffer(flat_mutation_reader& reader, db::time
});
}
future<> evictable_reader::fill_buffer(flat_mutation_reader& reader, db::timeout_clock::time_point timeout) {
return do_fill_buffer(reader, timeout).then([this, &reader, timeout] {
future<> evictable_reader::fill_buffer(flat_mutation_reader& reader) {
return do_fill_buffer(reader).then([this, &reader] {
if (reader.is_buffer_empty()) {
return make_ready_future<>();
}
@@ -1352,9 +1350,9 @@ future<> evictable_reader::fill_buffer(flat_mutation_reader& reader, db::timeout
// to read until we are sure we've read all fragments sharing the same
// position, so that we can safely continue reading from after said
// position.
return do_until(stop, [this, &reader, timeout] {
return do_until(stop, [this, &reader] {
if (reader.is_buffer_empty()) {
return do_fill_buffer(reader, timeout);
return do_fill_buffer(reader);
}
if (_trim_range_tombstones) {
auto mf = reader.pop_mutation_fragment();
@@ -1391,12 +1389,12 @@ evictable_reader::evictable_reader(
, _tri_cmp(*_schema) {
}
future<> evictable_reader::fill_buffer(db::timeout_clock::time_point timeout) {
future<> evictable_reader::fill_buffer() {
if (is_end_of_stream()) {
co_return;
}
_reader = co_await resume_or_create_reader(timeout);
co_await fill_buffer(*_reader, timeout);
_reader = co_await resume_or_create_reader();
co_await fill_buffer(*_reader);
_end_of_stream = _reader->is_end_of_stream() && _reader->is_buffer_empty();
maybe_pause(std::move(*_reader));
}
@@ -1407,12 +1405,12 @@ future<> evictable_reader::next_partition() {
if (!is_buffer_empty()) {
co_return;
}
auto reader = co_await resume_or_create_reader(db::no_timeout);
auto reader = co_await resume_or_create_reader();
co_await reader.next_partition();
maybe_pause(std::move(reader));
}
future<> evictable_reader::fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) {
future<> evictable_reader::fast_forward_to(const dht::partition_range& pr) {
_pr = &pr;
_last_pkey.reset();
_next_position_in_partition = position_in_partition::for_partition_start();
@@ -1420,12 +1418,12 @@ future<> evictable_reader::fast_forward_to(const dht::partition_range& pr, db::t
_end_of_stream = false;
if (_reader) {
co_await _reader->fast_forward_to(pr, timeout);
co_await _reader->fast_forward_to(pr);
_range_override.reset();
co_return;
}
if (auto reader_opt = try_resume()) {
co_await reader_opt->fast_forward_to(pr, timeout);
co_await reader_opt->fast_forward_to(pr);
_range_override.reset();
maybe_pause(std::move(*reader_opt));
}
@@ -1490,7 +1488,7 @@ private:
foreign_ptr<std::unique_ptr<evictable_reader>> _reader;
private:
future<> do_fill_buffer(db::timeout_clock::time_point timeout);
future<> do_fill_buffer();
public:
shard_reader(
@@ -1522,15 +1520,15 @@ public:
const mutation_fragment& peek_buffer() const {
return buffer().front();
}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override;
virtual future<> fill_buffer() override;
virtual future<> next_partition() override;
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override;
virtual future<> fast_forward_to(position_range, db::timeout_clock::time_point timeout) override;
virtual future<> fast_forward_to(const dht::partition_range& pr) override;
virtual future<> fast_forward_to(position_range) override;
virtual future<> close() noexcept override;
bool done() const {
return _reader && is_buffer_empty() && is_end_of_stream();
}
void read_ahead(db::timeout_clock::time_point timeout);
void read_ahead();
bool is_read_ahead_in_progress() const {
return _read_ahead.has_value();
}
@@ -1571,7 +1569,7 @@ future<> shard_reader::close() noexcept {
}
}
future<> shard_reader::do_fill_buffer(db::timeout_clock::time_point timeout) {
future<> shard_reader::do_fill_buffer() {
auto fill_buf_fut = make_ready_future<remote_fill_buffer_result>();
struct reader_and_buffer_fill_result {
@@ -1580,7 +1578,7 @@ future<> shard_reader::do_fill_buffer(db::timeout_clock::time_point timeout) {
};
if (!_reader) {
fill_buf_fut = smp::submit_to(_shard, [this, gs = global_schema_ptr(_schema), timeout] () -> future<reader_and_buffer_fill_result> {
fill_buf_fut = smp::submit_to(_shard, [this, gs = global_schema_ptr(_schema)] () -> future<reader_and_buffer_fill_result> {
auto ms = mutation_source([lifecycle_policy = _lifecycle_policy.get()] (
schema_ptr s,
reader_permit permit,
@@ -1593,7 +1591,7 @@ future<> shard_reader::do_fill_buffer(db::timeout_clock::time_point timeout) {
return lifecycle_policy->create_reader(std::move(s), std::move(permit), pr, ps, pc, std::move(ts), fwd_mr);
});
auto s = gs.get();
auto permit = co_await _lifecycle_policy->obtain_reader_permit(s, "shard-reader", timeout);
auto permit = co_await _lifecycle_policy->obtain_reader_permit(s, "shard-reader", timeout());
auto rreader = make_foreign(std::make_unique<evictable_reader>(evictable_reader::auto_pause::yes, std::move(ms),
s, std::move(permit), *_pr, _ps, _pc, _trace_state, _fwd_mr));
@@ -1601,7 +1599,7 @@ future<> shard_reader::do_fill_buffer(db::timeout_clock::time_point timeout) {
try {
tracing::trace(_trace_state, "Creating shard reader on shard: {}", this_shard_id());
reader_permit::used_guard ug{rreader->permit()};
co_await rreader->fill_buffer(timeout);
co_await rreader->fill_buffer();
auto res = remote_fill_buffer_result(rreader->detach_buffer(), rreader->is_end_of_stream());
co_return reader_and_buffer_fill_result{std::move(rreader), std::move(res)};
} catch (...) {
@@ -1609,14 +1607,14 @@ future<> shard_reader::do_fill_buffer(db::timeout_clock::time_point timeout) {
}
co_await rreader->close();
std::rethrow_exception(std::move(ex));
}).then([this, timeout] (reader_and_buffer_fill_result res) {
}).then([this] (reader_and_buffer_fill_result res) {
_reader = std::move(res.reader);
return std::move(res.result);
});
} else {
fill_buf_fut = smp::submit_to(_shard, [this, timeout] () mutable {
fill_buf_fut = smp::submit_to(_shard, [this] () mutable {
reader_permit::used_guard ug{_reader->permit()};
return _reader->fill_buffer(timeout).then([this, ug = std::move(ug)] {
return _reader->fill_buffer().then([this, ug = std::move(ug)] {
return remote_fill_buffer_result(_reader->detach_buffer(), _reader->is_end_of_stream());
});
});
@@ -1630,7 +1628,7 @@ future<> shard_reader::do_fill_buffer(db::timeout_clock::time_point timeout) {
});
}
future<> shard_reader::fill_buffer(db::timeout_clock::time_point timeout) {
future<> shard_reader::fill_buffer() {
// FIXME: want to move this to the inner scopes but it makes clang miscompile the code.
reader_permit::blocked_guard guard(_permit);
if (_read_ahead) {
@@ -1640,7 +1638,7 @@ future<> shard_reader::fill_buffer(db::timeout_clock::time_point timeout) {
if (!is_buffer_empty()) {
co_return;
}
co_await do_fill_buffer(timeout);
co_await do_fill_buffer();
}
future<> shard_reader::next_partition() {
@@ -1663,7 +1661,7 @@ future<> shard_reader::next_partition() {
});
}
future<> shard_reader::fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) {
future<> shard_reader::fast_forward_to(const dht::partition_range& pr) {
_pr = &pr;
if (!_reader && !_read_ahead) {
@@ -1680,21 +1678,21 @@ future<> shard_reader::fast_forward_to(const dht::partition_range& pr, db::timeo
_end_of_stream = false;
clear_buffer();
co_await smp::submit_to(_shard, [this, &pr, timeout] {
return _reader->fast_forward_to(pr, timeout);
co_await smp::submit_to(_shard, [this, &pr] {
return _reader->fast_forward_to(pr);
});
}
future<> shard_reader::fast_forward_to(position_range, db::timeout_clock::time_point timeout) {
future<> shard_reader::fast_forward_to(position_range) {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
void shard_reader::read_ahead(db::timeout_clock::time_point timeout) {
void shard_reader::read_ahead() {
if (_read_ahead || is_end_of_stream() || !is_buffer_empty()) {
return;
}
_read_ahead.emplace(do_fill_buffer(timeout));
_read_ahead.emplace(do_fill_buffer());
}
} // anonymous namespace
@@ -1723,7 +1721,7 @@ class multishard_combining_reader : public flat_mutation_reader::impl {
void on_partition_range_change(const dht::partition_range& pr);
bool maybe_move_to_next_shard(const dht::token* const t = nullptr);
future<> handle_empty_reader_buffer(db::timeout_clock::time_point timeout);
future<> handle_empty_reader_buffer();
public:
multishard_combining_reader(
@@ -1743,10 +1741,10 @@ public:
multishard_combining_reader(multishard_combining_reader&&) = delete;
multishard_combining_reader& operator=(multishard_combining_reader&&) = delete;
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override;
virtual future<> fill_buffer() override;
virtual future<> next_partition() override;
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override;
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override;
virtual future<> fast_forward_to(const dht::partition_range& pr) override;
virtual future<> fast_forward_to(position_range pr) override;
virtual future<> close() noexcept override;
};
@@ -1789,7 +1787,7 @@ bool multishard_combining_reader::maybe_move_to_next_shard(const dht::token* con
return true;
}
future<> multishard_combining_reader::handle_empty_reader_buffer(db::timeout_clock::time_point timeout) {
future<> multishard_combining_reader::handle_empty_reader_buffer() {
auto& reader = *_shard_readers[_current_shard];
if (reader.is_end_of_stream()) {
@@ -1800,7 +1798,7 @@ future<> multishard_combining_reader::handle_empty_reader_buffer(db::timeout_clo
}
return make_ready_future<>();
} else if (reader.is_read_ahead_in_progress()) {
return reader.fill_buffer(timeout);
return reader.fill_buffer();
} else {
// If we crossed shards and the next reader has an empty buffer we
// double concurrency so the next time we cross shards we will have
@@ -1818,10 +1816,10 @@ future<> multishard_combining_reader::handle_empty_reader_buffer(db::timeout_clo
boost::pop_heap(shard_selection_min_heap_copy);
const auto next_shard = shard_selection_min_heap_copy.back().shard;
shard_selection_min_heap_copy.pop_back();
_shard_readers[next_shard]->read_ahead(timeout);
_shard_readers[next_shard]->read_ahead();
}
}
return reader.fill_buffer(timeout);
return reader.fill_buffer();
}
}
@@ -1845,13 +1843,13 @@ multishard_combining_reader::multishard_combining_reader(
}
}
future<> multishard_combining_reader::fill_buffer(db::timeout_clock::time_point timeout) {
future<> multishard_combining_reader::fill_buffer() {
_crossed_shards = false;
return do_until([this] { return is_buffer_full() || is_end_of_stream(); }, [this, timeout] {
return do_until([this] { return is_buffer_full() || is_end_of_stream(); }, [this] {
auto& reader = *_shard_readers[_current_shard];
if (reader.is_buffer_empty()) {
return handle_empty_reader_buffer(timeout);
return handle_empty_reader_buffer();
}
while (!reader.is_buffer_empty() && !is_buffer_full()) {
@@ -1872,16 +1870,16 @@ future<> multishard_combining_reader::next_partition() {
return make_ready_future<>();
}
future<> multishard_combining_reader::fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) {
future<> multishard_combining_reader::fast_forward_to(const dht::partition_range& pr) {
clear_buffer();
_end_of_stream = false;
on_partition_range_change(pr);
return parallel_for_each(_shard_readers, [&pr, timeout] (std::unique_ptr<shard_reader>& sr) {
return sr->fast_forward_to(pr, timeout);
return parallel_for_each(_shard_readers, [&pr] (std::unique_ptr<shard_reader>& sr) {
return sr->fast_forward_to(pr);
});
}
future<> multishard_combining_reader::fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) {
future<> multishard_combining_reader::fast_forward_to(position_range pr) {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
@@ -1941,7 +1939,7 @@ public:
explicit queue_reader(schema_ptr s, reader_permit permit)
: impl(std::move(s), std::move(permit)) {
}
virtual future<> fill_buffer(db::timeout_clock::time_point) override {
virtual future<> fill_buffer() override {
if (_ex) {
return make_exception_future<>(_ex);
}
@@ -1958,16 +1956,16 @@ public:
virtual future<> next_partition() override {
clear_buffer_to_next_partition();
if (is_buffer_empty() && !is_end_of_stream()) {
return fill_buffer(db::no_timeout).then([this] {
return fill_buffer().then([this] {
return next_partition();
});
}
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range&, db::timeout_clock::time_point) override {
virtual future<> fast_forward_to(const dht::partition_range&) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> fast_forward_to(position_range, db::timeout_clock::time_point) override {
virtual future<> fast_forward_to(position_range) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> close() noexcept override {
@@ -2165,9 +2163,9 @@ public:
, _compactor(*_schema, compaction_time, get_max_purgeable)
, _last_uncompacted_partition_start(dht::decorated_key(dht::minimum_token(), partition_key::make_empty()), tombstone{}) {
}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
return do_until([this] { return is_end_of_stream() || is_buffer_full(); }, [this, timeout] {
return _reader.fill_buffer(timeout).then([this, timeout] {
virtual future<> fill_buffer() override {
return do_until([this] { return is_end_of_stream() || is_buffer_full(); }, [this] {
return _reader.fill_buffer().then([this] {
if (_reader.is_buffer_empty()) {
_end_of_stream = _reader.is_end_of_stream();
}
@@ -2218,13 +2216,13 @@ public:
maybe_inject_partition_end();
return _reader.next_partition();
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
clear_buffer();
_end_of_stream = false;
maybe_inject_partition_end();
return _reader.fast_forward_to(pr, timeout);
return _reader.fast_forward_to(pr);
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> close() noexcept override {
@@ -2367,8 +2365,8 @@ class clustering_order_reader_merger {
// and all fragments previously returned from the reader have already been returned by operator().
//
// The peeked reader is pushed onto the _peeked_readers heap.
future<> peek_reader(reader_iterator it, db::timeout_clock::time_point timeout) {
return it->reader.peek(timeout).then([this, timeout, it] (mutation_fragment* mf) {
future<> peek_reader(reader_iterator it) {
return it->reader.peek().then([this, it] (mutation_fragment* mf) {
if (!mf) {
// The reader returned end-of-stream before returning end-of-partition
// (otherwise we would have removed it in a previous peek). This means that
@@ -2410,8 +2408,8 @@ class clustering_order_reader_merger {
}
it->reader.pop_mutation_fragment();
auto f = _forwarded_to ? it->reader.fast_forward_to(*_forwarded_to, timeout) : make_ready_future<>();
return f.then([this, timeout, it] { return peek_reader(it, timeout); });
auto f = _forwarded_to ? it->reader.fast_forward_to(*_forwarded_to) : make_ready_future<>();
return f.then([this, it] { return peek_reader(it); });
}
// We assume that the schema does not have any static columns, so there cannot be any static rows.
@@ -2433,9 +2431,9 @@ class clustering_order_reader_merger {
});
}
future<> peek_readers(db::timeout_clock::time_point timeout) {
return parallel_for_each(_unpeeked_readers, [this, timeout] (reader_iterator it) {
return peek_reader(it, timeout);
future<> peek_readers() {
return parallel_for_each(_unpeeked_readers, [this] (reader_iterator it) {
return peek_reader(it);
}).then([this] {
_unpeeked_readers.clear();
});
@@ -2447,8 +2445,8 @@ class clustering_order_reader_merger {
//
// If the galloping reader wins with other readers again, the fragment is returned as the next batch.
// Otherwise, the reader is pushed onto _peeked_readers and we retry in non-galloping mode.
future<mutation_fragment_batch> peek_galloping_reader(db::timeout_clock::time_point timeout) {
return _galloping_reader->reader.peek(timeout).then([this, timeout] (mutation_fragment* mf) {
future<mutation_fragment_batch> peek_galloping_reader() {
return _galloping_reader->reader.peek().then([this] (mutation_fragment* mf) {
bool erase = false;
if (mf) {
if (mf->is_partition_start()) {
@@ -2495,10 +2493,10 @@ class clustering_order_reader_merger {
// The galloping reader has either been removed, halted, or lost with the other readers.
// Proceed with the normal path.
return maybe_erase.then([this, timeout] {
return maybe_erase.then([this] {
_galloping_reader = {};
_gallop_mode_hits = 0;
return (*this)(timeout);
return (*this)();
});
});
}
@@ -2522,15 +2520,15 @@ public:
// We assume that operator() is called sequentially and that the caller doesn't use the batch
// returned by the previous operator() call after calling operator() again
// (the data from the previous batch is destroyed).
future<mutation_fragment_batch> operator()(db::timeout_clock::time_point timeout) {
future<mutation_fragment_batch> operator()() {
_current_batch.clear();
if (in_gallop_mode()) {
return peek_galloping_reader(timeout);
return peek_galloping_reader();
}
if (!_unpeeked_readers.empty()) {
return peek_readers(timeout).then([this, timeout] { return (*this)(timeout); });
return peek_readers().then([this] { return (*this)(); });
}
// Before we return a batch of fragments using currently opened readers we must check the queue
@@ -2555,7 +2553,7 @@ public:
_all_readers.push_front(std::move(r));
_unpeeked_readers.push_back(_all_readers.begin());
}
return peek_readers(timeout).then([this, timeout] { return (*this)(timeout); });
return peek_readers().then([this] { return (*this)(); });
}
if (_peeked_readers.empty()) {
@@ -2612,13 +2610,13 @@ public:
}
future<> fast_forward_to(const dht::partition_range&, db::timeout_clock::time_point) {
future<> fast_forward_to(const dht::partition_range&) {
throw std::runtime_error(
"clustering_order_reader_merger::fast_forward_to: this reader works only for single partition queries");
}
future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) {
future<> fast_forward_to(position_range pr) {
if (!_partition_start_fetched) {
on_internal_error(mrlog, "reader was forwarded before returning partition start");
}
@@ -2639,8 +2637,8 @@ public:
_forwarded_to = pr;
_pr_end = _forwarded_to->end();
return parallel_for_each(_unpeeked_readers, [this, pr = std::move(pr), timeout] (reader_iterator it) {
return it->reader.fast_forward_to(pr, timeout);
return parallel_for_each(_unpeeked_readers, [this, pr = std::move(pr)] (reader_iterator it) {
return it->reader.fast_forward_to(pr);
});
}
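
The hunks above all follow the same pattern: reader entry points such as fill_buffer(), fast_forward_to(), peek() and read_ahead() stop taking a db::timeout_clock::time_point and instead rely on the deadline already stored in the reader's permit. The following is a minimal, self-contained sketch of that pattern, using standard C++ and made-up names only (not the actual Scylla/Seastar types):

    #include <chrono>
    #include <stdexcept>
    #include <string>
    #include <utility>
    #include <vector>

    using timeout_clock = std::chrono::steady_clock;

    class reader_permit {
        timeout_clock::time_point _timeout;
    public:
        explicit reader_permit(timeout_clock::time_point timeout) : _timeout(timeout) {}
        timeout_clock::time_point timeout() const noexcept { return _timeout; }
        void set_timeout(timeout_clock::time_point t) noexcept { _timeout = t; }
    };

    class toy_reader {
        reader_permit _permit;
        std::vector<std::string> _buffer;
    public:
        explicit toy_reader(reader_permit permit) : _permit(std::move(permit)) {}
        // Before: fill_buffer(db::timeout_clock::time_point timeout).
        // After: no parameter; the deadline is read from the permit when it is needed.
        void fill_buffer() {
            if (timeout_clock::now() >= _permit.timeout()) {
                throw std::runtime_error("reader timed out");
            }
            _buffer.emplace_back("fragment");
        }
        reader_permit& permit() noexcept { return _permit; }
    };

    int main() {
        toy_reader r{reader_permit{timeout_clock::now() + std::chrono::seconds(5)}};
        r.fill_buffer(); // no timeout argument threaded through the call chain
    }

Since the permit already travels with the reader, the deadline no longer has to be captured by every continuation lambda along the way, which is exactly what the capture-list changes in the hunks above reflect.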

View File

@@ -41,7 +41,7 @@ public:
virtual ~reader_selector() = default;
// Call only if has_new_readers() returned true.
virtual std::vector<flat_mutation_reader> create_new_readers(const std::optional<dht::ring_position_view>& pos) = 0;
virtual std::vector<flat_mutation_reader> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) = 0;
virtual std::vector<flat_mutation_reader> fast_forward_to(const dht::partition_range& pr) = 0;
// Can be false-positive but never false-negative!
bool has_new_readers(const std::optional<dht::ring_position_view>& pos) const noexcept {
@@ -84,9 +84,9 @@ public:
, _rd(std::move(rd))
, _filter(std::forward<MutationFilter>(filter)) {
}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
return do_until([this] { return is_buffer_full() || is_end_of_stream(); }, [this, timeout] {
return _rd.fill_buffer(timeout).then([this] {
virtual future<> fill_buffer() override {
return do_until([this] { return is_buffer_full() || is_end_of_stream(); }, [this] {
return _rd.fill_buffer().then([this] {
return do_until([this] { return _rd.is_buffer_empty(); }, [this] {
auto mf = _rd.pop_mutation_fragment();
if (mf.is_partition_start()) {
@@ -111,15 +111,15 @@ public:
}
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
clear_buffer();
_end_of_stream = false;
return _rd.fast_forward_to(pr, timeout);
return _rd.fast_forward_to(pr);
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
forward_buffer_to(pr.start());
_end_of_stream = false;
return _rd.fast_forward_to(std::move(pr), timeout);
return _rd.fast_forward_to(std::move(pr));
}
virtual future<> close() noexcept {
return _rd.close();
@@ -474,6 +474,8 @@ public:
/// evicted (while paused). This method should also enter gates, take locks
/// or whatever is appropriate to make sure resources it is using on the
/// remote shard stay alive, during the lifetime of the created reader.
///
/// The \c permit parameter shall be obtained via `obtain_reader_permit()`
virtual flat_mutation_reader create_reader(
schema_ptr schema,
reader_permit permit,

View File

@@ -50,10 +50,10 @@ template <typename Writer>
requires MutationFragmentConsumer<Writer, future<>>
future<> feed_writer(flat_mutation_reader&& rd, Writer&& wr) {
return do_with(std::move(rd), std::move(wr), [] (flat_mutation_reader& rd, Writer& wr) {
return rd.fill_buffer(db::no_timeout).then([&rd, &wr] {
return rd.fill_buffer().then([&rd, &wr] {
return do_until([&rd] { return rd.is_buffer_empty() && rd.is_end_of_stream(); }, [&rd, &wr] {
auto f1 = rd.pop_mutation_fragment().consume(wr);
auto f2 = rd.is_buffer_empty() ? rd.fill_buffer(db::no_timeout) : make_ready_future<>();
auto f2 = rd.is_buffer_empty() ? rd.fill_buffer() : make_ready_future<>();
return when_all_succeed(std::move(f1), std::move(f2)).discard_result();
});
}).then_wrapped([&wr] (future<> f) {
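
The feed_writer change above only drops the db::no_timeout arguments; the shape of the loop is unchanged: pop a fragment, hand it to the writer, and refill the buffer in parallel when it has just been drained. A rough, self-contained analogue of that loop, where std::async merely stands in for seastar futures and every name is made up:

    #include <future>
    #include <queue>
    #include <string>

    struct toy_reader {
        int remaining = 3;                        // pretend three fragments are left upstream
        std::queue<std::string> buffer;
        bool end_of_stream() const { return remaining == 0; }
        bool buffer_empty() const { return buffer.empty(); }
        void fill_buffer() { if (remaining > 0) { buffer.push("fragment"); --remaining; } }
        std::string pop() { auto f = buffer.front(); buffer.pop(); return f; }
    };

    struct toy_writer {
        int consumed = 0;
        void consume(const std::string&) { ++consumed; }
    };

    void feed(toy_reader& rd, toy_writer& wr) {
        rd.fill_buffer();
        while (!(rd.buffer_empty() && rd.end_of_stream())) {
            auto frag = rd.pop();
            // Consume the current fragment and refill the drained buffer in parallel,
            // mirroring when_all_succeed(consume, fill_buffer) in the hunk above.
            auto consume_done = std::async(std::launch::async, [&] { wr.consume(frag); });
            auto refill_done = rd.buffer_empty()
                    ? std::async(std::launch::async, [&] { rd.fill_buffer(); })
                    : std::future<void>{};
            consume_done.get();
            if (refill_done.valid()) {
                refill_done.get();
            }
        }
    }

    int main() {
        toy_reader rd;
        toy_writer wr;
        feed(rd, wr); // wr.consumed == 3
    }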

View File

@@ -91,7 +91,7 @@ shard_writer::shard_writer(schema_ptr s,
}
future<> shard_writer::consume() {
return _reader.peek(db::no_timeout).then([this] (mutation_fragment* mf_ptr) {
return _reader.peek().then([this] (mutation_fragment* mf_ptr) {
if (mf_ptr) {
return _consumer(std::move(_reader));
}
@@ -124,7 +124,7 @@ future<> multishard_writer::make_shard_writer(unsigned shard) {
reader = make_foreign(std::make_unique<flat_mutation_reader>(std::move(reader)))] () mutable {
auto s = gs.get();
auto semaphore = std::make_unique<reader_concurrency_semaphore>(reader_concurrency_semaphore::no_limits{}, "shard_writer");
auto permit = semaphore->make_tracking_only_permit(s.get(), "multishard-writer");
auto permit = semaphore->make_tracking_only_permit(s.get(), "multishard-writer", db::no_timeout);
auto this_shard_reader = make_foreign_reader(s, std::move(permit), std::move(reader));
return make_foreign(std::make_unique<shard_writer>(gs.get(), std::move(semaphore), std::move(this_shard_reader), consumer));
}).then([this, shard] (foreign_ptr<std::unique_ptr<shard_writer>> writer) {
@@ -182,7 +182,7 @@ future<> multishard_writer::wait_pending_consumers() {
future<> multishard_writer::distribute_mutation_fragments() {
return repeat([this] () mutable {
return _producer(db::no_timeout).then([this] (mutation_fragment_opt mf_opt) mutable {
return _producer().then([this] (mutation_fragment_opt mf_opt) mutable {
if (mf_opt) {
return handle_mutation_fragment(std::move(*mf_opt));
} else {

View File

@@ -325,7 +325,7 @@ private:
_no_more_rows_in_current_range = false;
}
void do_fill_buffer(db::timeout_clock::time_point timeout) {
void do_fill_buffer() {
while (!is_end_of_stream() && !is_buffer_full()) {
auto mfopt = read_next();
if (mfopt) {
@@ -360,16 +360,16 @@ public:
});
}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
virtual future<> fill_buffer() override {
// FIXME: indentation
return do_until([this] { return is_end_of_stream() || is_buffer_full(); }, [this, timeout] {
return do_until([this] { return is_end_of_stream() || is_buffer_full(); }, [this] {
_reader.with_reserve([&] {
if (!_static_row_done) {
push_static_row();
on_new_range();
_static_row_done = true;
}
do_fill_buffer(timeout);
do_fill_buffer();
});
return make_ready_future<>();
});
@@ -381,10 +381,10 @@ public:
}
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
throw std::runtime_error("This reader can't be fast forwarded to another partition.");
};
virtual future<> fast_forward_to(position_range cr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range cr) override {
throw std::runtime_error("This reader can't be fast forwarded to another position.");
};
virtual future<> close() noexcept override {

View File

@@ -320,7 +320,8 @@ std::optional<Querier> querier_cache::lookup_querier(
const schema& s,
dht::partition_ranges_view ranges,
const query::partition_slice& slice,
tracing::trace_state_ptr trace_state) {
tracing::trace_state_ptr trace_state,
db::timeout_clock::time_point timeout) {
auto base_ptr = find_querier(index, key, ranges, trace_state);
auto& stats = _stats;
++stats.lookups;
@@ -338,6 +339,7 @@ std::optional<Querier> querier_cache::lookup_querier(
if (!reader_opt) {
throw std::runtime_error("lookup_querier(): found querier that is evicted");
}
reader_opt->set_timeout(timeout);
querier_utils::set_reader(q, std::move(*reader_opt));
--stats.population;
@@ -364,25 +366,28 @@ std::optional<data_querier> querier_cache::lookup_data_querier(utils::UUID key,
const schema& s,
const dht::partition_range& range,
const query::partition_slice& slice,
tracing::trace_state_ptr trace_state) {
return lookup_querier<data_querier>(_data_querier_index, key, s, range, slice, std::move(trace_state));
tracing::trace_state_ptr trace_state,
db::timeout_clock::time_point timeout) {
return lookup_querier<data_querier>(_data_querier_index, key, s, range, slice, std::move(trace_state), timeout);
}
std::optional<mutation_querier> querier_cache::lookup_mutation_querier(utils::UUID key,
const schema& s,
const dht::partition_range& range,
const query::partition_slice& slice,
tracing::trace_state_ptr trace_state) {
return lookup_querier<mutation_querier>(_mutation_querier_index, key, s, range, slice, std::move(trace_state));
tracing::trace_state_ptr trace_state,
db::timeout_clock::time_point timeout) {
return lookup_querier<mutation_querier>(_mutation_querier_index, key, s, range, slice, std::move(trace_state), timeout);
}
std::optional<shard_mutation_querier> querier_cache::lookup_shard_mutation_querier(utils::UUID key,
const schema& s,
const dht::partition_range_vector& ranges,
const query::partition_slice& slice,
tracing::trace_state_ptr trace_state) {
tracing::trace_state_ptr trace_state,
db::timeout_clock::time_point timeout) {
return lookup_querier<shard_mutation_querier>(_shard_mutation_querier_index, key, s, ranges, slice,
std::move(trace_state));
std::move(trace_state), timeout);
}
future<> querier_base::close() noexcept {
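
lookup_querier() now receives the deadline of the page being served and calls set_timeout() on the reactivated reader: a querier sitting in the cache still carries whatever deadline it had when it was parked, so the caller's deadline has to be installed before the reader is handed back for reuse. A toy version of that idea (hypothetical names, standard containers only):

    #include <chrono>
    #include <optional>
    #include <unordered_map>

    using timeout_clock = std::chrono::steady_clock;

    struct cached_reader {
        timeout_clock::time_point timeout;
        void set_timeout(timeout_clock::time_point t) { timeout = t; }
    };

    class toy_querier_cache {
        std::unordered_map<unsigned, cached_reader> _entries;
    public:
        void insert(unsigned key, cached_reader reader) { _entries.emplace(key, reader); }

        std::optional<cached_reader> lookup(unsigned key, timeout_clock::time_point timeout) {
            auto it = _entries.find(key);
            if (it == _entries.end()) {
                return std::nullopt;
            }
            auto reader = it->second;
            _entries.erase(it);
            reader.set_timeout(timeout); // refresh the deadline before the reader is reused
            return reader;
        }
    };

    int main() {
        toy_querier_cache cache;
        cache.insert(1, cached_reader{timeout_clock::now()}); // parked with the old deadline
        auto reader = cache.lookup(1, timeout_clock::now() + std::chrono::seconds(30));
        (void)reader;
    }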

View File

@@ -84,9 +84,8 @@ auto consume_page(flat_mutation_reader& reader,
uint64_t row_limit,
uint32_t partition_limit,
gc_clock::time_point query_time,
db::timeout_clock::time_point timeout,
query::max_result_size max_size) {
return reader.peek(timeout).then([=, &reader, consumer = std::move(consumer), &slice] (
return reader.peek().then([=, &reader, consumer = std::move(consumer), &slice] (
mutation_fragment* next_fragment) mutable {
const auto next_fragment_kind = next_fragment ? next_fragment->mutation_fragment_kind() : mutation_fragment::kind::partition_end;
compaction_state->start_new_page(row_limit, partition_limit, query_time, next_fragment_kind, consumer);
@@ -96,14 +95,14 @@ auto consume_page(flat_mutation_reader& reader,
compaction_state,
clustering_position_tracker(std::move(consumer), last_ckey));
auto consume = [&reader, &slice, reader_consumer = std::move(reader_consumer), timeout, max_size] () mutable {
auto consume = [&reader, &slice, reader_consumer = std::move(reader_consumer), max_size] () mutable {
if (slice.options.contains(query::partition_slice::option::reversed)) {
return with_closeable(make_reversing_reader(reader, max_size),
[reader_consumer = std::move(reader_consumer), timeout] (flat_mutation_reader& reversing_reader) mutable {
return reversing_reader.consume(std::move(reader_consumer), timeout);
[reader_consumer = std::move(reader_consumer)] (flat_mutation_reader& reversing_reader) mutable {
return reversing_reader.consume(std::move(reader_consumer));
});
}
return reader.consume(std::move(reader_consumer), timeout);
return reader.consume(std::move(reader_consumer));
};
return consume().then([last_ckey] (auto&&... results) mutable {
@@ -228,10 +227,9 @@ public:
uint64_t row_limit,
uint32_t partition_limit,
gc_clock::time_point query_time,
db::timeout_clock::time_point timeout,
query::max_result_size max_size) {
return ::query::consume_page(std::get<flat_mutation_reader>(_reader), _compaction_state, *_slice, std::move(consumer), row_limit,
partition_limit, query_time, timeout, max_size).then([this] (auto&& results) {
partition_limit, query_time, max_size).then([this] (auto&& results) {
_last_ckey = std::get<std::optional<clustering_key>>(std::move(results));
constexpr auto size = std::tuple_size<std::decay_t<decltype(results)>>::value;
static_assert(size <= 2);
@@ -389,7 +387,8 @@ private:
const schema& s,
dht::partition_ranges_view ranges,
const query::partition_slice& slice,
tracing::trace_state_ptr trace_state);
tracing::trace_state_ptr trace_state,
db::timeout_clock::time_point timeout);
public:
explicit querier_cache(std::chrono::seconds entry_ttl = default_entry_ttl);
@@ -424,7 +423,8 @@ public:
const schema& s,
const dht::partition_range& range,
const query::partition_slice& slice,
tracing::trace_state_ptr trace_state);
tracing::trace_state_ptr trace_state,
db::timeout_clock::time_point timeout);
/// Lookup a mutation querier in the cache.
///
@@ -433,7 +433,8 @@ public:
const schema& s,
const dht::partition_range& range,
const query::partition_slice& slice,
tracing::trace_state_ptr trace_state);
tracing::trace_state_ptr trace_state,
db::timeout_clock::time_point timeout);
/// Lookup a shard mutation querier in the cache.
///
@@ -442,7 +443,8 @@ public:
const schema& s,
const dht::partition_range_vector& ranges,
const query::partition_slice& slice,
tracing::trace_state_ptr trace_state);
tracing::trace_state_ptr trace_state,
db::timeout_clock::time_point timeout);
/// Change the ttl of cache entries
///

View File

@@ -51,7 +51,7 @@ public:
: _cache(cache)
, _read_context(context)
{ }
future<mutation_fragment_opt> move_to_next_partition(db::timeout_clock::time_point timeout) {
future<mutation_fragment_opt> move_to_next_partition() {
_last_key = std::move(_new_last_key);
auto start = population_range_start();
auto phase = _cache.phase_of(start);
@@ -79,12 +79,12 @@ public:
return rd.close();
});
}
return refresh_reader.then([this, timeout] {
return _reader->next_partition().then([this, timeout] {
return refresh_reader.then([this] {
return _reader->next_partition().then([this] {
if (_reader->is_end_of_stream() && _reader->is_buffer_empty()) {
return make_ready_future<mutation_fragment_opt>();
}
return (*_reader)(timeout).then([this] (auto&& mfopt) {
return (*_reader)().then([this] (auto&& mfopt) {
if (mfopt) {
assert(mfopt->is_partition_start());
_new_last_key = mfopt->as_partition_start().key();
@@ -94,18 +94,18 @@ public:
});
});
}
future<> fast_forward_to(dht::partition_range&& range, db::timeout_clock::time_point timeout) {
future<> fast_forward_to(dht::partition_range&& range) {
auto snapshot_and_phase = _cache.snapshot_of(dht::ring_position_view::for_range_start(_range));
return fast_forward_to(std::move(range), snapshot_and_phase.snapshot, snapshot_and_phase.phase, timeout);
return fast_forward_to(std::move(range), snapshot_and_phase.snapshot, snapshot_and_phase.phase);
}
future<> fast_forward_to(dht::partition_range&& range, mutation_source& snapshot, row_cache::phase_type phase, db::timeout_clock::time_point timeout) {
future<> fast_forward_to(dht::partition_range&& range, mutation_source& snapshot, row_cache::phase_type phase) {
_range = std::move(range);
_last_key = { };
_new_last_key = { };
if (_reader) {
if (_reader_creation_phase == phase) {
++_cache._tracker._stats.underlying_partition_skips;
return _reader->fast_forward_to(_range, timeout);
return _reader->fast_forward_to(_range);
} else {
++_cache._tracker._stats.underlying_recreations;
}
@@ -208,10 +208,10 @@ public:
void on_underlying_created() { ++_underlying_created; }
bool digest_requested() const { return _slice.options.contains<query::partition_slice::option::with_digest>(); }
public:
future<> ensure_underlying(db::timeout_clock::time_point timeout) {
future<> ensure_underlying() {
if (_underlying_snapshot) {
return create_underlying(timeout).then([this, timeout] {
return _underlying.underlying()(timeout).then([this] (mutation_fragment_opt&& mfopt) {
return create_underlying().then([this] {
return _underlying.underlying()().then([this] (mutation_fragment_opt&& mfopt) {
_partition_exists = bool(mfopt);
});
});
@@ -224,7 +224,7 @@ public:
return make_ready_future<>();
}
public:
future<> create_underlying(db::timeout_clock::time_point timeout);
future<> create_underlying();
void enter_partition(const dht::decorated_key& dk, mutation_source& snapshot, row_cache::phase_type phase) {
_phase = phase;
_underlying_snapshot = snapshot;

View File

@@ -92,6 +92,7 @@ class reader_permit::impl
bool _marked_as_used = false;
uint64_t _blocked_branches = 0;
bool _marked_as_blocked = false;
db::timeout_clock::time_point _timeout;
private:
void on_permit_used() {
@@ -129,20 +130,22 @@ private:
public:
struct value_tag {};
impl(reader_concurrency_semaphore& semaphore, const schema* const schema, const std::string_view& op_name, reader_resources base_resources)
impl(reader_concurrency_semaphore& semaphore, const schema* const schema, const std::string_view& op_name, reader_resources base_resources, db::timeout_clock::time_point timeout)
: _semaphore(semaphore)
, _schema(schema)
, _op_name_view(op_name)
, _base_resources(base_resources)
, _timeout(timeout)
{
_semaphore.on_permit_created(*this);
}
impl(reader_concurrency_semaphore& semaphore, const schema* const schema, sstring&& op_name, reader_resources base_resources)
impl(reader_concurrency_semaphore& semaphore, const schema* const schema, sstring&& op_name, reader_resources base_resources, db::timeout_clock::time_point timeout)
: _semaphore(semaphore)
, _schema(schema)
, _op_name(std::move(op_name))
, _op_name_view(_op_name)
, _base_resources(base_resources)
, _timeout(timeout)
{
_semaphore.on_permit_created(*this);
}
@@ -295,11 +298,27 @@ public:
}
}
future<> maybe_wait_readmission(db::timeout_clock::time_point timeout) {
future<> maybe_wait_readmission() {
if (_state != reader_permit::state::evicted) {
return make_ready_future<>();
}
return _semaphore.do_wait_admission(shared_from_this(), timeout);
return _semaphore.do_wait_admission(shared_from_this());
}
db::timeout_clock::time_point timeout() const noexcept {
return _timeout;
}
void set_timeout(db::timeout_clock::time_point timeout) noexcept {
using namespace std::chrono_literals;
if (_timeout != db::no_timeout && timeout < _timeout) {
if (_timeout - timeout > 100ms) {
rcslog.warn("Detected timeout skew of {}ms, please check time skew between nodes in the cluster. backtrace: {}",
std::chrono::duration_cast<std::chrono::milliseconds>(_timeout - timeout).count(),
current_backtrace());
}
}
_timeout = timeout;
}
};
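
set_timeout() above installs the new deadline unconditionally, but warns when the deadline moves backwards by more than 100ms, since that usually indicates time skew between nodes rather than a legitimately shorter deadline. A toy rendering of the same check, with fprintf standing in for the logger and the backtrace omitted:

    #include <chrono>
    #include <cstdio>

    using timeout_clock = std::chrono::steady_clock;
    inline constexpr auto no_timeout = timeout_clock::time_point::max();

    struct toy_permit {
        timeout_clock::time_point _timeout = no_timeout;

        void set_timeout(timeout_clock::time_point timeout) noexcept {
            using namespace std::chrono_literals;
            if (_timeout != no_timeout && timeout < _timeout && _timeout - timeout > 100ms) {
                auto skew = std::chrono::duration_cast<std::chrono::milliseconds>(_timeout - timeout);
                std::fprintf(stderr, "detected timeout skew of %lldms\n",
                             static_cast<long long>(skew.count()));
            }
            _timeout = timeout; // the new deadline is installed either way
        }
    };

    int main() {
        toy_permit p;
        p.set_timeout(timeout_clock::now() + std::chrono::seconds(10));
        p.set_timeout(timeout_clock::now() - std::chrono::seconds(1)); // warns: moved back by > 100ms
    }
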
@@ -311,14 +330,14 @@ reader_permit::reader_permit(shared_ptr<impl> impl) : _impl(std::move(impl))
}
reader_permit::reader_permit(reader_concurrency_semaphore& semaphore, const schema* const schema, std::string_view op_name,
reader_resources base_resources)
: _impl(::seastar::make_shared<reader_permit::impl>(semaphore, schema, op_name, base_resources))
reader_resources base_resources, db::timeout_clock::time_point timeout)
: _impl(::seastar::make_shared<reader_permit::impl>(semaphore, schema, op_name, base_resources, timeout))
{
}
reader_permit::reader_permit(reader_concurrency_semaphore& semaphore, const schema* const schema, sstring&& op_name,
reader_resources base_resources)
: _impl(::seastar::make_shared<reader_permit::impl>(semaphore, schema, std::move(op_name), base_resources))
reader_resources base_resources, db::timeout_clock::time_point timeout)
: _impl(::seastar::make_shared<reader_permit::impl>(semaphore, schema, std::move(op_name), base_resources, timeout))
{
}
@@ -337,8 +356,8 @@ reader_concurrency_semaphore& reader_permit::semaphore() {
return _impl->semaphore();
}
future<> reader_permit::maybe_wait_readmission(db::timeout_clock::time_point timeout) {
return _impl->maybe_wait_readmission(timeout);
future<> reader_permit::maybe_wait_readmission() {
return _impl->maybe_wait_readmission();
}
void reader_permit::consume(reader_resources res) {
@@ -385,6 +404,14 @@ void reader_permit::mark_unblocked() noexcept {
_impl->mark_unblocked();
}
db::timeout_clock::time_point reader_permit::timeout() const noexcept {
return _impl->timeout();
}
void reader_permit::set_timeout(db::timeout_clock::time_point timeout) noexcept {
_impl->set_timeout(timeout);
}
std::ostream& operator<<(std::ostream& os, reader_permit::state s) {
switch (s) {
case reader_permit::state::waiting:
@@ -776,13 +803,14 @@ std::exception_ptr reader_concurrency_semaphore::check_queue_size(std::string_vi
return {};
}
future<> reader_concurrency_semaphore::enqueue_waiter(reader_permit permit, db::timeout_clock::time_point timeout, read_func func) {
future<> reader_concurrency_semaphore::enqueue_waiter(reader_permit permit, read_func func) {
if (auto ex = check_queue_size("wait")) {
return make_exception_future<>(std::move(ex));
}
promise<> pr;
auto fut = pr.get_future();
permit.on_waiting();
auto timeout = permit.timeout();
_wait_list.push_back(entry(std::move(pr), std::move(permit), std::move(func)), timeout);
++_stats.reads_enqueued;
return fut;
@@ -798,16 +826,16 @@ void reader_concurrency_semaphore::evict_readers_in_background() {
});
}
future<> reader_concurrency_semaphore::do_wait_admission(reader_permit permit, db::timeout_clock::time_point timeout, read_func func) {
future<> reader_concurrency_semaphore::do_wait_admission(reader_permit permit, read_func func) {
if (!_execution_loop_future) {
_execution_loop_future.emplace(execution_loop());
}
if (!_wait_list.empty() || !_ready_list.empty()) {
return enqueue_waiter(std::move(permit), timeout, std::move(func));
return enqueue_waiter(std::move(permit), std::move(func));
}
if (!has_available_units(permit.base_resources())) {
auto fut = enqueue_waiter(std::move(permit), timeout, std::move(func));
auto fut = enqueue_waiter(std::move(permit), std::move(func));
if (!_inactive_reads.empty()) {
evict_readers_in_background();
}
@@ -815,7 +843,7 @@ future<> reader_concurrency_semaphore::do_wait_admission(reader_permit permit, d
}
if (!all_used_permits_are_stalled()) {
return enqueue_waiter(std::move(permit), timeout, std::move(func));
return enqueue_waiter(std::move(permit), std::move(func));
}
permit.on_admission();
@@ -881,31 +909,31 @@ void reader_concurrency_semaphore::on_permit_unblocked() noexcept {
future<reader_permit> reader_concurrency_semaphore::obtain_permit(const schema* const schema, const char* const op_name, size_t memory,
db::timeout_clock::time_point timeout) {
auto permit = reader_permit(*this, schema, std::string_view(op_name), {1, static_cast<ssize_t>(memory)});
return do_wait_admission(permit, timeout).then([permit] () mutable {
auto permit = reader_permit(*this, schema, std::string_view(op_name), {1, static_cast<ssize_t>(memory)}, timeout);
return do_wait_admission(permit).then([permit] () mutable {
return std::move(permit);
});
}
future<reader_permit> reader_concurrency_semaphore::obtain_permit(const schema* const schema, sstring&& op_name, size_t memory,
db::timeout_clock::time_point timeout) {
auto permit = reader_permit(*this, schema, std::move(op_name), {1, static_cast<ssize_t>(memory)});
return do_wait_admission(permit, timeout).then([permit] () mutable {
auto permit = reader_permit(*this, schema, std::move(op_name), {1, static_cast<ssize_t>(memory)}, timeout);
return do_wait_admission(permit).then([permit] () mutable {
return std::move(permit);
});
}
reader_permit reader_concurrency_semaphore::make_tracking_only_permit(const schema* const schema, const char* const op_name) {
return reader_permit(*this, schema, std::string_view(op_name), {});
reader_permit reader_concurrency_semaphore::make_tracking_only_permit(const schema* const schema, const char* const op_name, db::timeout_clock::time_point timeout) {
return reader_permit(*this, schema, std::string_view(op_name), {}, timeout);
}
reader_permit reader_concurrency_semaphore::make_tracking_only_permit(const schema* const schema, sstring&& op_name) {
return reader_permit(*this, schema, std::move(op_name), {});
reader_permit reader_concurrency_semaphore::make_tracking_only_permit(const schema* const schema, sstring&& op_name, db::timeout_clock::time_point timeout) {
return reader_permit(*this, schema, std::move(op_name), {}, timeout);
}
future<> reader_concurrency_semaphore::with_permit(const schema* const schema, const char* const op_name, size_t memory,
db::timeout_clock::time_point timeout, read_func func) {
return do_wait_admission(reader_permit(*this, schema, std::string_view(op_name), {1, static_cast<ssize_t>(memory)}), timeout, std::move(func));
return do_wait_admission(reader_permit(*this, schema, std::string_view(op_name), {1, static_cast<ssize_t>(memory)}, timeout), std::move(func));
}
future<> reader_concurrency_semaphore::with_ready_permit(reader_permit permit, read_func func) {
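
With the deadline carried by the permit, enqueue_waiter() and do_wait_admission() above no longer take a timeout parameter: the wait-list entry simply expires at permit.timeout(). A sketch of that shape, using a toy synchronous semaphore rather than the real execution-stage machinery:

    #include <chrono>
    #include <deque>
    #include <functional>
    #include <utility>

    using timeout_clock = std::chrono::steady_clock;

    struct toy_permit {
        timeout_clock::time_point _timeout;
        timeout_clock::time_point timeout() const noexcept { return _timeout; }
    };

    struct waiter {
        toy_permit permit;
        std::function<void()> resume;
        timeout_clock::time_point expires_at;
    };

    class toy_semaphore {
        int _units;
        std::deque<waiter> _wait_list;
    public:
        explicit toy_semaphore(int units) : _units(units) {}

        void wait_admission(toy_permit permit, std::function<void()> resume) {
            if (_units > 0) {
                --_units;
                resume();
                return;
            }
            // The queue entry expires at the permit's own deadline; no separate
            // timeout argument is needed any more.
            auto deadline = permit.timeout();
            _wait_list.push_back(waiter{permit, std::move(resume), deadline});
        }

        void expire_overdue(timeout_clock::time_point now) {
            while (!_wait_list.empty() && _wait_list.front().expires_at <= now) {
                _wait_list.pop_front(); // the real code fails the waiter with a timeout error
            }
        }
    };

    int main() {
        toy_semaphore sem{0};
        toy_permit p{timeout_clock::now() + std::chrono::seconds(2)};
        sem.wait_admission(p, [] { /* admitted */ });
        sem.expire_overdue(timeout_clock::now() + std::chrono::seconds(5)); // waiter expired
    }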

View File

@@ -209,9 +209,9 @@ private:
// Add the permit to the wait queue and return the future which resolves when
// the permit is admitted (popped from the queue).
future<> enqueue_waiter(reader_permit permit, db::timeout_clock::time_point timeout, read_func func);
future<> enqueue_waiter(reader_permit permit, read_func func);
void evict_readers_in_background();
future<> do_wait_admission(reader_permit permit, db::timeout_clock::time_point timeout, read_func func = {});
future<> do_wait_admission(reader_permit permit, read_func func = {});
void maybe_admit_waiters() noexcept;
void on_permit_created(reader_permit::impl&);
@@ -344,8 +344,8 @@ public:
///
/// Some permits cannot be associated with any table, so passing nullptr as
/// the schema parameter is allowed.
reader_permit make_tracking_only_permit(const schema* const schema, const char* const op_name);
reader_permit make_tracking_only_permit(const schema* const schema, sstring&& op_name);
reader_permit make_tracking_only_permit(const schema* const schema, const char* const op_name, db::timeout_clock::time_point timeout);
reader_permit make_tracking_only_permit(const schema* const schema, sstring&& op_name, db::timeout_clock::time_point timeout);
/// Run the function through the semaphore's execution stage with an admitted permit
///

View File

@@ -112,9 +112,9 @@ private:
reader_permit() = default;
reader_permit(shared_ptr<impl>);
explicit reader_permit(reader_concurrency_semaphore& semaphore, const schema* const schema, std::string_view op_name,
reader_resources base_resources);
reader_resources base_resources, db::timeout_clock::time_point timeout);
explicit reader_permit(reader_concurrency_semaphore& semaphore, const schema* const schema, sstring&& op_name,
reader_resources base_resources);
reader_resources base_resources, db::timeout_clock::time_point timeout);
void on_waiting();
void on_admission();
@@ -146,7 +146,7 @@ public:
reader_concurrency_semaphore& semaphore();
future<> maybe_wait_readmission(db::timeout_clock::time_point timeout);
future<> maybe_wait_readmission();
void consume(reader_resources res);
@@ -161,6 +161,10 @@ public:
reader_resources base_resources() const;
sstring description() const;
db::timeout_clock::time_point timeout() const noexcept;
void set_timeout(db::timeout_clock::time_point timeout) noexcept;
};
using reader_permit_opt = optimized_optional<reader_permit>;

View File

@@ -424,7 +424,8 @@ public:
// deadlock within the reader. Thirty minutes should be more than
// enough to read a single mutation fragment.
auto timeout = db::timeout_clock::now() + std::chrono::minutes(30);
return _reader(timeout).then_wrapped([this] (future<mutation_fragment_opt> f) {
_reader.set_timeout(timeout); // reset to db::no_timeout in pause()
return _reader().then_wrapped([this] (future<mutation_fragment_opt> f) {
try {
auto mfopt = f.get0();
++_reads_finished;
@@ -471,6 +472,7 @@ public:
}
void pause() {
_reader.set_timeout(db::no_timeout);
if (_reader_handle) {
_reader_handle->pause();
}
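
The repair reader above installs a generous thirty-minute deadline immediately before each read and resets the permit to db::no_timeout in pause(), so a reader parked between repair rounds is not left holding a stale deadline. A small sketch of that set/reset discipline (toy types, standard C++ only):

    #include <chrono>
    #include <optional>

    using timeout_clock = std::chrono::steady_clock;
    inline constexpr auto no_timeout = timeout_clock::time_point::max();

    struct toy_reader {
        timeout_clock::time_point timeout = no_timeout;
        void set_timeout(timeout_clock::time_point t) { timeout = t; }
        std::optional<int> read_fragment() { return 42; } // stand-in for a mutation fragment
    };

    // Give each read a long but finite deadline, so a wedged read cannot hang forever.
    std::optional<int> read_one(toy_reader& reader) {
        reader.set_timeout(timeout_clock::now() + std::chrono::minutes(30));
        return reader.read_fragment();
    }

    // While parked between uses the reader keeps no deadline at all.
    void pause(toy_reader& reader) {
        reader.set_timeout(no_timeout);
    }

    int main() {
        toy_reader reader;
        auto fragment = read_one(reader);
        pause(reader);
        (void)fragment;
    }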

View File

@@ -337,7 +337,7 @@ public:
}
};
future<> read_context::create_underlying(db::timeout_clock::time_point timeout) {
future<> read_context::create_underlying() {
if (_range_query) {
// FIXME: Singular-range mutation readers don't support fast_forward_to(), so need to use a wide range
// here in case the same reader will need to be fast forwarded later.
@@ -345,7 +345,7 @@ future<> read_context::create_underlying(db::timeout_clock::time_point timeout)
} else {
_sm_range = dht::partition_range::make_singular({dht::ring_position(*_key)});
}
return _underlying.fast_forward_to(std::move(_sm_range), *_underlying_snapshot, _phase, timeout).then([this] {
return _underlying.fast_forward_to(std::move(_sm_range), *_underlying_snapshot, _phase).then([this] {
_underlying_snapshot = {};
});
}
@@ -362,12 +362,12 @@ class single_partition_populating_reader final : public flat_mutation_reader::im
std::unique_ptr<read_context> _read_context;
flat_mutation_reader_opt _reader;
private:
future<> create_reader(db::timeout_clock::time_point timeout) {
future<> create_reader() {
auto src_and_phase = _cache.snapshot_of(_read_context->range().start()->value());
auto phase = src_and_phase.phase;
_read_context->enter_partition(_read_context->range().start()->value().as_decorated_key(), src_and_phase.snapshot, phase);
return _read_context->create_underlying(timeout).then([this, phase, timeout] {
return _read_context->underlying().underlying()(timeout).then([this, phase] (auto&& mfopt) {
return _read_context->create_underlying().then([this, phase] {
return _read_context->underlying().underlying()().then([this, phase] (auto&& mfopt) {
if (!mfopt) {
if (phase == _cache.phase_of(_read_context->range().start()->value())) {
_cache._read_section(_cache._tracker.region(), [this] {
@@ -398,17 +398,17 @@ public:
, _read_context(std::move(context))
{ }
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
virtual future<> fill_buffer() override {
if (!_reader) {
return create_reader(timeout).then([this, timeout] {
return create_reader().then([this] {
if (_end_of_stream) {
return make_ready_future<>();
}
return fill_buffer(timeout);
return fill_buffer();
});
}
return do_until([this] { return is_end_of_stream() || is_buffer_full(); }, [this, timeout] {
return fill_buffer_from(*_reader, timeout).then([this] (bool reader_finished) {
return do_until([this] { return is_end_of_stream() || is_buffer_full(); }, [this] {
return fill_buffer_from(*_reader).then([this] (bool reader_finished) {
if (reader_finished) {
_end_of_stream = true;
}
@@ -422,12 +422,12 @@ public:
}
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range&, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range&) override {
clear_buffer();
_end_of_stream = true;
return make_ready_future<>();
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> close() noexcept override {
@@ -512,8 +512,8 @@ public:
using read_result = std::tuple<flat_mutation_reader_opt, mutation_fragment_opt>;
future<read_result> operator()(db::timeout_clock::time_point timeout) {
return _reader.move_to_next_partition(timeout).then([this] (auto&& mfopt) mutable {
future<read_result> operator()() {
return _reader.move_to_next_partition().then([this] (auto&& mfopt) mutable {
{
if (!mfopt) {
return _cache._read_section(_cache._tracker.region(), [&] {
@@ -542,7 +542,7 @@ public:
});
}
future<> fast_forward_to(dht::partition_range&& pr, db::timeout_clock::time_point timeout) {
future<> fast_forward_to(dht::partition_range&& pr) {
if (!pr.start()) {
_last_key = row_cache::previous_entry_pointer();
} else if (!pr.start()->is_inclusive() && pr.start()->value().has_key()) {
@@ -552,7 +552,7 @@ public:
_last_key = {};
}
return _reader.fast_forward_to(std::move(pr), timeout);
return _reader.fast_forward_to(std::move(pr));
}
future<> close() noexcept {
return _reader.close();
@@ -583,7 +583,7 @@ private:
: dht::ring_position_view::min();
}
flat_mutation_reader_opt do_read_from_primary(db::timeout_clock::time_point timeout) {
flat_mutation_reader_opt do_read_from_primary() {
return _cache._read_section(_cache._tracker.region(), [this] () -> flat_mutation_reader_opt {
bool not_moved = true;
if (!_primary.valid()) {
@@ -629,18 +629,18 @@ private:
});
}
future<flat_mutation_reader_opt> read_from_primary(db::timeout_clock::time_point timeout) {
auto fro = do_read_from_primary(timeout);
future<flat_mutation_reader_opt> read_from_primary() {
auto fro = do_read_from_primary();
if (!_secondary_in_progress) {
return make_ready_future<flat_mutation_reader_opt>(std::move(fro));
}
return _secondary_reader.fast_forward_to(std::move(_secondary_range), timeout).then([this, timeout] {
return read_from_secondary(timeout);
return _secondary_reader.fast_forward_to(std::move(_secondary_range)).then([this] {
return read_from_secondary();
});
}
future<flat_mutation_reader_opt> read_from_secondary(db::timeout_clock::time_point timeout) {
return _secondary_reader(timeout).then([this, timeout] (range_populating_reader::read_result&& res) {
future<flat_mutation_reader_opt> read_from_secondary() {
return _secondary_reader().then([this] (range_populating_reader::read_result&& res) {
auto&& [fropt, ps] = res;
if (fropt) {
if (ps) {
@@ -649,15 +649,15 @@ private:
return make_ready_future<flat_mutation_reader_opt>(std::move(fropt));
} else {
_secondary_in_progress = false;
return read_from_primary(timeout);
return read_from_primary();
}
});
}
future<> read_next_partition(db::timeout_clock::time_point timeout) {
future<> read_next_partition() {
auto close_reader = _reader ? _reader->close() : make_ready_future<>();
return close_reader.then([this, timeout] {
return close_reader.then([this] {
_read_next_partition = false;
return (_secondary_in_progress ? read_from_secondary(timeout) : read_from_primary(timeout)).then([this] (auto&& fropt) {
return (_secondary_in_progress ? read_from_secondary() : read_from_primary()).then([this] (auto&& fropt) {
if (bool(fropt)) {
_reader = std::move(fropt);
} else {
@@ -678,12 +678,12 @@ public:
, _secondary_reader(cache, *_read_context)
, _lower_bound(range.start())
{ }
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
return do_until([this] { return is_end_of_stream() || is_buffer_full(); }, [this, timeout] {
virtual future<> fill_buffer() override {
return do_until([this] { return is_end_of_stream() || is_buffer_full(); }, [this] {
if (!_reader || _read_next_partition) {
return read_next_partition(timeout);
return read_next_partition();
} else {
return fill_buffer_from(*_reader, timeout).then([this] (bool reader_finished) {
return fill_buffer_from(*_reader).then([this] (bool reader_finished) {
if (reader_finished) {
_read_next_partition = true;
}
@@ -698,7 +698,7 @@ public:
}
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
clear_buffer();
_end_of_stream = false;
_secondary_in_progress = false;
@@ -708,7 +708,7 @@ public:
_lower_bound = pr.start();
return _reader->close();
}
virtual future<> fast_forward_to(position_range cr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range cr) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> close() noexcept override {

View File

@@ -3262,7 +3262,7 @@ future<> storage_service::load_and_stream(sstring ks_name, sstring cf_name,
bool failed = false;
try {
netw::messaging_service& ms = _messaging.local();
while (auto mf = co_await reader(db::no_timeout)) {
while (auto mf = co_await reader()) {
bool is_partition_start = mf->is_partition_start();
if (is_partition_start) {
++num_partitions_processed;

View File

@@ -806,7 +806,7 @@ public:
// The new range must not overlap with the previous range and
// must be after it.
//
std::optional<position_in_partition_view> fast_forward_to(position_range r, db::timeout_clock::time_point timeout) {
std::optional<position_in_partition_view> fast_forward_to(position_range r) {
sstlog.trace("mp_row_consumer_k_l {}: fast_forward_to({})", fmt::ptr(this), r);
_out_of_range = _is_mutation_end;
_fwd_end = std::move(r).end();
@@ -1340,7 +1340,7 @@ public:
_partition_finished = true;
}
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
return ensure_initialized().then([this, &pr] {
if (!is_initialized()) {
_end_of_stream = true;
@@ -1368,17 +1368,17 @@ public:
}
});
}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
virtual future<> fill_buffer() override {
if (_end_of_stream) {
return make_ready_future<>();
}
if (!is_initialized()) {
return initialize().then([this, timeout] {
return initialize().then([this] {
if (!is_initialized()) {
_end_of_stream = true;
return make_ready_future<>();
} else {
return fill_buffer(timeout);
return fill_buffer();
}
});
}
@@ -1424,11 +1424,11 @@ public:
return make_ready_future<>();
// If _ds is not created then next_partition() has no effect because there was no partition_start emitted yet.
}
virtual future<> fast_forward_to(position_range cr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range cr) override {
forward_buffer_to(cr.start());
if (!_partition_finished) {
_end_of_stream = false;
return advance_context(_consumer.fast_forward_to(std::move(cr), timeout));
return advance_context(_consumer.fast_forward_to(std::move(cr)));
} else {
_end_of_stream = true;
return make_ready_future<>();

View File

@@ -268,7 +268,7 @@ public:
_mf_filter.emplace(*_schema, _slice, pk, _fwd);
}
std::optional<position_in_partition_view> fast_forward_to(position_range r, db::timeout_clock::time_point) {
std::optional<position_in_partition_view> fast_forward_to(position_range r) {
if (!_mf_filter) {
_reader->on_out_of_clustering_range();
return {};
@@ -1452,7 +1452,7 @@ public:
_partition_finished = true;
}
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
return ensure_initialized().then([this, &pr] {
if (!is_initialized()) {
_end_of_stream = true;
@@ -1480,35 +1480,35 @@ public:
}
});
}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
virtual future<> fill_buffer() override {
if (_end_of_stream) {
return make_ready_future<>();
}
if (!is_initialized()) {
return initialize().then([this, timeout] {
return initialize().then([this] {
if (!is_initialized()) {
_end_of_stream = true;
return make_ready_future<>();
} else {
return fill_buffer(timeout);
return fill_buffer();
}
});
}
return do_until([this] { return is_end_of_stream() || is_buffer_full(); }, [this, timeout] {
return do_until([this] { return is_end_of_stream() || is_buffer_full(); }, [this] {
if (_partition_finished) {
maybe_timed_out(timeout);
maybe_timed_out();
if (_before_partition) {
return read_partition();
} else {
return read_next_partition();
}
} else {
return do_until([this] { return is_buffer_full() || _partition_finished || _end_of_stream; }, [this, timeout] {
return do_until([this] { return is_buffer_full() || _partition_finished || _end_of_stream; }, [this] {
_consumer.push_ready_fragments();
if (is_buffer_full() || _partition_finished || _end_of_stream) {
return make_ready_future<>();
}
maybe_timed_out(timeout);
maybe_timed_out();
return advance_context(_consumer.maybe_skip()).then([this] {
return _context->consume_input();
});
@@ -1538,11 +1538,11 @@ public:
return make_ready_future<>();
// If _ds is not created then next_partition() has no effect because there was no partition_start emitted yet.
}
virtual future<> fast_forward_to(position_range cr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range cr) override {
forward_buffer_to(cr.start());
if (!_partition_finished) {
_end_of_stream = false;
return advance_context(_consumer.fast_forward_to(std::move(cr), timeout));
return advance_context(_consumer.fast_forward_to(std::move(cr)));
} else {
_end_of_stream = true;
return make_ready_future<>();
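
maybe_timed_out() in the hunks above now consults the permit it already holds instead of a passed-in time point. A minimal stand-alone illustration of such a check (toy types; the real reader raises its own timeout exception rather than std::runtime_error):

    #include <chrono>
    #include <stdexcept>

    using timeout_clock = std::chrono::steady_clock;

    struct toy_permit {
        timeout_clock::time_point timeout;
    };

    struct toy_reader {
        toy_permit permit;
        bool timed_out = false;

        // Formerly took a db::timeout_clock::time_point parameter; the deadline now
        // comes from the permit the reader already holds.
        void maybe_timed_out() {
            if (timeout_clock::now() >= permit.timeout) {
                timed_out = true;
                throw std::runtime_error("reader timed out");
            }
        }
    };

    int main() {
        toy_reader reader{toy_permit{timeout_clock::now() + std::chrono::seconds(1)}};
        reader.maybe_timed_out(); // deadline not reached yet, so this is a no-op
    }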

View File

@@ -808,7 +808,7 @@ public:
// Initialize at the end of the constructor body, so we can delay making
// the semaphore used until we know that no more exceptions can be thrown.
_range_tombstones.emplace(range_tombstone_stream(_schema, _semaphore.make_tracking_only_permit(&s, "mx-writer")));
_range_tombstones.emplace(range_tombstone_stream(_schema, _semaphore.make_tracking_only_permit(&s, "mx-writer", db::no_timeout)));
}
~writer();

View File

@@ -166,14 +166,13 @@ template<typename T>
concept RowConsumer =
requires(T t,
const partition_key& pk,
position_range cr,
db::timeout_clock::time_point timeout) {
position_range cr) {
{ t.io_priority() } -> std::convertible_to<const io_priority_class&>;
{ t.is_mutation_end() } -> std::same_as<bool>;
{ t.setup_for_partition(pk) } -> std::same_as<void>;
{ t.push_ready_fragments() } -> std::same_as<void>;
{ t.maybe_skip() } -> std::same_as<std::optional<position_in_partition_view>>;
{ t.fast_forward_to(std::move(cr), timeout) } -> std::same_as<std::optional<position_in_partition_view>>;
{ t.fast_forward_to(std::move(cr)) } -> std::same_as<std::optional<position_in_partition_view>>;
};
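
The RowConsumer concept loses the db::timeout_clock::time_point from its requirements: fast_forward_to() must now be callable with just a position_range. A simplified C++20 concept in the same spirit, with toy placeholder types instead of the real sstables ones:

    #include <concepts>
    #include <optional>
    #include <utility>

    struct position_range {};
    struct position_in_partition_view {};

    template <typename T>
    concept ToyRowConsumer = requires(T t, position_range cr) {
        { t.is_mutation_end() } -> std::same_as<bool>;
        // No db::timeout_clock::time_point in the requirement any more.
        { t.fast_forward_to(std::move(cr)) }
            -> std::same_as<std::optional<position_in_partition_view>>;
    };

    struct consumer {
        bool is_mutation_end() const { return false; }
        std::optional<position_in_partition_view> fast_forward_to(position_range) { return {}; }
    };

    static_assert(ToyRowConsumer<consumer>);

    int main() {} // the static_assert above is the whole check
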
/*

View File

@@ -660,7 +660,7 @@ public:
return readers;
}
virtual std::vector<flat_mutation_reader> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual std::vector<flat_mutation_reader> fast_forward_to(const dht::partition_range& pr) override {
_pr = &pr;
auto pos = dht::ring_position_view::for_range_start(*_pr);

View File

@@ -1712,7 +1712,7 @@ future<> sstable::write_components(
return seastar::async([this, mr = std::move(mr), estimated_partitions, schema = std::move(schema), cfg, stats, &pc] () mutable {
auto close_mr = deferred_close(mr);
auto wr = get_writer(*schema, estimated_partitions, cfg, stats, pc);
mr.consume_in_thread(std::move(wr), db::no_timeout);
mr.consume_in_thread(std::move(wr));
}).finally([this] {
assert_large_data_handler_is_running();
});
@@ -1768,7 +1768,7 @@ future<> sstable::generate_summary(const io_priority_class& pc) {
auto s = summary_generator(_schema->get_partitioner(), _components->summary, _manager.config().sstable_summary_ratio());
auto ctx = make_lw_shared<index_consume_entry_context<summary_generator>>(
sem.make_tracking_only_permit(_schema.get(), "generate-summary"), s, trust_promoted_index::yes, *_schema, index_file, std::move(options), 0, index_size,
sem.make_tracking_only_permit(_schema.get(), "generate-summary", db::no_timeout), s, trust_promoted_index::yes, *_schema, index_file, std::move(options), 0, index_size,
(_version >= sstable_version_types::mc
? std::make_optional(get_clustering_values_fixed_lengths(get_serialization_header()))
: std::optional<column_values_fixed_lengths>{}));
@@ -2696,7 +2696,7 @@ future<bool> sstable::has_partition_key(const utils::hashed_key& hk, const dht::
std::exception_ptr ex;
auto sem = reader_concurrency_semaphore(reader_concurrency_semaphore::no_limits{}, "sstables::has_partition_key()");
try {
auto lh_index_ptr = std::make_unique<sstables::index_reader>(s, sem.make_tracking_only_permit(_schema.get(), s->get_filename()), default_priority_class(), tracing::trace_state_ptr(), use_caching::yes);
auto lh_index_ptr = std::make_unique<sstables::index_reader>(s, sem.make_tracking_only_permit(_schema.get(), s->get_filename(), db::no_timeout), default_priority_class(), tracing::trace_state_ptr(), use_caching::yes);
present = co_await lh_index_ptr->advance_lower_and_check_if_present(dk);
} catch (...) {
ex = std::current_exception();

View File

@@ -134,7 +134,7 @@ struct send_info {
};
future<> send_mutation_fragments(lw_shared_ptr<send_info> si) {
return si->reader.peek(db::no_timeout).then([si] (mutation_fragment* mfp) {
return si->reader.peek().then([si] (mutation_fragment* mfp) {
if (!mfp) {
// The reader contains no data
sslog.info("[Stream #{}] Skip sending ks={}, cf={}, reader contains no data, with new rpc streaming",
@@ -169,7 +169,7 @@ future<> send_mutation_fragments(lw_shared_ptr<send_info> si) {
mutation_fragment_stream_validator validator(*(si->reader.schema()));
return do_with(std::move(sink), std::move(validator), [si, got_error_from_peer] (rpc::sink<frozen_mutation_fragment, stream_mutation_fragments_cmd>& sink, mutation_fragment_stream_validator& validator) {
return repeat([&sink, &validator, si, got_error_from_peer] () mutable {
return si->reader(db::no_timeout).then([&sink, &validator, si, s = si->reader.schema(), got_error_from_peer] (mutation_fragment_opt mf) mutable {
return si->reader().then([&sink, &validator, si, s = si->reader.schema(), got_error_from_peer] (mutation_fragment_opt mf) mutable {
if (*got_error_from_peer) {
return make_exception_future<stop_iteration>(std::runtime_error("Got status error code from peer"));
}

View File

@@ -106,7 +106,7 @@ future<table::const_mutation_partition_ptr>
table::find_partition(schema_ptr s, reader_permit permit, const dht::decorated_key& key) const {
return do_with(dht::partition_range::make_singular(key), [s = std::move(s), permit = std::move(permit), this] (auto& range) mutable {
return with_closeable(this->make_reader(std::move(s), std::move(permit), range), [] (flat_mutation_reader& reader) {
return read_mutation_from_flat_mutation_reader(reader, db::no_timeout).then([] (mutation_opt&& mo) -> std::unique_ptr<const mutation_partition> {
return read_mutation_from_flat_mutation_reader(reader).then([] (mutation_opt&& mo) -> std::unique_ptr<const mutation_partition> {
if (!mo) {
return {};
}
@@ -271,7 +271,7 @@ table::for_all_partitions_slow(schema_ptr s, reader_permit permit, std::function
return do_with(iteration_state(std::move(s), std::move(permit), *this, std::move(func)), [] (iteration_state& is) {
return do_until([&is] { return is.done(); }, [&is] {
return read_mutation_from_flat_mutation_reader(is.reader, db::no_timeout).then([&is](mutation_opt&& mo) {
return read_mutation_from_flat_mutation_reader(is.reader).then([&is](mutation_opt&& mo) {
if (!mo) {
is.empty = true;
} else {
@@ -601,7 +601,7 @@ table::try_flush_memtable_to_sstable(lw_shared_ptr<memtable> old, sstable_write_
auto f = consumer(old->make_flush_reader(
old->schema(),
compaction_concurrency_semaphore().make_tracking_only_permit(old->schema().get(), "try_flush_memtable_to_sstable()"),
compaction_concurrency_semaphore().make_tracking_only_permit(old->schema().get(), "try_flush_memtable_to_sstable()", db::no_timeout),
service::get_local_memtable_flush_priority()));
// Switch back to default scheduling group for post-flush actions, to avoid them being staved by the memtable flush
@@ -1911,7 +1911,7 @@ write_memtable_to_sstable(memtable& mt, sstables::shared_sstable sst, sstables::
std::make_unique<reader_concurrency_semaphore>(reader_concurrency_semaphore::no_limits{}, "write_memtable_to_sstable"),
cfg,
[&mt, sst] (auto& monitor, auto& semaphore, auto& cfg) {
return write_memtable_to_sstable(semaphore->make_tracking_only_permit(mt.schema().get(), "mt_to_sst"), mt, std::move(sst), monitor, cfg)
return write_memtable_to_sstable(semaphore->make_tracking_only_permit(mt.schema().get(), "mt_to_sst", db::no_timeout), mt, std::move(sst), monitor, cfg)
.finally([&semaphore] {
return semaphore->stop();
});
@@ -2001,7 +2001,7 @@ table::query(schema_ptr s,
std::exception_ptr ex;
try {
co_await q.consume_page(query_result_builder(*s, qs.builder), qs.remaining_rows(), qs.remaining_partitions(), qs.cmd.timestamp, timeout,
co_await q.consume_page(query_result_builder(*s, qs.builder), qs.remaining_rows(), qs.remaining_partitions(), qs.cmd.timestamp,
class_config.max_memory_for_unlimited_query);
} catch (...) {
ex = std::current_exception();
@@ -2053,7 +2053,7 @@ table::mutation_query(schema_ptr s,
std::exception_ptr ex;
try {
auto rrb = reconcilable_result_builder(*s, cmd.slice, std::move(accounter));
auto r = co_await q.consume_page(std::move(rrb), cmd.get_row_limit(), cmd.partition_limit, cmd.timestamp, timeout, class_config.max_memory_for_unlimited_query);
auto r = co_await q.consume_page(std::move(rrb), cmd.get_row_limit(), cmd.partition_limit, cmd.timestamp, class_config.max_memory_for_unlimited_query);
if (!saved_querier || (!q.are_limits_reached() && !r.is_short_read())) {
co_await q.close();
@@ -2243,7 +2243,7 @@ future<row_locker::lock_holder> table::do_push_view_replica_updates(schema_ptr s
auto cr_ranges = co_await db::view::calculate_affected_clustering_ranges(*base, m.decorated_key(), m.partition(), views);
if (cr_ranges.empty()) {
tracing::trace(tr_state, "View updates do not require read-before-write");
co_await generate_and_propagate_view_updates(base, sem.make_tracking_only_permit(s.get(), "push-view-updates-1"), std::move(views), std::move(m), { }, std::move(tr_state), now);
co_await generate_and_propagate_view_updates(base, sem.make_tracking_only_permit(s.get(), "push-view-updates-1", timeout), std::move(views), std::move(m), { }, std::move(tr_state), now);
// In this case we are not doing a read-before-write, just a
// write, so no lock is needed.
co_return row_locker::lock_holder();
@@ -2268,7 +2268,7 @@ future<row_locker::lock_holder> table::do_push_view_replica_updates(schema_ptr s
co_await utils::get_local_injector().inject("table_push_view_replica_updates_timeout", timeout);
auto lock = co_await std::move(lockf);
auto pk = dht::partition_range::make_singular(m.decorated_key());
auto permit = sem.make_tracking_only_permit(base.get(), "push-view-updates-2");
auto permit = sem.make_tracking_only_permit(base.get(), "push-view-updates-2", timeout);
auto reader = source.make_reader(base, permit, pk, slice, io_priority, tr_state, streamed_mutation::forwarding::no, mutation_reader::forwarding::no);
co_await this->generate_and_propagate_view_updates(base, std::move(permit), std::move(views), std::move(m), std::move(reader), tr_state, now);
tracing::trace(tr_state, "View updates for {}.{} were generated and propagated", base->ks_name(), base->cf_name());

View File

@@ -47,7 +47,7 @@ static void broken_sst(sstring dir, unsigned long generation, schema_ptr s, sstr
sstable_ptr sstp = env.reusable_sst(s, dir, generation, version).get0();
auto r = sstp->make_reader(s, env.make_reader_permit(), query::full_partition_range, s->full_slice());
auto close_r = deferred_close(r);
r.consume(my_consumer{}, db::no_timeout).get();
r.consume(my_consumer{}).get();
BOOST_FAIL("expecting exception");
} catch (malformed_sstable_exception& e) {
auto ex_what = sstring(e.what());

View File

@@ -647,13 +647,13 @@ SEASTAR_TEST_CASE(test_commitlog_replay_invalid_key){
}
{
auto rd = mt.make_flat_reader(s, db.get_reader_concurrency_semaphore().make_tracking_only_permit(s.get(), "test"));
auto rd = mt.make_flat_reader(s, db.get_reader_concurrency_semaphore().make_tracking_only_permit(s.get(), "test", db::no_timeout));
auto close_rd = deferred_close(rd);
auto mopt = read_mutation_from_flat_mutation_reader(rd, db::no_timeout).get0();
auto mopt = read_mutation_from_flat_mutation_reader(rd).get0();
BOOST_REQUIRE(mopt);
mopt = {};
mopt = read_mutation_from_flat_mutation_reader(rd, db::no_timeout).get0();
mopt = read_mutation_from_flat_mutation_reader(rd).get0();
BOOST_REQUIRE(!mopt);
}
});

View File

@@ -97,10 +97,10 @@ static size_t count_fragments(mutation m) {
auto r = flat_mutation_reader_from_mutations(semaphore.make_permit(), {m});
auto close_reader = deferred_close(r);
size_t res = 0;
auto mfopt = r(db::no_timeout).get0();
auto mfopt = r().get0();
while (bool(mfopt)) {
++res;
mfopt = r(db::no_timeout).get0();
mfopt = r().get0();
}
return res;
}
@@ -113,7 +113,7 @@ SEASTAR_TEST_CASE(test_flat_mutation_reader_consume_single_partition) {
for (size_t depth = 1; depth <= fragments_in_m + 1; ++depth) {
auto r = flat_mutation_reader_from_mutations(semaphore.make_permit(), {m});
auto close_reader = deferred_close(r);
auto result = r.consume(mock_consumer(*m.schema(), semaphore.make_permit(), depth), db::no_timeout).get0();
auto result = r.consume(mock_consumer(*m.schema(), semaphore.make_permit(), depth)).get0();
BOOST_REQUIRE(result._consume_end_of_stream_called);
BOOST_REQUIRE_EQUAL(1, result._consume_new_partition_call_count);
BOOST_REQUIRE_EQUAL(1, result._consume_end_of_partition_call_count);
@@ -137,18 +137,18 @@ SEASTAR_TEST_CASE(test_flat_mutation_reader_consume_two_partitions) {
for (size_t depth = 1; depth < fragments_in_m1; ++depth) {
auto r = flat_mutation_reader_from_mutations(semaphore.make_permit(), {m1, m2});
auto close_r = deferred_close(r);
auto result = r.consume(mock_consumer(*m1.schema(), semaphore.make_permit(), depth), db::no_timeout).get0();
auto result = r.consume(mock_consumer(*m1.schema(), semaphore.make_permit(), depth)).get0();
BOOST_REQUIRE(result._consume_end_of_stream_called);
BOOST_REQUIRE_EQUAL(1, result._consume_new_partition_call_count);
BOOST_REQUIRE_EQUAL(1, result._consume_end_of_partition_call_count);
BOOST_REQUIRE_EQUAL(m1.partition().partition_tombstone() ? 1 : 0, result._consume_tombstone_call_count);
auto r2 = flat_mutation_reader_from_mutations(semaphore.make_permit(), {m1, m2});
auto close_r2 = deferred_close(r2);
auto start = r2(db::no_timeout).get0();
auto start = r2().get0();
BOOST_REQUIRE(start);
BOOST_REQUIRE(start->is_partition_start());
for (auto& mf : result._fragments) {
auto mfopt = r2(db::no_timeout).get0();
auto mfopt = r2().get0();
BOOST_REQUIRE(mfopt);
BOOST_REQUIRE(mf.equal(*m1.schema(), *mfopt));
}
@@ -156,7 +156,7 @@ SEASTAR_TEST_CASE(test_flat_mutation_reader_consume_two_partitions) {
for (size_t depth = fragments_in_m1; depth < fragments_in_m1 + fragments_in_m2 + 1; ++depth) {
auto r = flat_mutation_reader_from_mutations(semaphore.make_permit(), {m1, m2});
auto close_r = deferred_close(r);
auto result = r.consume(mock_consumer(*m1.schema(), semaphore.make_permit(), depth), db::no_timeout).get0();
auto result = r.consume(mock_consumer(*m1.schema(), semaphore.make_permit(), depth)).get0();
BOOST_REQUIRE(result._consume_end_of_stream_called);
BOOST_REQUIRE_EQUAL(2, result._consume_new_partition_call_count);
BOOST_REQUIRE_EQUAL(2, result._consume_end_of_partition_call_count);
@@ -170,14 +170,14 @@ SEASTAR_TEST_CASE(test_flat_mutation_reader_consume_two_partitions) {
BOOST_REQUIRE_EQUAL(tombstones_count, result._consume_tombstone_call_count);
auto r2 = flat_mutation_reader_from_mutations(semaphore.make_permit(), {m1, m2});
auto close_r2 = deferred_close(r2);
auto start = r2(db::no_timeout).get0();
auto start = r2().get0();
BOOST_REQUIRE(start);
BOOST_REQUIRE(start->is_partition_start());
for (auto& mf : result._fragments) {
auto mfopt = r2(db::no_timeout).get0();
auto mfopt = r2().get0();
BOOST_REQUIRE(mfopt);
if (mfopt->is_partition_start() || mfopt->is_end_of_partition()) {
mfopt = r2(db::no_timeout).get0();
mfopt = r2().get0();
}
BOOST_REQUIRE(mfopt);
BOOST_REQUIRE(mf.equal(*m1.schema(), *mfopt));
@@ -294,10 +294,10 @@ SEASTAR_TEST_CASE(test_fragmenting_and_freezing) {
SEASTAR_THREAD_TEST_CASE(test_flat_mutation_reader_move_buffer_content_to) {
struct dummy_reader_impl : public flat_mutation_reader::impl {
using flat_mutation_reader::impl::impl;
virtual future<> fill_buffer(db::timeout_clock::time_point) override { return make_ready_future<>(); }
virtual future<> fill_buffer() override { return make_ready_future<>(); }
virtual future<> next_partition() { return make_ready_future<>(); }
virtual future<> fast_forward_to(const dht::partition_range&, db::timeout_clock::time_point) override { return make_ready_future<>(); }
virtual future<> fast_forward_to(position_range, db::timeout_clock::time_point) override { return make_ready_future<>(); }
virtual future<> fast_forward_to(const dht::partition_range&) override { return make_ready_future<>(); }
virtual future<> fast_forward_to(position_range) override { return make_ready_future<>(); }
virtual future<> close() noexcept override { return make_ready_future<>(); };
};
@@ -323,7 +323,7 @@ SEASTAR_THREAD_TEST_CASE(test_flat_mutation_reader_move_buffer_content_to) {
auto dummy_impl = std::make_unique<dummy_reader_impl>(s.schema(), semaphore.make_permit());
reader.set_max_buffer_size(max_buffer_size);
reader.fill_buffer(db::no_timeout).get();
reader.fill_buffer().get();
BOOST_REQUIRE(reader.is_buffer_full());
auto expected_buf_size = reader.buffer_size();
@@ -333,7 +333,7 @@ SEASTAR_THREAD_TEST_CASE(test_flat_mutation_reader_move_buffer_content_to) {
BOOST_CHECK_EQUAL(reader.buffer_size(), 0);
BOOST_CHECK_EQUAL(dummy_impl->buffer_size(), expected_buf_size);
reader.fill_buffer(db::no_timeout).get();
reader.fill_buffer().get();
BOOST_REQUIRE(!reader.is_buffer_empty());
expected_buf_size += reader.buffer_size();
@@ -344,7 +344,7 @@ SEASTAR_THREAD_TEST_CASE(test_flat_mutation_reader_move_buffer_content_to) {
BOOST_CHECK_EQUAL(dummy_impl->buffer_size(), expected_buf_size);
while (!reader.is_end_of_stream()) {
reader.fill_buffer(db::no_timeout).get();
reader.fill_buffer().get();
expected_buf_size += reader.buffer_size();
reader.move_buffer_content_to(*dummy_impl);
@@ -355,7 +355,7 @@ SEASTAR_THREAD_TEST_CASE(test_flat_mutation_reader_move_buffer_content_to) {
auto dummy_reader = flat_mutation_reader(std::move(dummy_impl));
auto close_dummy_reader = deferred_close(dummy_reader);
auto mut_new = read_mutation_from_flat_mutation_reader(dummy_reader, db::no_timeout).get0();
auto mut_new = read_mutation_from_flat_mutation_reader(dummy_reader).get0();
assert_that(mut_new)
.has_mutation()
@@ -561,14 +561,14 @@ void test_flat_stream(schema_ptr s, std::vector<mutation> muts, reversed_partiti
auto consume_fn = [&] (flat_mutation_reader& fmr, flat_stream_consumer fsc) {
if (thread) {
assert(bool(!reversed));
return fmr.consume_in_thread(std::move(fsc), db::no_timeout);
return fmr.consume_in_thread(std::move(fsc));
} else {
if (reversed) {
return with_closeable(make_reversing_reader(fmr, query::max_result_size(size_t(1) << 20)), [fsc = std::move(fsc)] (flat_mutation_reader& reverse_reader) mutable {
return reverse_reader.consume(std::move(fsc), db::no_timeout);
return reverse_reader.consume(std::move(fsc));
}).get0();
}
return fmr.consume(std::move(fsc), db::no_timeout).get0();
return fmr.consume(std::move(fsc)).get0();
}
};
@@ -618,7 +618,7 @@ void test_flat_stream(schema_ptr s, std::vector<mutation> muts, reversed_partiti
testlog.info("Consume all, filtered");
auto fmr = flat_mutation_reader_from_mutations(semaphore.make_permit(), muts);
auto close_fmr = deferred_close(fmr);
auto muts2 = fmr.consume_in_thread(flat_stream_consumer(s, semaphore.make_permit(), reversed), std::move(filter), db::no_timeout);
auto muts2 = fmr.consume_in_thread(flat_stream_consumer(s, semaphore.make_permit(), reversed), std::move(filter));
BOOST_REQUIRE_EQUAL(muts.size() / 2, muts2.size());
for (auto j = size_t(1); j < muts.size(); j += 2) {
BOOST_REQUIRE_EQUAL(muts[j], muts2[j / 2]);
@@ -711,8 +711,8 @@ SEASTAR_TEST_CASE(test_abandoned_flat_mutation_reader_from_mutation) {
for_each_mutation([&] (const mutation& m) {
auto rd = flat_mutation_reader_from_mutations(semaphore.make_permit(), {mutation(m)});
auto close_rd = deferred_close(rd);
rd(db::no_timeout).get();
rd(db::no_timeout).get();
rd().get();
rd().get();
// We rely on AddressSanitizer telling us if nothing was leaked.
});
});
@@ -769,7 +769,7 @@ SEASTAR_THREAD_TEST_CASE(test_mutation_reader_from_fragments_as_mutation_source)
rd.consume_pausable([&fragments] (mutation_fragment mf) {
fragments.emplace_back(std::move(mf));
return stop_iteration::no;
}, db::no_timeout).get();
}).get();
return fragments;
};
@@ -821,7 +821,7 @@ SEASTAR_THREAD_TEST_CASE(test_reverse_reader_memory_limit) {
auto close_reverse_reader = deferred_close(reverse_reader);
try {
reverse_reader.consume(phony_consumer{}, db::no_timeout).get();
reverse_reader.consume(phony_consumer{}).get();
BOOST_FAIL("No exception thrown for reversing overly big partition");
} catch (const std::runtime_error& e) {
testlog.info("Got exception with message: {}", e.what());

View File

@@ -111,7 +111,7 @@ SEASTAR_THREAD_TEST_CASE(test_frozen_mutation_fragment) {
rd.consume_pausable([&] (mutation_fragment mf) {
mfs.emplace_back(std::move(mf));
return stop_iteration::no;
}, db::no_timeout).get();
}).get();
auto permit = semaphore.make_permit();
for (auto&& mf : mfs) {

View File

@@ -19,6 +19,7 @@
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#include "db/timeout_clock.hh"
#define BOOST_TEST_MODULE core
#include <boost/test/unit_test.hpp>
@@ -74,7 +75,7 @@ BOOST_AUTO_TEST_CASE(bytes_view_hasher_sanity_check) {
BOOST_AUTO_TEST_CASE(mutation_fragment_sanity_check) {
reader_concurrency_semaphore semaphore(reader_concurrency_semaphore::no_limits{}, __FILE__);
simple_schema s;
auto permit = semaphore.make_tracking_only_permit(s.schema().get(), "test");
auto permit = semaphore.make_tracking_only_permit(s.schema().get(), "test", db::no_timeout);
gc_clock::time_point ts(gc_clock::duration(1234567890000));
auto check_hash = [&] (const mutation_fragment& mf, uint64_t expected) {

View File

@@ -109,7 +109,7 @@ SEASTAR_TEST_CASE(test_memtable_with_many_versions_conforms_to_mutation_source)
// Create reader so that each mutation is in a separate version
flat_mutation_reader rd = mt->make_flat_reader(s, semaphore.make_permit(), ranges_storage.emplace_back(dht::partition_range::make_singular(m.decorated_key())));
rd.set_max_buffer_size(1);
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
readers.emplace_back(std::move(rd));
}
@@ -269,7 +269,7 @@ SEASTAR_TEST_CASE(test_virtual_dirty_accounting_on_flush) {
flat_mutation_reader_opt rd1 = mt->make_flat_reader(s, semaphore.make_permit());
auto close_rd1 = deferred_close(*rd1);
rd1->set_max_buffer_size(1);
rd1->fill_buffer(db::no_timeout).get();
rd1->fill_buffer().get();
// Override large cell value with a short one
{
@@ -289,7 +289,7 @@ SEASTAR_TEST_CASE(test_virtual_dirty_accounting_on_flush) {
flush_reader_check.produces_partition(current_ring[1]);
virtual_dirty_values.push_back(mgr.virtual_dirty_memory());
while ((*rd1)(db::no_timeout).get0()) ;
while ((*rd1)().get0()) ;
close_rd1.close_now();
logalloc::shard_tracker().full_compaction();
@@ -409,17 +409,17 @@ SEASTAR_TEST_CASE(test_segment_migration_during_flush) {
auto close_rd = deferred_close(rd);
for (int i = 0; i < partitions; ++i) {
auto mfopt = rd(db::no_timeout).get0();
auto mfopt = rd().get0();
BOOST_REQUIRE(bool(mfopt));
BOOST_REQUIRE(mfopt->is_partition_start());
while (!mfopt->is_end_of_partition()) {
logalloc::shard_tracker().full_compaction();
mfopt = rd(db::no_timeout).get0();
mfopt = rd().get0();
}
virtual_dirty_values.push_back(mgr.virtual_dirty_memory());
}
BOOST_REQUIRE(!rd(db::no_timeout).get0());
BOOST_REQUIRE(!rd().get0());
std::reverse(virtual_dirty_values.begin(), virtual_dirty_values.end());
BOOST_REQUIRE(std::is_sorted(virtual_dirty_values.begin(), virtual_dirty_values.end()));
@@ -533,8 +533,8 @@ SEASTAR_TEST_CASE(test_hash_is_cached) {
{
auto rd = mt->make_flat_reader(s, semaphore.make_permit());
auto close_rd = deferred_close(rd);
rd(db::no_timeout).get0()->as_partition_start();
clustering_row row = std::move(*rd(db::no_timeout).get0()).as_clustering_row();
rd().get0()->as_partition_start();
clustering_row row = std::move(*rd().get0()).as_clustering_row();
BOOST_REQUIRE(!row.cells().cell_hash_for(0));
}
@@ -543,16 +543,16 @@ SEASTAR_TEST_CASE(test_hash_is_cached) {
slice.options.set<query::partition_slice::option::with_digest>();
auto rd = mt->make_flat_reader(s, semaphore.make_permit(), query::full_partition_range, slice);
auto close_rd = deferred_close(rd);
rd(db::no_timeout).get0()->as_partition_start();
clustering_row row = std::move(*rd(db::no_timeout).get0()).as_clustering_row();
rd().get0()->as_partition_start();
clustering_row row = std::move(*rd().get0()).as_clustering_row();
BOOST_REQUIRE(row.cells().cell_hash_for(0));
}
{
auto rd = mt->make_flat_reader(s, semaphore.make_permit());
auto close_rd = deferred_close(rd);
rd(db::no_timeout).get0()->as_partition_start();
clustering_row row = std::move(*rd(db::no_timeout).get0()).as_clustering_row();
rd().get0()->as_partition_start();
clustering_row row = std::move(*rd().get0()).as_clustering_row();
BOOST_REQUIRE(row.cells().cell_hash_for(0));
}
@@ -562,8 +562,8 @@ SEASTAR_TEST_CASE(test_hash_is_cached) {
{
auto rd = mt->make_flat_reader(s, semaphore.make_permit());
auto close_rd = deferred_close(rd);
rd(db::no_timeout).get0()->as_partition_start();
clustering_row row = std::move(*rd(db::no_timeout).get0()).as_clustering_row();
rd().get0()->as_partition_start();
clustering_row row = std::move(*rd().get0()).as_clustering_row();
BOOST_REQUIRE(!row.cells().cell_hash_for(0));
}
@@ -572,16 +572,16 @@ SEASTAR_TEST_CASE(test_hash_is_cached) {
slice.options.set<query::partition_slice::option::with_digest>();
auto rd = mt->make_flat_reader(s, semaphore.make_permit(), query::full_partition_range, slice);
auto close_rd = deferred_close(rd);
rd(db::no_timeout).get0()->as_partition_start();
clustering_row row = std::move(*rd(db::no_timeout).get0()).as_clustering_row();
rd().get0()->as_partition_start();
clustering_row row = std::move(*rd().get0()).as_clustering_row();
BOOST_REQUIRE(row.cells().cell_hash_for(0));
}
{
auto rd = mt->make_flat_reader(s, semaphore.make_permit());
auto close_rd = deferred_close(rd);
rd(db::no_timeout).get0()->as_partition_start();
clustering_row row = std::move(*rd(db::no_timeout).get0()).as_clustering_row();
rd().get0()->as_partition_start();
clustering_row row = std::move(*rd().get0()).as_clustering_row();
BOOST_REQUIRE(row.cells().cell_hash_for(0));
}
});

View File

@@ -118,7 +118,7 @@ SEASTAR_TEST_CASE(test_mutation_merger_conforms_to_mutation_source) {
}
auto rd = flat_mutation_reader_from_mutations(semaphore.make_permit(), {m});
auto close_rd = deferred_close(rd);
rd.consume(fragment_scatterer{muts}, db::no_timeout).get();
rd.consume(fragment_scatterer{muts}).get();
for (int i = 0; i < n; ++i) {
memtables[i]->apply(std::move(muts[i]));
}
@@ -409,7 +409,7 @@ SEASTAR_TEST_CASE(test_schema_upgrader_is_equivalent_with_mutation_upgrade) {
auto reader = transform(flat_mutation_reader_from_mutations(semaphore.make_permit(), {m1}), schema_upgrader(m2.schema()));
auto close_reader = deferred_close(reader);
auto from_upgrader = read_mutation_from_flat_mutation_reader(reader, db::no_timeout).get0();
auto from_upgrader = read_mutation_from_flat_mutation_reader(reader).get0();
auto regular = m1;
regular.upgrade(m2.schema());
@@ -427,7 +427,7 @@ SEASTAR_THREAD_TEST_CASE(test_mutation_fragment_mutate_exception_safety) {
reader_concurrency_semaphore sem(1, 100, get_name());
auto stop_sem = deferred_stop(sem);
auto permit = sem.make_tracking_only_permit(s.schema().get(), get_name());
auto permit = sem.make_tracking_only_permit(s.schema().get(), get_name(), db::no_timeout);
const auto available_res = sem.available_resources();
const sstring val(1024, 'a');

View File

@@ -91,7 +91,7 @@ static reconcilable_result mutation_query(schema_ptr s, reader_permit permit, co
auto querier = query::mutation_querier(source, s, std::move(permit), range, slice, service::get_local_sstable_query_read_priority(), {});
auto close_querier = deferred_close(querier);
auto rrb = reconcilable_result_builder(*s, slice, make_accounter());
return querier.consume_page(std::move(rrb), row_limit, partition_limit, query_time, db::no_timeout,
return querier.consume_page(std::move(rrb), row_limit, partition_limit, query_time,
query::max_result_size(std::numeric_limits<uint64_t>::max())).get();
}
@@ -541,7 +541,7 @@ static void data_query(schema_ptr s, reader_permit permit, const mutation_source
auto querier = query::data_querier(source, s, std::move(permit), range, slice, service::get_local_sstable_query_read_priority(), {});
auto close_querier = deferred_close(querier);
auto qrb = query_result_builder(*s, builder);
querier.consume_page(std::move(qrb), std::numeric_limits<uint32_t>::max(), std::numeric_limits<uint32_t>::max(), gc_clock::now(), db::no_timeout,
querier.consume_page(std::move(qrb), std::numeric_limits<uint32_t>::max(), std::numeric_limits<uint32_t>::max(), gc_clock::now(),
query::max_result_size(std::numeric_limits<uint64_t>::max())).get();
}

View File

@@ -766,7 +766,7 @@ public:
}
return readers;
}
virtual std::vector<flat_mutation_reader> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual std::vector<flat_mutation_reader> fast_forward_to(const dht::partition_range& pr) override {
_pr = pr;
return create_new_readers(dht::ring_position_view::for_range_start(_pr));
}
@@ -994,11 +994,11 @@ SEASTAR_TEST_CASE(test_fast_forwarding_combined_reader_is_consistent_with_slicin
}
result.partition().apply(*s, std::move(mf));
return stop_iteration::no;
}, db::no_timeout).get();
}).get();
for (auto&& range : ranges) {
auto prange = position_range(range);
rd.fast_forward_to(prange, db::no_timeout).get();
rd.fast_forward_to(prange).get();
rd.consume_pausable([&](mutation_fragment&& mf) {
if (!mf.relevant_for_range(*s, prange.start())) {
BOOST_FAIL(format("Received fragment which is not relevant for range: {}, range: {}", mutation_fragment::printer(*s, mf), prange));
@@ -1009,14 +1009,14 @@ SEASTAR_TEST_CASE(test_fast_forwarding_combined_reader_is_consistent_with_slicin
}
result.partition().apply(*s, std::move(mf));
return stop_iteration::no;
}, db::no_timeout).get();
}).get();
}
assert_that(result).is_equal_to(expected, ranges);
};
check_next_partition(combined[0]);
rd.fast_forward_to(dht::partition_range::make_singular(keys[2]), db::no_timeout).get();
rd.fast_forward_to(dht::partition_range::make_singular(keys[2])).get();
check_next_partition(combined[2]);
});
}
@@ -1062,7 +1062,7 @@ SEASTAR_TEST_CASE(test_combined_reader_slicing_with_overlapping_range_tombstones
}
result.partition().apply(*s, std::move(mf));
return stop_iteration::no;
}, db::no_timeout).get();
}).get();
assert_that(result).is_equal_to(m1 + m2, query::clustering_row_ranges({range}));
}
@@ -1086,9 +1086,9 @@ SEASTAR_TEST_CASE(test_combined_reader_slicing_with_overlapping_range_tombstones
BOOST_REQUIRE(!mf.position().has_clustering_key());
result.partition().apply(*s, std::move(mf));
return stop_iteration::no;
}, db::no_timeout).get();
}).get();
rd.fast_forward_to(prange, db::no_timeout).get();
rd.fast_forward_to(prange).get();
position_in_partition last_pos = position_in_partition::before_all_clustered_rows();
auto consume_clustered = [&] (mutation_fragment&& mf) {
@@ -1101,9 +1101,9 @@ SEASTAR_TEST_CASE(test_combined_reader_slicing_with_overlapping_range_tombstones
return stop_iteration::no;
};
rd.consume_pausable(consume_clustered, db::no_timeout).get();
rd.fast_forward_to(position_range(prange.end(), position_in_partition::after_all_clustered_rows()), db::no_timeout).get();
rd.consume_pausable(consume_clustered, db::no_timeout).get();
rd.consume_pausable(consume_clustered).get();
rd.fast_forward_to(position_range(prange.end(), position_in_partition::after_all_clustered_rows())).get();
rd.consume_pausable(consume_clustered).get();
assert_that(result).is_equal_to(m1 + m2);
}
@@ -1132,7 +1132,7 @@ SEASTAR_TEST_CASE(test_combined_mutation_source_is_a_mutation_source) {
mf_m.partition().apply(*s, mf);
memtables[source_index++ % memtables.size()]->apply(mf_m);
return stop_iteration::no;
}, db::no_timeout).get();
}).get();
}
std::vector<mutation_source> sources;
@@ -1652,7 +1652,7 @@ public:
_ctrl.destroyed = true;
}
virtual future<> fill_buffer(db::timeout_clock::time_point) override {
virtual future<> fill_buffer() override {
if (is_end_of_stream() || !is_buffer_empty()) {
return make_ready_future<>();
}
@@ -1680,13 +1680,13 @@ public:
abort();
}
virtual future<> next_partition() override { return make_ready_future<>(); }
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
++_ctrl.fast_forward_to;
clear_buffer();
_end_of_stream = true;
return make_ready_future<>();
}
virtual future<> fast_forward_to(position_range, db::timeout_clock::time_point) override {
virtual future<> fast_forward_to(position_range) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> close() noexcept override {
@@ -1743,7 +1743,7 @@ SEASTAR_THREAD_TEST_CASE(test_stopping_reader_with_pending_read_ahead) {
make_reader_permit(env),
std::move(remote_reader));
reader.fill_buffer(db::no_timeout).get();
reader.fill_buffer().get();
BOOST_REQUIRE(!reader.is_buffer_empty());
@@ -1997,12 +1997,12 @@ SEASTAR_THREAD_TEST_CASE(test_multishard_combining_reader_destroyed_with_pending
auto&& remote_controls = reader_sharder_remote_controls__.remote_controls;
// This will read shard 0's buffer only
reader.fill_buffer(db::no_timeout).get();
reader.fill_buffer().get();
BOOST_REQUIRE(reader.is_buffer_full());
reader.detach_buffer();
// This will move to shard 1 and trigger read-ahead on shard 2
reader.fill_buffer(db::no_timeout).get();
reader.fill_buffer().get();
BOOST_REQUIRE(reader.is_buffer_full());
// Check that shard with read-ahead is indeed blocked.
@@ -2055,11 +2055,11 @@ SEASTAR_THREAD_TEST_CASE(test_multishard_combining_reader_fast_forwarded_with_pe
auto&& remote_controls = reader_sharder_remote_controls_pr.remote_controls;
auto&& pr = reader_sharder_remote_controls_pr.pr;
reader.fill_buffer(db::no_timeout).get();
reader.fill_buffer().get();
BOOST_REQUIRE(reader.is_buffer_full());
reader.detach_buffer();
reader.fill_buffer(db::no_timeout).get();
reader.fill_buffer().get();
BOOST_REQUIRE(reader.is_buffer_full());
reader.detach_buffer();
@@ -2074,7 +2074,7 @@ SEASTAR_THREAD_TEST_CASE(test_multishard_combining_reader_fast_forwarded_with_pe
++end_token._data;
auto next_pr = dht::partition_range::make_starting_with(dht::ring_position::starting_at(end_token));
auto fut = reader.fast_forward_to(next_pr, db::no_timeout);
auto fut = reader.fast_forward_to(next_pr);
smp::submit_to(multishard_reader_for_read_ahead::blocked_shard,
[control = remote_controls.at(multishard_reader_for_read_ahead::blocked_shard).get()] {
@@ -2095,7 +2095,7 @@ SEASTAR_THREAD_TEST_CASE(test_multishard_combining_reader_fast_forwarded_with_pe
BOOST_REQUIRE(all_shard_fast_forwarded);
reader.fill_buffer(db::no_timeout).get();
reader.fill_buffer().get();
BOOST_REQUIRE(reader.is_buffer_empty());
BOOST_REQUIRE(reader.is_end_of_stream());
@@ -2249,7 +2249,7 @@ SEASTAR_THREAD_TEST_CASE(test_multishard_combining_reader_non_strictly_monotonic
auto close_rd = deferred_close(rd);
rd.set_max_buffer_size(max_buffer_size);
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
auto mf = rd.pop_mutation_fragment();
BOOST_REQUIRE_EQUAL(mf.mutation_fragment_kind(), mutation_fragment::kind::partition_start);
@@ -2264,7 +2264,7 @@ SEASTAR_THREAD_TEST_CASE(test_multishard_combining_reader_non_strictly_monotonic
BOOST_REQUIRE(mf.as_range_tombstone().start.equal(*s.schema(), ckey));
}
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
while (!rd.is_buffer_empty()) {
mf = rd.pop_mutation_fragment();
@@ -2272,7 +2272,7 @@ SEASTAR_THREAD_TEST_CASE(test_multishard_combining_reader_non_strictly_monotonic
BOOST_REQUIRE(mf.as_range_tombstone().start.equal(*s.schema(), ckey));
}
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
BOOST_REQUIRE(!rd.is_buffer_empty());
@@ -2305,7 +2305,7 @@ SEASTAR_THREAD_TEST_CASE(test_multishard_combining_reader_non_strictly_monotonic
auto fragments = make_fragments_with_non_monotonic_positions(s, mut_permit, s.make_pkey(pk), max_buffer_size, tombstone_deletion_time);
auto rd = make_flat_mutation_reader_from_fragments(s.schema(), mut_permit, std::move(fragments));
auto close_rd = deferred_close(rd);
auto mut_opt = read_mutation_from_flat_mutation_reader(rd, db::no_timeout).get0();
auto mut_opt = read_mutation_from_flat_mutation_reader(rd).get0();
BOOST_REQUIRE(mut_opt);
assert_that(make_multishard_combining_reader(
@@ -2383,12 +2383,12 @@ SEASTAR_THREAD_TEST_CASE(test_multishard_streaming_reader) {
auto close_reference_reader = deferred_close(reference_reader);
std::vector<mutation> reference_muts;
while (auto mut_opt = read_mutation_from_flat_mutation_reader(reference_reader, db::no_timeout).get0()) {
while (auto mut_opt = read_mutation_from_flat_mutation_reader(reference_reader).get0()) {
reference_muts.push_back(std::move(*mut_opt));
}
std::vector<mutation> tested_muts;
while (auto mut_opt = read_mutation_from_flat_mutation_reader(tested_reader, db::no_timeout).get0()) {
while (auto mut_opt = read_mutation_from_flat_mutation_reader(tested_reader).get0()) {
tested_muts.push_back(std::move(*mut_opt));
}
@@ -2415,7 +2415,7 @@ SEASTAR_THREAD_TEST_CASE(test_queue_reader) {
auto read_all = [] (flat_mutation_reader& reader, std::vector<mutation>& muts) {
return async([&reader, &muts] {
auto close_reader = deferred_close(reader);
while (auto mut_opt = read_mutation_from_flat_mutation_reader(reader, db::no_timeout).get0()) {
while (auto mut_opt = read_mutation_from_flat_mutation_reader(reader).get0()) {
muts.emplace_back(std::move(*mut_opt));
}
});
@@ -2425,7 +2425,7 @@ SEASTAR_THREAD_TEST_CASE(test_queue_reader) {
return async([&] {
auto reader = flat_mutation_reader_from_mutations(semaphore.make_permit(), muts);
auto close_reader = deferred_close(reader);
while (auto mf_opt = reader(db::no_timeout).get0()) {
while (auto mf_opt = reader().get0()) {
handle.push(std::move(*mf_opt)).get();
}
handle.push_end_of_stream();
@@ -2452,12 +2452,12 @@ SEASTAR_THREAD_TEST_CASE(test_queue_reader) {
auto& reader = std::get<0>(p);
auto& handle = std::get<1>(p);
auto close_reader = deferred_close(reader);
auto fill_buffer_fut = reader.fill_buffer(db::no_timeout);
auto fill_buffer_fut = reader.fill_buffer();
auto expected_reader = flat_mutation_reader_from_mutations(semaphore.make_permit(), expected_muts);
auto close_expected_reader = deferred_close(expected_reader);
handle.push(std::move(*expected_reader(db::no_timeout).get0())).get();
handle.push(std::move(*expected_reader().get0())).get();
BOOST_REQUIRE(!fill_buffer_fut.available());
@@ -2481,14 +2481,14 @@ SEASTAR_THREAD_TEST_CASE(test_queue_reader) {
auto push_fut = make_ready_future<>();
while (push_fut.available()) {
push_fut = handle.push(std::move(*expected_reader(db::no_timeout).get0()));
push_fut = handle.push(std::move(*expected_reader().get0()));
}
BOOST_REQUIRE(!push_fut.available());
handle.abort(std::make_exception_ptr<std::runtime_error>(std::runtime_error("error")));
BOOST_REQUIRE_THROW(reader.fill_buffer(db::no_timeout).get(), std::runtime_error);
BOOST_REQUIRE_THROW(reader.fill_buffer().get(), std::runtime_error);
BOOST_REQUIRE_THROW(push_fut.get(), std::runtime_error);
BOOST_REQUIRE(!reader.is_end_of_stream());
}
@@ -2498,7 +2498,7 @@ SEASTAR_THREAD_TEST_CASE(test_queue_reader) {
auto p = make_queue_reader(gen.schema(), semaphore.make_permit());
auto& reader = std::get<0>(p);
auto& handle = std::get<1>(p);
auto fill_buffer_fut = reader.fill_buffer(db::no_timeout);
auto fill_buffer_fut = reader.fill_buffer();
{
auto throwaway_reader = std::move(reader);
@@ -2516,12 +2516,12 @@ SEASTAR_THREAD_TEST_CASE(test_queue_reader) {
auto& reader = std::get<0>(p);
auto& handle = std::get<1>(p);
auto close_reader = deferred_close(reader);
auto fill_buffer_fut = reader.fill_buffer(db::no_timeout);
auto fill_buffer_fut = reader.fill_buffer();
auto expected_reader = flat_mutation_reader_from_mutations(semaphore.make_permit(), expected_muts);
auto close_expected_reader = deferred_close(expected_reader);
handle.push(std::move(*expected_reader(db::no_timeout).get0())).get();
handle.push(std::move(*expected_reader().get0())).get();
BOOST_REQUIRE(!fill_buffer_fut.available());
@@ -2544,12 +2544,12 @@ SEASTAR_THREAD_TEST_CASE(test_queue_reader) {
auto& reader = std::get<0>(p);
auto& handle = std::get<1>(p);
auto close_reader = deferred_close(reader);
auto fill_buffer_fut = reader.fill_buffer(db::no_timeout);
auto fill_buffer_fut = reader.fill_buffer();
auto expected_reader = flat_mutation_reader_from_mutations(semaphore.make_permit(), expected_muts);
auto close_expected_reader = deferred_close(expected_reader);
handle.push(std::move(*expected_reader(db::no_timeout).get0())).get();
handle.push(std::move(*expected_reader().get0())).get();
BOOST_REQUIRE(!fill_buffer_fut.available());
@@ -2741,8 +2741,8 @@ SEASTAR_THREAD_TEST_CASE(test_manual_paused_evictable_reader_is_mutation_source)
std::tie(_reader, _handle) = make_manually_paused_evictable_reader(mt.as_data_source(), mt.schema(), _permit, pr, ps, pc,
std::move(trace_state), fwd_mr);
}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
return _reader.fill_buffer(timeout).then([this] {
virtual future<> fill_buffer() override {
return _reader.fill_buffer().then([this] {
_end_of_stream = _reader.is_end_of_stream();
_reader.move_buffer_content_to(*this);
}).then([this] {
@@ -2757,14 +2757,14 @@ SEASTAR_THREAD_TEST_CASE(test_manual_paused_evictable_reader_is_mutation_source)
_end_of_stream = false;
return _reader.next_partition();
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
clear_buffer();
_end_of_stream = false;
return _reader.fast_forward_to(pr, timeout).then([this] {
return _reader.fast_forward_to(pr).then([this] {
maybe_pause();
});
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
throw_with_backtrace<std::bad_function_call>();
}
virtual future<> close() noexcept override {
@@ -2873,7 +2873,7 @@ flat_mutation_reader create_evictable_reader_and_evict_after_first_buffer(
rd.set_max_buffer_size(max_buffer_size);
rd.fill_buffer(db::no_timeout).get0();
rd.fill_buffer().get0();
const auto eq_cmp = position_in_partition::equal_compare(*schema);
BOOST_REQUIRE(rd.is_buffer_full());
@@ -2919,7 +2919,7 @@ SEASTAR_THREAD_TEST_CASE(test_evictable_reader_trim_range_tombstones) {
reader_concurrency_semaphore semaphore(reader_concurrency_semaphore::no_limits{}, get_name());
auto stop_sem = deferred_stop(semaphore);
simple_schema s;
auto permit = semaphore.make_tracking_only_permit(s.schema().get(), get_name());
auto permit = semaphore.make_tracking_only_permit(s.schema().get(), get_name(), db::no_timeout);
const auto pkey = s.make_pkey();
size_t max_buffer_size = 512;
@@ -2954,7 +2954,7 @@ SEASTAR_THREAD_TEST_CASE(test_evictable_reader_trim_range_tombstones) {
s.schema()->full_slice(), std::move(first_buffer), last_fragment_position, std::move(second_buffer), max_buffer_size);
auto close_rd = deferred_close(rd);
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
const auto tri_cmp = position_in_partition::tri_compare(*s.schema());
@@ -2984,7 +2984,7 @@ void check_evictable_reader_validation_is_triggered(
const bool fail = !error_prefix.empty();
try {
rd.fill_buffer(db::no_timeout).get0();
rd.fill_buffer().get0();
} catch (std::runtime_error& e) {
if (fail) {
if (error_prefix == std::string_view(e.what(), error_prefix.size())) {
@@ -3013,7 +3013,7 @@ SEASTAR_THREAD_TEST_CASE(test_evictable_reader_self_validation) {
reader_concurrency_semaphore semaphore(reader_concurrency_semaphore::no_limits{}, get_name());
auto stop_sem = deferred_stop(semaphore);
simple_schema s;
auto permit = semaphore.make_tracking_only_permit(s.schema().get(), get_name());
auto permit = semaphore.make_tracking_only_permit(s.schema().get(), get_name(), db::no_timeout);
auto pkeys = s.make_pkeys(4);
std::ranges::sort(pkeys, dht::decorated_key::less_comparator(s.schema()));
@@ -3333,7 +3333,7 @@ SEASTAR_THREAD_TEST_CASE(test_evictable_reader_recreate_before_fast_forward_to)
on_range_change(pr);
}
virtual future<> fill_buffer(db::timeout_clock::time_point) override {
virtual future<> fill_buffer() override {
if (_it == _end) {
_end_of_stream = true;
return make_ready_future<>();
@@ -3354,13 +3354,13 @@ SEASTAR_THREAD_TEST_CASE(test_evictable_reader_recreate_before_fast_forward_to)
virtual future<> next_partition() override {
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
on_range_change(pr);
clear_buffer();
_end_of_stream = false;
return make_ready_future<>();
}
virtual future<> fast_forward_to(position_range, db::timeout_clock::time_point) override {
virtual future<> fast_forward_to(position_range) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> close() noexcept override {
@@ -3371,7 +3371,7 @@ SEASTAR_THREAD_TEST_CASE(test_evictable_reader_recreate_before_fast_forward_to)
reader_concurrency_semaphore semaphore(reader_concurrency_semaphore::no_limits{}, get_name());
auto stop_sem = deferred_stop(semaphore);
simple_schema s;
auto permit = semaphore.make_tracking_only_permit(s.schema().get(), get_name());
auto permit = semaphore.make_tracking_only_permit(s.schema().get(), get_name(), db::no_timeout);
auto pkeys = s.make_pkeys(6);
boost::sort(pkeys, dht::decorated_key::less_comparator(s.schema()));
@@ -3422,7 +3422,7 @@ SEASTAR_THREAD_TEST_CASE(test_evictable_reader_drop_flags) {
reader_concurrency_semaphore semaphore(1, 0, get_name());
auto stop_sem = deferred_stop(semaphore);
simple_schema s;
auto permit = semaphore.make_tracking_only_permit(s.schema().get(), get_name());
auto permit = semaphore.make_tracking_only_permit(s.schema().get(), get_name(), db::no_timeout);
auto pkeys = s.make_pkeys(2);
std::sort(pkeys.begin(), pkeys.end(), [&s] (const auto& pk1, const auto& pk2) {
@@ -3915,9 +3915,9 @@ SEASTAR_THREAD_TEST_CASE(clustering_combined_reader_mutation_source_test) {
assert(!_readers.empty());
}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
virtual future<> fill_buffer() override {
while (!is_buffer_full()) {
auto mfo = co_await next_fragment(timeout);
auto mfo = co_await next_fragment();
if (!mfo) {
_end_of_stream = true;
break;
@@ -3946,7 +3946,7 @@ SEASTAR_THREAD_TEST_CASE(clustering_combined_reader_mutation_source_test) {
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
// all fragments currently in the buffer come from the current partition range
// and pr must be strictly greater, so just clear the buffer
clear_buffer();
@@ -3959,7 +3959,7 @@ SEASTAR_THREAD_TEST_CASE(clustering_combined_reader_mutation_source_test) {
co_return;
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
if (!_inside_partition) {
// this should not happen - the specification of fast_forward_to says that it can only be called
// while inside partition. But if it happens for whatever reason just do nothing
@@ -3970,7 +3970,7 @@ SEASTAR_THREAD_TEST_CASE(clustering_combined_reader_mutation_source_test) {
// and pr must be strictly greater, so just clear the buffer
clear_buffer();
_end_of_stream = false;
return _it->second.fast_forward_to(std::move(pr), timeout);
return _it->second.fast_forward_to(std::move(pr));
}
virtual future<> close() noexcept override {
@@ -3979,12 +3979,12 @@ SEASTAR_THREAD_TEST_CASE(clustering_combined_reader_mutation_source_test) {
});
}
future<mutation_fragment_opt> next_fragment(db::timeout_clock::time_point timeout) {
future<mutation_fragment_opt> next_fragment() {
if (_it == _readers.end() || _range.get().after(_it->first, dht::ring_position_comparator(*_schema))) {
co_return mutation_fragment_opt{};
}
auto mfo = co_await _it->second(timeout);
auto mfo = co_await _it->second();
if (mfo) {
if (mfo->is_end_of_partition()) {
++_it;
@@ -4032,7 +4032,7 @@ SEASTAR_THREAD_TEST_CASE(clustering_combined_reader_mutation_source_test) {
good.apply(std::move(mf));
}
return stop_iteration::no;
}, db::no_timeout).get();
}).get();
mutation_bounds mb {
std::move(good),
@@ -4104,7 +4104,7 @@ SEASTAR_THREAD_TEST_CASE(test_clustering_combining_of_empty_readers) {
std::make_unique<simple_position_reader_queue>(*s, std::move(rs)));
auto close_r = deferred_close(r);
auto mf = r(db::no_timeout).get0();
auto mf = r().get0();
if (mf) {
BOOST_FAIL(format("reader combined of empty readers returned fragment {}", mutation_fragment::printer(*s, *mf)));
}

View File

@@ -94,7 +94,7 @@ static mutation_partition get_partition(reader_permit permit, memtable& mt, cons
auto range = dht::partition_range::make_singular(dk);
auto reader = mt.make_flat_reader(mt.schema(), std::move(permit), range);
auto close_reader = deferred_close(reader);
auto mo = read_mutation_from_flat_mutation_reader(reader, db::no_timeout).get0();
auto mo = read_mutation_from_flat_mutation_reader(reader).get0();
BOOST_REQUIRE(bool(mo));
return std::move(mo->partition());
}
@@ -454,7 +454,7 @@ SEASTAR_THREAD_TEST_CASE(test_large_collection_allocation) {
auto rd = mt->make_flat_reader(schema, semaphore.make_permit());
auto close_rd = deferred_close(rd);
auto res_mut_opt = read_mutation_from_flat_mutation_reader(rd, db::no_timeout).get0();
auto res_mut_opt = read_mutation_from_flat_mutation_reader(rd).get0();
BOOST_REQUIRE(res_mut_opt);
res_mut_opt->partition().compact_for_query(*schema, gc_clock::now(), {query::full_clustering_range}, true, false,
@@ -2955,7 +2955,7 @@ void run_compaction_data_stream_split_test(const schema& schema, reader_permit p
survived_compacted_fragments_consumer(schema, query_time, get_max_purgeable),
purged_compacted_fragments_consumer(schema, query_time, get_max_purgeable));
auto [survived_partitions, purged_partitions] = reader.consume(std::move(consumer), db::no_timeout).get0();
auto [survived_partitions, purged_partitions] = reader.consume(std::move(consumer)).get0();
testlog.info("Survived data: {}", create_stats(survived_partitions));
testlog.info("Purged data: {}", create_stats(purged_partitions));

View File

@@ -83,7 +83,7 @@ SEASTAR_TEST_CASE(test_multishard_writer) {
}
return with_closeable(std::move(reader), [&sharder, &shards_after, error] (flat_mutation_reader& reader) {
return repeat([&sharder, &shards_after, &reader, error] () mutable {
return reader(db::no_timeout).then([&sharder, &shards_after, error] (mutation_fragment_opt mf_opt) mutable {
return reader().then([&sharder, &shards_after, error] (mutation_fragment_opt mf_opt) mutable {
if (mf_opt) {
if (mf_opt->is_partition_start()) {
auto shard = sharder.shard_of(mf_opt->as_partition_start().key().token());
@@ -139,7 +139,7 @@ SEASTAR_TEST_CASE(test_multishard_writer_producer_aborts) {
if (mf_produced++ > 800) {
return make_exception_future<mutation_fragment_opt>(std::runtime_error("the producer failed"));
} else {
return source_reader(db::no_timeout);
return source_reader();
}
};
auto& sharder = s->get_sharder();
@@ -154,7 +154,7 @@ SEASTAR_TEST_CASE(test_multishard_writer_producer_aborts) {
}
return with_closeable(std::move(reader), [&sharder, error] (flat_mutation_reader& reader) {
return repeat([&sharder, &reader, error] () mutable {
return reader(db::no_timeout).then([&sharder, error] (mutation_fragment_opt mf_opt) mutable {
return reader().then([&sharder, error] (mutation_fragment_opt mf_opt) mutable {
if (mf_opt) {
if (mf_opt->is_partition_start()) {
auto shard = sharder.shard_of(mf_opt->as_partition_start().key().token());
@@ -354,7 +354,7 @@ SEASTAR_THREAD_TEST_CASE(test_timestamp_based_splitting_mutation_writer) {
auto consumer = [&] (flat_mutation_reader bucket_reader) {
return with_closeable(std::move(bucket_reader), [&] (flat_mutation_reader& rd) {
return rd.consume(test_bucket_writer(random_schema.schema(), rd.permit(), classify_fn, buckets), db::no_timeout);
return rd.consume(test_bucket_writer(random_schema.schema(), rd.permit(), classify_fn, buckets));
});
};
@@ -375,7 +375,7 @@ SEASTAR_THREAD_TEST_CASE(test_timestamp_based_splitting_mutation_writer) {
}
std::vector<mutation> combined_mutations;
while (auto m = read_mutation_from_flat_mutation_reader(reader, db::no_timeout).get0()) {
while (auto m = read_mutation_from_flat_mutation_reader(reader).get0()) {
m->partition().compact_for_compaction(*random_schema.schema(), always_gc, now);
combined_mutations.emplace_back(std::move(*m));
}
@@ -424,7 +424,7 @@ SEASTAR_THREAD_TEST_CASE(test_timestamp_based_splitting_mutation_writer_abort) {
testlog.info("Will raise exception after {}/{} mutations", throw_after, muts.size());
auto consumer = [&] (flat_mutation_reader bucket_reader) {
return with_closeable(std::move(bucket_reader), [&] (flat_mutation_reader& rd) {
return rd.consume(test_bucket_writer(random_schema.schema(), rd.permit(), classify_fn, buckets, throw_after), db::no_timeout);
return rd.consume(test_bucket_writer(random_schema.schema(), rd.permit(), classify_fn, buckets, throw_after));
});
};

View File

@@ -116,7 +116,7 @@ private:
Querier make_querier(const dht::partition_range& range) {
return Querier(_mutation_source,
_s.schema(),
_sem.make_tracking_only_permit(_s.schema().get(), "make-querier"),
_sem.make_tracking_only_permit(_s.schema().get(), "make-querier", db::no_timeout),
range,
_s.schema()->full_slice(),
service::get_local_sstable_query_read_priority(),
@@ -222,7 +222,7 @@ public:
auto querier = make_querier<Querier>(range);
auto dk_ck = querier.consume_page(dummy_result_builder{}, row_limit, std::numeric_limits<uint32_t>::max(),
gc_clock::now(), db::no_timeout, query::max_result_size(std::numeric_limits<uint64_t>::max())).get0();
gc_clock::now(), query::max_result_size(std::numeric_limits<uint64_t>::max())).get0();
auto&& dk = dk_ck.first;
auto&& ck = dk_ck.second;
auto permit = querier.permit();
@@ -317,7 +317,7 @@ public:
const dht::partition_range& lookup_range,
const query::partition_slice& lookup_slice) {
auto querier_opt = _cache.lookup_data_querier(make_cache_key(lookup_key), lookup_schema, lookup_range, lookup_slice, nullptr);
auto querier_opt = _cache.lookup_data_querier(make_cache_key(lookup_key), lookup_schema, lookup_range, lookup_slice, nullptr, db::no_timeout);
if (querier_opt) {
querier_opt->close().get();
}
@@ -330,7 +330,7 @@ public:
const dht::partition_range& lookup_range,
const query::partition_slice& lookup_slice) {
auto querier_opt = _cache.lookup_mutation_querier(make_cache_key(lookup_key), lookup_schema, lookup_range, lookup_slice, nullptr);
auto querier_opt = _cache.lookup_mutation_querier(make_cache_key(lookup_key), lookup_schema, lookup_range, lookup_slice, nullptr, db::no_timeout);
if (querier_opt) {
querier_opt->close().get();
}
@@ -789,8 +789,8 @@ SEASTAR_THREAD_TEST_CASE(test_unique_inactive_read_handle) {
.with_column("v", int32_type)
.build();
auto sem1_h1 = sem1.register_inactive_read(make_empty_flat_reader(schema, sem1.make_tracking_only_permit(schema.get(), get_name())));
auto sem2_h1 = sem2.register_inactive_read(make_empty_flat_reader(schema, sem2.make_tracking_only_permit(schema.get(), get_name())));
auto sem1_h1 = sem1.register_inactive_read(make_empty_flat_reader(schema, sem1.make_tracking_only_permit(schema.get(), get_name(), db::no_timeout)));
auto sem2_h1 = sem2.register_inactive_read(make_empty_flat_reader(schema, sem2.make_tracking_only_permit(schema.get(), get_name(), db::no_timeout)));
// Sanity check that lookup still works with empty handle.
BOOST_REQUIRE(!sem1.unregister_inactive_read(reader_concurrency_semaphore::inactive_read_handle{}));

View File

@@ -38,7 +38,7 @@ SEASTAR_THREAD_TEST_CASE(test_reader_concurrency_semaphore_clear_inactive_reads)
auto stop_sem = deferred_stop(semaphore);
for (int i = 0; i < 10; ++i) {
handles.emplace_back(semaphore.register_inactive_read(make_empty_flat_reader(s.schema(), semaphore.make_tracking_only_permit(s.schema().get(), get_name()))));
handles.emplace_back(semaphore.register_inactive_read(make_empty_flat_reader(s.schema(), semaphore.make_tracking_only_permit(s.schema().get(), get_name(), db::no_timeout))));
}
BOOST_REQUIRE(std::all_of(handles.begin(), handles.end(), [] (const reader_concurrency_semaphore::inactive_read_handle& handle) { return bool(handle); }));
@@ -50,7 +50,7 @@ SEASTAR_THREAD_TEST_CASE(test_reader_concurrency_semaphore_clear_inactive_reads)
handles.clear();
for (int i = 0; i < 10; ++i) {
handles.emplace_back(semaphore.register_inactive_read(make_empty_flat_reader(s.schema(), semaphore.make_tracking_only_permit(s.schema().get(), get_name()))));
handles.emplace_back(semaphore.register_inactive_read(make_empty_flat_reader(s.schema(), semaphore.make_tracking_only_permit(s.schema().get(), get_name(), db::no_timeout))));
}
BOOST_REQUIRE(std::all_of(handles.begin(), handles.end(), [] (const reader_concurrency_semaphore::inactive_read_handle& handle) { return bool(handle); }));
@@ -68,14 +68,14 @@ SEASTAR_THREAD_TEST_CASE(test_reader_concurrency_semaphore_destroyed_permit_rele
// Not admitted, active
{
auto permit = semaphore.make_tracking_only_permit(s.schema().get(), get_name());
auto permit = semaphore.make_tracking_only_permit(s.schema().get(), get_name(), db::no_timeout);
auto units2 = permit.consume_memory(1024);
}
BOOST_REQUIRE_EQUAL(semaphore.available_resources(), initial_resources);
// Not admitted, inactive
{
auto permit = semaphore.make_tracking_only_permit(s.schema().get(), get_name());
auto permit = semaphore.make_tracking_only_permit(s.schema().get(), get_name(), db::no_timeout);
auto units2 = permit.consume_memory(1024);
auto handle = semaphore.register_inactive_read(make_empty_flat_reader(s.schema(), permit));
@@ -106,7 +106,7 @@ SEASTAR_THREAD_TEST_CASE(test_reader_concurrency_semaphore_abandoned_handle_clos
reader_concurrency_semaphore semaphore(reader_concurrency_semaphore::no_limits{}, get_name());
auto stop_sem = deferred_stop(semaphore);
auto permit = semaphore.make_tracking_only_permit(s.schema().get(), get_name());
auto permit = semaphore.make_tracking_only_permit(s.schema().get(), get_name(), db::no_timeout);
{
auto handle = semaphore.register_inactive_read(make_empty_flat_reader(s.schema(), permit));
// The handle is destroyed here, triggering the destruction of the inactive read.
@@ -146,13 +146,13 @@ SEASTAR_THREAD_TEST_CASE(test_reader_concurrency_semaphore_readmission_preserves
const auto consumed_resources = semaphore.available_resources();
semaphore.consume(consumed_resources);
auto fut = permit->maybe_wait_readmission(db::no_timeout);
auto fut = permit->maybe_wait_readmission();
BOOST_REQUIRE(!fut.available());
semaphore.signal(consumed_resources);
fut.get();
} else {
permit->maybe_wait_readmission(db::no_timeout).get();
permit->maybe_wait_readmission().get();
}
BOOST_REQUIRE_EQUAL(permit->consumed_resources(), residue_units->resources() + base_resources);
@@ -188,14 +188,14 @@ SEASTAR_THREAD_TEST_CASE(test_reader_concurrency_semaphore_forward_progress) {
public:
skeleton_reader(schema_ptr s, reader_permit permit)
: impl(std::move(s), std::move(permit)) { }
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
virtual future<> fill_buffer() override {
reader_permit::blocked_guard _{_permit};
_resources.emplace(_permit.consume_resources(reader_resources(0, tests::random::get_int(1024, 2048))));
co_await sleep(std::chrono::milliseconds(1));
}
virtual future<> next_partition() override { return make_ready_future<>(); }
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override { return make_ready_future<>(); }
virtual future<> fast_forward_to(position_range, db::timeout_clock::time_point timeout) override { return make_ready_future<>(); }
virtual future<> fast_forward_to(const dht::partition_range& pr) override { return make_ready_future<>(); }
virtual future<> fast_forward_to(position_range) override { return make_ready_future<>(); }
virtual future<> close() noexcept override {
_resources.reset();
return make_ready_future<>();
@@ -226,7 +226,7 @@ SEASTAR_THREAD_TEST_CASE(test_reader_concurrency_semaphore_forward_progress) {
co_await tick(std::get<flat_mutation_reader>(_reader));
}
future<> tick(flat_mutation_reader& reader) {
co_await reader.fill_buffer(db::no_timeout);
co_await reader.fill_buffer();
if (_evictable) {
_reader = _permit->semaphore().register_inactive_read(std::move(reader));
}
@@ -235,7 +235,7 @@ SEASTAR_THREAD_TEST_CASE(test_reader_concurrency_semaphore_forward_progress) {
if (auto reader = _permit->semaphore().unregister_inactive_read(std::move(handle)); reader) {
_reader = std::move(*reader);
} else {
co_await _permit->maybe_wait_readmission(db::no_timeout);
co_await _permit->maybe_wait_readmission();
make_reader();
}
co_await tick(std::get<flat_mutation_reader>(_reader));
@@ -251,7 +251,7 @@ SEASTAR_THREAD_TEST_CASE(test_reader_concurrency_semaphore_forward_progress) {
}
future<> obtain_permit() {
if (_memory_only) {
_permit = _semaphore.make_tracking_only_permit(_schema.get(), "reader_m");
_permit = _semaphore.make_tracking_only_permit(_schema.get(), "reader_m", db::no_timeout);
} else {
_permit = co_await _semaphore.obtain_permit(_schema.get(), fmt::format("reader_{}", _evictable ? 'e' : 'a'), 1024, db::no_timeout);
}
@@ -318,7 +318,7 @@ SEASTAR_THREAD_TEST_CASE(test_reader_concurrency_semaphore_forward_progress) {
try {
co_await r->obtain_permit();
} catch (semaphore_timed_out&) {
semaphore.broken(std::make_exception_ptr(std::runtime_error("test failed due to read timeout")));
semaphore.broken(std::make_exception_ptr(std::runtime_error("test failed due to read ")));
co_return;
}
@@ -327,7 +327,7 @@ SEASTAR_THREAD_TEST_CASE(test_reader_concurrency_semaphore_forward_progress) {
watchdog_touched = true;
co_await r->tick();
} catch (semaphore_timed_out&) {
semaphore.broken(std::make_exception_ptr(std::runtime_error("test failed due to read timeout")));
semaphore.broken(std::make_exception_ptr(std::runtime_error("test failed due to read ")));
break;
}
}
@@ -548,7 +548,7 @@ SEASTAR_THREAD_TEST_CASE(reader_concurrency_semaphore_dump_reader_diganostics) {
for (auto& schema : schemas) {
const auto nr_permits = tests::random::get_int<unsigned>(2, 32);
for (unsigned i = 0; i < nr_permits; ++i) {
auto permit = semaphore.make_tracking_only_permit(schema.get(), op_names.at(tests::random::get_int<unsigned>(0, nr_ops - 1)));
auto permit = semaphore.make_tracking_only_permit(schema.get(), op_names.at(tests::random::get_int<unsigned>(0, nr_ops - 1)), db::no_timeout);
if (tests::random::get_int<unsigned>(0, 4)) {
auto units = permit.consume_resources(reader_resources(tests::random::get_int<unsigned>(0, 1), tests::random::get_int<unsigned>(1024, 16 * 1024 * 1024)));
permits.push_back(std::pair(std::move(permit), std::move(units)));
@@ -581,7 +581,7 @@ SEASTAR_THREAD_TEST_CASE(test_reader_concurrency_semaphore_stop_waits_on_permits
BOOST_TEST_MESSAGE("1 permit");
{
auto semaphore = std::make_unique<reader_concurrency_semaphore>(reader_concurrency_semaphore::no_limits{}, get_name());
auto permit = std::make_unique<reader_permit>(semaphore->make_tracking_only_permit(nullptr, "permit1"));
auto permit = std::make_unique<reader_permit>(semaphore->make_tracking_only_permit(nullptr, "permit1", db::no_timeout));
// Test will fail via use-after-free
auto f = semaphore->stop().then([semaphore = std::move(semaphore)] { });
@@ -702,7 +702,7 @@ SEASTAR_THREAD_TEST_CASE(test_reader_concurrency_semaphore_admission) {
const auto stats_before = semaphore.get_stats();
auto wait_fut = permit.maybe_wait_readmission(db::timeout_clock::now());
auto wait_fut = permit.maybe_wait_readmission();
wait_fut.wait();
BOOST_REQUIRE(!wait_fut.failed());
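
For orientation, the permit-side API exercised by the hunks above reduces to the following shape. This is a sketch distilled from the changed test lines (coroutine style, with `semaphore` and `schema` assumed to exist as in the surrounding fixture), not an additional hunk from the series:

    // The deadline is handed over once, when the permit is obtained (illustrative value).
    auto timeout = db::timeout_clock::now() + std::chrono::seconds(1);
    auto permit = co_await semaphore.obtain_permit(schema.get(), "reader_a", 1024, timeout);
    // Later calls read the deadline from the permit instead of taking a parameter.
    co_await permit.maybe_wait_readmission();   // was maybe_wait_readmission(timeout)
    // Tracking-only permits receive the timeout the same way.
    auto tracking_permit = semaphore.make_tracking_only_permit(schema.get(), "reader_m", db::no_timeout);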


@@ -106,7 +106,7 @@ bool has_key(row_cache& cache, const dht::decorated_key& key) {
auto range = dht::partition_range::make_singular(key);
auto reader = cache.make_reader(cache.schema(), semaphore.make_permit(), range);
auto close_reader = deferred_close(reader);
auto mo = read_mutation_from_flat_mutation_reader(reader, db::no_timeout).get0();
auto mo = read_mutation_from_flat_mutation_reader(reader).get0();
if (!bool(mo)) {
return false;
}
@@ -171,12 +171,12 @@ class partition_counting_reader final : public delegating_reader {
public:
partition_counting_reader(flat_mutation_reader mr, int& counter)
: delegating_reader(std::move(mr)), _counter(counter) { }
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
virtual future<> fill_buffer() override {
if (_count_fill_buffer) {
++_counter;
_count_fill_buffer = false;
}
return delegating_reader::fill_buffer(timeout);
return delegating_reader::fill_buffer();
}
virtual future<> next_partition() override {
_count_fill_buffer = false;
@@ -759,7 +759,7 @@ SEASTAR_TEST_CASE(test_reading_from_random_partial_partition) {
cache.populate(m1); // m1 is supposed to have random continuity and populate() should preserve it
auto rd1 = cache.make_reader(gen.schema(), semaphore.make_permit());
rd1.fill_buffer(db::no_timeout).get();
rd1.fill_buffer().get();
// Merge m2 into cache
auto mt = make_lw_shared<memtable>(gen.schema());
@@ -767,7 +767,7 @@ SEASTAR_TEST_CASE(test_reading_from_random_partial_partition) {
cache.update(row_cache::external_updater([&] { underlying.apply(m2); }), *mt).get();
auto rd2 = cache.make_reader(gen.schema(), semaphore.make_permit());
rd2.fill_buffer(db::no_timeout).get();
rd2.fill_buffer().get();
assert_that(std::move(rd1)).next_mutation().is_equal_to(m1);
assert_that(std::move(rd2)).next_mutation().is_equal_to(m1 + m2);
@@ -867,7 +867,7 @@ SEASTAR_TEST_CASE(test_eviction) {
auto rd = cache.make_reader(s, semaphore.make_permit(), pr);
auto close_rd = deferred_close(rd);
rd.set_max_buffer_size(1);
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
}
while (tracker.partitions() > 0) {
@@ -951,7 +951,7 @@ SEASTAR_TEST_CASE(test_eviction_after_schema_change) {
auto rd = cache.make_reader(s2, semaphore.make_permit(), pr);
auto close_rd = deferred_close(rd);
rd.set_max_buffer_size(1);
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
}
while (tracker.region().evict_some() == memory::reclaiming_result::reclaimed_something) ;
@@ -969,9 +969,9 @@ void test_sliced_read_row_presence(flat_mutation_reader reader, schema_ptr s, st
auto close_reader = deferred_close(reader);
clustering_key::equality ck_eq(*s);
auto mfopt = reader(db::no_timeout).get0();
auto mfopt = reader().get0();
BOOST_REQUIRE(mfopt->is_partition_start());
while ((mfopt = reader(db::no_timeout).get0()) && !mfopt->is_end_of_partition()) {
while ((mfopt = reader().get0()) && !mfopt->is_end_of_partition()) {
if (mfopt->is_clustering_row()) {
BOOST_REQUIRE(!expected.empty());
auto expected_ck = expected.front();
@@ -985,7 +985,7 @@ void test_sliced_read_row_presence(flat_mutation_reader reader, schema_ptr s, st
}
BOOST_REQUIRE(expected.empty());
BOOST_REQUIRE(mfopt && mfopt->is_end_of_partition());
BOOST_REQUIRE(!reader(db::no_timeout).get0());
BOOST_REQUIRE(!reader().get0());
}
SEASTAR_TEST_CASE(test_single_partition_update) {
@@ -1219,7 +1219,7 @@ SEASTAR_TEST_CASE(test_update_failure) {
auto reader = cache.make_reader(s, semaphore.make_permit(), query::full_partition_range);
auto close_reader = deferred_close(reader);
for (int i = 0; i < partition_count; i++) {
auto mopt = read_mutation_from_flat_mutation_reader(reader, db::no_timeout).get0();
auto mopt = read_mutation_from_flat_mutation_reader(reader).get0();
if (!mopt) {
break;
}
@@ -1227,7 +1227,7 @@ SEASTAR_TEST_CASE(test_update_failure) {
BOOST_REQUIRE(it != partitions.end());
BOOST_REQUIRE(it->second.equal(*s, mopt->partition()));
}
BOOST_REQUIRE(!reader(db::no_timeout).get0());
BOOST_REQUIRE(!reader().get0());
};
if (failed) {
@@ -1289,8 +1289,8 @@ private:
: delegating_reader(std::move(r))
, _throttle(t)
{}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
return delegating_reader::fill_buffer(timeout).finally([this] () {
virtual future<> fill_buffer() override {
return delegating_reader::fill_buffer().finally([this] () {
return _throttle.enter();
});
}
@@ -1415,11 +1415,11 @@ SEASTAR_TEST_CASE(test_cache_population_and_update_race) {
auto m0_range = dht::partition_range::make_singular(ring[0].ring_position());
auto rd1 = cache.make_reader(s, semaphore.make_permit(), m0_range);
rd1.set_max_buffer_size(1);
auto rd1_fill_buffer = rd1.fill_buffer(db::no_timeout);
auto rd1_fill_buffer = rd1.fill_buffer();
auto rd2 = cache.make_reader(s, semaphore.make_permit());
rd2.set_max_buffer_size(1);
auto rd2_fill_buffer = rd2.fill_buffer(db::no_timeout);
auto rd2_fill_buffer = rd2.fill_buffer();
sleep(10ms).get();
@@ -1552,7 +1552,7 @@ SEASTAR_TEST_CASE(test_cache_population_and_clear_race) {
auto rd1 = cache.make_reader(s, semaphore.make_permit());
rd1.set_max_buffer_size(1);
auto rd1_fill_buffer = rd1.fill_buffer(db::no_timeout);
auto rd1_fill_buffer = rd1.fill_buffer();
sleep(10ms).get();
@@ -1613,10 +1613,10 @@ SEASTAR_TEST_CASE(test_mvcc) {
cache.populate(m1);
auto rd1 = cache.make_reader(s, semaphore.make_permit());
rd1.fill_buffer(db::no_timeout).get();
rd1.fill_buffer().get();
auto rd2 = cache.make_reader(s, semaphore.make_permit());
rd2.fill_buffer(db::no_timeout).get();
rd2.fill_buffer().get();
auto mt1 = make_lw_shared<memtable>(s);
mt1->apply(m2);
@@ -1632,7 +1632,7 @@ SEASTAR_TEST_CASE(test_mvcc) {
if (with_active_memtable_reader) {
mt1_reader_opt = mt1->make_flat_reader(s, semaphore.make_permit());
mt1_reader_opt->set_max_buffer_size(1);
mt1_reader_opt->fill_buffer(db::no_timeout).get();
mt1_reader_opt->fill_buffer().get();
}
auto mt1_copy = make_lw_shared<memtable>(s);
@@ -1640,19 +1640,19 @@ SEASTAR_TEST_CASE(test_mvcc) {
cache.update(row_cache::external_updater([&] { underlying.apply(mt1_copy); }), *mt1).get();
auto rd3 = cache.make_reader(s, semaphore.make_permit());
rd3.fill_buffer(db::no_timeout).get();
rd3.fill_buffer().get();
auto rd4 = cache.make_reader(s, semaphore.make_permit());
rd4.fill_buffer(db::no_timeout).get();
rd4.fill_buffer().get();
auto rd5 = cache.make_reader(s, semaphore.make_permit());
rd5.fill_buffer(db::no_timeout).get();
rd5.fill_buffer().get();
assert_that(std::move(rd3)).has_monotonic_positions();
if (with_active_memtable_reader) {
assert(mt1_reader_opt);
auto mt1_reader_mutation = read_mutation_from_flat_mutation_reader(*mt1_reader_opt, db::no_timeout).get0();
auto mt1_reader_mutation = read_mutation_from_flat_mutation_reader(*mt1_reader_opt).get0();
BOOST_REQUIRE(mt1_reader_mutation);
assert_that(*mt1_reader_mutation).is_equal_to(m2);
}
@@ -2078,7 +2078,7 @@ SEASTAR_TEST_CASE(test_tombstone_merging_in_partial_partition) {
}
static void consume_all(flat_mutation_reader& rd) {
while (auto mfopt = rd(db::no_timeout).get0()) {}
while (auto mfopt = rd().get0()) {}
}
static void populate_range(row_cache& cache, const dht::partition_range& pr = query::full_partition_range,
@@ -2131,7 +2131,7 @@ SEASTAR_TEST_CASE(test_readers_get_all_data_after_eviction) {
auto make_reader = [&] (const query::partition_slice& slice) {
auto rd = cache.make_reader(s, semaphore.make_permit(), query::full_partition_range, slice);
rd.set_max_buffer_size(1);
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
return assert_that(std::move(rd));
};
@@ -2260,7 +2260,7 @@ SEASTAR_TEST_CASE(test_tombstones_are_not_missed_when_range_is_invalidated) {
auto make_reader = [&] (const query::partition_slice& slice) {
auto rd = cache.make_reader(s.schema(), semaphore.make_permit(), pr, slice);
rd.set_max_buffer_size(1);
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
return assert_that(std::move(rd));
};
@@ -2365,7 +2365,7 @@ SEASTAR_TEST_CASE(test_exception_safety_of_update_from_memtable) {
auto make_reader = [&] (const dht::partition_range& pr) {
auto rd = cache.make_reader(s.schema(), semaphore.make_permit(), pr);
rd.set_max_buffer_size(1);
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
return rd;
};
@@ -2398,7 +2398,7 @@ SEASTAR_TEST_CASE(test_exception_safety_of_update_from_memtable) {
auto pr = dht::partition_range::make_singular(pkeys[2]);
snap = mt->make_flat_reader(s.schema(), semaphore.make_permit(), pr);
snap->set_max_buffer_size(1);
snap->fill_buffer(db::no_timeout).get();
snap->fill_buffer().get();
cache.update(row_cache::external_updater([&] {
auto mt2 = make_lw_shared<memtable>(cache.schema());
@@ -2446,9 +2446,9 @@ SEASTAR_TEST_CASE(test_exception_safety_of_reads) {
memory::with_allocation_failures([&] {
auto rd = cache.make_reader(s, semaphore.make_permit(), query::full_partition_range, slice);
auto close_rd = deferred_close(rd);
auto got_opt = read_mutation_from_flat_mutation_reader(rd, db::no_timeout).get0();
auto got_opt = read_mutation_from_flat_mutation_reader(rd).get0();
BOOST_REQUIRE(got_opt);
BOOST_REQUIRE(!read_mutation_from_flat_mutation_reader(rd, db::no_timeout).get0());
BOOST_REQUIRE(!read_mutation_from_flat_mutation_reader(rd).get0());
assert_that(*got_opt).is_equal_to(mut, ranges);
assert_that(cache.make_reader(s, semaphore.make_permit(), query::full_partition_range, slice))
@@ -2514,9 +2514,9 @@ SEASTAR_TEST_CASE(test_exception_safety_of_transitioning_from_underlying_read_to
auto rd = cache.make_reader(s.schema(), semaphore.make_permit(), pr, slice);
auto close_rd = deferred_close(rd);
auto got_opt = read_mutation_from_flat_mutation_reader(rd, db::no_timeout).get0();
auto got_opt = read_mutation_from_flat_mutation_reader(rd).get0();
BOOST_REQUIRE(got_opt);
auto mfopt = rd(db::no_timeout).get0();
auto mfopt = rd().get0();
BOOST_REQUIRE(!mfopt);
assert_that(*got_opt).is_equal_to(mut);
@@ -2578,7 +2578,7 @@ SEASTAR_TEST_CASE(test_concurrent_population_before_latest_version_iterator) {
auto make_reader = [&] (const query::partition_slice& slice) {
auto rd = cache.make_reader(s.schema(), semaphore.make_permit(), pr, slice);
rd.set_max_buffer_size(1);
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
return assert_that(std::move(rd));
};
@@ -2742,7 +2742,7 @@ SEASTAR_TEST_CASE(test_random_row_population) {
auto make_reader = [&] (const query::partition_slice* slice = nullptr) {
auto rd = cache.make_reader(s.schema(), semaphore.make_permit(), pr, slice ? *slice : s.schema()->full_slice());
rd.set_max_buffer_size(1);
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
return rd;
};
@@ -2786,7 +2786,7 @@ SEASTAR_TEST_CASE(test_random_row_population) {
while (!readers.empty()) {
std::vector<read> remaining_readers;
for (auto i = readers.begin(); i != readers.end(); i++) {
auto mfo = i->reader(db::no_timeout).get0();
auto mfo = i->reader().get0();
if (!mfo) {
auto&& ranges = i->slice->row_ranges(*s.schema(), pk.key());
assert_that(i->result).is_equal_to(m1, ranges);
@@ -2868,7 +2868,7 @@ SEASTAR_TEST_CASE(test_continuity_is_populated_when_read_overlaps_with_older_ver
auto make_reader = [&] {
auto rd = cache.make_reader(s.schema(), semaphore.make_permit(), pr);
rd.set_max_buffer_size(1);
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
return rd;
};
@@ -3000,7 +3000,7 @@ SEASTAR_TEST_CASE(test_continuity_population_with_multicolumn_clustering_key) {
auto make_reader = [&] (const query::partition_slice* slice = nullptr) {
auto rd = cache.make_reader(s, semaphore.make_permit(), pr, slice ? *slice : s->full_slice());
rd.set_max_buffer_size(1);
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
return rd;
};
@@ -3110,7 +3110,7 @@ SEASTAR_TEST_CASE(test_concurrent_setting_of_continuity_on_read_upper_bound) {
auto make_rd = [&] (const query::partition_slice* slice = nullptr) {
auto rd = cache.make_reader(s.schema(), semaphore.make_permit(), pr, slice ? *slice : s.schema()->full_slice());
rd.set_max_buffer_size(1);
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
return rd;
};
@@ -3176,7 +3176,7 @@ SEASTAR_TEST_CASE(test_tombstone_merging_of_overlapping_tombstones_in_many_versi
auto make_reader = [&] {
auto rd = cache.make_reader(s.schema(), semaphore.make_permit());
rd.set_max_buffer_size(1);
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
return rd;
};
@@ -3216,7 +3216,7 @@ SEASTAR_TEST_CASE(test_concurrent_reads_and_eviction) {
auto make_reader = [&] (const query::partition_slice& slice) {
auto rd = cache.make_reader(s, semaphore.make_permit(), pr, slice);
rd.set_max_buffer_size(3);
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
return rd;
};
@@ -3244,7 +3244,7 @@ SEASTAR_TEST_CASE(test_concurrent_reads_and_eviction) {
auto rd = make_reader(slice);
auto close_rd = deferred_close(rd);
auto actual_opt = read_mutation_from_flat_mutation_reader(rd, db::no_timeout).get0();
auto actual_opt = read_mutation_from_flat_mutation_reader(rd).get0();
BOOST_REQUIRE(actual_opt);
auto actual = *actual_opt;
@@ -3388,12 +3388,12 @@ SEASTAR_TEST_CASE(test_cache_update_and_eviction_preserves_monotonicity_of_memta
auto mt_rd1 = mt->make_flat_reader(s, semaphore.make_permit());
mt_rd1.set_max_buffer_size(1);
mt_rd1.fill_buffer(db::no_timeout).get();
mt_rd1.fill_buffer().get();
BOOST_REQUIRE(mt_rd1.is_buffer_full()); // If fails, increase n_rows
auto mt_rd2 = mt->make_flat_reader(s, semaphore.make_permit());
mt_rd2.set_max_buffer_size(1);
mt_rd2.fill_buffer(db::no_timeout).get();
mt_rd2.fill_buffer().get();
apply(cache, underlying, *mt);
@@ -3402,13 +3402,13 @@ SEASTAR_TEST_CASE(test_cache_update_and_eviction_preserves_monotonicity_of_memta
auto c_rd1 = cache.make_reader(s, semaphore.make_permit());
c_rd1.set_max_buffer_size(1);
c_rd1.fill_buffer(db::no_timeout).get();
c_rd1.fill_buffer().get();
apply(cache, underlying, m2);
auto c_rd2 = cache.make_reader(s, semaphore.make_permit());
c_rd2.set_max_buffer_size(1);
c_rd2.fill_buffer(db::no_timeout).get();
c_rd2.fill_buffer().get();
cache.evict();
@@ -3435,8 +3435,8 @@ SEASTAR_TEST_CASE(test_hash_is_cached) {
{
auto rd = cache.make_reader(s, semaphore.make_permit());
auto close_rd = deferred_close(rd);
rd(db::no_timeout).get0()->as_partition_start();
clustering_row row = std::move(*rd(db::no_timeout).get0()).as_clustering_row();
rd().get0()->as_partition_start();
clustering_row row = std::move(*rd().get0()).as_clustering_row();
BOOST_REQUIRE(!row.cells().cell_hash_for(0));
}
@@ -3445,16 +3445,16 @@ SEASTAR_TEST_CASE(test_hash_is_cached) {
slice.options.set<query::partition_slice::option::with_digest>();
auto rd = cache.make_reader(s, semaphore.make_permit(), query::full_partition_range, slice);
auto close_rd = deferred_close(rd);
rd(db::no_timeout).get0()->as_partition_start();
clustering_row row = std::move(*rd(db::no_timeout).get0()).as_clustering_row();
rd().get0()->as_partition_start();
clustering_row row = std::move(*rd().get0()).as_clustering_row();
BOOST_REQUIRE(row.cells().cell_hash_for(0));
}
{
auto rd = cache.make_reader(s, semaphore.make_permit());
auto close_rd = deferred_close(rd);
rd(db::no_timeout).get0()->as_partition_start();
clustering_row row = std::move(*rd(db::no_timeout).get0()).as_clustering_row();
rd().get0()->as_partition_start();
clustering_row row = std::move(*rd().get0()).as_clustering_row();
BOOST_REQUIRE(row.cells().cell_hash_for(0));
}
@@ -3465,8 +3465,8 @@ SEASTAR_TEST_CASE(test_hash_is_cached) {
{
auto rd = cache.make_reader(s, semaphore.make_permit());
auto close_rd = deferred_close(rd);
rd(db::no_timeout).get0()->as_partition_start();
clustering_row row = std::move(*rd(db::no_timeout).get0()).as_clustering_row();
rd().get0()->as_partition_start();
clustering_row row = std::move(*rd().get0()).as_clustering_row();
BOOST_REQUIRE(!row.cells().cell_hash_for(0));
}
@@ -3475,16 +3475,16 @@ SEASTAR_TEST_CASE(test_hash_is_cached) {
slice.options.set<query::partition_slice::option::with_digest>();
auto rd = cache.make_reader(s, semaphore.make_permit(), query::full_partition_range, slice);
auto close_rd = deferred_close(rd);
rd(db::no_timeout).get0()->as_partition_start();
clustering_row row = std::move(*rd(db::no_timeout).get0()).as_clustering_row();
rd().get0()->as_partition_start();
clustering_row row = std::move(*rd().get0()).as_clustering_row();
BOOST_REQUIRE(row.cells().cell_hash_for(0));
}
{
auto rd = cache.make_reader(s, semaphore.make_permit());
auto close_rd = deferred_close(rd);
rd(db::no_timeout).get0()->as_partition_start();
clustering_row row = std::move(*rd(db::no_timeout).get0()).as_clustering_row();
rd().get0()->as_partition_start();
clustering_row row = std::move(*rd().get0()).as_clustering_row();
BOOST_REQUIRE(row.cells().cell_hash_for(0));
}
});
@@ -3511,7 +3511,7 @@ SEASTAR_TEST_CASE(test_random_population_with_many_versions) {
auto make_reader = [&] () {
auto rd = cache.make_reader(s, semaphore.make_permit(), query::full_partition_range, s->full_slice());
rd.set_max_buffer_size(1);
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
return assert_that(std::move(rd));
};
@@ -3620,7 +3620,7 @@ SEASTAR_TEST_CASE(test_eviction_after_old_snapshot_touches_overriden_rows_keeps_
auto pr1 = dht::partition_range::make_singular(pk);
auto rd1 = cache.make_reader(s, semaphore.make_permit(), pr1);
rd1.set_max_buffer_size(1);
rd1.fill_buffer(db::no_timeout).get();
rd1.fill_buffer().get();
apply(cache, underlying, m2);
@@ -3661,7 +3661,7 @@ SEASTAR_TEST_CASE(test_eviction_after_old_snapshot_touches_overriden_rows_keeps_
auto rd1 = cache.make_reader(s, semaphore.make_permit(), pr);
rd1.set_max_buffer_size(1);
rd1.fill_buffer(db::no_timeout).get();
rd1.fill_buffer().get();
apply(cache, underlying, m2);
@@ -3710,7 +3710,7 @@ SEASTAR_TEST_CASE(test_reading_progress_with_small_buffer_and_invalidation) {
while (!rd3.is_end_of_stream()) {
tracker.allocator().invalidate_references();
rd3.fill_buffer(db::no_timeout).get();
rd3.fill_buffer().get();
while (!rd3.is_buffer_empty()) {
result.partition().apply(*s.schema(), rd3.pop_mutation_fragment());
}


@@ -3207,7 +3207,7 @@ SEASTAR_THREAD_TEST_CASE(compact_deleted_row) {
* ]
*/
mutation_opt m = with_closeable(compacted_sstable_reader(env, s, table_name, {1, 2}), [&] (flat_mutation_reader& reader) {
return read_mutation_from_flat_mutation_reader(reader, db::no_timeout);
return read_mutation_from_flat_mutation_reader(reader);
}).get0();
BOOST_REQUIRE(m);
BOOST_REQUIRE(m->key().equal(*s, partition_key::from_singular(*s, data_value(sstring("key")))));
@@ -3279,7 +3279,7 @@ SEASTAR_THREAD_TEST_CASE(compact_deleted_cell) {
*
*/
mutation_opt m = with_closeable(compacted_sstable_reader(env, s, table_name, {1, 2}), [&] (flat_mutation_reader& reader) {
return read_mutation_from_flat_mutation_reader(reader, db::no_timeout);
return read_mutation_from_flat_mutation_reader(reader);
}).get0();
BOOST_REQUIRE(m);
BOOST_REQUIRE(m->key().equal(*s, partition_key::from_singular(*s, data_value(sstring("key")))));


@@ -233,7 +233,7 @@ SEASTAR_TEST_CASE(compact) {
// nadav - deleted partition
return open_sstable(env, s, tmpdir_path, generation).then([&env, s] (shared_sstable sst) {
auto reader = make_lw_shared<flat_mutation_reader>(sstable_reader(sst, s, env.make_reader_permit())); // reader holds sst and s alive.
return read_mutation_from_flat_mutation_reader(*reader, db::no_timeout).then([reader, s] (mutation_opt m) {
return read_mutation_from_flat_mutation_reader(*reader).then([reader, s] (mutation_opt m) {
BOOST_REQUIRE(m);
BOOST_REQUIRE(m->key().equal(*s, partition_key::from_singular(*s, data_value(sstring("jerry")))));
BOOST_REQUIRE(!m->partition().partition_tombstone());
@@ -246,7 +246,7 @@ SEASTAR_TEST_CASE(compact) {
auto& cdef2 = *s->get_column_definition("height");
BOOST_REQUIRE(cells.cell_at(cdef1.id).as_atomic_cell(cdef1).value() == managed_bytes({0,0,0,40}));
BOOST_REQUIRE(cells.cell_at(cdef2.id).as_atomic_cell(cdef2).value() == managed_bytes({0,0,0,(int8_t)170}));
return read_mutation_from_flat_mutation_reader(*reader, db::no_timeout);
return read_mutation_from_flat_mutation_reader(*reader);
}).then([reader, s] (mutation_opt m) {
BOOST_REQUIRE(m);
BOOST_REQUIRE(m->key().equal(*s, partition_key::from_singular(*s, data_value(sstring("tom")))));
@@ -260,7 +260,7 @@ SEASTAR_TEST_CASE(compact) {
auto& cdef2 = *s->get_column_definition("height");
BOOST_REQUIRE(cells.cell_at(cdef1.id).as_atomic_cell(cdef1).value() == managed_bytes({0,0,0,20}));
BOOST_REQUIRE(cells.cell_at(cdef2.id).as_atomic_cell(cdef2).value() == managed_bytes({0,0,0,(int8_t)180}));
return read_mutation_from_flat_mutation_reader(*reader, db::no_timeout);
return read_mutation_from_flat_mutation_reader(*reader);
}).then([reader, s] (mutation_opt m) {
BOOST_REQUIRE(m);
BOOST_REQUIRE(m->key().equal(*s, partition_key::from_singular(*s, data_value(sstring("john")))));
@@ -274,14 +274,14 @@ SEASTAR_TEST_CASE(compact) {
auto& cdef2 = *s->get_column_definition("height");
BOOST_REQUIRE(cells.cell_at(cdef1.id).as_atomic_cell(cdef1).value() == managed_bytes({0,0,0,20}));
BOOST_REQUIRE(cells.find_cell(cdef2.id) == nullptr);
return read_mutation_from_flat_mutation_reader(*reader, db::no_timeout);
return read_mutation_from_flat_mutation_reader(*reader);
}).then([reader, s] (mutation_opt m) {
BOOST_REQUIRE(m);
BOOST_REQUIRE(m->key().equal(*s, partition_key::from_singular(*s, data_value(sstring("nadav")))));
BOOST_REQUIRE(m->partition().partition_tombstone());
auto rows = m->partition().clustered_rows();
BOOST_REQUIRE(rows.calculate_size() == 0);
return read_mutation_from_flat_mutation_reader(*reader, db::no_timeout);
return read_mutation_from_flat_mutation_reader(*reader);
}).then([reader] (mutation_opt m) {
BOOST_REQUIRE(!m);
}).finally([reader] {
@@ -426,7 +426,7 @@ static future<> check_compacted_sstables(test_env& env, sstring tmpdir_path, uns
return with_closeable(std::move(reader), [generations, s, keys] (flat_mutation_reader& reader) {
return do_for_each(*generations, [&reader, keys] (unsigned long generation) mutable {
return read_mutation_from_flat_mutation_reader(reader, db::no_timeout).then([generation, keys] (mutation_opt m) {
return read_mutation_from_flat_mutation_reader(reader).then([generation, keys] (mutation_opt m) {
BOOST_REQUIRE(m);
keys->push_back(m->key());
});
@@ -1026,7 +1026,7 @@ SEASTAR_TEST_CASE(tombstone_purge_test) {
auto assert_that_produces_dead_cell = [&] (auto& sst, partition_key& key) {
auto reader = make_lw_shared<flat_mutation_reader>(sstable_reader(sst, s, env.make_reader_permit()));
read_mutation_from_flat_mutation_reader(*reader, db::no_timeout).then([reader, s, &key] (mutation_opt m) {
read_mutation_from_flat_mutation_reader(*reader).then([reader, s, &key] (mutation_opt m) {
BOOST_REQUIRE(m);
BOOST_REQUIRE(m->key().equal(*s, key));
auto rows = m->partition().clustered_rows();
@@ -1036,7 +1036,7 @@ SEASTAR_TEST_CASE(tombstone_purge_test) {
BOOST_REQUIRE_EQUAL(cells.size(), 1);
auto& cdef = *s->get_column_definition("value");
BOOST_REQUIRE(!cells.cell_at(cdef.id).as_atomic_cell(cdef).is_live());
return (*reader)(db::no_timeout);
return (*reader)();
}).then([reader, s] (mutation_fragment_opt m) {
BOOST_REQUIRE(!m);
}).finally([reader] {
@@ -1212,14 +1212,14 @@ SEASTAR_TEST_CASE(sstable_rewrite) {
auto newsst = (*new_tables)[0];
BOOST_REQUIRE(newsst->generation() == 52);
auto reader = make_lw_shared<flat_mutation_reader>(sstable_reader(newsst, s, env.make_reader_permit()));
return (*reader)(db::no_timeout).then([s, reader, key] (mutation_fragment_opt m) {
return (*reader)().then([s, reader, key] (mutation_fragment_opt m) {
BOOST_REQUIRE(m);
BOOST_REQUIRE(m->is_partition_start());
auto pkey = partition_key::from_exploded(*s, {to_bytes(key)});
BOOST_REQUIRE(m->as_partition_start().key().key().equal(*s, pkey));
return reader->next_partition();
}).then([reader] {
return (*reader)(db::no_timeout);
return (*reader)();
}).then([reader] (mutation_fragment_opt m) {
BOOST_REQUIRE(!m);
}).finally([reader] {
@@ -2531,7 +2531,7 @@ SEASTAR_TEST_CASE(sstable_scrub_segregate_mode_test) {
auto sst_reader = assert_that(table->as_mutation_source().make_reader(schema, env.make_reader_permit()));
auto mt_reader = scrubbed_mt->as_data_source().make_reader(schema, env.make_reader_permit());
auto mt_reader_close = deferred_close(mt_reader);
while (auto mf_opt = mt_reader(db::no_timeout).get()) {
while (auto mf_opt = mt_reader().get()) {
testlog.trace("Expecting {}", mutation_fragment::printer(*schema, *mf_opt));
sst_reader.produces(*schema, *mf_opt);
}
@@ -2634,7 +2634,7 @@ SEASTAR_THREAD_TEST_CASE(test_scrub_segregate_stack) {
return async([&schema, &segregated_fragment_streams, rd = std::move(rd)] () mutable {
auto close = deferred_close(rd);
auto& fragments = segregated_fragment_streams.emplace_back();
while (auto mf_opt = rd(db::no_timeout).get()) {
while (auto mf_opt = rd().get()) {
fragments.emplace_back(*schema, rd.permit(), *mf_opt);
}
});
@@ -3158,7 +3158,7 @@ SEASTAR_TEST_CASE(purged_tombstone_consumer_sstable_test) {
auto r = std::move(reader);
auto close_r = deferred_close(r);
r.consume_in_thread(std::move(cfc), db::no_timeout);
r.consume_in_thread(std::move(cfc));
return {std::move(non_purged), std::move(purged_only)};
};
@@ -3188,14 +3188,14 @@ SEASTAR_TEST_CASE(purged_tombstone_consumer_sstable_test) {
auto assert_that_produces_purged_tombstone = [&] (auto& sst, partition_key& key, tombstone tomb) {
auto reader = make_lw_shared<flat_mutation_reader>(sstable_reader(sst, s, env.make_reader_permit()));
read_mutation_from_flat_mutation_reader(*reader, db::no_timeout).then([reader, s, &key, is_tombstone_purgeable, &tomb] (mutation_opt m) {
read_mutation_from_flat_mutation_reader(*reader).then([reader, s, &key, is_tombstone_purgeable, &tomb] (mutation_opt m) {
BOOST_REQUIRE(m);
BOOST_REQUIRE(m->key().equal(*s, key));
auto rows = m->partition().clustered_rows();
BOOST_REQUIRE_EQUAL(rows.calculate_size(), 0);
BOOST_REQUIRE(is_tombstone_purgeable(m->partition().partition_tombstone()));
BOOST_REQUIRE(m->partition().partition_tombstone() == tomb);
return (*reader)(db::no_timeout);
return (*reader)();
}).then([reader, s] (mutation_fragment_opt m) {
BOOST_REQUIRE(!m);
}).finally([reader] {
@@ -4080,7 +4080,7 @@ SEASTAR_TEST_CASE(test_twcs_single_key_reader_filtering) {
auto surviving_after_ck = cf_stats.surviving_sstables_after_clustering_filter;
// consume all fragments
while (reader(db::no_timeout).get());
while (reader().get());
// At least sst2 should be checked by the CK filter during fragment consumption and should pass.
// With the bug in #8432, sst2 wouldn't even be checked by the CK filter since it would pass right after checking the PK filter.
@@ -4378,9 +4378,9 @@ SEASTAR_TEST_CASE(twcs_single_key_reader_through_compound_set_test) {
tracing::trace_state_ptr(), ::streamed_mutation::forwarding::no,
::mutation_reader::forwarding::no);
auto close_reader = deferred_close(reader);
auto mfopt = read_mutation_from_flat_mutation_reader(reader, db::no_timeout).get0();
auto mfopt = read_mutation_from_flat_mutation_reader(reader).get0();
BOOST_REQUIRE(mfopt);
mfopt = read_mutation_from_flat_mutation_reader(reader, db::no_timeout).get0();
mfopt = read_mutation_from_flat_mutation_reader(reader).get0();
BOOST_REQUIRE(!mfopt);
BOOST_REQUIRE(cf_stats.clustering_filter_count > 0);
});


@@ -43,7 +43,7 @@ mutation_source make_sstable_mutation_source(sstables::test_env& env, schema_ptr
}
static void consume_all(flat_mutation_reader& rd) {
while (auto mfopt = rd(db::no_timeout).get0()) {}
while (auto mfopt = rd().get0()) {}
}
// It is assumed that src won't change.


@@ -190,7 +190,7 @@ SEASTAR_TEST_CASE(datafile_generation_11) {
return env.reusable_sst(s, tmpdir_path, 11).then([&env, s, verifier, tomb, &static_set_col] (auto sstp) mutable {
return do_with(dht::partition_range::make_singular(make_dkey(s, "key1")), [&env, sstp, s, verifier, tomb, &static_set_col] (auto& pr) {
auto rd = make_lw_shared<flat_mutation_reader_v2>(sstp->make_reader(s, env.make_reader_permit(), pr, s->full_slice()));
return read_mutation_from_flat_mutation_reader(*rd, db::no_timeout).then([sstp, s, verifier, tomb, &static_set_col, rd] (auto mutation) {
return read_mutation_from_flat_mutation_reader(*rd).then([sstp, s, verifier, tomb, &static_set_col, rd] (auto mutation) {
auto verify_set = [&tomb] (const collection_mutation_description& m) {
BOOST_REQUIRE(bool(m.tomb) == true);
BOOST_REQUIRE(m.tomb == tomb);
@@ -220,7 +220,7 @@ SEASTAR_TEST_CASE(datafile_generation_11) {
}).then([&env, sstp, s, verifier] {
return do_with(dht::partition_range::make_singular(make_dkey(s, "key2")), [&env, sstp, s, verifier] (auto& pr) {
auto rd = make_lw_shared<flat_mutation_reader_v2>(sstp->make_reader(s, env.make_reader_permit(), pr, s->full_slice()));
return read_mutation_from_flat_mutation_reader(*rd, db::no_timeout).then([sstp, s, verifier, rd] (auto mutation) {
return read_mutation_from_flat_mutation_reader(*rd).then([sstp, s, verifier, rd] (auto mutation) {
auto m = verifier(mutation);
BOOST_REQUIRE(!m.tomb);
BOOST_REQUIRE(m.cells.size() == 1);
@@ -255,7 +255,7 @@ SEASTAR_TEST_CASE(datafile_generation_12) {
return env.reusable_sst(s, tmpdir_path, 12).then([&env, s, tomb] (auto sstp) mutable {
return do_with(dht::partition_range::make_singular(make_dkey(s, "key1")), [&env, sstp, s, tomb] (auto& pr) {
auto rd = make_lw_shared<flat_mutation_reader_v2>(sstp->make_reader(s, env.make_reader_permit(), pr, s->full_slice()));
return read_mutation_from_flat_mutation_reader(*rd, db::no_timeout).then([sstp, s, tomb, rd] (auto mutation) {
return read_mutation_from_flat_mutation_reader(*rd).then([sstp, s, tomb, rd] (auto mutation) {
auto& mp = mutation->partition();
BOOST_REQUIRE(mp.row_tombstones().size() == 1);
for (auto& rt: mp.row_tombstones()) {
@@ -293,7 +293,7 @@ static future<> sstable_compression_test(compressor_ptr c, unsigned generation)
return env.reusable_sst(s, tmpdir_path, generation).then([&env, s, tomb] (auto sstp) mutable {
return do_with(dht::partition_range::make_singular(make_dkey(s, "key1")), [&env, sstp, s, tomb] (auto& pr) {
auto rd = make_lw_shared<flat_mutation_reader_v2>(sstp->make_reader(s, env.make_reader_permit(), pr, s->full_slice()));
return read_mutation_from_flat_mutation_reader(*rd, db::no_timeout).then([sstp, s, tomb, rd] (auto mutation) {
return read_mutation_from_flat_mutation_reader(*rd).then([sstp, s, tomb, rd] (auto mutation) {
auto& mp = mutation->partition();
BOOST_REQUIRE(mp.row_tombstones().size() == 1);
for (auto& rt: mp.row_tombstones()) {
@@ -376,7 +376,7 @@ SEASTAR_TEST_CASE(datafile_generation_37) {
return env.reusable_sst(s, tmpdir_path, 37).then([&env, s, tmpdir_path] (auto sstp) {
return do_with(dht::partition_range::make_singular(make_dkey(s, "key1")), [&env, sstp, s] (auto& pr) {
auto rd = make_lw_shared<flat_mutation_reader_v2>(sstp->make_reader(s, env.make_reader_permit(), pr, s->full_slice()));
return read_mutation_from_flat_mutation_reader(*rd, db::no_timeout).then([sstp, s, rd] (auto mutation) {
return read_mutation_from_flat_mutation_reader(*rd).then([sstp, s, rd] (auto mutation) {
auto& mp = mutation->partition();
auto clustering = clustering_key_prefix::from_exploded(*s, {to_bytes("cl1")});
@@ -413,7 +413,7 @@ SEASTAR_TEST_CASE(datafile_generation_38) {
return env.reusable_sst(s, tmpdir_path, 38).then([&env, s] (auto sstp) {
return do_with(dht::partition_range::make_singular(make_dkey(s, "key1")), [&env, sstp, s] (auto& pr) {
auto rd = make_lw_shared<flat_mutation_reader_v2>(sstp->make_reader(s, env.make_reader_permit(), pr, s->full_slice()));
return read_mutation_from_flat_mutation_reader(*rd, db::no_timeout).then([sstp, s, rd] (auto mutation) {
return read_mutation_from_flat_mutation_reader(*rd).then([sstp, s, rd] (auto mutation) {
auto& mp = mutation->partition();
auto clustering = clustering_key_prefix::from_exploded(*s, {to_bytes("cl1"), to_bytes("cl2")});
@@ -451,7 +451,7 @@ SEASTAR_TEST_CASE(datafile_generation_39) {
return env.reusable_sst(s, tmpdir_path, 39).then([&env, s] (auto sstp) {
return do_with(dht::partition_range::make_singular(make_dkey(s, "key1")), [&env, sstp, s] (auto& pr) {
auto rd = make_lw_shared<flat_mutation_reader_v2>(sstp->make_reader(s, env.make_reader_permit(), pr, s->full_slice()));
return read_mutation_from_flat_mutation_reader(*rd, db::no_timeout).then([sstp, s, rd] (auto mutation) {
return read_mutation_from_flat_mutation_reader(*rd).then([sstp, s, rd] (auto mutation) {
auto& mp = mutation->partition();
auto& row = mp.clustered_row(*s, clustering_key::make_empty());
match_live_cell(row.cells(), *s, "cl1", data_value(data_value(to_bytes("cl1"))));
@@ -486,7 +486,7 @@ SEASTAR_TEST_CASE(datafile_generation_41) {
return env.reusable_sst(s, tmpdir_path, 41).then([&env, s, tomb] (auto sstp) mutable {
return do_with(dht::partition_range::make_singular(make_dkey(s, "key1")), [&env, sstp, s, tomb] (auto& pr) {
auto rd = make_lw_shared<flat_mutation_reader_v2>(sstp->make_reader(s, env.make_reader_permit(), pr, s->full_slice()));
return read_mutation_from_flat_mutation_reader(*rd, db::no_timeout).then([sstp, s, tomb, rd] (auto mutation) {
return read_mutation_from_flat_mutation_reader(*rd).then([sstp, s, tomb, rd] (auto mutation) {
auto& mp = mutation->partition();
BOOST_REQUIRE(mp.clustered_rows().calculate_size() == 1);
auto& c_row = *(mp.clustered_rows().begin());
@@ -521,7 +521,7 @@ SEASTAR_TEST_CASE(datafile_generation_47) {
return env.reusable_sst(s, tmpdir_path, 47).then([&env, s] (auto sstp) mutable {
auto reader = make_lw_shared<flat_mutation_reader>(sstable_reader(sstp, s, env.make_reader_permit()));
return repeat([reader] {
return (*reader)(db::no_timeout).then([] (mutation_fragment_opt m) {
return (*reader)().then([] (mutation_fragment_opt m) {
if (!m) {
return make_ready_future<stop_iteration>(stop_iteration::yes);
}
@@ -644,7 +644,7 @@ SEASTAR_TEST_CASE(check_multi_schema) {
auto f = sst->load();
return f.then([&env, sst, s] {
auto reader = make_lw_shared<flat_mutation_reader>(sstable_reader(sst, s, env.make_reader_permit()));
return read_mutation_from_flat_mutation_reader(*reader, db::no_timeout).then([reader, s] (mutation_opt m) {
return read_mutation_from_flat_mutation_reader(*reader).then([reader, s] (mutation_opt m) {
BOOST_REQUIRE(m);
BOOST_REQUIRE(m->key().equal(*s, partition_key::from_singular(*s, 0)));
auto rows = m->partition().clustered_rows();
@@ -655,7 +655,7 @@ SEASTAR_TEST_CASE(check_multi_schema) {
BOOST_REQUIRE_EQUAL(cells.size(), 1);
auto& cdef = *s->get_column_definition("e");
BOOST_REQUIRE_EQUAL(cells.cell_at(cdef.id).as_atomic_cell(cdef).value(), managed_bytes(int32_type->decompose(5)));
return (*reader)(db::no_timeout);
return (*reader)();
}).then([reader, s] (mutation_fragment_opt m) {
BOOST_REQUIRE(!m);
}).finally([reader] {
@@ -676,7 +676,7 @@ void test_sliced_read_row_presence(shared_sstable sst, schema_ptr s, reader_perm
partition_key::equality pk_eq(*s);
clustering_key::equality ck_eq(*s);
auto mfopt = reader(db::no_timeout).get0();
auto mfopt = reader().get0();
while (mfopt) {
BOOST_REQUIRE(mfopt->is_partition_start());
auto it = std::find_if(expected.begin(), expected.end(), [&] (auto&& x) {
@@ -686,7 +686,7 @@ void test_sliced_read_row_presence(shared_sstable sst, schema_ptr s, reader_perm
auto expected_cr = std::move(it->second);
expected.erase(it);
mfopt = reader(db::no_timeout).get0();
mfopt = reader().get0();
BOOST_REQUIRE(mfopt);
while (!mfopt->is_end_of_partition()) {
if (mfopt->is_clustering_row()) {
@@ -700,12 +700,12 @@ void test_sliced_read_row_presence(shared_sstable sst, schema_ptr s, reader_perm
BOOST_REQUIRE(it != expected_cr.end());
expected_cr.erase(it);
}
mfopt = reader(db::no_timeout).get0();
mfopt = reader().get0();
BOOST_REQUIRE(mfopt);
}
BOOST_REQUIRE(expected_cr.empty());
mfopt = reader(db::no_timeout).get0();
mfopt = reader().get0();
}
BOOST_REQUIRE(expected.empty());
}
@@ -903,11 +903,11 @@ SEASTAR_TEST_CASE(test_counter_read) {
auto reader = sstable_reader(sst, s, env.make_reader_permit());
auto close_reader = deferred_close(reader);
auto mfopt = reader(db::no_timeout).get0();
auto mfopt = reader().get0();
BOOST_REQUIRE(mfopt);
BOOST_REQUIRE(mfopt->is_partition_start());
mfopt = reader(db::no_timeout).get0();
mfopt = reader().get0();
BOOST_REQUIRE(mfopt);
BOOST_REQUIRE(mfopt->is_clustering_row());
const clustering_row* cr = &mfopt->as_clustering_row();
@@ -935,7 +935,7 @@ SEASTAR_TEST_CASE(test_counter_read) {
}
});
mfopt = reader(db::no_timeout).get0();
mfopt = reader().get0();
BOOST_REQUIRE(mfopt);
BOOST_REQUIRE(mfopt->is_clustering_row());
cr = &mfopt->as_clustering_row();
@@ -948,11 +948,11 @@ SEASTAR_TEST_CASE(test_counter_read) {
}
});
mfopt = reader(db::no_timeout).get0();
mfopt = reader().get0();
BOOST_REQUIRE(mfopt);
BOOST_REQUIRE(mfopt->is_end_of_partition());
mfopt = reader(db::no_timeout).get0();
mfopt = reader().get0();
BOOST_REQUIRE(!mfopt);
}
});
@@ -2471,32 +2471,32 @@ SEASTAR_TEST_CASE(test_wrong_counter_shard_order) {
};
{
auto mfopt = reader(db::no_timeout).get0();
auto mfopt = reader().get0();
BOOST_REQUIRE(mfopt);
BOOST_REQUIRE(mfopt->is_partition_start());
verify_row(reader(db::no_timeout).get0(), 28545);
verify_row(reader(db::no_timeout).get0(), 27967);
verify_row(reader(db::no_timeout).get0(), 28342);
verify_row(reader(db::no_timeout).get0(), 28325);
mfopt = reader(db::no_timeout).get0();
verify_row(reader().get0(), 28545);
verify_row(reader().get0(), 27967);
verify_row(reader().get0(), 28342);
verify_row(reader().get0(), 28325);
mfopt = reader().get0();
BOOST_REQUIRE(mfopt);
BOOST_REQUIRE(mfopt->is_end_of_partition());
}
{
auto mfopt = reader(db::no_timeout).get0();
auto mfopt = reader().get0();
BOOST_REQUIRE(mfopt);
BOOST_REQUIRE(mfopt->is_partition_start());
verify_row(reader(db::no_timeout).get0(), 28386);
verify_row(reader(db::no_timeout).get0(), 28378);
verify_row(reader(db::no_timeout).get0(), 28129);
verify_row(reader(db::no_timeout).get0(), 28260);
mfopt = reader(db::no_timeout).get0();
verify_row(reader().get0(), 28386);
verify_row(reader().get0(), 28378);
verify_row(reader().get0(), 28129);
verify_row(reader().get0(), 28260);
mfopt = reader().get0();
BOOST_REQUIRE(mfopt);
BOOST_REQUIRE(mfopt->is_end_of_partition());
}
BOOST_REQUIRE(!reader(db::no_timeout).get0());
BOOST_REQUIRE(!reader().get0());
}
});
}
@@ -2820,7 +2820,7 @@ SEASTAR_TEST_CASE(test_zero_estimated_partitions) {
auto sst_mr = sst->as_mutation_source().make_reader(s, env.make_reader_permit(), query::full_partition_range, s->full_slice());
auto close_mr = deferred_close(sst_mr);
auto sst_mut = read_mutation_from_flat_mutation_reader(sst_mr, db::no_timeout).get0();
auto sst_mut = read_mutation_from_flat_mutation_reader(sst_mr).get0();
// The real test here is that we don't assert() in
// sstables::prepare_summary() with the write_components() call above,
@@ -2900,7 +2900,7 @@ SEASTAR_TEST_CASE(test_missing_partition_end_fragment) {
try {
auto wr = sst->get_writer(*s, 1, cfg, encoding_stats{}, default_priority_class());
mr.consume_in_thread(std::move(wr), db::no_timeout);
mr.consume_in_thread(std::move(wr));
BOOST_FAIL("write_components() should have failed");
} catch (const std::runtime_error&) {
testlog.info("failed as expected: {}", std::current_exception());
@@ -2991,3 +2991,31 @@ SEASTAR_TEST_CASE(compound_sstable_set_basic_test) {
});
}
SEASTAR_TEST_CASE(sstable_reader_with_timeout) {
return test_setup::do_with_tmp_directory([] (test_env& env, sstring tmpdir_path) {
return async([&env, tmpdir_path] {
auto s = complex_schema();
auto mt = make_lw_shared<memtable>(s);
auto key = partition_key::from_exploded(*s, {to_bytes("key1")});
auto cp = clustering_key_prefix::from_exploded(*s, {to_bytes("c1")});
mutation m(s, key);
tombstone tomb(api::new_timestamp(), gc_clock::now());
m.partition().apply_delete(*s, cp, tomb);
mt->apply(std::move(m));
auto sst = env.make_sstable(s, tmpdir_path, 12, sstables::get_highest_sstable_version(), big);
write_memtable_to_sstable_for_test(*mt, sst).get();
auto sstp = env.reusable_sst(s, tmpdir_path, 12).get0();
auto pr = dht::partition_range::make_singular(make_dkey(s, "key1"));
auto timeout = db::timeout_clock::now();
auto rd = sstp->make_reader(s, env.make_reader_permit(timeout), pr, s->full_slice());
auto close_rd = deferred_close(rd);
auto f = read_mutation_from_flat_mutation_reader(rd);
BOOST_REQUIRE_THROW(f.get(), timed_out_error);
});
});
}
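
The test above sets the permit's deadline to `db::timeout_clock::now()`, i.e. already expired, so the very first read must fail with `timed_out_error` even though nothing on the read path takes a timeout argument anymore. For contrast, a read with a live deadline is obtained the same way; the snippet below is a sketch with illustrative values (not part of the test), reusing the same helpers:

    // Hypothetical non-expiring variant: a deadline in the future, or db::no_timeout.
    auto live_rd = sstp->make_reader(s, env.make_reader_permit(db::timeout_clock::now() + std::chrono::seconds(10)), pr, s->full_slice());
    auto close_live_rd = deferred_close(live_rd);
    auto mut = read_mutation_from_flat_mutation_reader(live_rd).get0();   // completes normally if the read finishes before the deadline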


@@ -59,7 +59,7 @@ SEASTAR_THREAD_TEST_CASE(nonexistent_key) {
return do_with(dht::partition_range::make_singular(make_dkey(uncompressed_schema(), "invalid_key")), [&env, sstp] (auto& pr) {
auto s = uncompressed_schema();
return with_closeable(sstp->make_reader(s, env.make_reader_permit(), pr, s->full_slice()), [sstp, s] (auto& rd) {
return rd(db::no_timeout).then([sstp, s] (auto mutation) {
return rd().then([sstp, s] (auto mutation) {
BOOST_REQUIRE(!mutation);
return make_ready_future<>();
});
@@ -74,7 +74,7 @@ future<> test_no_clustered(sstables::test_env& env, bytes&& key, std::unordered_
return do_with(dht::partition_range::make_singular(make_dkey(uncompressed_schema(), std::move(k))), [&env, sstp, map = std::move(map)] (auto& pr) {
auto s = uncompressed_schema();
return with_closeable(sstp->make_reader(s, env.make_reader_permit(), pr, s->full_slice()), [sstp, s, map = std::move(map)] (auto& rd) mutable {
return read_mutation_from_flat_mutation_reader(rd, db::no_timeout).then([sstp, s, map = std::move(map)] (auto mutation) {
return read_mutation_from_flat_mutation_reader(rd).then([sstp, s, map = std::move(map)] (auto mutation) {
BOOST_REQUIRE(mutation);
auto& mp = mutation->partition();
for (auto&& e : mp.range(*s, nonwrapping_range<clustering_key_prefix>())) {
@@ -150,7 +150,7 @@ future<mutation> generate_clustered(sstables::test_env& env, bytes&& key) {
return do_with(dht::partition_range::make_singular(make_dkey(complex_schema(), std::move(k))), [&env, sstp] (auto& pr) {
auto s = complex_schema();
return with_closeable(sstp->make_reader(s, env.make_reader_permit(), pr, s->full_slice()), [sstp, s] (auto& rd) {
return read_mutation_from_flat_mutation_reader(rd, db::no_timeout).then([sstp, s] (auto mutation) {
return read_mutation_from_flat_mutation_reader(rd).then([sstp, s] (auto mutation) {
BOOST_REQUIRE(mutation);
return std::move(*mutation);
});
@@ -356,7 +356,7 @@ future<> test_range_reads(sstables::test_env& env, const dht::token& min, const
// "mutations", continues to live until after the last
// iteration's future completes, so its lifetime is safe.
[sstp, &mutations, &expected, expected_size, count, stop] () mutable {
return mutations(db::no_timeout).then([&expected, expected_size, count, stop, &mutations] (mutation_fragment_v2_opt mfopt) mutable {
return mutations().then([&expected, expected_size, count, stop, &mutations] (mutation_fragment_v2_opt mfopt) mutable {
if (mfopt) {
BOOST_REQUIRE(mfopt->is_partition_start());
BOOST_REQUIRE(*count < expected_size);
@@ -436,7 +436,7 @@ SEASTAR_TEST_CASE(test_sstable_can_write_and_read_range_tombstone) {
write_memtable_to_sstable_for_test(*mt, sst).get();
sst->load().get();
auto mut = with_closeable(sst->make_reader(s, env.make_reader_permit(), query::full_partition_range, s->full_slice()), [] (auto& mr) {
return read_mutation_from_flat_mutation_reader(mr, db::no_timeout);
return read_mutation_from_flat_mutation_reader(mr);
}).get0();
BOOST_REQUIRE(bool(mut));
auto rts = mut->partition().row_tombstones();
@@ -458,7 +458,7 @@ SEASTAR_THREAD_TEST_CASE(compact_storage_sparse_read) {
return do_with(dht::partition_range::make_singular(make_dkey(compact_sparse_schema(), "first_row")), [&env, sstp] (auto& pr) {
auto s = compact_sparse_schema();
return with_closeable(sstp->make_reader(s, env.make_reader_permit(), pr, s->full_slice()), [sstp, s] (auto& rd) {
return read_mutation_from_flat_mutation_reader(rd, db::no_timeout).then([sstp, s] (auto mutation) {
return read_mutation_from_flat_mutation_reader(rd).then([sstp, s] (auto mutation) {
BOOST_REQUIRE(mutation);
auto& mp = mutation->partition();
auto& row = mp.clustered_row(*s, clustering_key::make_empty());
@@ -478,7 +478,7 @@ SEASTAR_THREAD_TEST_CASE(compact_storage_simple_dense_read) {
return do_with(dht::partition_range::make_singular(make_dkey(compact_simple_dense_schema(), "first_row")), [&env, sstp] (auto& pr) {
auto s = compact_simple_dense_schema();
return with_closeable(sstp->make_reader(s, env.make_reader_permit(), pr, s->full_slice()), [sstp, s] (auto& rd) {
return read_mutation_from_flat_mutation_reader(rd, db::no_timeout).then([sstp, s] (auto mutation) {
return read_mutation_from_flat_mutation_reader(rd).then([sstp, s] (auto mutation) {
auto& mp = mutation->partition();
auto exploded = exploded_clustering_prefix({"cl1"});
@@ -500,7 +500,7 @@ SEASTAR_THREAD_TEST_CASE(compact_storage_dense_read) {
return do_with(dht::partition_range::make_singular(make_dkey(compact_dense_schema(), "first_row")), [&env, sstp] (auto& pr) {
auto s = compact_dense_schema();
return with_closeable(sstp->make_reader(s, env.make_reader_permit(), pr, s->full_slice()), [sstp, s] (auto& rd) {
return read_mutation_from_flat_mutation_reader(rd, db::no_timeout).then([sstp, s] (auto mutation) {
return read_mutation_from_flat_mutation_reader(rd).then([sstp, s] (auto mutation) {
auto& mp = mutation->partition();
auto exploded = exploded_clustering_prefix({"cl1", "cl2"});
@@ -526,7 +526,7 @@ SEASTAR_THREAD_TEST_CASE(broken_ranges_collection) {
auto s = peers_schema();
return with_closeable(sstp->as_mutation_source().make_reader(s, env.make_reader_permit(), query::full_partition_range), [s] (flat_mutation_reader& reader) {
return repeat([s, &reader] {
return read_mutation_from_flat_mutation_reader(reader, db::no_timeout).then([s] (mutation_opt mut) {
return read_mutation_from_flat_mutation_reader(reader).then([s] (mutation_opt mut) {
auto key_equal = [s, &mut] (sstring ip) {
return mut->key().equal(*s, partition_key::from_deeply_exploded(*s, { net::inet_address(ip) }));
};
@@ -595,7 +595,7 @@ SEASTAR_THREAD_TEST_CASE(tombstone_in_tombstone) {
auto s = tombstone_overlap_schema();
return with_closeable(sstp->make_reader(s, env.make_reader_permit(), query::full_partition_range, s->full_slice()), [sstp, s] (auto& reader) {
return repeat([sstp, s, &reader] {
return read_mutation_from_flat_mutation_reader(reader, db::no_timeout).then([s] (mutation_opt mut) {
return read_mutation_from_flat_mutation_reader(reader).then([s] (mutation_opt mut) {
if (!mut) {
return stop_iteration::yes;
}
@@ -660,7 +660,7 @@ SEASTAR_THREAD_TEST_CASE(range_tombstone_reading) {
auto s = tombstone_overlap_schema();
return with_closeable(sstp->make_reader(s, env.make_reader_permit(), query::full_partition_range, s->full_slice()), [sstp, s] (auto& reader) {
return repeat([sstp, s, &reader] {
return read_mutation_from_flat_mutation_reader(reader, db::no_timeout).then([s] (mutation_opt mut) {
return read_mutation_from_flat_mutation_reader(reader).then([s] (mutation_opt mut) {
if (!mut) {
return stop_iteration::yes;
}
@@ -739,7 +739,7 @@ SEASTAR_THREAD_TEST_CASE(tombstone_in_tombstone2) {
auto s = tombstone_overlap_schema2();
return with_closeable(sstp->make_reader(s, env.make_reader_permit(), query::full_partition_range, s->full_slice()), [sstp, s] (auto& reader) {
return repeat([sstp, s, &reader] {
return read_mutation_from_flat_mutation_reader(reader, db::no_timeout).then([s] (mutation_opt mut) {
return read_mutation_from_flat_mutation_reader(reader).then([s] (mutation_opt mut) {
if (!mut) {
return stop_iteration::yes;
}
@@ -882,7 +882,7 @@ SEASTAR_TEST_CASE(test_non_compound_table_row_is_not_marked_as_static) {
write_memtable_to_sstable_for_test(*mt, sst).get();
sst->load().get();
auto mut = with_closeable(sst->make_reader(s, env.make_reader_permit(), query::full_partition_range, s->full_slice()), [] (auto& mr) {
return read_mutation_from_flat_mutation_reader(mr, db::no_timeout);
return read_mutation_from_flat_mutation_reader(mr);
}).get0();
BOOST_REQUIRE(bool(mut));
}
@@ -1466,13 +1466,13 @@ SEASTAR_THREAD_TEST_CASE(test_large_index_pages_do_not_cause_large_allocations)
auto pr = dht::partition_range::make_singular(small_keys[0]);
mutation expected = *with_closeable(mt->make_flat_reader(s, env.make_reader_permit(), pr), [] (flat_mutation_reader& mt_reader) {
return read_mutation_from_flat_mutation_reader(mt_reader, db::no_timeout);
return read_mutation_from_flat_mutation_reader(mt_reader);
}).get0();
auto t0 = std::chrono::steady_clock::now();
auto large_allocs_before = memory::stats().large_allocations();
mutation actual = *with_closeable(sst->as_mutation_source().make_reader(s, env.make_reader_permit(), pr), [] (flat_mutation_reader& sst_reader) {
return read_mutation_from_flat_mutation_reader(sst_reader, db::no_timeout);
return read_mutation_from_flat_mutation_reader(sst_reader);
}).get0();
auto large_allocs_after = memory::stats().large_allocations();
auto duration = std::chrono::steady_clock::now() - t0;


@@ -472,7 +472,7 @@ SEASTAR_TEST_CASE(wrong_range) {
return do_with(dht::partition_range::make_singular(make_dkey(uncompressed_schema(), "todata")), [&env, sstp] (auto& range) {
auto s = columns_schema();
return with_closeable(sstp->make_reader(s, env.make_reader_permit(), range, s->full_slice()), [sstp, s] (auto& rd) {
return read_mutation_from_flat_mutation_reader(rd, db::no_timeout).then([sstp, s] (auto mutation) {
return read_mutation_from_flat_mutation_reader(rd).then([sstp, s] (auto mutation) {
return make_ready_future<>();
});
});
@@ -610,17 +610,17 @@ static future<int> count_rows(test_env& env, sstable_ptr sstp, schema_ptr s, sst
auto pr = dht::partition_range::make_singular(make_dkey(s, key.c_str()));
auto rd = sstp->make_reader(s, env.make_reader_permit(), pr, ps);
auto close_rd = deferred_close(rd);
auto mfopt = rd(db::no_timeout).get0();
auto mfopt = rd().get0();
if (!mfopt) {
return 0;
}
int nrows = 0;
mfopt = rd(db::no_timeout).get0();
mfopt = rd().get0();
while (mfopt) {
if (mfopt->is_clustering_row()) {
nrows++;
}
mfopt = rd(db::no_timeout).get0();
mfopt = rd().get0();
}
return nrows;
});
@@ -632,17 +632,17 @@ static future<int> count_rows(test_env& env, sstable_ptr sstp, schema_ptr s, sst
auto pr = dht::partition_range::make_singular(make_dkey(s, key.c_str()));
auto rd = sstp->make_reader(s, env.make_reader_permit(), pr, s->full_slice());
auto close_rd = deferred_close(rd);
auto mfopt = rd(db::no_timeout).get0();
auto mfopt = rd().get0();
if (!mfopt) {
return 0;
}
int nrows = 0;
mfopt = rd(db::no_timeout).get0();
mfopt = rd().get0();
while (mfopt) {
if (mfopt->is_clustering_row()) {
nrows++;
}
mfopt = rd(db::no_timeout).get0();
mfopt = rd().get0();
}
return nrows;
});
@@ -656,17 +656,17 @@ static future<int> count_rows(test_env& env, sstable_ptr sstp, schema_ptr s, sst
auto reader = sstp->make_reader(s, env.make_reader_permit(), query::full_partition_range, ps);
auto close_reader = deferred_close(reader);
int nrows = 0;
auto mfopt = reader(db::no_timeout).get0();
auto mfopt = reader().get0();
while (mfopt) {
mfopt = reader(db::no_timeout).get0();
mfopt = reader().get0();
BOOST_REQUIRE(mfopt);
while (!mfopt->is_end_of_partition()) {
if (mfopt->is_clustering_row()) {
nrows++;
}
mfopt = reader(db::no_timeout).get0();
mfopt = reader().get0();
}
mfopt = reader(db::no_timeout).get0();
mfopt = reader().get0();
}
return nrows;
});


@@ -440,7 +440,7 @@ SEASTAR_TEST_CASE(test_view_update_generator) {
sstables::sstable_writer_config sst_cfg = e.db().local().get_user_sstables_manager().configure_writer("test");
auto& pc = service::get_local_streaming_priority();
auto permit = e.local_db().get_reader_concurrency_semaphore().make_tracking_only_permit(s.get(), "test");
auto permit = e.local_db().get_reader_concurrency_semaphore().make_tracking_only_permit(s.get(), "test", db::no_timeout);
sst->write_components(flat_mutation_reader_from_mutations(std::move(permit), {m}), 1ul, s, sst_cfg, {}, pc).get();
sst->open_data().get();
t->add_sstable_and_update_cache(sst).get();
@@ -550,7 +550,7 @@ SEASTAR_THREAD_TEST_CASE(test_view_update_generator_deadlock) {
sstables::sstable_writer_config sst_cfg = e.local_db().get_user_sstables_manager().configure_writer("test");
auto& pc = service::get_local_streaming_priority();
auto permit = e.local_db().get_reader_concurrency_semaphore().make_tracking_only_permit(s.get(), "test");
auto permit = e.local_db().get_reader_concurrency_semaphore().make_tracking_only_permit(s.get(), "test", db::no_timeout);
sst->write_components(flat_mutation_reader_from_mutations(std::move(permit), {m}), 1ul, s, sst_cfg, {}, pc).get();
sst->open_data().get();
t->add_sstable_and_update_cache(sst).get();
@@ -627,7 +627,7 @@ SEASTAR_THREAD_TEST_CASE(test_view_update_generator_register_semaphore_unit_leak
sstables::sstable_writer_config sst_cfg = e.local_db().get_user_sstables_manager().configure_writer("test");
auto& pc = service::get_local_streaming_priority();
auto permit = e.local_db().get_reader_concurrency_semaphore().make_tracking_only_permit(s.get(), "test");
auto permit = e.local_db().get_reader_concurrency_semaphore().make_tracking_only_permit(s.get(), "test", db::no_timeout);
sst->write_components(flat_mutation_reader_from_mutations(std::move(permit), {m}), 1ul, s, sst_cfg, {}, pc).get();
sst->open_data().get();
t->add_sstable_and_update_cache(sst).get();
@@ -857,7 +857,7 @@ SEASTAR_THREAD_TEST_CASE(test_view_update_generator_buffering) {
bool ok = true;
staging_reader.consume_in_thread(db::view::view_updating_consumer(schema, permit, as, staging_reader_handle,
consumer_verifier(schema, sem, partition_rows, collected_muts, ok)), db::no_timeout);
consumer_verifier(schema, sem, partition_rows, collected_muts, ok)));
BOOST_REQUIRE(ok);


@@ -34,8 +34,8 @@ public:
: memtable_filling_virtual_table(s)
, _mutations(std::move(mutations)) {}
future<> execute(std::function<void(mutation)> mutation_sink, db::timeout_clock::time_point timeout) override {
return with_timeout(timeout, do_for_each(_mutations, [mutation_sink = std::move(mutation_sink)] (const mutation& m) { mutation_sink(m); }));
future<> execute(std::function<void(mutation)> mutation_sink) override {
return with_timeout(db::no_timeout, do_for_each(_mutations, [mutation_sink = std::move(mutation_sink)] (const mutation& m) { mutation_sink(m); }));
}
};
@@ -58,7 +58,7 @@ public:
auto close_rdr = deferred_close(rdr);
rdr.consume_pausable([&rc] (mutation_fragment mf) {
return rc.take(std::move(mf)).then([] { return stop_iteration::no; });
}, db::no_timeout).get();
}).get();
});
}
};


@@ -340,7 +340,7 @@ public:
table_name = std::move(table_name)] (database& db) mutable {
auto& cf = db.find_column_family(ks_name, table_name);
auto schema = cf.schema();
auto permit = db.get_reader_concurrency_semaphore().make_tracking_only_permit(schema.get(), "require_column_has_value()");
auto permit = db.get_reader_concurrency_semaphore().make_tracking_only_permit(schema.get(), "require_column_has_value()", db::no_timeout);
return cf.find_partition_slow(schema, permit, pkey)
.then([schema, ckey, column_name, exp] (column_family::const_mutation_partition_ptr p) {
assert(p != nullptr);
@@ -808,7 +808,7 @@ future<> do_with_cql_env_thread(std::function<void(cql_test_env&)> func, cql_tes
}
reader_permit make_reader_permit(cql_test_env& env) {
return env.local_db().get_reader_concurrency_semaphore().make_tracking_only_permit(nullptr, "test");
return env.local_db().get_reader_concurrency_semaphore().make_tracking_only_permit(nullptr, "test", db::no_timeout);
}
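
The helper above centralizes permit creation for the cql test environment, so the new `db::no_timeout` argument only has to appear in this one place. A typical call site would look like this (sketch, assuming a `cql_test_env& env` in scope):

    auto permit = make_reader_permit(env);   // tracking-only permit carrying db::no_timeout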
namespace debug {


@@ -35,7 +35,7 @@ class flat_reader_assertions {
range_tombstone_list _tombstones;
private:
mutation_fragment_opt read_next() {
return _reader(db::no_timeout).get0();
return _reader().get0();
}
static bool are_tombstones_mergeable(const schema& s, const range_tombstone& a, const range_tombstone& b) {
@@ -121,14 +121,14 @@ public:
}
flat_reader_assertions& may_produce_tombstones(position_range range) {
while (mutation_fragment* next = _reader.peek(db::no_timeout).get0()) {
while (mutation_fragment* next = _reader.peek().get0()) {
if (next->is_range_tombstone()) {
if (!range.overlaps(*_reader.schema(), next->as_range_tombstone().position(), next->as_range_tombstone().end_position())) {
break;
}
testlog.trace("Received range tombstone: {}", mutation_fragment::printer(*_reader.schema(), *next));
range = position_range(position_in_partition(next->position()), range.end());
_tombstones.apply(*_reader.schema(), _reader(db::no_timeout).get0()->as_range_tombstone());
_tombstones.apply(*_reader.schema(), _reader().get0()->as_range_tombstone());
} else if (next->is_clustering_row() && next->as_clustering_row().empty()) {
if (!range.contains(*_reader.schema(), next->position())) {
break;
@@ -138,7 +138,7 @@ public:
// incorrect to do so, so let's ignore them.
testlog.trace("Received empty clustered row: {}", mutation_fragment::printer(*_reader.schema(), *next));
range = position_range(position_in_partition(next->position()), range.end());
_reader(db::no_timeout).get();
_reader().get();
} else {
break;
}
@@ -269,11 +269,11 @@ public:
actual_list.apply(s, mfo->as_range_tombstone());
_tombstones.apply(s, mfo->as_range_tombstone());
position_in_partition::equal_compare eq(s);
while (mutation_fragment* next = _reader.peek(db::no_timeout).get0()) {
while (mutation_fragment* next = _reader.peek().get0()) {
if (!next->is_range_tombstone() || !are_tombstones_mergeable(s, *actual_list.begin(), next->as_range_tombstone())) {
break;
}
auto rt = _reader(db::no_timeout).get0()->as_range_tombstone();
auto rt = _reader().get0()->as_range_tombstone();
actual_list.apply(s, rt);
assert(actual_list.size() == 1);
_tombstones.apply(s, rt);
@@ -358,7 +358,7 @@ public:
}
flat_reader_assertions& produces(const mutation& m, const std::optional<query::clustering_row_ranges>& ck_ranges = {}) {
auto mo = read_mutation_from_flat_mutation_reader(_reader, db::no_timeout).get0();
auto mo = read_mutation_from_flat_mutation_reader(_reader).get0();
if (!mo) {
BOOST_FAIL(format("Expected {}, but got end of stream, at: {}", m, seastar::current_backtrace()));
}
@@ -383,7 +383,7 @@ public:
flat_reader_assertions& produces_eos_or_empty_mutation() {
testlog.trace("Expecting eos or empty mutation");
auto mo = read_mutation_from_flat_mutation_reader(_reader, db::no_timeout).get0();
auto mo = read_mutation_from_flat_mutation_reader(_reader).get0();
if (mo) {
if (!mo->partition().empty()) {
BOOST_FAIL(format("Mutation is not empty: {}", *mo));
@@ -432,7 +432,7 @@ public:
flat_reader_assertions& fast_forward_to(const dht::partition_range& pr) {
testlog.trace("Fast forward to partition range: {}", pr);
_pr = pr;
_reader.fast_forward_to(_pr, db::no_timeout).get();
_reader.fast_forward_to(_pr).get();
return *this;
}
@@ -444,7 +444,7 @@ public:
flat_reader_assertions& fast_forward_to(position_range pr) {
testlog.trace("Fast forward to clustering range: {}", pr);
_reader.fast_forward_to(std::move(pr), db::no_timeout).get();
_reader.fast_forward_to(std::move(pr)).get();
return *this;
}
@@ -458,7 +458,7 @@ public:
flat_reader_assertions& produces_compacted(const mutation& m, gc_clock::time_point query_time,
const std::optional<query::clustering_row_ranges>& ck_ranges = {}) {
auto mo = read_mutation_from_flat_mutation_reader(_reader, db::no_timeout).get0();
auto mo = read_mutation_from_flat_mutation_reader(_reader).get0();
// If the passed in mutation is empty, allow for the reader to produce an empty or no partition.
if (m.partition().empty() && !mo) {
return *this;
@@ -472,13 +472,13 @@ public:
}
mutation_assertion next_mutation() {
auto mo = read_mutation_from_flat_mutation_reader(_reader, db::no_timeout).get0();
auto mo = read_mutation_from_flat_mutation_reader(_reader).get0();
BOOST_REQUIRE(bool(mo));
return mutation_assertion(std::move(*mo));
}
future<> fill_buffer() {
return _reader.fill_buffer(db::no_timeout);
return _reader.fill_buffer();
}
bool is_buffer_full() const {
@@ -501,7 +501,7 @@ class flat_reader_assertions_v2 {
dht::partition_range _pr;
private:
mutation_fragment_v2_opt read_next() {
return _reader(db::no_timeout).get0();
return _reader().get0();
}
public:
flat_reader_assertions_v2(flat_mutation_reader_v2 reader)
@@ -755,7 +755,7 @@ public:
}
flat_reader_assertions_v2& produces(const mutation& m, const std::optional<query::clustering_row_ranges>& ck_ranges = {}) {
auto mo = read_mutation_from_flat_mutation_reader(_reader, db::no_timeout).get0();
auto mo = read_mutation_from_flat_mutation_reader(_reader).get0();
if (!mo) {
BOOST_FAIL(format("Expected {}, but got end of stream, at: {}", m, seastar::current_backtrace()));
}
@@ -780,7 +780,7 @@ public:
flat_reader_assertions_v2& produces_eos_or_empty_mutation() {
testlog.trace("Expecting eos or empty mutation");
auto mo = read_mutation_from_flat_mutation_reader(_reader, db::no_timeout).get0();
auto mo = read_mutation_from_flat_mutation_reader(_reader).get0();
if (mo) {
if (!mo->partition().empty()) {
BOOST_FAIL(format("Mutation is not empty: {}", *mo));
@@ -829,7 +829,7 @@ public:
flat_reader_assertions_v2& fast_forward_to(const dht::partition_range& pr) {
testlog.trace("Fast forward to partition range: {}", pr);
_pr = pr;
_reader.fast_forward_to(_pr, db::no_timeout).get();
_reader.fast_forward_to(_pr).get();
return *this;
}
@@ -841,7 +841,7 @@ public:
flat_reader_assertions_v2& fast_forward_to(position_range pr) {
testlog.trace("Fast forward to clustering range: {}", pr);
_reader.fast_forward_to(std::move(pr), db::no_timeout).get();
_reader.fast_forward_to(std::move(pr)).get();
return *this;
}
@@ -855,7 +855,7 @@ public:
flat_reader_assertions_v2& produces_compacted(const mutation& m, gc_clock::time_point query_time,
const std::optional<query::clustering_row_ranges>& ck_ranges = {}) {
auto mo = read_mutation_from_flat_mutation_reader(_reader, db::no_timeout).get0();
auto mo = read_mutation_from_flat_mutation_reader(_reader).get0();
// If the passed in mutation is empty, allow for the reader to produce an empty or no partition.
if (m.partition().empty() && !mo) {
return *this;
@@ -869,13 +869,13 @@ public:
}
mutation_assertion next_mutation() {
auto mo = read_mutation_from_flat_mutation_reader(_reader, db::no_timeout).get0();
auto mo = read_mutation_from_flat_mutation_reader(_reader).get0();
BOOST_REQUIRE(bool(mo));
return mutation_assertion(std::move(*mo));
}
future<> fill_buffer() {
return _reader.fill_buffer(db::no_timeout);
return _reader.fill_buffer();
}
bool is_buffer_full() const {

View File

@@ -84,7 +84,7 @@ private:
consume_partitions(rd, [&] (mutation&& m) {
new_mt->apply(std::move(m));
return stop_iteration::no;
}, db::no_timeout).get();
}).get();
_memtables.erase(_memtables.begin(), _memtables.begin() + count);
_memtables.push_back(new_mt);
}

View File

@@ -436,17 +436,17 @@ static void test_streamed_mutation_forwarding_is_consistent_with_slicing(tests::
void consume_end_of_stream() { }
};
fwd_reader.consume(consumer(m.schema(), builder), db::no_timeout).get0();
fwd_reader.consume(consumer(m.schema(), builder)).get0();
BOOST_REQUIRE(bool(builder));
for (auto&& range : ranges) {
testlog.trace("fwd {}", range);
fwd_reader.fast_forward_to(position_range(range), db::no_timeout).get();
fwd_reader.consume(consumer(m.schema(), builder), db::no_timeout).get0();
fwd_reader.fast_forward_to(position_range(range)).get();
fwd_reader.consume(consumer(m.schema(), builder)).get0();
}
mutation_opt fwd_m = builder->consume_end_of_stream();
BOOST_REQUIRE(bool(fwd_m));
mutation_opt sliced_m = read_mutation_from_flat_mutation_reader(sliced_reader, db::no_timeout).get0();
mutation_opt sliced_m = read_mutation_from_flat_mutation_reader(sliced_reader).get0();
BOOST_REQUIRE(bool(sliced_m));
assert_that(*sliced_m).is_equal_to(*fwd_m, slice_with_ranges.row_ranges(*m.schema(), m.key()));
}
@@ -1356,7 +1356,7 @@ void test_slicing_with_overlapping_range_tombstones(tests::reader_concurrency_se
}
result.partition().apply(*s, std::move(mf));
return stop_iteration::no;
}, db::no_timeout).get();
}).get();
assert_that(result).is_equal_to(m1 + m2, query::clustering_row_ranges({range}));
}
@@ -1377,7 +1377,7 @@ void test_slicing_with_overlapping_range_tombstones(tests::reader_concurrency_se
mutation_fragment_v2::printer(*s, mf), prange));
}
return rebuilder.consume(std::move(mf));
}, db::no_timeout).get();
}).get();
auto result = *rebuilder.consume_end_of_stream();
assert_that(result).is_equal_to(m1 + m2, query::clustering_row_ranges({range}));
@@ -1396,9 +1396,9 @@ void test_slicing_with_overlapping_range_tombstones(tests::reader_concurrency_se
BOOST_REQUIRE(!mf.position().has_clustering_key());
result.partition().apply(*s, std::move(mf));
return stop_iteration::no;
}, db::no_timeout).get();
}).get();
rd.fast_forward_to(prange, db::no_timeout).get();
rd.fast_forward_to(prange).get();
position_in_partition last_pos = position_in_partition::before_all_clustered_rows();
auto consume_clustered = [&] (mutation_fragment&& mf) {
@@ -1411,9 +1411,9 @@ void test_slicing_with_overlapping_range_tombstones(tests::reader_concurrency_se
return stop_iteration::no;
};
rd.consume_pausable(consume_clustered, db::no_timeout).get();
rd.fast_forward_to(position_range(prange.end(), position_in_partition::after_all_clustered_rows()), db::no_timeout).get();
rd.consume_pausable(consume_clustered, db::no_timeout).get();
rd.consume_pausable(consume_clustered).get();
rd.fast_forward_to(position_range(prange.end(), position_in_partition::after_all_clustered_rows())).get();
rd.consume_pausable(consume_clustered).get();
assert_that(result).is_equal_to(m1 + m2);
}
@@ -2638,7 +2638,7 @@ void for_each_schema_change(std::function<void(schema_ptr, const std::vector<mut
// Returns true iff the readers were non-empty.
static bool compare_readers(const schema& s, flat_mutation_reader& authority, flat_reader_assertions& tested) {
bool empty = true;
while (auto expected = authority(db::no_timeout).get()) {
while (auto expected = authority().get()) {
tested.produces(s, *expected);
empty = false;
}
@@ -2658,7 +2658,7 @@ void compare_readers(const schema& s, flat_mutation_reader authority, flat_mutat
auto assertions = assert_that(std::move(tested));
if (compare_readers(s, authority, assertions)) {
for (auto& r: fwd_ranges) {
authority.fast_forward_to(r, db::no_timeout).get();
authority.fast_forward_to(r).get();
assertions.fast_forward_to(r);
compare_readers(s, authority, assertions);
}

View File

@@ -28,9 +28,9 @@ normalizing_reader::normalizing_reader(flat_mutation_reader rd)
, _range_tombstones(*_rd.schema(), _rd.permit())
{}
future<> normalizing_reader::fill_buffer(db::timeout_clock::time_point timeout) {
return do_until([this] { return is_buffer_full() || is_end_of_stream(); }, [this, timeout] {
return _rd.fill_buffer(timeout).then([this] {
future<> normalizing_reader::fill_buffer() {
return do_until([this] { return is_buffer_full() || is_end_of_stream(); }, [this] {
return _rd.fill_buffer().then([this] {
position_in_partition::less_compare less{*_rd.schema()};
while (!_rd.is_buffer_empty()) {
auto mf = _rd.pop_mutation_fragment();
@@ -79,18 +79,18 @@ future<> normalizing_reader::next_partition() {
return make_ready_future<>();
}
future<> normalizing_reader::fast_forward_to(
const dht::partition_range& pr, db::timeout_clock::time_point timeout) {
const dht::partition_range& pr) {
_range_tombstones.reset();
clear_buffer();
_end_of_stream = false;
return _rd.fast_forward_to(pr, timeout);
return _rd.fast_forward_to(pr);
}
future<> normalizing_reader::fast_forward_to(
position_range pr, db::timeout_clock::time_point timeout) {
position_range pr) {
_range_tombstones.forward_to(pr.start());
forward_buffer_to(pr.start());
_end_of_stream = false;
return _rd.fast_forward_to(std::move(pr), timeout);
return _rd.fast_forward_to(std::move(pr));
}
future<> normalizing_reader::close() noexcept {
return _rd.close();

View File

@@ -39,13 +39,13 @@ class normalizing_reader : public flat_mutation_reader::impl {
public:
normalizing_reader(flat_mutation_reader rd);
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override;
virtual future<> fill_buffer() override;
virtual future<> next_partition() override;
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override;
virtual future<> fast_forward_to(const dht::partition_range& pr) override;
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override;
virtual future<> fast_forward_to(position_range pr) override;
virtual future<> close() noexcept override;
};
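Reader implementations that still need the deadline internally now obtain it from the permit rather than from a parameter. A minimal sketch of that pattern, assuming the permit exposes its deadline through an accessor (spelled timeout() here purely for illustration; my_wrapping_reader and _underlying are likewise placeholders):

    future<> my_wrapping_reader::fill_buffer() {
        // No timeout parameter any more: take the deadline from the permit
        // attached to this reader at construction time.
        const auto deadline = _permit.timeout();
        if (db::timeout_clock::now() >= deadline) {
            return make_exception_future<>(seastar::timed_out_error());
        }
        return _underlying.fill_buffer();
    }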

View File

@@ -39,7 +39,7 @@ public:
}
reader_concurrency_semaphore& semaphore() { return *_semaphore; };
reader_permit make_permit() { return _semaphore->make_tracking_only_permit(nullptr, "test"); }
reader_permit make_permit() { return _semaphore->make_tracking_only_permit(nullptr, "test", db::no_timeout); }
};
} // namespace tests

View File

@@ -82,7 +82,12 @@ public:
test_env_sstables_manager& manager() { return *_mgr; }
reader_concurrency_semaphore& semaphore() { return *_semaphore; }
reader_permit make_reader_permit(const schema* const s = nullptr, const char* n = "test") { return _semaphore->make_tracking_only_permit(s, n); }
reader_permit make_reader_permit(const schema* const s, const char* n, db::timeout_clock::time_point timeout) {
return _semaphore->make_tracking_only_permit(s, n, timeout);
}
reader_permit make_reader_permit(db::timeout_clock::time_point timeout = db::no_timeout) {
return _semaphore->make_tracking_only_permit(nullptr, "test", timeout);
}
future<> working_sst(schema_ptr schema, sstring dir, unsigned long generation) {
return reusable_sst(std::move(schema), dir, generation).then([] (auto ptr) { return make_ready_future<>(); });

View File

@@ -34,7 +34,6 @@
#include <boost/range/algorithm/find_if.hpp>
#include "clustering_bounds_comparator.hh"
#include "db/timeout_clock.hh"
#include "dht/i_partitioner.hh"
#include "mutation_fragment.hh"
#include "mutation_reader.hh"
@@ -58,7 +57,7 @@ public:
virtual ~enormous_table_reader() {
}
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
virtual future<> fill_buffer() override {
if (!_partition_in_range) {
return make_ready_future<>();
}
@@ -123,12 +122,12 @@ public:
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range& pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(const dht::partition_range& pr) override {
do_fast_forward_to(pr);
return make_ready_future<>();
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
virtual future<> fast_forward_to(position_range pr) override {
throw runtime_exception("not forwardable");
return make_ready_future<>();
}

View File

@@ -79,7 +79,7 @@ reader_concurrency_semaphore_wrapper::~reader_concurrency_semaphore_wrapper() {
}
reader_permit reader_concurrency_semaphore_wrapper::make_permit() {
return _semaphore->make_tracking_only_permit(nullptr, "perf");
return _semaphore->make_tracking_only_permit(nullptr, "perf", db::no_timeout);
}
} // namespace perf

View File

@@ -769,7 +769,7 @@ public:
static
uint64_t consume_all(flat_mutation_reader& rd) {
return rd.consume(counting_consumer(), db::no_timeout).get0();
return rd.consume(counting_consumer()).get0();
}
static
@@ -778,13 +778,13 @@ uint64_t consume_all_with_next_partition(flat_mutation_reader& rd) {
do {
fragments += consume_all(rd);
rd.next_partition().get();
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
} while(!rd.is_end_of_stream() || !rd.is_buffer_empty());
return fragments;
}
static void assert_partition_start(flat_mutation_reader& rd) {
auto mfopt = rd(db::no_timeout).get0();
auto mfopt = rd().get0();
assert(mfopt);
assert(mfopt->is_partition_start());
}
@@ -823,7 +823,7 @@ static test_result scan_rows_with_stride(column_family& cf, clustered_ds& ds, in
rd.fast_forward_to(position_range(
position_in_partition(position_in_partition::clustering_row_tag_t(), ds.make_ck(*cf.schema(), ck)),
position_in_partition(position_in_partition::clustering_row_tag_t(), ds.make_ck(*cf.schema(), ck + n_read))
), db::no_timeout).get();
)).get();
}
fragments += consume_all(rd);
ck += n_read + n_skip;
@@ -864,7 +864,7 @@ static test_result scan_with_stride_partitions(column_family& cf, int n, int n_r
dht::partition_range::bound(keys[pk], true),
dht::partition_range::bound(keys[std::min(n, pk + n_read) - 1], true)
);
rd.fast_forward_to(pr, db::no_timeout).get();
rd.fast_forward_to(pr).get();
}
fragments += consume_all(rd);
pk += n_read + n_skip;
@@ -889,7 +889,7 @@ static test_result slice_rows(column_family& cf, clustered_ds& ds, int offset =
rd.fast_forward_to(position_range(
position_in_partition::for_key(ds.make_ck(*cf.schema(), offset)),
position_in_partition::for_key(ds.make_ck(*cf.schema(), offset + n_read))), db::no_timeout).get();
position_in_partition::for_key(ds.make_ck(*cf.schema(), offset + n_read)))).get();
uint64_t fragments = consume_all_with_next_partition(rd);
return {before, fragments};
@@ -956,7 +956,7 @@ static test_result slice_rows_single_key(column_family& cf, clustered_ds& ds, in
assert_partition_start(rd);
rd.fast_forward_to(position_range(
position_in_partition::for_key(ds.make_ck(*cf.schema(), offset)),
position_in_partition::for_key(ds.make_ck(*cf.schema(), offset + n_read))), db::no_timeout).get();
position_in_partition::for_key(ds.make_ck(*cf.schema(), offset + n_read)))).get();
uint64_t fragments = consume_all_with_next_partition(rd);
return {before, fragments};
@@ -1147,13 +1147,13 @@ static test_result test_forwarding_with_restriction(column_family& cf, clustered
rd.fast_forward_to(position_range(
position_in_partition::for_key(ds.make_ck(*cf.schema(), 1)),
position_in_partition::for_key(ds.make_ck(*cf.schema(), 2))), db::no_timeout).get();
position_in_partition::for_key(ds.make_ck(*cf.schema(), 2)))).get();
fragments += consume_all(rd);
rd.fast_forward_to(position_range(
position_in_partition::for_key(ds.make_ck(*cf.schema(), first_key - 2)),
position_in_partition::for_key(ds.make_ck(*cf.schema(), first_key + 2))), db::no_timeout).get();
position_in_partition::for_key(ds.make_ck(*cf.schema(), first_key + 2)))).get();
fragments += consume_all_with_next_partition(rd);
return {before, fragments};

View File

@@ -157,7 +157,7 @@ future<> combined::consume_all(flat_mutation_reader mr) const
return mr.consume_pausable([] (mutation_fragment mf) {
perf_tests::do_not_optimize(mf);
return stop_iteration::no;
}, db::no_timeout).then([] {
}).then([] {
perf_tests::stop_measuring_time();
});
});
@@ -278,7 +278,7 @@ future<size_t> clustering_combined::consume_all(flat_mutation_reader mr) const
++num_mfs;
perf_tests::do_not_optimize(mf);
return stop_iteration::no;
}, db::no_timeout).then([&num_mfs] {
}).then([&num_mfs] {
perf_tests::stop_measuring_time();
return num_mfs;
}).finally([&mr] {
@@ -399,7 +399,7 @@ protected:
return mr.consume_pausable([] (mutation_fragment mf) {
perf_tests::do_not_optimize(mf);
return stop_iteration::no;
}, db::no_timeout);
});
});
}
};

View File

@@ -108,7 +108,7 @@ void test_scans_with_dummy_entries() {
auto rd = cache.make_reader(s, semaphore.make_permit(), pr, slice);
auto close_reader = deferred_close(rd);
rd.set_max_buffer_size(1);
rd.fill_buffer(db::no_timeout).get();
rd.fill_buffer().get();
seastar::thread::maybe_yield();
if (cancelled) {
@@ -127,7 +127,7 @@ void test_scans_with_dummy_entries() {
auto d = duration_in_seconds([&] {
rd.consume_pausable([](mutation_fragment) {
return stop_iteration(cancelled);
}, db::no_timeout).get();
}).get();
});
slm.stop();
@@ -211,7 +211,6 @@ void test_scan_with_range_delete_over_rows() {
std::numeric_limits<uint32_t>::max(),
std::numeric_limits<uint32_t>::max(),
gc_clock::now(),
db::no_timeout,
query::max_result_size()).get();
});

View File

@@ -88,7 +88,7 @@ void run_test(const sstring& name, schema_ptr s, MutationGenerator&& gen) {
make_combined_reader(s, permit, cache.make_reader(s, permit), mt->make_flat_reader(s, permit)));
auto close_rd = defer([&rd] { rd->close().get(); });
rd->set_max_buffer_size(1);
rd->fill_buffer(db::no_timeout).get();
rd->fill_buffer().get();
scheduling_latency_measurer slm;
slm.start();
@@ -99,7 +99,7 @@ void run_test(const sstring& name, schema_ptr s, MutationGenerator&& gen) {
rd->set_max_buffer_size(1024*1024);
rd->consume_pausable([] (mutation_fragment) {
return stop_iteration::no;
}, db::no_timeout).get();
}).get();
mt = {};

View File

@@ -217,7 +217,7 @@ public:
auto total = make_lw_shared<size_t>(0);
auto done = make_lw_shared<bool>(false);
return do_until([done] { return *done; }, [this, done, total, &r] {
return read_mutation_from_flat_mutation_reader(r, db::no_timeout).then([this, done, total] (mutation_opt m) {
return read_mutation_from_flat_mutation_reader(r).then([this, done, total] (mutation_opt m) {
if (!m) {
*done = true;
} else {

View File

@@ -191,7 +191,7 @@ int main(int argc, char** argv) {
auto range = dht::partition_range::make_singular(key);
auto reader = cache.make_reader(s, semaphore.make_permit(), range);
auto close_reader = deferred_close(reader);
auto mo = read_mutation_from_flat_mutation_reader(reader, db::no_timeout).get0();
auto mo = read_mutation_from_flat_mutation_reader(reader).get0();
assert(mo);
assert(mo->partition().live_row_count(*s) ==
row_count + 1 /* one row was already in cache before update()*/);
@@ -209,7 +209,7 @@ int main(int argc, char** argv) {
auto range = dht::partition_range::make_singular(key);
auto reader = cache.make_reader(s, semaphore.make_permit(), range);
auto close_reader = deferred_close(reader);
auto mfopt = reader(db::no_timeout).get0();
auto mfopt = reader().get0();
assert(mfopt);
assert(mfopt->is_partition_start());
}
@@ -248,7 +248,7 @@ int main(int argc, char** argv) {
try {
auto reader = cache.make_reader(s, semaphore.make_permit(), range);
auto close_reader = deferred_close(reader);
assert(!reader(db::no_timeout).get0());
assert(!reader().get0());
auto evicted_from_cache = logalloc::segment_size + large_cell_size;
// GCC's -fallocation-dce can remove dead calls to new and malloc, so
// assign the result to a global variable to disable it.

View File

@@ -67,7 +67,7 @@ struct table {
}
reader_permit make_permit() {
return semaphore.make_tracking_only_permit(s.schema().get(), "test");
return semaphore.make_tracking_only_permit(s.schema().get(), "test", db::no_timeout);
}
future<> stop() noexcept {
return semaphore.stop();
@@ -348,7 +348,7 @@ int main(int argc, char** argv) {
while (!cancelled) {
testlog.trace("{}: starting read", id);
auto rd = t.make_single_key_reader(pk, ck_range);
auto row_count = rd->rd.consume(validating_consumer(t, id, t.s.schema()), db::no_timeout).get0();
auto row_count = rd->rd.consume(validating_consumer(t, id, t.s.schema())).get0();
if (row_count != len) {
throw std::runtime_error(format("Expected {:d} fragments, got {:d}", len, row_count));
}
@@ -360,7 +360,7 @@ int main(int argc, char** argv) {
while (!cancelled) {
testlog.trace("{}: starting read", id);
auto rd = t.make_scanning_reader();
auto row_count = rd->rd.consume(validating_consumer(t, id, t.s.schema()), db::no_timeout).get0();
auto row_count = rd->rd.consume(validating_consumer(t, id, t.s.schema())).get0();
if (row_count != expected_row_count) {
throw std::runtime_error(format("Expected {:d} fragments, got {:d}", expected_row_count, row_count));
}

View File

@@ -179,7 +179,7 @@ Note: UDT is not supported for now.
auto stop_semaphore = deferred_stop(rcs_sem);
{
sstables::index_reader idx_reader(sst, rcs_sem.make_tracking_only_permit(primary_key_schema.get(), "idx"), default_priority_class(), {},
sstables::index_reader idx_reader(sst, rcs_sem.make_tracking_only_permit(primary_key_schema.get(), "idx", db::no_timeout), default_priority_class(), {},
sstables::use_caching::yes);
list_partitions(*primary_key_schema, idx_reader);