everywhere: make deferred actions noexcept

Prepare for updating the seastar submodule to a change
that requires deferred actions to be noexcept
(and to return void).
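
For illustration only (not part of the change itself), a minimal sketch of
the pattern this commit applies, assuming the updated seastar defer() API;
release_resource() is a hypothetical cleanup step standing in for calls
like leave() or deregister_compaction():

    #include <seastar/util/defer.hh>

    void release_resource();  // hypothetical cleanup that may throw

    void example() {
        // The callable handed to defer() must be nothrow-invocable, so the
        // lambda is marked noexcept; cleanup that may throw is wrapped so
        // no exception can escape the deferred action.
        auto cleanup = seastar::defer([] () noexcept {
            try {
                release_resource();
            } catch (...) {
                // ignore (or log): a deferred action must not throw
            }
        });
        // ... work protected by the deferred cleanup ...
    }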

Test: unit(dev, debug)

Signed-off-by: Benny Halevy <bhalevy@scylladb.com>
Author: Benny Halevy
Date: 2021-08-22 20:03:45 +03:00
parent eba4191223
commit e9aff2426e
14 changed files with 44 additions and 35 deletions

@@ -403,7 +403,7 @@ future<executor::request_return_type> server::handle_api_request(std::unique_ptr
co_return api_error::request_limit_exceeded(format("too many in-flight requests (configured via max_concurrent_requests_per_shard): {}", _pending_requests.get_count()));
}
_pending_requests.enter();
-auto leave = defer([this] { _pending_requests.leave(); });
+auto leave = defer([this] () noexcept { _pending_requests.leave(); });
//FIXME: Client state can provide more context, e.g. client's endpoint address
// We use unique_ptr because client_state cannot be moved or copied
executor::client_state client_state{executor::client_state::internal_tag()};

@@ -1708,8 +1708,12 @@ static future<compaction_info> scrub_sstables_validate_mode(sstables::compaction
auto info = compaction::create_compaction_info(cf, descriptor);
info->sstables = descriptor.sstables.size();
cf.get_compaction_manager().register_compaction(info);
-auto deregister_compaction = defer([&cf, info] {
-cf.get_compaction_manager().deregister_compaction(info);
+auto deregister_compaction = defer([&cf, info] () noexcept {
+try {
+cf.get_compaction_manager().deregister_compaction(info);
+} catch (...) {
+clogger.warn("Could not deregister compaction: {}. Ignored.", std::current_exception());
+}
});
clogger.info("Scrubbing in validate mode {}", sstables_list_msg);

@@ -70,7 +70,7 @@ single_column_relation::to_term(const std::vector<lw_shared_ptr<column_specifica
::shared_ptr<restrictions::restriction>
single_column_relation::new_EQ_restriction(database& db, schema_ptr schema, prepare_context& ctx) {
const column_definition& column_def = to_column_definition(*schema, *_entity);
-auto reset_processing_pk_column = defer([&ctx] { ctx.set_processing_pk_restrictions(false); });
+auto reset_processing_pk_column = defer([&ctx] () noexcept { ctx.set_processing_pk_restrictions(false); });
if (column_def.is_partition_key()) {
ctx.set_processing_pk_restrictions(true);
}
@@ -93,7 +93,7 @@ single_column_relation::new_EQ_restriction(database& db, schema_ptr schema, prep
single_column_relation::new_IN_restriction(database& db, schema_ptr schema, prepare_context& ctx) {
using namespace restrictions;
const column_definition& column_def = to_column_definition(*schema, *_entity);
-auto reset_processing_pk_column = defer([&ctx] { ctx.set_processing_pk_restrictions(false); });
+auto reset_processing_pk_column = defer([&ctx] () noexcept { ctx.set_processing_pk_restrictions(false); });
if (column_def.is_partition_key()) {
ctx.set_processing_pk_restrictions(true);
}

@@ -339,7 +339,7 @@ public:
}
return _flush_semaphore.wait();
}
-void end_flush() {
+void end_flush() noexcept {
_flush_semaphore.signal();
--totals.pending_flushes;
}
@@ -732,7 +732,7 @@ public:
auto me = shared_from_this();
co_await begin_flush();
-auto finally = defer([&] {
+auto finally = defer([&] () noexcept {
end_flush();
});
@@ -859,7 +859,7 @@ public:
auto&& priority_class = service::get_local_commitlog_priority();
-auto finally = defer([&] {
+auto finally = defer([&] () noexcept {
_segment_manager->notify_memory_written(size);
_segment_manager->totals.buffer_list_bytes -= buf.size_bytes();
if (_size_on_disk < _file_pos) {
@@ -1631,7 +1631,7 @@ future<db::commitlog::segment_manager::sseg_ptr> db::commitlog::segment_manager:
promise<> p;
_segment_allocating.emplace(p.get_future());
-auto finally = defer([&] { _segment_allocating = std::nullopt; });
+auto finally = defer([&] () noexcept { _segment_allocating = std::nullopt; });
try {
gate::holder g(_gate);
auto s = co_await with_timeout(timeout, new_segment());

@@ -39,7 +39,7 @@ future<> view_update_generator::start() {
thread_attributes attr;
attr.sched_group = _db.get_streaming_scheduling_group();
_started = seastar::async(std::move(attr), [this]() mutable {
-auto drop_sstable_references = defer([&] {
+auto drop_sstable_references = defer([&] () noexcept {
// Clear sstable references so sstables_manager::stop() doesn't hang.
vug_logger.info("leaving {} unstaged sstables unprocessed",
_sstables_to_move.size(), _sstables_with_tables.size());

@@ -458,7 +458,7 @@ flat_mutation_reader_from_mutations(reader_permit permit, std::vector<mutation>
if (!re) {
break;
}
-auto re_deleter = defer([re] { current_deleter<rows_entry>()(re); });
+auto re_deleter = defer([re] () noexcept { current_deleter<rows_entry>()(re); });
if (!re->dummy()) {
_cr = mutation_fragment(*_schema, _permit, std::move(*re));
break;
@@ -469,7 +469,7 @@ flat_mutation_reader_from_mutations(reader_permit permit, std::vector<mutation>
auto& rts = _cur->partition().mutable_row_tombstones();
auto rt = rts.pop_front_and_lock();
if (rt) {
-auto rt_deleter = defer([rt] { current_deleter<range_tombstone>()(rt); });
+auto rt_deleter = defer([rt] () noexcept { current_deleter<range_tombstone>()(rt); });
_rt = mutation_fragment(*_schema, _permit, std::move(*rt));
}
}
@@ -574,7 +574,7 @@ flat_mutation_reader_from_mutations(reader_permit permit, std::vector<mutation>
{
_end_of_stream = _cur == _end;
if (!_end_of_stream) {
-auto mutation_destroyer = defer([this] { destroy_mutations(); });
+auto mutation_destroyer = defer([this] () noexcept { destroy_mutations(); });
start_new_partition();
do_fill_buffer(db::no_timeout);

@@ -223,7 +223,11 @@ public:
void clone_from(const intrusive_set_external_comparator &src, Cloner cloner, Disposer disposer) {
clear_and_dispose(disposer);
if (!src.empty()) {
-auto rollback = defer([this, &disposer] { this->clear_and_dispose(disposer); });
+auto rollback = defer([this, &disposer] () noexcept {
+// terminate if clear_and_dispose throws
+// since we cannot recover from that.
+this->clear_and_dispose(disposer);
+});
algo::clone(src._header.this_ptr(),
_header.this_ptr(),
[&cloner] (const node_ptr& p) {

@@ -19,6 +19,7 @@
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <seastar/util/closeable.hh>
#include "utils/build_id.hh"
#include "supervisor.hh"
#include "database.hh"
@@ -622,7 +623,7 @@ int main(int ac, char** av) {
logalloc::shard_tracker().configure(st_cfg);
}).get();
-auto stop_lsa_background_reclaim = defer([&] {
+auto stop_lsa_background_reclaim = defer([&] () noexcept {
smp::invoke_on_all([&] {
return logalloc::shard_tracker().stop();
}).get();
@@ -873,7 +874,7 @@ int main(int ac, char** av) {
cql_config.start().get();
//FIXME: discarded future
(void)cql_config_updater.start(std::ref(cql_config), std::ref(*cfg));
-auto stop_cql_config_updater = defer([&] { cql_config_updater.stop().get(); });
+auto stop_cql_config_updater = deferred_stop(cql_config_updater);
gms::gossip_config gcfg;
gcfg.gossip_scheduling_group = dbcfg.gossip_scheduling_group;
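
Aside from noexcept, the last hunk above also replaces a manual
defer([&] { cql_config_updater.stop().get(); }) with seastar's deferred_stop
helper, which is why <seastar/util/closeable.hh> is now included. A rough
sketch of that idiom, with my_service as a hypothetical stoppable type:

    #include <seastar/core/future.hh>
    #include <seastar/core/sharded.hh>
    #include <seastar/util/closeable.hh>

    // hypothetical service; anything whose stop() returns future<> qualifies
    struct my_service {
        seastar::future<> stop() { return seastar::make_ready_future<>(); }
    };

    void run_in_thread_context(seastar::sharded<my_service>& svc) {
        // replaces: auto stop = defer([&] { svc.stop().get(); });
        // svc is stopped when stop_svc goes out of scope
        auto stop_svc = seastar::deferred_stop(svc);
        // ... use svc ...
    }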

@@ -261,14 +261,14 @@ void querier_cache::insert_querier(
return;
}
try {
-auto cleanup_irh = defer([&] {
+auto cleanup_irh = defer([&] () noexcept {
sem.unregister_inactive_read(std::move(irh));
});
auto it = index.emplace(key, std::make_unique<Querier>(std::move(q)));
++stats.population;
-auto cleanup_index = defer([&] {
+auto cleanup_index = defer([&] () noexcept {
index.erase(it);
--stats.population;
});

@@ -206,7 +206,7 @@ public:
auto pop_as(iterator it) {
range_tombstone& rt = *it;
_tombstones.erase(it);
-auto rt_deleter = seastar::defer([&rt] { current_deleter<range_tombstone>()(&rt); });
+auto rt_deleter = seastar::defer([&rt] () noexcept { current_deleter<range_tombstone>()(&rt); });
return T(std::move(rt));
}

@@ -127,7 +127,7 @@ future<> redis_server::connection::process_request() {
_pending_requests_gate.enter();
utils::latency_counter lc;
lc.start();
-auto leave = defer([this] { _pending_requests_gate.leave(); });
+auto leave = defer([this] () noexcept { _pending_requests_gate.leave(); });
return process_request_internal().then([this, leave = std::move(leave), lc = std::move(lc)] (auto&& result) mutable {
--_server._stats._requests_serving;
try {

@@ -1136,7 +1136,7 @@ int repair_service::do_repair_start(sstring keyspace, std::unordered_map<sstring
}).get();
}
});
-auto stop_off_strategy_updater = defer([uuid, &off_strategy_updater, &as] () mutable {
+auto stop_off_strategy_updater = defer([uuid, &off_strategy_updater, &as] () mutable noexcept {
try {
rlogger.info("repair[{}]: Started to shutdown off-strategy compaction updater", uuid);
if (!as.abort_requested()) {

@@ -942,7 +942,7 @@ future<> row_cache::do_update(external_updater eu, memtable& m, Updater updater)
_tracker.region().merge(m); // Now all data in memtable belongs to cache
_tracker.memtable_cleaner().merge(m._cleaner);
STAP_PROBE(scylla, row_cache_update_start);
-auto cleanup = defer([&m, this] {
+auto cleanup = defer([&m, this] () noexcept {
invalidate_sync(m);
STAP_PROBE(scylla, row_cache_update_end);
});
@@ -950,7 +950,7 @@ future<> row_cache::do_update(external_updater eu, memtable& m, Updater updater)
return seastar::async([this, &m, updater = std::move(updater), real_dirty_acc = std::move(real_dirty_acc)] () mutable {
size_t size_entry;
// In case updater fails, we must bring the cache to consistency without deferring.
-auto cleanup = defer([&m, this] {
+auto cleanup = defer([&m, this] () noexcept {
invalidate_sync(m);
_prev_snapshot_pos = {};
_prev_snapshot = {};
@@ -1124,7 +1124,7 @@ future<> row_cache::invalidate(external_updater eu, const dht::partition_range&
future<> row_cache::invalidate(external_updater eu, dht::partition_range_vector&& ranges) {
return do_update(std::move(eu), [this, ranges = std::move(ranges)] {
return seastar::async([this, ranges = std::move(ranges)] {
-auto on_failure = defer([this] {
+auto on_failure = defer([this] () noexcept {
this->clear_now();
_prev_snapshot_pos = {};
_prev_snapshot = {};

@@ -1159,7 +1159,7 @@ SEASTAR_TEST_CASE(test_no_crash_when_a_lot_of_requests_released_which_change_reg
size_t threshold = size_t(0.75 * free_space);
region_group_reclaimer recl(threshold, threshold);
region_group gr(test_name, recl);
-auto close_gr = defer([&gr] { gr.shutdown().get(); });
+auto close_gr = defer([&gr] () noexcept { gr.shutdown().get(); });
region r(gr);
with_allocator(r.allocator(), [&] {
@@ -1182,7 +1182,7 @@ SEASTAR_TEST_CASE(test_no_crash_when_a_lot_of_requests_released_which_change_reg
};
utils::phased_barrier request_barrier;
-auto wait_for_requests = defer([&] { request_barrier.advance_and_await().get(); });
+auto wait_for_requests = defer([&] () noexcept { request_barrier.advance_and_await().get(); });
for (int i = 0; i < 1000000; ++i) {
fill_to_pressure();
@@ -1227,7 +1227,7 @@ SEASTAR_TEST_CASE(test_reclaiming_runs_as_long_as_there_is_soft_pressure) {
reclaimer recl(hard_threshold, soft_threshold);
region_group gr(test_name, recl);
-auto close_gr = defer([&gr] { gr.shutdown().get(); });
+auto close_gr = defer([&gr] () noexcept { gr.shutdown().get(); });
region r(gr);
with_allocator(r.allocator(), [&] {
@@ -1312,7 +1312,7 @@ SEASTAR_THREAD_TEST_CASE(test_can_reclaim_contiguous_memory_with_mixed_allocatio
auto& rnd = seastar::testing::local_random_engine;
-auto clean_up = defer([&] {
+auto clean_up = defer([&] () noexcept {
with_allocator(evictable.allocator(), [&] {
evictable_allocs.clear();
});
@@ -1377,7 +1377,7 @@ SEASTAR_THREAD_TEST_CASE(test_decay_reserves) {
auto small_thing = bytes(10'000, int8_t(0));
auto large_thing = bytes(100'000'000, int8_t(0));
-auto cleanup = defer([&] {
+auto cleanup = defer([&] () noexcept {
with_allocator(region.allocator(), [&] {
lru.clear();
});
@@ -1472,7 +1472,7 @@ SEASTAR_THREAD_TEST_CASE(background_reclaim) {
auto& rnd = seastar::testing::local_random_engine;
-auto clean_up = defer([&] {
+auto clean_up = defer([&] () noexcept {
with_allocator(evictable.allocator(), [&] {
evictable_allocs.clear();
});
@@ -1508,7 +1508,7 @@ SEASTAR_THREAD_TEST_CASE(background_reclaim) {
// Set up the background reclaimer
auto background_reclaim_scheduling_group = create_scheduling_group("background_reclaim", 100).get0();
-auto kill_sched_group = defer([&] {
+auto kill_sched_group = defer([&] () noexcept {
destroy_scheduling_group(background_reclaim_scheduling_group).get();
});
@@ -1519,8 +1519,8 @@ SEASTAR_THREAD_TEST_CASE(background_reclaim) {
st_cfg.background_reclaim_sched_group = background_reclaim_scheduling_group;
logalloc::shard_tracker().configure(st_cfg);
-auto stop_lsa_background_reclaim = defer([&] {
-return logalloc::shard_tracker().stop().get();
+auto stop_lsa_background_reclaim = defer([&] () noexcept {
+logalloc::shard_tracker().stop().get();
});
sleep(500ms).get(); // sleep a little, to give the reclaimer a head start
@@ -1835,7 +1835,7 @@ SEASTAR_THREAD_TEST_CASE(test_weak_ptr) {
managed_ref<Obj> obj_ptr = with_allocator(region.allocator(), [&] {
return make_managed<Obj>(cookie);
});
-auto del_obj_ptr = defer([&] {
+auto del_obj_ptr = defer([&] () noexcept {
with_allocator(region.allocator(), [&] {
obj_ptr = {};
});
@@ -1844,7 +1844,7 @@ SEASTAR_THREAD_TEST_CASE(test_weak_ptr) {
managed_ref<Obj> obj2_ptr = with_allocator(region.allocator(), [&] {
return make_managed<Obj>(cookie2);
});
-auto del_obj2_ptr = defer([&] {
+auto del_obj2_ptr = defer([&] () noexcept {
with_allocator(region.allocator(), [&] {
obj2_ptr = {};
});