Mirror of https://github.com/scylladb/scylladb.git
treewide: silence discarded future warnings for questionable discards
This patch silences the remaining discarded-future warnings: those where it cannot be determined with reasonable confidence that the discard was indeed the author's intent, or where discarding the future could lead to problems. A FIXME is added at each such place, with the intent that these will soon be followed up with an actual fix. I deliberately haven't fixed any of them, even where the fix seems trivial; it is too easy to overlook a bad fix mixed in with so many mechanical changes.
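For context, a minimal self-contained sketch of the pattern applied throughout the diff below. Here future_like and do_sync are illustrative stand-ins, not Scylla or Seastar APIs; in the real tree the warning comes from Seastar futures being marked must-use, which behaves like [[nodiscard]]:

// Minimal sketch of the silencing pattern (stand-in types only).
// A [[nodiscard]] return type makes the compiler flag silently
// dropped values, much like the discarded-future warnings here.
struct [[nodiscard]] future_like {};

future_like do_sync() { return {}; }   // stand-in for an async call

int main() {
    // do_sync();      // would warn: ignoring nodiscard return value
    //FIXME: discarded future.
    (void)do_sync();   // explicit cast documents the discard and silences it
}

The (void) cast keeps each discard greppable next to its FIXME, so every site can later be audited and either awaited, chained, or annotated as intentionally fire-and-forget.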
@@ -550,7 +550,8 @@ public:
     */
     future<sseg_ptr> finish_and_get_new(db::timeout_clock::time_point timeout) {
         _closed = true;
-        sync();
+        //FIXME: discarded future.
+        (void)sync();
         return _segment_manager->active_segment(timeout);
     }
     void reset_sync_time() {
@@ -849,7 +850,8 @@ public:
                 return new_seg->allocate(id, std::move(writer), std::move(permit), timeout);
             });
         } else {
-            cycle().discard_result().handle_exception([] (auto ex) {
+            //FIXME: discarded future
+            (void)cycle().discard_result().handle_exception([] (auto ex) {
                 clogger.error("Failed to flush commits to disk: {}", ex);
             });
         }
@@ -901,7 +903,8 @@ public:
         // then no other request will be allowed in to force the cycle()ing of this buffer. We
         // have to do it ourselves.
         if ((buffer_position() >= (db::commitlog::segment::default_size))) {
-            cycle().discard_result().handle_exception([] (auto ex) {
+            //FIXME: discarded future.
+            (void)cycle().discard_result().handle_exception([] (auto ex) {
                 clogger.error("Failed to flush commits to disk: {}", ex);
             });
         }
@@ -1608,7 +1611,8 @@ void db::commitlog::segment_manager::on_timer() {
     // while they are running.
     (void)seastar::with_gate(_gate, [this] {
         if (cfg.mode != sync_mode::BATCH) {
-            sync();
+            //FIXME: discarded future.
+            (void)sync();
         }
         // IFF a new segment was put in use since last we checked, and we're
         // above threshold, request flush.
@@ -2054,7 +2058,8 @@ db::commitlog::read_log_file(const sstring& filename, const sstring& pfx, seasta
     auto w = make_lw_shared<work>(std::move(f), d, read_io_prio_class, off);
     auto ret = w->s.listen(next);

-    w->s.started().then(std::bind(&work::read_file, w.get())).then([w] {
+    //FIXME: discarded future.
+    (void)w->s.started().then(std::bind(&work::read_file, w.get())).then([w] {
         if (!w->failed) {
             w->s.close();
         }
@@ -1348,7 +1348,8 @@ future<> migrate_truncation_records() {
     need_legacy_truncation_records = !ss.cluster_supports_truncation_table();

     if (need_legacy_truncation_records || !tmp.empty()) {
-        ss.cluster_supports_truncation_table().when_enabled().then([] {
+        //FIXME: discarded future.
+        (void)ss.cluster_supports_truncation_table().when_enabled().then([] {
             // this potentially races with a truncation, i.e. someone could be inserting into
             // the legacy column while we delete it. But this is ok, it will just mean we have
             // some unneeded data and will do a merge again next boot, but eventually we
@@ -1340,7 +1340,8 @@ future<> view_builder::calculate_shard_build_step(
     if (built_views.find(view->id()) != built_views.end()) {
         if (engine().cpu_id() == 0) {
             auto f = _sys_dist_ks.finish_view_build(std::move(view_name.first), std::move(view_name.second)).then([view = std::move(view)] {
-                system_keyspace::remove_view_build_progress_across_all_shards(view->cf_name(), view->ks_name());
+                //FIXME: discarded future.
+                (void)system_keyspace::remove_view_build_progress_across_all_shards(view->cf_name(), view->ks_name());
             });
             bookkeeping_ops->push_back(std::move(f));
         }
@@ -384,7 +384,8 @@ void distributed_loader::reshard(distributed<database>& db, sstring ks_name, sst
     // refresh (triggers resharding) is issued by user while resharding is going on).
     static semaphore sem(1);

-    with_semaphore(sem, 1, [&db, ks_name = std::move(ks_name), cf_name = std::move(cf_name)] () mutable {
+    // FIXME: discarded future.
+    (void)with_semaphore(sem, 1, [&db, ks_name = std::move(ks_name), cf_name = std::move(cf_name)] () mutable {
        return seastar::async([&db, ks_name = std::move(ks_name), cf_name = std::move(cf_name)] () mutable {
            global_column_family_ptr cf(db, ks_name, cf_name);

@@ -506,7 +507,8 @@ future<> distributed_loader::load_new_sstables(distributed<database>& db, distri
                 abort();
             }
             if (sst->requires_view_building()) {
-                view_update_generator.local().register_staging_sstable(sst, cf.shared_from_this());
+                // FIXME: discarded future.
+                (void)view_update_generator.local().register_staging_sstable(sst, cf.shared_from_this());
             }
         }
         cf._sstables_opened_but_not_loaded.clear();
@@ -104,7 +104,8 @@ future<> gossiping_property_file_snitch::start() {

 void gossiping_property_file_snitch::periodic_reader_callback() {
     _file_reader_runs = true;
-    property_file_was_modified().then([this] (bool was_modified) {
+    //FIXME: discarded future.
+    (void)property_file_was_modified().then([this] (bool was_modified) {

         if (was_modified) {
             return read_property_file();
main.cc (24 changed lines)
@@ -600,7 +600,8 @@ int main(int ac, char** av) {
             startlog.info("stopping prometheus API server");
             prometheus_server.stop().get();
         }));
-        prometheus::start(prometheus_server, pctx);
+        //FIXME discarded future
+        (void)prometheus::start(prometheus_server, pctx);
         with_scheduling_group(maintenance_scheduling_group, [&] {
             return prometheus_server.listen(socket_address{prom_addr, pport}).handle_exception([pport, &cfg] (auto ep) {
                 startlog.error("Could not start Prometheus API server on {}:{}: {}", cfg->prometheus_address(), pport, ep);
@@ -699,7 +700,8 @@ int main(int ac, char** av) {
         static sharded<cql3::cql_config> cql_config;
         static sharded<::cql_config_updater> cql_config_updater;
         cql_config.start().get();
-        cql_config_updater.start(std::ref(cql_config), std::ref(*cfg));
+        //FIXME: discarded future
+        (void)cql_config_updater.start(std::ref(cql_config), std::ref(*cfg));
         auto stop_cql_config_updater = defer([&] { cql_config_updater.stop().get(); });
         auto& gossiper = gms::get_gossiper();
         gossiper.start(std::ref(feature_service), std::ref(*cfg)).get();
@@ -708,7 +710,8 @@ int main(int ac, char** av) {
         supervisor::notify("initializing storage service");
         service::storage_service_config sscfg;
         sscfg.available_memory = memory::stats().total_memory();
-        init_storage_service(stop_signal.as_sharded_abort_source(), db, gossiper, auth_service, cql_config, sys_dist_ks, view_update_generator, feature_service, sscfg);
+        //FIXME: discarded future
+        (void)init_storage_service(stop_signal.as_sharded_abort_source(), db, gossiper, auth_service, cql_config, sys_dist_ks, view_update_generator, feature_service, sscfg);
         supervisor::notify("starting per-shard database core");

         // Note: changed from using a move here, because we want the config object intact.
@@ -888,7 +891,8 @@ int main(int ac, char** av) {
                 table& t = *(x.second);
                 for (sstables::shared_sstable sst : *t.get_sstables()) {
                     if (sst->requires_view_building()) {
-                        view_update_generator.local().register_staging_sstable(std::move(sst), t.shared_from_this());
+                        // FIXME: discarded future.
+                        (void)view_update_generator.local().register_staging_sstable(std::move(sst), t.shared_from_this());
                     }
                 }
             }
@@ -913,7 +917,8 @@ int main(int ac, char** av) {
                 return db.flush_all_memtables();
             }).get();
             supervisor::notify("replaying commit log - removing old commitlog segments");
-            cl->delete_segments(std::move(paths));
+            //FIXME: discarded future
+            (void)cl->delete_segments(std::move(paths));
         }
     }

@@ -943,7 +948,8 @@ int main(int ac, char** av) {
         api::set_server_storage_proxy(ctx).get();
         api::set_server_load_sstable(ctx).get();
         static seastar::sharded<memory_threshold_guard> mtg;
-        mtg.start(cfg->large_memory_allocation_warning_threshold());
+        //FIXME: discarded future
+        (void)mtg.start(cfg->large_memory_allocation_warning_threshold());
         supervisor::notify("initializing migration manager RPC verbs");
         service::get_migration_manager().invoke_on_all([] (auto& mm) {
             mm.init_messaging_service();
@@ -966,7 +972,8 @@ int main(int ac, char** av) {
         proxy.invoke_on_all([] (service::storage_proxy& local_proxy) {
             auto& ss = service::get_local_storage_service();
             ss.register_subscriber(&local_proxy);
-            local_proxy.start_hints_manager(gms::get_local_gossiper().shared_from_this(), ss.shared_from_this());
+            //FIXME: discarded future
+            (void)local_proxy.start_hints_manager(gms::get_local_gossiper().shared_from_this(), ss.shared_from_this());
         }).get();

         supervisor::notify("starting messaging service");
@@ -1020,7 +1027,8 @@ int main(int ac, char** av) {
             view_backlog_broker.stop().get();
         });

-        api::set_server_cache(ctx);
+        //FIXME: discarded future
+        (void)api::set_server_cache(ctx);
         startlog.info("Waiting for gossip to settle before accepting client requests...");
         gms::get_local_gossiper().wait_for_gossip_to_settle().get();
         api::set_server_gossip_settle(ctx).get();
@@ -84,7 +84,8 @@ void migration_manager::init_messaging_service()
     auto& ss = service::get_local_storage_service();

     auto update_schema = [this, &ss] {
-        with_gate(_background_tasks, [this, &ss] {
+        //FIXME: future discarded.
+        (void)with_gate(_background_tasks, [this, &ss] {
             mlogger.debug("features changed, recalculating schema version");
             return update_schema_version(get_storage_proxy(), ss.cluster_schema_features());
         });
@@ -201,7 +201,8 @@ future<> view_update_backlog_broker::start() {
             auto backlog = _sp.local().get_view_update_backlog();
             auto now = api::timestamp_type(std::chrono::duration_cast<std::chrono::milliseconds>(
                     std::chrono::system_clock::now().time_since_epoch()).count());
-            _gossiper.add_local_application_state(
+            //FIXME: discarded future.
+            (void)_gossiper.add_local_application_state(
                     gms::application_state::VIEW_BACKLOG,
                     gms::versioned_value(seastar::format("{}:{}:{}", backlog.current, backlog.max, now)));
             sleep_abortable(gms::gossiper::INTERVAL, _as).get();
@@ -190,9 +190,11 @@ storage_service::storage_service(abort_source& abort_source, distributed<databas
         _sstables_format = sstables::sstable_version_types::mc;
     }

-    _unbounded_range_tombstones_feature.when_enabled().then([&db] () mutable {
+    //FIXME: discarded future.
+    (void)_unbounded_range_tombstones_feature.when_enabled().then([&db] () mutable {
         slogger.debug("Enabling infinite bound range deletions");
-        db.invoke_on_all([] (database& local_db) mutable {
+        //FIXME: discarded future.
+        (void)db.invoke_on_all([] (database& local_db) mutable {
             local_db.enable_infinite_bound_range_deletions();
         });
     });
@@ -1113,7 +1115,8 @@ void storage_service::handle_state_leaving(inet_address endpoint) {
 }

 void storage_service::update_pending_ranges_nowait(inet_address endpoint) {
-    update_pending_ranges().handle_exception([endpoint] (std::exception_ptr ep) {
+    //FIXME: discarded future.
+    (void)update_pending_ranges().handle_exception([endpoint] (std::exception_ptr ep) {
         slogger.info("Failed to update_pending_ranges for node {}: {}", endpoint, ep);
     });
 }
@@ -1190,7 +1193,8 @@ void storage_service::handle_state_removing(inet_address endpoint, std::vector<s
             // will be removed from _replicating_nodes on the
             // removal_coordinator.
             auto notify_endpoint = ep.value();
-            restore_replica_count(endpoint, notify_endpoint).handle_exception([endpoint, notify_endpoint] (auto ep) {
+            //FIXME: discarded future.
+            (void)restore_replica_count(endpoint, notify_endpoint).handle_exception([endpoint, notify_endpoint] (auto ep) {
                 slogger.info("Failed to restore_replica_count for node {}, notify_endpoint={} : {}", endpoint, notify_endpoint, ep);
             });
         }
@@ -1207,14 +1211,16 @@ void storage_service::on_join(gms::inet_address endpoint, gms::endpoint_state ep
     for (const auto& e : ep_state.get_application_state_map()) {
         on_change(endpoint, e.first, e.second);
     }
-    get_local_migration_manager().schedule_schema_pull(endpoint, ep_state).handle_exception([endpoint] (auto ep) {
+    //FIXME: discarded future.
+    (void)get_local_migration_manager().schedule_schema_pull(endpoint, ep_state).handle_exception([endpoint] (auto ep) {
         slogger.warn("Fail to pull schema from {}: {}", endpoint, ep);
     });
 }

 void storage_service::on_alive(gms::inet_address endpoint, gms::endpoint_state state) {
     slogger.debug("endpoint={} on_alive", endpoint);
-    get_local_migration_manager().schedule_schema_pull(endpoint, state).handle_exception([endpoint] (auto ep) {
+    //FIXME: discarded future.
+    (void)get_local_migration_manager().schedule_schema_pull(endpoint, state).handle_exception([endpoint] (auto ep) {
         slogger.warn("Fail to pull schema from {}: {}", endpoint, ep);
     });
     if (_token_metadata.is_member(endpoint)) {
@@ -1267,7 +1273,8 @@ void storage_service::on_change(inet_address endpoint, application_state state,
     if (get_token_metadata().is_member(endpoint)) {
         do_update_system_peers_table(endpoint, state, value);
         if (state == application_state::SCHEMA) {
-            get_local_migration_manager().schedule_schema_pull(endpoint, *ep_state).handle_exception([endpoint] (auto ep) {
+            //FIXME: discarded future.
+            (void)get_local_migration_manager().schedule_schema_pull(endpoint, *ep_state).handle_exception([endpoint] (auto ep) {
                 slogger.warn("Failed to pull schema from {}: {}", endpoint, ep);
             });
         } else if (state == application_state::RPC_READY) {
@@ -2489,7 +2496,8 @@ future<> storage_service::removenode(sstring host_id_string) {
             // No need to wait for restore_replica_count to complete, since
             // when it completes, the node will be removed from _replicating_nodes,
             // and we wait for _replicating_nodes to become empty below
-            ss.restore_replica_count(endpoint, my_address).handle_exception([endpoint, my_address] (auto ep) {
+            //FIXME: discarded future.
+            (void)ss.restore_replica_count(endpoint, my_address).handle_exception([endpoint, my_address] (auto ep) {
                 slogger.info("Failed to restore_replica_count for node {} on node {}", endpoint, my_address);
             });

@@ -3234,7 +3242,8 @@ void storage_service::do_isolate_on_error(disk_error type)
         slogger.warn("Shutting down communications due to I/O errors until operator intervention");
         slogger.warn("{} error: {}", type == disk_error::commit ? "Commitlog" : "Disk", std::current_exception());
         // isolated protect us against multiple stops
-        service::get_local_storage_service().stop_transport();
+        //FIXME: discarded future.
+        (void)service::get_local_storage_service().stop_transport();
     }
 }

@@ -3426,7 +3435,8 @@ void feature_enabled_listener::on_enabled() {
         return;
     }
     _started = true;
-    with_semaphore(_sem, 1, [this] {
+    //FIXME: discarded future.
+    (void)with_semaphore(_sem, 1, [this] {
         if (!sstables::is_later(_format, _s._sstables_format)) {
             return make_ready_future<bool>(false);
         }
@@ -493,7 +493,8 @@ inline bool compaction_manager::maybe_stop_on_error(future<> f, stop_iteration w
     } catch (storage_io_error& e) {
         cmlog.error("compaction failed due to storage io error: {}: stopping", e.what());
         retry = false;
-        stop();
+        // FIXME discarded future.
+        (void)stop();
     } catch (...) {
         cmlog.error("compaction failed: {}: {}", std::current_exception(), retry_msg);
         retry = true;
@@ -127,7 +127,7 @@ public:
     ~data_consume_context() {
         if (_ctx) {
             auto f = _ctx->close();
-            // Can't wait on the future in the destructor.
-            f.handle_exception([ctx = std::move(_ctx), sst = std::move(_sst)](auto) {});
+            //FIXME: discarded future.
+            (void)f.handle_exception([ctx = std::move(_ctx), sst = std::move(_sst)](auto) {});
         }
     }
@@ -224,7 +224,8 @@ public:
     auto close = [this] (std::unique_ptr<index_reader>& ptr) {
         if (ptr) {
             auto f = ptr->close();
-            f.handle_exception([index = std::move(ptr)] (auto&&) { });
+            // FIXME: discarded future.
+            (void)f.handle_exception([index = std::move(ptr)] (auto&&) { });
         }
     };
     close(_index_reader);
@@ -274,7 +274,8 @@ void stream_manager::fail_all_sessions() {
 void stream_manager::on_remove(inet_address endpoint) {
     if (has_peer(endpoint)) {
         sslog.info("stream_manager: Close all stream_session with peer = {} in on_remove", endpoint);
-        get_stream_manager().invoke_on_all([endpoint] (auto& sm) {
+        //FIXME: discarded future.
+        (void)get_stream_manager().invoke_on_all([endpoint] (auto& sm) {
             sm.fail_sessions(endpoint);
         }).handle_exception([endpoint] (auto ep) {
             sslog.warn("stream_manager: Fail to close sessions peer = {} in on_remove", endpoint);
@@ -285,7 +286,8 @@ void stream_manager::on_remove(inet_address endpoint) {
 void stream_manager::on_restart(inet_address endpoint, endpoint_state ep_state) {
     if (has_peer(endpoint)) {
         sslog.info("stream_manager: Close all stream_session with peer = {} in on_restart", endpoint);
-        get_stream_manager().invoke_on_all([endpoint] (auto& sm) {
+        //FIXME: discarded future.
+        (void)get_stream_manager().invoke_on_all([endpoint] (auto& sm) {
             sm.fail_sessions(endpoint);
         }).handle_exception([endpoint] (auto ep) {
             sslog.warn("stream_manager: Fail to close sessions peer = {} in on_restart", endpoint);
@@ -296,7 +298,8 @@ void stream_manager::on_restart(inet_address endpoint, endpoint_state ep_state)
 void stream_manager::on_dead(inet_address endpoint, endpoint_state ep_state) {
     if (has_peer(endpoint)) {
         sslog.info("stream_manager: Close all stream_session with peer = {} in on_dead", endpoint);
-        get_stream_manager().invoke_on_all([endpoint] (auto& sm) {
+        //FIXME: discarded future.
+        (void)get_stream_manager().invoke_on_all([endpoint] (auto& sm) {
             sm.fail_sessions(endpoint);
         }).handle_exception([endpoint] (auto ep) {
             sslog.warn("stream_manager: Fail to close sessions peer = {} in on_dead", endpoint);
@@ -121,7 +121,8 @@ void stream_result_future::maybe_complete() {
     }
     auto duration = std::chrono::duration_cast<std::chrono::duration<float>>(lowres_clock::now() - _start_time).count();
     auto stats = make_lw_shared<sstring>("");
-    sm.get_progress_on_all_shards(plan_id).then([plan_id, duration, stats] (auto sbytes) {
+    //FIXME: discarded future.
+    (void)sm.get_progress_on_all_shards(plan_id).then([plan_id, duration, stats] (auto sbytes) {
         auto tx_bw = sstring("0");
         auto rx_bw = sstring("0");
         if (std::fabs(duration) > FLT_EPSILON) {
@@ -213,7 +213,8 @@ void stream_session::init_messaging_service_handler() {
                 }
             });
         };
-        mutation_writer::distribute_reader_and_consume_on_shards(s, dht::global_partitioner(),
+        //FIXME: discarded future.
+        (void)mutation_writer::distribute_reader_and_consume_on_shards(s, dht::global_partitioner(),
             make_generating_reader(s, std::move(get_next_mutation_fragment)),
             [plan_id, estimated_partitions, reason] (flat_mutation_reader reader) {
                 auto& cf = get_local_db().find_column_family(reader.schema());
@@ -508,7 +509,8 @@ void stream_session::send_failed_complete_message() {
     sslog.debug("[Stream #{}] SEND COMPLETE_MESSAGE to {}", plan_id, id);
     auto session = shared_from_this();
     bool failed = true;
-    this->ms().send_complete_message(id, plan_id, this->dst_cpu_id, failed).then([session, id, plan_id] {
+    //FIXME: discarded future.
+    (void)this->ms().send_complete_message(id, plan_id, this->dst_cpu_id, failed).then([session, id, plan_id] {
         sslog.debug("[Stream #{}] GOT COMPLETE_MESSAGE Reply from {}", plan_id, id.addr);
     }).handle_exception([session, id, plan_id] (auto ep) {
         sslog.debug("[Stream #{}] COMPLETE_MESSAGE for {} has failed: {}", plan_id, id.addr, ep);
@@ -536,7 +538,8 @@ void stream_session::start_streaming_files() {
     if (!_transfers.empty()) {
         set_state(stream_session_state::STREAMING);
     }
-    do_for_each(_transfers.begin(), _transfers.end(), [this] (auto& item) {
+    //FIXME: discarded future.
+    (void)do_for_each(_transfers.begin(), _transfers.end(), [this] (auto& item) {
         sslog.debug("[Stream #{}] Start to send cf_id={}", this->plan_id(), item.first);
         return item.second.execute();
     }).then([this] {
@@ -618,7 +621,8 @@ void stream_session::close_session(stream_session_state final_state) {
         for (auto& x : _receivers) {
             stream_receive_task& task = x.second;
             sslog.debug("[Stream #{}] close_session session={}, state={}, abort stream_receive_task cf_id={}", plan_id(), this, final_state, task.cf_id);
-            receiving_failed(x.first);
+            //FIXME: discarded future.
+            (void)receiving_failed(x.first);
             task.abort();
         }
         send_failed_complete_message();
@@ -647,7 +651,8 @@ void stream_session::start() {
     } else {
         sslog.debug("[Stream #{}] Starting streaming to {} through {}", plan_id(), peer, connecting);
     }
-    on_initialization_complete().handle_exception([this] (auto ep) {
+    //FIXME: discarded future.
+    (void)on_initialization_complete().handle_exception([this] (auto ep) {
        this->on_error();
    });
 }
table.cc (3 changed lines)
@@ -2083,7 +2083,8 @@ future<> table::generate_and_propagate_view_updates(const schema_ptr& base,
             flat_mutation_reader_from_mutations({std::move(m)}),
             std::move(existings)).then([this, base_token = std::move(base_token)] (std::vector<frozen_mutation_and_schema>&& updates) mutable {
         auto units = seastar::consume_units(*_config.view_update_concurrency_semaphore, memory_usage_of(updates));
-        db::view::mutate_MV(std::move(base_token), std::move(updates), _view_stats, *_config.cf_stats, std::move(units)).handle_exception([] (auto ignored) { });
+        //FIXME: discarded future.
+        (void)db::view::mutate_MV(std::move(base_token), std::move(updates), _view_stats, *_config.cf_stats, std::move(units)).handle_exception([] (auto ignored) { });
     });
 }

@@ -69,7 +69,8 @@ future<> table_helper::cache_table_info(service::query_state& qs) {
         _insert_stmt = dynamic_pointer_cast<cql3::statements::modification_statement>(cql_stmt);
     }).handle_exception([this] (auto eptr) {
         // One of the possible causes for an error here could be the table that doesn't exist.
-        this->setup_table().discard_result();
+        //FIXME: discarded future.
+        (void)this->setup_table().discard_result();

         // We throw the bad_column_family exception because the caller
         // expects and accounts this type of errors.