diff --git a/db/system_keyspace.cc b/db/system_keyspace.cc index d08cd3dd75..d348640cc9 100644 --- a/db/system_keyspace.cc +++ b/db/system_keyspace.cc @@ -3609,18 +3609,20 @@ system_keyspace::topology_requests_entry system_keyspace::topology_request_row_t return entry; } -future<system_keyspace::topology_requests_entry> system_keyspace::get_topology_request_entry(utils::UUID id, bool require_entry) { +future<system_keyspace::topology_requests_entry> system_keyspace::get_topology_request_entry(utils::UUID id) { + auto r = co_await get_topology_request_entry_opt(id); + if (!r) { + on_internal_error(slogger, format("no entry for request id {}", id)); + } + co_return std::move(*r); +} + +future<std::optional<system_keyspace::topology_requests_entry>> system_keyspace::get_topology_request_entry_opt(utils::UUID id) { auto rs = co_await execute_cql( format("SELECT * FROM system.{} WHERE id = {}", TOPOLOGY_REQUESTS, id)); if (!rs || rs->empty()) { - if (require_entry) { - on_internal_error(slogger, format("no entry for request id {}", id)); - } else { - co_return topology_requests_entry{ - .id = utils::null_uuid() - }; - } + co_return std::nullopt; } const auto& row = rs->one(); diff --git a/db/system_keyspace.hh b/db/system_keyspace.hh index 5f0e3f825d..b0a0f8166c 100644 --- a/db/system_keyspace.hh +++ b/db/system_keyspace.hh @@ -689,7 +689,8 @@ public: future<service::topology_request_state> get_topology_request_state(utils::UUID id, bool require_entry); topology_requests_entry topology_request_row_to_entry(utils::UUID id, const cql3::untyped_result_set_row& row); - future<topology_requests_entry> get_topology_request_entry(utils::UUID id, bool require_entry); + future<topology_requests_entry> get_topology_request_entry(utils::UUID id); + future<std::optional<topology_requests_entry>> get_topology_request_entry_opt(utils::UUID id); future<topology_requests_entries> get_node_ops_request_entries(db_clock::time_point end_time_limit); public: diff --git a/node_ops/task_manager_module.cc b/node_ops/task_manager_module.cc index 1441788bbe..e715e8ebd8 100644 --- a/node_ops/task_manager_module.cc +++ b/node_ops/task_manager_module.cc @@ -61,11 +61,11 @@ static future get_entries(db::sy } future<std::optional<tasks::task_status>> node_ops_virtual_task::get_status(tasks::task_id id, 
tasks::virtual_task_hint hint) { - auto entry = co_await _ss._sys_ks.local().get_topology_request_entry(id.uuid(), false); - auto started = entry.id; - if (!started) { + auto entry_opt = co_await _ss._sys_ks.local().get_topology_request_entry_opt(id.uuid()); + if (!entry_opt) { co_return std::nullopt; } + auto& entry = *entry_opt; co_return tasks::task_status{ .task_id = id, .type = request_type_to_task_type(entry.request_type), @@ -84,7 +84,7 @@ future<std::optional<tasks::task_status>> node_ops_virtual_task::get_status(task .entity = "", .progress_units = "", .progress = tasks::task_manager::task::progress{}, - .children = started ? co_await get_children(get_module(), id, std::bind_front(&gms::gossiper::is_alive, &_ss.gossiper())) : utils::chunked_vector<tasks::task_identity>{} + .children = co_await get_children(get_module(), id, std::bind_front(&gms::gossiper::is_alive, &_ss.gossiper())) }; } @@ -106,8 +106,8 @@ future<std::optional<tasks::virtual_task_hint>> node_ops_virtual_task::contains( } } - auto entry = co_await _ss._sys_ks.local().get_topology_request_entry(task_id.uuid(), false); - co_return bool(entry.id) && std::holds_alternative<topology_request>(entry.request_type) ? empty_hint : std::nullopt; + auto entry = co_await _ss._sys_ks.local().get_topology_request_entry_opt(task_id.uuid()); + co_return entry && std::holds_alternative<topology_request>(entry->request_type) ? empty_hint : std::nullopt; } future<tasks::is_abortable> node_ops_virtual_task::is_abortable(tasks::virtual_task_hint) const { diff --git a/service/storage_proxy.cc b/service/storage_proxy.cc index ca21411922..5b707c7fa7 100644 --- a/service/storage_proxy.cc +++ b/service/storage_proxy.cc @@ -1113,7 +1113,7 @@ private: // only for a truncate which is still waiting. 
if (_topology_state_machine._topology.global_request) { utils::UUID ongoing_global_request_id = _topology_state_machine._topology.global_request_id.value(); - const auto topology_requests_entry = co_await _sys_ks.local().get_topology_request_entry(ongoing_global_request_id, true); + const auto topology_requests_entry = co_await _sys_ks.local().get_topology_request_entry(ongoing_global_request_id); auto global_request = std::get<global_topology_request>(topology_requests_entry.request_type); if (global_request == global_topology_request::truncate_table) { std::optional<topology::transition_state>& tstate = _topology_state_machine._topology.tstate; diff --git a/service/storage_service.cc b/service/storage_service.cc index 8b66c4aced..9b9b563999 100644 --- a/service/storage_service.cc +++ b/service/storage_service.cc @@ -5228,7 +5228,7 @@ future<> storage_service::raft_check_and_repair_cdc_streams() { request_id = _topology_state_machine._topology.global_request_id.value(); } else if (!_topology_state_machine._topology.global_requests_queue.empty()) { request_id = _topology_state_machine._topology.global_requests_queue[0]; - auto req_entry = co_await _sys_ks.local().get_topology_request_entry(request_id, true); + auto req_entry = co_await _sys_ks.local().get_topology_request_entry(request_id); curr_req = std::get<global_topology_request>(req_entry.request_type); } else { request_id = utils::UUID{}; diff --git a/service/topology_coordinator.cc b/service/topology_coordinator.cc index 2efbc7ffa8..21792bff1a 100644 --- a/service/topology_coordinator.cc +++ b/service/topology_coordinator.cc @@ -953,7 +953,7 @@ class topology_coordinator : public endpoint_lifecycle_subscriber { } else { assert(_feature_service.topology_global_request_queue); req_id = _topo_sm._topology.global_requests_queue[0]; - req_entry = co_await _sys_ks.get_topology_request_entry(req_id, true); + req_entry = co_await _sys_ks.get_topology_request_entry(req_id); req = std::get<global_topology_request>(req_entry.request_type); } switch (req) { @@ -2051,7 +2051,7 @@ class topology_coordinator : public 
endpoint_lifecycle_subscriber { // We should perform TRUNCATE only if the session is still valid. It could be cleared if a previous truncate // handler performed the truncate and cleared the session, but crashed before finalizing the request if (_topo_sm._topology.session) { - const auto topology_requests_entry = co_await _sys_ks.get_topology_request_entry(global_request_id, true); + const auto topology_requests_entry = co_await _sys_ks.get_topology_request_entry(global_request_id); const table_id& table_id = topology_requests_entry.truncate_table_id; lw_shared_ptr<replica::table> table = _db.get_tables_metadata().get_table_if_exists(table_id);