storage_proxy: Keep own updateable_timeout_config

Storage_proxy was reading read_request_timeout_in_ms and
write_request_timeout_in_ms directly from db::config via
database::get_config() at four call sites. Give storage_proxy its own
updateable_timeout_config member (built from db::config the same way
the CQL transport controller and the alternator server do) and use its
read_timeout_in_ms / write_timeout_in_ms observers instead.

Storage_proxy no longer needs database::get_config() for coordinator
timeout values. A later refactor may turn these per-owner copies into
references to a single shared updateable_timeout_config.

Signed-off-by: Pavel Emelyanov <xemul@scylladb.com>
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
This commit is contained in:
Pavel Emelyanov
2026-04-24 14:27:09 +03:00
parent d280517e27
commit 7ca8a863d9
2 changed files with 7 additions and 4 deletions

View File

@@ -590,7 +590,7 @@ private:
storage_proxy::clock_type::time_point timeout;
if (!t) {
-auto timeout_in_ms = _sp._db.local().get_config().write_request_timeout_in_ms();
+auto timeout_in_ms = _sp._timeout_config.write_timeout_in_ms();
timeout = clock_type::now() + std::chrono::milliseconds(timeout_in_ms);
} else {
timeout = *t;
@@ -3340,6 +3340,7 @@ storage_proxy::storage_proxy(sharded<replica::database>& db, storage_proxy::conf
, _background_write_throttle_threahsold(cfg.available_memory / 10)
, _mutate_stage{"storage_proxy_mutate", &storage_proxy::do_mutate}
, _max_view_update_backlog(max_view_update_backlog)
+, _timeout_config(_db.local().get_config())
, _cancellable_write_handlers_list(std::make_unique<cancellable_write_handlers_list>())
{
namespace sm = seastar::metrics;
@@ -3969,7 +3970,7 @@ future<result<>> storage_proxy::mutate_begin(unique_response_handler_vector ids,
// frozen_mutation copy, or manage handler live time differently.
hint_to_dead_endpoints(response_id, cl);
-auto timeout = timeout_opt.value_or(clock_type::now() + std::chrono::milliseconds(_db.local().get_config().write_request_timeout_in_ms()));
+auto timeout = timeout_opt.value_or(clock_type::now() + std::chrono::milliseconds(_timeout_config.write_timeout_in_ms()));
// call before send_to_live_endpoints() for the same reason as above
auto f = response_wait(response_id, timeout);
send_to_live_endpoints(protected_response.release(), timeout); // response is now running and it will either complete or timeout
@@ -5941,7 +5942,7 @@ public:
// occur within write_timeout of a write, as these are the cases where repair is most
// beneficial.
if (is_datacenter_local(exec->_cl) && exec->_cmd->read_timestamp >= 0 && digest_resolver->last_modified() >= 0) {
-auto write_timeout = exec->_proxy->_db.local().get_config().write_request_timeout_in_ms() * 1000;
+auto write_timeout = exec->_proxy->_timeout_config.write_timeout_in_ms() * 1000;
auto delta = int64_t(digest_resolver->last_modified()) - int64_t(exec->_cmd->read_timestamp);
if (std::abs(delta) <= write_timeout) {
exec->_proxy->get_stats().global_read_repairs_canceled_due_to_concurrent_write++;
@@ -6065,7 +6066,7 @@ public:
});
auto& sr = _schema->speculative_retry();
auto t = (sr.get_type() == speculative_retry::type::PERCENTILE) ?
-std::min(_cf->get_coordinator_read_latency_percentile(sr.get_value()), std::chrono::milliseconds(_proxy->get_db().local().get_config().read_request_timeout_in_ms()/2)) :
+std::min(_cf->get_coordinator_read_latency_percentile(sr.get_value()), std::chrono::milliseconds(_proxy->_timeout_config.read_timeout_in_ms()/2)) :
std::chrono::milliseconds(unsigned(sr.get_value()));
_speculate_timer.arm(t);
resolver->set_on_disconnect([this] {

View File

@@ -40,6 +40,7 @@
#include "dht/token_range_endpoints.hh"
#include "service/storage_service.hh"
#include "service/cas_shard.hh"
+#include "timeout_config.hh"
#include "service/storage_proxy_fwd.hh"
class reconcilable_result;
@@ -316,6 +317,7 @@ private:
lw_shared_ptr<cdc::operation_result_tracker>,
coordinator_mutate_options> _mutate_stage;
db::view::node_update_backlog& _max_view_update_backlog;
+updateable_timeout_config _timeout_config;
std::unordered_map<locator::host_id, view_update_backlog_timestamped> _view_update_backlogs;
//NOTICE(sarna): This opaque pointer is here just to avoid moving write handler class definitions from .cc to .hh. It's slow path.