main: Start sharded<view_builder> earlier

The view_builder service is needed by the repair service, but is started
after it. This works, in a sense, because the repair service holds a
sharded reference on it and checks local_is_initialized() before using
it — but that is not nice.

Fortunately, starting the sharded view builder can be done early enough,
because most of its dependencies are already started by that time.
Two exceptions are -- view_update_generator and
system_distributed_keyspace. Both can be moved up too with the same
justification.

Signed-off-by: Pavel Emelyanov <xemul@scylladb.com>
This commit is contained in:
Pavel Emelyanov
2025-02-04 10:26:33 +03:00
parent f650e75137
commit 5d1f74b86a
2 changed files with 29 additions and 28 deletions

34
main.cc
View File

@@ -1699,6 +1699,23 @@ To start the scylla server proper, simply invoke as: scylla server (or just scyl
co_await utils::announce_dict_to_shards(compressor_tracker, std::move(dict));
};
sys_dist_ks.start(std::ref(qp), std::ref(mm), std::ref(proxy)).get();
auto stop_sdks = defer_verbose_shutdown("system distributed keyspace", [] {
sys_dist_ks.invoke_on_all(&db::system_distributed_keyspace::stop).get();
});
supervisor::notify("starting view update generator");
view_update_generator.start(std::ref(db), std::ref(proxy), std::ref(stop_signal.as_sharded_abort_source())).get();
auto stop_view_update_generator = defer_verbose_shutdown("view update generator", [] {
view_update_generator.stop().get();
});
supervisor::notify("starting the view builder");
view_builder.start(std::ref(db), std::ref(sys_ks), std::ref(sys_dist_ks), std::ref(mm_notifier), std::ref(view_update_generator), std::ref(group0_client), std::ref(qp)).get();
auto stop_view_builder = defer_verbose_shutdown("view builder", [cfg] {
view_builder.stop().get();
});
supervisor::notify("starting repair service");
auto max_memory_repair = memory::stats().total_memory() * 0.1;
repair.start(std::ref(tsm), std::ref(gossiper), std::ref(messaging), std::ref(db), std::ref(proxy), std::ref(bm), std::ref(sys_ks), std::ref(view_builder), std::ref(task_manager), std::ref(mm), max_memory_repair).get();
@@ -1817,23 +1834,6 @@ To start the scylla server proper, simply invoke as: scylla server (or just scyl
supervisor::notify("loading non-system sstables");
replica::distributed_loader::init_non_system_keyspaces(db, proxy, sys_ks).get();
sys_dist_ks.start(std::ref(qp), std::ref(mm), std::ref(proxy)).get();
auto stop_sdks = defer_verbose_shutdown("system distributed keyspace", [] {
sys_dist_ks.invoke_on_all(&db::system_distributed_keyspace::stop).get();
});
supervisor::notify("starting view update generator");
view_update_generator.start(std::ref(db), std::ref(proxy), std::ref(stop_signal.as_sharded_abort_source())).get();
auto stop_view_update_generator = defer_verbose_shutdown("view update generator", [] {
view_update_generator.stop().get();
});
supervisor::notify("starting the view builder");
view_builder.start(std::ref(db), std::ref(sys_ks), std::ref(sys_dist_ks), std::ref(mm_notifier), std::ref(view_update_generator), std::ref(group0_client), std::ref(qp)).get();
auto stop_view_builder = defer_verbose_shutdown("view builder", [cfg] {
view_builder.stop().get();
});
supervisor::notify("starting commit log");
auto cl = db.local().commitlog();

View File

@@ -873,6 +873,18 @@ private:
auto compression_dict_updated_callback = [] { return make_ready_future<>(); };
_sys_dist_ks.start(std::ref(_qp), std::ref(_mm), std::ref(_proxy)).get();
_view_update_generator.start(std::ref(_db), std::ref(_proxy), std::ref(abort_sources)).get();
auto stop_view_update_generator = defer_verbose_shutdown("view update generator", [this] {
_view_update_generator.stop().get();
});
_view_builder.start(std::ref(_db), std::ref(_sys_ks), std::ref(_sys_dist_ks), std::ref(_mnotifier), std::ref(_view_update_generator), std::ref(group0_client), std::ref(_qp)).get();
auto stop_view_builder = defer_verbose_shutdown("view builder", [this] {
_view_builder.stop().get();
});
_ss.start(std::ref(abort_sources), std::ref(_db),
std::ref(_gossiper),
std::ref(_sys_ks),
@@ -960,18 +972,7 @@ private:
_group0_registry.invoke_on_all(&service::raft_group_registry::drain_on_shutdown).get();
});
_view_update_generator.start(std::ref(_db), std::ref(_proxy), std::ref(abort_sources)).get();
_view_update_generator.invoke_on_all(&db::view::view_update_generator::start).get();
auto stop_view_update_generator = defer_verbose_shutdown("view update generator", [this] {
_view_update_generator.stop().get();
});
_sys_dist_ks.start(std::ref(_qp), std::ref(_mm), std::ref(_proxy)).get();
_view_builder.start(std::ref(_db), std::ref(_sys_ks), std::ref(_sys_dist_ks), std::ref(_mnotifier), std::ref(_view_update_generator), std::ref(group0_client), std::ref(_qp)).get();
auto stop_view_builder = defer_verbose_shutdown("view builder", [this] {
_view_builder.stop().get();
});
if (cfg_in.need_remote_proxy) {
_proxy.invoke_on_all(&service::storage_proxy::start_remote, std::ref(_ms), std::ref(_gossiper), std::ref(_mm), std::ref(_sys_ks), std::ref(group0_client), std::ref(_topology_state_machine)).get();