token_metadata: Add config, spread everywhere

Next patches will need to provide some early-start data for topology.
The standard way of doing it is via service config, so this patch adds
one. The new config is empty in this patch, to be filled by later patches.

Signed-off-by: Pavel Emelyanov <xemul@scylladb.com>
This commit is contained in:
Pavel Emelyanov
2022-09-19 16:00:36 +03:00
parent 7c211e8e50
commit d60ebc5ace
8 changed files with 30 additions and 18 deletions

View File

@@ -76,10 +76,11 @@ private:
struct shallow_copy {};
token_metadata_impl(shallow_copy, const token_metadata_impl& o) noexcept
: _topology(topology::config{})
{}
public:
token_metadata_impl() noexcept {};
token_metadata_impl(token_metadata::config cfg) noexcept : _topology(std::move(cfg.topo_cfg)) {};
token_metadata_impl(const token_metadata_impl&) = delete; // it's too huge for direct copy, use clone_async()
token_metadata_impl(token_metadata_impl&&) noexcept = default;
const std::vector<token>& sorted_tokens() const;
@@ -973,8 +974,8 @@ token_metadata::token_metadata(std::unique_ptr<token_metadata_impl> impl)
: _impl(std::move(impl)) {
}
token_metadata::token_metadata()
: _impl(std::make_unique<token_metadata_impl>()) {
// Construct token_metadata from a config. The whole cfg is moved into the
// token_metadata_impl pimpl, whose constructor forwards cfg.topo_cfg on to
// the contained topology.
token_metadata::token_metadata(config cfg)
: _impl(std::make_unique<token_metadata_impl>(std::move(cfg))) {
}
token_metadata::~token_metadata() = default;
@@ -1245,6 +1246,10 @@ inline future<> topology::clear_gently() noexcept {
co_return;
}
// Construct a topology from its config. topology::config carries no fields
// yet (this patch only introduces the empty struct), so cfg is intentionally
// unused here; later patches are expected to populate it.
topology::topology(config cfg)
{
}
topology::topology(const topology& other)
: _dc_endpoints(other._dc_endpoints)
, _dc_racks(other._dc_racks)

View File

@@ -45,7 +45,9 @@ struct endpoint_dc_rack {
class topology {
public:
topology() {}
struct config {
};
topology(config cfg);
topology(const topology& other);
future<> clear_gently() noexcept;
@@ -135,6 +137,9 @@ class token_metadata_impl;
class token_metadata final {
std::unique_ptr<token_metadata_impl> _impl;
public:
struct config {
topology::config topo_cfg;
};
using inet_address = gms::inet_address;
private:
class tokens_iterator {
@@ -158,7 +163,7 @@ private:
friend class token_metadata_impl;
};
public:
token_metadata();
token_metadata(config cfg);
explicit token_metadata(std::unique_ptr<token_metadata_impl> impl);
token_metadata(token_metadata&&) noexcept; // Can't use "= default;" - hits some static_assert in unique_ptr
token_metadata& operator=(token_metadata&&) noexcept;
@@ -350,8 +355,8 @@ class shared_token_metadata {
public:
// used to construct the shared object as a sharded<> instance
// lock_func returns semaphore_units<>
explicit shared_token_metadata(token_metadata_lock_func lock_func)
: _shared(make_token_metadata_ptr())
explicit shared_token_metadata(token_metadata_lock_func lock_func, token_metadata::config cfg)
: _shared(make_token_metadata_ptr(std::move(cfg)))
, _lock_func(std::move(lock_func))
{ }

View File

@@ -713,7 +713,8 @@ To start the scylla server proper, simply invoke as: scylla server (or just scyl
set_abort_on_internal_error(cfg->abort_on_internal_error());
supervisor::notify("starting tokens manager");
token_metadata.start([] () noexcept { return db::schema_tables::hold_merge_lock(); }).get();
locator::token_metadata::config tm_cfg;
token_metadata.start([] () noexcept { return db::schema_tables::hold_merge_lock(); }, tm_cfg).get();
// storage_proxy holds a reference on it and is not yet stopped.
// what's worse is that the calltrace
// storage_proxy::do_query

View File

@@ -172,7 +172,7 @@ void full_ring_check(const std::vector<ring_point>& ring_points,
}
std::unique_ptr<locator::topology> generate_topology(const std::vector<ring_point>& pts) {
auto topo = std::make_unique<locator::topology>();
auto topo = std::make_unique<locator::topology>(locator::topology::config{});
// This resembles rack_inferring_snitch dc/rack generation which is
// still in use by this test via token_metadata internals
@@ -199,7 +199,7 @@ void simple_test() {
auto stop_snitch = defer([&snitch] { snitch.stop().get(); });
snitch.invoke_on_all(&snitch_ptr::start).get();
locator::shared_token_metadata stm([] () noexcept { return db::schema_tables::hold_merge_lock(); });
locator::shared_token_metadata stm([] () noexcept { return db::schema_tables::hold_merge_lock(); }, locator::token_metadata::config{});
std::vector<ring_point> ring_points = {
{ 1.0, inet_address("192.100.10.1") },
@@ -284,7 +284,7 @@ void heavy_origin_test() {
auto stop_snitch = defer([&snitch] { snitch.stop().get(); });
snitch.invoke_on_all(&snitch_ptr::start).get();
locator::shared_token_metadata stm([] () noexcept { return db::schema_tables::hold_merge_lock(); });
locator::shared_token_metadata stm([] () noexcept { return db::schema_tables::hold_merge_lock(); }, locator::token_metadata::config{});
std::vector<int> dc_racks = {2, 4, 8};
std::vector<int> dc_endpoints = {128, 256, 512};
@@ -559,7 +559,7 @@ std::unique_ptr<locator::topology> generate_topology(const std::unordered_map<ss
out = std::fill_n(out, rf, std::cref(dc));
}
auto topo = std::make_unique<locator::topology>();
auto topo = std::make_unique<locator::topology>(locator::topology::config{});
for (auto& node : nodes) {
const sstring& dc = dcs[udist(0, dcs.size() - 1)(e1)];
@@ -594,7 +594,7 @@ SEASTAR_THREAD_TEST_CASE(testCalculateEndpoints) {
for (size_t run = 0; run < RUNS; ++run) {
semaphore sem(1);
shared_token_metadata stm([&sem] () noexcept { return get_units(sem, 1); });
shared_token_metadata stm([&sem] () noexcept { return get_units(sem, 1); }, locator::token_metadata::config{});
auto topo = generate_topology(datacenters, nodes);
std::unordered_set<dht::token> random_tokens;

View File

@@ -54,7 +54,7 @@ SEASTAR_TEST_CASE(test_get_restricted_ranges) {
{
// Ring with minimum token
auto tmptr = locator::make_token_metadata_ptr();
auto tmptr = locator::make_token_metadata_ptr(locator::token_metadata::config{});
tmptr->update_topology(gms::inet_address("10.0.0.1"), {});
tmptr->update_normal_tokens(std::unordered_set<dht::token>({dht::minimum_token()}), gms::inet_address("10.0.0.1")).get();
@@ -68,7 +68,7 @@ SEASTAR_TEST_CASE(test_get_restricted_ranges) {
}
{
auto tmptr = locator::make_token_metadata_ptr();
auto tmptr = locator::make_token_metadata_ptr(locator::token_metadata::config{});
tmptr->update_topology(gms::inet_address("10.0.0.1"), {});
tmptr->update_normal_tokens(std::unordered_set<dht::token>({ring[2].token()}), gms::inet_address("10.0.0.1")).get();
tmptr->update_topology(gms::inet_address("10.0.0.2"), {});

View File

@@ -526,7 +526,8 @@ public:
}
sharded<locator::shared_token_metadata> token_metadata;
token_metadata.start([] () noexcept { return db::schema_tables::hold_merge_lock(); }).get();
locator::token_metadata::config tm_cfg;
token_metadata.start([] () noexcept { return db::schema_tables::hold_merge_lock(); }, tm_cfg).get();
auto stop_token_metadata = defer([&token_metadata] { token_metadata.stop().get(); });
sharded<locator::effective_replication_map_factory> erm_factory;

View File

@@ -72,7 +72,7 @@ int main(int ac, char ** av) {
abort_sources.start().get();
auto stop_abort_source = defer([&] { abort_sources.stop().get(); });
token_metadata.start([] () noexcept { return db::schema_tables::hold_merge_lock(); }).get();
token_metadata.start([] () noexcept { return db::schema_tables::hold_merge_lock(); }, locator::token_metadata::config{}).get();
auto stop_token_mgr = defer([&] { token_metadata.stop().get(); });
messaging.start(listen).get();

View File

@@ -198,7 +198,7 @@ std::vector<schema_ptr> do_load_schemas(std::string_view schema_str) {
feature_service.enable(feature_service.supported_feature_set());
sharded<locator::shared_token_metadata> token_metadata;
token_metadata.start([] () noexcept { return db::schema_tables::hold_merge_lock(); }).get();
token_metadata.start([] () noexcept { return db::schema_tables::hold_merge_lock(); }, locator::token_metadata::config{}).get();
auto stop_token_metadata = deferred_stop(token_metadata);
data_dictionary_impl dd_impl;