schema, everywhere: define and use table_id as a strong type
Define table_id as a distinct utils::tagged_uuid modeled after raft tagged_id, so it can be differentiated from other uuid-class types, in particular from table_schema_version.

Fixes #11207

Signed-off-by: Benny Halevy <bhalevy@scylladb.com>
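For readers skimming the diff below: the new table_id is not an alias of utils::UUID but a distinct wrapper type, so passing a table_schema_version (or any other raw UUID) where a table_id is expected no longer compiles. The following is a minimal, self-contained sketch of the tagged-UUID idea; the names tagged_uuid, table_id_tag and the UUID stand-in are illustrative assumptions, not Scylla's actual definitions. The only way back to the raw UUID is an explicit uuid() accessor, which is why the diff adds `.uuid()` calls at CQL-binding and serialization boundaries.

```cpp
// Minimal sketch of a tagged-UUID strong type (assumed names, not Scylla's code).
#include <compare>
#include <cstdint>
#include <functional>

namespace utils {

struct UUID { // stand-in for utils::UUID
    uint64_t msb = 0;
    uint64_t lsb = 0;
    auto operator<=>(const UUID&) const = default;
};

// A UUID wrapped in a tag-parameterized type: two instantiations with different
// tags are unrelated types, so they cannot be mixed up by accident.
template <typename Tag>
class tagged_uuid {
    UUID _id;
public:
    tagged_uuid() = default;
    explicit tagged_uuid(const UUID& id) noexcept : _id(id) {}
    // Explicit escape hatch back to the raw UUID, used at serialization/CQL boundaries.
    const UUID& uuid() const noexcept { return _id; }
    auto operator<=>(const tagged_uuid&) const = default;
};

} // namespace utils

// Each id kind gets its own tag type.
using table_id = utils::tagged_uuid<struct table_id_tag>;
// Other id kinds (e.g. a schema-version id) would use their own tag and thus a distinct type.

// Hashing delegates to the underlying UUID, so table_id works as a map/set key.
namespace std {
template <typename Tag>
struct hash<utils::tagged_uuid<Tag>> {
    size_t operator()(const utils::tagged_uuid<Tag>& id) const noexcept {
        return hash<uint64_t>()(id.uuid().msb) ^ (hash<uint64_t>()(id.uuid().lsb) << 1);
    }
};
} // namespace std
```

With a wrapper along these lines, overloads such as replica::database::find_column_family(table_id) can no longer be called with an arbitrary UUID, which is what the mechanical renames throughout the diff rely on.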
@@ -74,8 +74,8 @@ struct rapidjson::internal::TypeHelper<ValueType, utils::UUID>
     : public from_string_helper<ValueType, utils::UUID>
 {};
 
-static db_clock::time_point as_timepoint(const utils::UUID& uuid) {
-    return db_clock::time_point{utils::UUID_gen::unix_timestamp(uuid)};
+static db_clock::time_point as_timepoint(const table_id& tid) {
+    return db_clock::time_point{utils::UUID_gen::unix_timestamp(tid.uuid())};
 }
 
 /**
@@ -106,6 +106,9 @@ public:
     stream_arn(const UUID& uuid)
         : UUID(uuid)
     {}
+    stream_arn(const table_id& tid)
+        : UUID(tid.uuid())
+    {}
     stream_arn(std::string_view v)
         : UUID(v.substr(1))
     {
@@ -155,7 +158,7 @@ future<alternator::executor::request_return_type> alternator::executor::list_str
     // and we can probably expect this to be a single call.
     if (streams_start) {
         i = std::find_if(i, e, [&](data_dictionary::table t) {
-            return t.schema()->id() == streams_start
+            return t.schema()->id().uuid() == streams_start
                 && cdc::get_base_table(db.real_database(), *t.schema())
                 && is_alternator_keyspace(t.schema()->ks_name())
                 ;
@@ -430,7 +433,7 @@ future<executor::request_return_type> executor::describe_stream(client_state& cl
     auto db = _proxy.data_dictionary();
 
     try {
-        auto cf = db.find_column_family(stream_arn);
+        auto cf = db.find_column_family(table_id(stream_arn));
         schema = cf.schema();
         bs = cdc::get_base_table(db.real_database(), *schema);
     } catch (...) {
@@ -717,7 +720,7 @@ future<executor::request_return_type> executor::get_shard_iterator(client_state&
     std::optional<shard_id> sid;
 
     try {
-        auto cf = db.find_column_family(stream_arn);
+        auto cf = db.find_column_family(table_id(stream_arn));
         schema = cf.schema();
         sid = rjson::get<shard_id>(request, "ShardId");
     } catch (...) {
@@ -802,7 +805,7 @@ future<executor::request_return_type> executor::get_records(client_state& client
     auto db = _proxy.data_dictionary();
     schema_ptr schema, base;
     try {
-        auto log_table = db.find_column_family(iter.table);
+        auto log_table = db.find_column_family(table_id(iter.table));
         schema = log_table.schema();
         base = cdc::get_base_table(db.real_database(), *schema);
     } catch (...) {
@@ -43,7 +43,7 @@ std::tuple<sstring, sstring> parse_fully_qualified_cf_name(sstring name) {
     return std::make_tuple(name.substr(0, pos), name.substr(end));
 }
 
-const utils::UUID& get_uuid(const sstring& ks, const sstring& cf, const replica::database& db) {
+const table_id& get_uuid(const sstring& ks, const sstring& cf, const replica::database& db) {
     try {
         return db.find_uuid(ks, cf);
     } catch (replica::no_such_column_family& e) {
@@ -51,7 +51,7 @@ const utils::UUID& get_uuid(const sstring& ks, const sstring& cf, const replica:
     }
 }
 
-const utils::UUID& get_uuid(const sstring& name, const replica::database& db) {
+const table_id& get_uuid(const sstring& name, const replica::database& db) {
     auto [ks, cf] = parse_fully_qualified_cf_name(name);
     return get_uuid(ks, cf, db);
 }
@@ -110,7 +110,7 @@ static future<json::json_return_type> get_cf_stats_count(http_context& ctx,
 
 static future<json::json_return_type> get_cf_histogram(http_context& ctx, const sstring& name,
         utils::timed_rate_moving_average_and_histogram replica::column_family_stats::*f) {
-    utils::UUID uuid = get_uuid(name, ctx.db.local());
+    auto uuid = get_uuid(name, ctx.db.local());
     return ctx.db.map_reduce0([f, uuid](const replica::database& p) {
         return (p.find_column_family(uuid).get_stats().*f).hist;},
         utils::ihistogram(),
@@ -122,7 +122,7 @@ static future<json::json_return_type> get_cf_histogram(http_context& ctx, const
 
 static future<json::json_return_type> get_cf_histogram(http_context& ctx, const sstring& name,
         utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
-    utils::UUID uuid = get_uuid(name, ctx.db.local());
+    auto uuid = get_uuid(name, ctx.db.local());
     return ctx.db.map_reduce0([f, uuid](const replica::database& p) {
         return (p.find_column_family(uuid).get_stats().*f).hist;},
         utils::ihistogram(),
@@ -149,7 +149,7 @@ static future<json::json_return_type> get_cf_histogram(http_context& ctx, utils:
 
 static future<json::json_return_type> get_cf_rate_and_histogram(http_context& ctx, const sstring& name,
         utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
-    utils::UUID uuid = get_uuid(name, ctx.db.local());
+    auto uuid = get_uuid(name, ctx.db.local());
     return ctx.db.map_reduce0([f, uuid](const replica::database& p) {
         return (p.find_column_family(uuid).get_stats().*f).rate();},
         utils::rate_moving_average_and_histogram(),
@@ -855,7 +855,7 @@ void set_column_family(http_context& ctx, routes& r) {
     });
 
     cf::get_auto_compaction.set(r, [&ctx] (const_req req) {
-        const utils::UUID& uuid = get_uuid(req.param["name"], ctx.db.local());
+        auto uuid = get_uuid(req.param["name"], ctx.db.local());
         replica::column_family& cf = ctx.db.local().find_column_family(uuid);
         return !cf.is_auto_compaction_disabled_by_user();
     });
@@ -18,7 +18,7 @@ namespace api {
 
 void set_column_family(http_context& ctx, routes& r);
 
-const utils::UUID& get_uuid(const sstring& name, const replica::database& db);
+const table_id& get_uuid(const sstring& name, const replica::database& db);
 future<> foreach_column_family(http_context& ctx, const sstring& name, std::function<void(replica::column_family&)> f);
 
@@ -63,7 +63,7 @@ struct map_reduce_column_families_locally {
     std::function<std::unique_ptr<std::any>(std::unique_ptr<std::any>, std::unique_ptr<std::any>)> reducer;
     future<std::unique_ptr<std::any>> operator()(replica::database& db) const {
         auto res = seastar::make_lw_shared<std::unique_ptr<std::any>>(std::make_unique<std::any>(init));
-        return do_for_each(db.get_column_families(), [res, this](const std::pair<utils::UUID, seastar::lw_shared_ptr<replica::table>>& i) {
+        return do_for_each(db.get_column_families(), [res, this](const std::pair<table_id, seastar::lw_shared_ptr<replica::table>>& i) {
             *res = reducer(std::move(*res), mapper(*i.second.get()));
         }).then([res] {
             return std::move(*res);
@@ -68,7 +68,7 @@ void set_compaction_manager(http_context& ctx, routes& r) {
     cm::get_pending_tasks_by_table.set(r, [&ctx] (std::unique_ptr<request> req) {
         return ctx.db.map_reduce0([&ctx](replica::database& db) {
             return do_with(std::unordered_map<std::pair<sstring, sstring>, uint64_t, utils::tuple_hash>(), [&ctx, &db](std::unordered_map<std::pair<sstring, sstring>, uint64_t, utils::tuple_hash>& tasks) {
-                return do_for_each(db.get_column_families(), [&tasks](const std::pair<utils::UUID, seastar::lw_shared_ptr<replica::table>>& i) {
+                return do_for_each(db.get_column_families(), [&tasks](const std::pair<table_id, seastar::lw_shared_ptr<replica::table>>& i) {
                     replica::table& cf = *i.second.get();
                     tasks[std::make_pair(cf.schema()->ks_name(), cf.schema()->cf_name())] = cf.get_compaction_strategy().estimated_pending_compactions(cf.as_table_state());
                     return make_ready_future<>();
@@ -611,11 +611,11 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
         column_families = map_keys(ctx.db.local().find_keyspace(keyspace).metadata().get()->cf_meta_data());
     }
     return ctx.db.invoke_on_all([keyspace, column_families] (replica::database& db) -> future<> {
-        auto table_ids = boost::copy_range<std::vector<utils::UUID>>(column_families | boost::adaptors::transformed([&] (auto& cf_name) {
+        auto table_ids = boost::copy_range<std::vector<table_id>>(column_families | boost::adaptors::transformed([&] (auto& cf_name) {
            return db.find_uuid(keyspace, cf_name);
         }));
         // major compact smaller tables first, to increase chances of success if low on space.
-        std::ranges::sort(table_ids, std::less<>(), [&] (const utils::UUID& id) {
+        std::ranges::sort(table_ids, std::less<>(), [&] (const table_id& id) {
             return db.find_column_family(id).get_stats().live_disk_space_used;
         });
         // as a table can be dropped during loop below, let's find it before issuing major compaction request.
@@ -641,11 +641,11 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
             std::runtime_error("Can not perform cleanup operation when topology changes"));
     }
     return ctx.db.invoke_on_all([keyspace, column_families] (replica::database& db) -> future<> {
-        auto table_ids = boost::copy_range<std::vector<utils::UUID>>(column_families | boost::adaptors::transformed([&] (auto& table_name) {
+        auto table_ids = boost::copy_range<std::vector<table_id>>(column_families | boost::adaptors::transformed([&] (auto& table_name) {
            return db.find_uuid(keyspace, table_name);
         }));
         // cleanup smaller tables first, to increase chances of success if low on space.
-        std::ranges::sort(table_ids, std::less<>(), [&] (const utils::UUID& id) {
+        std::ranges::sort(table_ids, std::less<>(), [&] (const table_id& id) {
             return db.find_column_family(id).get_stats().live_disk_space_used;
         });
         auto& cm = db.get_compaction_manager();
@@ -37,7 +37,7 @@ canonical_mutation::canonical_mutation(const mutation& m)
     }).end_canonical_mutation();
 }
 
-utils::UUID canonical_mutation::column_family_id() const {
+table_id canonical_mutation::column_family_id() const {
     auto in = ser::as_input_stream(_data);
     auto mv = ser::deserialize(in, boost::type<ser::canonical_mutation_view>());
     return mv.table_id();
@@ -14,10 +14,6 @@
 #include "bytes_ostream.hh"
 #include <iosfwd>
 
-namespace utils {
-class UUID;
-} // namespace utils
-
 // Immutable mutation form which can be read using any schema version of the same table.
 // Safe to access from other shards via const&.
 // Safe to pass serialized across nodes.
@@ -39,7 +35,7 @@ public:
     // is not intended, user should sync the schema first.
     mutation to_mutation(schema_ptr) const;
 
-    utils::UUID column_family_id() const;
+    table_id column_family_id() const;
 
     const bytes_ostream& representation() const { return _data; }
@@ -607,7 +607,7 @@ future<> generation_service::maybe_rewrite_streams_descriptions() {
             continue;
         }
 
-        times_and_ttls.push_back(time_and_ttl{as_timepoint(s.id()), cdc_opts.ttl()});
+        times_and_ttls.push_back(time_and_ttl{as_timepoint(s.id().uuid()), cdc_opts.ttl()});
     }
 
     if (times_and_ttls.empty()) {
@@ -59,7 +59,7 @@ using namespace std::chrono_literals;
 logging::logger cdc_log("cdc");
 
 namespace cdc {
-static schema_ptr create_log_schema(const schema&, std::optional<utils::UUID> = {}, schema_ptr = nullptr);
+static schema_ptr create_log_schema(const schema&, std::optional<table_id> = {}, schema_ptr = nullptr);
 }
 
 static constexpr auto cdc_group_name = "cdc";
@@ -485,7 +485,7 @@ bytes log_data_column_deleted_elements_name_bytes(const bytes& column_name) {
     return to_bytes(cdc_deleted_elements_column_prefix) + column_name;
 }
 
-static schema_ptr create_log_schema(const schema& s, std::optional<utils::UUID> uuid, schema_ptr old) {
+static schema_ptr create_log_schema(const schema& s, std::optional<table_id> uuid, schema_ptr old) {
     schema_builder b(s.ks_name(), log_name(s.cf_name()));
     b.with_partitioner(cdc::cdc_partitioner::classname);
     b.set_compaction_strategy(sstables::compaction_strategy_type::time_window);
@@ -189,10 +189,10 @@ int32_t cf_prop_defs::get_paxos_grace_seconds() const {
     return get_int(KW_PAXOSGRACESECONDS, DEFAULT_GC_GRACE_SECONDS);
 }
 
-std::optional<utils::UUID> cf_prop_defs::get_id() const {
+std::optional<table_id> cf_prop_defs::get_id() const {
     auto id = get_simple(KW_ID);
     if (id) {
-        return utils::UUID(*id);
+        return std::make_optional<table_id>(utils::UUID(*id));
     }
 
     return std::nullopt;
@@ -100,7 +100,7 @@ public:
     int32_t get_default_time_to_live() const;
     int32_t get_gc_grace_seconds() const;
     int32_t get_paxos_grace_seconds() const;
-    std::optional<utils::UUID> get_id() const;
+    std::optional<table_id> get_id() const;
     bool get_synchronous_updates_flag() const;
 
     void apply_to_builder(schema_builder& builder, schema::extensions_map schema_extensions) const;
@@ -41,7 +41,7 @@ create_table_statement::create_table_statement(cf_name name,
         ::shared_ptr<cf_prop_defs> properties,
         bool if_not_exists,
         column_set_type static_columns,
-        const std::optional<utils::UUID>& id)
+        const std::optional<table_id>& id)
     : schema_altering_statement{name}
     , _use_compact_storage(false)
     , _static_columns{static_columns}
@@ -60,13 +60,13 @@ class create_table_statement : public schema_altering_statement {
     column_set_type _static_columns;
     const ::shared_ptr<cf_prop_defs> _properties;
     const bool _if_not_exists;
-    std::optional<utils::UUID> _id;
+    std::optional<table_id> _id;
 public:
     create_table_statement(cf_name name,
         ::shared_ptr<cf_prop_defs> properties,
         bool if_not_exists,
         column_set_type static_columns,
-        const std::optional<utils::UUID>& id);
+        const std::optional<table_id>& id);
 
     virtual future<> check_access(query_processor& qp, const service::client_state& state) const override;
@@ -100,12 +100,12 @@ database::find_table(std::string_view ks, std::string_view table) const {
 }
 
 std::optional<table>
-database::try_find_table(utils::UUID id) const {
+database::try_find_table(table_id id) const {
     return _ops->try_find_table(*this, id);
 }
 
 table
-database::find_column_family(utils::UUID uuid) const {
+database::find_column_family(table_id uuid) const {
     auto t = try_find_table(uuid);
     if (!t) {
         throw no_such_column_family(uuid);
@@ -119,7 +119,7 @@ database::find_schema(std::string_view ks, std::string_view table) const {
 }
 
 schema_ptr
-database::find_schema(utils::UUID uuid) const {
+database::find_schema(table_id uuid) const {
     return find_column_family(uuid).schema();
 }
 
@@ -326,7 +326,7 @@ no_such_keyspace::no_such_keyspace(std::string_view ks_name)
 {
 }
 
-no_such_column_family::no_such_column_family(const utils::UUID& uuid)
+no_such_column_family::no_such_column_family(const table_id& uuid)
     : runtime_error{format("Can't find a column family with UUID {}", uuid)}
 {
 }
@@ -336,7 +336,7 @@ no_such_column_family::no_such_column_family(std::string_view ks_name, std::stri
 {
 }
 
-no_such_column_family::no_such_column_family(std::string_view ks_name, const utils::UUID& uuid)
+no_such_column_family::no_such_column_family(std::string_view ks_name, const table_id& uuid)
     : runtime_error{format("Can't find a column family with UUID {} in keyspace {}", uuid, ks_name)}
 {
 }
@@ -14,7 +14,7 @@
 #include <vector>
 #include <seastar/core/shared_ptr.hh>
 #include "seastarx.hh"
-#include "utils/UUID.hh"
+#include "schema_fwd.hh"
 
 namespace replica {
 class database; // For transition; remove
@@ -62,9 +62,9 @@ public:
 
 class no_such_column_family : public std::runtime_error {
 public:
-    no_such_column_family(const utils::UUID& uuid);
+    no_such_column_family(const table_id& uuid);
     no_such_column_family(std::string_view ks_name, std::string_view cf_name);
-    no_such_column_family(std::string_view ks_name, const utils::UUID& uuid);
+    no_such_column_family(std::string_view ks_name, const table_id& uuid);
 };
 
 class table {
@@ -105,13 +105,13 @@ public:
     std::vector<keyspace> get_keyspaces() const;
     std::vector<table> get_tables() const;
    table find_table(std::string_view ks, std::string_view table) const; // throws no_such_column_family
-    table find_column_family(utils::UUID uuid) const; // throws no_such_column_family
+    table find_column_family(table_id uuid) const; // throws no_such_column_family
     schema_ptr find_schema(std::string_view ks, std::string_view table) const; // throws no_such_column_family
-    schema_ptr find_schema(utils::UUID uuid) const; // throws no_such_column_family
+    schema_ptr find_schema(table_id uuid) const; // throws no_such_column_family
     table find_column_family(schema_ptr s) const;
     bool has_schema(std::string_view ks_name, std::string_view cf_name) const;
     std::optional<table> try_find_table(std::string_view ks, std::string_view table) const;
-    std::optional<table> try_find_table(utils::UUID id) const;
+    std::optional<table> try_find_table(table_id id) const;
     const db::config& get_config() const;
     std::set<sstring> existing_index_names(std::string_view ks_name, std::string_view cf_to_exclude = sstring()) const;
     schema_ptr find_indexed_table(std::string_view ks_name, std::string_view index_name) const;
@@ -21,7 +21,7 @@ public:
     virtual std::vector<keyspace> get_keyspaces(database db) const = 0;
     virtual std::vector<table> get_tables(database db) const = 0;
     virtual std::optional<table> try_find_table(database db, std::string_view ks, std::string_view tab) const = 0;
-    virtual std::optional<table> try_find_table(database db, utils::UUID id) const = 0;
+    virtual std::optional<table> try_find_table(database db, table_id id) const = 0;
     virtual const secondary_index::secondary_index_manager& get_index_manager(table t) const = 0;
     virtual schema_ptr get_table_schema(table t) const = 0;
     virtual lw_shared_ptr<keyspace_metadata> get_keyspace_metadata(keyspace ks) const = 0;
@@ -84,7 +84,7 @@ public:
     future<> process(stats*, commitlog::buffer_and_replay_position buf_rp) const;
     future<stats> recover(sstring file, const sstring& fname_prefix) const;
 
-    typedef std::unordered_map<utils::UUID, replay_position> rp_map;
+    typedef std::unordered_map<table_id, replay_position> rp_map;
     typedef std::unordered_map<unsigned, rp_map> shard_rpm_map;
     typedef std::unordered_map<unsigned, replay_position> shard_rp_map;
 
@@ -92,7 +92,7 @@ public:
         auto i = _min_pos.find(shard);
         return i != _min_pos.end() ? i->second : replay_position();
     }
-    replay_position cf_min_pos(const utils::UUID& uuid, unsigned shard) const {
+    replay_position cf_min_pos(const table_id& uuid, unsigned shard) const {
         auto i = _rpm.find(shard);
         if (i == _rpm.end()) {
             return replay_position();
@@ -11,7 +11,7 @@
 
 #include <stdint.h>
 #include <seastar/core/shared_ptr.hh>
-#include "utils/UUID.hh"
+#include "schema_fwd.hh"
 #include "utils/hash.hh"
 #include "sstables/version.hh"
 
@@ -72,7 +72,7 @@ struct replay_position {
 class commitlog;
 class cf_holder;
 
-using cf_id_type = utils::UUID;
+using cf_id_type = table_id;
 
 class rp_handle {
 public:
@@ -147,7 +147,7 @@ public:
 
     auto ks_name = td.get_as<sstring>("keyspace_name");
     auto cf_name = td.get_as<sstring>("columnfamily_name");
-    auto id = td.get_or("cf_id", generate_legacy_id(ks_name, cf_name));
+    auto id = table_id(td.get_or("cf_id", generate_legacy_id(ks_name, cf_name).uuid()));
 
     schema_builder builder(dst.name, cf_name, id);
 
@@ -138,10 +138,10 @@ struct qualified_name {
 static future<schema_mutations> read_table_mutations(distributed<service::storage_proxy>& proxy, const qualified_name& table, schema_ptr s);
 
 static future<> merge_tables_and_views(distributed<service::storage_proxy>& proxy,
-    std::map<utils::UUID, schema_mutations>&& tables_before,
-    std::map<utils::UUID, schema_mutations>&& tables_after,
-    std::map<utils::UUID, schema_mutations>&& views_before,
-    std::map<utils::UUID, schema_mutations>&& views_after);
+    std::map<table_id, schema_mutations>&& tables_before,
+    std::map<table_id, schema_mutations>&& tables_after,
+    std::map<table_id, schema_mutations>&& views_before,
+    std::map<table_id, schema_mutations>&& views_after);
 
 struct [[nodiscard]] user_types_to_drop final {
     seastar::noncopyable_function<future<> ()> drop;
@@ -967,17 +967,17 @@ static read_table_names_of_keyspace(distributed<service::storage_proxy>& proxy,
     }));
 }
 
-static utils::UUID table_id_from_mutations(const schema_mutations& sm) {
+static table_id table_id_from_mutations(const schema_mutations& sm) {
     auto table_rs = query::result_set(sm.columnfamilies_mutation());
     query::result_set_row table_row = table_rs.row(0);
-    return table_row.get_nonnull<utils::UUID>("id");
+    return table_id(table_row.get_nonnull<utils::UUID>("id"));
 }
 
 static
-future<std::map<utils::UUID, schema_mutations>>
+future<std::map<table_id, schema_mutations>>
 read_tables_for_keyspaces(distributed<service::storage_proxy>& proxy, const std::set<sstring>& keyspace_names, schema_ptr s)
 {
-    std::map<utils::UUID, schema_mutations> result;
+    std::map<table_id, schema_mutations> result;
     for (auto&& keyspace_name : keyspace_names) {
         for (auto&& table_name : co_await read_table_names_of_keyspace(proxy, keyspace_name, s)) {
             auto qn = qualified_name(keyspace_name, table_name);
@@ -1063,7 +1063,7 @@ future<> store_column_mapping(distributed<service::storage_proxy>& proxy, schema
 
     // Insert the new column mapping for a given schema version (without TTL)
     std::vector<mutation> muts;
-    partition_key pk = partition_key::from_exploded(*history_tbl, {uuid_type->decompose(s->id())});
+    partition_key pk = partition_key::from_exploded(*history_tbl, {uuid_type->decompose(s->id().uuid())});
 
     ttl_opt ttl;
     if (with_ttl) {
@@ -1087,7 +1087,7 @@ static future<> do_merge_schema(distributed<service::storage_proxy>& proxy, std:
     schema_ptr s = keyspaces();
     // compare before/after schemas of the affected keyspaces only
     std::set<sstring> keyspaces;
-    std::set<utils::UUID> column_families;
+    std::set<table_id> column_families;
     for (auto&& mutation : mutations) {
         keyspaces.emplace(value_cast<sstring>(utf8_type->deserialize(mutation.key().get_component(*s, 0))));
         column_families.emplace(mutation.column_family_id());
@@ -1112,7 +1112,7 @@ static future<> do_merge_schema(distributed<service::storage_proxy>& proxy, std:
 
     if (do_flush) {
         auto& db = proxy.local().get_db();
-        co_await coroutine::parallel_for_each(column_families, [&db] (const utils::UUID& id) -> future<> {
+        co_await coroutine::parallel_for_each(column_families, [&db] (const table_id& id) -> future<> {
             return replica::database::flush_table_on_all_shards(db, id);
         });
     }
@@ -1236,8 +1236,8 @@ enum class schema_diff_side {
 };
 
 static schema_diff diff_table_or_view(distributed<service::storage_proxy>& proxy,
-    std::map<utils::UUID, schema_mutations>&& before,
-    std::map<utils::UUID, schema_mutations>&& after,
+    std::map<table_id, schema_mutations>&& before,
+    std::map<table_id, schema_mutations>&& after,
     noncopyable_function<schema_ptr (schema_mutations sm, schema_diff_side)> create_schema)
 {
     schema_diff d;
@@ -1267,10 +1267,10 @@ static schema_diff diff_table_or_view(distributed<service::storage_proxy>& proxy
 // upon an alter table or alter type statement), then they are published together
 // as well, without any deferring in-between.
 static future<> merge_tables_and_views(distributed<service::storage_proxy>& proxy,
-    std::map<utils::UUID, schema_mutations>&& tables_before,
-    std::map<utils::UUID, schema_mutations>&& tables_after,
-    std::map<utils::UUID, schema_mutations>&& views_before,
-    std::map<utils::UUID, schema_mutations>&& views_after)
+    std::map<table_id, schema_mutations>&& tables_before,
+    std::map<table_id, schema_mutations>&& tables_after,
+    std::map<table_id, schema_mutations>&& views_before,
+    std::map<table_id, schema_mutations>&& views_after)
 {
     auto tables_diff = diff_table_or_view(proxy, std::move(tables_before), std::move(tables_after), [&] (schema_mutations sm, schema_diff_side) {
         return create_table_from_mutations(proxy, std::move(sm));
@@ -2356,7 +2356,7 @@ static schema_mutations make_table_mutations(schema_ptr table, api::timestamp_ty
     auto pkey = partition_key::from_singular(*s, table->ks_name());
     mutation m{s, pkey};
     auto ckey = clustering_key::from_singular(*s, table->cf_name());
-    m.set_clustered_cell(ckey, "id", table->id(), timestamp);
+    m.set_clustered_cell(ckey, "id", table->id().uuid(), timestamp);
 
     auto scylla_tables_mutation = make_scylla_tables_mutation(table, timestamp);
 
@@ -2804,7 +2804,7 @@ schema_ptr create_table_from_mutations(const schema_ctxt& ctxt, schema_mutations
 
     auto ks_name = table_row.get_nonnull<sstring>("keyspace_name");
     auto cf_name = table_row.get_nonnull<sstring>("table_name");
-    auto id = table_row.get_nonnull<utils::UUID>("id");
+    auto id = table_id(table_row.get_nonnull<utils::UUID>("id"));
     schema_builder builder{ks_name, cf_name, id};
 
     auto cf = cf_type::standard;
@@ -3081,7 +3081,7 @@ view_ptr create_view_from_mutations(const schema_ctxt& ctxt, schema_mutations sm
 
     auto ks_name = row.get_nonnull<sstring>("keyspace_name");
     auto cf_name = row.get_nonnull<sstring>("view_name");
-    auto id = row.get_nonnull<utils::UUID>("id");
+    auto id = table_id(row.get_nonnull<utils::UUID>("id"));
 
     schema_builder builder{ks_name, cf_name, id};
     prepare_builder_from_table_row(ctxt, builder, row);
@@ -3104,7 +3104,7 @@ view_ptr create_view_from_mutations(const schema_ctxt& ctxt, schema_mutations sm
         builder.with_version(sm.digest());
     }
 
-    auto base_id = row.get_nonnull<utils::UUID>("base_table_id");
+    auto base_id = table_id(row.get_nonnull<utils::UUID>("base_table_id"));
     auto base_name = row.get_nonnull<sstring>("base_table_name");
     auto include_all_columns = row.get_nonnull<bool>("include_all_columns");
     auto where_clause = row.get_nonnull<sstring>("where_clause");
@@ -3149,12 +3149,12 @@ static schema_mutations make_view_mutations(view_ptr view, api::timestamp_type t
     mutation m{s, pkey};
     auto ckey = clustering_key::from_singular(*s, view->cf_name());
 
-    m.set_clustered_cell(ckey, "base_table_id", view->view_info()->base_id(), timestamp);
+    m.set_clustered_cell(ckey, "base_table_id", view->view_info()->base_id().uuid(), timestamp);
     m.set_clustered_cell(ckey, "base_table_name", view->view_info()->base_name(), timestamp);
     m.set_clustered_cell(ckey, "where_clause", view->view_info()->where_clause(), timestamp);
     m.set_clustered_cell(ckey, "bloom_filter_fp_chance", view->bloom_filter_fp_chance(), timestamp);
     m.set_clustered_cell(ckey, "include_all_columns", view->view_info()->include_all_columns(), timestamp);
-    m.set_clustered_cell(ckey, "id", view->id(), timestamp);
+    m.set_clustered_cell(ckey, "id", view->id().uuid(), timestamp);
 
     add_table_params_to_mutations(m, ckey, view, timestamp);
@@ -3471,11 +3471,11 @@ future<schema_mutations> read_table_mutations(distributed<service::storage_proxy
 static auto GET_COLUMN_MAPPING_QUERY = format("SELECT column_name, clustering_order, column_name_bytes, kind, position, type FROM system.{} WHERE cf_id = ? AND schema_version = ?",
     db::schema_tables::SCYLLA_TABLE_SCHEMA_HISTORY);
 
-future<column_mapping> get_column_mapping(utils::UUID table_id, table_schema_version version) {
+future<column_mapping> get_column_mapping(::table_id table_id, table_schema_version version) {
     shared_ptr<cql3::untyped_result_set> results = co_await qctx->qp().execute_internal(
         GET_COLUMN_MAPPING_QUERY,
         db::consistency_level::LOCAL_ONE,
-        {table_id, version},
+        {table_id.uuid(), version},
         cql3::query_processor::cache_internal::no
     );
     if (results->empty()) {
@@ -3512,24 +3512,24 @@ future<column_mapping> get_column_mapping(utils::UUID table_id, table_schema_ver
     co_return std::move(cm);
 }
 
-future<bool> column_mapping_exists(utils::UUID table_id, table_schema_version version) {
+future<bool> column_mapping_exists(table_id table_id, table_schema_version version) {
     shared_ptr<cql3::untyped_result_set> results = co_await qctx->qp().execute_internal(
         GET_COLUMN_MAPPING_QUERY,
         db::consistency_level::LOCAL_ONE,
-        {table_id, version},
+        {table_id.uuid(), version},
         cql3::query_processor::cache_internal::yes
     );
     co_return !results->empty();
 }
 
-future<> drop_column_mapping(utils::UUID table_id, table_schema_version version) {
+future<> drop_column_mapping(table_id table_id, table_schema_version version) {
     const static sstring DEL_COLUMN_MAPPING_QUERY =
         format("DELETE FROM system.{} WHERE cf_id = ? and schema_version = ?",
             db::schema_tables::SCYLLA_TABLE_SCHEMA_HISTORY);
     co_await qctx->qp().execute_internal(
         DEL_COLUMN_MAPPING_QUERY,
         db::consistency_level::LOCAL_ONE,
-        {table_id, version},
+        {table_id.uuid(), version},
         cql3::query_processor::cache_internal::no);
 }
@@ -285,11 +285,11 @@ std::optional<std::map<K, V>> get_map(const query::result_set_row& row, const ss
 /// overwriting an existing column mapping to garbage collect obsolete entries.
 future<> store_column_mapping(distributed<service::storage_proxy>& proxy, schema_ptr s, bool with_ttl);
 /// Query column mapping for a given version of the table locally.
-future<column_mapping> get_column_mapping(utils::UUID table_id, table_schema_version version);
+future<column_mapping> get_column_mapping(table_id table_id, table_schema_version version);
 /// Check that column mapping exists for a given version of the table
-future<bool> column_mapping_exists(utils::UUID table_id, table_schema_version version);
+future<bool> column_mapping_exists(table_id table_id, table_schema_version version);
 /// Delete matching column mapping entries from the `system.scylla_table_schema_history` table
-future<> drop_column_mapping(utils::UUID table_id, table_schema_version version);
+future<> drop_column_mapping(table_id table_id, table_schema_version version);
 
 } // namespace schema_tables
 } // namespace db
@@ -85,7 +85,7 @@ api::timestamp_type system_keyspace::schema_creation_timestamp() {
 // FIXME: Make automatic by calculating from schema structure.
 static const uint16_t version_sequence_number = 1;
 
-table_schema_version system_keyspace::generate_schema_version(utils::UUID table_id, uint16_t offset) {
+table_schema_version system_keyspace::generate_schema_version(::table_id table_id, uint16_t offset) {
     md5_hasher h;
     feed_hash(h, table_id);
     feed_hash(h, version_sequence_number + offset);
@@ -1388,13 +1388,13 @@ typedef std::unordered_map<truncation_key, truncation_record> truncation_map;
 
 static constexpr uint8_t current_version = 1;
 
-future<truncation_record> system_keyspace::get_truncation_record(utils::UUID cf_id) {
+future<truncation_record> system_keyspace::get_truncation_record(table_id cf_id) {
     if (qctx->qp().db().get_config().ignore_truncation_record.is_set()) {
         truncation_record r{truncation_record::current_magic};
         return make_ready_future<truncation_record>(std::move(r));
     }
     sstring req = format("SELECT * from system.{} WHERE table_uuid = ?", TRUNCATED);
-    return qctx->qp().execute_internal(req, {cf_id}, cql3::query_processor::cache_internal::yes).then([](::shared_ptr<cql3::untyped_result_set> rs) {
+    return qctx->qp().execute_internal(req, {cf_id.uuid()}, cql3::query_processor::cache_internal::yes).then([](::shared_ptr<cql3::untyped_result_set> rs) {
         truncation_record r{truncation_record::current_magic};
 
         for (const cql3::untyped_result_set_row& row : *rs) {
@@ -1418,7 +1418,7 @@ future<> system_keyspace::cache_truncation_record() {
     sstring req = format("SELECT DISTINCT table_uuid, truncated_at from system.{}", TRUNCATED);
     return execute_cql(req).then([this] (::shared_ptr<cql3::untyped_result_set> rs) {
         return parallel_for_each(rs->begin(), rs->end(), [this] (const cql3::untyped_result_set_row& row) {
-            auto table_uuid = row.get_as<utils::UUID>("table_uuid");
+            auto table_uuid = table_id(row.get_as<utils::UUID>("table_uuid"));
             auto ts = row.get_as<db_clock::time_point>("truncated_at");
 
             return _db.invoke_on_all([table_uuid, ts] (replica::database& db) mutable {
@@ -1433,9 +1433,9 @@ future<> system_keyspace::cache_truncation_record() {
     });
 }
 
-future<> system_keyspace::save_truncation_record(utils::UUID id, db_clock::time_point truncated_at, db::replay_position rp) {
+future<> system_keyspace::save_truncation_record(table_id id, db_clock::time_point truncated_at, db::replay_position rp) {
     sstring req = format("INSERT INTO system.{} (table_uuid, shard, position, segment_id, truncated_at) VALUES(?,?,?,?,?)", TRUNCATED);
-    return qctx->qp().execute_internal(req, {id, int32_t(rp.shard_id()), int32_t(rp.pos), int64_t(rp.base_id()), truncated_at}, cql3::query_processor::cache_internal::yes).discard_result().then([] {
+    return qctx->qp().execute_internal(req, {id.uuid(), int32_t(rp.shard_id()), int32_t(rp.pos), int64_t(rp.base_id()), truncated_at}, cql3::query_processor::cache_internal::yes).discard_result().then([] {
         return force_blocking_flush(TRUNCATED);
     });
 }
@@ -1444,7 +1444,7 @@ future<> system_keyspace::save_truncation_record(const replica::column_family& c
     return save_truncation_record(cf.schema()->id(), truncated_at, rp);
 }
 
-future<db::replay_position> system_keyspace::get_truncated_position(utils::UUID cf_id, uint32_t shard) {
+future<db::replay_position> system_keyspace::get_truncated_position(table_id cf_id, uint32_t shard) {
     return get_truncated_position(std::move(cf_id)).then([shard](replay_positions positions) {
         for (auto& rp : positions) {
             if (shard == rp.shard_id()) {
@@ -1455,13 +1455,13 @@ future<db::replay_position> system_keyspace::get_truncated_position(utils::UUID
     });
 }
 
-future<replay_positions> system_keyspace::get_truncated_position(utils::UUID cf_id) {
+future<replay_positions> system_keyspace::get_truncated_position(table_id cf_id) {
     return get_truncation_record(cf_id).then([](truncation_record e) {
         return make_ready_future<replay_positions>(e.positions);
     });
 }
 
-future<db_clock::time_point> system_keyspace::get_truncated_at(utils::UUID cf_id) {
+future<db_clock::time_point> system_keyspace::get_truncated_at(table_id cf_id) {
     return get_truncation_record(cf_id).then([](truncation_record e) {
         return make_ready_future<db_clock::time_point>(e.time_stamp);
     });
@@ -2633,7 +2633,7 @@ public:
 };
 
 // Map from table's schema ID to table itself. Helps avoiding accidental duplication.
-static thread_local std::map<utils::UUID, std::unique_ptr<virtual_table>> virtual_tables;
+static thread_local std::map<table_id, std::unique_ptr<virtual_table>> virtual_tables;
 
 void register_virtual_tables(distributed<replica::database>& dist_db, distributed<service::storage_service>& dist_ss, sharded<gms::gossiper>& dist_gossiper, db::config& cfg) {
     auto add_table = [] (std::unique_ptr<virtual_table>&& tbl) {
@@ -2867,15 +2867,15 @@ future<> system_keyspace::get_compaction_history(compaction_history_consumer&& f
 
 future<> system_keyspace::update_repair_history(repair_history_entry entry) {
     sstring req = format("INSERT INTO system.{} (table_uuid, repair_time, repair_uuid, keyspace_name, table_name, range_start, range_end) VALUES (?, ?, ?, ?, ?, ?, ?)", REPAIR_HISTORY);
-    co_await execute_cql(req, entry.table_uuid, entry.ts, entry.id, entry.ks, entry.cf, entry.range_start, entry.range_end).discard_result();
+    co_await execute_cql(req, entry.table_uuid.uuid(), entry.ts, entry.id, entry.ks, entry.cf, entry.range_start, entry.range_end).discard_result();
 }
 
-future<> system_keyspace::get_repair_history(utils::UUID table_id, repair_history_consumer f) {
+future<> system_keyspace::get_repair_history(::table_id table_id, repair_history_consumer f) {
     sstring req = format("SELECT * from system.{} WHERE table_uuid = {}", REPAIR_HISTORY, table_id);
     co_await _qp.local().query_internal(req, [&f] (const cql3::untyped_result_set::row& row) mutable -> future<stop_iteration> {
         repair_history_entry ent;
         ent.id = row.get_as<utils::UUID>("repair_uuid");
-        ent.table_uuid = row.get_as<utils::UUID>("table_uuid");
+        ent.table_uuid = ::table_id(row.get_as<utils::UUID>("table_uuid"));
         ent.range_start = row.get_as<int64_t>("range_start");
         ent.range_end = row.get_as<int64_t>("range_end");
         ent.ks = row.get_as<sstring>("keyspace_name");
@@ -3027,7 +3027,7 @@ future<service::paxos::paxos_state> system_keyspace::load_paxos_state(partition_
     static auto cql = format("SELECT * FROM system.{} WHERE row_key = ? AND cf_id = ?", PAXOS);
     // FIXME: we need execute_cql_with_now()
     (void)now;
-    auto f = qctx->execute_cql_with_timeout(cql, timeout, to_legacy(*key.get_compound_type(*s), key.representation()), s->id());
+    auto f = qctx->execute_cql_with_timeout(cql, timeout, to_legacy(*key.get_compound_type(*s), key.representation()), s->id().uuid());
     return f.then([s, key = std::move(key)] (shared_ptr<cql3::untyped_result_set> results) mutable {
         if (results->empty()) {
             return service::paxos::paxos_state();
@@ -3072,7 +3072,7 @@ future<> system_keyspace::save_paxos_promise(const schema& s, const partition_ke
             paxos_ttl_sec(s),
             ballot,
             to_legacy(*key.get_compound_type(s), key.representation()),
-            s.id()
+            s.id().uuid()
         ).discard_result();
 }
 
@@ -3087,7 +3087,7 @@ future<> system_keyspace::save_paxos_proposal(const schema& s, const service::pa
             proposal.ballot,
             ser::serialize_to_buffer<bytes>(proposal.update),
             to_legacy(*key.get_compound_type(s), key.representation()),
-            s.id()
+            s.id().uuid()
         ).discard_result();
 }
 
@@ -3108,7 +3108,7 @@ future<> system_keyspace::save_paxos_decision(const schema& s, const service::pa
             decision.ballot,
             ser::serialize_to_buffer<bytes>(decision.update),
             to_legacy(*key.get_compound_type(s), key.representation()),
-            s.id()
+            s.id().uuid()
         ).discard_result();
 }
 
@@ -3122,7 +3122,7 @@ future<> system_keyspace::delete_paxos_decision(const schema& s, const partition
             timeout,
             utils::UUID_gen::micros_timestamp(ballot),
             to_legacy(*key.get_compound_type(s), key.representation()),
-            s.id()
+            s.id().uuid()
         ).discard_result();
 }
@@ -220,7 +220,7 @@ public:
     static schema_ptr group0_history();
     static schema_ptr discovery();
 
-    static table_schema_version generate_schema_version(utils::UUID table_id, uint16_t offset = 0);
+    static table_schema_version generate_schema_version(table_id table_id, uint16_t offset = 0);
 
     future<> setup(sharded<netw::messaging_service>& ms);
     future<> update_schema_version(utils::UUID version);
@@ -308,7 +308,7 @@ public:
 
     struct repair_history_entry {
         utils::UUID id;
-        utils::UUID table_uuid;
+        table_id table_uuid;
         db_clock::time_point ts;
         sstring ks;
         sstring cf;
@@ -318,16 +318,16 @@ public:
 
     future<> update_repair_history(repair_history_entry);
     using repair_history_consumer = noncopyable_function<future<>(const repair_history_entry&)>;
-    future<> get_repair_history(utils::UUID table_id, repair_history_consumer f);
+    future<> get_repair_history(table_id, repair_history_consumer f);
 
     typedef std::vector<db::replay_position> replay_positions;
 
-    static future<> save_truncation_record(utils::UUID, db_clock::time_point truncated_at, db::replay_position);
+    static future<> save_truncation_record(table_id, db_clock::time_point truncated_at, db::replay_position);
     static future<> save_truncation_record(const replica::column_family&, db_clock::time_point truncated_at, db::replay_position);
-    static future<replay_positions> get_truncated_position(utils::UUID);
-    static future<db::replay_position> get_truncated_position(utils::UUID, uint32_t shard);
-    static future<db_clock::time_point> get_truncated_at(utils::UUID);
-    static future<truncation_record> get_truncation_record(utils::UUID cf_id);
+    static future<replay_positions> get_truncated_position(table_id);
+    static future<db::replay_position> get_truncated_position(table_id, uint32_t shard);
+    static future<db_clock::time_point> get_truncated_at(table_id);
+    static future<truncation_record> get_truncation_record(table_id cf_id);
 
     /**
      * Return a map of stored tokens to IP addresses
@@ -1486,7 +1486,7 @@ future<> view_builder::drain() {
     }).handle_exception_type([] (const semaphore_timed_out&) {
         // ignored
     }).finally([this] {
-        return parallel_for_each(_base_to_build_step, [] (std::pair<const utils::UUID, build_step>& p) {
+        return parallel_for_each(_base_to_build_step, [] (std::pair<const table_id, build_step>& p) {
            return p.second.reader.close();
        });
     });
@@ -1498,7 +1498,7 @@ future<> view_builder::stop() {
     return drain();
 }
 
-view_builder::build_step& view_builder::get_or_create_build_step(utils::UUID base_id) {
+view_builder::build_step& view_builder::get_or_create_build_step(table_id base_id) {
     auto it = _base_to_build_step.find(base_id);
     if (it == _base_to_build_step.end()) {
         auto base = _db.find_column_family(base_id).shared_from_this();
@@ -1526,7 +1526,7 @@ future<> view_builder::initialize_reader_at_current_token(build_step& step) {
     });
 }
 
-void view_builder::load_view_status(view_builder::view_build_status status, std::unordered_set<utils::UUID>& loaded_views) {
+void view_builder::load_view_status(view_builder::view_build_status status, std::unordered_set<table_id>& loaded_views) {
     if (!status.next_token) {
         // No progress was made on this view, so we'll treat it as new.
         return;
@@ -1544,14 +1544,14 @@ void view_builder::load_view_status(view_builder::view_build_status status, std:
 
 void view_builder::reshard(
         std::vector<std::vector<view_builder::view_build_status>> view_build_status_per_shard,
-        std::unordered_set<utils::UUID>& loaded_views) {
+        std::unordered_set<table_id>& loaded_views) {
     // We must reshard. We aim for a simple algorithm, a step above not starting from scratch.
     // Shards build entries at different paces, so both first and last tokens will differ. We
     // want to be conservative when selecting the range that has been built. To do that, we
     // select the intersection of all the previous shard's ranges for each view.
     struct view_ptr_hash {
         std::size_t operator()(const view_ptr& v) const noexcept {
-            return std::hash<utils::UUID>()(v->id());
+            return std::hash<table_id>()(v->id());
         }
     };
     struct view_ptr_equals {
@@ -1646,7 +1646,7 @@ void view_builder::setup_shard_build_step(
         return view_ptr(nullptr);
     };
 
-    vbi.built_views = boost::copy_range<std::unordered_set<utils::UUID>>(built
+    vbi.built_views = boost::copy_range<std::unordered_set<table_id>>(built
            | boost::adaptors::transformed(maybe_fetch_view)
            | boost::adaptors::filtered([] (const view_ptr& v) { return bool(v); })
            | boost::adaptors::transformed([] (const view_ptr& v) { return v->id(); }));
@@ -1683,7 +1683,7 @@ future<> view_builder::calculate_shard_build_step(view_builder_init_state& vbi)
             return false;
         }
     };
-    std::unordered_set<utils::UUID> loaded_views;
+    std::unordered_set<table_id> loaded_views;
     if (vbi.status_per_shard.size() != smp::count) {
         reshard(std::move(vbi.status_per_shard), loaded_views);
     } else if (!vbi.status_per_shard.empty()) {
@@ -144,7 +144,7 @@ class view_builder final : public service::migration_listener::only_view_notific
         }
     };
 
-    using base_to_build_step_type = std::unordered_map<utils::UUID, build_step>;
+    using base_to_build_step_type = std::unordered_map<table_id, build_step>;
 
     replica::database& _db;
     db::system_distributed_keyspace& _sys_dist_ks;
@@ -160,7 +160,7 @@ class view_builder final : public service::migration_listener::only_view_notific
     seastar::abort_source _as;
     future<> _started = make_ready_future<>();
     // Used to coordinate between shards the conclusion of the build process for a particular view.
-    std::unordered_set<utils::UUID> _built_views;
+    std::unordered_set<table_id> _built_views;
     // Counter and promise (both on shard 0 only!) allowing to wait for all
     // shards to have read the view build statuses
     unsigned _shards_finished_read = 0;
@@ -173,7 +173,7 @@ class view_builder final : public service::migration_listener::only_view_notific
    struct view_builder_init_state {
        std::vector<future<>> bookkeeping_ops;
        std::vector<std::vector<view_build_status>> status_per_shard;
-       std::unordered_set<utils::UUID> built_views;
+       std::unordered_set<table_id> built_views;
    };
 
 public:
@@ -215,10 +215,10 @@ public:
     future<std::unordered_map<sstring, sstring>> view_build_statuses(sstring keyspace, sstring view_name) const;
 
 private:
-    build_step& get_or_create_build_step(utils::UUID);
+    build_step& get_or_create_build_step(table_id);
     future<> initialize_reader_at_current_token(build_step&);
-    void load_view_status(view_build_status, std::unordered_set<utils::UUID>&);
-    void reshard(std::vector<std::vector<view_build_status>>, std::unordered_set<utils::UUID>&);
+    void load_view_status(view_build_status, std::unordered_set<table_id>&);
+    void reshard(std::vector<std::vector<view_build_status>>, std::unordered_set<table_id>&);
     void setup_shard_build_step(view_builder_init_state& vbi, std::vector<system_keyspace_view_name>, std::vector<system_keyspace_view_build_progress>);
     future<> calculate_shard_build_step(view_builder_init_state& vbi);
     future<> add_new_view(view_ptr, build_step&);
@@ -36,7 +36,7 @@ ser::mutation_view frozen_mutation::mutation_view() const {
     return ser::deserialize(in, boost::type<ser::mutation_view>());
 }
 
-utils::UUID
+table_id
 frozen_mutation::column_family_id() const {
     return mutation_view().table_id();
 }
@@ -166,7 +166,7 @@ public:
     frozen_mutation& operator=(frozen_mutation&&) = default;
     frozen_mutation& operator=(const frozen_mutation&) = default;
     const bytes_ostream& representation() const { return _bytes; }
-    utils::UUID column_family_id() const;
+    table_id column_family_id() const;
     utils::UUID schema_version() const; // FIXME: Should replace column_family_id()
     partition_key_view key() const;
     dht::decorated_key decorated_key(const schema& s) const;
@@ -123,7 +123,7 @@ class mutation_partition stub [[writable]] {
 };
 
 class mutation stub [[writable]] {
-    utils::UUID table_id;
+    ::table_id table_id;
     utils::UUID schema_version;
     partition_key key;
     mutation_partition partition;
@@ -140,7 +140,7 @@ class column_mapping {
 };
 
 class canonical_mutation stub [[writable]] {
-    utils::UUID table_id;
+    ::table_id table_id;
     utils::UUID schema_version;
     partition_key key;
     column_mapping mapping;
@@ -109,7 +109,7 @@ struct node_ops_cmd_request {
     // Optional field, map bootstrapping nodes to bootstrap tokens, set by bootstrap cmd
     std::unordered_map<gms::inet_address, std::list<dht::token>> bootstrap_nodes;
     // Optional field, list uuids of tables being repaired, set by repair cmd
-    std::list<utils::UUID> repair_tables;
+    std::list<table_id> repair_tables;
 };
 
 struct node_ops_cmd_response {
@@ -121,7 +121,7 @@ struct node_ops_cmd_response {
 
 struct repair_update_system_table_request {
     utils::UUID repair_uuid;
-    utils::UUID table_uuid;
+    table_id table_uuid;
     sstring keyspace_name;
     sstring table_name;
     dht::token_range range;
@@ -47,7 +47,7 @@ struct max_result_size {
 }
 
 class read_command {
-    utils::UUID cf_id;
+    table_id cf_id;
     utils::UUID schema_version;
     query::partition_slice slice;
     uint32_t row_limit_low_bits;
@@ -21,7 +21,7 @@ class stream_request {
 };
 
 class stream_summary {
-    utils::UUID cf_id;
+    table_id cf_id;
     int files;
     long total_size;
 };
@@ -7,6 +7,7 @@
  */
 
 #include "utils/UUID.hh"
+#include "schema_fwd.hh"
 
 namespace utils {
 class UUID final {
@@ -14,3 +15,7 @@ class UUID final {
     int64_t get_least_significant_bits();
 };
 }
+
+class table_id final {
+    utils::UUID uuid();
+};
@@ -823,14 +823,14 @@ rpc::sink<int32_t> messaging_service::make_sink_for_stream_mutation_fragments(rp
 }
 
 future<std::tuple<rpc::sink<frozen_mutation_fragment, streaming::stream_mutation_fragments_cmd>, rpc::source<int32_t>>>
-messaging_service::make_sink_and_source_for_stream_mutation_fragments(utils::UUID schema_id, utils::UUID plan_id, utils::UUID cf_id, uint64_t estimated_partitions, streaming::stream_reason reason, msg_addr id) {
+messaging_service::make_sink_and_source_for_stream_mutation_fragments(utils::UUID schema_id, utils::UUID plan_id, table_id cf_id, uint64_t estimated_partitions, streaming::stream_reason reason, msg_addr id) {
     using value_type = std::tuple<rpc::sink<frozen_mutation_fragment, streaming::stream_mutation_fragments_cmd>, rpc::source<int32_t>>;
     if (is_shutting_down()) {
         return make_exception_future<value_type>(rpc::closed_error());
     }
     auto rpc_client = get_rpc_client(messaging_verb::STREAM_MUTATION_FRAGMENTS, id);
     return rpc_client->make_stream_sink<netw::serializer, frozen_mutation_fragment, streaming::stream_mutation_fragments_cmd>().then([this, plan_id, schema_id, cf_id, estimated_partitions, reason, rpc_client] (rpc::sink<frozen_mutation_fragment, streaming::stream_mutation_fragments_cmd> sink) mutable {
-        auto rpc_handler = rpc()->make_client<rpc::source<int32_t> (utils::UUID, utils::UUID, utils::UUID, uint64_t, streaming::stream_reason, rpc::sink<frozen_mutation_fragment, streaming::stream_mutation_fragments_cmd>)>(messaging_verb::STREAM_MUTATION_FRAGMENTS);
+        auto rpc_handler = rpc()->make_client<rpc::source<int32_t> (utils::UUID, utils::UUID, table_id, uint64_t, streaming::stream_reason, rpc::sink<frozen_mutation_fragment, streaming::stream_mutation_fragments_cmd>)>(messaging_verb::STREAM_MUTATION_FRAGMENTS);
         return rpc_handler(*rpc_client , plan_id, schema_id, cf_id, estimated_partitions, reason, sink).then_wrapped([sink, rpc_client] (future<rpc::source<int32_t>> source) mutable {
             return (source.failed() ? sink.close() : make_ready_future<>()).then([sink = std::move(sink), source = std::move(source)] () mutable {
                 return make_ready_future<value_type>(value_type(std::move(sink), source.get0()));
@@ -839,7 +839,7 @@ messaging_service::make_sink_and_source_for_stream_mutation_fragments(utils::UUI
     });
 }
 
-void messaging_service::register_stream_mutation_fragments(std::function<future<rpc::sink<int32_t>> (const rpc::client_info& cinfo, UUID plan_id, UUID schema_id, UUID cf_id, uint64_t estimated_partitions, rpc::optional<streaming::stream_reason>, rpc::source<frozen_mutation_fragment, rpc::optional<streaming::stream_mutation_fragments_cmd>> source)>&& func) {
+void messaging_service::register_stream_mutation_fragments(std::function<future<rpc::sink<int32_t>> (const rpc::client_info& cinfo, UUID plan_id, UUID schema_id, table_id cf_id, uint64_t estimated_partitions, rpc::optional<streaming::stream_reason>, rpc::source<frozen_mutation_fragment, rpc::optional<streaming::stream_mutation_fragments_cmd>> source)>&& func) {
     register_handler(this, messaging_verb::STREAM_MUTATION_FRAGMENTS, std::move(func));
 }
@@ -962,15 +962,15 @@ future<> messaging_service::unregister_prepare_done_message() {
 
 // STREAM_MUTATION_DONE
 void messaging_service::register_stream_mutation_done(std::function<future<> (const rpc::client_info& cinfo,
-        UUID plan_id, dht::token_range_vector ranges, UUID cf_id, unsigned dst_cpu_id)>&& func) {
+        UUID plan_id, dht::token_range_vector ranges, table_id cf_id, unsigned dst_cpu_id)>&& func) {
     register_handler(this, messaging_verb::STREAM_MUTATION_DONE,
             [func = std::move(func)] (const rpc::client_info& cinfo,
                     UUID plan_id, std::vector<wrapping_range<dht::token>> ranges,
-                    UUID cf_id, unsigned dst_cpu_id) mutable {
+                    table_id cf_id, unsigned dst_cpu_id) mutable {
         return func(cinfo, plan_id, ::compat::unwrap(std::move(ranges)), cf_id, dst_cpu_id);
     });
 }
-future<> messaging_service::send_stream_mutation_done(msg_addr id, UUID plan_id, dht::token_range_vector ranges, UUID cf_id, unsigned dst_cpu_id) {
+future<> messaging_service::send_stream_mutation_done(msg_addr id, UUID plan_id, dht::token_range_vector ranges, table_id cf_id, unsigned dst_cpu_id) {
     return send_message<void>(this, messaging_verb::STREAM_MUTATION_DONE, id,
         plan_id, std::move(ranges), cf_id, dst_cpu_id);
 }
@@ -19,6 +19,7 @@
#include <unordered_map>
#include "range.hh"
#include "tracing/tracing.hh"
#include "schema_fwd.hh"

#include <list>
#include <vector>
@@ -349,10 +350,10 @@ public:

// Wrapper for STREAM_MUTATION_FRAGMENTS
// The receiver of STREAM_MUTATION_FRAGMENTS sends status code to the sender to notify any error on the receiver side. The status code is of type int32_t. 0 means successful, -1 means error, other status code value are reserved for future use.
void register_stream_mutation_fragments(std::function<future<rpc::sink<int32_t>> (const rpc::client_info& cinfo, UUID plan_id, UUID schema_id, UUID cf_id, uint64_t estimated_partitions, rpc::optional<streaming::stream_reason> reason_opt, rpc::source<frozen_mutation_fragment, rpc::optional<streaming::stream_mutation_fragments_cmd>> source)>&& func);
void register_stream_mutation_fragments(std::function<future<rpc::sink<int32_t>> (const rpc::client_info& cinfo, UUID plan_id, UUID schema_id, table_id cf_id, uint64_t estimated_partitions, rpc::optional<streaming::stream_reason> reason_opt, rpc::source<frozen_mutation_fragment, rpc::optional<streaming::stream_mutation_fragments_cmd>> source)>&& func);
future<> unregister_stream_mutation_fragments();
rpc::sink<int32_t> make_sink_for_stream_mutation_fragments(rpc::source<frozen_mutation_fragment, rpc::optional<streaming::stream_mutation_fragments_cmd>>& source);
future<std::tuple<rpc::sink<frozen_mutation_fragment, streaming::stream_mutation_fragments_cmd>, rpc::source<int32_t>>> make_sink_and_source_for_stream_mutation_fragments(utils::UUID schema_id, utils::UUID plan_id, utils::UUID cf_id, uint64_t estimated_partitions, streaming::stream_reason reason, msg_addr id);
future<std::tuple<rpc::sink<frozen_mutation_fragment, streaming::stream_mutation_fragments_cmd>, rpc::source<int32_t>>> make_sink_and_source_for_stream_mutation_fragments(utils::UUID schema_id, utils::UUID plan_id, table_id cf_id, uint64_t estimated_partitions, streaming::stream_reason reason, msg_addr id);

// Wrapper for REPAIR_GET_ROW_DIFF_WITH_RPC_STREAM
future<std::tuple<rpc::sink<repair_hash_with_cmd>, rpc::source<repair_row_on_wire_with_cmd>>> make_sink_and_source_for_repair_get_row_diff_with_rpc_stream(uint32_t repair_meta_id, msg_addr id);
@@ -372,8 +373,8 @@ public:
void register_repair_get_full_row_hashes_with_rpc_stream(std::function<future<rpc::sink<repair_hash_with_cmd>> (const rpc::client_info& cinfo, uint32_t repair_meta_id, rpc::source<repair_stream_cmd> source)>&& func);
future<> unregister_repair_get_full_row_hashes_with_rpc_stream();

void register_stream_mutation_done(std::function<future<> (const rpc::client_info& cinfo, UUID plan_id, dht::token_range_vector ranges, UUID cf_id, unsigned dst_cpu_id)>&& func);
future<> send_stream_mutation_done(msg_addr id, UUID plan_id, dht::token_range_vector ranges, UUID cf_id, unsigned dst_cpu_id);
void register_stream_mutation_done(std::function<future<> (const rpc::client_info& cinfo, UUID plan_id, dht::token_range_vector ranges, table_id cf_id, unsigned dst_cpu_id)>&& func);
future<> send_stream_mutation_done(msg_addr id, UUID plan_id, dht::token_range_vector ranges, table_id cf_id, unsigned dst_cpu_id);
future<> unregister_stream_mutation_done();

void register_complete_message(std::function<future<> (const rpc::client_info& cinfo, UUID plan_id, unsigned dst_cpu_id, rpc::optional<bool> failed)>&& func);

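The status-code convention documented above is the only contract between the two ends of the stream. As a minimal sketch of a receiver-side completion (illustration only, not the in-tree handler; it assumes Seastar's rpc::sink<int32_t> interface of invoking the sink to send and close() to finish):

// Sketch only, not code from this patch: report the receiver's outcome back
// to the sender over the int32_t status sink (0 = success, -1 = error).
future<> report_stream_status(rpc::sink<int32_t> status_sink, bool ok) {
    co_await status_sink(ok ? int32_t(0) : int32_t(-1));
    co_await status_sink.close();
}
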
@@ -139,7 +139,7 @@ public:
const schema_ptr& schema() const { return _ptr->_schema; }
const mutation_partition& partition() const { return _ptr->_p; }
mutation_partition& partition() { return _ptr->_p; }
const utils::UUID& column_family_id() const { return _ptr->_schema->id(); }
const table_id& column_family_id() const { return _ptr->_schema->id(); }
// Consistent with hash<canonical_mutation>
bool operator==(const mutation&) const;
bool operator!=(const mutation&) const;

@@ -413,7 +413,7 @@ future<bool> querier_cache::evict_one() noexcept {
co_return false;
}

future<> querier_cache::evict_all_for_table(const utils::UUID& schema_id) noexcept {
future<> querier_cache::evict_all_for_table(const table_id& schema_id) noexcept {
for (auto ip : {&_data_querier_index, &_mutation_querier_index, &_shard_mutation_querier_index}) {
auto& idx = *ip;
for (auto it = idx.begin(); it != idx.end();) {

@@ -386,7 +386,7 @@ public:
/// Evict all queriers that belong to a table.
///
/// Should be used when dropping a table.
future<> evict_all_for_table(const utils::UUID& schema_id) noexcept;
future<> evict_all_for_table(const table_id& schema_id) noexcept;

/// Close all queriers and wait on background work.
///

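Since evict_all_for_table() is documented above as the hook for table drops, a one-line usage sketch may help; this is a hypothetical caller, assuming schema::id() now returns table_id as introduced elsewhere in this patch:

// Sketch only: dropping a table hands the strongly typed id straight through.
future<> on_table_dropped(querier_cache& qc, schema_ptr s) {
    return qc.evict_all_for_table(s->id()); // s->id() is a table_id now
}
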
@@ -277,7 +277,7 @@ using is_first_page = bool_class<class is_first_page_tag>;
|
||||
// Can be accessed across cores.
|
||||
class read_command {
|
||||
public:
|
||||
utils::UUID cf_id;
|
||||
table_id cf_id;
|
||||
table_schema_version schema_version; // TODO: This should be enough, drop cf_id
|
||||
partition_slice slice;
|
||||
uint32_t row_limit_low_bits;
|
||||
@@ -306,7 +306,7 @@ public:
|
||||
db::allow_per_partition_rate_limit allow_limit; // not serialized
|
||||
public:
|
||||
// IDL constructor
|
||||
read_command(utils::UUID cf_id,
|
||||
read_command(table_id cf_id,
|
||||
table_schema_version schema_version,
|
||||
partition_slice slice,
|
||||
uint32_t row_limit_low_bits,
|
||||
@@ -332,7 +332,7 @@ public:
|
||||
, allow_limit(db::allow_per_partition_rate_limit::no)
|
||||
{ }
|
||||
|
||||
read_command(utils::UUID cf_id,
|
||||
read_command(table_id cf_id,
|
||||
table_schema_version schema_version,
|
||||
partition_slice slice,
|
||||
query::max_result_size max_result_size,
|
||||
|
||||
@@ -143,8 +143,8 @@ std::ostream& operator<<(std::ostream& os, const repair_uniq_id& x) {
|
||||
}
|
||||
|
||||
// Must run inside a seastar thread
|
||||
static std::vector<utils::UUID> get_table_ids(const replica::database& db, const sstring& keyspace, const std::vector<sstring>& tables) {
|
||||
std::vector<utils::UUID> table_ids;
|
||||
static std::vector<table_id> get_table_ids(const replica::database& db, const sstring& keyspace, const std::vector<sstring>& tables) {
|
||||
std::vector<table_id> table_ids;
|
||||
table_ids.reserve(tables.size());
|
||||
for (auto& table : tables) {
|
||||
thread::maybe_yield();
|
||||
@@ -153,7 +153,7 @@ static std::vector<utils::UUID> get_table_ids(const replica::database& db, const
|
||||
return table_ids;
|
||||
}
|
||||
|
||||
static std::vector<sstring> get_table_names(const replica::database& db, const std::vector<utils::UUID>& table_ids) {
|
||||
static std::vector<sstring> get_table_names(const replica::database& db, const std::vector<table_id>& table_ids) {
|
||||
std::vector<sstring> table_names;
|
||||
table_names.reserve(table_ids.size());
|
||||
for (auto& table_id : table_ids) {
|
||||
@@ -508,7 +508,7 @@ future<uint64_t> estimate_partitions(seastar::sharded<replica::database>& db, co
|
||||
|
||||
static
|
||||
const dht::sharder&
|
||||
get_sharder_for_tables(seastar::sharded<replica::database>& db, const sstring& keyspace, const std::vector<utils::UUID>& table_ids) {
|
||||
get_sharder_for_tables(seastar::sharded<replica::database>& db, const sstring& keyspace, const std::vector<table_id>& table_ids) {
|
||||
schema_ptr last_s;
|
||||
for (size_t idx = 0 ; idx < table_ids.size(); idx++) {
|
||||
schema_ptr s;
|
||||
@@ -536,7 +536,7 @@ get_sharder_for_tables(seastar::sharded<replica::database>& db, const sstring& k
|
||||
repair_info::repair_info(repair_service& repair,
|
||||
const sstring& keyspace_,
|
||||
const dht::token_range_vector& ranges_,
|
||||
std::vector<utils::UUID> table_ids_,
|
||||
std::vector<table_id> table_ids_,
|
||||
repair_uniq_id id_,
|
||||
const std::vector<sstring>& data_centers_,
|
||||
const std::vector<sstring>& hosts_,
|
||||
@@ -605,7 +605,7 @@ size_t repair_info::ranges_size() {
|
||||
|
||||
// Repair a single local range, multiple column families.
|
||||
// Comparable to RepairSession in Origin
|
||||
future<> repair_info::repair_range(const dht::token_range& range, utils::UUID table_id) {
|
||||
future<> repair_info::repair_range(const dht::token_range& range, ::table_id table_id) {
|
||||
check_in_shutdown();
|
||||
check_in_abort();
|
||||
ranges_index++;
|
||||
@@ -1153,7 +1153,7 @@ int repair_service::do_repair_start(sstring keyspace, std::unordered_map<sstring
|
||||
auto table_ids = get_table_ids(db.local(), keyspace, cfs);
|
||||
abort_source as;
|
||||
auto off_strategy_updater = seastar::async([this, uuid, &table_ids, &participants, &as] {
|
||||
auto tables = std::list<utils::UUID>(table_ids.begin(), table_ids.end());
|
||||
auto tables = std::list<table_id>(table_ids.begin(), table_ids.end());
|
||||
auto req = node_ops_cmd_request(node_ops_cmd::repair_updater, uuid, {}, {}, {}, {}, std::move(tables));
|
||||
auto update_interval = std::chrono::seconds(30);
|
||||
while (!as.abort_requested()) {
|
||||
|
||||
@@ -151,7 +151,7 @@ public:
|
||||
sstring keyspace;
|
||||
dht::token_range_vector ranges;
|
||||
std::vector<sstring> cfs;
|
||||
std::vector<utils::UUID> table_ids;
|
||||
std::vector<table_id> table_ids;
|
||||
repair_uniq_id id;
|
||||
shard_id shard;
|
||||
std::vector<sstring> data_centers;
|
||||
@@ -173,7 +173,7 @@ public:
|
||||
repair_info(repair_service& repair,
|
||||
const sstring& keyspace_,
|
||||
const dht::token_range_vector& ranges_,
|
||||
std::vector<utils::UUID> table_ids_,
|
||||
std::vector<table_id> table_ids_,
|
||||
repair_uniq_id id_,
|
||||
const std::vector<sstring>& data_centers_,
|
||||
const std::vector<sstring>& hosts_,
|
||||
@@ -200,7 +200,7 @@ public:
|
||||
return _hints_batchlog_flushed;
|
||||
}
|
||||
|
||||
future<> repair_range(const dht::token_range& range, utils::UUID table_id);
|
||||
future<> repair_range(const dht::token_range& range, table_id);
|
||||
|
||||
size_t ranges_size();
|
||||
};
|
||||
@@ -386,14 +386,14 @@ struct node_ops_cmd_request {
|
||||
// Optional field, map bootstrapping nodes to bootstrap tokens, set by bootstrap cmd
|
||||
std::unordered_map<gms::inet_address, std::list<dht::token>> bootstrap_nodes;
|
||||
// Optional field, list uuids of tables being repaired, set by repair cmd
|
||||
std::list<utils::UUID> repair_tables;
|
||||
std::list<table_id> repair_tables;
|
||||
node_ops_cmd_request(node_ops_cmd command,
|
||||
utils::UUID uuid,
|
||||
std::list<gms::inet_address> ignore = {},
|
||||
std::list<gms::inet_address> leaving = {},
|
||||
std::unordered_map<gms::inet_address, gms::inet_address> replace = {},
|
||||
std::unordered_map<gms::inet_address, std::list<dht::token>> bootstrap = {},
|
||||
std::list<utils::UUID> tables = {})
|
||||
std::list<table_id> tables = {})
|
||||
: cmd(command)
|
||||
, ops_uuid(std::move(uuid))
|
||||
, ignore_nodes(std::move(ignore))
|
||||
@@ -418,7 +418,7 @@ struct node_ops_cmd_response {
|
||||
|
||||
struct repair_update_system_table_request {
|
||||
utils::UUID repair_uuid;
|
||||
utils::UUID table_uuid;
|
||||
table_id table_uuid;
|
||||
sstring keyspace_name;
|
||||
sstring table_name;
|
||||
dht::token_range range;
|
||||
|
||||
@@ -2370,7 +2370,7 @@ static void add_to_repair_meta_for_followers(repair_meta& rm) {
|
||||
class row_level_repair {
|
||||
repair_info& _ri;
|
||||
sstring _cf_name;
|
||||
utils::UUID _table_id;
|
||||
table_id _table_id;
|
||||
dht::token_range _range;
|
||||
inet_address_vector_replica_set _all_live_peer_nodes;
|
||||
replica::column_family& _cf;
|
||||
@@ -2419,7 +2419,7 @@ class row_level_repair {
|
||||
public:
|
||||
row_level_repair(repair_info& ri,
|
||||
sstring cf_name,
|
||||
utils::UUID table_id,
|
||||
table_id table_id,
|
||||
dht::token_range range,
|
||||
std::vector<gms::inet_address> all_live_peer_nodes)
|
||||
: _ri(ri)
|
||||
@@ -2844,7 +2844,7 @@ public:
|
||||
};
|
||||
|
||||
future<> repair_cf_range_row_level(repair_info& ri,
|
||||
sstring cf_name, utils::UUID table_id, dht::token_range range,
|
||||
sstring cf_name, table_id table_id, dht::token_range range,
|
||||
const std::vector<gms::inet_address>& all_peer_nodes) {
|
||||
return seastar::futurize_invoke([&ri, cf_name = std::move(cf_name), table_id = std::move(table_id), range = std::move(range), &all_peer_nodes] () mutable {
|
||||
auto repair = row_level_repair(ri, std::move(cf_name), std::move(table_id), std::move(range), all_peer_nodes);
|
||||
@@ -2962,7 +2962,7 @@ static shard_id repair_id_to_shard(utils::UUID& repair_id) {
|
||||
}
|
||||
|
||||
future<std::optional<gc_clock::time_point>>
|
||||
repair_service::update_history(utils::UUID repair_id, utils::UUID table_id, dht::token_range range, gc_clock::time_point repair_time) {
|
||||
repair_service::update_history(utils::UUID repair_id, table_id table_id, dht::token_range range, gc_clock::time_point repair_time) {
|
||||
auto shard = repair_id_to_shard(repair_id);
|
||||
return container().invoke_on(shard, [repair_id, table_id, range, repair_time] (repair_service& rs) mutable -> future<std::optional<gc_clock::time_point>> {
|
||||
repair_history& rh = rs._finished_ranges_history[repair_id];
|
||||
@@ -2996,7 +2996,7 @@ future<> repair_service::load_history() {
|
||||
for (const auto& x : tables) {
|
||||
auto& table_uuid = x.first;
|
||||
auto& table = x.second;
|
||||
auto shard = unsigned(table_uuid.get_most_significant_bits()) % smp::count;
|
||||
auto shard = unsigned(table_uuid.uuid().get_most_significant_bits()) % smp::count;
|
||||
if (shard != this_shard_id()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -47,8 +47,7 @@ struct shard_config {

class repair_history {
public:
// The key for the map is the table_id
std::unordered_map<utils::UUID, std::unordered_map<dht::token_range, size_t>> finished_ranges;
std::unordered_map<table_id, std::unordered_map<dht::token_range, size_t>> finished_ranges;
gc_clock::time_point repair_time = gc_clock::time_point::max();
};

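A short illustration of what the stronger map key buys here (hypothetical caller; repair ids remain plain utils::UUIDs in this patch, so they can no longer index the per-table map by accident):

// Sketch only, not code from this patch.
void touch_history(repair_history& rh) {
    utils::UUID repair_id = utils::UUID_gen::get_time_UUID();  // still a plain UUID
    table_id tid = table_id(utils::UUID_gen::get_time_UUID()); // must be wrapped explicitly
    rh.finished_ranges[tid];         // fine: the map is keyed by table_id
    // rh.finished_ranges[repair_id]; // no longer compiles: a bare UUID is not a table_id
}
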
@@ -127,7 +126,7 @@ public:
|
||||
// stop them abruptly).
|
||||
future<> shutdown();
|
||||
|
||||
future<std::optional<gc_clock::time_point>> update_history(utils::UUID repair_id, utils::UUID table_id, dht::token_range range, gc_clock::time_point repair_time);
|
||||
future<std::optional<gc_clock::time_point>> update_history(utils::UUID repair_id, table_id table_id, dht::token_range range, gc_clock::time_point repair_time);
|
||||
future<> cleanup_history(utils::UUID repair_id);
|
||||
future<> load_history();
|
||||
|
||||
@@ -243,7 +242,7 @@ class repair_hasher;
|
||||
class repair_writer;
|
||||
|
||||
future<> repair_cf_range_row_level(repair_info& ri,
|
||||
sstring cf_name, utils::UUID table_id, dht::token_range range,
|
||||
sstring cf_name, table_id table_id, dht::token_range range,
|
||||
const std::vector<gms::inet_address>& all_peer_nodes);
|
||||
future<std::list<repair_row>> to_repair_rows_list(repair_rows_on_wire rows,
|
||||
schema_ptr s, uint64_t seed, repair_master is_master,
|
||||
|
||||
@@ -70,7 +70,7 @@ public:
|
||||
return std::nullopt;
|
||||
}
|
||||
}
|
||||
virtual std::optional<data_dictionary::table> try_find_table(data_dictionary::database db, utils::UUID id) const override {
|
||||
virtual std::optional<data_dictionary::table> try_find_table(data_dictionary::database db, table_id id) const override {
|
||||
try {
|
||||
return wrap(unwrap(db).find_column_family(id));
|
||||
} catch (no_such_column_family&) {
|
||||
|
||||
@@ -141,7 +141,7 @@ public:
|
||||
};
|
||||
|
||||
const boost::container::static_vector<std::pair<size_t, boost::container::static_vector<table*, 16>>, 10>
|
||||
phased_barrier_top_10_counts(const std::unordered_map<utils::UUID, lw_shared_ptr<column_family>>& tables, std::function<size_t(table&)> op_count_getter) {
|
||||
phased_barrier_top_10_counts(const std::unordered_map<table_id, lw_shared_ptr<column_family>>& tables, std::function<size_t(table&)> op_count_getter) {
|
||||
using table_list = boost::container::static_vector<table*, 16>;
|
||||
using count_and_tables = std::pair<size_t, table_list>;
|
||||
const auto less = [] (const count_and_tables& a, const count_and_tables& b) {
|
||||
@@ -1012,7 +1012,7 @@ future<> database::detach_column_family(table& cf) {
|
||||
co_await _querier_cache.evict_all_for_table(uuid);
|
||||
}
|
||||
|
||||
future<std::vector<foreign_ptr<lw_shared_ptr<table>>>> database::get_table_on_all_shards(sharded<database>& sharded_db, utils::UUID uuid) {
|
||||
future<std::vector<foreign_ptr<lw_shared_ptr<table>>>> database::get_table_on_all_shards(sharded<database>& sharded_db, table_id uuid) {
|
||||
std::vector<foreign_ptr<lw_shared_ptr<table>>> table_shards;
|
||||
table_shards.resize(smp::count);
|
||||
co_await coroutine::parallel_for_each(boost::irange(0u, smp::count), [&] (unsigned shard) -> future<> {
|
||||
@@ -1049,7 +1049,7 @@ future<> database::drop_table_on_all_shards(sharded<database>& sharded_db, sstri
|
||||
co_await sstables::remove_table_directory_if_has_no_snapshots(table_dir);
|
||||
}
|
||||
|
||||
const utils::UUID& database::find_uuid(std::string_view ks, std::string_view cf) const {
|
||||
const table_id& database::find_uuid(std::string_view ks, std::string_view cf) const {
|
||||
try {
|
||||
return _ks_cf_to_uuid.at(std::make_pair(ks, cf));
|
||||
} catch (std::out_of_range&) {
|
||||
@@ -1057,7 +1057,7 @@ const utils::UUID& database::find_uuid(std::string_view ks, std::string_view cf)
|
||||
}
|
||||
}
|
||||
|
||||
const utils::UUID& database::find_uuid(const schema_ptr& schema) const {
|
||||
const table_id& database::find_uuid(const schema_ptr& schema) const {
|
||||
return find_uuid(schema->ks_name(), schema->cf_name());
|
||||
}
|
||||
|
||||
@@ -1137,7 +1137,7 @@ const column_family& database::find_column_family(std::string_view ks_name, std:
|
||||
}
|
||||
}
|
||||
|
||||
column_family& database::find_column_family(const utils::UUID& uuid) {
|
||||
column_family& database::find_column_family(const table_id& uuid) {
|
||||
try {
|
||||
return *_column_families.at(uuid);
|
||||
} catch (...) {
|
||||
@@ -1145,7 +1145,7 @@ column_family& database::find_column_family(const utils::UUID& uuid) {
|
||||
}
|
||||
}
|
||||
|
||||
const column_family& database::find_column_family(const utils::UUID& uuid) const {
|
||||
const column_family& database::find_column_family(const table_id& uuid) const {
|
||||
try {
|
||||
return *_column_families.at(uuid);
|
||||
} catch (...) {
|
||||
@@ -1153,7 +1153,7 @@ const column_family& database::find_column_family(const utils::UUID& uuid) const
|
||||
}
|
||||
}
|
||||
|
||||
bool database::column_family_exists(const utils::UUID& uuid) const {
|
||||
bool database::column_family_exists(const table_id& uuid) const {
|
||||
return _column_families.contains(uuid);
|
||||
}
|
||||
|
||||
@@ -1237,19 +1237,19 @@ keyspace::make_column_family_config(const schema& s, const database& db) const {
|
||||
}
|
||||
|
||||
sstring
|
||||
keyspace::column_family_directory(const sstring& name, utils::UUID uuid) const {
|
||||
keyspace::column_family_directory(const sstring& name, table_id uuid) const {
|
||||
return column_family_directory(_config.datadir, name, uuid);
|
||||
}
|
||||
|
||||
sstring
|
||||
keyspace::column_family_directory(const sstring& base_path, const sstring& name, utils::UUID uuid) const {
|
||||
keyspace::column_family_directory(const sstring& base_path, const sstring& name, table_id uuid) const {
|
||||
auto uuid_sstring = uuid.to_sstring();
|
||||
boost::erase_all(uuid_sstring, "-");
|
||||
return format("{}/{}-{}", base_path, name, uuid_sstring);
|
||||
}
|
||||
|
||||
future<>
|
||||
keyspace::make_directory_for_column_family(const sstring& name, utils::UUID uuid) {
|
||||
keyspace::make_directory_for_column_family(const sstring& name, table_id uuid) {
|
||||
std::vector<sstring> cfdirs;
|
||||
for (auto& extra : _config.all_datadirs) {
|
||||
cfdirs.push_back(column_family_directory(extra, name, uuid));
|
||||
@@ -1294,7 +1294,7 @@ schema_ptr database::find_schema(const sstring& ks_name, const sstring& cf_name)
|
||||
}
|
||||
}
|
||||
|
||||
schema_ptr database::find_schema(const utils::UUID& uuid) const {
|
||||
schema_ptr database::find_schema(const table_id& uuid) const {
|
||||
return find_column_family(uuid).schema();
|
||||
}
|
||||
|
||||
@@ -1349,7 +1349,7 @@ database::create_keyspace(const lw_shared_ptr<keyspace_metadata>& ksm, locator::
|
||||
|
||||
future<>
|
||||
database::drop_caches() const {
|
||||
std::unordered_map<utils::UUID, lw_shared_ptr<column_family>> tables = get_column_families();
|
||||
std::unordered_map<table_id, lw_shared_ptr<column_family>> tables = get_column_families();
|
||||
for (auto&& e : tables) {
|
||||
table& t = *e.second;
|
||||
co_await t.get_row_cache().invalidate(row_cache::external_updater([] {}));
|
||||
@@ -2292,7 +2292,7 @@ future<> database::flush(const sstring& ksname, const sstring& cfname) {
|
||||
return cf.flush();
|
||||
}
|
||||
|
||||
future<> database::flush_table_on_all_shards(sharded<database>& sharded_db, utils::UUID id) {
|
||||
future<> database::flush_table_on_all_shards(sharded<database>& sharded_db, table_id id) {
|
||||
return sharded_db.invoke_on_all([id] (replica::database& db) {
|
||||
return db.find_column_family(id).flush();
|
||||
});
|
||||
@@ -2489,7 +2489,7 @@ static sstring get_snapshot_table_dir_prefix(const sstring& table_name) {
|
||||
return table_name + "-";
|
||||
}
|
||||
|
||||
static std::pair<sstring, utils::UUID> extract_cf_name_and_uuid(const sstring& directory_name) {
|
||||
static std::pair<sstring, table_id> extract_cf_name_and_uuid(const sstring& directory_name) {
|
||||
// cf directory is of the form: 'cf_name-uuid'
|
||||
// uuid is assumed to be exactly 32 hex characters wide.
|
||||
constexpr size_t uuid_size = 32;
|
||||
@@ -2497,7 +2497,7 @@ static std::pair<sstring, utils::UUID> extract_cf_name_and_uuid(const sstring& d
|
||||
if (pos <= 0 || directory_name[pos] != '-') {
|
||||
on_internal_error(dblog, format("table directory entry name '{}' is invalid: no '-' separator found at pos {}", directory_name, pos));
|
||||
}
|
||||
return std::make_pair(directory_name.substr(0, pos), utils::UUID(directory_name.substr(pos + 1)));
|
||||
return std::make_pair(directory_name.substr(0, pos), table_id(utils::UUID(directory_name.substr(pos + 1))));
|
||||
}
|
||||
|
||||
future<std::vector<database::snapshot_details_result>> database::get_snapshot_details() {
|
||||
@@ -2741,10 +2741,10 @@ flat_mutation_reader_v2 make_multishard_streaming_reader(distributed<replica::da
|
||||
reader_concurrency_semaphore* semaphore;
|
||||
};
|
||||
distributed<replica::database>& _db;
|
||||
utils::UUID _table_id;
|
||||
table_id _table_id;
|
||||
std::vector<reader_context> _contexts;
|
||||
public:
|
||||
streaming_reader_lifecycle_policy(distributed<replica::database>& db, utils::UUID table_id) : _db(db), _table_id(table_id), _contexts(smp::count) {
|
||||
streaming_reader_lifecycle_policy(distributed<replica::database>& db, table_id table_id) : _db(db), _table_id(table_id), _contexts(smp::count) {
|
||||
}
|
||||
virtual flat_mutation_reader_v2 create_reader(
|
||||
schema_ptr schema,
|
||||
|
||||
@@ -1187,7 +1187,7 @@ public:
|
||||
}
|
||||
|
||||
column_family::config make_column_family_config(const schema& s, const database& db) const;
|
||||
future<> make_directory_for_column_family(const sstring& name, utils::UUID uuid);
|
||||
future<> make_directory_for_column_family(const sstring& name, table_id uuid);
|
||||
void add_or_update_column_family(const schema_ptr& s);
|
||||
void add_user_type(const user_type ut);
|
||||
void remove_user_type(const user_type ut);
|
||||
@@ -1204,8 +1204,8 @@ public:
|
||||
return _config.datadir;
|
||||
}
|
||||
|
||||
sstring column_family_directory(const sstring& base_path, const sstring& name, utils::UUID uuid) const;
|
||||
sstring column_family_directory(const sstring& name, utils::UUID uuid) const;
|
||||
sstring column_family_directory(const sstring& base_path, const sstring& name, table_id uuid) const;
|
||||
sstring column_family_directory(const sstring& name, table_id uuid) const;
|
||||
|
||||
future<> ensure_populated() const;
|
||||
void mark_as_populated();
|
||||
@@ -1323,9 +1323,9 @@ private:
|
||||
db::per_partition_rate_limit::info> _apply_stage;
|
||||
|
||||
flat_hash_map<sstring, keyspace> _keyspaces;
|
||||
std::unordered_map<utils::UUID, lw_shared_ptr<column_family>> _column_families;
|
||||
std::unordered_map<table_id, lw_shared_ptr<column_family>> _column_families;
|
||||
using ks_cf_to_uuid_t =
|
||||
flat_hash_map<std::pair<sstring, sstring>, utils::UUID, utils::tuple_hash, string_pair_eq>;
|
||||
flat_hash_map<std::pair<sstring, sstring>, table_id, utils::tuple_hash, string_pair_eq>;
|
||||
ks_cf_to_uuid_t _ks_cf_to_uuid;
|
||||
std::unique_ptr<db::commitlog> _commitlog;
|
||||
std::unique_ptr<db::commitlog> _schema_commitlog;
|
||||
@@ -1480,8 +1480,8 @@ public:
|
||||
future<> add_column_family_and_make_directory(schema_ptr schema);
|
||||
|
||||
/* throws no_such_column_family if missing */
|
||||
const utils::UUID& find_uuid(std::string_view ks, std::string_view cf) const;
|
||||
const utils::UUID& find_uuid(const schema_ptr&) const;
|
||||
const table_id& find_uuid(std::string_view ks, std::string_view cf) const;
|
||||
const table_id& find_uuid(const schema_ptr&) const;
|
||||
|
||||
/**
|
||||
* Creates a keyspace for a given metadata if it still doesn't exist.
|
||||
@@ -1502,13 +1502,13 @@ public:
|
||||
std::vector<sstring> get_all_keyspaces() const;
|
||||
column_family& find_column_family(std::string_view ks, std::string_view name);
|
||||
const column_family& find_column_family(std::string_view ks, std::string_view name) const;
|
||||
column_family& find_column_family(const utils::UUID&);
|
||||
const column_family& find_column_family(const utils::UUID&) const;
|
||||
column_family& find_column_family(const table_id&);
|
||||
const column_family& find_column_family(const table_id&) const;
|
||||
column_family& find_column_family(const schema_ptr&);
|
||||
const column_family& find_column_family(const schema_ptr&) const;
|
||||
bool column_family_exists(const utils::UUID& uuid) const;
|
||||
bool column_family_exists(const table_id& uuid) const;
|
||||
schema_ptr find_schema(const sstring& ks_name, const sstring& cf_name) const;
|
||||
schema_ptr find_schema(const utils::UUID&) const;
|
||||
schema_ptr find_schema(const table_id&) const;
|
||||
bool has_schema(std::string_view ks_name, std::string_view cf_name) const;
|
||||
std::set<sstring> existing_index_names(const sstring& ks_name, const sstring& cf_to_exclude = sstring()) const;
|
||||
sstring get_available_index_name(const sstring& ks_name, const sstring& cf_name,
|
||||
@@ -1587,11 +1587,11 @@ public:
|
||||
return _keyspaces;
|
||||
}
|
||||
|
||||
const std::unordered_map<utils::UUID, lw_shared_ptr<column_family>>& get_column_families() const {
|
||||
const std::unordered_map<table_id, lw_shared_ptr<column_family>>& get_column_families() const {
|
||||
return _column_families;
|
||||
}
|
||||
|
||||
std::unordered_map<utils::UUID, lw_shared_ptr<column_family>>& get_column_families() {
|
||||
std::unordered_map<table_id, lw_shared_ptr<column_family>>& get_column_families() {
|
||||
return _column_families;
|
||||
}
|
||||
|
||||
@@ -1629,7 +1629,7 @@ public:
|
||||
future<> flush_all_memtables();
|
||||
future<> flush(const sstring& ks, const sstring& cf);
|
||||
// flush a table identified by the given id on all shards.
|
||||
static future<> flush_table_on_all_shards(sharded<database>& sharded_db, utils::UUID id);
|
||||
static future<> flush_table_on_all_shards(sharded<database>& sharded_db, table_id id);
|
||||
// flush a single table in a keyspace on all shards.
|
||||
static future<> flush_table_on_all_shards(sharded<database>& sharded_db, std::string_view ks_name, std::string_view table_name);
|
||||
// flush a list of tables in a keyspace on all shards.
|
||||
@@ -1646,7 +1646,7 @@ public:
|
||||
private:
|
||||
future<> detach_column_family(table& cf);
|
||||
|
||||
static future<std::vector<foreign_ptr<lw_shared_ptr<table>>>> get_table_on_all_shards(sharded<database>& db, utils::UUID uuid);
|
||||
static future<std::vector<foreign_ptr<lw_shared_ptr<table>>>> get_table_on_all_shards(sharded<database>& db, table_id uuid);
|
||||
|
||||
struct table_truncate_state {
|
||||
gate::holder holder;
|
||||
|
||||
@@ -70,7 +70,7 @@ io_error_handler error_handler_gen_for_upload_dir(disk_error_signal_type& dummy)
|
||||
// global_column_family_ptr provides a way to easily retrieve local instance of a given column family.
|
||||
class global_column_family_ptr {
|
||||
distributed<replica::database>& _db;
|
||||
utils::UUID _id;
|
||||
table_id _id;
|
||||
private:
|
||||
replica::column_family& get() const { return _db.local().find_column_family(_id); }
|
||||
public:
|
||||
@@ -357,7 +357,7 @@ distributed_loader::process_upload_dir(distributed<replica::database>& db, distr
|
||||
});
|
||||
}
|
||||
|
||||
future<std::tuple<utils::UUID, std::vector<std::vector<sstables::shared_sstable>>>>
|
||||
future<std::tuple<table_id, std::vector<std::vector<sstables::shared_sstable>>>>
|
||||
distributed_loader::get_sstables_from_upload_dir(distributed<replica::database>& db, sstring ks, sstring cf) {
|
||||
return seastar::async([&db, ks = std::move(ks), cf = std::move(cf)] {
|
||||
global_column_family_ptr global_table(db, ks, cf);
|
||||
@@ -539,7 +539,7 @@ future<> distributed_loader::populate_keyspace(distributed<replica::database>& d
|
||||
auto& column_families = db.local().get_column_families();
|
||||
|
||||
co_await coroutine::parallel_for_each(ks.metadata()->cf_meta_data() | boost::adaptors::map_values, [&] (schema_ptr s) -> future<> {
|
||||
utils::UUID uuid = s->id();
|
||||
auto uuid = s->id();
|
||||
lw_shared_ptr<replica::column_family> cf = column_families[uuid];
|
||||
sstring cfname = cf->schema()->cf_name();
|
||||
auto sstdir = ks.column_family_directory(ksdir, cfname, uuid);
|
||||
|
||||
@@ -85,7 +85,7 @@ public:
|
||||
// Each entry with index of idx should be accessed on shard idx only.
|
||||
// Each entry contains a vector of sstables for this shard.
|
||||
// The table UUID is returned too.
|
||||
static future<std::tuple<utils::UUID, std::vector<std::vector<sstables::shared_sstable>>>>
|
||||
static future<std::tuple<table_id, std::vector<std::vector<sstables::shared_sstable>>>>
|
||||
get_sstables_from_upload_dir(distributed<replica::database>& db, sstring ks, sstring cf);
|
||||
static future<> process_upload_dir(distributed<replica::database>& db, distributed<db::system_distributed_keyspace>& sys_dist_ks,
|
||||
distributed<db::view::view_update_generator>& view_update_generator, sstring ks_name, sstring cf_name);
|
||||
|
||||
schema.cc
@@ -310,7 +310,7 @@ const column_mapping& schema::get_column_mapping() const {
return _column_mapping;
}

schema::raw_schema::raw_schema(utils::UUID id)
schema::raw_schema::raw_schema(table_id id)
: _id(id)
, _partitioner(::get_partitioner(default_partitioner_name))
, _sharder(::get_sharder(smp::count, default_partitioner_ignore_msb))
@@ -441,7 +441,7 @@ schema::schema(reversed_tag, const schema& o)
{
}

lw_shared_ptr<const schema> make_shared_schema(std::optional<utils::UUID> id, std::string_view ks_name,
lw_shared_ptr<const schema> make_shared_schema(std::optional<table_id> id, std::string_view ks_name,
std::string_view cf_name, std::vector<schema::column> partition_key, std::vector<schema::column> clustering_key,
std::vector<schema::column> regular_columns, std::vector<schema::column> static_columns,
data_type regular_column_name_type, sstring comment) {
@@ -554,7 +554,7 @@ bool index_metadata::equals_noname(const index_metadata& other) const {
return _kind == other._kind && _options == other._options;
}

const utils::UUID& index_metadata::id() const {
const table_id& index_metadata::id() const {
return _id;
}

@@ -752,11 +752,11 @@ static std::ostream& column_definition_as_cql_key(std::ostream& os, const column
return os;
}

static bool is_global_index(replica::database& db, const utils::UUID& id, const schema& s) {
static bool is_global_index(replica::database& db, const table_id& id, const schema& s) {
return db.find_column_family(id).get_index_manager().is_global_index(s);
}

static bool is_index(replica::database& db, const utils::UUID& id, const schema& s) {
static bool is_index(replica::database& db, const table_id& id, const schema& s) {
return db.find_column_family(id).get_index_manager().is_index(s);
}

@@ -910,9 +910,9 @@ bool operator==(const column_definition& x, const column_definition& y)
}

// Based on org.apache.cassandra.config.CFMetaData#generateLegacyCfId
utils::UUID
table_id
generate_legacy_id(const sstring& ks_name, const sstring& cf_name) {
return utils::UUID_gen::get_name_UUID(ks_name + cf_name);
return table_id(utils::UUID_gen::get_name_UUID(ks_name + cf_name));
}

bool thrift_schema::has_compound_comparator() const {

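The generate_legacy_id() change above shows the conversion discipline the patch establishes: crossing between the strong type and a raw UUID is explicit in both directions. A hypothetical fragment, using only the uuid() accessor and explicit constructor seen throughout this diff:

// Sketch only, not code from this patch.
void conversion_example(const sstring& ks_name, const sstring& cf_name) {
    table_id tid = generate_legacy_id(ks_name, cf_name); // already strongly typed
    utils::UUID raw = tid.uuid();                         // unwrapping is spelled out
    table_id again = table_id(raw);                       // wrapping is spelled out too
    // table_id bad = raw;                                // intentionally does not compile
    (void)again;
}
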
@@ -944,8 +944,8 @@ schema_builder& schema_builder::with_null_sharder() {
|
||||
}
|
||||
|
||||
schema_builder::schema_builder(std::string_view ks_name, std::string_view cf_name,
|
||||
std::optional<utils::UUID> id, data_type rct)
|
||||
: _raw(id ? *id : utils::UUID_gen::get_time_UUID())
|
||||
std::optional<table_id> id, data_type rct)
|
||||
: _raw(id ? *id : table_id(utils::UUID_gen::get_time_UUID()))
|
||||
{
|
||||
// Various schema-creation commands (creating tables, indexes, etc.)
|
||||
// usually place limits on which characters are allowed in keyspace or
|
||||
@@ -989,7 +989,7 @@ schema_builder::schema_builder(const schema::raw_schema& raw)
|
||||
}
|
||||
|
||||
schema_builder::schema_builder(
|
||||
std::optional<utils::UUID> id,
|
||||
std::optional<table_id> id,
|
||||
std::string_view ks_name,
|
||||
std::string_view cf_name,
|
||||
std::vector<schema::column> partition_key,
|
||||
@@ -1204,7 +1204,7 @@ void schema_builder::prepare_dense_schema(schema::raw_schema& raw) {
|
||||
}
|
||||
}
|
||||
|
||||
schema_builder& schema_builder::with_view_info(utils::UUID base_id, sstring base_name, bool include_all_columns, sstring where_clause) {
|
||||
schema_builder& schema_builder::with_view_info(table_id base_id, sstring base_name, bool include_all_columns, sstring where_clause) {
|
||||
_view_info = raw_view_info(std::move(base_id), std::move(base_name), include_all_columns, std::move(where_clause));
|
||||
return *this;
|
||||
}
|
||||
@@ -1675,7 +1675,7 @@ schema_ptr schema::get_reversed() const {
|
||||
});
|
||||
}
|
||||
|
||||
raw_view_info::raw_view_info(utils::UUID base_id, sstring base_name, bool include_all_columns, sstring where_clause)
|
||||
raw_view_info::raw_view_info(table_id base_id, sstring base_name, bool include_all_columns, sstring where_clause)
|
||||
: _base_id(std::move(base_id))
|
||||
, _base_name(std::move(base_name))
|
||||
, _include_all_columns(include_all_columns)
|
||||
|
||||
schema.hh
@@ -248,7 +248,7 @@ public:
|
||||
struct is_local_index_tag {};
|
||||
using is_local_index = bool_class<is_local_index_tag>;
|
||||
private:
|
||||
utils::UUID _id;
|
||||
table_id _id;
|
||||
sstring _name;
|
||||
index_metadata_kind _kind;
|
||||
index_options_map _options;
|
||||
@@ -257,7 +257,7 @@ public:
|
||||
index_metadata(const sstring& name, const index_options_map& options, index_metadata_kind kind, is_local_index local);
|
||||
bool operator==(const index_metadata& other) const;
|
||||
bool equals_noname(const index_metadata& other) const;
|
||||
const utils::UUID& id() const;
|
||||
const table_id& id() const;
|
||||
const sstring& name() const;
|
||||
const index_metadata_kind kind() const;
|
||||
const index_options_map& options() const;
|
||||
@@ -486,14 +486,14 @@ bool operator==(const column_mapping& lhs, const column_mapping& rhs);
|
||||
* Effectively immutable.
|
||||
*/
|
||||
class raw_view_info final {
|
||||
utils::UUID _base_id;
|
||||
table_id _base_id;
|
||||
sstring _base_name;
|
||||
bool _include_all_columns;
|
||||
sstring _where_clause;
|
||||
public:
|
||||
raw_view_info(utils::UUID base_id, sstring base_name, bool include_all_columns, sstring where_clause);
|
||||
raw_view_info(table_id base_id, sstring base_name, bool include_all_columns, sstring where_clause);
|
||||
|
||||
const utils::UUID& base_id() const {
|
||||
const table_id& base_id() const {
|
||||
return _base_id;
|
||||
}
|
||||
|
||||
@@ -595,8 +595,8 @@ private:
|
||||
// More complex fields are derived from these inside rebuild().
|
||||
// Contains only fields which can be safely default-copied.
|
||||
struct raw_schema {
|
||||
raw_schema(utils::UUID id);
|
||||
utils::UUID _id;
|
||||
raw_schema(table_id id);
|
||||
table_id _id;
|
||||
sstring _ks_name;
|
||||
sstring _cf_name;
|
||||
// regular columns are sorted by name
|
||||
@@ -735,7 +735,7 @@ public:
|
||||
const thrift_schema& thrift() const {
|
||||
return _thrift;
|
||||
}
|
||||
const utils::UUID& id() const {
|
||||
const table_id& id() const {
|
||||
return _raw._id;
|
||||
}
|
||||
const sstring& comment() const {
|
||||
@@ -996,7 +996,7 @@ public:
|
||||
schema_ptr get_reversed() const;
|
||||
};
|
||||
|
||||
lw_shared_ptr<const schema> make_shared_schema(std::optional<utils::UUID> id, std::string_view ks_name, std::string_view cf_name,
|
||||
lw_shared_ptr<const schema> make_shared_schema(std::optional<table_id> id, std::string_view ks_name, std::string_view cf_name,
|
||||
std::vector<schema::column> partition_key, std::vector<schema::column> clustering_key, std::vector<schema::column> regular_columns,
|
||||
std::vector<schema::column> static_columns, data_type regular_column_name_type, sstring comment = "");
|
||||
|
||||
@@ -1031,7 +1031,7 @@ public:
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, const view_ptr& view);
|
||||
|
||||
utils::UUID generate_legacy_id(const sstring& ks_name, const sstring& cf_name);
|
||||
table_id generate_legacy_id(const sstring& ks_name, const sstring& cf_name);
|
||||
|
||||
|
||||
// Thrown when attempted to access a schema-dependent object using
|
||||
|
||||
@@ -29,10 +29,10 @@ private:
|
||||
schema_builder(const schema::raw_schema&);
|
||||
public:
|
||||
schema_builder(std::string_view ks_name, std::string_view cf_name,
|
||||
std::optional<utils::UUID> = { },
|
||||
std::optional<table_id> = { },
|
||||
data_type regular_column_name_type = utf8_type);
|
||||
schema_builder(
|
||||
std::optional<utils::UUID> id,
|
||||
std::optional<table_id> id,
|
||||
std::string_view ks_name,
|
||||
std::string_view cf_name,
|
||||
std::vector<schema::column> partition_key,
|
||||
@@ -43,11 +43,11 @@ public:
|
||||
sstring comment = "");
|
||||
schema_builder(const schema_ptr);
|
||||
|
||||
schema_builder& set_uuid(const utils::UUID& id) {
|
||||
schema_builder& set_uuid(const table_id& id) {
|
||||
_raw._id = id;
|
||||
return *this;
|
||||
}
|
||||
const utils::UUID& uuid() const {
|
||||
const table_id& uuid() const {
|
||||
return _raw._id;
|
||||
}
|
||||
schema_builder& set_regular_column_name_type(const data_type& t) {
|
||||
@@ -273,7 +273,7 @@ public:
|
||||
schema_builder& with(compact_storage);
|
||||
schema_builder& with_version(table_schema_version);
|
||||
|
||||
schema_builder& with_view_info(utils::UUID base_id, sstring base_name, bool include_all_columns, sstring where_clause);
|
||||
schema_builder& with_view_info(table_id base_id, sstring base_name, bool include_all_columns, sstring where_clause);
|
||||
schema_builder& with_view_info(const schema& base_schema, bool include_all_columns, sstring where_clause) {
|
||||
return with_view_info(base_schema.id(), base_schema.cf_name(), include_all_columns, where_clause);
|
||||
}
|
||||
|
||||
@@ -10,6 +10,8 @@

#include <seastar/core/shared_ptr.hh>

#include "utils/UUID.hh"

using column_count_type = uint32_t;

// Column ID, unique within column_kind
@@ -19,3 +21,5 @@ class schema;
class schema_extension;

using schema_ptr = seastar::lw_shared_ptr<const schema>;

using table_id = utils::tagged_uuid<struct table_id_tag>;

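For readers who have not seen utils::tagged_uuid, a minimal sketch of the idea follows. It is an illustration only, written against the interface this diff relies on (explicit construction from utils::UUID, a uuid() accessor, equality, and hashing for use as an unordered_map key); the real in-tree type, modeled after the raft tagged id, may differ in detail.

// Illustration only -- not the in-tree utils::tagged_uuid.
// The Tag parameter exists purely to make distinct id types incompatible.
namespace sketch {

template <typename Tag>
class tagged_uuid {
    utils::UUID _id;
public:
    tagged_uuid() = default;
    explicit tagged_uuid(const utils::UUID& id) : _id(id) {} // no silent wrapping
    const utils::UUID& uuid() const { return _id; }          // unwrapping is explicit
    bool operator==(const tagged_uuid&) const = default;
};

} // namespace sketch

namespace std {
// Delegating hashing to the wrapped UUID is what lets the type key
// unordered maps such as database::_column_families in this patch.
template <typename Tag>
struct hash<sketch::tagged_uuid<Tag>> {
    size_t operator()(const sketch::tagged_uuid<Tag>& id) const noexcept {
        return hash<utils::UUID>()(id.uuid()); // assumes hash<utils::UUID>, as in the tree
    }
};
}

// With distinct tags, a table id can no longer be confused with another
// uuid-class type such as a schema version:
//   using table_id_sketch       = sketch::tagged_uuid<struct table_id_tag>;
//   using schema_version_sketch = sketch::tagged_uuid<struct schema_version_tag>;
//   table_id_sketch t{...}; schema_version_sketch v = t; // does not compile
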
@@ -9,7 +9,7 @@
#pragma once

#include "replica/database_fwd.hh"
#include "utils/UUID.hh"
#include "schema_fwd.hh"
#include <seastar/core/timer.hh>
#include <seastar/core/sharded.hh>

@@ -35,7 +35,7 @@ class cache_hitrate_calculator : public seastar::async_sharded_service<cache_hit
timer<lowres_clock> _timer;
bool _stopped = false;
float _diff = 0;
std::unordered_map<utils::UUID, stat> _rates;
std::unordered_map<table_id, stat> _rates;
size_t _slen = 0;
std::string _gstate;
uint64_t _published_nr = 0;

@@ -1120,7 +1120,7 @@ future<> migration_manager::sync_schema(const replica::database& db, const std::
});
}

future<column_mapping> get_column_mapping(utils::UUID table_id, table_schema_version v) {
future<column_mapping> get_column_mapping(table_id table_id, table_schema_version v) {
schema_ptr s = local_schema_registry().get_or_null(v);
if (s) {
return make_ready_future<column_mapping>(s->get_column_mapping());

@@ -222,6 +222,6 @@ public:
void set_concurrent_ddl_retries(size_t);
};

future<column_mapping> get_column_mapping(utils::UUID table_id, table_schema_version v);
future<column_mapping> get_column_mapping(table_id, table_schema_version v);

}

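This is the kind of call site the strong type protects: the two uuid-class arguments can no longer be passed in the wrong order. A hypothetical caller, assuming schema::id() and schema::version() return table_id and table_schema_version respectively, as elsewhere in this diff:

// Sketch only, not code from this patch.
future<column_mapping> lookup_mapping(schema_ptr s) {
    return get_column_mapping(s->id(), s->version());    // OK
    // return get_column_mapping(s->version(), s->id()); // no longer compiles
}
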
@@ -136,26 +136,26 @@ void cache_hitrate_calculator::run_on(size_t master, lowres_clock::duration d) {
|
||||
}
|
||||
|
||||
future<lowres_clock::duration> cache_hitrate_calculator::recalculate_hitrates() {
|
||||
auto non_system_filter = [&] (const std::pair<utils::UUID, lw_shared_ptr<replica::column_family>>& cf) {
|
||||
auto non_system_filter = [&] (const std::pair<table_id, lw_shared_ptr<replica::column_family>>& cf) {
|
||||
return _db.local().find_keyspace(cf.second->schema()->ks_name()).get_replication_strategy().get_type() != locator::replication_strategy_type::local;
|
||||
};
|
||||
|
||||
auto cf_to_cache_hit_stats = [non_system_filter] (replica::database& db) {
|
||||
return boost::copy_range<std::unordered_map<utils::UUID, stat>>(db.get_column_families() | boost::adaptors::filtered(non_system_filter) |
|
||||
boost::adaptors::transformed([] (const std::pair<utils::UUID, lw_shared_ptr<replica::column_family>>& cf) {
|
||||
return boost::copy_range<std::unordered_map<table_id, stat>>(db.get_column_families() | boost::adaptors::filtered(non_system_filter) |
|
||||
boost::adaptors::transformed([] (const std::pair<table_id, lw_shared_ptr<replica::column_family>>& cf) {
|
||||
auto& stats = cf.second->get_row_cache().stats();
|
||||
return std::make_pair(cf.first, stat{float(stats.reads_with_no_misses.rate().rates[0]), float(stats.reads_with_misses.rate().rates[0])});
|
||||
}));
|
||||
};
|
||||
|
||||
auto sum_stats_per_cf = [] (std::unordered_map<utils::UUID, stat> a, std::unordered_map<utils::UUID, stat> b) {
|
||||
auto sum_stats_per_cf = [] (std::unordered_map<table_id, stat> a, std::unordered_map<table_id, stat> b) {
|
||||
for (auto& r : b) {
|
||||
a[r.first] += r.second;
|
||||
}
|
||||
return a;
|
||||
};
|
||||
|
||||
return _db.map_reduce0(cf_to_cache_hit_stats, std::unordered_map<utils::UUID, stat>(), sum_stats_per_cf).then([this, non_system_filter] (std::unordered_map<utils::UUID, stat> rates) mutable {
|
||||
return _db.map_reduce0(cf_to_cache_hit_stats, std::unordered_map<table_id, stat>(), sum_stats_per_cf).then([this, non_system_filter] (std::unordered_map<table_id, stat> rates) mutable {
|
||||
_diff = 0;
|
||||
_gstate.reserve(_slen); // assume length did not change from previous iteration
|
||||
_slen = 0;
|
||||
|
||||
@@ -115,7 +115,7 @@ public:
|
||||
} // anonymous namespace
|
||||
|
||||
future<> sstables_loader::load_and_stream(sstring ks_name, sstring cf_name,
|
||||
utils::UUID table_id, std::vector<sstables::shared_sstable> sstables, bool primary_replica_only) {
|
||||
::table_id table_id, std::vector<sstables::shared_sstable> sstables, bool primary_replica_only) {
|
||||
const auto full_partition_range = dht::partition_range::make_open_ended_both_sides();
|
||||
const auto full_token_range = dht::token_range::make_open_ended_both_sides();
|
||||
auto& table = _db.local().find_column_family(table_id);
|
||||
@@ -249,7 +249,7 @@ future<> sstables_loader::load_new_sstables(sstring ks_name, sstring cf_name,
|
||||
ks_name, cf_name, load_and_stream, primary_replica_only);
|
||||
try {
|
||||
if (load_and_stream) {
|
||||
utils::UUID table_id;
|
||||
::table_id table_id;
|
||||
std::vector<std::vector<sstables::shared_sstable>> sstables_on_shards;
|
||||
std::tie(table_id, sstables_on_shards) = co_await replica::distributed_loader::get_sstables_from_upload_dir(_db, ks_name, cf_name);
|
||||
co_await container().invoke_on_all([&sstables_on_shards, ks_name, cf_name, table_id, primary_replica_only] (sstables_loader& loader) mutable -> future<> {
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <seastar/core/sharded.hh>
|
||||
#include "utils/UUID.hh"
|
||||
#include "schema_fwd.hh"
|
||||
#include "sstables/shared_sstable.hh"
|
||||
|
||||
using namespace seastar;
|
||||
@@ -45,7 +45,7 @@ class sstables_loader : public seastar::peering_sharded_service<sstables_loader>
|
||||
bool _loading_new_sstables = false;
|
||||
|
||||
future<> load_and_stream(sstring ks_name, sstring cf_name,
|
||||
utils::UUID table_id, std::vector<sstables::shared_sstable> sstables,
|
||||
table_id, std::vector<sstables::shared_sstable> sstables,
|
||||
bool primary_replica_only);
|
||||
|
||||
public:
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
#pragma once
|
||||
|
||||
#include "query-request.hh"
|
||||
#include "utils/UUID.hh"
|
||||
#include "schema_fwd.hh"
|
||||
#include <vector>
|
||||
#include "range.hh"
|
||||
#include "dht/i_partitioner.hh"
|
||||
@@ -19,10 +19,9 @@
|
||||
namespace streaming {
|
||||
|
||||
struct stream_detail {
|
||||
using UUID = utils::UUID;
|
||||
UUID cf_id;
|
||||
table_id cf_id;
|
||||
stream_detail() = default;
|
||||
stream_detail(UUID cf_id_)
|
||||
stream_detail(table_id cf_id_)
|
||||
: cf_id(std::move(cf_id_)) {
|
||||
}
|
||||
};
|
||||
|
||||
@@ -341,7 +341,7 @@ future<> stream_manager::on_dead(inet_address endpoint, endpoint_state ep_state)
|
||||
return make_ready_future();
|
||||
}
|
||||
|
||||
shared_ptr<stream_session> stream_manager::get_session(utils::UUID plan_id, gms::inet_address from, const char* verb, std::optional<utils::UUID> cf_id) {
|
||||
shared_ptr<stream_session> stream_manager::get_session(utils::UUID plan_id, gms::inet_address from, const char* verb, std::optional<table_id> cf_id) {
|
||||
if (cf_id) {
|
||||
sslog.debug("[Stream #{}] GOT {} from {}: cf_id={}", plan_id, verb, from, *cf_id);
|
||||
} else {
|
||||
|
||||
@@ -169,7 +169,7 @@ public:
|
||||
|
||||
stream_bytes get_progress_on_local_shard() const;
|
||||
|
||||
shared_ptr<stream_session> get_session(utils::UUID plan_id, gms::inet_address from, const char* verb, std::optional<utils::UUID> cf_id = {});
|
||||
shared_ptr<stream_session> get_session(utils::UUID plan_id, gms::inet_address from, const char* verb, std::optional<table_id> cf_id = {});
|
||||
|
||||
public:
|
||||
virtual future<> on_join(inet_address endpoint, endpoint_state ep_state) override { return make_ready_future(); }
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
|
||||
namespace streaming {
|
||||
|
||||
stream_receive_task::stream_receive_task(shared_ptr<stream_session> _session, UUID _cf_id, int _total_files, long _total_size)
|
||||
stream_receive_task::stream_receive_task(shared_ptr<stream_session> _session, table_id _cf_id, int _total_files, long _total_size)
|
||||
: stream_task(_session, _cf_id)
|
||||
, total_files(_total_files)
|
||||
, total_size(_total_size) {
|
||||
|
||||
@@ -28,7 +28,7 @@ private:
|
||||
// total size of files to receive
|
||||
long total_size;
|
||||
public:
|
||||
stream_receive_task(shared_ptr<stream_session> _session, UUID _cf_id, int _total_files, long _total_size);
|
||||
stream_receive_task(shared_ptr<stream_session> _session, table_id _cf_id, int _total_files, long _total_size);
|
||||
~stream_receive_task();
|
||||
|
||||
virtual int get_total_number_of_files() const override {
|
||||
|
||||
@@ -74,7 +74,7 @@ void stream_manager::init_messaging_service_handler() {
|
||||
return make_ready_future<>();
|
||||
});
|
||||
});
|
||||
ms.register_stream_mutation_fragments([this] (const rpc::client_info& cinfo, UUID plan_id, UUID schema_id, UUID cf_id, uint64_t estimated_partitions, rpc::optional<stream_reason> reason_opt, rpc::source<frozen_mutation_fragment, rpc::optional<stream_mutation_fragments_cmd>> source) {
|
||||
ms.register_stream_mutation_fragments([this] (const rpc::client_info& cinfo, UUID plan_id, UUID schema_id, table_id cf_id, uint64_t estimated_partitions, rpc::optional<stream_reason> reason_opt, rpc::source<frozen_mutation_fragment, rpc::optional<stream_mutation_fragments_cmd>> source) {
|
||||
auto from = netw::messaging_service::get_source(cinfo);
|
||||
auto reason = reason_opt ? *reason_opt: stream_reason::unspecified;
|
||||
sslog.trace("Got stream_mutation_fragments from {} reason {}", from, int(reason));
|
||||
@@ -163,7 +163,7 @@ void stream_manager::init_messaging_service_handler() {
|
||||
});
|
||||
});
|
||||
});
|
||||
ms.register_stream_mutation_done([this] (const rpc::client_info& cinfo, UUID plan_id, dht::token_range_vector ranges, UUID cf_id, unsigned dst_cpu_id) {
|
||||
ms.register_stream_mutation_done([this] (const rpc::client_info& cinfo, UUID plan_id, dht::token_range_vector ranges, table_id cf_id, unsigned dst_cpu_id) {
|
||||
const auto& from = cinfo.retrieve_auxiliary<gms::inet_address>("baddr");
|
||||
return container().invoke_on(dst_cpu_id, [ranges = std::move(ranges), plan_id, cf_id, from] (auto& sm) mutable {
|
||||
auto session = sm.get_session(plan_id, from, "STREAM_MUTATION_DONE", cf_id);
|
||||
@@ -336,14 +336,14 @@ session_info stream_session::make_session_info() {
|
||||
return session_info(peer, std::move(receiving_summaries), std::move(transfer_summaries), _state);
|
||||
}
|
||||
|
||||
void stream_session::receive_task_completed(UUID cf_id) {
|
||||
void stream_session::receive_task_completed(table_id cf_id) {
|
||||
_receivers.erase(cf_id);
|
||||
sslog.debug("[Stream #{}] receive task_completed: cf_id={} done, stream_receive_task.size={} stream_transfer_task.size={}",
|
||||
plan_id(), cf_id, _receivers.size(), _transfers.size());
|
||||
maybe_completed();
|
||||
}
|
||||
|
||||
void stream_session::transfer_task_completed(UUID cf_id) {
|
||||
void stream_session::transfer_task_completed(table_id cf_id) {
|
||||
_transfers.erase(cf_id);
|
||||
sslog.debug("[Stream #{}] transfer task_completed: cf_id={} done, stream_receive_task.size={} stream_transfer_task.size={}",
|
||||
plan_id(), cf_id, _receivers.size(), _transfers.size());
|
||||
@@ -460,7 +460,7 @@ void stream_session::add_transfer_ranges(sstring keyspace, dht::token_range_vect
|
||||
}
|
||||
}
|
||||
|
||||
future<> stream_session::receiving_failed(UUID cf_id)
|
||||
future<> stream_session::receiving_failed(table_id cf_id)
|
||||
{
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
@@ -139,9 +139,9 @@ private:
|
||||
// stream requests to send to the peer
|
||||
std::vector<stream_request> _requests;
|
||||
// streaming tasks are created and managed per ColumnFamily ID
|
||||
std::map<UUID, stream_transfer_task> _transfers;
|
||||
std::map<table_id, stream_transfer_task> _transfers;
|
||||
// data receivers, filled after receiving prepare message
|
||||
std::map<UUID, stream_receive_task> _receivers;
|
||||
std::map<table_id, stream_receive_task> _receivers;
|
||||
//private final StreamingMetrics metrics;
|
||||
/* can be null when session is created in remote */
|
||||
//private final StreamConnectionFactory factory;
|
||||
@@ -312,15 +312,15 @@ public:
|
||||
|
||||
future<> update_progress();
|
||||
|
||||
void receive_task_completed(UUID cf_id);
|
||||
void transfer_task_completed(UUID cf_id);
|
||||
void receive_task_completed(table_id cf_id);
|
||||
void transfer_task_completed(table_id cf_id);
|
||||
void transfer_task_completed_all();
private:
void send_failed_complete_message();
bool maybe_completed();
void prepare_receiving(stream_summary& summary);
void start_streaming_files();
future<> receiving_failed(UUID cf_id);
future<> receiving_failed(table_id cf_id);
};

} // namespace streaming

@@ -10,7 +10,7 @@

#pragma once

#include "utils/UUID.hh"
#include "schema_fwd.hh"
#include <ostream>

namespace streaming {
@@ -20,8 +20,7 @@ namespace streaming {
*/
class stream_summary {
public:
using UUID = utils::UUID;
UUID cf_id;
table_id cf_id;

/**
* Number of files to transfer. Can be 0 if nothing to transfer for some streaming request.
@@ -30,7 +29,7 @@ public:
long total_size;

stream_summary() = default;
stream_summary(UUID _cf_id, int _files, long _total_size)
stream_summary(table_id _cf_id, int _files, long _total_size)
: cf_id (_cf_id)
, files(_files)
, total_size(_total_size) {

@@ -13,7 +13,7 @@

namespace streaming {

stream_task::stream_task(shared_ptr<stream_session> _session, UUID _cf_id)
stream_task::stream_task(shared_ptr<stream_session> _session, table_id _cf_id)
: session(_session)
, cf_id(std::move(_cf_id)) {
}

@@ -10,7 +10,7 @@

#pragma once

#include "utils/UUID.hh"
#include "schema_fwd.hh"
#include "streaming/stream_summary.hh"
#include <memory>
#include <seastar/core/shared_ptr.hh>
@@ -24,13 +24,12 @@ class stream_session;
*/
class stream_task {
public:
using UUID = utils::UUID;
/** StreamSession that this task belongs */
shared_ptr<stream_session> session;

UUID cf_id;
table_id cf_id;

stream_task(shared_ptr<stream_session> _session, UUID _cf_id);
stream_task(shared_ptr<stream_session> _session, table_id _cf_id);
virtual ~stream_task();

public:

@@ -35,7 +35,7 @@ namespace streaming {

extern logging::logger sslog;

stream_transfer_task::stream_transfer_task(shared_ptr<stream_session> session, UUID cf_id, dht::token_range_vector ranges, long total_size)
stream_transfer_task::stream_transfer_task(shared_ptr<stream_session> session, table_id cf_id, dht::token_range_vector ranges, long total_size)
: stream_task(session, cf_id)
, _ranges(std::move(ranges))
, _total_size(total_size) {
@@ -46,7 +46,7 @@ stream_transfer_task::~stream_transfer_task() = default;
struct send_info {
netw::messaging_service& ms;
utils::UUID plan_id;
utils::UUID cf_id;
table_id cf_id;
netw::messaging_service::msg_addr id;
uint32_t dst_cpu_id;
stream_reason reason;

@@ -32,9 +32,8 @@ private:
long _total_size;
bool _mutation_done_sent = false;
public:
using UUID = utils::UUID;
stream_transfer_task(stream_transfer_task&&) = default;
stream_transfer_task(shared_ptr<stream_session> session, UUID cf_id, dht::token_range_vector ranges, long total_size = 0);
stream_transfer_task(shared_ptr<stream_session> session, table_id cf_id, dht::token_range_vector ranges, long total_size = 0);
~stream_transfer_task();
public:
virtual void abort() override {

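The streaming changes above are mechanical: each utils::UUID cf_id member, parameter, and constructor becomes table_id. The payoff of the strong type is that the compiler now rejects mixing a table id with any other UUID-valued quantity, for instance a table_schema_version. A minimal sketch of the idea, using hypothetical stand-in names rather than the actual utils::tagged_uuid template:

    // Sketch only: shows why a strong table-id type catches mix-ups that a bare
    // UUID cannot. The names below are hypothetical stand-ins, not Scylla's types.
    template <typename Tag>
    struct tagged_id_sketch {
        unsigned long long hi = 0, lo = 0;   // stand-in for the wrapped utils::UUID
    };

    using table_id_sketch = tagged_id_sketch<struct table_id_tag>;
    using schema_version_sketch = tagged_id_sketch<struct schema_version_tag>;

    // Mirrors the receiving_failed(table_id) signature change above.
    static void receiving_failed_sketch(table_id_sketch) {}

    int main() {
        table_id_sketch tid;
        schema_version_sketch ver;
        receiving_failed_sketch(tid);    // accepted: the right kind of id
        // receiving_failed_sketch(ver); // rejected at compile time: distinct tagged types
        (void)ver;
    }
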
@@ -50,7 +50,7 @@ SEASTAR_TEST_CASE(test_execute_batch) {
using namespace std::chrono_literals;

auto version = netw::messaging_service::current_version;
auto bm = qp.proxy().get_batchlog_mutation_for({ m }, s->id(), version, db_clock::now() - db_clock::duration(3h));
auto bm = qp.proxy().get_batchlog_mutation_for({ m }, s->id().uuid(), version, db_clock::now() - db_clock::duration(3h));

return qp.proxy().mutate_locally(bm, tracing::trace_state_ptr(), db::commitlog::force_sync::no).then([&bp] () mutable {
return bp.count_all_batches().then([](auto n) {

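Call sites that still talk to a UUID-typed interface, like get_batchlog_mutation_for here, unwrap explicitly with .uuid(), which keeps every widening back to an untyped id visible and greppable. A rough illustration of that boundary, with a made-up legacy function standing in for the real API:

    // Hypothetical sketch of explicit unwrapping at a boundary that still takes a raw UUID.
    #include <cstdint>

    struct raw_uuid_sketch { std::uint64_t hi = 0, lo = 0; };   // stand-in for utils::UUID

    struct table_id_sketch {
        raw_uuid_sketch value;
        const raw_uuid_sketch& uuid() const noexcept { return value; }  // like tagged_uuid::uuid()
    };

    // Legacy-style function that was not converted and still wants a bare UUID.
    static void legacy_api_sketch(const raw_uuid_sketch&) {}

    int main() {
        table_id_sketch id{};
        legacy_api_sketch(id.uuid());  // deliberate, visible unwrap
        // legacy_api_sketch(id);      // would not compile: no implicit conversion back to UUID
    }
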
@@ -24,7 +24,7 @@ SEASTAR_TEST_CASE(test_column_mapping_persistence) {
// column mapping into the history table
cquery_nofail(e, "create table test (pk int PRIMARY KEY, v int)");
auto schema = e.local_db().find_schema("ks", "test");
const utils::UUID table_id = schema->id();
const auto table_id = schema->id();
const table_schema_version v1 = schema->version();
const column_mapping orig_cm = schema->get_column_mapping();

@@ -52,7 +52,7 @@ SEASTAR_TEST_CASE(test_column_mapping_ttl_check) {
// column mapping into the history table
cquery_nofail(e, "create table test (pk int PRIMARY KEY, v int)");
auto schema = e.local_db().find_schema("ks", "test");
const utils::UUID table_id = schema->id();
const auto table_id = schema->id();
const table_schema_version v1 = schema->version();

const sstring select_ttl_query = format(

@@ -70,11 +70,15 @@ static future<> cl_test(noncopyable_function<future<> (commitlog&)> f) {
return cl_test(cfg, std::move(f));
}

static table_id make_table_id() {
return table_id(utils::UUID_gen::get_time_UUID());
}

// just write in-memory...
SEASTAR_TEST_CASE(test_create_commitlog){
return cl_test([](commitlog& log) {
sstring tmp = "hej bubba cow";
return log.add_mutation(utils::UUID_gen::get_time_UUID(), tmp.size(), db::commitlog::force_sync::no, [tmp](db::commitlog::output& dst) {
return log.add_mutation(make_table_id(), tmp.size(), db::commitlog::force_sync::no, [tmp](db::commitlog::output& dst) {
dst.write(tmp.data(), tmp.size());
}).then([](db::replay_position rp) {
BOOST_CHECK_NE(rp, db::replay_position());
@@ -88,7 +92,7 @@ SEASTAR_TEST_CASE(test_commitlog_written_to_disk_batch){
cfg.mode = commitlog::sync_mode::BATCH;
return cl_test(cfg, [](commitlog& log) {
sstring tmp = "hej bubba cow";
return log.add_mutation(utils::UUID_gen::get_time_UUID(), tmp.size(), db::commitlog::force_sync::no, [tmp](db::commitlog::output& dst) {
return log.add_mutation(make_table_id(), tmp.size(), db::commitlog::force_sync::no, [tmp](db::commitlog::output& dst) {
dst.write(tmp.data(), tmp.size());
}).then([&log](replay_position rp) {
BOOST_CHECK_NE(rp, db::replay_position());
@@ -103,7 +107,7 @@ SEASTAR_TEST_CASE(test_commitlog_written_to_disk_sync){
commitlog::config cfg;
return cl_test(cfg, [](commitlog& log) {
sstring tmp = "hej bubba cow";
return log.add_mutation(utils::UUID_gen::get_time_UUID(), tmp.size(), db::commitlog::force_sync::yes, [tmp](db::commitlog::output& dst) {
return log.add_mutation(make_table_id(), tmp.size(), db::commitlog::force_sync::yes, [tmp](db::commitlog::output& dst) {
dst.write(tmp.data(), tmp.size());
}).then([&log](replay_position rp) {
BOOST_CHECK_NE(rp, db::replay_position());
@@ -119,7 +123,7 @@ SEASTAR_TEST_CASE(test_commitlog_written_to_disk_no_sync){
cfg.commitlog_sync_period_in_ms = 10000000000;
return cl_test(cfg, [](commitlog& log) {
sstring tmp = "hej bubba cow";
return log.add_mutation(utils::UUID_gen::get_time_UUID(), tmp.size(), db::commitlog::force_sync::no, [tmp](db::commitlog::output& dst) {
return log.add_mutation(make_table_id(), tmp.size(), db::commitlog::force_sync::no, [tmp](db::commitlog::output& dst) {
dst.write(tmp.data(), tmp.size());
}).then([&log](replay_position rp) {
BOOST_CHECK_NE(rp, db::replay_position());
@@ -132,7 +136,7 @@ SEASTAR_TEST_CASE(test_commitlog_written_to_disk_no_sync){
SEASTAR_TEST_CASE(test_commitlog_written_to_disk_periodic){
return cl_test([](commitlog& log) {
auto state = make_lw_shared<bool>(false);
auto uuid = utils::UUID_gen::get_time_UUID();
auto uuid = make_table_id();
return do_until([state]() {return *state;},
[&log, state, uuid]() {
sstring tmp = "hej bubba cow";
@@ -153,7 +157,7 @@ SEASTAR_TEST_CASE(test_commitlog_new_segment){
cfg.commitlog_segment_size_in_mb = 1;
return cl_test(cfg, [](commitlog& log) {
return do_with(rp_set(), [&log](auto& set) {
auto uuid = utils::UUID_gen::get_time_UUID();
auto uuid = make_table_id();
return do_until([&set]() { return set.size() > 1; }, [&log, &set, uuid]() {
sstring tmp = "hej bubba cow";
return log.add_mutation(uuid, tmp.size(), db::commitlog::force_sync::no, [tmp](db::commitlog::output& dst) {
@@ -189,17 +193,17 @@ SEASTAR_TEST_CASE(test_commitlog_discard_completed_segments){
cfg.commitlog_segment_size_in_mb = 1;
return cl_test(cfg, [](commitlog& log) {
struct state_type {
std::vector<utils::UUID> uuids;
std::unordered_map<utils::UUID, db::rp_set> rps;
std::vector<table_id> uuids;
std::unordered_map<table_id, db::rp_set> rps;

mutable size_t index = 0;

state_type() {
for (int i = 0; i < 10; ++i) {
uuids.push_back(utils::UUID_gen::get_time_UUID());
uuids.push_back(make_table_id());
}
}
const utils::UUID & next_uuid() const {
const table_id& next_uuid() const {
return uuids[index++ % uuids.size()];
}
bool done() const {
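Keying std::unordered_map by table_id, as rps does here and the repair history maps do further down, presupposes a std::hash specialization for the tagged type; the natural shape is to delegate to the hash of the wrapped UUID. A sketch of that arrangement with stand-in types, not the actual utils::tagged_uuid implementation:

    // Sketch: making a strong id type usable as an unordered_map key by forwarding
    // hashing and equality to the wrapped value. Names are hypothetical stand-ins.
    #include <cstdint>
    #include <functional>
    #include <unordered_map>

    struct raw_uuid_sketch {
        std::uint64_t hi = 0, lo = 0;
        bool operator==(const raw_uuid_sketch&) const = default;
    };

    struct table_id_sketch {
        raw_uuid_sketch value;
        bool operator==(const table_id_sketch&) const = default;
    };

    template <>
    struct std::hash<table_id_sketch> {
        std::size_t operator()(const table_id_sketch& id) const noexcept {
            // Combine the two halves; the real type would forward to the UUID's own hash.
            return std::hash<std::uint64_t>{}(id.value.hi) ^ (std::hash<std::uint64_t>{}(id.value.lo) << 1);
        }
    };

    int main() {
        std::unordered_map<table_id_sketch, int> files_per_table;
        files_per_table[table_id_sketch{{1, 2}}] = 3;  // needs both the hash and operator==
        return files_per_table.size() == 1 ? 0 : 1;
    }
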
@@ -250,7 +254,7 @@ SEASTAR_TEST_CASE(test_commitlog_discard_completed_segments){
SEASTAR_TEST_CASE(test_equal_record_limit){
return cl_test([](commitlog& log) {
auto size = log.max_record_size();
return log.add_mutation(utils::UUID_gen::get_time_UUID(), size, db::commitlog::force_sync::no, [size](db::commitlog::output& dst) {
return log.add_mutation(make_table_id(), size, db::commitlog::force_sync::no, [size](db::commitlog::output& dst) {
dst.fill(char(1), size);
}).then([](db::replay_position rp) {
BOOST_CHECK_NE(rp, db::replay_position());
@@ -261,7 +265,7 @@
SEASTAR_TEST_CASE(test_exceed_record_limit){
return cl_test([](commitlog& log) {
auto size = log.max_record_size() + 1;
return log.add_mutation(utils::UUID_gen::get_time_UUID(), size, db::commitlog::force_sync::no, [size](db::commitlog::output& dst) {
return log.add_mutation(make_table_id(), size, db::commitlog::force_sync::no, [size](db::commitlog::output& dst) {
dst.fill(char(1), size);
}).then_wrapped([](future<db::rp_handle> f) {
try {
@@ -280,7 +284,7 @@ SEASTAR_TEST_CASE(test_commitlog_closed) {
return cl_test(cfg, [](commitlog& log) {
return log.shutdown().then([&log] {
sstring tmp = "test321";
auto uuid = utils::UUID_gen::get_time_UUID();
auto uuid = make_table_id();
return log.add_mutation(uuid, tmp.size(), db::commitlog::force_sync::no, [tmp](db::commitlog::output& dst) {
dst.write(tmp.data(), tmp.size());
}).then_wrapped([] (future<db::rp_handle> f) {
@@ -323,7 +327,7 @@ SEASTAR_TEST_CASE(test_commitlog_delete_when_over_disk_limit) {
});

auto set = make_lw_shared<std::set<segment_id_type>>();
auto uuid = utils::UUID_gen::get_time_UUID();
auto uuid = make_table_id();
return do_until([set, sem]() {return set->size() > 2 && sem->try_wait();},
[&log, set, uuid]() {
sstring tmp = "hej bubba cow";
@@ -367,7 +371,7 @@ SEASTAR_TEST_CASE(test_commitlog_reader){
auto set = make_lw_shared<rp_set>();
auto count = make_lw_shared<size_t>(0);
auto count2 = make_lw_shared<size_t>(0);
auto uuid = utils::UUID_gen::get_time_UUID();
auto uuid = make_table_id();
return do_until([count, set]() {return set->size() > 1;},
[&log, uuid, count, set]() {
sstring tmp = "hej bubba cow";
@@ -435,7 +439,7 @@ SEASTAR_TEST_CASE(test_commitlog_entry_corruption){
auto rps = make_lw_shared<std::vector<db::replay_position>>();
return do_until([rps]() {return rps->size() > 1;},
[&log, rps]() {
auto uuid = utils::UUID_gen::get_time_UUID();
auto uuid = make_table_id();
sstring tmp = "hej bubba cow";
return log.add_mutation(uuid, tmp.size(), db::commitlog::force_sync::no, [tmp](db::commitlog::output& dst) {
dst.write(tmp.data(), tmp.size());
@@ -475,7 +479,7 @@ SEASTAR_TEST_CASE(test_commitlog_chunk_corruption){
auto rps = make_lw_shared<std::vector<db::replay_position>>();
return do_until([rps]() {return rps->size() > 1;},
[&log, rps]() {
auto uuid = utils::UUID_gen::get_time_UUID();
auto uuid = make_table_id();
sstring tmp = "hej bubba cow";
return log.add_mutation(uuid, tmp.size(), db::commitlog::force_sync::no, [tmp](db::commitlog::output& dst) {
dst.write(tmp.data(), tmp.size());
@@ -514,7 +518,7 @@ SEASTAR_TEST_CASE(test_commitlog_reader_produce_exception){
auto rps = make_lw_shared<std::vector<db::replay_position>>();
return do_until([rps]() {return rps->size() > 1;},
[&log, rps]() {
auto uuid = utils::UUID_gen::get_time_UUID();
auto uuid = make_table_id();
sstring tmp = "hej bubba cow";
return log.add_mutation(uuid, tmp.size(), db::commitlog::force_sync::no, [tmp](db::commitlog::output& dst) {
dst.write(tmp.data(), tmp.size());
@@ -579,7 +583,7 @@ SEASTAR_TEST_CASE(test_allocation_failure){
}
auto last = junk->end();
junk->erase(--last);
return log.add_mutation(utils::UUID_gen::get_time_UUID(), size, db::commitlog::force_sync::no, [size](db::commitlog::output& dst) {
return log.add_mutation(make_table_id(), size, db::commitlog::force_sync::no, [size](db::commitlog::output& dst) {
dst.fill(char(1), size);
}).then_wrapped([junk, size](future<db::rp_handle> f) {
std::exception_ptr ep;
@@ -718,7 +722,7 @@ SEASTAR_TEST_CASE(test_commitlog_new_segment_odsync){
cfg.use_o_dsync = true;

return cl_test(cfg, [](commitlog& log) -> future<> {
auto uuid = utils::UUID_gen::get_time_UUID();
auto uuid = make_table_id();
rp_set set;
while (set.size() <= 1) {
sstring tmp = "hej bubba cow";
@@ -769,7 +773,7 @@ SEASTAR_TEST_CASE(test_commitlog_deadlock_in_recycle) {
// uncomment for verbosity
// logging::logger_registry().set_logger_level("commitlog", logging::log_level::debug);

auto uuid = utils::UUID_gen::get_time_UUID();
auto uuid = make_table_id();
auto size = log.max_record_size() / 2;

timer<> t;
@@ -844,7 +848,7 @@ SEASTAR_TEST_CASE(test_commitlog_shutdown_during_wait) {
// uncomment for verbosity
//logging::logger_registry().set_logger_level("commitlog", logging::log_level::debug);

auto uuid = utils::UUID_gen::get_time_UUID();
auto uuid = make_table_id();
auto size = log.max_record_size() / 2;

// add a flush handler that does not.
@@ -909,7 +913,7 @@ SEASTAR_TEST_CASE(test_commitlog_deadlock_with_flush_threshold) {
// uncomment for verbosity
// logging::logger_registry().set_logger_level("commitlog", logging::log_level::debug);

auto uuid = utils::UUID_gen::get_time_UUID();
auto uuid = make_table_id();
auto size = log.max_record_size();

bool done = false;
@@ -998,7 +1002,7 @@ static future<> do_test_exception_in_allocate_ex(bool do_file_delete) {
// uncomment for verbosity
// logging::logger_registry().set_logger_level("commitlog", logging::log_level::debug);

auto uuid = utils::UUID_gen::get_time_UUID();
auto uuid = make_table_id();
auto size = log.max_record_size();

auto r = log.add_flush_handler([&](cf_id_type id, replay_position pos) {

@@ -54,7 +54,7 @@ public:
}
};

static future<> apply_mutation(sharded<replica::database>& sharded_db, utils::UUID uuid, const mutation& m, bool do_flush = false,
static future<> apply_mutation(sharded<replica::database>& sharded_db, table_id uuid, const mutation& m, bool do_flush = false,
db::commitlog::force_sync fs = db::commitlog::force_sync::no, db::timeout_clock::time_point timeout = db::no_timeout) {
auto shard = m.shard_of();
return sharded_db.invoke_on(shard, [uuid, fm = freeze(m), do_flush, fs, timeout] (replica::database& db) {

@@ -284,7 +284,7 @@ public:
}

virtual future<> create_table(std::function<schema(std::string_view)> schema_maker) override {
auto id = utils::UUID_gen::get_time_UUID();
auto id = table_id(utils::UUID_gen::get_time_UUID());
schema_builder builder(make_lw_shared<schema>(schema_maker(ks_name)));
builder.set_uuid(id);
auto s = builder.build(schema_builder::compact_storage::no);

@@ -55,7 +55,7 @@ SEASTAR_TEST_CASE(test_commitlog_new_segment_custom_prefix){
cfg.commitlog_segment_size_in_mb = 1;
return cl_test(cfg, [](commitlog& log) {
return do_with(rp_set(), [&log](auto& set) {
auto uuid = utils::UUID_gen::get_time_UUID();
auto uuid = table_id(utils::UUID_gen::get_time_UUID());
return do_until([&set]() { return set.size() > 1; }, [&log, &set, uuid]() {
sstring tmp = "hej bubba cow";
return log.add_mutation(uuid, tmp.size(), db::commitlog::force_sync::no, [tmp](db::commitlog::output& dst) {

@@ -136,7 +136,7 @@ struct commitlog_service {
};

static std::vector<clperf_result> do_commitlog_test(distributed<commitlog_service>& cls, test_config& cfg) {
auto uuid = utils::UUID_gen::get_time_UUID();
auto uuid = table_id(utils::UUID_gen::get_time_UUID());

return time_parallel_ex<clperf_result>([&] {
auto& log = cls.local();

@@ -1310,7 +1310,7 @@ private:
}
return {};
}
static schema_ptr schema_from_thrift(const CfDef& cf_def, const sstring ks_name, std::optional<utils::UUID> id = { }) {
static schema_ptr schema_from_thrift(const CfDef& cf_def, const sstring ks_name, std::optional<table_id> id = { }) {
thrift_validation::validate_cf_def(cf_def);
schema_builder builder(ks_name, cf_def.name, id);
schema_builder::default_names names(builder);

@@ -27,9 +27,9 @@ public:
boost::icl::interval_map<dht::token, gc_clock::time_point, boost::icl::partial_absorber, std::less, boost::icl::inplace_max> map;
};

thread_local std::unordered_map<utils::UUID, seastar::lw_shared_ptr<repair_history_map>> repair_history_maps;
thread_local std::unordered_map<table_id, seastar::lw_shared_ptr<repair_history_map>> repair_history_maps;

static seastar::lw_shared_ptr<repair_history_map> get_or_create_repair_history_map_for_table(const utils::UUID& id) {
static seastar::lw_shared_ptr<repair_history_map> get_or_create_repair_history_map_for_table(const table_id& id) {
auto it = repair_history_maps.find(id);
if (it != repair_history_maps.end()) {
return it->second;
@@ -39,7 +39,7 @@ static seastar::lw_shared_ptr<repair_history_map> get_or_create_repair_history_m
}
}

seastar::lw_shared_ptr<repair_history_map> get_repair_history_map_for_table(const utils::UUID& id) {
seastar::lw_shared_ptr<repair_history_map> get_repair_history_map_for_table(const table_id& id) {
auto it = repair_history_maps.find(id);
if (it != repair_history_maps.end()) {
return it->second;
@@ -48,7 +48,7 @@ seastar::lw_shared_ptr<repair_history_map> get_repair_history_map_for_table(cons
}
}

void drop_repair_history_map_for_table(const utils::UUID& id) {
void drop_repair_history_map_for_table(const table_id& id) {
repair_history_maps.erase(id);
}

@@ -41,7 +41,7 @@ namespace replica {
class database;
}

void drop_repair_history_map_for_table(const utils::UUID& id);
void drop_repair_history_map_for_table(const table_id& id);

get_gc_before_for_range_result get_gc_before_for_range(schema_ptr s, const dht::token_range& range, const gc_clock::time_point& query_time);

@@ -109,7 +109,7 @@ private:
}
return wrap(*it);
}
virtual std::optional<data_dictionary::table> try_find_table(data_dictionary::database db, utils::UUID id) const override {
virtual std::optional<data_dictionary::table> try_find_table(data_dictionary::database db, table_id id) const override {
auto& tables = unwrap(db).tables;
auto it = std::find_if(tables.begin(), tables.end(), [id] (const table& tbl) { return tbl.schema->id() == id; });
if (it == tables.end()) {

@@ -238,6 +238,10 @@ struct tagged_uuid {
const utils::UUID& uuid() const noexcept {
return id;
}

sstring to_sstring() const {
return id.to_sstring();
}
};
} // namespace utils

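The hunk above fills out the tagged_uuid surface: alongside uuid() for interop with UUID-typed code, the wrapper forwards to_sstring() so formatting and logging call sites don't have to unwrap first. A rough picture of the intended call patterns, with stand-in types and a plain std::string where the real class returns seastar's sstring:

    // Sketch of how a tagged id is meant to be used once it forwards to_sstring().
    // Hypothetical stand-ins; the real table_id wraps utils::UUID inside utils::tagged_uuid.
    #include <iostream>
    #include <string>

    struct raw_uuid_sketch {
        unsigned long long hi = 0, lo = 0;
        std::string to_string() const { return std::to_string(hi) + "-" + std::to_string(lo); }
    };

    class table_id_sketch {
        raw_uuid_sketch id;
    public:
        explicit table_id_sketch(raw_uuid_sketch u) : id(u) {}
        const raw_uuid_sketch& uuid() const noexcept { return id; }   // escape hatch for UUID-typed APIs
        std::string to_sstring() const { return id.to_string(); }     // convenience mirrored from this hunk
    };

    int main() {
        table_id_sketch tid(raw_uuid_sketch{4, 2});
        std::cout << tid.to_sstring() << "\n";  // format or log without unwrapping
        (void)tid.uuid();                       // the explicit unwrap remains available
    }
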
@@ -31,7 +31,7 @@ public:
return _raw;
}

const utils::UUID& base_id() const {
const table_id& base_id() const {
return _raw.base_id();
}