/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Modified by ScyllaDB
* Copyright (C) 2015-present ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#include "db/schema_tables.hh"
#include "service/migration_manager.hh"
#include "service/storage_proxy.hh"
#include "gms/feature_service.hh"
#include "partition_slice_builder.hh"
#include "dht/i_partitioner.hh"
#include "system_keyspace.hh"
#include "query_context.hh"
#include "query-result-set.hh"
#include "query-result-writer.hh"
#include "schema_builder.hh"
#include "map_difference.hh"
#include "utils/UUID_gen.hh"
#include
#include "log.hh"
#include "frozen_schema.hh"
#include "schema_registry.hh"
#include "mutation_query.hh"
#include "system_keyspace.hh"
#include "system_distributed_keyspace.hh"
#include "cql3/cql3_type.hh"
#include "cql3/functions/functions.hh"
#include "cql3/util.hh"
#include "types/list.hh"
#include "types/set.hh"
#include "db/marshal/type_parser.hh"
#include "db/config.hh"
#include "db/extensions.hh"
#include "hashers.hh"
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "compaction/compaction_strategy.hh"
#include "utils/joinpoint.hh"
#include "view_info.hh"
#include "cql_type_parser.hh"
#include "db/timeout_clock.hh"
#include "database.hh"
#include "user_types_metadata.hh"
#include "index/target_parser.hh"
#include "lua.hh"
#include "db/query_context.hh"
#include "serializer.hh"
#include "idl/mutation.dist.hh"
#include "serializer_impl.hh"
#include "idl/mutation.dist.impl.hh"
#include "db/system_keyspace.hh"
#include "cql3/untyped_result_set.hh"
#include "cql3/functions/user_aggregate.hh"
using namespace db::system_keyspace;
using namespace std::chrono_literals;
static logging::logger diff_logger("schema_diff");
// Whether a table needs extra durability (i.e. must not lose writes on a
// crash). True for selected tables of the system keyspace and of the
// system_distributed / system_distributed_everywhere keyspaces; each
// keyspace's own is_extra_durable() predicate decides per table.
static bool is_extra_durable(const sstring& ks_name, const sstring& cf_name) {
    if (is_system_keyspace(ks_name) && db::system_keyspace::is_extra_durable(cf_name)) {
        return true;
    }
    const bool in_distributed_ks = ks_name == db::system_distributed_keyspace::NAME
            || ks_name == db::system_distributed_keyspace::NAME_EVERYWHERE;
    return in_distributed_ks && db::system_distributed_keyspace::is_extra_durable(cf_name);
}
/** system.schema_* tables used to store keyspace/table/type attributes prior to C* 3.0 */
namespace db {
// schema_ctxt carries the environment needed when building/parsing schemas:
// the configured schema extensions, the murmur3 partitioner MSB-ignore
// tuning, and the schema registry grace period.
// NOTE(review): the template arguments of distributed<> below appear to have
// been stripped from this copy of the file — restore before compiling.
schema_ctxt::schema_ctxt(const db::config& cfg)
    : _extensions(cfg.extensions())
    , _murmur3_partitioner_ignore_msb_bits(cfg.murmur3_partitioner_ignore_msb_bits())
    , _schema_registry_grace_period(cfg.schema_registry_grace_period())
{}

// Delegates to the config-based constructor using the database's live config.
schema_ctxt::schema_ctxt(const database& db)
    : schema_ctxt(db.get_config())
{}

// Convenience: build from a sharded database service (uses the local shard).
schema_ctxt::schema_ctxt(distributed& db)
    : schema_ctxt(db.local())
{}

// Convenience: build from a sharded storage proxy (uses the local shard's db).
schema_ctxt::schema_ctxt(distributed& proxy)
    : schema_ctxt(proxy.local().get_db())
{}
namespace schema_tables {
logging::logger slogger("schema_tables");
const sstring version = "3";
// A (keyspace, table) name pair, used as an ordered lookup key while
// reading and diffing schema tables.
struct qualified_name {
    sstring keyspace_name;
    sstring table_name;

    qualified_name(sstring keyspace_name, sstring table_name)
        : keyspace_name(std::move(keyspace_name))
        , table_name(std::move(table_name))
    { }

    // Implicit conversion from a schema: takes its keyspace and table names.
    qualified_name(const schema_ptr& s)
        : keyspace_name(s->ks_name())
        , table_name(s->cf_name())
    { }

    // Lexicographic order: keyspace name first, table name as tie-breaker.
    bool operator<(const qualified_name& o) const {
        if (keyspace_name == o.keyspace_name) {
            return table_name < o.table_name;
        }
        return keyspace_name < o.keyspace_name;
    }

    bool operator==(const qualified_name& o) const {
        return table_name == o.table_name && keyspace_name == o.keyspace_name;
    }
};
// ---------------------------------------------------------------------------
// Forward declarations for the schema merge/diff machinery defined later in
// this file.
// NOTE(review): template argument lists (on future<>, distributed<>,
// std::map<>, std::vector<>, std::unordered_map<>, noncopyable_function<>,
// std::chrono::duration<>) appear to have been stripped from this copy of
// the file by an extraction step — they must be restored for this to parse.
// ---------------------------------------------------------------------------
static future read_table_mutations(distributed& proxy, const qualified_name& table, schema_ptr s);

static future<> merge_tables_and_views(distributed& proxy,
    std::map&& tables_before,
    std::map&& tables_after,
    std::map&& views_before,
    std::map&& views_after);

// Deferred "drop user types" step of a schema merge; [[nodiscard]] so the
// caller cannot accidentally forget to run it.
struct [[nodiscard]] user_types_to_drop final {
    seastar::noncopyable_function ()> drop;
};

static future merge_types(distributed& proxy,
    schema_result before,
    schema_result after);

static future<> merge_functions(distributed& proxy, schema_result before, schema_result after);
static future<> merge_aggregates(distributed& proxy, schema_result before, schema_result after);

static future<> do_merge_schema(distributed&, std::vector, bool do_flush);

// Maps column name -> serialized computation for the computed columns of one table.
using computed_columns_map = std::unordered_map;
static computed_columns_map get_computed_columns(const schema_mutations& sm);

static std::vector create_columns_from_column_rows(
    const query::result_set& rows, const sstring& keyspace,
    const sstring& table, bool is_super, column_view_virtual is_view_virtual, const computed_columns_map& computed_columns);

static std::vector create_indices_from_index_rows(const query::result_set& rows,
    const sstring& keyspace,
    const sstring& table);

static index_metadata create_index_from_index_row(const query::result_set_row& row,
    sstring keyspace,
    sstring table);

static void add_column_to_schema_mutation(schema_ptr, const column_definition&,
    api::timestamp_type, mutation&);

static void add_computed_column_to_schema_mutation(schema_ptr, const column_definition&,
    api::timestamp_type, mutation&);

static void add_index_to_schema_mutation(schema_ptr table,
    const index_metadata& index, api::timestamp_type timestamp,
    mutation& mutation);

static void drop_column_from_schema_mutation(schema_ptr schema_table, schema_ptr table,
    const sstring& column_name, long timestamp,
    std::vector&);

static void drop_index_from_schema_mutation(schema_ptr table,
    const index_metadata& column, long timestamp,
    std::vector& mutations);

static future create_table_from_table_row(
    distributed&,
    const query::result_set_row&);

static void prepare_builder_from_table_row(const schema_ctxt&, schema_builder&, const query::result_set_row&);

using namespace v3;

// A one-day duration type (the std::ratio argument was stripped in this copy).
using days = std::chrono::duration>;
// Rewrites the schema rows for the given (system) keyspace: first tombstones
// any stale entries in every schema table, then persists fresh keyspace
// mutations. Everything is strictly local to this node.
future<> save_system_schema(cql3::query_processor& qp, const sstring & ksname) {
    auto ksm = qp.db().find_keyspace(ksname).metadata();

    // Delete old, possibly obsolete entries in the schema tables. The delete
    // uses creation-timestamp minus one so the fresh rows written below win.
    co_await parallel_for_each(all_table_names(schema_features::full()), [ksm] (sstring cf) -> future<> {
        auto deletion_timestamp = schema_creation_timestamp() - 1;
        co_await qctx->execute_cql(format("DELETE FROM {}.{} USING TIMESTAMP {} WHERE keyspace_name = ?", NAME, cf,
            deletion_timestamp), ksm->name()).discard_result();
    });

    auto mutations = make_create_keyspace_mutations(ksm, schema_creation_timestamp(), true);
    co_await qp.proxy().mutate_locally(std::move(mutations), tracing::trace_state_ptr());
}
// Persists the hardcoded system-keyspace definitions into system_schema.*.
future<> save_system_keyspace_schema(cql3::query_processor& qp) {
    const auto& system_ks_name = NAME;
    return save_system_schema(qp, system_ks_name);
}
namespace v3 {
static constexpr auto schema_gc_grace = std::chrono::duration_cast(days(7)).count();
// Schema of system_schema.keyspaces: one row per keyspace holding its
// durable_writes flag and replication options. Built once per shard.
schema_ptr keyspaces() {
    static thread_local auto schema = [] {
        schema_builder builder(make_shared_schema(generate_legacy_id(NAME, KEYSPACES), NAME, KEYSPACES,
        // partition key
        {{"keyspace_name", utf8_type}},
        // clustering key
        {},
        // regular columns
        {
            {"durable_writes", boolean_type},
            {"replication", map_type_impl::get_instance(utf8_type, utf8_type, false)},
        },
        // static columns
        {},
        // regular column name type
        utf8_type,
        // comment
        "keyspace definitions"
        ));
        builder.set_gc_grace_seconds(schema_gc_grace);
        builder.with_version(generate_schema_version(builder.uuid()));
        builder.with_null_sharder();
        return builder.build();
    }();
    return schema;
}
// Schema of system_schema.tables: one row per (keyspace, table) holding the
// table-level properties (caching, compaction, compression, gc grace, ...).
// Must stay Cassandra-compatible since drivers read it.
schema_ptr tables() {
    static thread_local auto schema = [] {
        schema_builder builder(make_shared_schema(generate_legacy_id(NAME, TABLES), NAME, TABLES,
        // partition key
        {{"keyspace_name", utf8_type}},
        // clustering key
        {{"table_name", utf8_type}},
        // regular columns
        {
            {"bloom_filter_fp_chance", double_type},
            {"caching", map_type_impl::get_instance(utf8_type, utf8_type, false)},
            {"comment", utf8_type},
            {"compaction", map_type_impl::get_instance(utf8_type, utf8_type, false)},
            {"compression", map_type_impl::get_instance(utf8_type, utf8_type, false)},
            {"crc_check_chance", double_type},
            {"dclocal_read_repair_chance", double_type},
            {"default_time_to_live", int32_type},
            {"extensions", map_type_impl::get_instance(utf8_type, bytes_type, false)},
            {"flags", set_type_impl::get_instance(utf8_type, false)}, // SUPER, COUNTER, DENSE, COMPOUND
            {"gc_grace_seconds", int32_type},
            {"id", uuid_type},
            {"max_index_interval", int32_type},
            {"memtable_flush_period_in_ms", int32_type},
            {"min_index_interval", int32_type},
            {"read_repair_chance", double_type},
            {"speculative_retry", utf8_type},
        },
        // static columns
        {},
        // regular column name type
        utf8_type,
        // comment
        "table definitions"
        ));
        builder.set_gc_grace_seconds(schema_gc_grace);
        builder.with_version(generate_schema_version(builder.uuid()));
        builder.with_null_sharder();
        return builder.build();
    }();
    return schema;
}
// Holds Scylla-specific table metadata (schema version, CDC options,
// per-table partitioner) that has no counterpart in the Cassandra-compatible
// system_schema.tables. The exact column set depends on which cluster
// features are enabled, so four schema variants are pre-built.
schema_ptr scylla_tables(schema_features features) {
    static auto make = [] (bool has_cdc_options, bool has_per_table_partitioners) -> schema_ptr {
        auto id = generate_legacy_id(NAME, SCYLLA_TABLES);
        auto sb = schema_builder(NAME, SCYLLA_TABLES, std::make_optional(id))
            .with_column("keyspace_name", utf8_type, column_kind::partition_key)
            .with_column("table_name", utf8_type, column_kind::clustering_key)
            .with_column("version", uuid_type)
            .set_gc_grace_seconds(schema_gc_grace);
        // Each feature combination gets a distinct schema-version offset:
        // 0 - false, false
        // 1 - true, false
        // 2 - false, true
        // 3 - true, true
        int offset = 0;
        if (has_cdc_options) {
            sb.with_column("cdc", map_type_impl::get_instance(utf8_type, utf8_type, false));
            ++offset;
        }
        if (has_per_table_partitioners) {
            sb.with_column("partitioner", utf8_type);
            offset += 2;
        }
        sb.with_version(generate_schema_version(id, offset));
        sb.with_null_sharder();
        return sb.build();
    };
    // Indexed as schemas[has_cdc_options][has_per_table_partitioners].
    static thread_local schema_ptr schemas[2][2] = { {make(false, false), make(false, true)}, {make(true, false), make(true, true)} };
    return schemas[features.contains(schema_feature::CDC_OPTIONS)][features.contains(schema_feature::PER_TABLE_PARTITIONERS)];
}
// The "columns" table lists the definitions of all columns in all tables
// and views. Its schema needs to be identical to the one in Cassandra because
// it is the API through which drivers inspect the list of columns in a table
// (e.g., cqlsh's "DESCRIBE TABLE" and "DESCRIBE MATERIALIZED VIEW" get their
// information from the columns table).
// The "view_virtual_columns" table is an additional table with exactly the
// same schema (both are created by columns_schema()), but has a separate
// list of "virtual" columns. Those are used in materialized views for keeping
// rows without data alive (see issue #3362). These virtual columns cannot be
// listed in the regular "columns" table, otherwise the "DESCRIBE MATERIALIZED
// VIEW" would list them - while it should only list real, selected, columns.
// Builds the schema shared by the "columns" and "view_virtual_columns"
// tables (see the comment above for why the two tables are separate):
// one row per (keyspace, table, column) with the column's kind, position,
// type and clustering order.
static schema_ptr columns_schema(const char* columns_table_name) {
    schema_builder builder(make_shared_schema(generate_legacy_id(NAME, columns_table_name), NAME, columns_table_name,
    // partition key
    {{"keyspace_name", utf8_type}},
    // clustering key
    {{"table_name", utf8_type},{"column_name", utf8_type}},
    // regular columns
    {
        {"clustering_order", utf8_type},
        {"column_name_bytes", bytes_type},
        {"kind", utf8_type},
        {"position", int32_type},
        {"type", utf8_type},
    },
    // static columns
    {},
    // regular column name type
    utf8_type,
    // comment
    "column definitions"
    ));
    builder.set_gc_grace_seconds(schema_gc_grace);
    builder.with_version(generate_schema_version(builder.uuid()));
    builder.with_null_sharder();
    return builder.build();
}
// system_schema.columns — built once per shard, cached thereafter.
schema_ptr columns() {
    static thread_local schema_ptr schema = columns_schema(COLUMNS);
    return schema;
}
// system_schema.view_virtual_columns — same layout as "columns", but lists
// only the virtual columns of materialized views. Built once per shard.
schema_ptr view_virtual_columns() {
    static thread_local schema_ptr schema = columns_schema(VIEW_VIRTUAL_COLUMNS);
    return schema;
}
// Computed columns are a special kind of columns. Rather than having their value provided directly
// by the user, they are computed - possibly from other column values. This table stores which columns
// for a given table are computed, and a serialized computation itself. Full column information is stored
// in the `columns` table, this one stores only entries for computed columns, so it will be empty for tables
// without any computed columns defined in the schema. `computation` is a serialized blob and its format
// is defined in column_computation.hh and system_schema docs.
//
static schema_ptr computed_columns_schema(const char* columns_table_name) {
    schema_builder builder(make_shared_schema(generate_legacy_id(NAME, columns_table_name), NAME, columns_table_name,
    // partition key
    {{"keyspace_name", utf8_type}},
    // clustering key
    {{"table_name", utf8_type}, {"column_name", utf8_type}},
    // regular columns: the serialized computation blob
    {{"computation", bytes_type}},
    // static columns
    {},
    // regular column name type
    utf8_type,
    // comment
    "computed columns"
    ));
    builder.set_gc_grace_seconds(schema_gc_grace);
    builder.with_version(generate_schema_version(builder.uuid()));
    builder.with_null_sharder();
    return builder.build();
}
// system_schema.computed_columns — built once per shard, cached thereafter.
schema_ptr computed_columns() {
    static thread_local schema_ptr schema = computed_columns_schema(COMPUTED_COLUMNS);
    return schema;
}
// Schema of system_schema.dropped_columns: records columns that were dropped
// from a table (name, type and drop time), needed to interpret old sstables.
schema_ptr dropped_columns() {
    static thread_local auto schema = [] {
        schema_builder builder(make_shared_schema(generate_legacy_id(NAME, DROPPED_COLUMNS), NAME, DROPPED_COLUMNS,
        // partition key
        {{"keyspace_name", utf8_type}},
        // clustering key
        {{"table_name", utf8_type},{"column_name", utf8_type}},
        // regular columns
        {
            {"dropped_time", timestamp_type},
            {"type", utf8_type},
        },
        // static columns
        {},
        // regular column name type
        utf8_type,
        // comment
        "dropped column registry"
        ));
        builder.set_gc_grace_seconds(schema_gc_grace);
        builder.with_version(generate_schema_version(builder.uuid()));
        builder.with_null_sharder();
        return builder.build();
    }();
    return schema;
}
// Schema of system_schema.triggers: kept for Cassandra schema compatibility
// (one row per (keyspace, table, trigger) with its options map).
schema_ptr triggers() {
    static thread_local auto schema = [] {
        schema_builder builder(make_shared_schema(generate_legacy_id(NAME, TRIGGERS), NAME, TRIGGERS,
        // partition key
        {{"keyspace_name", utf8_type}},
        // clustering key
        {{"table_name", utf8_type},{"trigger_name", utf8_type}},
        // regular columns
        {
            {"options", map_type_impl::get_instance(utf8_type, utf8_type, false)},
        },
        // static columns
        {},
        // regular column name type
        utf8_type,
        // comment
        "trigger definitions"
        ));
        builder.set_gc_grace_seconds(schema_gc_grace);
        builder.with_version(generate_schema_version(builder.uuid()));
        builder.with_null_sharder();
        return builder.build();
    }();
    return schema;
}
// Schema of system_schema.views: one row per materialized view, with its
// base-table linkage (id/name/where_clause) plus the same table-level
// properties stored in system_schema.tables.
schema_ptr views() {
    static thread_local auto schema = [] {
        schema_builder builder(make_shared_schema(generate_legacy_id(NAME, VIEWS), NAME, VIEWS,
        // partition key
        {{"keyspace_name", utf8_type}},
        // clustering key
        {{"view_name", utf8_type}},
        // regular columns
        {
            {"base_table_id", uuid_type},
            {"base_table_name", utf8_type},
            {"where_clause", utf8_type},
            {"bloom_filter_fp_chance", double_type},
            {"caching", map_type_impl::get_instance(utf8_type, utf8_type, false)},
            {"comment", utf8_type},
            {"compaction", map_type_impl::get_instance(utf8_type, utf8_type, false)},
            {"compression", map_type_impl::get_instance(utf8_type, utf8_type, false)},
            {"crc_check_chance", double_type},
            {"dclocal_read_repair_chance", double_type},
            {"default_time_to_live", int32_type},
            {"extensions", map_type_impl::get_instance(utf8_type, bytes_type, false)},
            {"gc_grace_seconds", int32_type},
            {"id", uuid_type},
            {"include_all_columns", boolean_type},
            {"max_index_interval", int32_type},
            {"memtable_flush_period_in_ms", int32_type},
            {"min_index_interval", int32_type},
            {"read_repair_chance", double_type},
            {"speculative_retry", utf8_type},
        },
        // static columns
        {},
        // regular column name type
        utf8_type,
        // comment
        "view definitions"
        ));
        builder.set_gc_grace_seconds(schema_gc_grace);
        builder.with_version(generate_schema_version(builder.uuid()));
        builder.with_null_sharder();
        return builder.build();
    }();
    return schema;
}
// Schema of system_schema.indexes: one row per (keyspace, table, index)
// with the index kind and its options map.
schema_ptr indexes() {
    static thread_local auto schema = [] {
        schema_builder builder(make_shared_schema(generate_legacy_id(NAME, INDEXES), NAME, INDEXES,
        // partition key
        {{"keyspace_name", utf8_type}},
        // clustering key
        {{"table_name", utf8_type},{"index_name", utf8_type}},
        // regular columns
        {
            {"kind", utf8_type},
            {"options", map_type_impl::get_instance(utf8_type, utf8_type, false)},
        },
        // static columns
        {},
        // regular column name type
        utf8_type,
        // comment
        "secondary index definitions"
        ));
        builder.set_gc_grace_seconds(schema_gc_grace);
        builder.with_version(generate_schema_version(builder.uuid()));
        builder.with_null_sharder();
        return builder.build();
    }();
    return schema;
}
// Schema of system_schema.types: one row per user-defined type with its
// parallel lists of field names and field types.
schema_ptr types() {
    static thread_local auto schema = [] {
        schema_builder builder(make_shared_schema(generate_legacy_id(NAME, TYPES), NAME, TYPES,
        // partition key
        {{"keyspace_name", utf8_type}},
        // clustering key
        {{"type_name", utf8_type}},
        // regular columns
        {
            {"field_names", list_type_impl::get_instance(utf8_type, false)},
            {"field_types", list_type_impl::get_instance(utf8_type, false)},
        },
        // static columns
        {},
        // regular column name type
        utf8_type,
        // comment
        "user defined type definitions"
        ));
        builder.set_gc_grace_seconds(schema_gc_grace);
        builder.with_version(generate_schema_version(builder.uuid()));
        builder.with_null_sharder();
        return builder.build();
    }();
    return schema;
}
// Schema of system_schema.functions: one row per user-defined function,
// keyed by (keyspace, function name, argument types) so overloads get
// distinct rows.
schema_ptr functions() {
    static thread_local auto schema = [] {
        schema_builder builder(make_shared_schema(generate_legacy_id(NAME, FUNCTIONS), NAME, FUNCTIONS,
        // partition key
        {{"keyspace_name", utf8_type}},
        // clustering key
        {{"function_name", utf8_type}, {"argument_types", list_type_impl::get_instance(utf8_type, false)}},
        // regular columns
        {
            {"argument_names", list_type_impl::get_instance(utf8_type, false)},
            {"body", utf8_type},
            {"language", utf8_type},
            {"return_type", utf8_type},
            {"called_on_null_input", boolean_type},
        },
        // static columns
        {},
        // regular column name type
        utf8_type,
        // comment
        "user defined function definitions"
        ));
        builder.set_gc_grace_seconds(schema_gc_grace);
        builder.with_version(generate_schema_version(builder.uuid()));
        builder.with_null_sharder();
        return builder.build();
    }();
    return schema;
}
// Schema of system_schema.aggregates: one row per user-defined aggregate,
// keyed by (keyspace, aggregate name, argument types); stores the state and
// final functions, the state type and the initial condition.
schema_ptr aggregates() {
    static thread_local auto schema = [] {
        schema_builder builder(make_shared_schema(generate_legacy_id(NAME, AGGREGATES), NAME, AGGREGATES,
        // partition key
        {{"keyspace_name", utf8_type}},
        // clustering key
        {{"aggregate_name", utf8_type}, {"argument_types", list_type_impl::get_instance(utf8_type, false)}},
        // regular columns
        {
            {"final_func", utf8_type},
            {"initcond", utf8_type},
            {"return_type", utf8_type},
            {"state_func", utf8_type},
            {"state_type", utf8_type},
        },
        // static columns
        {},
        // regular column name type
        utf8_type,
        // comment
        "user defined aggregate definitions"
        ));
        builder.set_gc_grace_seconds(schema_gc_grace);
        builder.with_version(generate_schema_version(builder.uuid()));
        builder.with_null_sharder();
        return builder.build();
    }();
    return schema;
}
// Scylla-specific table (lives in the "system" keyspace, not system_schema):
// records, per (table id, schema version), the column mapping of that
// version so old sstables/commitlog entries can be decoded after schema
// changes. Rows mirror the layout of system_schema.columns.
schema_ptr scylla_table_schema_history() {
    static thread_local auto s = [] {
        schema_builder builder(db::system_keyspace::NAME, SCYLLA_TABLE_SCHEMA_HISTORY, generate_legacy_id(db::system_keyspace::NAME, SCYLLA_TABLE_SCHEMA_HISTORY));
        builder.with_column("cf_id", uuid_type, column_kind::partition_key);
        builder.with_column("schema_version", uuid_type, column_kind::clustering_key);
        builder.with_column("column_name", utf8_type, column_kind::clustering_key);
        builder.with_column("clustering_order", utf8_type);
        builder.with_column("column_name_bytes", bytes_type);
        builder.with_column("kind", utf8_type);
        builder.with_column("position", int32_type);
        builder.with_column("type", utf8_type);
        builder.set_comment("Scylla specific table to store a history of column mappings "
            "for each table schema version upon an CREATE TABLE/ALTER TABLE operations");
        builder.with_version(generate_schema_version(builder.uuid()));
        builder.with_null_sharder();
        return builder.build(schema_builder::compact_storage::no);
    }();
    return s;
}
}
#if 0
public static void truncateSchemaTables()
{
for (String table : ALL)
getSchemaCFS(table).truncateBlocking();
}
private static void flushSchemaTables()
{
for (String table : ALL)
SystemKeyspace.forceBlockingFlush(table);
}
#endif
// Rewrites a schema mutation so it only mentions columns that every node in
// the cluster understands. Only system_schema.scylla_tables is affected:
// when some schema feature is still missing cluster-wide, the mutation is
// upgraded to the reduced scylla_tables schema matching the feature set.
static
mutation
redact_columns_for_missing_features(mutation m, schema_features features) {
    const bool all_features_enabled = features.contains(schema_feature::CDC_OPTIONS)
            && features.contains(schema_feature::PER_TABLE_PARTITIONERS);
    if (all_features_enabled || m.schema()->cf_name() != SCYLLA_TABLES) {
        return m;
    }
    slogger.debug("adjusting schema_tables mutation due to possible in-progress cluster upgrade");
    // Going through global_schema_ptr guarantees the redacted schema is
    // registered in the schema registry.
    global_schema_ptr target_schema{scylla_tables(features)};
    m.upgrade(target_schema);
    return m;
}
/**
* Read schema from system keyspace and calculate MD5 digest of every row, resulting digest
* will be converted into UUID which would act as content-based version of the schema.
*/
// NOTE(review): template argument lists in this function's signature and
// locals appear stripped from this copy of the file.
future calculate_schema_digest(distributed& proxy, schema_features features, noncopyable_function accept_keyspace)
{
    // Maps one schema table name to the (redacted) mutations of every
    // accepted keyspace partition stored in it.
    auto map = [&proxy, features, accept_keyspace = std::move(accept_keyspace)] (sstring table) mutable -> future> {
        auto rs = co_await db::system_keyspace::query_mutations(proxy, NAME, table);
        auto s = proxy.local().get_db().local().find_schema(NAME, table);
        std::vector mutations;
        for (auto&& p : rs->partitions()) {
            auto mut = p.mut().unfreeze(s);
            // Every schema table is partitioned by keyspace name.
            auto partition_key = value_cast(utf8_type->deserialize(mut.key().get_component(*s, 0)));
            if (!accept_keyspace(partition_key)) {
                continue;
            }
            // Hide columns the cluster doesn't fully support yet so that all
            // nodes compute the same digest during a rolling upgrade.
            mut = redact_columns_for_missing_features(std::move(mut), features);
            mutations.emplace_back(std::move(mut));
        }
        co_return mutations;
    };
    // Folds one table's mutations into the running digest.
    auto reduce = [features] (auto& hash, auto&& mutations) {
        for (const mutation& m : mutations) {
            feed_hash_for_schema_digest(hash, m, features);
        }
    };
    auto hash = md5_hasher();
    auto tables = all_table_names(features);
    {
        for (auto& table: tables) {
            auto mutations = co_await map(table);
            // Per-mutation digests are computed only for trace logging.
            if (diff_logger.is_enabled(logging::log_level::trace)) {
                for (const mutation& m : mutations) {
                    md5_hasher h;
                    feed_hash_for_schema_digest(h, m, features);
                    diff_logger.trace("Digest {} for {}, compacted={}", h.finalize(), m, compact_for_schema_digest(m));
                }
            }
            reduce(hash, mutations);
        }
        // The digest UUID is a name-based UUID of the accumulated hash.
        co_return utils::UUID_gen::get_name_UUID(hash.finalize());
    }
}
// Digest of the whole non-system schema; this is what becomes the node's
// advertised schema version.
future calculate_schema_digest(distributed& proxy, schema_features features)
{
    // Exclude the hardcoded system keyspaces from the digest.
    auto keyspace_filter = std::not_fn(&is_system_keyspace);
    return calculate_schema_digest(proxy, features, std::move(keyspace_filter));
}
// Converts the whole locally-stored non-system schema into a flat list of
// mutations, redacted to the given feature set, suitable for shipping to
// another node (e.g. in a schema pull/push).
// NOTE(review): template argument lists appear stripped from this copy.
future> convert_schema_to_mutations(distributed& proxy, schema_features features)
{
    auto map = [&proxy, features] (sstring table) -> future> {
        auto rs = co_await db::system_keyspace::query_mutations(proxy, NAME, table);
        auto s = proxy.local().get_db().local().find_schema(NAME, table);
        std::vector results;
        for (auto&& p : rs->partitions()) {
            auto mut = p.mut().unfreeze(s);
            // Schema tables are partitioned by keyspace name.
            auto partition_key = value_cast(utf8_type->deserialize(mut.key().get_component(*s, 0)));
            if (is_system_keyspace(partition_key)) {
                // System keyspaces are hardcoded on every node; don't ship them.
                continue;
            }
            mut = redact_columns_for_missing_features(std::move(mut), features);
            results.emplace_back(mut);
        }
        co_return results;
    };
    // Concatenates per-table result vectors into one.
    auto reduce = [] (auto&& result, auto&& mutations) {
        std::move(mutations.begin(), mutations.end(), std::back_inserter(result));
        return std::move(result);
    };
    co_return co_await map_reduce(all_table_names(features), map, std::vector{}, reduce);
}
// Filters and redacts a set of schema mutations so they are understandable
// by nodes that only support the given feature set.
std::vector
adjust_schema_for_schema_features(std::vector schema, schema_features features) {
    // Don't send the `computed_columns` table mutations to nodes that don't know it.
    if (!features.contains(schema_feature::COMPUTED_COLUMNS)) {
        schema.erase(std::remove_if(schema.begin(), schema.end(), [] (const mutation& m) {
            return m.schema()->cf_name() == COMPUTED_COLUMNS;
        }) , schema.end());
    }
    // Redact individual columns (e.g. scylla_tables extras) per feature set.
    for (auto& m : schema) {
        m = redact_columns_for_missing_features(m, features);
    }
    return schema;
}
// Reads one schema table's partition for each named keyspace; keyspaces with
// no rows in that table are omitted from the result.
future
read_schema_for_keyspaces(distributed& proxy, const sstring& schema_table_name, const std::set& keyspace_names)
{
    auto map = [&proxy, schema_table_name] (const sstring& keyspace_name) { return read_schema_partition_for_keyspace(proxy, schema_table_name, keyspace_name); };
    // Only keep non-empty partitions.
    auto insert = [] (schema_result&& result, auto&& schema_entity) {
        if (!schema_entity.second->empty()) {
            result.insert(std::move(schema_entity));
        }
        return std::move(result);
    };
    co_return co_await map_reduce(keyspace_names.begin(), keyspace_names.end(), map, schema_result{}, insert);
}
// Reads at most one partition locally and returns it as a mutation. A
// missing partition is returned as an empty mutation for pkey; more than one
// partition in the result is a programming error.
static
future query_partition_mutation(service::storage_proxy& proxy,
    schema_ptr s,
    lw_shared_ptr cmd,
    partition_key pkey)
{
    auto dk = dht::decorate_key(*s, pkey);
    auto range = dht::partition_range::make_singular(dk);
    auto res_hit_rate = co_await proxy.query_mutations_locally(s, std::move(cmd), range, db::no_timeout, tracing::trace_state_ptr{});
    auto&& [res, hit_rate] = res_hit_rate;
    auto&& partitions = res->partitions();
    if (partitions.size() == 0) {
        // Partition doesn't exist: represent it as an empty mutation.
        co_return mutation(s, std::move(dk));
    } else if (partitions.size() == 1) {
        co_return partitions[0].mut().unfreeze(s);
    } else {
        throw std::invalid_argument("Results must have at most one partition");
    }
}
// Reads the given keyspace's partition of one schema table and returns it as
// a (keyspace name, result set) pair.
future
read_schema_partition_for_keyspace(distributed& proxy, sstring schema_table_name, sstring keyspace_name)
{
    auto schema = proxy.local().get_db().local().find_schema(NAME, schema_table_name);
    auto keyspace_key = dht::decorate_key(*schema,
        partition_key::from_singular(*schema, keyspace_name));
    auto rs = co_await db::system_keyspace::query(proxy, NAME, schema_table_name, keyspace_key);
    co_return schema_result_value_type{keyspace_name, std::move(rs)};
}
// Reads the rows describing a single table from the given schema table: the
// partition is the keyspace, restricted to the clustering prefix naming the
// table, returned as a mutation.
future
read_schema_partition_for_table(distributed& proxy, schema_ptr schema, const sstring& keyspace_name, const sstring& table_name)
{
    auto keyspace_key = partition_key::from_singular(*schema, keyspace_name);
    auto clustering_range = query::clustering_range(clustering_key_prefix::from_clustering_prefix(
        *schema, exploded_clustering_prefix({utf8_type->decompose(table_name)})));
    auto slice = partition_slice_builder(*schema)
        .with_range(std::move(clustering_range))
        .build();
    auto cmd = make_lw_shared(schema->id(), schema->version(), std::move(slice), proxy.local().get_max_result_size(slice),
        query::row_limit(query::max_rows));
    co_return co_await query_partition_mutation(proxy.local(), std::move(schema), std::move(cmd), std::move(keyspace_key));
}
// Reads the system_schema.keyspaces partition for one keyspace as a mutation.
future
read_keyspace_mutation(distributed& proxy, const sstring& keyspace_name) {
    schema_ptr s = keyspaces();
    auto key = partition_key::from_singular(*s, keyspace_name);
    auto slice = s->full_slice();
    auto cmd = make_lw_shared(s->id(), s->version(), std::move(slice), proxy.local().get_max_result_size(slice));
    co_return co_await query_partition_mutation(proxy.local(), std::move(s), std::move(cmd), std::move(key));
}
// Node-wide lock serializing schema merges; it lives on shard 0 and all
// shards acquire it there via submit_to.
static thread_local semaphore the_merge_lock {1};

future<> merge_lock() {
    return smp::submit_to(0, [] { return the_merge_lock.wait(); });
}

future<> merge_unlock() {
    return smp::submit_to(0, [] { the_merge_lock.signal(); });
}

// Runs func under the merge lock, releasing the lock even if func throws.
static future<> with_merge_lock(noncopyable_function ()> func) {
    co_await merge_lock();
    std::exception_ptr ep;
    try {
        co_await func();
    } catch (...) {
        // Remember the failure; the lock must be released before rethrowing.
        ep = std::current_exception();
    }
    co_await merge_unlock();
    if (ep) {
        std::rethrow_exception(std::move(ep));
    }
}
// Recomputes the digest of the locally stored schema, persists it in the
// system keyspace and publishes the new version to every shard's database.
static
future<> update_schema_version_and_announce(distributed& proxy, schema_features features) {
    auto uuid = co_await calculate_schema_digest(proxy, features);
    co_await db::system_keyspace::update_schema_version(uuid);
    co_await proxy.local().get_db().invoke_on_all([uuid] (database& db) {
        db.update_version(uuid);
    });
    slogger.info("Schema version changed to {}", uuid);
}
/**
* Merge remote schema in form of mutations with local and mutate ks/cf metadata objects
* (which also involves fs operations on add/drop ks/cf)
*
* @param mutations the schema changes to apply
*
* @throws ConfigurationException If one of metadata attributes has invalid value
* @throws IOException If data was corrupted during transportation or failed to apply fs operations
*/
future<> merge_schema(distributed& proxy, gms::feature_service& feat, std::vector mutations)
{
    // The merge and the subsequent version recalculation must be atomic with
    // respect to other merges, hence the node-wide merge lock.
    co_await with_merge_lock([&] () mutable -> future<> {
        co_await do_merge_schema(proxy, std::move(mutations), true);
        co_await update_schema_version_and_announce(proxy, feat.cluster_schema_features());
    });
}
// Recomputes and announces the schema version without applying any new
// mutations (e.g. after a feature flag changes what the digest covers).
future<> recalculate_schema_version(distributed& proxy, gms::feature_service& feat) {
    co_await with_merge_lock([&] () -> future<> {
        co_await update_schema_version_and_announce(proxy, feat.cluster_schema_features());
    });
}
// Returns names of live table definitions of given keyspace
future>
static read_table_names_of_keyspace(distributed& proxy, const sstring& keyspace_name, schema_ptr schema_table) {
    auto pkey = dht::decorate_key(*schema_table, partition_key::from_singular(*schema_table, keyspace_name));
    auto&& rs = co_await db::system_keyspace::query(proxy, schema_table->ks_name(), schema_table->cf_name(), pkey);
    co_return boost::copy_range>(rs->rows() | boost::adaptors::transformed([schema_table] (const query::result_set_row& row) {
        // The first clustering column of every schema table is the table name.
        const sstring name = schema_table->clustering_key_columns().begin()->name_as_text();
        return row.get_nonnull(name);
    }));
}
// Extracts the table id from the "id" column of the table's single row in
// its system_schema.tables mutation.
// NOTE(review): get_nonnull's template argument (presumably utils::UUID)
// appears to have been stripped from this copy of the file.
static utils::UUID table_id_from_mutations(const schema_mutations& sm) {
    auto table_rs = query::result_set(sm.columnfamilies_mutation());
    query::result_set_row table_row = table_rs.row(0);
    return table_row.get_nonnull("id");
}
// Loads the schema mutations of every table (or view, depending on s) in the
// given keyspaces, keyed by table id.
static
future>
read_tables_for_keyspaces(distributed& proxy, const std::set& keyspace_names, schema_ptr s)
{
    std::map result;
    for (auto&& keyspace_name : keyspace_names) {
        for (auto&& table_name : co_await read_table_names_of_keyspace(proxy, keyspace_name, s)) {
            auto qn = qualified_name(keyspace_name, table_name);
            auto muts = co_await read_table_mutations(proxy, qn, s);
            auto id = table_id_from_mutations(muts);
            result.emplace(std::move(id), std::move(muts));
        }
    }
    co_return result;
}
mutation compact_for_schema_digest(const mutation& m) {
    // Cassandra skips tombstones when digesting the schema so that tombstone
    // GC cannot cause digest disagreements between nodes
    // (https://issues.apache.org/jira/browse/CASSANDRA-6862).
    // Running compact_for_compaction() with always_gc and an unbounded
    // GC time point achieves the same effect here.
    auto compacted = m;
    compacted.partition().compact_for_compaction(*m.schema(), always_gc, gc_clock::time_point::max());
    return compacted;
}
// Feeds one schema mutation into the digest hasher. Mutations are compacted
// first (tombstones stripped) so GC state doesn't perturb the digest.
// NOTE(review): `features.contains()` below is missing its argument/template
// parameter — it appears to have been stripped from this copy of the file;
// restore it before compiling.
void feed_hash_for_schema_digest(hasher& h, const mutation& m, schema_features features) {
    auto compacted = compact_for_schema_digest(m);
    // When the feature is enabled, mutations that compact away entirely are
    // excluded from the digest.
    if (!features.contains() || !compacted.partition().empty()) {
        feed_hash(h, compacted);
    }
}
// Applies deletion of the "version" column to a system_schema.scylla_tables
// mutation; mutations of other tables are left untouched.
static void delete_schema_version(mutation& m) {
    if (m.column_family_id() != scylla_tables()->id()) {
        return;
    }
    const column_definition& version_col = *m.schema()->get_column_definition(to_bytes("version"));
    for (auto&& entry : m.partition().clustered_rows()) {
        auto&& cells = entry.row().cells();
        // The tombstone timestamp must supersede any live "version" cell.
        api::timestamp_type ts = api::new_timestamp();
        if (auto&& existing = cells.find_cell(version_col.id)) {
            ts = std::max(ts, existing->as_atomic_cell(version_col).timestamp());
        }
        cells.apply(version_col, atomic_cell::make_dead(ts, gc_clock::now()));
    }
}
/// Helper function which fills a given mutation with column information
/// provided the corresponding column_definition object.
static void fill_column_info(const schema& table,
        const clustering_key& ckey,
        const column_definition& column,
        api::timestamp_type timestamp,
        ttl_opt ttl,
        mutation& m) {
    auto type = column.type;
    // Clustering order is only meaningful for clustering key columns.
    const char* order = column.is_clustering_key() ? "ASC" : "NONE";
    if (type->is_reversed()) {
        type = type->underlying_type();
        if (column.is_clustering_key()) {
            order = "DESC";
        }
    }
    // Position within the primary key; -1 for non-key columns.
    int32_t pos = -1;
    if (column.is_primary_key()) {
        pos = table.position(column);
    }
    m.set_clustered_cell(ckey, "column_name_bytes", data_value(column.name()), timestamp, ttl);
    m.set_clustered_cell(ckey, "kind", serialize_kind(column.kind), timestamp, ttl);
    m.set_clustered_cell(ckey, "position", pos, timestamp, ttl);
    m.set_clustered_cell(ckey, "clustering_order", sstring(order), timestamp, ttl);
    m.set_clustered_cell(ckey, "type", type->as_cql3_type().to_string(), timestamp, ttl);
}
// Persists the column mapping of schema `s` (one row per static/regular
// column, keyed by table id + schema version + column name) into the
// schema-history table via a local mutation, so earlier schema versions
// stay interpretable. When `with_ttl` is true the rows expire after
// DEFAULT_GC_GRACE_SECONDS; otherwise they are written without TTL.
future<> store_column_mapping(distributed& proxy, schema_ptr s, bool with_ttl) {
// Skip "system*" tables -- only user-related tables are relevant
if (static_cast(s->ks_name()).starts_with(db::system_keyspace::NAME)) {
co_return;
}
schema_ptr history_tbl = scylla_table_schema_history();
// Insert the new column mapping for a given schema version (without TTL)
std::vector muts;
// Partition key: the table's id.
partition_key pk = partition_key::from_exploded(*history_tbl, {uuid_type->decompose(s->id())});
ttl_opt ttl;
if (with_ttl) {
ttl = gc_clock::duration(DEFAULT_GC_GRACE_SECONDS);
}
// Use one timestamp for all mutations for the ease of debugging
const auto ts = api::new_timestamp();
for (const auto& cdef : boost::range::join(s->static_columns(), s->regular_columns())) {
mutation m(history_tbl, pk);
// Clustering key: (schema version, column name).
auto ckey = clustering_key::from_exploded(*history_tbl, {uuid_type->decompose(s->version()),
utf8_type->decompose(cdef.name_as_text())});
fill_column_info(*s, ckey, cdef, ts, ttl, m);
muts.emplace_back(std::move(m));
}
co_await proxy.local().mutate_locally(std::move(muts), tracing::trace_state_ptr());
}
// Applies a batch of schema mutations locally and reconciles the in-memory
// state (keyspaces, tables, views, types, functions, aggregates) by diffing
// the schema tables of the affected keyspaces before and after the write.
static future<> do_merge_schema(distributed& proxy, std::vector mutations, bool do_flush)
{
slogger.trace("do_merge_schema: {}", mutations);
schema_ptr s = keyspaces();
// compare before/after schemas of the affected keyspaces only
std::set keyspaces;
std::set column_families;
for (auto&& mutation : mutations) {
// The first partition-key component of every schema table is the keyspace name.
keyspaces.emplace(value_cast(utf8_type->deserialize(mutation.key().get_component(*s, 0))));
column_families.emplace(mutation.column_family_id());
// We must force recalculation of schema version after the merge, since the resulting
// schema may be a mix of the old and new schemas.
delete_schema_version(mutation);
}
// current state of the schema
auto&& old_keyspaces = co_await read_schema_for_keyspaces(proxy, KEYSPACES, keyspaces);
auto&& old_column_families = co_await read_tables_for_keyspaces(proxy, keyspaces, tables());
auto&& old_types = co_await read_schema_for_keyspaces(proxy, TYPES, keyspaces);
auto&& old_views = co_await read_tables_for_keyspaces(proxy, keyspaces, views());
auto old_functions = co_await read_schema_for_keyspaces(proxy, FUNCTIONS, keyspaces);
auto old_aggregates = co_await read_schema_for_keyspaces(proxy, AGGREGATES, keyspaces);
co_await proxy.local().mutate_locally(std::move(mutations), tracing::trace_state_ptr());
if (do_flush) {
// Flush the touched schema tables on every shard.
co_await proxy.local().get_db().invoke_on_all([&] (database& db) -> future<> {
auto& cfs = column_families;
co_await parallel_for_each(cfs.begin(), cfs.end(), [&] (const utils::UUID& id) -> future<> {
auto& cf = db.find_column_family(id);
co_await cf.flush();
});
});
}
// with new data applied
auto&& new_keyspaces = co_await read_schema_for_keyspaces(proxy, KEYSPACES, keyspaces);
auto&& new_column_families = co_await read_tables_for_keyspaces(proxy, keyspaces, tables());
auto&& new_types = co_await read_schema_for_keyspaces(proxy, TYPES, keyspaces);
auto&& new_views = co_await read_tables_for_keyspaces(proxy, keyspaces, views());
auto new_functions = co_await read_schema_for_keyspaces(proxy, FUNCTIONS, keyspaces);
auto new_aggregates = co_await read_schema_for_keyspaces(proxy, AGGREGATES, keyspaces);
// Merge order matters: keyspaces and types before the tables/views that may
// depend on them; dropping of types and keyspaces is deferred until after
// their dependent objects are gone.
std::set keyspaces_to_drop = co_await merge_keyspaces(proxy, std::move(old_keyspaces), std::move(new_keyspaces));
auto types_to_drop = co_await merge_types(proxy, std::move(old_types), std::move(new_types));
co_await merge_tables_and_views(proxy,
std::move(old_column_families), std::move(new_column_families),
std::move(old_views), std::move(new_views));
co_await merge_functions(proxy, std::move(old_functions), std::move(new_functions));
co_await merge_aggregates(proxy, std::move(old_aggregates), std::move(new_aggregates));
co_await types_to_drop.drop();
co_await proxy.local().get_db().invoke_on_all([&] (database& db) -> future<> {
// it is safe to drop a keyspace only when all nested ColumnFamilies where deleted
for (auto keyspace_to_drop : keyspaces_to_drop) {
db.drop_keyspace(keyspace_to_drop);
co_await db.get_notifier().drop_keyspace(keyspace_to_drop);
}
});
}
future> merge_keyspaces(distributed& proxy, schema_result&& before, schema_result&& after)
{
    std::vector created;
    std::vector altered;
    std::set dropped;
    /*
     * - we don't care about entriesOnlyOnLeft() or entriesInCommon(), because only the changes are of interest to us
     * - of all entriesOnlyOnRight(), we only care about ones that have live columns; it's possible to have a ColumnFamily
     *   there that only has the top-level deletion, if:
     *      a) a pushed DROP KEYSPACE change for a keyspace hadn't ever made it to this node in the first place
     *      b) a pulled dropped keyspace that got dropped before it could find a way to this node
     * - of entriesDiffering(), we don't care about the scenario where both pre and post-values have zero live columns:
     *   that means that a keyspace had been recreated and dropped, and the recreated keyspace had never found a way
     *   to this node
     */
    auto diff = difference(before, after, indirect_equal_to>());
    for (auto&& name : diff.entries_only_on_left) {
        slogger.info("Dropping keyspace {}", name);
        dropped.emplace(name);
    }
    for (auto&& name : diff.entries_only_on_right) {
        slogger.info("Creating keyspace {}", name);
        auto&& value = after[name];
        created.emplace_back(schema_result_value_type{name, std::move(value)});
    }
    for (auto&& name : diff.entries_differing) {
        slogger.info("Altering keyspace {}", name);
        altered.emplace_back(name);
    }
    // Apply the keyspace creations/alterations on every shard before
    // reporting which keyspaces should be dropped by the caller.
    co_await proxy.local().get_db().invoke_on_all([&] (database& db) -> future<> {
        for (auto&& val : created) {
            auto ksm = create_keyspace_from_schema_partition(val);
            co_await db.create_keyspace(ksm);
            co_await db.get_notifier().create_keyspace(ksm);
        }
        for (auto& name : altered) {
            co_await db.update_keyspace(proxy, name);
        }
    });
    co_return dropped;
}
// Result of diffing two table/view maps: the schemas that were created,
// altered or dropped between the "before" and "after" states.
struct schema_diff {
struct dropped_schema {
global_schema_ptr schema;
// Drop timestamp shared between shards.
// NOTE(review): joinpoint appears to resolve db_clock::now() once for
// all waiters, giving every shard the same drop time — confirm against
// utils::joinpoint semantics.
utils::joinpoint jp{[] {
return make_ready_future(db_clock::now());
}};
};
struct altered_schema {
global_schema_ptr old_schema;
global_schema_ptr new_schema;
};
std::vector created;
std::vector altered;
std::vector dropped;
// Total number of schemas affected by this diff.
size_t size() const {
return created.size() + altered.size() + dropped.size();
}
};
// Classifies each table/view as dropped, created or altered by comparing
// the before/after schema-mutation maps; `create_schema` materializes a
// schema object from its mutations.
static schema_diff diff_table_or_view(distributed& proxy,
        std::map&& before,
        std::map&& after,
        noncopyable_function create_schema)
{
    schema_diff d;
    auto diff = difference(before, after);
    for (auto&& id : diff.entries_only_on_left) {
        auto&& s = proxy.local().get_db().local().find_schema(id);
        slogger.info("Dropping {}.{} id={} version={}", s->ks_name(), s->cf_name(), s->id(), s->version());
        d.dropped.emplace_back(schema_diff::dropped_schema{s});
    }
    for (auto&& id : diff.entries_only_on_right) {
        auto s = create_schema(std::move(after.at(id)));
        slogger.info("Creating {}.{} id={} version={}", s->ks_name(), s->cf_name(), s->id(), s->version());
        d.created.emplace_back(s);
    }
    for (auto&& id : diff.entries_differing) {
        auto old_s = create_schema(std::move(before.at(id)));
        auto new_s = create_schema(std::move(after.at(id)));
        slogger.info("Altering {}.{} id={} version={}", new_s->ks_name(), new_s->cf_name(), new_s->id(), new_s->version());
        d.altered.emplace_back(schema_diff::altered_schema{old_s, new_s});
    }
    return d;
}
// see the comments for merge_keyspaces()
// Atomically publishes schema changes. In particular, this function ensures
// that when a base schema and a subset of its views are modified together (i.e.,
// upon an alter table or alter type statement), then they are published together
// as well, without any deferring in-between.
static future<> merge_tables_and_views(distributed& proxy,
std::map&& tables_before,
std::map&& tables_after,
std::map&& views_before,
std::map&& views_after)
{
auto tables_diff = diff_table_or_view(proxy, std::move(tables_before), std::move(tables_after), [&] (schema_mutations sm) {
return create_table_from_mutations(proxy, std::move(sm));
});
auto views_diff = diff_table_or_view(proxy, std::move(views_before), std::move(views_after), [&] (schema_mutations sm) {
// The view schema mutation should be created with reference to the base table schema because we definitely know it by now.
// If we don't do it we are leaving a window where write commands to this schema are illegal.
// There are 3 possibilities:
// 1. The table was altered - in this case we want the view to correspond to this new table schema.
// 2. The table was just created - the table is guarantied to be published with the view in that case.
// 3. The view itself was altered - in that case we already know the base table so we can take it from
// the database object.
view_ptr vp = create_view_from_mutations(proxy, std::move(sm));
schema_ptr base_schema;
// Case 1: the base table was altered in this same batch.
for (auto&& s : tables_diff.altered) {
if (s.new_schema.get()->ks_name() == vp->ks_name() && s.new_schema.get()->cf_name() == vp->view_info()->base_name() ) {
base_schema = s.new_schema;
break;
}
}
// Case 2: the base table was created in this same batch.
if (!base_schema) {
for (auto&& s : tables_diff.created) {
if (s.get()->ks_name() == vp->ks_name() && s.get()->cf_name() == vp->view_info()->base_name() ) {
base_schema = s;
break;
}
}
}
// Case 3: the base table is unchanged -- take it from the live database.
if (!base_schema) {
base_schema = proxy.local().local_db().find_schema(vp->ks_name(), vp->view_info()->base_name());
}
// Now when we have a referenced base - just in case we are registering an old view (this can happen in a mixed cluster)
// lets make it write enabled by updating it's compute columns.
view_ptr fixed_vp = maybe_fix_legacy_secondary_index_mv_schema(proxy.local().get_db().local(), vp, base_schema, preserve_version::yes);
if(fixed_vp) {
vp = fixed_vp;
}
vp->view_info()->set_base_info(vp->view_info()->make_base_dependent_view_info(*base_schema));
return vp;
});
co_await proxy.local().get_db().invoke_on_all([&] (database& db) -> future<> {
// First drop views and *only then* the tables, if interleaved it can lead
// to a mv not finding its schema when snapshoting since the main table
// was already dropped (see https://github.com/scylladb/scylla/issues/5614)
co_await parallel_for_each(views_diff.dropped, [&] (schema_diff::dropped_schema& dt) -> future<> {
auto& s = *dt.schema.get();
co_await db.drop_column_family(s.ks_name(), s.cf_name(), [&] { return dt.jp.value(); });
});
co_await parallel_for_each(tables_diff.dropped, [&] (schema_diff::dropped_schema& dt) -> future<> {
auto& s = *dt.schema.get();
co_await db.drop_column_family(s.ks_name(), s.cf_name(), [&] { return dt.jp.value(); });
});
// In order to avoid possible races we first create the tables and only then the views.
// That way if a view seeks information about its base table it's guarantied to find it.
co_await parallel_for_each(tables_diff.created, [&] (global_schema_ptr& gs) -> future<> {
co_await db.add_column_family_and_make_directory(gs);
});
co_await parallel_for_each(views_diff.created, [&] (global_schema_ptr& gs) -> future<> {
co_await db.add_column_family_and_make_directory(gs);
});
for (auto&& gs : boost::range::join(tables_diff.created, views_diff.created)) {
db.find_column_family(gs).mark_ready_for_writes();
}
// Collect per-table column-mapping changes; consumed below via `it` by the
// update notifications, which iterate in the same (tables, then views) order.
std::vector columns_changed;
columns_changed.reserve(tables_diff.altered.size() + views_diff.altered.size());
for (auto&& altered : boost::range::join(tables_diff.altered, views_diff.altered)) {
columns_changed.push_back(db.update_column_family(altered.new_schema));
}
auto it = columns_changed.begin();
auto notify = [&] (auto& r, auto&& f) -> future<> {
auto notifications = r | boost::adaptors::transformed(f);
co_await when_all(notifications.begin(), notifications.end());
};
// View drops are notified first, because a table can only be dropped if its views are already deleted
co_await notify(views_diff.dropped, [&] (auto&& dt) { return db.get_notifier().drop_view(view_ptr(dt.schema)); });
co_await notify(tables_diff.dropped, [&] (auto&& dt) { return db.get_notifier().drop_column_family(dt.schema); });
// Table creations are notified first, in case a view is created right after the table
co_await notify(tables_diff.created, [&] (auto&& gs) { return db.get_notifier().create_column_family(gs); });
co_await notify(views_diff.created, [&] (auto&& gs) { return db.get_notifier().create_view(view_ptr(gs)); });
// Table altering is notified first, in case new base columns appear
co_await notify(tables_diff.altered, [&] (auto&& altered) { return db.get_notifier().update_column_family(altered.new_schema, *it++); });
co_await notify(views_diff.altered, [&] (auto&& altered) { return db.get_notifier().update_view(view_ptr(altered.new_schema), *it++); });
});
// Insert column_mapping into history table for altered and created tables.
//
// Entries for new tables are inserted without TTL, which means that the most
// recent schema version should always be available.
//
// For altered tables we both insert a new column mapping without TTL and
// overwrite the previous version entries with TTL to expire them eventually.
//
// Drop column mapping entries for dropped tables since these will not be TTLed automatically
// and will stay there forever if we don't clean them up manually
co_await when_all_succeed(
parallel_for_each(tables_diff.created, [&proxy] (global_schema_ptr& gs) -> future<> {
co_await store_column_mapping(proxy, gs.get(), false);
}),
parallel_for_each(tables_diff.altered, [&proxy] (schema_diff::altered_schema& altered) -> future<> {
co_await when_all_succeed(
store_column_mapping(proxy, altered.old_schema.get(), true),
store_column_mapping(proxy, altered.new_schema.get(), false));
}),
parallel_for_each(tables_diff.dropped, [&proxy] (schema_diff::dropped_schema& dropped) -> future<> {
schema_ptr s = dropped.schema.get();
co_await drop_column_mapping(s->id(), s->version());
})
);
}
// Flattens the rows of every listed keyspace into one vector of row
// pointers. The pointers reference rows owned by `result`, which must
// outlive the returned vector.
static std::vector collect_rows(const std::set& keys, const schema_result& result) {
    std::vector rows;
    for (const auto& ks : keys) {
        const auto& rs = result.find(ks)->second;
        for (const auto& row : rs->rows()) {
            rows.push_back(&row);
        }
    }
    return rows;
}
// Build a map from primary keys to rows.
static std::map, const query::result_set_row*> build_row_map(const query::result_set& result) {
    const schema_ptr& schema = result.schema();
    // A row's primary key is its partition key columns followed by its
    // clustering key columns.
    std::vector pk_columns;
    for (const auto& col : schema->partition_key_columns()) {
        pk_columns.push_back(col);
    }
    for (const auto& col : schema->clustering_key_columns()) {
        pk_columns.push_back(col);
    }
    std::map, const query::result_set_row*> row_map;
    for (const auto& row : result.rows()) {
        std::vector serialized_key;
        for (const auto& col : pk_columns) {
            const data_value* val = row.get_data_value(col.name_as_text());
            serialized_key.push_back(val->serialize_nonnull());
        }
        row_map.emplace(std::move(serialized_key), &row);
    }
    return row_map;
}
// Result of diffing two row sets: pointers to the rows that were altered,
// created or dropped. The pointers reference rows owned by the inputs of
// diff_rows() and are valid only while those inputs live.
struct row_diff {
std::vector altered;
std::vector created;
std::vector dropped;
};
// Compute which rows have been created, dropped or altered.
// A row is identified by its primary key.
// In the output, all entries of a given keyspace are together.
static row_diff diff_rows(const schema_result& before, const schema_result& after) {
    auto diff = difference(before, after, indirect_equal_to>());
    // For new or empty keyspaces, just record each row.
    auto dropped = collect_rows(diff.entries_only_on_left, before); // Keyspaces now without rows
    auto created = collect_rows(diff.entries_only_on_right, after); // New keyspaces with rows
    std::vector altered;
    for (const auto& ks : diff.entries_differing) {
        // For each keyspace that changed, compute the difference of the corresponding result_set to find which rows
        // have changed.
        auto before_rows = build_row_map(*before.find(ks)->second);
        auto after_rows = build_row_map(*after.find(ks)->second);
        auto row_delta = difference(before_rows, after_rows, indirect_equal_to());
        for (const auto& pk : row_delta.entries_only_on_left) {
            dropped.push_back(before_rows.find(pk)->second);
        }
        for (const auto& pk : row_delta.entries_only_on_right) {
            created.push_back(after_rows.find(pk)->second);
        }
        for (const auto& pk : row_delta.entries_differing) {
            altered.push_back(after_rows.find(pk)->second);
        }
    }
    return {std::move(altered), std::move(created), std::move(dropped)};
}
// Forward declaration: deserializes the named list-valued column of a row
// into a std::vector (definition appears further below).
template
static std::vector get_list(const query::result_set_row& row, const sstring& name);
// Create types for a given keyspace. This takes care of topologically sorting user defined types.
template static std::vector create_types(keyspace_metadata& ks, T&& range) {
cql_type_parser::raw_builder builder(ks);
// Names of the types (re)defined by the given rows.
std::unordered_set names;
for (const query::result_set_row& row : range) {
auto name = row.get_nonnull("type_name");
names.insert(to_bytes(name));
builder.add(std::move(name), get_list(row, "field_names"), get_list(row, "field_types"));
}
// Add user types that use any of the above types. From the
// database point of view they haven't changed since the content
// of system.types is the same for them. The runtime objects in
// the other hand now point to out of date types, so we need to
// recreate them.
for (const auto& p : ks.user_types().get_all_types()) {
const user_type& t = p.second;
if (names.contains(t->_name)) {
// Already being rebuilt from the rows above.
continue;
}
for (const auto& name : names) {
if (t->references_user_type(t->_keyspace, name)) {
std::vector field_types;
for (const data_type& f : t->field_types()) {
field_types.push_back(f->as_cql3_type().to_string());
}
// NOTE(review): a type referencing several of `names` is added once
// per match — presumably builder.build() tolerates/deduplicates
// repeated adds; verify.
builder.add(t->get_name_as_string(), t->string_field_names(), std::move(field_types));
}
}
}
return builder.build();
}
// Given a set of rows that is sorted by keyspace, create types for each keyspace.
// The topological sort in each keyspace is necessary when creating types, since we can only create a type when the
// types it reference have already been created.
static std::vector create_types(database& db, const std::vector& rows) {
std::vector ret;
// Walk runs of consecutive rows sharing the same keyspace_name.
for (auto i = rows.begin(), e = rows.end(); i != e;) {
const auto &row = *i;
auto keyspace = row->get_nonnull("keyspace_name");
// Find the end of the current keyspace's run of rows.
auto next = std::find_if(i, e, [&keyspace](const query::result_set_row* r) {
return r->get_nonnull("keyspace_name") != keyspace;
});
auto ks = db.find_keyspace(keyspace).metadata();
auto v = create_types(*ks, boost::make_iterator_range(i, next) | boost::adaptors::indirected);
ret.insert(ret.end(), std::make_move_iterator(v.begin()), std::make_move_iterator(v.end()));
i = next;
}
return ret;
}
// see the comments for merge_keyspaces()
// Reconciles user-defined types: creates/updates them on every shard now,
// and returns a deferred dropper for removed types (the drop must wait
// until dependent tables/views are gone).
static future merge_types(distributed& proxy, schema_result before, schema_result after)
{
auto diff = diff_rows(before, after);
// Create and update user types before any tables/views are created that potentially
// use those types. Similarly, defer dropping until after tables/views that may use
// some of these user types are dropped.
co_await proxy.local().get_db().invoke_on_all([&] (database& db) -> future<> {
for (auto&& user_type : create_types(db, diff.created)) {
db.find_keyspace(user_type->_keyspace).add_user_type(user_type);
co_await db.get_notifier().create_user_type(user_type);
}
for (auto&& user_type : create_types(db, diff.altered)) {
db.find_keyspace(user_type->_keyspace).add_user_type(user_type);
co_await db.get_notifier().update_user_type(user_type);
}
});
// The dropper captures `before` by move because the dropped row pointers
// reference result sets owned by it.
co_return user_types_to_drop{[&proxy, before = std::move(before), rows = std::move(diff.dropped)] () mutable -> future<> {
co_await proxy.local().get_db().invoke_on_all([&] (database& db) -> future<> {
auto dropped = create_types(db, rows);
for (auto& user_type : dropped) {
db.find_keyspace(user_type->_keyspace).remove_user_type(user_type);
co_await db.get_notifier().drop_user_type(user_type);
}
});
}};
}
// Parses each CQL type name from the row's "argument_types" list column,
// resolving them in the context of the given keyspace.
static std::vector read_arg_types(const query::result_set_row& row, const sstring& keyspace) {
    std::vector types;
    for (const auto& type_name : get_list(row, "argument_types")) {
        types.push_back(db::cql_type_parser::parse(keyspace, type_name));
    }
    return types;
}
#if 0
// see the comments for mergeKeyspaces()
private static void mergeAggregates(Map before, Map after)
{
List created = new ArrayList<>();
List altered = new ArrayList<>();
List dropped = new ArrayList<>();
MapDifference diff = Maps.difference(before, after);
// New keyspace with functions
for (Map.Entry entry : diff.entriesOnlyOnRight().entrySet())
if (entry.getValue().hasColumns())
created.addAll(createAggregatesFromAggregatesPartition(new Row(entry.getKey(), entry.getValue())).values());
for (Map.Entry> entry : diff.entriesDiffering().entrySet())
{
ColumnFamily pre = entry.getValue().leftValue();
ColumnFamily post = entry.getValue().rightValue();
if (pre.hasColumns() && post.hasColumns())
{
MapDifference delta =
Maps.difference(createAggregatesFromAggregatesPartition(new Row(entry.getKey(), pre)),
createAggregatesFromAggregatesPartition(new Row(entry.getKey(), post)));
dropped.addAll(delta.entriesOnlyOnLeft().values());
created.addAll(delta.entriesOnlyOnRight().values());
Iterables.addAll(altered, Iterables.transform(delta.entriesDiffering().values(), new Function, UDAggregate>()
{
public UDAggregate apply(MapDifference.ValueDifference pair)
{
return pair.rightValue();
}
}));
}
else if (pre.hasColumns())
{
dropped.addAll(createAggregatesFromAggregatesPartition(new Row(entry.getKey(), pre)).values());
}
else if (post.hasColumns())
{
created.addAll(createAggregatesFromAggregatesPartition(new Row(entry.getKey(), post)).values());
}
}
for (UDAggregate udf : created)
Schema.instance.addAggregate(udf);
for (UDAggregate udf : altered)
Schema.instance.updateAggregate(udf);
for (UDAggregate udf : dropped)
Schema.instance.dropAggregate(udf);
}
#endif
// Builds a user-defined (Lua) function object from one row of
// system_schema.functions, compiling the stored body on this shard.
static shared_ptr create_func(database& db, const query::result_set_row& row) {
cql3::functions::function_name name{
row.get_nonnull("keyspace_name"), row.get_nonnull("function_name")};
auto arg_types = read_arg_types(row, name.keyspace);
data_type return_type = db::cql_type_parser::parse(name.keyspace, row.get_nonnull("return_type"));
// FIXME: We already computed the bitcode in
// create_function_statement, but it is not clear how to get it
// here. In this point in the code we only get what was saved in
// system_schema.functions, and we don't want to store the bitcode
// If this was not the replica that the client connected to we do
// have to produce bitcode in at least one shard. Right now this
// gets run in each shard.
auto arg_names = get_list(row, "argument_names");
auto body = row.get_nonnull("body");
lua::runtime_config cfg = lua::make_runtime_config(db.get_config());
// Compile the stored source to bitcode (see FIXME above: per-shard cost).
auto bitcode = lua::compile(cfg, arg_names, body);
return ::make_shared(std::move(name), std::move(arg_types), std::move(arg_names),
std::move(body), row.get_nonnull("language"), std::move(return_type),
row.get_nonnull("called_on_null_input"), std::move(bitcode), std::move(cfg));
}
// Builds a user-defined aggregate from one row of system_schema.aggregates,
// resolving its state and final functions in the live function registry.
// Throws std::runtime_error if either function cannot be found.
static shared_ptr create_aggregate(database& db, const query::result_set_row& row) {
cql3::functions::function_name name{
row.get_nonnull("keyspace_name"), row.get_nonnull("aggregate_name")};
auto arg_types = read_arg_types(row, name.keyspace);
data_type state_type = db::cql_type_parser::parse(name.keyspace, row.get_nonnull("state_type"));
sstring sfunc = row.get_nonnull("state_func");
sstring ffunc = row.get_nonnull("final_func");
sstring initcond_str = row.get_nonnull("initcond");
// The state function's signature is (state_type, arg_types...).
std::vector acc_types{state_type};
acc_types.insert(acc_types.end(), arg_types.begin(), arg_types.end());
auto state_func = dynamic_pointer_cast(
cql3::functions::functions::find(cql3::functions::function_name{name.keyspace, sfunc}, acc_types));
// The final function's signature is (state_type).
auto final_func = dynamic_pointer_cast(
cql3::functions::functions::find(cql3::functions::function_name{name.keyspace, ffunc}, {state_type}));
if (!state_func) {
throw std::runtime_error(format("State function {} needed by aggregate {} not found", sfunc, name.name));
}
if (!final_func) {
throw std::runtime_error(format("Final function {} needed by aggregate {} not found", ffunc, name.name));
}
// Parse the initial accumulator value via the state type's text format.
bytes_opt initcond = state_type->from_string(initcond_str);
return ::make_shared(name, initcond, std::move(state_func), std::move(final_func));
}
// Computes the row-level delta between the before/after function states and
// applies it to the global function registry on every shard, using `create`
// to materialize each function row.
static future<> merge_functions(distributed& proxy, schema_result before, schema_result after,
        std::function(database& db, const query::result_set_row& row)> create) {
    auto diff = diff_rows(before, after);
    co_await proxy.local().get_db().invoke_on_all([&] (database& db) {
        for (const auto& row : diff.created) {
            cql3::functions::functions::add_function(create(db, *row));
        }
        for (const auto& row : diff.dropped) {
            auto func = create(db, *row);
            cql3::functions::functions::remove_function(func->name(), func->arg_types());
        }
        for (const auto& row : diff.altered) {
            cql3::functions::functions::replace_function(create(db, *row));
        }
    });
}
// Reconciles user-defined functions (see merge_keyspaces() comments).
static future<> merge_functions(distributed& proxy, schema_result before, schema_result after) {
    // Move the (potentially large) schema_result maps into the by-value
    // parameters of the generic overload instead of copying them; `before`
    // and `after` are not used again here.
    co_await merge_functions(proxy, std::move(before), std::move(after), create_func);
}
// Reconciles user-defined aggregates (see merge_keyspaces() comments).
static future<> merge_aggregates(distributed& proxy, schema_result before, schema_result after) {
    // Move the (potentially large) schema_result maps into the by-value
    // parameters of the generic overload instead of copying them; `before`
    // and `after` are not used again here.
    co_await merge_functions(proxy, std::move(before), std::move(after), create_aggregate);
}
// Overload-based dispatch helper: writes a cell either through
// set_clustered_cell() when given a clustering_key...
template
void set_cell_or_clustered(mutation& m, const clustering_key & ckey, Args && ...args) {
m.set_clustered_cell(ckey, std::forward(args)...);
}
// ...or through set_cell() when given an exploded_clustering_prefix.
template
void set_cell_or_clustered(mutation& m, const exploded_clustering_prefix & ckey, Args && ...args) {
m.set_cell(ckey, std::forward(args)...);
}
// Serializes `map` into a cell value for a map-typed column.
// For multi-cell (non-frozen) maps each entry becomes an individual live
// collection member; for frozen maps the whole map is one atomic cell.
// `f` transforms each input entry into the (key, value) pair to store.
template
static atomic_cell_or_collection
make_map_mutation(const Map& map,
const column_definition& column,
api::timestamp_type timestamp,
noncopyable_function f)
{
auto column_type = static_pointer_cast(column.type);
auto ktyp = column_type->get_keys_type();
auto vtyp = column_type->get_values_type();
if (column_type->is_multi_cell()) {
collection_mutation_description mut;
for (auto&& entry : map) {
auto te = f(entry);
mut.cells.emplace_back(ktyp->decompose(data_value(te.first)), atomic_cell::make_live(*vtyp, timestamp, vtyp->decompose(data_value(te.second)), atomic_cell::collection_member::yes));
}
return mut.serialize(*column_type);
} else {
// Frozen map: build the native value and store it as a single cell.
map_type_impl::native_type tmp;
tmp.reserve(map.size());
std::transform(map.begin(), map.end(), std::inserter(tmp, tmp.end()), std::move(f));
return atomic_cell::make_live(*column.type, timestamp, column_type->decompose(make_map_value(column_type, std::move(tmp))));
}
}
// Convenience overload: serializes the map entries as-is (identity
// transform on each (key, value) pair).
template
static atomic_cell_or_collection
make_map_mutation(const Map& map,
        const column_definition& column,
        api::timestamp_type timestamp)
{
    auto identity = [](auto&& entry) {
        return std::make_pair(data_value(entry.first), data_value(entry.second));
    };
    return make_map_mutation(map, column, timestamp, std::move(identity));
}
// Writes `map` into the named map-typed column of mutation `m`, using
// either the clustered or prefix cell setter depending on the key type.
template
static void store_map(mutation& m, const K& ckey, const bytes& name, api::timestamp_type timestamp, const Map& map) {
    const auto* column = m.schema()->get_column_definition(name);
    assert(column);
    set_cell_or_clustered(m, ckey, *column, make_map_mutation(map, *column, timestamp));
}
/*
 * Keyspace metadata serialization/deserialization.
 */
std::vector make_create_keyspace_mutations(lw_shared_ptr keyspace, api::timestamp_type timestamp, bool with_tables_and_types_and_functions)
{
    std::vector mutations;
    schema_ptr s = keyspaces();
    auto pkey = partition_key::from_singular(*s, keyspace->name());
    mutation m(s, pkey);
    auto ckey = clustering_key_prefix::make_empty();
    m.set_cell(ckey, "durable_writes", keyspace->durable_writes(), timestamp);
    // The replication map carries the strategy class under the "class" key
    // alongside the strategy's own options.
    auto replication = keyspace->strategy_options();
    replication["class"] = keyspace->strategy_name();
    store_map(m, ckey, "replication", timestamp, replication);
    mutations.emplace_back(std::move(m));
    if (with_tables_and_types_and_functions) {
        // Also serialize every UDT and table/view the keyspace contains.
        for (const auto& kv : keyspace->user_types().get_all_types()) {
            add_type_to_schema_mutation(kv.second, timestamp, mutations);
        }
        for (auto&& cfm : keyspace->cf_meta_data() | boost::adaptors::map_values) {
            add_table_or_view_to_schema_mutation(cfm, timestamp, true, mutations);
        }
    }
    return mutations;
}
std::vector make_drop_keyspace_mutations(lw_shared_ptr keyspace, api::timestamp_type timestamp)
{
    // Dropping a keyspace means writing a partition tombstone over its
    // partition in every schema table, plus the legacy built_indexes table.
    std::vector mutations;
    auto add_tombstone_for = [&] (schema_ptr table) {
        auto pkey = partition_key::from_exploded(*table, {utf8_type->decompose(keyspace->name())});
        mutation m{table, pkey};
        m.partition().apply(tombstone{timestamp, gc_clock::now()});
        mutations.emplace_back(std::move(m));
    };
    for (auto&& schema_table : all_tables(schema_features::full())) {
        add_tombstone_for(schema_table);
    }
    add_tombstone_for(db::system_keyspace::built_indexes());
    return mutations;
}
/**
 * Deserialize only Keyspace attributes without nested tables or types
 *
 * @param partition Keyspace attributes in serialized form
 */
lw_shared_ptr create_keyspace_from_schema_partition(const schema_result_value_type& result)
{
auto&& rs = result.second;
if (rs->empty()) {
throw std::runtime_error("query result has no rows");
}
auto&& row = rs->row(0);
auto keyspace_name = row.get_nonnull("keyspace_name");
// We get called from multiple shards with result set originating on only one of them.
// Cannot use copying accessors for "deep" types like map, because we will hit shared_ptr asserts
// (or screw up shared pointers)
const auto& replication = row.get_nonnull("replication");
// The replication map stores the strategy class under "class"; the
// remaining entries are the strategy's options.
std::map strategy_options;
for (auto& p : replication) {
strategy_options.emplace(value_cast(p.first), value_cast(p.second));
}
auto strategy_name = strategy_options["class"];
strategy_options.erase("class");
bool durable_writes = row.get_nonnull("durable_writes");
return make_lw_shared(keyspace_name, strategy_name, strategy_options, durable_writes);
}
// Deserializes the named list-valued column of `row` into a std::vector.
template
static std::vector get_list(const query::result_set_row& row, const sstring& name) {
    std::vector result;
    const auto& serialized_values = row.get_nonnull(name);
    for (auto&& v : serialized_values) {
        result.emplace_back(value_cast(v));
    }
    return result;
}
// Deserializes all user-defined types of one keyspace from a
// system_schema.types query result; create_types() handles the
// topological ordering.
std::vector create_types_from_schema_partition(
keyspace_metadata& ks, lw_shared_ptr result) {
return create_types(ks, result->rows());
}
// Builds one user-defined function object per row of a
// system_schema.functions query result.
std::vector> create_functions_from_schema_partition(
        database& db, lw_shared_ptr result) {
    std::vector> functions;
    for (const auto& row : result->rows()) {
        functions.push_back(create_func(db, row));
    }
    return functions;
}
/*
 * User type metadata serialization/deserialization
 */
// Serializes `values` into a cell value for a list-typed column, applying
// `f` to each element. For multi-cell (non-frozen) lists each element gets
// a fresh timeuuid cell key (preserving insertion order); frozen lists
// become a single atomic cell.
template
static atomic_cell_or_collection
make_list_mutation(const std::vector& values,
const column_definition& column,
api::timestamp_type timestamp,
Func&& f)
{
auto column_type = static_pointer_cast(column.type);
auto vtyp = column_type->get_elements_type();
if (column_type->is_multi_cell()) {
collection_mutation_description m;
m.cells.reserve(values.size());
// Tombstone at timestamp-1: clears any previous list contents while
// letting the new cells (written at `timestamp`) survive.
m.tomb.timestamp = timestamp - 1;
m.tomb.deletion_time = gc_clock::now();
for (auto&& value : values) {
auto dv = f(value);
auto uuid = utils::UUID_gen::get_time_UUID_bytes();
m.cells.emplace_back(
bytes(reinterpret_cast(uuid.data()), uuid.size()),
atomic_cell::make_live(*vtyp, timestamp, vtyp->decompose(std::move(dv)), atomic_cell::collection_member::yes));
}
return m.serialize(*column_type);
} else {
// Frozen list: build the native value and store it as one cell.
list_type_impl::native_type tmp;
tmp.reserve(values.size());
std::transform(values.begin(), values.end(), std::back_inserter(tmp), f);
return atomic_cell::make_live(*column.type, timestamp, column_type->decompose(make_list_value(column_type, std::move(tmp))));
}
}
// Serializes a user-defined type as one clustered row of
// system_schema.types: (keyspace, type_name) -> field_names, field_types.
void add_type_to_schema_mutation(user_type type, api::timestamp_type timestamp, std::vector& mutations)
{
    schema_ptr s = types();
    auto pkey = partition_key::from_singular(*s, type->_keyspace);
    auto ckey = clustering_key::from_singular(*s, type->get_name_as_string());
    mutation m{s, pkey};
    const auto* names_col = s->get_column_definition("field_names");
    auto names_cell = make_list_mutation(type->field_names(), *names_col, timestamp, [](auto&& name) {
        return utf8_type->deserialize(name);
    });
    m.set_clustered_cell(ckey, *names_col, std::move(names_cell));
    const auto* types_col = s->get_column_definition("field_types");
    auto types_cell = make_list_mutation(type->field_types(), *types_col, timestamp, [](auto&& field_type) {
        return data_value(field_type->as_cql3_type().to_string());
    });
    m.set_clustered_cell(ckey, *types_col, std::move(types_cell));
    mutations.emplace_back(std::move(m));
}
// Build the schema mutations announcing creation of a user-defined type.
// `keyspace` is unused here; it is kept for interface symmetry with the
// other make_*_mutations entry points.
std::vector make_create_type_mutations(lw_shared_ptr keyspace, user_type type, api::timestamp_type timestamp)
{
    std::vector result;
    add_type_to_schema_mutation(type, timestamp, result);
    return result;
}
// Build the schema mutations announcing removal of a user-defined type:
// a row tombstone over the type's row in the `types` schema table.
std::vector make_drop_type_mutations(lw_shared_ptr keyspace, user_type type, api::timestamp_type timestamp)
{
    std::vector result;
    schema_ptr types_schema = types();
    auto partition = partition_key::from_singular(*types_schema, type->_keyspace);
    auto row_key = clustering_key::from_singular(*types_schema, type->get_name_as_string());
    mutation drop_mut{types_schema, partition};
    drop_mut.partition().apply_delete(*types_schema, row_key, tombstone(timestamp, gc_clock::now()));
    result.emplace_back(std::move(drop_mut));
    return result;
}
/*
* UDF metadata serialization/deserialization.
*/
/*
 * Build the (mutation, clustering key) pair addressing a UDF's row in the
 * given schema table. Rows are keyed by keyspace (partition) and by
 * (function name, argument types) (clustering): the argument types are
 * rendered as CQL type strings and serialized as a frozen list so that
 * different overloads of the same name get distinct rows.
 *
 * NOTE(review): nearly identical to the user_aggregate overload below --
 * the two are candidates for a shared helper.
 */
static std::pair get_mutation(schema_ptr s, const cql3::functions::function& func) {
auto name = func.name();
auto pkey = partition_key::from_singular(*s, name.keyspace);
list_type_impl::native_type arg_types;
for (const auto& arg_type : func.arg_types()) {
arg_types.emplace_back(arg_type->as_cql3_type().to_string());
}
auto arg_list_type = list_type_impl::get_instance(utf8_type, false);
data_value arg_types_val = make_list_value(arg_list_type, std::move(arg_types));
auto ckey = clustering_key::from_exploded(
*s, {utf8_type->decompose(name.name), arg_list_type->decompose(arg_types_val)});
mutation m{s, pkey};
return {std::move(m), std::move(ckey)};
}
/*
 * Build the mutations persisting a new user-defined function in the schema
 * `functions` table: argument names (as a list column), body, language,
 * return type (as a CQL type string) and the called-on-null-input flag.
 */
std::vector make_create_function_mutations(shared_ptr func,
        api::timestamp_type timestamp) {
    schema_ptr s = functions();
    auto p = get_mutation(s, *func);
    mutation& m = p.first;
    clustering_key& ckey = p.second;
    auto argument_names_column = s->get_column_definition("argument_names");
    auto argument_names = make_list_mutation(func->arg_names(), *argument_names_column, timestamp, [] (auto&& name) {
        return name;
    });
    m.set_clustered_cell(ckey, *argument_names_column, std::move(argument_names));
    m.set_clustered_cell(ckey, "body", func->body(), timestamp);
    m.set_clustered_cell(ckey, "language", func->language(), timestamp);
    m.set_clustered_cell(ckey, "return_type", func->return_type()->as_cql3_type().to_string(), timestamp);
    m.set_clustered_cell(ckey, "called_on_null_input", func->called_on_null_input(), timestamp);
    // Move the mutation into the result instead of `return {m};` -- elements
    // of a std::initializer_list are const, so brace-return always copies.
    std::vector result;
    result.emplace_back(std::move(m));
    return result;
}
/*
 * Build the mutation that deletes a function's (or aggregate's) row --
 * identified by keyspace, name and argument types -- from the given schema
 * table, as a row tombstone.
 */
std::vector make_drop_function_mutations(schema_ptr s, const cql3::functions::function& func, api::timestamp_type timestamp) {
    auto p = get_mutation(s, func);
    mutation& m = p.first;
    clustering_key& ckey = p.second;
    m.partition().apply_delete(*s, ckey, tombstone(timestamp, gc_clock::now()));
    // `return {std::move(m)};` would still copy: std::initializer_list
    // elements are const, so the move is ineffective. emplace_back moves.
    std::vector result;
    result.emplace_back(std::move(m));
    return result;
}
// Convenience overload: drop a UDF from the `functions` schema table.
std::vector make_drop_function_mutations(shared_ptr func, api::timestamp_type timestamp) {
return make_drop_function_mutations(functions(), *func, timestamp);
}
/*
* UDA metadata serialization/deserialization
*/
/*
 * Build the (mutation, clustering key) pair addressing a user-defined
 * aggregate's row in the given schema table; keyed like functions, by
 * keyspace (partition) and (name, argument types) (clustering), with the
 * argument types serialized as a frozen list of CQL type strings.
 *
 * NOTE(review): duplicates the cql3::functions::function overload above --
 * candidates for a shared helper.
 */
static std::pair get_mutation(schema_ptr s, const cql3::functions::user_aggregate& aggregate) {
auto name = aggregate.name();
auto pkey = partition_key::from_singular(*s, name.keyspace);
list_type_impl::native_type arg_types;
for (const auto& arg_type : aggregate.arg_types()) {
arg_types.emplace_back(arg_type->as_cql3_type().to_string());
}
auto arg_list_type = list_type_impl::get_instance(utf8_type, false);
data_value arg_types_val = make_list_value(arg_list_type, std::move(arg_types));
auto ckey = clustering_key::from_exploded(
*s, {utf8_type->decompose(name.name), arg_list_type->decompose(arg_types_val)});
mutation m{s, pkey};
return {std::move(m), std::move(ckey)};
}
/*
 * Build the mutations persisting a new user-defined aggregate in the schema
 * `aggregates` table: final function, initial condition, return type, state
 * function and state type (the state type is the first argument type of the
 * state function).
 */
std::vector make_create_aggregate_mutations(shared_ptr aggregate, api::timestamp_type timestamp) {
    schema_ptr s = aggregates();
    auto p = get_mutation(s, *aggregate);
    mutation& m = p.first;
    clustering_key& ckey = p.second;
    m.set_clustered_cell(ckey, "final_func", aggregate->finalfunc().name().name, timestamp);
    data_type state_type = aggregate->sfunc().arg_types()[0];
    // NOTE(review): initcond is dereferenced unconditionally -- this assumes
    // every aggregate reaching here has an INITCOND; verify against callers,
    // since INITCOND is optional in CQL.
    m.set_clustered_cell(ckey, "initcond", state_type->to_string(*aggregate->initcond()), timestamp);
    m.set_clustered_cell(ckey, "return_type", aggregate->return_type()->as_cql3_type().to_string(), timestamp);
    m.set_clustered_cell(ckey, "state_func", aggregate->sfunc().name().name, timestamp);
    m.set_clustered_cell(ckey, "state_type", state_type->as_cql3_type().to_string(), timestamp);
    // Move the mutation into the result instead of `return {m};` -- elements
    // of a std::initializer_list are const, so brace-return always copies.
    std::vector result;
    result.emplace_back(std::move(m));
    return result;
}
// Convenience overload: drop a UDA from the `aggregates` schema table.
// Aggregates share the row layout of functions, so the function-drop helper
// is reused with the aggregates table.
std::vector make_drop_aggregate_mutations(shared_ptr aggregate, api::timestamp_type timestamp) {
return make_drop_function_mutations(aggregates(), *aggregate, timestamp);
}
/*
* Table metadata serialization/deserialization.
*/
// Build the full set of schema mutations announcing creation of a table
// (including its columns). `keyspace` is unused here; it is kept for
// interface symmetry with the other make_*_mutations entry points.
std::vector make_create_table_mutations(lw_shared_ptr keyspace, schema_ptr table, api::timestamp_type timestamp)
{
    std::vector result;
    add_table_or_view_to_schema_mutation(table, timestamp, true, result);
    return result;
}
// Write a table's tunable parameters (compaction, compression, caching,
// GC/read-repair settings, extensions, ...) as cells of its row in the
// `tables` schema table.
static void add_table_params_to_mutations(mutation& m, const clustering_key& ckey, schema_ptr table, api::timestamp_type timestamp) {
    m.set_clustered_cell(ckey, "bloom_filter_fp_chance", table->bloom_filter_fp_chance(), timestamp);
    m.set_clustered_cell(ckey, "comment", table->comment(), timestamp);
    m.set_clustered_cell(ckey, "dclocal_read_repair_chance", table->dc_local_read_repair_chance(), timestamp);
    m.set_clustered_cell(ckey, "default_time_to_live", gc_clock::as_int32(table->default_time_to_live()), timestamp);
    m.set_clustered_cell(ckey, "gc_grace_seconds", gc_clock::as_int32(table->gc_grace_seconds()), timestamp);
    m.set_clustered_cell(ckey, "max_index_interval", table->max_index_interval(), timestamp);
    m.set_clustered_cell(ckey, "memtable_flush_period_in_ms", table->memtable_flush_period(), timestamp);
    m.set_clustered_cell(ckey, "min_index_interval", table->min_index_interval(), timestamp);
    m.set_clustered_cell(ckey, "read_repair_chance", table->read_repair_chance(), timestamp);
    m.set_clustered_cell(ckey, "speculative_retry", table->speculative_retry().to_sstring(), timestamp);
    m.set_clustered_cell(ckey, "crc_check_chance", table->crc_check_chance(), timestamp);
    store_map(m, ckey, "caching", timestamp, table->caching_options().to_map());
    {
        // The compaction strategy class is stored alongside its options in
        // the same map, under the "class" key.
        auto compaction_map = table->compaction_strategy_options();
        compaction_map["class"] = sstables::compaction_strategy::name(table->configured_compaction_strategy());
        store_map(m, ckey, "compaction", timestamp, compaction_map);
    }
    store_map(m, ckey, "compression", timestamp, table->get_compressor_params().get_options());
    // Serialize schema extensions. Iterating an empty extensions map is a
    // no-op, so the previous explicit empty() guard was redundant.
    std::map ext_map;
    for (auto& p : table->extensions()) {
        ext_map.emplace(p.first, p.second->serialize());
    }
    store_map(m, ckey, "extensions", timestamp, ext_map);
}
static data_type expand_user_type(data_type);
// Apply expand_user_type to every element of `types`, preserving order.
static std::vector expand_user_types(const std::vector& types) {
    std::vector expanded;
    expanded.reserve(types.size());
    for (const auto& t : types) {
        expanded.push_back(expand_user_type(t));
    }
    return expanded;
}
// Recursively replace every user-defined type inside `original` with an
// equivalent tuple of its field types, descending through tuples, reversed
// types and collections. Used when storing dropped-column types, so that no
// stored type refers to a UDT by name (see the comment in
// add_dropped_column_to_schema_mutation below).
static data_type expand_user_type(data_type original) {
if (original->is_user_type()) {
// A UDT becomes a tuple of its (recursively expanded) field types.
return tuple_type_impl::get_instance(
expand_user_types(
static_pointer_cast(
original)->field_types()));
}
if (original->is_tuple()) {
return tuple_type_impl::get_instance(
expand_user_types(
static_pointer_cast<
const tuple_type_impl>(
original)->all_types()));
}
if (original->is_reversed()) {
return reversed_type_impl::get_instance(
expand_user_type(original->underlying_type()));
}
if (original->is_collection()) {
// Rebuild the collection with expanded element types, preserving the
// multi-cell (frozen vs. live) property.
auto ct = static_pointer_cast(original);
if (ct->is_list()) {
return list_type_impl::get_instance(
expand_user_type(ct->value_comparator()),
ct->is_multi_cell());
}
if (ct->is_map()) {
return map_type_impl::get_instance(
expand_user_type(ct->name_comparator()),
expand_user_type(ct->value_comparator()),
ct->is_multi_cell());
}
if (ct->is_set()) {
return set_type_impl::get_instance(
expand_user_type(ct->name_comparator()),
ct->is_multi_cell());
}
}
// Primitive (or otherwise UDT-free) types are returned unchanged.
return original;
}
// Record one dropped column in the `dropped_columns` schema table: the row
// is keyed by (table name, column name) and stores the drop time and the
// column's type (with UDTs expanded to tuples -- see the quoted comment).
static void add_dropped_column_to_schema_mutation(schema_ptr table, const sstring& name, const schema::dropped_column& column, api::timestamp_type timestamp, mutation& m) {
auto ckey = clustering_key::from_exploded(*dropped_columns(), {utf8_type->decompose(table->cf_name()), utf8_type->decompose(name)});
// column.timestamp is the drop timestamp, converted to a db_clock point.
db_clock::time_point tp(db_clock::duration(column.timestamp));
m.set_clustered_cell(ckey, "dropped_time", tp, timestamp);
/*
* From origin:
* we never store actual UDT names in dropped column types (so that we can safely drop types if nothing refers to
* them anymore), so before storing dropped columns in schema we expand UDTs to tuples. See expandUserTypes method.
* Because of that, we can safely pass Types.none() to parse()
*/
m.set_clustered_cell(ckey, "type", expand_user_type(column.type)->as_cql3_type().to_string(), timestamp);
}
// Build a table's row in the Scylla-specific `scylla_tables` schema table:
// stores the schema version, keeps the legacy cdc column dead, and stores
// the partitioner name only when a non-default partitioner is configured.
mutation make_scylla_tables_mutation(schema_ptr table, api::timestamp_type timestamp) {
schema_ptr s = tables();
auto pkey = partition_key::from_singular(*s, table->ks_name());
auto ckey = clustering_key::from_singular(*s, table->cf_name());
mutation m(scylla_tables(), pkey);
m.set_clustered_cell(ckey, "version", utils::UUID(table->version()), timestamp);
// Since 4.0, we stopped using cdc column in scylla tables. Extensions are
// used instead. Since we stopped reading this column in commit 861c7b5, we
// can now keep it always empty.
auto& cdc_cdef = *scylla_tables()->get_column_definition("cdc");
m.set_clustered_cell(ckey, cdc_cdef, atomic_cell::make_dead(timestamp, gc_clock::now()));
if (table->has_custom_partitioner()) {
m.set_clustered_cell(ckey, "partitioner", table->get_partitioner().name(), timestamp);
} else {
// Avoid storing anything for default partitioner, so we don't end up with
// different digests on different nodes due to the other node redacting
// the partitioner column when the per_table_partitioners cluster feature is disabled.
//
// Tombstones are not considered for schema digest, so this is okay (and
// needed in order for disabling of per_table_partitioners to have effect).
auto& cdef = *scylla_tables()->get_column_definition("partitioner");
m.set_clustered_cell(ckey, cdef, atomic_cell::make_dead(timestamp, gc_clock::now()));
}
return m;
}
// Serialize a table's full schema definition into a schema_mutations bundle:
// its row in `tables` (id, flags, tunable params), plus rows in the columns,
// computed_columns, indexes, dropped_columns and scylla_tables tables.
static schema_mutations make_table_mutations(schema_ptr table, api::timestamp_type timestamp, bool with_columns_and_triggers)
{
// When adding new schema properties, don't set cells for default values so that
// both old and new nodes will see the same version during rolling upgrades.
// For property that can be null (and can be changed), we insert tombstones, to make sure
// we don't keep a property the user has removed
schema_ptr s = tables();
auto pkey = partition_key::from_singular(*s, table->ks_name());
mutation m{s, pkey};
auto ckey = clustering_key::from_singular(*s, table->cf_name());
m.set_clustered_cell(ckey, "id", table->id(), timestamp);
auto scylla_tables_mutation = make_scylla_tables_mutation(table, timestamp);
// The "flags" list encodes table kind properties as strings.
list_type_impl::native_type flags;
if (table->is_super()) {
flags.emplace_back("super");
}
if (table->is_dense()) {
flags.emplace_back("dense");
}
if (table->is_compound()) {
flags.emplace_back("compound");
}
if (table->is_counter()) {
flags.emplace_back("counter");
}
m.set_clustered_cell(ckey, "flags", make_list_value(s->get_column_definition("flags")->type, flags), timestamp);
add_table_params_to_mutations(m, ckey, table, timestamp);
mutation columns_mutation(columns(), pkey);
mutation computed_columns_mutation(computed_columns(), pkey);
mutation dropped_columns_mutation(dropped_columns(), pkey);
mutation indices_mutation(indexes(), pkey);
if (with_columns_and_triggers) {
for (auto&& column : table->v3().all_columns()) {
// view_virtual columns exist only on materialized views.
if (column.is_view_virtual()) {
throw std::logic_error("view_virtual column found in non-view table");
}
add_column_to_schema_mutation(table, column, timestamp, columns_mutation);
if (column.is_computed()) {
add_computed_column_to_schema_mutation(table, column, timestamp, computed_columns_mutation);
}
}
for (auto&& index : table->indices()) {
add_index_to_schema_mutation(table, index, timestamp, indices_mutation);
}
// TODO: triggers
for (auto&& e : table->dropped_columns()) {
add_dropped_column_to_schema_mutation(table, e.first, e.second, timestamp, dropped_columns_mutation);
}
}
// nullopt: tables have no view_virtual_columns mutation.
return schema_mutations{std::move(m), std::move(columns_mutation), std::nullopt, std::move(computed_columns_mutation),
std::move(indices_mutation), std::move(dropped_columns_mutation),
std::move(scylla_tables_mutation)};
}
// Serialize a table or view schema and append the resulting mutations to
// `mutations`.
void add_table_or_view_to_schema_mutation(schema_ptr s, api::timestamp_type timestamp, bool with_columns, std::vector& mutations)
{
    auto sm = make_schema_mutations(s, timestamp, with_columns);
    sm.copy_to(mutations);
}
static schema_mutations make_view_mutations(view_ptr view, api::timestamp_type timestamp, bool with_columns);
static void make_drop_table_or_view_mutations(schema_ptr schema_table, schema_ptr table_or_view, api::timestamp_type timestamp, std::vector& mutations);
// Compute the schema mutations for an index set change between two versions
// of a table: drop the rows (and backing index views) of removed indexes,
// and (re)write rows plus backing views for added or modified indexes.
static void make_update_indices_mutations(
database& db,
schema_ptr old_table,
schema_ptr new_table,
api::timestamp_type timestamp,
std::vector& mutations)
{
mutation indices_mutation(indexes(), partition_key::from_singular(*indexes(), old_table->ks_name()));
auto diff = difference(old_table->all_indices(), new_table->all_indices());
// Token-column computation for index views depends on a cluster feature.
bool new_token_column_computation = db.features().cluster_supports_correct_idx_token_in_secondary_index();
// indices that are no longer needed
for (auto&& name : diff.entries_only_on_left) {
const index_metadata& index = old_table->all_indices().at(name);
drop_index_from_schema_mutation(old_table, index, timestamp, mutations);
auto& cf = db.find_column_family(old_table);
// Also drop the materialized view backing the secondary index.
auto view = cf.get_index_manager().create_view_for_index(index, new_token_column_computation);
make_drop_table_or_view_mutations(views(), view, timestamp, mutations);
}
// newly added indices and old indices with updated attributes
for (auto&& name : boost::range::join(diff.entries_differing, diff.entries_only_on_right)) {
const index_metadata& index = new_table->all_indices().at(name);
add_index_to_schema_mutation(new_table, index, timestamp, indices_mutation);
auto& cf = db.find_column_family(new_table);
auto view = cf.get_index_manager().create_view_for_index(index, new_token_column_computation);
auto view_mutations = make_view_mutations(view, timestamp, true);
view_mutations.copy_to(mutations);
}
mutations.emplace_back(std::move(indices_mutation));
}
// Append a mutation recording `name` as a dropped column of `table` in the
// `dropped_columns` schema table.
static void add_drop_column_to_mutations(schema_ptr table, const sstring& name, const schema::dropped_column& dc, api::timestamp_type timestamp, std::vector& mutations) {
    schema_ptr s = dropped_columns();
    auto pkey = partition_key::from_singular(*s, table->ks_name());
    // The clustering key is built inside add_dropped_column_to_schema_mutation;
    // the original computed an unused duplicate here, which has been removed.
    mutation m(s, pkey);
    add_dropped_column_to_schema_mutation(table, name, dc, timestamp, m);
    mutations.emplace_back(std::move(m));
}
// Compute the schema mutations for a column-set change between two versions
// of a table/view: tombstone rows for removed columns, (re)written rows for
// added/modified columns (split across the columns, view_virtual_columns and
// computed_columns tables), and new dropped_columns entries.
static void make_update_columns_mutations(schema_ptr old_table,
schema_ptr new_table,
api::timestamp_type timestamp,
bool from_thrift,
std::vector& mutations) {
mutation columns_mutation(columns(), partition_key::from_singular(*columns(), old_table->ks_name()));
// NOTE(review): the partition key for the two mutations below is built
// against *columns() rather than their own schemas -- presumably safe
// because all three tables key their partitions by keyspace name; verify.
mutation view_virtual_columns_mutation(view_virtual_columns(), partition_key::from_singular(*columns(), old_table->ks_name()));
mutation computed_columns_mutation(computed_columns(), partition_key::from_singular(*columns(), old_table->ks_name()));
auto diff = difference(old_table->v3().columns_by_name(), new_table->v3().columns_by_name());
// columns that are no longer needed
for (auto&& name : diff.entries_only_on_left) {
// Thrift only knows about the REGULAR ColumnDefinition type, so don't consider other type
// are being deleted just because they are not here.
const column_definition& column = *old_table->v3().columns_by_name().at(name);
if (from_thrift && !column.is_regular()) {
continue;
}
// Tombstone the column's row in whichever table holds it.
if (column.is_view_virtual()) {
drop_column_from_schema_mutation(view_virtual_columns(), old_table, column.name_as_text(), timestamp, mutations);
} else {
drop_column_from_schema_mutation(columns(), old_table, column.name_as_text(), timestamp, mutations);
}
if (column.is_computed()) {
drop_column_from_schema_mutation(computed_columns(), old_table, column.name_as_text(), timestamp, mutations);
}
}
// newly added columns and old columns with updated attributes
for (auto&& name : boost::range::join(diff.entries_differing, diff.entries_only_on_right)) {
const column_definition& column = *new_table->v3().columns_by_name().at(name);
if (column.is_view_virtual()) {
add_column_to_schema_mutation(new_table, column, timestamp, view_virtual_columns_mutation);
} else {
add_column_to_schema_mutation(new_table, column, timestamp, columns_mutation);
}
if (column.is_computed()) {
add_computed_column_to_schema_mutation(new_table, column, timestamp, computed_columns_mutation);
}
}
mutations.emplace_back(std::move(columns_mutation));
mutations.emplace_back(std::move(view_virtual_columns_mutation));
mutations.emplace_back(std::move(computed_columns_mutation));
// dropped columns
auto dc_diff = difference(old_table->dropped_columns(), new_table->dropped_columns());
// newly dropped columns
// columns added then dropped again
for (auto& name : boost::range::join(dc_diff.entries_differing, dc_diff.entries_only_on_right)) {
add_drop_column_to_mutations(new_table, name, new_table->dropped_columns().at(name), timestamp, mutations);
}
}
// Build the schema mutations for an ALTER TABLE: rewrite the table row
// (without columns), then apply index and column diffs between the old and
// new schema versions. Trigger updates are not implemented (see #if 0).
std::vector make_update_table_mutations(database& db,
lw_shared_ptr keyspace,
schema_ptr old_table,
schema_ptr new_table,
api::timestamp_type timestamp,
bool from_thrift)
{
std::vector mutations;
// with_columns=false: column rows are handled by the diff below.
add_table_or_view_to_schema_mutation(new_table, timestamp, false, mutations);
make_update_indices_mutations(db, old_table, new_table, timestamp, mutations);
make_update_columns_mutations(std::move(old_table), std::move(new_table), timestamp, from_thrift, mutations);
warn(unimplemented::cause::TRIGGERS);
#if 0
MapDifference triggerDiff = Maps.difference(oldTable.getTriggers(), newTable.getTriggers());
// dropped triggers
for (TriggerDefinition trigger : triggerDiff.entriesOnlyOnLeft().values())
dropTriggerFromSchemaMutation(oldTable, trigger, timestamp, mutation);
// newly created triggers
for (TriggerDefinition trigger : triggerDiff.entriesOnlyOnRight().values())
addTriggerToSchemaMutation(newTable, trigger, timestamp, mutation);
#endif
return mutations;
}
// Append the schema mutations that fully remove a table or view: a row
// tombstone in the table's/view's schema table, tombstones for every column
// row (regular, view-virtual, computed and dropped), and a tombstone for the
// scylla_tables row.
static void make_drop_table_or_view_mutations(schema_ptr schema_table,
        schema_ptr table_or_view,
        api::timestamp_type timestamp,
        std::vector& mutations) {
    auto pkey = partition_key::from_singular(*schema_table, table_or_view->ks_name());
    mutation m{schema_table, pkey};
    auto ckey = clustering_key::from_singular(*schema_table, table_or_view->cf_name());
    m.partition().apply_delete(*schema_table, ckey, tombstone(timestamp, gc_clock::now()));
    // Move instead of copying: m is not used below.
    mutations.emplace_back(std::move(m));
    for (auto& column : table_or_view->v3().all_columns()) {
        if (column.is_view_virtual()) {
            drop_column_from_schema_mutation(view_virtual_columns(), table_or_view, column.name_as_text(), timestamp, mutations);
        } else {
            drop_column_from_schema_mutation(columns(), table_or_view, column.name_as_text(), timestamp, mutations);
        }
        if (column.is_computed()) {
            drop_column_from_schema_mutation(computed_columns(), table_or_view, column.name_as_text(), timestamp, mutations);
        }
    }
    for (auto& column : table_or_view->dropped_columns() | boost::adaptors::map_keys) {
        drop_column_from_schema_mutation(dropped_columns(), table_or_view, column, timestamp, mutations);
    }
    // Also tombstone the corresponding scylla_tables row.
    mutation m1{scylla_tables(), pkey};
    m1.partition().apply_delete(*scylla_tables(), ckey, tombstone(timestamp, gc_clock::now()));
    mutations.emplace_back(std::move(m1));
}
// Build the schema mutations for DROP TABLE. Trigger cleanup and built-index
// cleanup from origin remain unported (see #if 0). `keyspace` is unused here
// but kept for interface symmetry with the other make_*_mutations functions.
std::vector make_drop_table_mutations(lw_shared_ptr keyspace, schema_ptr table, api::timestamp_type timestamp)
{
std::vector mutations;
make_drop_table_or_view_mutations(tables(), std::move(table), timestamp, mutations);
#if 0
for (TriggerDefinition trigger : table.getTriggers().values())
dropTriggerFromSchemaMutation(table, trigger, timestamp, mutation);
// TODO: get rid of in #6717
ColumnFamily indexCells = mutation.addOrGet(SystemKeyspace.BuiltIndexes);
for (String indexName : Keyspace.open(keyspace.name).getColumnFamilyStore(table.cfName).getBuiltIndexes())
indexCells.addTombstone(indexCells.getComparator().makeCellName(indexName), ldt, timestamp);
#endif
return mutations;
}
static future