view: change validate_view_keyspace to allow MVs if RF=Racks

The function validate_view_keyspace checks if a keyspace is eligible for
having materialized views, and it is used for validation when creating a
MV or a MV-based index.

Previously, it was required that the rf_rack_valid_keyspaces option be
set in order for tablets-based keyspaces to be considered eligible, and
the RF-rack condition was enforced only when the option was set.

Instead of this, we change the validation to allow MVs in a keyspace if
the RF-rack condition is satisfied for the keyspace - regardless of the
config option.

We remove the config validation for views on startup that validates the
option `rf_rack_valid_keyspaces` is set if there are any views with
tablets, since this is not required anymore.

We can do this without worrying about upgrades because this change will
take effect starting from 2025.4, the first release in which MVs with
tablets are out of the experimental phase.

We update the test for MV and index restrictions in tablets keyspaces
according to the new requirements.

* Create MV/index: previously the test checked that it's allowed only if
  the config option `rf_rack_valid_keyspaces` is set. This is changed
  now so it's always allowed to create MV/index if the keyspace is
  RF-rack-valid. Update the test to verify that we can create MV/index
  when the keyspace is RF-rack-valid, even if the rf_rack option is not
  set, and verify that it fails when the keyspace is RF-rack-invalid.
* Alter: Add a new test to verify that while a keyspace has views, it
  can't be altered to become RF-rack-invalid.
This commit is contained in:
Michael Litvak
2025-11-02 13:24:37 +01:00
parent de1bb84fca
commit 8df61f6d99
13 changed files with 178 additions and 200 deletions

View File

@@ -14,6 +14,7 @@
#include "db/view/view.hh"
#include "exceptions/exceptions.hh"
#include "index/vector_index.hh"
#include "locator/token_metadata_fwd.hh"
#include "prepared_statement.hh"
#include "replica/database.hh"
#include "types/types.hh"
@@ -259,7 +260,7 @@ create_index_statement::validate(query_processor& qp, const service::client_stat
_properties->validate();
}
std::vector<::shared_ptr<index_target>> create_index_statement::validate_while_executing(data_dictionary::database db) const {
std::vector<::shared_ptr<index_target>> create_index_statement::validate_while_executing(data_dictionary::database db, locator::token_metadata_ptr tmptr) const {
auto schema = validation::validate_column_family(db, keyspace(), column_family());
if (schema->is_counter()) {
@@ -283,7 +284,7 @@ std::vector<::shared_ptr<index_target>> create_index_statement::validate_while_e
// Custom indexes need to validate this property themselves, if they need it.
if (!_properties || !_properties->custom_class) {
try {
db::view::validate_view_keyspace(db, keyspace());
db::view::validate_view_keyspace(db, keyspace(), tmptr);
} catch (const std::exception& e) {
// The type of the thrown exception is not specified, so we need to wrap it here.
throw exceptions::invalid_request_exception(e.what());
@@ -537,8 +538,8 @@ void create_index_statement::validate_targets_for_multi_column_index(std::vector
}
}
std::optional<create_index_statement::base_schema_with_new_index> create_index_statement::build_index_schema(data_dictionary::database db) const {
auto targets = validate_while_executing(db);
std::optional<create_index_statement::base_schema_with_new_index> create_index_statement::build_index_schema(data_dictionary::database db, locator::token_metadata_ptr tmptr) const {
auto targets = validate_while_executing(db, tmptr);
auto schema = db.find_schema(keyspace(), column_family());
@@ -596,7 +597,7 @@ std::optional<create_index_statement::base_schema_with_new_index> create_index_s
future<std::tuple<::shared_ptr<cql_transport::event::schema_change>, utils::chunked_vector<mutation>, cql3::cql_warnings_vec>>
create_index_statement::prepare_schema_mutations(query_processor& qp, const query_options&, api::timestamp_type ts) const {
using namespace cql_transport;
auto res = build_index_schema(qp.db());
auto res = build_index_schema(qp.db(), qp.proxy().get_token_metadata_ptr());
::shared_ptr<event::schema_change> ret;
utils::chunked_vector<mutation> muts;

View File

@@ -53,7 +53,7 @@ public:
schema_ptr schema;
index_metadata index;
};
std::optional<base_schema_with_new_index> build_index_schema(data_dictionary::database db) const;
std::optional<base_schema_with_new_index> build_index_schema(data_dictionary::database db, locator::token_metadata_ptr tmptr) const;
view_ptr create_view_for_index(const schema_ptr, const index_metadata& im, const data_dictionary::database&) const;
private:
void validate_for_local_index(const schema& schema) const;
@@ -69,7 +69,7 @@ private:
const sstring& name,
index_metadata_kind kind,
const index_options_map& options);
std::vector<::shared_ptr<index_target>> validate_while_executing(data_dictionary::database db) const;
std::vector<::shared_ptr<index_target>> validate_while_executing(data_dictionary::database db, locator::token_metadata_ptr tmptr) const;
};
}

View File

@@ -105,7 +105,7 @@ static bool validate_primary_key(
return new_non_pk_column;
}
std::pair<view_ptr, cql3::cql_warnings_vec> create_view_statement::prepare_view(data_dictionary::database db) const {
std::pair<view_ptr, cql3::cql_warnings_vec> create_view_statement::prepare_view(data_dictionary::database db, locator::token_metadata_ptr tmptr) const {
// We need to make sure that:
// - materialized view name is valid
// - primary key includes all columns in base table's primary key
@@ -153,7 +153,7 @@ std::pair<view_ptr, cql3::cql_warnings_vec> create_view_statement::prepare_view(
schema_ptr schema = validation::validate_column_family(db, _base_name.get_keyspace(), _base_name.get_column_family());
try {
db::view::validate_view_keyspace(db, keyspace());
db::view::validate_view_keyspace(db, keyspace(), tmptr);
} catch (const std::exception& e) {
// The type of the thrown exception is not specified, so we need to wrap it here.
throw exceptions::invalid_request_exception(e.what());
@@ -414,7 +414,7 @@ std::pair<view_ptr, cql3::cql_warnings_vec> create_view_statement::prepare_view(
future<std::tuple<::shared_ptr<cql_transport::event::schema_change>, utils::chunked_vector<mutation>, cql3::cql_warnings_vec>>
create_view_statement::prepare_schema_mutations(query_processor& qp, const query_options&, api::timestamp_type ts) const {
utils::chunked_vector<mutation> m;
auto [definition, warnings] = prepare_view(qp.db());
auto [definition, warnings] = prepare_view(qp.db(), qp.proxy().get_token_metadata_ptr());
try {
m = co_await service::prepare_new_view_announcement(qp.proxy(), std::move(definition), ts);
} catch (const exceptions::already_exists_exception& e) {

View File

@@ -48,7 +48,7 @@ public:
std::vector<::shared_ptr<cql3::column_identifier::raw>> clustering_keys,
bool if_not_exists);
std::pair<view_ptr, cql3::cql_warnings_vec> prepare_view(data_dictionary::database db) const;
std::pair<view_ptr, cql3::cql_warnings_vec> prepare_view(data_dictionary::database db, locator::token_metadata_ptr tmptr) const;
auto& properties() {
return _properties;

View File

@@ -3633,20 +3633,20 @@ sstring build_status_to_sstring(build_status status) {
on_internal_error(vlogger, fmt::format("Unknown view build status: {}", (int)status));
}
void validate_view_keyspace(const data_dictionary::database& db, std::string_view keyspace_name) {
const bool tablet_views_enabled = db.features().views_with_tablets;
// Note: if the configuration option `rf_rack_valid_keyspaces` is enabled, we can be
// sure that all tablet-based keyspaces are RF-rack-valid. We check that
// at start-up and then we don't allow for creating RF-rack-invalid keyspaces.
const bool rf_rack_valid_keyspaces = db.get_config().rf_rack_valid_keyspaces();
const bool required_config = tablet_views_enabled && rf_rack_valid_keyspaces;
void validate_view_keyspace(const data_dictionary::database& db, std::string_view keyspace_name, locator::token_metadata_ptr tmptr) {
const auto& rs = db.find_keyspace(keyspace_name).get_replication_strategy();
const bool uses_tablets = db.find_keyspace(keyspace_name).get_replication_strategy().uses_tablets();
if (!required_config && uses_tablets) {
if (rs.uses_tablets() && !db.features().views_with_tablets) {
throw std::logic_error("Materialized views and secondary indexes are not supported on base tables with tablets. "
"To be able to use them, enable the configuration option `rf_rack_valid_keyspaces` and make sure "
"that the cluster feature `VIEWS_WITH_TABLETS` is enabled.");
"To be able to use them, make sure all nodes in the cluster are upgraded.");
}
try {
locator::assert_rf_rack_valid_keyspace(keyspace_name, tmptr, rs);
} catch (const std::exception& e) {
throw std::logic_error(fmt::format(
"Materialized views and secondary indexes are not supported on the keyspace '{}': {}",
keyspace_name, e.what()));
}
}

View File

@@ -9,6 +9,7 @@
#pragma once
#include "gc_clock.hh"
#include "locator/token_metadata_fwd.hh"
#include "query/query-request.hh"
#include "schema/schema_fwd.hh"
#include "readers/mutation_reader.hh"
@@ -318,7 +319,7 @@ endpoints_to_update get_view_natural_endpoint(
///
/// Preconditions:
/// * The provided `keyspace_name` must correspond to an existing keyspace.
void validate_view_keyspace(const data_dictionary::database&, std::string_view keyspace_name);
void validate_view_keyspace(const data_dictionary::database&, std::string_view keyspace_name, locator::token_metadata_ptr tmptr);
}

View File

@@ -2218,11 +2218,6 @@ To start the scylla server proper, simply invoke as: scylla server (or just scyl
startlog.info("Verifying that all of the keyspaces are RF-rack-valid");
db.local().check_rf_rack_validity(token_metadata.local().get());
// Materialized views and secondary indexes are still restricted and require specific configuration
// options to work. Make sure that if there are existing views or indexes, they don't violate
// the requirements imposed on them.
db.local().validate_tablet_views_indexes();
// Semantic validation of sstable compression parameters from config.
// Adding here (i.e., after `join_cluster`) to ensure that the
// required SSTABLE_COMPRESSION_DICTS cluster feature has been negotiated.

View File

@@ -3599,37 +3599,6 @@ void database::check_rf_rack_validity(const locator::token_metadata_ptr tmptr) c
}
}
void database::validate_tablet_views_indexes() const {
dblog.info("Verifying that all existing materialized views are valid");
const data_dictionary::database& db = this->as_data_dictionary();
std::flat_set<std::string_view> invalid_keyspaces;
for (const view_ptr& view : get_views()) {
const auto& ks = view->ks_name();
try {
db::view::validate_view_keyspace(db, ks);
} catch (...) {
invalid_keyspaces.emplace(ks);
}
}
if (invalid_keyspaces.empty()) {
dblog.info("All existing materialized views are valid");
return;
}
// `std::flat_set` guarantees iteration in the increasing order.
const std::string ks_list = invalid_keyspaces
| std::views::join_with(std::string_view(", "))
| std::ranges::to<std::string>();
dblog.warn("Some of the existing keyspaces violate the requirements "
"for using materialized views or secondary indexes. Those features require enabling "
"the configuration option `rf_rack_valid_keyspaces` and the cluster feature "
"`VIEWS_WITH_TABLETS`. The keyspaces that violate that condition: {}", ks_list);
}
utils::chunked_vector<uint64_t> compute_random_sorted_ints(uint64_t max_value, uint64_t n_values) {
static thread_local std::minstd_rand rng{std::random_device{}()};
std::uniform_int_distribution<uint64_t> dist(0, max_value);

View File

@@ -2125,19 +2125,6 @@ public:
// must contain a complete list of racks and data centers in the cluster.
void check_rf_rack_validity(const locator::token_metadata_ptr) const;
/// Verify that all existing materialized views are valid.
///
/// We consider a materialized view valid if one of the following
/// conditions is satisfied:
/// * it resides in a vnode-based keyspace,
/// * it resides in a tablet-based keyspace, the cluster feature `VIEWS_WITH_TABLETS`
/// is enabled, and the configuration option `rf_rack_valid_keyspaces` is enabled.
///
/// Result:
/// * Depending on whether there are invalid materialized views, the function will
/// log that either everything's OK, or that there are some keyspaces that violate
/// the requirement.
void validate_tablet_views_indexes() const;
private:
// SSTable sampling might require considerable amounts of memory,
// so we want to limit the number of concurrent sampling operations.

View File

@@ -1,120 +0,0 @@
#
# Copyright (C) 2025-present ScyllaDB
#
# SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
#
import asyncio
import logging
import pytest
from cassandra.cluster import Session as CassandraSession
from cassandra.protocol import InvalidRequest
from test.pylib.manager_client import ManagerClient
logger = logging.getLogger(__name__)
@pytest.mark.asyncio
@pytest.mark.parametrize("schema_kind", ["view", "index"])
# Views no longer depend on the experimental feature `views-with-tablets`,
# but let's keep these test cases to make sure it's really not needed anymore.
@pytest.mark.parametrize("views_with_tablets", [False, True])
@pytest.mark.parametrize("rf_rack_valid_keyspaces", [False, True])
async def test_mv_and_index_restrictions_in_tablet_keyspaces(manager: ManagerClient, schema_kind: str,
views_with_tablets: bool, rf_rack_valid_keyspaces: bool):
"""
Verify that creating a materialized view or a secondary index in a tablet-based keyspace
is only possible when both the configuration option `rf_rack_valid_keyspaces` is enabled.
"""
async def create_mv_or_index(cql: CassandraSession):
if schema_kind == "view":
await cql.run_async("CREATE MATERIALIZED VIEW ks.mv "
"AS SELECT * FROM ks.t "
"WHERE p IS NOT NULL AND v IS NOT NULL "
"PRIMARY KEY (v, p)")
elif schema_kind == "index":
await cql.run_async("CREATE INDEX myindex ON ks.t(v)")
else:
assert False, "Unknown schema kind"
async def try_pass(cql: CassandraSession):
try:
await cql.run_async(f"CREATE KEYSPACE ks WITH replication = "
"{'class': 'NetworkTopologyStrategy', 'replication_factor': 1} "
"AND tablets = {'enabled': true}")
await cql.run_async(f"CREATE TABLE ks.t (p int PRIMARY KEY, v int)")
await create_mv_or_index(cql)
finally:
await cql.run_async(f"DROP KEYSPACE IF EXISTS ks")
async def try_fail(cql: CassandraSession):
err = "Materialized views and secondary indexes are not supported on base tables with tablets. " \
"To be able to use them, enable the configuration option `rf_rack_valid_keyspaces` and " \
"make sure that the cluster feature `VIEWS_WITH_TABLETS` is enabled."
with pytest.raises(InvalidRequest, match=err):
await try_pass(cql)
feature = ["views-with-tablets"] if views_with_tablets else []
config = {"experimental_features": feature, "rf_rack_valid_keyspaces": rf_rack_valid_keyspaces}
srv = await manager.server_add(config=config)
# Necessary because we're restarting the node multiple times.
cql, _ = await manager.get_ready_cql([srv])
logger.debug("Obtained CassandraSession object")
# We just want to validate the statements. We don't need to wait.
assert hasattr(cql.cluster, "max_schema_agreement_wait")
cql.cluster.max_schema_agreement_wait = 0
logger.debug("Set max_schema_agreement_wait to 0")
if rf_rack_valid_keyspaces:
await try_pass(cql)
logger.debug("try_pass finished successfully")
else:
await try_fail(cql)
logger.debug("try_fail finished successfully")
@pytest.mark.asyncio
@pytest.mark.parametrize("view_type", ["view", "index"])
async def test_view_startup(manager: ManagerClient, view_type: str):
"""
Verify that starting a node with materialized views in a tablet-based
keyspace when the configuration option `rf_rack_valid_keyspaces` is disabled
leads to a warning.
"""
srv = await manager.server_add(config={"rf_rack_valid_keyspaces": True})
cql = manager.get_cql()
await cql.run_async("CREATE KEYSPACE ks WITH replication = "
"{'class': 'NetworkTopologyStrategy', 'replication_factor': 1} "
"AND tablets = {'enabled': true}")
await cql.run_async("CREATE TABLE ks.t (p int PRIMARY KEY, v int)")
if view_type == "view":
await cql.run_async("CREATE MATERIALIZED VIEW ks.mv "
"AS SELECT * FROM ks.t "
"WHERE p IS NOT NULL AND v IS NOT NULL "
"PRIMARY KEY (v, p)")
elif view_type == "index":
await cql.run_async("CREATE INDEX i ON ks.t(v)")
else:
logger.error(f"Unexpected view type: {view_type}")
assert False
await manager.server_stop(srv.server_id)
await manager.server_update_config(srv.server_id, "rf_rack_valid_keyspaces", False)
log = await manager.server_open_log(srv.server_id)
mark = await log.mark()
start_task = asyncio.create_task(manager.server_start(srv.server_id))
err = "Some of the existing keyspaces violate the requirements for using materialized " \
"views or secondary indexes. Those features require enabling the configuration " \
"option `rf_rack_valid_keyspaces` and the cluster feature `VIEWS_WITH_TABLETS`. " \
"The keyspaces that violate that condition: ks"
await log.wait_for(err, from_mark=mark)
await start_task

View File

@@ -0,0 +1,150 @@
#
# Copyright (C) 2025-present ScyllaDB
#
# SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
#
import asyncio
import logging
import pytest
from cassandra.cluster import Session as CassandraSession
from cassandra.protocol import InvalidRequest
from test.cluster.conftest import skip_mode
from test.pylib.manager_client import ManagerClient
logger = logging.getLogger(__name__)
@pytest.mark.asyncio
@pytest.mark.parametrize("rf_kind", ["numeric", "rack_list"])
@skip_mode('release', 'error injections are not supported in release mode')
async def test_create_mv_and_index_restrictions_in_tablet_keyspaces(manager: ManagerClient, rf_kind: str):
    """
    Verify that creating a materialized view or a secondary index in a tablet-based keyspace
    is allowed only when RF equals the number of racks, even if `rf_rack_valid_keyspaces` is false.
    The constraint is relevant only for numeric-RF keyspaces. For rack-list keyspaces, it should
    always be allowed.
    """
    # Issue the CREATE statement for the requested schema object (MV or index) on ks.t.
    async def create_mv_or_index(cql: CassandraSession, schema_kind: str):
        if schema_kind == "view":
            await cql.run_async("CREATE MATERIALIZED VIEW ks.mv "
                                "AS SELECT * FROM ks.t "
                                "WHERE p IS NOT NULL AND v IS NOT NULL "
                                "PRIMARY KEY (v, p)")
        elif schema_kind == "index":
            await cql.run_async("CREATE INDEX myindex ON ks.t(v)")
        else:
            assert False, "Unknown schema kind"

    # Create a tablet-based keyspace with the given RF (rendered either as a number
    # or as a rack list, per the parametrization) and attempt to create the MV/index
    # in it. When `expected_error` is given, the creation must be rejected with a
    # matching InvalidRequest; otherwise it must succeed. The keyspace is dropped
    # in the `finally` so every invocation starts from a clean slate.
    async def test_create_mv_or_index_with_rf(cql: CassandraSession, schema_kind: str, rf: int, expected_error: str | None = None):
        if rf_kind == "numeric":
            rf_str = str(rf)
        else:
            # e.g. rf=2 -> "['rack1', 'rack2']"
            rf_str = "[" + ", ".join([f"'rack{i+1}'" for i in range(rf)]) + "]"
        try:
            await cql.run_async(f"CREATE KEYSPACE ks WITH replication = "
                                f"{{'class': 'NetworkTopologyStrategy', 'dc1': {rf_str}}} "
                                "AND tablets = {'enabled': true}")
            await cql.run_async("CREATE TABLE ks.t (p int PRIMARY KEY, v int)")
            if expected_error:
                with pytest.raises(InvalidRequest, match=expected_error):
                    await create_mv_or_index(cql, schema_kind)
            else:
                await create_mv_or_index(cql, schema_kind)
        finally:
            await cql.run_async("DROP KEYSPACE IF EXISTS ks")

    # Deliberately disable the option: the RF-rack condition alone must decide eligibility.
    config = {'rf_rack_valid_keyspaces': False}
    if rf_kind == "numeric":
        # Suppress the RACK_LIST_RF cluster feature at startup — presumably this keeps
        # the numeric RF from being treated as a rack list, so the numeric-RF code
        # path is actually exercised; TODO confirm against the feature's semantics.
        config = config | {'error_injections_at_startup': [{'name': 'suppress_features', 'value': 'RACK_LIST_RF'}]}
    # Three nodes in three distinct racks of one DC: RF=3 means RF == number of racks.
    servers = await manager.servers_add(3, config=config, cmdline=['--logger-log-level', 'tablets=debug'], property_file=[
        {'dc': 'dc1', 'rack': 'rack1'},
        {'dc': 'dc1', 'rack': 'rack2'},
        {'dc': 'dc1', 'rack': 'rack3'},
    ])
    cql, _ = await manager.get_ready_cql(servers)

    for schema_kind in ["view", "index"]:
        # Create MV/index with RF=Racks - should always succeed
        await test_create_mv_or_index_with_rf(cql, schema_kind, 3)
        # Create MV/index with RF!=Racks - should fail for numeric RF
        if rf_kind == "numeric":
            expected_error = "required to be RF-rack-valid"
        else:
            expected_error = None
        await test_create_mv_or_index_with_rf(cql, schema_kind, 2, expected_error=expected_error)
@pytest.mark.asyncio
@pytest.mark.parametrize("rf_kind", ["numeric", "rack_list"])
@skip_mode('release', 'error injections are not supported in release mode')
async def test_alter_keyspace_rf_rack_restriction_with_mv_and_index(manager: ManagerClient, rf_kind: str):
    """
    Verify that ALTER KEYSPACE fails if it changes RF so that RF != number of racks
    for a tablets-based keyspace while it has a materialized view or a secondary index, even if
    `rf_rack_valid_keyspaces` is false.
    It should fail while the keyspace has an MV/index and succeed once it doesn't.
    The constraint is relevant only for numeric-RF keyspaces. For rack-list keyspaces, the
    ALTER should always be allowed.
    """
    # Deliberately disable the option: the RF-rack condition alone must decide eligibility.
    config = {'rf_rack_valid_keyspaces': False}
    if rf_kind == "numeric":
        # Suppress the RACK_LIST_RF cluster feature at startup — presumably this keeps
        # the numeric RF from being treated as a rack list; TODO confirm.
        config = config | {'error_injections_at_startup': [{'name': 'suppress_features', 'value': 'RACK_LIST_RF'}]}
    # Three nodes in three distinct racks of one DC: RF=3 means RF == number of racks.
    servers = await manager.servers_add(3, config=config, cmdline=['--logger-log-level', 'tablets=debug'], property_file=[
        {'dc': 'dc1', 'rack': 'rack1'},
        {'dc': 'dc1', 'rack': 'rack2'},
        {'dc': 'dc1', 'rack': 'rack3'},
    ])
    cql, _ = await manager.get_ready_cql(servers)

    for schema_kind in ["view", "index"]:
        # Create a keyspace and MV/index with RF=Racks
        # (a distinct keyspace per schema kind, so the iterations are independent)
        ks = f"ks_{schema_kind}"
        if rf_kind == "numeric":
            rf_str = "3"
        else:
            rf_str = "['rack1', 'rack2', 'rack3']"
        await cql.run_async(f"CREATE KEYSPACE {ks} WITH replication = "
                            f"{{'class': 'NetworkTopologyStrategy', 'dc1': {rf_str}}} "
                            "AND tablets = {'enabled': true}")
        await cql.run_async(f"CREATE TABLE {ks}.t (p int PRIMARY KEY, v int)")
        if schema_kind == "view":
            await cql.run_async(f"CREATE MATERIALIZED VIEW {ks}.mv "
                                f"AS SELECT * FROM {ks}.t "
                                "WHERE p IS NOT NULL AND v IS NOT NULL "
                                "PRIMARY KEY (v, p)")
        elif schema_kind == "index":
            await cql.run_async(f"CREATE INDEX myindex ON {ks}.t(v)")
        else:
            assert False, "Unknown schema kind"
        if rf_kind == "numeric":
            # Try to ALTER KEYSPACE to RF!=Racks - should fail because it has MV/index
            with pytest.raises(InvalidRequest, match="required to be RF-rack-valid"):
                await cql.run_async(f"ALTER KEYSPACE {ks} WITH replication = "
                                    f"{{'class': 'NetworkTopologyStrategy', 'dc1': 2}}")
            # drop the view/index and verify that ALTER KEYSPACE is now allowed
            if schema_kind == "view":
                await cql.run_async(f"DROP MATERIALIZED VIEW {ks}.mv")
            elif schema_kind == "index":
                await cql.run_async(f"DROP INDEX {ks}.myindex")
            await cql.run_async(f"ALTER KEYSPACE {ks} WITH replication = "
                                f"{{'class': 'NetworkTopologyStrategy', 'dc1': 2}}")
        else:
            # For rack-list RF, ALTER KEYSPACE should succeed
            await cql.run_async(f"ALTER KEYSPACE {ks} WITH replication = "
                                f"{{'class': 'NetworkTopologyStrategy', 'dc1': ['rack1', 'rack2']}}")
        # Clean up this iteration's keyspace before testing the next schema kind.
        await cql.run_async(f"DROP KEYSPACE {ks}")

View File

@@ -1112,11 +1112,6 @@ private:
startlog.info("Verifying that all of the keyspaces are RF-rack-valid");
_db.local().check_rf_rack_validity(_token_metadata.local().get());
// Materialized views and secondary indexes are still restricted and require specific configuration
// options to work. Make sure that if there are existing views or indexes, they don't violate
// the requirements imposed on them.
_db.local().validate_tablet_views_indexes();
utils::loading_cache_config perm_cache_config;
perm_cache_config.max_size = cfg->permissions_cache_max_entries();
perm_cache_config.expiry = std::chrono::milliseconds(cfg->permissions_validity_in_ms());

View File

@@ -320,14 +320,14 @@ std::vector<schema_ptr> do_load_schemas(const db::config& cfg, std::string_view
}
real_db.tables.emplace_back(dd_impl, dd_impl.unwrap(ks), std::move(schema), true);
} else if (auto p = dynamic_cast<cql3::statements::create_view_statement*>(statement)) {
auto&& [view, warnings] = p->prepare_view(db);
auto&& [view, warnings] = p->prepare_view(db, token_metadata.local().get());
auto it = std::find_if(real_db.tables.begin(), real_db.tables.end(), [&] (const table& t) { return t.schema->ks_name() == view->ks_name() && t.schema->cf_name() == view->cf_name(); });
if (it != real_db.tables.end()) {
continue; // view already exists
}
real_db.tables.emplace_back(dd_impl, dd_impl.unwrap(ks), view, true);
} else if (auto p = dynamic_cast<cql3::statements::create_index_statement*>(statement)) {
auto res = p->build_index_schema(db);
auto res = p->build_index_schema(db, token_metadata.local().get());
if (!res) {
continue; // index already exists
}