auth: Migrate legacy data on boot
This change allows for seamless migration of the legacy users metadata to the new role-based metadata tables. This process is summarized in `docs/migrating-from-users-to-roles.md`. In general, if any nondefault metadata exists in the new tables, then no migration happens. If, in this case, legacy metadata still exists then a warning is written to the log. If no nondefault metadata exists in the new tables and the legacy tables exist, then each node will copy the data from the legacy tables to the new tables, performing transformations as necessary. An informational message is written to the log when the migration process starts, and when the process ends. During the process of copying, data is overwritten so that multiple nodes racing to migrate data do not conflict. Since Apache Cassandra's auth. schema uses the same table for managing roles and authentication information, some useful functions in `roles-metadata.hh` have been added to avoid code duplication. Because a superuser should be able to drop the legacy users tables from `system_auth` once the cluster has migrated to roles and is functioning correctly, we remove the restriction on altering anything in the "system_auth" keyspace. Individual tables in `system_auth` are still protected later in the function. When a cluster is upgrading from one that does not support roles to one that does, some nodes will be running old code which accesses old metadata and some will be running new code which accesses new metadata. With the help of the gossiper `feature` mechanism, clients connecting to upgraded nodes will be notified (through code in the relevant CQL statements) that modifications are not allowed until the entire cluster has upgraded.
This commit is contained in:
@@ -91,6 +91,48 @@ default_authorizer::default_authorizer(cql3::query_processor& qp, ::service::mig
|
||||
default_authorizer::~default_authorizer() {
|
||||
}
|
||||
|
||||
static const sstring legacy_table_name{"permissions"};
|
||||
|
||||
bool default_authorizer::legacy_metadata_exists() const {
|
||||
return _qp.db().local().has_schema(meta::AUTH_KS, legacy_table_name);
|
||||
}
|
||||
|
||||
future<bool> default_authorizer::any_granted() const {
|
||||
static const sstring query = sprint("SELECT * FROM %s.%s LIMIT 1", meta::AUTH_KS, PERMISSIONS_CF);
|
||||
|
||||
return _qp.process(
|
||||
query,
|
||||
db::consistency_level::LOCAL_ONE,
|
||||
{},
|
||||
true).then([this](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
return !results->empty();
|
||||
});
|
||||
}
|
||||
|
||||
future<> default_authorizer::migrate_legacy_metadata() {
|
||||
alogger.info("Starting migration of legacy permissions metadata.");
|
||||
static const sstring query = sprint("SELECT * FROM %s.%s", meta::AUTH_KS, legacy_table_name);
|
||||
|
||||
return _qp.process(
|
||||
query,
|
||||
db::consistency_level::LOCAL_ONE).then([this](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
return do_for_each(*results, [this](const cql3::untyped_result_set_row& row) {
|
||||
return do_with(
|
||||
row.get_as<sstring>("username"),
|
||||
parse_resource(row.get_as<sstring>(RESOURCE_NAME)),
|
||||
[this, &row](const auto& username, const auto& r) {
|
||||
const permission_set perms = permissions::from_strings(row.get_set<sstring>(PERMISSIONS_NAME));
|
||||
return grant(username, perms, r);
|
||||
});
|
||||
}).finally([results] {});
|
||||
}).then([] {
|
||||
alogger.info("Finished migrating legacy permissions metadata.");
|
||||
}).handle_exception([](std::exception_ptr ep) {
|
||||
alogger.error("Encountered an error during migration!");
|
||||
std::rethrow_exception(ep);
|
||||
});
|
||||
}
|
||||
|
||||
future<> default_authorizer::start() {
|
||||
static const sstring create_table = sprint(
|
||||
"CREATE TABLE %s.%s ("
|
||||
@@ -113,12 +155,28 @@ future<> default_authorizer::start() {
|
||||
PERMISSIONS_CF,
|
||||
_qp,
|
||||
create_table,
|
||||
_migration_manager);
|
||||
_migration_manager).then([this] {
|
||||
_finished = do_after_system_ready(_as, [this] {
|
||||
if (legacy_metadata_exists()) {
|
||||
return any_granted().then([this](bool any) {
|
||||
if (!any) {
|
||||
return migrate_legacy_metadata();
|
||||
}
|
||||
|
||||
alogger.warn("Ignoring legacy permissions metadata since role permissions exist.");
|
||||
return make_ready_future<>();
|
||||
});
|
||||
}
|
||||
|
||||
return make_ready_future<>();
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
future<> default_authorizer::stop() {
|
||||
return make_ready_future<>();
|
||||
_as.request_abort();
|
||||
return _finished.handle_exception_type([](const sleep_aborted&) {});
|
||||
}
|
||||
|
||||
future<permission_set>
|
||||
|
||||
@@ -43,6 +43,8 @@
|
||||
|
||||
#include <functional>
|
||||
|
||||
#include <seastar/core/abort_source.hh>
|
||||
|
||||
#include "auth/authorizer.hh"
|
||||
#include "cql3/query_processor.hh"
|
||||
#include "service/migration_manager.hh"
|
||||
@@ -56,6 +58,10 @@ class default_authorizer : public authorizer {
|
||||
|
||||
::service::migration_manager& _migration_manager;
|
||||
|
||||
abort_source _as{};
|
||||
|
||||
future<> _finished{make_ready_future<>()};
|
||||
|
||||
public:
|
||||
default_authorizer(cql3::query_processor&, ::service::migration_manager&);
|
||||
|
||||
@@ -84,6 +90,12 @@ public:
|
||||
virtual const resource_set& protected_resources() const override;
|
||||
|
||||
private:
|
||||
bool legacy_metadata_exists() const;
|
||||
|
||||
future<bool> any_granted() const;
|
||||
|
||||
future<> migrate_legacy_metadata();
|
||||
|
||||
future<> modify(stdx::string_view, permission_set, const resource&, stdx::string_view);
|
||||
};
|
||||
|
||||
|
||||
@@ -161,28 +161,81 @@ static sstring hashpw(const sstring& pass) {
|
||||
return hashpw(pass, gensalt());
|
||||
}
|
||||
|
||||
static bool has_salted_hash(const cql3::untyped_result_set_row& row) {
|
||||
return utf8_type->deserialize(row.get_blob(SALTED_HASH)) != data_value::make_null(utf8_type);
|
||||
}
|
||||
|
||||
static const sstring update_row_query = sprint(
|
||||
"UPDATE %s SET %s = ? WHERE %s = ?",
|
||||
meta::roles_table::qualified_name(),
|
||||
SALTED_HASH,
|
||||
meta::roles_table::role_col_name);
|
||||
|
||||
static const sstring legacy_table_name{"credentials"};
|
||||
|
||||
bool password_authenticator::legacy_metadata_exists() const {
|
||||
return _qp.db().local().has_schema(meta::AUTH_KS, legacy_table_name);
|
||||
}
|
||||
|
||||
future<> password_authenticator::migrate_legacy_metadata() {
|
||||
plogger.info("Starting migration of legacy authentication metadata.");
|
||||
static const sstring query = sprint("SELECT * FROM %s.%s", meta::AUTH_KS, legacy_table_name);
|
||||
|
||||
return _qp.process(
|
||||
query,
|
||||
db::consistency_level::QUORUM).then([this](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
return do_for_each(*results, [this](const cql3::untyped_result_set_row& row) {
|
||||
auto username = row.get_as<sstring>("username");
|
||||
auto salted_hash = row.get_as<sstring>(SALTED_HASH);
|
||||
|
||||
return _qp.process(
|
||||
update_row_query,
|
||||
consistency_for_user(username),
|
||||
{std::move(salted_hash), username}).discard_result();
|
||||
}).finally([results] {});
|
||||
}).then([] {
|
||||
plogger.info("Finished migrating legacy authentication metadata.");
|
||||
}).handle_exception([](std::exception_ptr ep) {
|
||||
plogger.error("Encountered an error during migration!");
|
||||
std::rethrow_exception(ep);
|
||||
});
|
||||
}
|
||||
|
||||
future<> password_authenticator::create_default_if_missing() {
|
||||
return default_role_row_satisfies(_qp, &has_salted_hash).then([this](bool exists) {
|
||||
if (!exists) {
|
||||
return _qp.process(
|
||||
update_row_query,
|
||||
db::consistency_level::QUORUM,
|
||||
{hashpw(DEFAULT_USER_PASSWORD), DEFAULT_USER_NAME}).then([](auto&&) {
|
||||
plogger.info("Created default superuser authentication record.");
|
||||
});
|
||||
}
|
||||
|
||||
return make_ready_future<>();
|
||||
});
|
||||
}
|
||||
|
||||
future<> password_authenticator::start() {
|
||||
return once_among_shards([this] {
|
||||
gensalt(); // do this once to determine usable hashing
|
||||
|
||||
_stopped = do_after_system_ready(_as, [this] {
|
||||
return has_existing_users().then([this](bool existing) {
|
||||
if (!existing) {
|
||||
static const sstring query = sprint(
|
||||
"UPDATE %s SET %s = ? WHERE %s = ?",
|
||||
meta::roles_table::qualified_name(),
|
||||
SALTED_HASH,
|
||||
meta::roles_table::role_col_name);
|
||||
return async([this] {
|
||||
if (any_nondefault_role_row_satisfies(_qp, &has_salted_hash).get0()) {
|
||||
if (legacy_metadata_exists()) {
|
||||
plogger.warn("Ignoring legacy authentication metadata since nondefault data already exist.");
|
||||
}
|
||||
|
||||
return _qp.process(
|
||||
query,
|
||||
db::consistency_level::ONE,
|
||||
{hashpw(DEFAULT_USER_PASSWORD), DEFAULT_USER_NAME}).then([](auto) {
|
||||
plogger.info("Created default user '{}'", DEFAULT_USER_NAME);
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
return make_ready_future<>();
|
||||
if (legacy_metadata_exists()) {
|
||||
migrate_legacy_metadata().get0();
|
||||
return;
|
||||
}
|
||||
|
||||
create_default_if_missing().get0();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -269,14 +322,8 @@ future<> password_authenticator::create(stdx::string_view role_name, const authe
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
static const sstring query = sprint(
|
||||
"UPDATE %s SET %s = ? WHERE %s = ?",
|
||||
meta::roles_table::qualified_name(),
|
||||
SALTED_HASH,
|
||||
meta::roles_table::role_col_name);
|
||||
|
||||
return _qp.process(
|
||||
query,
|
||||
update_row_query,
|
||||
consistency_for_user(role_name),
|
||||
{hashpw(*options.password), sstring(role_name)}).discard_result();
|
||||
}
|
||||
@@ -384,54 +431,4 @@ const resource_set& password_authenticator::protected_resources() const {
|
||||
return ::make_shared<plain_text_password_challenge>(*this);
|
||||
}
|
||||
|
||||
//
|
||||
// Similar in structure to `service::has_existing_legacy_users()`, but trying to generalize the pattern breaks all
|
||||
// kinds of module boundaries and leaks implementation details.
|
||||
//
|
||||
future<bool> password_authenticator::has_existing_users() const {
|
||||
static const auto hash_is_null = [](const cql3::untyped_result_set_row& row) {
|
||||
return utf8_type->deserialize(row.get_blob(SALTED_HASH)) == data_value::make_null(utf8_type);
|
||||
};
|
||||
|
||||
static const sstring default_user_query = sprint(
|
||||
"SELECT %s FROM %s WHERE %s = ?",
|
||||
SALTED_HASH,
|
||||
meta::roles_table::qualified_name(),
|
||||
meta::roles_table::role_col_name);
|
||||
|
||||
static const sstring all_users_query = sprint(
|
||||
"SELECT %s FROM %s LIMIT 1",
|
||||
SALTED_HASH,
|
||||
meta::roles_table::qualified_name());
|
||||
|
||||
// This logic is borrowed directly from Apache Cassandra. By first checking for the presence of the default user, we
|
||||
// can potentially avoid doing a range query with a high consistency level.
|
||||
|
||||
return _qp.process(
|
||||
default_user_query,
|
||||
db::consistency_level::ONE,
|
||||
{meta::DEFAULT_SUPERUSER_NAME},
|
||||
true).then([this](auto results) {
|
||||
if (!results->empty() && !hash_is_null(results->one())) {
|
||||
return make_ready_future<bool>(true);
|
||||
}
|
||||
|
||||
return _qp.process(
|
||||
default_user_query,
|
||||
db::consistency_level::QUORUM,
|
||||
{meta::DEFAULT_SUPERUSER_NAME},
|
||||
true).then([this](auto results) {
|
||||
if (!results->empty() && !hash_is_null(results->one())) {
|
||||
return make_ready_future<bool>(true);
|
||||
}
|
||||
|
||||
return _qp.process(
|
||||
all_users_query,
|
||||
db::consistency_level::QUORUM).then([](auto results) {
|
||||
return make_ready_future<bool>(!boost::algorithm::all_of(*results, hash_is_null));
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -92,7 +92,11 @@ public:
|
||||
virtual ::shared_ptr<sasl_challenge> new_sasl_challenge() const override;
|
||||
|
||||
private:
|
||||
future<bool> has_existing_users() const;
|
||||
bool legacy_metadata_exists() const;
|
||||
|
||||
future<> migrate_legacy_metadata();
|
||||
|
||||
future<> create_default_if_missing();
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
91
auth/roles-metadata.cc
Normal file
91
auth/roles-metadata.cc
Normal file
@@ -0,0 +1,91 @@
|
||||
/*
|
||||
* Copyright (C) 2018 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "auth/roles-metadata.hh"
|
||||
|
||||
#include <boost/algorithm/cxx11/any_of.hpp>
|
||||
#include <seastar/core/print.hh>
|
||||
#include <seastar/core/shared_ptr.hh>
|
||||
#include <seastar/core/sstring.hh>
|
||||
|
||||
#include "auth/common.hh"
|
||||
#include "cql3/query_processor.hh"
|
||||
#include "cql3/untyped_result_set.hh"
|
||||
|
||||
namespace auth {
|
||||
|
||||
future<bool> default_role_row_satisfies(
|
||||
cql3::query_processor& qp,
|
||||
std::function<bool(const cql3::untyped_result_set_row&)> p) {
|
||||
static const sstring query = sprint(
|
||||
"SELECT * FROM %s WHERE %s = ?",
|
||||
meta::roles_table::qualified_name(),
|
||||
meta::roles_table::role_col_name);
|
||||
|
||||
return do_with(std::move(p), [&qp](const auto& p) {
|
||||
return qp.process(
|
||||
query,
|
||||
db::consistency_level::ONE,
|
||||
{meta::DEFAULT_SUPERUSER_NAME},
|
||||
true).then([&qp, &p](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
if (results->empty()) {
|
||||
return qp.process(
|
||||
query,
|
||||
db::consistency_level::QUORUM,
|
||||
{meta::DEFAULT_SUPERUSER_NAME},
|
||||
true).then([&p](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
if (results->empty()) {
|
||||
return make_ready_future<bool>(false);
|
||||
}
|
||||
|
||||
return make_ready_future<bool>(p(results->one()));
|
||||
});
|
||||
}
|
||||
|
||||
return make_ready_future<bool>(p(results->one()));
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
future<bool> any_nondefault_role_row_satisfies(
|
||||
cql3::query_processor& qp,
|
||||
std::function<bool(const cql3::untyped_result_set_row&)> p) {
|
||||
static const sstring query = sprint("SELECT * FROM %s", meta::roles_table::qualified_name());
|
||||
|
||||
return do_with(std::move(p), [&qp](const auto& p) {
|
||||
return qp.process(
|
||||
query,
|
||||
db::consistency_level::QUORUM).then([&p](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
if (results->empty()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
static const sstring col_name = sstring(meta::roles_table::role_col_name);
|
||||
|
||||
return boost::algorithm::any_of(*results, [&p](const cql3::untyped_result_set_row& row) {
|
||||
const bool is_nondefault = row.get_as<sstring>(col_name) != meta::DEFAULT_SUPERUSER_NAME;
|
||||
return is_nondefault && p(row);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
@@ -22,9 +22,18 @@
|
||||
#pragma once
|
||||
|
||||
#include <experimental/string_view>
|
||||
#include <functional>
|
||||
|
||||
#include <seastar/core/future.hh>
|
||||
|
||||
#include "seastarx.hh"
|
||||
#include "stdx.hh"
|
||||
|
||||
namespace cql3 {
|
||||
class query_processor;
|
||||
class untyped_result_set_row;
|
||||
}
|
||||
|
||||
namespace auth {
|
||||
|
||||
namespace meta {
|
||||
@@ -41,4 +50,18 @@ constexpr stdx::string_view role_col_name{"role", 4};
|
||||
|
||||
}
|
||||
|
||||
///
|
||||
/// Check that the default role satisfies a predicate, or `false` if the default role does not exist.
|
||||
///
|
||||
future<bool> default_role_row_satisfies(
|
||||
cql3::query_processor&,
|
||||
std::function<bool(const cql3::untyped_result_set_row&)>);
|
||||
|
||||
///
|
||||
/// Check that any nondefault role satisfies a predicate. `false` if no nondefault roles exist.
|
||||
///
|
||||
future<bool> any_nondefault_role_row_satisfies(
|
||||
cql3::query_processor&,
|
||||
std::function<bool(const cql3::untyped_result_set_row&)>);
|
||||
|
||||
}
|
||||
|
||||
@@ -178,40 +178,61 @@ future<> standard_role_manager::create_metadata_tables_if_missing() {
|
||||
_migration_manager));
|
||||
}
|
||||
|
||||
// Must be called within the scope of a seastar thread
|
||||
bool standard_role_manager::has_existing_roles() const {
|
||||
static const sstring default_role_query = sprint(
|
||||
"SELECT * FROM %s WHERE %s = ?",
|
||||
meta::roles_table::qualified_name(),
|
||||
meta::roles_table::role_col_name);
|
||||
future<> standard_role_manager::create_default_role_if_missing() {
|
||||
return default_role_row_satisfies(_qp, [](auto&&) { return true; }).then([this](bool exists) {
|
||||
if (!exists) {
|
||||
static const sstring query = sprint(
|
||||
"INSERT INTO %s (%s, is_superuser, can_login) VALUES (?, true, true)",
|
||||
meta::roles_table::qualified_name(),
|
||||
meta::roles_table::role_col_name);
|
||||
|
||||
static const sstring all_roles_query = sprint("SELECT * FROM %s LIMIT 1", meta::roles_table::qualified_name());
|
||||
return _qp.process(
|
||||
query,
|
||||
db::consistency_level::QUORUM,
|
||||
{meta::DEFAULT_SUPERUSER_NAME}).then([](auto&&) {
|
||||
log.info("Created default superuser role '{}'.", meta::DEFAULT_SUPERUSER_NAME);
|
||||
return make_ready_future<>();
|
||||
});
|
||||
}
|
||||
|
||||
// This logic is borrowed directly from Apache Cassandra. By first checking for the presence of the default role, we
|
||||
// can potentially avoid doing a range query with a high consistency level.
|
||||
return make_ready_future<>();
|
||||
}).handle_exception_type([](const exceptions::unavailable_exception& e) {
|
||||
log.warn("Skipped default role setup: some nodes were not ready; will retry");
|
||||
return make_exception_future<>(e);
|
||||
});
|
||||
}
|
||||
|
||||
const bool default_exists_one = !_qp.process(
|
||||
default_role_query,
|
||||
db::consistency_level::ONE,
|
||||
{meta::DEFAULT_SUPERUSER_NAME},
|
||||
true).get0()->empty();
|
||||
static const sstring legacy_table_name{"users"};
|
||||
|
||||
if (default_exists_one) {
|
||||
return true;
|
||||
}
|
||||
bool standard_role_manager::legacy_metadata_exists() const {
|
||||
return _qp.db().local().has_schema(meta::AUTH_KS, legacy_table_name);
|
||||
}
|
||||
|
||||
const bool default_exists_quorum = !_qp.process(
|
||||
default_role_query,
|
||||
db::consistency_level::QUORUM,
|
||||
{meta::DEFAULT_SUPERUSER_NAME},
|
||||
true).get0()->empty();
|
||||
future<> standard_role_manager::migrate_legacy_metadata() {
|
||||
log.info("Starting migration of legacy user metadata.");
|
||||
static const sstring query = sprint("SELECT * FROM %s.%s", meta::AUTH_KS, legacy_table_name);
|
||||
|
||||
if (default_exists_quorum) {
|
||||
return true;
|
||||
}
|
||||
return _qp.process(
|
||||
query,
|
||||
db::consistency_level::QUORUM).then([this](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
return do_for_each(*results, [this](const cql3::untyped_result_set_row& row) {
|
||||
role_config config;
|
||||
config.is_superuser = row.get_as<bool>("super");
|
||||
config.can_login = true;
|
||||
|
||||
const bool any_exists_quorum = !_qp.process(all_roles_query, db::consistency_level::QUORUM).get0()->empty();
|
||||
return any_exists_quorum;
|
||||
return do_with(
|
||||
row.get_as<sstring>("name"),
|
||||
std::move(config),
|
||||
[this](const auto& name, const auto& config) {
|
||||
return this->create_or_replace(name, config);
|
||||
});
|
||||
}).finally([results] {});
|
||||
}).then([] {
|
||||
log.info("Finished migrating legacy user metadata.");
|
||||
}).handle_exception([](std::exception_ptr ep) {
|
||||
log.error("Encountered an error during migration!");
|
||||
std::rethrow_exception(ep);
|
||||
});
|
||||
}
|
||||
|
||||
future<> standard_role_manager::start() {
|
||||
@@ -219,23 +240,20 @@ future<> standard_role_manager::start() {
|
||||
return this->create_metadata_tables_if_missing().then([this] {
|
||||
_stopped = auth::do_after_system_ready(_as, [this] {
|
||||
return seastar::async([this] {
|
||||
try {
|
||||
if (this->has_existing_roles()) {
|
||||
return;
|
||||
if (any_nondefault_role_row_satisfies(_qp, [](auto&&) { return true; }).get0()) {
|
||||
if (this->legacy_metadata_exists()) {
|
||||
log.warn("Ignoring legacy user metadata since nondefault roles already exist.");
|
||||
}
|
||||
// Create the default superuser.
|
||||
_qp.process(
|
||||
sprint(
|
||||
"INSERT INTO %s (%s, is_superuser, can_login) VALUES (?, true, true)",
|
||||
meta::roles_table::qualified_name(),
|
||||
meta::roles_table::role_col_name),
|
||||
db::consistency_level::QUORUM,
|
||||
{meta::DEFAULT_SUPERUSER_NAME}).get();
|
||||
log.info("Created default superuser role '{}'.", meta::DEFAULT_SUPERUSER_NAME);
|
||||
} catch (const exceptions::unavailable_exception& e) {
|
||||
log.warn("Skipped default role setup: some nodes were not ready; will retry");
|
||||
throw e;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
if (this->legacy_metadata_exists()) {
|
||||
this->migrate_legacy_metadata().get0();
|
||||
return;
|
||||
}
|
||||
|
||||
create_default_role_if_missing().get0();
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -247,23 +265,27 @@ future<> standard_role_manager::stop() {
|
||||
return _stopped.handle_exception_type([] (const sleep_aborted&) { });
|
||||
}
|
||||
|
||||
future<>
|
||||
standard_role_manager::create(stdx::string_view role_name, const role_config& c) {
|
||||
future<> standard_role_manager::create_or_replace(stdx::string_view role_name, const role_config& c) {
|
||||
static const sstring query = sprint(
|
||||
"INSERT INTO %s (%s, is_superuser, can_login) VALUES (?, ?, ?)",
|
||||
meta::roles_table::qualified_name(),
|
||||
meta::roles_table::role_col_name);
|
||||
|
||||
return _qp.process(
|
||||
query,
|
||||
consistency_for_role(role_name),
|
||||
{sstring(role_name), c.is_superuser, c.can_login},
|
||||
true).discard_result();
|
||||
}
|
||||
|
||||
future<>
|
||||
standard_role_manager::create(stdx::string_view role_name, const role_config& c) {
|
||||
return this->exists(role_name).then([this, role_name, &c](bool role_exists) {
|
||||
if (role_exists) {
|
||||
throw role_already_exists(role_name);
|
||||
}
|
||||
|
||||
return _qp.process(
|
||||
query,
|
||||
consistency_for_role(role_name),
|
||||
{sstring(role_name), c.is_superuser, c.can_login},
|
||||
true).discard_result();
|
||||
return this->create_or_replace(role_name, c);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -92,7 +92,13 @@ private:
|
||||
|
||||
future<> create_metadata_tables_if_missing();
|
||||
|
||||
bool has_existing_roles() const;
|
||||
bool legacy_metadata_exists() const;
|
||||
|
||||
future<> migrate_legacy_metadata();
|
||||
|
||||
future<> create_default_role_if_missing();
|
||||
|
||||
future<> create_or_replace(stdx::string_view role_name, const role_config&);
|
||||
|
||||
future<> modify_membership(stdx::string_view role_name, stdx::string_view grantee_name, membership_change);
|
||||
};
|
||||
|
||||
@@ -579,6 +579,7 @@ scylla_core = (['database.cc',
|
||||
'auth/common.cc',
|
||||
'auth/default_authorizer.cc',
|
||||
'auth/resource.cc',
|
||||
'auth/roles-metadata.cc',
|
||||
'auth/password_authenticator.cc',
|
||||
'auth/permission.cc',
|
||||
'auth/permissions_cache.cc',
|
||||
|
||||
@@ -62,6 +62,8 @@ public:
|
||||
, _options(std::move(options)) {
|
||||
}
|
||||
|
||||
void validate(distributed<service::storage_proxy>&, const service::client_state&) override;
|
||||
|
||||
virtual future<> check_access(const service::client_state&) override;
|
||||
|
||||
virtual future<::shared_ptr<cql_transport::messages::result_message>>
|
||||
|
||||
@@ -70,6 +70,8 @@ public:
|
||||
, _if_not_exists(if_not_exists) {
|
||||
}
|
||||
|
||||
void validate(distributed<service::storage_proxy>&, const service::client_state&) override;
|
||||
|
||||
virtual future<> check_access(const service::client_state&) override;
|
||||
|
||||
virtual future<::shared_ptr<cql_transport::messages::result_message>>
|
||||
|
||||
@@ -46,6 +46,7 @@
|
||||
#include "cql3/query_processor.hh"
|
||||
#include "cql3/query_options.hh"
|
||||
#include "cql3/selection/selection.hh"
|
||||
#include "service/storage_service.hh"
|
||||
|
||||
static auth::permission_set filter_applicable_permissions(const auth::permission_set& ps, const auth::resource& r) {
|
||||
auto const filtered_permissions = auth::permission_set::from_mask(ps.mask() & r.applicable_permissions().mask());
|
||||
@@ -67,6 +68,11 @@ cql3::statements::permission_altering_statement::permission_altering_statement(
|
||||
}
|
||||
|
||||
void cql3::statements::permission_altering_statement::validate(distributed<service::storage_proxy>& proxy, const service::client_state& state) {
|
||||
if (!service::get_local_storage_service().cluster_supports_roles()) {
|
||||
throw exceptions::invalid_request_exception(
|
||||
"You cannot modify access-control information until the cluster has fully upgraded.");
|
||||
}
|
||||
|
||||
// a check to ensure the existence of the user isn't being leaked by user existence check.
|
||||
state.ensure_not_anonymous();
|
||||
}
|
||||
|
||||
@@ -54,6 +54,7 @@
|
||||
#include "cql3/statements/revoke_role_statement.hh"
|
||||
#include "cql3/statements/request_validations.hh"
|
||||
#include "exceptions/exceptions.hh"
|
||||
#include "service/storage_service.hh"
|
||||
#include "transport/messages/result_message.hh"
|
||||
#include "unimplemented.hh"
|
||||
|
||||
@@ -79,10 +80,23 @@ static future<result_message_ptr> void_result_message() {
|
||||
return make_ready_future<result_message_ptr>(nullptr);
|
||||
}
|
||||
|
||||
void validate_cluster_support() {
|
||||
// TODO(jhaberku): All other feature-checking CQL statements also grab the `storage_service` globally. I'm not sure
|
||||
// if it's accessible through some other object, but for now I'm sticking with convention.
|
||||
if (!service::get_local_storage_service().cluster_supports_roles()) {
|
||||
throw exceptions::invalid_request_exception(
|
||||
"You cannot modify access-control information until the cluster has fully upgraded.");
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// `create_role_statement`
|
||||
//
|
||||
|
||||
void create_role_statement::validate(distributed<service::storage_proxy>&, const service::client_state&) {
|
||||
validate_cluster_support();
|
||||
}
|
||||
|
||||
future<> create_role_statement::check_access(const service::client_state& state) {
|
||||
state.ensure_not_anonymous();
|
||||
|
||||
@@ -131,6 +145,10 @@ create_role_statement::execute(distributed<service::storage_proxy>&,
|
||||
// `alter_role_statement`
|
||||
//
|
||||
|
||||
void alter_role_statement::validate(distributed<service::storage_proxy>&, const service::client_state&) {
|
||||
validate_cluster_support();
|
||||
}
|
||||
|
||||
future<> alter_role_statement::check_access(const service::client_state& state) {
|
||||
state.ensure_not_anonymous();
|
||||
|
||||
@@ -206,6 +224,8 @@ alter_role_statement::execute(distributed<service::storage_proxy>&, service::que
|
||||
//
|
||||
|
||||
void drop_role_statement::validate(distributed<service::storage_proxy>&, const service::client_state& state) {
|
||||
validate_cluster_support();
|
||||
|
||||
if (*state.user() == auth::authenticated_user(_role)) {
|
||||
throw request_validations::invalid_request("Cannot DROP primary role for current login.");
|
||||
}
|
||||
|
||||
37
docs/migrating-from-users-to-roles.md
Normal file
37
docs/migrating-from-users-to-roles.md
Normal file
@@ -0,0 +1,37 @@
|
||||
# Migrating from users to roles
|
||||
|
||||
Previously, Scylla's access-control system ("auth", which lives in the `auth/` directory) operated based on users. A user could log in to the system and could be granted permissions to perform certain functions on resources in the database: particular keyspaces and tables.
|
||||
|
||||
With the introduction of roles, access-control rules are more flexible. A role is an entity that has an associated set of granted permissions. Unlike the old users, however, a role can be granted another role. If role `a` is granted to role `b`, then all the permissions of `a` are also inherited by `b`. All users are roles, but not all roles are users: we distinguish a user from a role based on whether or not the role can log in to the system to perform queries.
|
||||
|
||||
The change to roles from users required a change in the schema of the metadata tables populated internally by Scylla. Nonetheless, the auth. system includes code to perform this migration automatically in the background. The rest of this page describes the migration procedure in detail.
|
||||
|
||||
## Strategy
|
||||
|
||||
Generally, Scylla supports cluster upgrades with no down-time when nodes are upgraded one at a time. We assume that the replication factor of the `system_auth` keyspace is equal to the size of the cluster, as this is a prerequisite for enabling access-control for all versions of Scylla.
|
||||
|
||||
We also require that during a cluster upgrade from users to roles, no changes to access-control are made (to users, roles, or permissions).
|
||||
|
||||
In a cluster consisting of `n` nodes running an older version of Scylla, a single node is stopped. Its `scylla-server` executable is upgraded and then the node is restarted. After the node restarts, each of the modules that encompass access-control (role-management, authentication, and authorization) performs its own migration.
|
||||
|
||||
For each module, migration follows the same process:
|
||||
|
||||
- If any non-default metadata exists in the new tables, then nothing happens: no migration takes place. If the legacy table exists, a warning is printed to the log.
|
||||
- If the legacy table exists, then each entry in the old table is transformed and written to the new table. Existing entries in the new table are overwritten (see below for an explanation). A log message indicates that the migration process is starting and when it has finished.
|
||||
- If there is an error migrating data, an error is written to the log and the exception is allowed to propagate.
|
||||
|
||||
The reason that existing entries in the new table are overwritten during migration is that if two nodes are restarted at once (though we do not support this) and both observe the non-existence of non-default metadata and start migrating, we still ensure that all data are copied over.
|
||||
|
||||
After a single node has been upgraded, a client may connect to an old node, or to the new one.
|
||||
|
||||
If the client connects to an old node, any changes to access-control will succeed (because the code accesses the old tables, which still exist), but are unsupported: changes will not necessarily be reflected in the new tables.
|
||||
|
||||
If a client connects to a new node, all access-control CQL statements will access the new tables. A new gossiper feature flag, `ROLES`, helps to enforce the restriction that no changes to access-control can be made during an upgrade. Unless all nodes in the cluster advertise their support for `ROLES`, CQL statements which modify access-control will log an error and not succeed.
|
||||
|
||||
Once all nodes have been upgraded, it is important to verify the contents of the roles-related tables (`system_auth.{roles, role_permissions, role_members}`) as a superuser and compare them to the old users tables (`system_auth.{users,credentials,permissions}`). Alternatively, you may explore the migrated access-control data through the usual CQL statements: LIST PERMISSIONS, LIST ROLES, LIST USERS, etc.
|
||||
|
||||
After you are confident all the metadata has migrated successfully and the system is operating as expected, you may drop the legacy tables.
|
||||
|
||||
## Recovery
|
||||
|
||||
If a particular node fails to migrate metadata, it will log an error message. The best way to move forward in this case is to drop any entries in the new tables and to restart the node.
|
||||
@@ -160,11 +160,19 @@ future<> service::client_state::has_access(const sstring& ks, auth::permission p
|
||||
throw exceptions::unauthorized_exception(ks + " keyspace is not user-modifiable.");
|
||||
}
|
||||
|
||||
// we want to disallow altering AUTH_KS and TRACING_KS.
|
||||
for (auto& n : { auth::meta::AUTH_KS, tracing::trace_keyspace_helper::KEYSPACE_NAME }) {
|
||||
if (name == n && p == auth::permission::DROP) {
|
||||
throw exceptions::unauthorized_exception(sprint("Cannot %s %s", auth::permissions::to_string(p), resource));
|
||||
}
|
||||
//
|
||||
// we want to disallow dropping any contents of TRACING_KS and disallow dropping the `auth::meta::AUTH_KS`
|
||||
// keyspace.
|
||||
//
|
||||
|
||||
const bool dropping_anything_in_tracing = (name == tracing::trace_keyspace_helper::KEYSPACE_NAME)
|
||||
&& (p == auth::permission::DROP);
|
||||
|
||||
const bool dropping_auth_keyspace = (resource == auth::make_data_resource(auth::meta::AUTH_KS))
|
||||
&& (p == auth::permission::DROP);
|
||||
|
||||
if (dropping_anything_in_tracing || dropping_auth_keyspace) {
|
||||
throw exceptions::unauthorized_exception(sprint("Cannot %s %s", auth::permissions::to_string(p), resource));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -93,6 +93,7 @@ static const sstring SCHEMA_TABLES_V3 = "SCHEMA_TABLES_V3";
|
||||
static const sstring CORRECT_NON_COMPOUND_RANGE_TOMBSTONES = "CORRECT_NON_COMPOUND_RANGE_TOMBSTONES";
|
||||
static const sstring WRITE_FAILURE_REPLY_FEATURE = "WRITE_FAILURE_REPLY";
|
||||
static const sstring XXHASH_FEATURE = "XXHASH";
|
||||
static const sstring ROLES_FEATURE = "ROLES";
|
||||
|
||||
distributed<storage_service> _the_storage_service;
|
||||
|
||||
@@ -142,6 +143,7 @@ sstring storage_service::get_config_supported_features() {
|
||||
CORRECT_NON_COMPOUND_RANGE_TOMBSTONES,
|
||||
WRITE_FAILURE_REPLY_FEATURE,
|
||||
XXHASH_FEATURE,
|
||||
ROLES_FEATURE,
|
||||
};
|
||||
if (service::get_local_storage_service()._db.local().get_config().experimental()) {
|
||||
features.push_back(MATERIALIZED_VIEWS_FEATURE);
|
||||
@@ -355,6 +357,7 @@ void storage_service::register_features() {
|
||||
_correct_non_compound_range_tombstones = gms::feature(CORRECT_NON_COMPOUND_RANGE_TOMBSTONES);
|
||||
_write_failure_reply_feature = gms::feature(WRITE_FAILURE_REPLY_FEATURE);
|
||||
_xxhash_feature = gms::feature(XXHASH_FEATURE);
|
||||
_roles_feature = gms::feature(ROLES_FEATURE);
|
||||
|
||||
if (_db.local().get_config().experimental()) {
|
||||
_materialized_views_feature = gms::feature(MATERIALIZED_VIEWS_FEATURE);
|
||||
|
||||
@@ -277,6 +277,7 @@ private:
|
||||
gms::feature _correct_non_compound_range_tombstones;
|
||||
gms::feature _write_failure_reply_feature;
|
||||
gms::feature _xxhash_feature;
|
||||
gms::feature _roles_feature;
|
||||
public:
|
||||
void enable_all_features() {
|
||||
_range_tombstones_feature.enable();
|
||||
@@ -290,6 +291,7 @@ public:
|
||||
_correct_non_compound_range_tombstones.enable();
|
||||
_write_failure_reply_feature.enable();
|
||||
_xxhash_feature.enable();
|
||||
_roles_feature.enable();
|
||||
}
|
||||
|
||||
void finish_bootstrapping() {
|
||||
@@ -2267,6 +2269,10 @@ public:
|
||||
bool cluster_supports_xxhash_digest_algorithm() const {
|
||||
return bool(_xxhash_feature);
|
||||
}
|
||||
|
||||
bool cluster_supports_roles() const {
|
||||
return bool(_roles_feature);
|
||||
}
|
||||
};
|
||||
|
||||
inline future<> init_storage_service(distributed<database>& db, sharded<auth::service>& auth_service) {
|
||||
|
||||
Reference in New Issue
Block a user