treewide: use seastar::format() or fmt::format() explicitly

before this change, we rely on `using namespace seastar` to use
`seastar::format()` without qualifying the `format()` with its
namespace. this worked fine until we changed the type of the
format-string parameter of `seastar::format()` from `const char*` to
`fmt::format_string<...>`. this change practically invited
`seastar::format()` to the club of `std::format()` and `fmt::format()`,
where all members accept a templated parameter as its `fmt`
parameter. and `seastar::format()` is not the best candidate anymore.
although argument-dependent lookup (ADL for short) favors the
function which is in the same namespace as its parameter,
`using namespace` makes `seastar::format()` more competitive,
so both `std::format()` and `seastar::format()` are considered
as candidates.

that is what is happening in scylladb at quite a few call sites of
`format()`, hence ADL is not able to tell which function is the winner
in the name lookup:

```
/__w/scylladb/scylladb/mutation/mutation_fragment_stream_validator.cc:265:12: error: call to 'format' is ambiguous
  265 |     return format("{} ({}.{} {})", _name_view, s.ks_name(), s.cf_name(), s.id());
      |            ^~~~~~
/usr/bin/../lib/gcc/x86_64-redhat-linux/14/../../../../include/c++/14/format:4290:5: note: candidate function [with _Args = <const std::basic_string_view<char> &, const seastar::basic_sstring<char, unsigned int, 15> &, const seastar::basic_sstring<char, unsigned int, 15> &, const utils::tagged_uuid<table_id_tag> &>]
 4290 |     format(format_string<_Args...> __fmt, _Args&&... __args)
      |     ^
/__w/scylladb/scylladb/seastar/include/seastar/core/print.hh:143:1: note: candidate function [with A = <const std::basic_string_view<char> &, const seastar::basic_sstring<char, unsigned int, 15> &, const seastar::basic_sstring<char, unsigned int, 15> &, const utils::tagged_uuid<table_id_tag> &>]
  143 | format(fmt::format_string<A...> fmt, A&&... a) {
      | ^
```

in this change, we

change all `format()` to either `fmt::format()` or `seastar::format()`
with following rules:
- if the caller expects an `sstring` or `std::string_view`, change to
  `seastar::format()`
- if the caller expects an `std::string`, change to `fmt::format()`.
  because, `sstring::operator std::basic_string` would incur a deep
  copy.

we will need another change to enable scylladb to compile with the
latest seastar. namely, to pass the format string as a templated
parameter down to helper functions which format their parameters.
to minimize the scope of this change, let's include that change when
bumping up the seastar submodule, as that change will depend on
the seastar change.

Signed-off-by: Kefu Chai <kefu.chai@scylladb.com>
This commit is contained in:
Kefu Chai
2023-07-10 10:01:54 +08:00
committed by Avi Kivity
parent f227f4332c
commit 3e84d43f93
113 changed files with 419 additions and 414 deletions

View File

@@ -34,7 +34,7 @@ future<std::string> get_key_from_roles(service::storage_proxy& proxy, auth::serv
const column_definition* salted_hash_col = schema->get_column_definition(bytes("salted_hash"));
const column_definition* can_login_col = schema->get_column_definition(bytes("can_login"));
if (!salted_hash_col || !can_login_col) {
co_await coroutine::return_exception(api_error::unrecognized_client(format("Credentials cannot be fetched for: {}", username)));
co_await coroutine::return_exception(api_error::unrecognized_client(fmt::format("Credentials cannot be fetched for: {}", username)));
}
auto selection = cql3::selection::selection::for_columns(schema, {salted_hash_col, can_login_col});
auto partition_slice = query::partition_slice(std::move(bounds), {}, query::column_id_vector{salted_hash_col->id, can_login_col->id}, selection->get_query_options());
@@ -51,18 +51,18 @@ future<std::string> get_key_from_roles(service::storage_proxy& proxy, auth::serv
auto result_set = builder.build();
if (result_set->empty()) {
co_await coroutine::return_exception(api_error::unrecognized_client(format("User not found: {}", username)));
co_await coroutine::return_exception(api_error::unrecognized_client(fmt::format("User not found: {}", username)));
}
const auto& result = result_set->rows().front();
bool can_login = result[1] && value_cast<bool>(boolean_type->deserialize(*result[1]));
if (!can_login) {
// This is a valid role name, but has "login=False" so should not be
// usable for authentication (see #19735).
co_await coroutine::return_exception(api_error::unrecognized_client(format("Role {} has login=false so cannot be used for login", username)));
co_await coroutine::return_exception(api_error::unrecognized_client(fmt::format("Role {} has login=false so cannot be used for login", username)));
}
const managed_bytes_opt& salted_hash = result.front();
if (!salted_hash) {
co_await coroutine::return_exception(api_error::unrecognized_client(format("No password found for user: {}", username)));
co_await coroutine::return_exception(api_error::unrecognized_client(fmt::format("No password found for user: {}", username)));
}
co_return value_cast<sstring>(utf8_type->deserialize(*salted_hash));
}

View File

@@ -42,12 +42,12 @@ comparison_operator_type get_comparison_operator(const rjson::value& comparison_
{"NOT_CONTAINS", comparison_operator_type::NOT_CONTAINS},
};
if (!comparison_operator.IsString()) {
throw api_error::validation(format("Invalid comparison operator definition {}", rjson::print(comparison_operator)));
throw api_error::validation(fmt::format("Invalid comparison operator definition {}", rjson::print(comparison_operator)));
}
std::string op = comparison_operator.GetString();
auto it = ops.find(op);
if (it == ops.end()) {
throw api_error::validation(format("Unsupported comparison operator {}", op));
throw api_error::validation(fmt::format("Unsupported comparison operator {}", op));
}
return it->second;
}
@@ -429,7 +429,7 @@ static bool check_BETWEEN(const T& v, const T& lb, const T& ub, bool bounds_from
if (cmp_lt()(ub, lb)) {
if (bounds_from_query) {
throw api_error::validation(
format("BETWEEN operator requires lower_bound <= upper_bound, but {} > {}", lb, ub));
fmt::format("BETWEEN operator requires lower_bound <= upper_bound, but {} > {}", lb, ub));
} else {
return false;
}
@@ -613,7 +613,7 @@ conditional_operator_type get_conditional_operator(const rjson::value& req) {
return conditional_operator_type::OR;
} else {
throw api_error::validation(
format("'ConditionalOperator' parameter must be AND, OR or missing. Found {}.", s));
fmt::format("'ConditionalOperator' parameter must be AND, OR or missing. Found {}.", s));
}
}

View File

@@ -195,12 +195,12 @@ static std::string view_name(const std::string& table_name, std::string_view ind
}
if (!valid_table_name_chars(index_name)) {
throw api_error::validation(
format("IndexName '{}' must satisfy regular expression pattern: [a-zA-Z0-9_.-]+", index_name));
fmt::format("IndexName '{}' must satisfy regular expression pattern: [a-zA-Z0-9_.-]+", index_name));
}
std::string ret = table_name + delim + std::string(index_name);
if (ret.length() > max_table_name_length) {
throw api_error::validation(
format("The total length of TableName ('{}') and IndexName ('{}') cannot exceed {} characters",
fmt::format("The total length of TableName ('{}') and IndexName ('{}') cannot exceed {} characters",
table_name, index_name, max_table_name_length - delim.size()));
}
return ret;
@@ -255,7 +255,7 @@ schema_ptr executor::find_table(service::storage_proxy& proxy, const rjson::valu
validate_table_name(table_name.value());
throw api_error::resource_not_found(
format("Requested resource not found: Table: {} not found", *table_name));
fmt::format("Requested resource not found: Table: {} not found", *table_name));
}
}
@@ -309,7 +309,7 @@ get_table_or_view(service::storage_proxy& proxy, const rjson::value& request) {
validate_table_name(table_name);
throw api_error::resource_not_found(
format("Requested resource not found: Internal table: {}.{} not found", internal_ks_name, internal_table_name));
fmt::format("Requested resource not found: Internal table: {}.{} not found", internal_ks_name, internal_table_name));
}
}
@@ -323,7 +323,7 @@ get_table_or_view(service::storage_proxy& proxy, const rjson::value& request) {
type = table_or_view_type::gsi;
} else {
throw api_error::validation(
format("Non-string IndexName '{}'", rjson::to_string_view(*index_name)));
fmt::format("Non-string IndexName '{}'", rjson::to_string_view(*index_name)));
}
// If no tables for global indexes were found, the index may be local
if (!proxy.data_dictionary().has_schema(keyspace_name, table_name)) {
@@ -341,14 +341,14 @@ get_table_or_view(service::storage_proxy& proxy, const rjson::value& request) {
// does exist but the index does not (ValidationException).
if (proxy.data_dictionary().has_schema(keyspace_name, orig_table_name)) {
throw api_error::validation(
format("Requested resource not found: Index '{}' for table '{}'", index_name->GetString(), orig_table_name));
fmt::format("Requested resource not found: Index '{}' for table '{}'", index_name->GetString(), orig_table_name));
} else {
throw api_error::resource_not_found(
format("Requested resource not found: Table: {} not found", orig_table_name));
fmt::format("Requested resource not found: Table: {} not found", orig_table_name));
}
} else {
throw api_error::resource_not_found(
format("Requested resource not found: Table: {} not found", table_name));
fmt::format("Requested resource not found: Table: {} not found", table_name));
}
}
}
@@ -361,7 +361,7 @@ static std::string get_string_attribute(const rjson::value& value, std::string_v
if (!attribute_value)
return default_return;
if (!attribute_value->IsString()) {
throw api_error::validation(format("Expected string value for attribute {}, got: {}",
throw api_error::validation(fmt::format("Expected string value for attribute {}, got: {}",
attribute_name, value));
}
return std::string(attribute_value->GetString(), attribute_value->GetStringLength());
@@ -376,7 +376,7 @@ static bool get_bool_attribute(const rjson::value& value, std::string_view attri
return default_return;
}
if (!attribute_value->IsBool()) {
throw api_error::validation(format("Expected boolean value for attribute {}, got: {}",
throw api_error::validation(fmt::format("Expected boolean value for attribute {}, got: {}",
attribute_name, value));
}
return attribute_value->GetBool();
@@ -390,7 +390,7 @@ static std::optional<int> get_int_attribute(const rjson::value& value, std::stri
if (!attribute_value)
return {};
if (!attribute_value->IsInt()) {
throw api_error::validation(format("Expected integer value for attribute {}, got: {}",
throw api_error::validation(fmt::format("Expected integer value for attribute {}, got: {}",
attribute_name, value));
}
return attribute_value->GetInt();
@@ -438,7 +438,7 @@ static rjson::value generate_arn_for_table(const schema& schema) {
}
static rjson::value generate_arn_for_index(const schema& schema, std::string_view index_name) {
return rjson::from_string(format(
return rjson::from_string(fmt::format(
"arn:scylla:alternator:{}:scylla:table/{}/index/{}",
schema.ks_name(), schema.cf_name(), index_name));
}
@@ -621,7 +621,7 @@ future<executor::request_return_type> executor::delete_table(client_state& clien
std::optional<data_dictionary::table> tbl = p.local().data_dictionary().try_find_table(keyspace_name, table_name);
if (!tbl) {
throw api_error::resource_not_found(format("Requested resource not found: Table: {} not found", table_name));
throw api_error::resource_not_found(fmt::format("Requested resource not found: Table: {} not found", table_name));
}
auto m = co_await service::prepare_column_family_drop_announcement(_proxy, keyspace_name, table_name, group0_guard.write_timestamp(), service::drop_views::yes);
@@ -651,7 +651,7 @@ future<executor::request_return_type> executor::delete_table(client_state& clien
}
std::tie(m, group0_guard) = co_await std::move(mc).extract();
co_await mm.announce(std::move(m), std::move(group0_guard), format("alternator-executor: delete {} table", table_name));
co_await mm.announce(std::move(m), std::move(group0_guard), fmt::format("alternator-executor: delete {} table", table_name));
});
rjson::value response = rjson::empty_object();
@@ -671,7 +671,7 @@ static data_type parse_key_type(const std::string& type) {
}
}
throw api_error::validation(
format("Invalid key type '{}', can only be S, B or N.", type));
fmt::format("Invalid key type '{}', can only be S, B or N.", type));
}
@@ -681,7 +681,7 @@ static void add_column(schema_builder& builder, const std::string& name, const r
// second column with the same name. We should fix this, by renaming
// some column names which we want to reserve.
if (name == executor::ATTRS_COLUMN_NAME) {
throw api_error::validation(format("Column name '{}' is currently reserved. FIXME.", name));
throw api_error::validation(fmt::format("Column name '{}' is currently reserved. FIXME.", name));
}
for (auto it = attribute_definitions.Begin(); it != attribute_definitions.End(); ++it) {
const rjson::value& attribute_info = *it;
@@ -692,7 +692,7 @@ static void add_column(schema_builder& builder, const std::string& name, const r
}
}
throw api_error::validation(
format("KeySchema key '{}' missing in AttributeDefinitions", name));
fmt::format("KeySchema key '{}' missing in AttributeDefinitions", name));
}
// Parse the KeySchema request attribute, which specifies the column names
@@ -760,7 +760,7 @@ static schema_ptr get_table_from_arn(service::storage_proxy& proxy, std::string_
// A table name cannot contain a '/' - if it does, it's not a
// table ARN, it may be an index. DynamoDB returns a
// ValidationException in that case - see #10786.
throw api_error::validation(format("ResourceArn '{}' is not a valid table ARN", table_name));
throw api_error::validation(fmt::format("ResourceArn '{}' is not a valid table ARN", table_name));
}
// FIXME: remove sstring creation once find_schema gains a view-based interface
return proxy.data_dictionary().find_schema(sstring(keyspace_name), sstring(table_name));
@@ -795,7 +795,7 @@ static void validate_tags(const std::map<sstring, sstring>& tags) {
std::string_view value = it->second;
if (!allowed_write_isolation_values.contains(value)) {
throw api_error::validation(
format("Incorrect write isolation tag {}. Allowed values: {}", value, allowed_write_isolation_values));
fmt::format("Incorrect write isolation tag {}. Allowed values: {}", value, allowed_write_isolation_values));
}
}
}
@@ -830,7 +830,7 @@ void rmw_operation::set_default_write_isolation(std::string_view value) {
"See docs/alternator/alternator.md for instructions.");
}
if (!allowed_write_isolation_values.contains(value)) {
throw std::runtime_error(format("Invalid --alternator-write-isolation "
throw std::runtime_error(fmt::format("Invalid --alternator-write-isolation "
"setting '{}'. Allowed values: {}.",
value, allowed_write_isolation_values));
}
@@ -993,7 +993,7 @@ static void validate_attribute_definitions(const rjson::value& attribute_definit
}
auto [it2, added] = seen_attribute_names.emplace(rjson::to_string_view(*attribute_name));
if (!added) {
throw api_error::validation(format("Duplicate AttributeName={} in AttributeDefinitions",
throw api_error::validation(fmt::format("Duplicate AttributeName={} in AttributeDefinitions",
rjson::to_string_view(*attribute_name)));
}
const rjson::value* attribute_type = rjson::find(*it, "AttributeType");
@@ -1017,7 +1017,7 @@ static future<executor::request_return_type> create_table_on_shard0(service::cli
validate_table_name(table_name);
if (table_name.find(executor::INTERNAL_TABLE_PREFIX) == 0) {
co_return api_error::validation(format("Prefix {} is reserved for accessing internal tables", executor::INTERNAL_TABLE_PREFIX));
co_return api_error::validation(fmt::format("Prefix {} is reserved for accessing internal tables", executor::INTERNAL_TABLE_PREFIX));
}
std::string keyspace_name = executor::KEYSPACE_NAME_PREFIX + table_name;
const rjson::value& attribute_definitions = request["AttributeDefinitions"];
@@ -1055,7 +1055,7 @@ static future<executor::request_return_type> create_table_on_shard0(service::cli
std::string_view index_name = rjson::to_string_view(*index_name_v);
auto [it, added] = index_names.emplace(index_name);
if (!added) {
co_return api_error::validation(format("Duplicate IndexName '{}', ", index_name));
co_return api_error::validation(fmt::format("Duplicate IndexName '{}', ", index_name));
}
std::string vname(view_name(table_name, index_name));
elogger.trace("Adding GSI {}", index_name);
@@ -1110,7 +1110,7 @@ static future<executor::request_return_type> create_table_on_shard0(service::cli
std::string_view index_name = rjson::to_string_view(*index_name_v);
auto [it, added] = index_names.emplace(index_name);
if (!added) {
co_return api_error::validation(format("Duplicate IndexName '{}', ", index_name));
co_return api_error::validation(fmt::format("Duplicate IndexName '{}', ", index_name));
}
std::string vname(lsi_name(table_name, index_name));
elogger.trace("Adding LSI {}", index_name);
@@ -1221,7 +1221,7 @@ static future<executor::request_return_type> create_table_on_shard0(service::cli
schema_mutations = service::prepare_new_keyspace_announcement(sp.local_db(), ksm, ts);
} catch (exceptions::already_exists_exception&) {
if (sp.data_dictionary().has_schema(keyspace_name, table_name)) {
co_return api_error::resource_in_use(format("Table {} already exists", table_name));
co_return api_error::resource_in_use(fmt::format("Table {} already exists", table_name));
}
}
if (sp.data_dictionary().try_find_table(schema->id())) {
@@ -1263,7 +1263,7 @@ static future<executor::request_return_type> create_table_on_shard0(service::cli
}
std::tie(schema_mutations, group0_guard) = co_await std::move(mc).extract();
co_await mm.announce(std::move(schema_mutations), std::move(group0_guard), format("alternator-executor: create {} table", table_name));
co_await mm.announce(std::move(schema_mutations), std::move(group0_guard), fmt::format("alternator-executor: create {} table", table_name));
co_await mm.wait_for_schema_agreement(sp.local_db(), db::timeout_clock::now() + 10s, nullptr);
rjson::value status = rjson::empty_object();
@@ -1316,7 +1316,7 @@ future<executor::request_return_type> executor::update_table(client_state& clien
// the ugly but harmless conversion to string_view here is because
// Seastar's sstring is missing a find(std::string_view) :-()
if (std::string_view(tab->cf_name()).find(INTERNAL_TABLE_PREFIX) == 0) {
co_await coroutine::return_exception(api_error::validation(format("Prefix {} is reserved for accessing internal tables", INTERNAL_TABLE_PREFIX)));
co_await coroutine::return_exception(api_error::validation(fmt::format("Prefix {} is reserved for accessing internal tables", INTERNAL_TABLE_PREFIX)));
}
schema_builder builder(tab);
@@ -1423,7 +1423,7 @@ void validate_value(const rjson::value& v, const char* caller) {
}
} else if (type != "L" && type != "M" && type != "BOOL" && type != "NULL") {
// TODO: can do more sanity checks on the content of the above types.
throw api_error::validation(format("{}: unknown type {} for value {}", caller, type, v));
throw api_error::validation(fmt::format("{}: unknown type {} for value {}", caller, type, v));
}
}
@@ -1617,7 +1617,7 @@ rmw_operation::returnvalues rmw_operation::parse_returnvalues(const rjson::value
} else if (s == "UPDATED_NEW") {
return rmw_operation::returnvalues::UPDATED_NEW;
} else {
throw api_error::validation(format("Unrecognized value for ReturnValues: {}", s));
throw api_error::validation(fmt::format("Unrecognized value for ReturnValues: {}", s));
}
}
@@ -1636,7 +1636,7 @@ rmw_operation::parse_returnvalues_on_condition_check_failure(const rjson::value&
} else if (s == "ALL_OLD") {
return rmw_operation::returnvalues_on_condition_check_failure::ALL_OLD;
} else {
throw api_error::validation(format("Unrecognized value for ReturnValuesOnConditionCheckFailure: {}", s));
throw api_error::validation(fmt::format("Unrecognized value for ReturnValuesOnConditionCheckFailure: {}", s));
}
}
@@ -2206,7 +2206,7 @@ future<executor::request_return_type> executor::batch_write_item(client_state& c
}
used_keys.insert(std::move(mut_key));
} else {
co_return api_error::validation(format("Unknown BatchWriteItem request type: {}", r_name));
co_return api_error::validation(fmt::format("Unknown BatchWriteItem request type: {}", r_name));
}
}
}
@@ -2329,13 +2329,13 @@ void attribute_path_map_add(const char* source, attribute_path_map<T>& map, cons
} else if(!p.has_operators()) {
// If p is top-level and we already have it or a part of it
// in map, it's a forbidden overlapping path.
throw api_error::validation(format(
throw api_error::validation(fmt::format(
"Invalid {}: two document paths overlap at {}", source, p.root()));
} else if (it->second.has_value()) {
// If we're here, it != map.end() && p.has_operators && it->second.has_value().
// This means the top-level attribute already has a value, and we're
// trying to add a non-top-level value. It's an overlap.
throw api_error::validation(format("Invalid {}: two document paths overlap at {}", source, p.root()));
throw api_error::validation(fmt::format("Invalid {}: two document paths overlap at {}", source, p.root()));
}
node* h = &it->second;
// The second step is to walk h from the top-level node to the inner node
@@ -2395,7 +2395,7 @@ void attribute_path_map_add(const char* source, attribute_path_map<T>& map, cons
if (it == map.end()) {
map.emplace(attr, node {std::move(value)});
} else {
throw api_error::validation(format(
throw api_error::validation(fmt::format(
"Invalid {}: Duplicate attribute: {}", source, attr));
}
}
@@ -2448,7 +2448,7 @@ static select_type parse_select(const rjson::value& request, table_or_view_type
}
return select_type::projection;
}
throw api_error::validation(format("Unknown Select value '{}'. Allowed choices: ALL_ATTRIBUTES, SPECIFIC_ATTRIBUTES, ALL_PROJECTED_ATTRIBUTES, COUNT",
throw api_error::validation(fmt::format("Unknown Select value '{}'. Allowed choices: ALL_ATTRIBUTES, SPECIFIC_ATTRIBUTES, ALL_PROJECTED_ATTRIBUTES, COUNT",
select));
}
@@ -2817,12 +2817,12 @@ static std::optional<rjson::value> action_result(
std::string v1_type = get_item_type_string(v1);
if (v1_type == "N") {
if (get_item_type_string(v2) != "N") {
throw api_error::validation(format("Incorrect operand type for operator or function. Expected {}: {}", v1_type, rjson::print(v2)));
throw api_error::validation(fmt::format("Incorrect operand type for operator or function. Expected {}: {}", v1_type, rjson::print(v2)));
}
result = number_add(v1, v2);
} else if (v1_type == "SS" || v1_type == "NS" || v1_type == "BS") {
if (get_item_type_string(v2) != v1_type) {
throw api_error::validation(format("Incorrect operand type for operator or function. Expected {}: {}", v1_type, rjson::print(v2)));
throw api_error::validation(fmt::format("Incorrect operand type for operator or function. Expected {}: {}", v1_type, rjson::print(v2)));
}
result = set_sum(v1, v2);
} else {
@@ -2908,7 +2908,7 @@ static bool hierarchy_actions(
} else if (h.has_members()) {
if (type[0] != 'M' || !v.IsObject()) {
// A .something on a non-map doesn't work.
throw api_error::validation(format("UpdateExpression: document paths not valid for this item:{}", h));
throw api_error::validation(fmt::format("UpdateExpression: document paths not valid for this item:{}", h));
}
for (const auto& member : h.get_members()) {
std::string attr = member.first;
@@ -3095,7 +3095,7 @@ update_item_operation::apply(std::unique_ptr<rjson::value> previous_item, api::t
std::string column_name = actions.first;
const column_definition* cdef = _schema->get_column_definition(to_bytes(column_name));
if (cdef && cdef->is_primary_key()) {
throw api_error::validation(format("UpdateItem cannot update key column {}", column_name));
throw api_error::validation(fmt::format("UpdateItem cannot update key column {}", column_name));
}
if (actions.second.has_value()) {
// An action on a top-level attribute column_name. The single
@@ -3119,7 +3119,7 @@ update_item_operation::apply(std::unique_ptr<rjson::value> previous_item, api::t
}
const rjson::value *toplevel = rjson::find(*previous_item, column_name);
if (!toplevel) {
throw api_error::validation(format("UpdateItem cannot update document path: missing attribute {}",
throw api_error::validation(fmt::format("UpdateItem cannot update document path: missing attribute {}",
column_name));
}
rjson::value result = rjson::copy(*toplevel);
@@ -3156,7 +3156,7 @@ update_item_operation::apply(std::unique_ptr<rjson::value> previous_item, api::t
validate_value(v2, "AttributeUpdates");
std::string v2_type = get_item_type_string(v2);
if (v2_type != "SS" && v2_type != "NS" && v2_type != "BS") {
throw api_error::validation(format("AttributeUpdates DELETE operation with Value only valid for sets, got type {}", v2_type));
throw api_error::validation(fmt::format("AttributeUpdates DELETE operation with Value only valid for sets, got type {}", v2_type));
}
if (v1) {
std::optional<rjson::value> result = set_diff(*v1, v2);
@@ -3202,7 +3202,7 @@ update_item_operation::apply(std::unique_ptr<rjson::value> previous_item, api::t
std::string v1_type = get_item_type_string(*v1);
std::string v2_type = get_item_type_string(v2);
if (v2_type != v1_type) {
throw api_error::validation(format("Operand type mismatch in AttributeUpdates ADD. Expected {}, got {}", v1_type, v2_type));
throw api_error::validation(fmt::format("Operand type mismatch in AttributeUpdates ADD. Expected {}, got {}", v1_type, v2_type));
}
if (v1_type == "N") {
do_update(std::move(column_name), number_add(*v1, v2));
@@ -3221,7 +3221,7 @@ update_item_operation::apply(std::unique_ptr<rjson::value> previous_item, api::t
}
} else {
throw api_error::validation(
format("Unknown Action value '{}' in AttributeUpdates", action));
fmt::format("Unknown Action value '{}' in AttributeUpdates", action));
}
}
}
@@ -4125,7 +4125,7 @@ static query::clustering_range calculate_ck_bound(schema_ptr schema, const colum
// NOTICE(sarna): A range starting with given prefix and ending (non-inclusively) with a string "incremented" by a single
// character at the end. Throws for NUMBER instances.
if (!ck_cdef.type->is_compatible_with(*utf8_type)) {
throw api_error::validation(format("BEGINS_WITH operator cannot be applied to type {}", type_to_string(ck_cdef.type)));
throw api_error::validation(fmt::format("BEGINS_WITH operator cannot be applied to type {}", type_to_string(ck_cdef.type)));
}
return get_clustering_range_for_begins_with(std::move(raw_value), ck, schema, ck_cdef.type);
}
@@ -4206,13 +4206,13 @@ static std::string_view get_toplevel(const parsed::value& v,
used_attribute_names.emplace(column_name);
if (!expression_attribute_names) {
throw api_error::validation(
format("ExpressionAttributeNames missing, entry '{}' required by KeyConditionExpression",
fmt::format("ExpressionAttributeNames missing, entry '{}' required by KeyConditionExpression",
column_name));
}
const rjson::value* value = rjson::find(*expression_attribute_names, column_name);
if (!value || !value->IsString()) {
throw api_error::validation(
format("ExpressionAttributeNames missing entry '{}' required by KeyConditionExpression",
fmt::format("ExpressionAttributeNames missing entry '{}' required by KeyConditionExpression",
column_name));
}
column_name = rjson::to_string_view(*value);
@@ -4330,7 +4330,7 @@ calculate_bounds_condition_expression(schema_ptr schema,
}
if (f->_function_name != "begins_with") {
throw api_error::validation(
format("KeyConditionExpression function '{}' not supported",f->_function_name));
fmt::format("KeyConditionExpression function '{}' not supported",f->_function_name));
}
if (f->_parameters.size() != 2 || !f->_parameters[0].is_path() ||
!f->_parameters[1].is_constant()) {
@@ -4395,7 +4395,7 @@ calculate_bounds_condition_expression(schema_ptr schema,
ck_bounds.push_back(query::clustering_range(ck));
} else {
throw api_error::validation(
format("KeyConditionExpression condition on non-key attribute {}", key));
fmt::format("KeyConditionExpression condition on non-key attribute {}", key));
}
continue;
}
@@ -4403,10 +4403,10 @@ calculate_bounds_condition_expression(schema_ptr schema,
// are allowed *only* on the clustering key:
if (sstring(key) == pk_cdef.name_as_text()) {
throw api_error::validation(
format("KeyConditionExpression only '=' condition is supported on partition key {}", key));
fmt::format("KeyConditionExpression only '=' condition is supported on partition key {}", key));
} else if (!ck_cdef || sstring(key) != ck_cdef->name_as_text()) {
throw api_error::validation(
format("KeyConditionExpression condition on non-key attribute {}", key));
fmt::format("KeyConditionExpression condition on non-key attribute {}", key));
}
if (!ck_bounds.empty()) {
throw api_error::validation(
@@ -4430,7 +4430,7 @@ calculate_bounds_condition_expression(schema_ptr schema,
// begins_with() supported on bytes and strings (both stored
// in the database as strings) but not on numbers.
throw api_error::validation(
format("KeyConditionExpression begins_with() not supported on type {}",
fmt::format("KeyConditionExpression begins_with() not supported on type {}",
type_to_string(ck_cdef->type)));
} else if (raw_value.empty()) {
ck_bounds.push_back(query::clustering_range::make_open_ended_both_sides());
@@ -4658,7 +4658,7 @@ future<executor::request_return_type> executor::describe_continuous_backups(clie
validate_table_name(table_name);
throw api_error::table_not_found(
format("Table {} not found", table_name));
fmt::format("Table {} not found", table_name));
}
rjson::value desc = rjson::empty_object();
rjson::add(desc, "ContinuousBackupsStatus", "DISABLED");

View File

@@ -57,10 +57,10 @@ static Result parse(const char* input_name, std::string_view input, Func&& f) {
// TODO: displayRecognitionError could set a position inside the
// expressions_syntax_error in throws, and we could use it here to
// mark the broken position in 'input'.
throw expressions_syntax_error(format("Failed parsing {} '{}': {}",
throw expressions_syntax_error(fmt::format("Failed parsing {} '{}': {}",
input_name, input, e.what()));
} catch (...) {
throw expressions_syntax_error(format("Failed parsing {} '{}': {}",
throw expressions_syntax_error(fmt::format("Failed parsing {} '{}': {}",
input_name, input, std::current_exception()));
}
}
@@ -160,12 +160,12 @@ static std::optional<std::string> resolve_path_component(const std::string& colu
if (column_name.size() > 0 && column_name.front() == '#') {
if (!expression_attribute_names) {
throw api_error::validation(
format("ExpressionAttributeNames missing, entry '{}' required by expression", column_name));
fmt::format("ExpressionAttributeNames missing, entry '{}' required by expression", column_name));
}
const rjson::value* value = rjson::find(*expression_attribute_names, column_name);
if (!value || !value->IsString()) {
throw api_error::validation(
format("ExpressionAttributeNames missing entry '{}' required by expression", column_name));
fmt::format("ExpressionAttributeNames missing entry '{}' required by expression", column_name));
}
used_attribute_names.emplace(column_name);
return std::string(rjson::to_string_view(*value));
@@ -202,16 +202,16 @@ static void resolve_constant(parsed::constant& c,
[&] (const std::string& valref) {
if (!expression_attribute_values) {
throw api_error::validation(
format("ExpressionAttributeValues missing, entry '{}' required by expression", valref));
fmt::format("ExpressionAttributeValues missing, entry '{}' required by expression", valref));
}
const rjson::value* value = rjson::find(*expression_attribute_values, valref);
if (!value) {
throw api_error::validation(
format("ExpressionAttributeValues missing entry '{}' required by expression", valref));
fmt::format("ExpressionAttributeValues missing entry '{}' required by expression", valref));
}
if (value->IsNull()) {
throw api_error::validation(
format("ExpressionAttributeValues null value for entry '{}' required by expression", valref));
fmt::format("ExpressionAttributeValues null value for entry '{}' required by expression", valref));
}
validate_value(*value, "ExpressionAttributeValues");
used_attribute_values.emplace(valref);
@@ -708,7 +708,7 @@ rjson::value calculate_value(const parsed::value& v,
auto function_it = function_handlers.find(std::string_view(f._function_name));
if (function_it == function_handlers.end()) {
throw api_error::validation(
format("{}: unknown function '{}' called.", caller, f._function_name));
fmt::format("{}: unknown function '{}' called.", caller, f._function_name));
}
return function_it->second(caller, previous_item, f);
},

View File

@@ -143,17 +143,17 @@ static big_decimal parse_and_validate_number(std::string_view s) {
big_decimal ret(s);
auto [magnitude, precision] = internal::get_magnitude_and_precision(s);
if (magnitude > 125) {
throw api_error::validation(format("Number overflow: {}. Attempting to store a number with magnitude larger than supported range.", s));
throw api_error::validation(fmt::format("Number overflow: {}. Attempting to store a number with magnitude larger than supported range.", s));
}
if (magnitude < -130) {
throw api_error::validation(format("Number underflow: {}. Attempting to store a number with magnitude lower than supported range.", s));
throw api_error::validation(fmt::format("Number underflow: {}. Attempting to store a number with magnitude lower than supported range.", s));
}
if (precision > 38) {
throw api_error::validation(format("Number too precise: {}. Attempting to store a number with more significant digits than supported.", s));
throw api_error::validation(fmt::format("Number too precise: {}. Attempting to store a number with more significant digits than supported.", s));
}
return ret;
} catch (const marshal_exception& e) {
throw api_error::validation(format("The parameter cannot be converted to a numeric value: {}", s));
throw api_error::validation(fmt::format("The parameter cannot be converted to a numeric value: {}", s));
}
}
@@ -265,7 +265,7 @@ bytes get_key_column_value(const rjson::value& item, const column_definition& co
std::string column_name = column.name_as_text();
const rjson::value* key_typed_value = rjson::find(item, column_name);
if (!key_typed_value) {
throw api_error::validation(format("Key column {} not found", column_name));
throw api_error::validation(fmt::format("Key column {} not found", column_name));
}
return get_key_from_typed_value(*key_typed_value, column);
}
@@ -279,21 +279,21 @@ bytes get_key_column_value(const rjson::value& item, const column_definition& co
static const rjson::value& get_typed_value(const rjson::value& key_typed_value, std::string_view type_str, std::string_view name, std::string_view value_name) {
if (!key_typed_value.IsObject() || key_typed_value.MemberCount() != 1) {
throw api_error::validation(
format("Malformed value object for {} {}: {}",
fmt::format("Malformed value object for {} {}: {}",
value_name, name, key_typed_value));
}
auto it = key_typed_value.MemberBegin();
if (rjson::to_string_view(it->name) != type_str) {
throw api_error::validation(
format("Type mismatch: expected type {} for {} {}, got type {}",
fmt::format("Type mismatch: expected type {} for {} {}, got type {}",
type_str, value_name, name, it->name));
}
// We assume this function is called just for key types (S, B, N), and
// all of those always have a string value in the JSON.
if (!it->value.IsString()) {
throw api_error::validation(
format("Malformed value object for {} {}: {}",
fmt::format("Malformed value object for {} {}: {}",
value_name, name, key_typed_value));
}
@@ -402,16 +402,16 @@ position_in_partition pos_from_json(const rjson::value& item, schema_ptr schema)
big_decimal unwrap_number(const rjson::value& v, std::string_view diagnostic) {
if (!v.IsObject() || v.MemberCount() != 1) {
throw api_error::validation(format("{}: invalid number object", diagnostic));
throw api_error::validation(fmt::format("{}: invalid number object", diagnostic));
}
auto it = v.MemberBegin();
if (it->name != "N") {
throw api_error::validation(format("{}: expected number, found type '{}'", diagnostic, it->name));
throw api_error::validation(fmt::format("{}: expected number, found type '{}'", diagnostic, it->name));
}
if (!it->value.IsString()) {
// We shouldn't reach here. Callers normally validate their input
// earlier with validate_value().
throw api_error::validation(format("{}: improperly formatted number constant", diagnostic));
throw api_error::validation(fmt::format("{}: improperly formatted number constant", diagnostic));
}
big_decimal ret = parse_and_validate_number(rjson::to_string_view(it->value));
return ret;
@@ -492,7 +492,7 @@ rjson::value set_sum(const rjson::value& v1, const rjson::value& v2) {
auto [set1_type, set1] = unwrap_set(v1);
auto [set2_type, set2] = unwrap_set(v2);
if (set1_type != set2_type) {
throw api_error::validation(format("Mismatched set types: {} and {}", set1_type, set2_type));
throw api_error::validation(fmt::format("Mismatched set types: {} and {}", set1_type, set2_type));
}
if (!set1 || !set2) {
throw api_error::validation("UpdateExpression: ADD operation for sets must be given sets as arguments");
@@ -520,7 +520,7 @@ std::optional<rjson::value> set_diff(const rjson::value& v1, const rjson::value&
auto [set1_type, set1] = unwrap_set(v1);
auto [set2_type, set2] = unwrap_set(v2);
if (set1_type != set2_type) {
throw api_error::validation(format("Set DELETE type mismatch: {} and {}", set1_type, set2_type));
throw api_error::validation(fmt::format("Set DELETE type mismatch: {} and {}", set1_type, set2_type));
}
if (!set1 || !set2) {
throw api_error::validation("UpdateExpression: DELETE operation can only be performed on a set");

View File

@@ -263,7 +263,7 @@ future<std::string> server::verify_signature(const request& req, const chunked_c
std::string_view authorization_header = authorization_it->second;
auto pos = authorization_header.find_first_of(' ');
if (pos == std::string_view::npos || authorization_header.substr(0, pos) != "AWS4-HMAC-SHA256") {
throw api_error::invalid_signature(format("Authorization header must use AWS4-HMAC-SHA256 algorithm: {}", authorization_header));
throw api_error::invalid_signature(fmt::format("Authorization header must use AWS4-HMAC-SHA256 algorithm: {}", authorization_header));
}
authorization_header.remove_prefix(pos+1);
std::string credential;
@@ -298,7 +298,7 @@ future<std::string> server::verify_signature(const request& req, const chunked_c
std::vector<std::string_view> credential_split = split(credential, '/');
if (credential_split.size() != 5) {
throw api_error::validation(format("Incorrect credential information format: {}", credential));
throw api_error::validation(fmt::format("Incorrect credential information format: {}", credential));
}
std::string user(credential_split[0]);
std::string datestamp(credential_split[1]);
@@ -383,7 +383,7 @@ static tracing::trace_state_ptr maybe_trace_query(service::client_state& client_
std::string buf;
tracing::add_session_param(trace_state, "alternator_op", op);
tracing::add_query(trace_state, truncated_content_view(query, buf));
tracing::begin(trace_state, format("Alternator {}", op), client_state.get_client_address());
tracing::begin(trace_state, seastar::format("Alternator {}", op), client_state.get_client_address());
if (!username.empty()) {
tracing::set_username(trace_state, auth::authenticated_user(username));
}
@@ -419,7 +419,7 @@ future<executor::request_return_type> server::handle_api_request(std::unique_ptr
auto callback_it = _callbacks.find(op);
if (callback_it == _callbacks.end()) {
_executor._stats.unsupported_operations++;
co_return api_error::unknown_operation(format("Unsupported operation {}", op));
co_return api_error::unknown_operation(fmt::format("Unsupported operation {}", op));
}
if (_pending_requests.get_count() >= _max_concurrent_requests) {
_executor._stats.requests_shed++;
@@ -643,7 +643,7 @@ future<> server::json_parser::stop() {
const char* api_error::what() const noexcept {
if (_what_string.empty()) {
_what_string = format("{} {}: {}", std::to_underlying(_http_code), _type, _msg);
_what_string = fmt::format("{} {}: {}", std::to_underlying(_http_code), _type, _msg);
}
return _what_string.c_str();
}

View File

@@ -246,7 +246,7 @@ public:
value = T{boost::lexical_cast<Base>(param)};
}
} catch (boost::bad_lexical_cast&) {
throw httpd::bad_param_exception(format("{} ({}): type error - should be {}", name, param, boost::units::detail::demangle(typeid(Base).name())));
throw httpd::bad_param_exception(fmt::format("{} ({}): type error - should be {}", name, param, boost::units::detail::demangle(typeid(Base).name())));
}
}

View File

@@ -907,7 +907,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
}
ignore_nodes.push_back(std::move(hoep));
} catch (...) {
throw std::runtime_error(format("Failed to parse ignore_nodes parameter: ignore_nodes={}, node={}: {}", ignore_nodes_strs, n, std::current_exception()));
throw std::runtime_error(fmt::format("Failed to parse ignore_nodes parameter: ignore_nodes={}, node={}: {}", ignore_nodes_strs, n, std::current_exception()));
}
}
return ss.local().removenode(host_id, std::move(ignore_nodes)).then([] {

View File

@@ -149,7 +149,7 @@ future<std::optional<auth::authenticated_user>> auth::certificate_authenticator:
co_return username;
}
}
throw exceptions::authentication_exception(format("Subject '{}'/'{}' does not match any query expression", subject, altname));
throw exceptions::authentication_exception(seastar::format("Subject '{}'/'{}' does not match any query expression", subject, altname));
}

View File

@@ -67,7 +67,7 @@ bool default_authorizer::legacy_metadata_exists() const {
}
future<bool> default_authorizer::legacy_any_granted() const {
static const sstring query = format("SELECT * FROM {}.{} LIMIT 1", meta::legacy::AUTH_KS, PERMISSIONS_CF);
static const sstring query = seastar::format("SELECT * FROM {}.{} LIMIT 1", meta::legacy::AUTH_KS, PERMISSIONS_CF);
return _qp.execute_internal(
query,
@@ -80,7 +80,7 @@ future<bool> default_authorizer::legacy_any_granted() const {
future<> default_authorizer::migrate_legacy_metadata() {
alogger.info("Starting migration of legacy permissions metadata.");
static const sstring query = format("SELECT * FROM {}.{}", meta::legacy::AUTH_KS, legacy_table_name);
static const sstring query = seastar::format("SELECT * FROM {}.{}", meta::legacy::AUTH_KS, legacy_table_name);
return _qp.execute_internal(
query,
@@ -163,7 +163,7 @@ default_authorizer::authorize(const role_or_anonymous& maybe_role, const resourc
co_return permissions::NONE;
}
const sstring query = format("SELECT {} FROM {}.{} WHERE {} = ? AND {} = ?",
const sstring query = seastar::format("SELECT {} FROM {}.{} WHERE {} = ? AND {} = ?",
PERMISSIONS_NAME,
get_auth_ks_name(_qp),
PERMISSIONS_CF,
@@ -188,7 +188,7 @@ default_authorizer::modify(
const resource& resource,
std::string_view op,
::service::group0_batch& mc) {
const sstring query = format("UPDATE {}.{} SET {} = {} {} ? WHERE {} = ? AND {} = ?",
const sstring query = seastar::format("UPDATE {}.{} SET {} = {} {} ? WHERE {} = ? AND {} = ?",
get_auth_ks_name(_qp),
PERMISSIONS_CF,
PERMISSIONS_NAME,
@@ -218,7 +218,7 @@ future<> default_authorizer::revoke(std::string_view role_name, permission_set s
}
future<std::vector<permission_details>> default_authorizer::list_all() const {
const sstring query = format("SELECT {}, {}, {} FROM {}.{}",
const sstring query = seastar::format("SELECT {}, {}, {} FROM {}.{}",
ROLE_NAME,
RESOURCE_NAME,
PERMISSIONS_NAME,
@@ -246,7 +246,7 @@ future<std::vector<permission_details>> default_authorizer::list_all() const {
future<> default_authorizer::revoke_all(std::string_view role_name, ::service::group0_batch& mc) {
try {
const sstring query = format("DELETE FROM {}.{} WHERE {} = ?",
const sstring query = seastar::format("DELETE FROM {}.{} WHERE {} = ?",
get_auth_ks_name(_qp),
PERMISSIONS_CF,
ROLE_NAME);
@@ -266,7 +266,7 @@ future<> default_authorizer::revoke_all(std::string_view role_name, ::service::g
}
future<> default_authorizer::revoke_all_legacy(const resource& resource) {
static const sstring query = format("SELECT {} FROM {}.{} WHERE {} = ? ALLOW FILTERING",
static const sstring query = seastar::format("SELECT {} FROM {}.{} WHERE {} = ? ALLOW FILTERING",
ROLE_NAME,
get_auth_ks_name(_qp),
PERMISSIONS_CF,
@@ -283,7 +283,7 @@ future<> default_authorizer::revoke_all_legacy(const resource& resource) {
res->begin(),
res->end(),
[this, res, resource](const cql3::untyped_result_set::row& r) {
static const sstring query = format("DELETE FROM {}.{} WHERE {} = ? AND {} = ?",
static const sstring query = seastar::format("DELETE FROM {}.{} WHERE {} = ? AND {} = ?",
get_auth_ks_name(_qp),
PERMISSIONS_CF,
ROLE_NAME,
@@ -323,7 +323,7 @@ future<> default_authorizer::revoke_all(const resource& resource, ::service::gro
auto name = resource.name();
auto gen = [this, name] (api::timestamp_type t) -> ::service::mutations_generator {
const sstring query = format("SELECT {} FROM {}.{} WHERE {} = ? ALLOW FILTERING",
const sstring query = seastar::format("SELECT {} FROM {}.{} WHERE {} = ? ALLOW FILTERING",
ROLE_NAME,
get_auth_ks_name(_qp),
PERMISSIONS_CF,
@@ -334,7 +334,7 @@ future<> default_authorizer::revoke_all(const resource& resource, ::service::gro
{name},
cql3::query_processor::cache_internal::no);
for (const auto& r : *res) {
const sstring query = format("DELETE FROM {}.{} WHERE {} = ? AND {} = ?",
const sstring query = seastar::format("DELETE FROM {}.{} WHERE {} = ? AND {} = ?",
get_auth_ks_name(_qp),
PERMISSIONS_CF,
ROLE_NAME,
@@ -346,7 +346,7 @@ future<> default_authorizer::revoke_all(const resource& resource, ::service::gro
{r.get_as<sstring>(ROLE_NAME), name});
if (muts.size() != 1) {
on_internal_error(alogger,
format("expecting single delete mutation, got {}", muts.size()));
seastar::format("expecting single delete mutation, got {}", muts.size()));
}
co_yield std::move(muts[0]);
}
@@ -357,7 +357,7 @@ future<> default_authorizer::revoke_all(const resource& resource, ::service::gro
void default_authorizer::revoke_all_keyspace_resources(const resource& ks_resource, ::service::group0_batch& mc) {
auto ks_name = ks_resource.name();
auto gen = [this, ks_name] (api::timestamp_type t) -> ::service::mutations_generator {
const sstring query = format("SELECT {}, {} FROM {}.{}",
const sstring query = seastar::format("SELECT {}, {} FROM {}.{}",
ROLE_NAME,
RESOURCE_NAME,
get_auth_ks_name(_qp),
@@ -374,7 +374,7 @@ void default_authorizer::revoke_all_keyspace_resources(const resource& ks_resour
// r doesn't represent resource related to ks_resource
continue;
}
const sstring query = format("DELETE FROM {}.{} WHERE {} = ? AND {} = ?",
const sstring query = seastar::format("DELETE FROM {}.{} WHERE {} = ? AND {} = ?",
get_auth_ks_name(_qp),
PERMISSIONS_CF,
ROLE_NAME,

View File

@@ -46,7 +46,7 @@ future<> maintenance_socket_role_manager::stop() {
template<typename T = void>
future<T> operation_not_supported_exception(std::string_view operation) {
return make_exception_future<T>(
std::runtime_error(format("role manager: {} operation not supported through maintenance socket", operation)));
std::runtime_error(fmt::format("role manager: {} operation not supported through maintenance socket", operation)));
}
future<> maintenance_socket_role_manager::create(std::string_view role_name, const role_config&, ::service::group0_batch&) {

View File

@@ -75,7 +75,7 @@ static bool has_salted_hash(const cql3::untyped_result_set_row& row) {
}
sstring password_authenticator::update_row_query() const {
return format("UPDATE {}.{} SET {} = ? WHERE {} = ?",
return seastar::format("UPDATE {}.{} SET {} = ? WHERE {} = ?",
get_auth_ks_name(_qp),
meta::roles_table::name,
SALTED_HASH,
@@ -90,7 +90,7 @@ bool password_authenticator::legacy_metadata_exists() const {
future<> password_authenticator::migrate_legacy_metadata() const {
plogger.info("Starting migration of legacy authentication metadata.");
static const sstring query = format("SELECT * FROM {}.{}", meta::legacy::AUTH_KS, legacy_table_name);
static const sstring query = seastar::format("SELECT * FROM {}.{}", meta::legacy::AUTH_KS, legacy_table_name);
return _qp.execute_internal(
query,
@@ -223,7 +223,7 @@ future<authenticated_user> password_authenticator::authenticate(
// obsolete prepared statements pretty quickly.
// Rely on query processing caching statements instead, and lets assume
// that a map lookup string->statement is not gonna kill us much.
const sstring query = format("SELECT {} FROM {}.{} WHERE {} = ?",
const sstring query = seastar::format("SELECT {} FROM {}.{} WHERE {} = ?",
SALTED_HASH,
get_auth_ks_name(_qp),
meta::roles_table::name,
@@ -280,7 +280,7 @@ future<> password_authenticator::alter(std::string_view role_name, const authent
co_return;
}
const sstring query = format("UPDATE {}.{} SET {} = ? WHERE {} = ?",
const sstring query = seastar::format("UPDATE {}.{} SET {} = ? WHERE {} = ?",
get_auth_ks_name(_qp),
meta::roles_table::name,
SALTED_HASH,
@@ -299,7 +299,7 @@ future<> password_authenticator::alter(std::string_view role_name, const authent
}
future<> password_authenticator::drop(std::string_view name, ::service::group0_batch& mc) {
const sstring query = format("DELETE {} FROM {}.{} WHERE {} = ?",
const sstring query = seastar::format("DELETE {} FROM {}.{} WHERE {} = ?",
SALTED_HASH,
get_auth_ks_name(_qp),
meta::roles_table::name,

View File

@@ -193,7 +193,7 @@ service_level_resource_view::service_level_resource_view(const resource &r) {
}
sstring encode_signature(std::string_view name, std::vector<data_type> args) {
return format("{}[{}]", name,
return seastar::format("{}[{}]", name,
fmt::join(args | boost::adaptors::transformed([] (const data_type t) {
return t->name();
}), "^"));
@@ -222,7 +222,7 @@ std::pair<sstring, std::vector<data_type>> decode_signature(std::string_view enc
// to the short form (int)
static sstring decoded_signature_string(std::string_view encoded_signature) {
auto [function_name, arg_types] = decode_signature(encoded_signature);
return format("{}({})", cql3::util::maybe_quote(sstring(function_name)),
return seastar::format("{}({})", cql3::util::maybe_quote(sstring(function_name)),
boost::algorithm::join(arg_types | boost::adaptors::transformed([] (data_type t) {
return t->cql3_type_name();
}), ", "));

View File

@@ -33,7 +33,7 @@ namespace auth {
class invalid_resource_name : public std::invalid_argument {
public:
explicit invalid_resource_name(std::string_view name)
: std::invalid_argument(format("The resource name '{}' is invalid.", name)) {
: std::invalid_argument(fmt::format("The resource name '{}' is invalid.", name)) {
}
};
@@ -149,7 +149,7 @@ class resource_kind_mismatch : public std::invalid_argument {
public:
explicit resource_kind_mismatch(resource_kind expected, resource_kind actual)
: std::invalid_argument(
format("This resource has kind '{}', but was expected to have kind '{}'.", actual, expected)) {
fmt::format("This resource has kind '{}', but was expected to have kind '{}'.", actual, expected)) {
}
};

View File

@@ -48,14 +48,14 @@ public:
class role_already_exists : public roles_argument_exception {
public:
explicit role_already_exists(std::string_view role_name)
: roles_argument_exception(format("Role {} already exists.", role_name)) {
: roles_argument_exception(seastar::format("Role {} already exists.", role_name)) {
}
};
class nonexistant_role : public roles_argument_exception {
public:
explicit nonexistant_role(std::string_view role_name)
: roles_argument_exception(format("Role {} doesn't exist.", role_name)) {
: roles_argument_exception(seastar::format("Role {} doesn't exist.", role_name)) {
}
};
@@ -63,7 +63,7 @@ class role_already_included : public roles_argument_exception {
public:
role_already_included(std::string_view grantee_name, std::string_view role_name)
: roles_argument_exception(
format("{} already includes role {}.", grantee_name, role_name)) {
seastar::format("{} already includes role {}.", grantee_name, role_name)) {
}
};
@@ -71,7 +71,7 @@ class revoke_ungranted_role : public roles_argument_exception {
public:
revoke_ungranted_role(std::string_view revokee_name, std::string_view role_name)
: roles_argument_exception(
format("{} was not granted role {}, so it cannot be revoked.", revokee_name, role_name)) {
seastar::format("{} was not granted role {}, so it cannot be revoked.", revokee_name, role_name)) {
}
};

View File

@@ -47,7 +47,7 @@ future<bool> default_role_row_satisfies(
cql3::query_processor& qp,
std::function<bool(const cql3::untyped_result_set_row&)> p,
std::optional<std::string> rolename) {
const sstring query = format("SELECT * FROM {}.{} WHERE {} = ?",
const sstring query = seastar::format("SELECT * FROM {}.{} WHERE {} = ?",
get_auth_ks_name(qp),
meta::roles_table::name,
meta::roles_table::role_col_name);
@@ -69,7 +69,7 @@ future<bool> any_nondefault_role_row_satisfies(
cql3::query_processor& qp,
std::function<bool(const cql3::untyped_result_set_row&)> p,
std::optional<std::string> rolename) {
const sstring query = format("SELECT * FROM {}.{}", get_auth_ks_name(qp), meta::roles_table::name);
const sstring query = seastar::format("SELECT * FROM {}.{}", get_auth_ks_name(qp), meta::roles_table::name);
auto results = co_await qp.execute_internal(query, db::consistency_level::QUORUM
, internal_distributed_query_state(), cql3::query_processor::cache_internal::no

View File

@@ -213,7 +213,7 @@ future<> service::create_legacy_keyspace_if_missing(::service::migration_manager
try {
co_return co_await mm.announce(::service::prepare_new_keyspace_announcement(db.real_database(), ksm, ts),
std::move(group0_guard), format("auth_service: create {} keyspace", meta::legacy::AUTH_KS));
std::move(group0_guard), seastar::format("auth_service: create {} keyspace", meta::legacy::AUTH_KS));
} catch (::service::group0_concurrent_modification&) {
log.info("Concurrent operation is detected while creating {} keyspace, retrying.", meta::legacy::AUTH_KS);
}
@@ -633,7 +633,7 @@ future<> migrate_to_auth_v2(db::system_keyspace& sys_ks, ::service::raft_group0_
::service::query_state qs(cs, empty_service_permit());
auto rows = co_await qp.execute_internal(
format("SELECT * FROM {}.{}", meta::legacy::AUTH_KS, cf_name),
seastar::format("SELECT * FROM {}.{}", meta::legacy::AUTH_KS, cf_name),
db::consistency_level::ALL,
qs,
{},

View File

@@ -49,7 +49,7 @@ namespace role_attributes_table {
constexpr std::string_view name{"role_attributes", 15};
static std::string_view creation_query() noexcept {
static const sstring instance = format(
static const sstring instance = seastar::format(
"CREATE TABLE {}.{} ("
" role text,"
" name text,"
@@ -89,7 +89,7 @@ static db::consistency_level consistency_for_role(std::string_view role_name) no
}
static future<std::optional<record>> find_record(cql3::query_processor& qp, std::string_view role_name) {
const sstring query = format("SELECT * FROM {}.{} WHERE {} = ?",
const sstring query = seastar::format("SELECT * FROM {}.{} WHERE {} = ?",
get_auth_ks_name(qp),
meta::roles_table::name,
meta::roles_table::role_col_name);
@@ -183,7 +183,7 @@ future<> standard_role_manager::create_default_role_if_missing() {
if (exists) {
co_return;
}
const sstring query = format("INSERT INTO {}.{} ({}, is_superuser, can_login) VALUES (?, true, true)",
const sstring query = seastar::format("INSERT INTO {}.{} ({}, is_superuser, can_login) VALUES (?, true, true)",
get_auth_ks_name(_qp),
meta::roles_table::name,
meta::roles_table::role_col_name);
@@ -212,7 +212,7 @@ bool standard_role_manager::legacy_metadata_exists() {
future<> standard_role_manager::migrate_legacy_metadata() {
log.info("Starting migration of legacy user metadata.");
static const sstring query = format("SELECT * FROM {}.{}", meta::legacy::AUTH_KS, legacy_table_name);
static const sstring query = seastar::format("SELECT * FROM {}.{}", meta::legacy::AUTH_KS, legacy_table_name);
return _qp.execute_internal(
query,
@@ -279,7 +279,7 @@ future<> standard_role_manager::stop() {
}
future<> standard_role_manager::create_or_replace(std::string_view role_name, const role_config& c, ::service::group0_batch& mc) {
const sstring query = format("INSERT INTO {}.{} ({}, is_superuser, can_login) VALUES (?, ?, ?)",
const sstring query = seastar::format("INSERT INTO {}.{} ({}, is_superuser, can_login) VALUES (?, ?, ?)",
get_auth_ks_name(_qp),
meta::roles_table::name,
meta::roles_table::role_col_name);
@@ -326,7 +326,7 @@ standard_role_manager::alter(std::string_view role_name, const role_config_updat
if (!u.is_superuser && !u.can_login) {
return make_ready_future<>();
}
const sstring query = format("UPDATE {}.{} SET {} WHERE {} = ?",
const sstring query = seastar::format("UPDATE {}.{} SET {} WHERE {} = ?",
get_auth_ks_name(_qp),
meta::roles_table::name,
build_column_assignments(u),
@@ -350,7 +350,7 @@ future<> standard_role_manager::drop(std::string_view role_name, ::service::grou
}
// First, revoke this role from all roles that are members of it.
const auto revoke_from_members = [this, role_name, &mc] () -> future<> {
const sstring query = format("SELECT member FROM {}.{} WHERE role = ?",
const sstring query = seastar::format("SELECT member FROM {}.{} WHERE role = ?",
get_auth_ks_name(_qp),
meta::role_members_table::name);
const auto members = co_await _qp.execute_internal(
@@ -382,7 +382,7 @@ future<> standard_role_manager::drop(std::string_view role_name, ::service::grou
};
// Delete all attributes for that role
const auto remove_attributes_of = [this, role_name, &mc] () -> future<> {
const sstring query = format("DELETE FROM {}.{} WHERE role = ?",
const sstring query = seastar::format("DELETE FROM {}.{} WHERE role = ?",
get_auth_ks_name(_qp),
meta::role_attributes_table::name);
if (legacy_mode(_qp)) {
@@ -394,7 +394,7 @@ future<> standard_role_manager::drop(std::string_view role_name, ::service::grou
};
// Finally, delete the role itself.
const auto delete_role = [this, role_name, &mc] () -> future<> {
const sstring query = format("DELETE FROM {}.{} WHERE {} = ?",
const sstring query = seastar::format("DELETE FROM {}.{} WHERE {} = ?",
get_auth_ks_name(_qp),
meta::roles_table::name,
meta::roles_table::role_col_name);
@@ -421,7 +421,7 @@ standard_role_manager::legacy_modify_membership(
std::string_view role_name,
membership_change ch) {
const auto modify_roles = [this, role_name, grantee_name, ch] () -> future<> {
const auto query = format(
const auto query = seastar::format(
"UPDATE {}.{} SET member_of = member_of {} ? WHERE {} = ?",
get_auth_ks_name(_qp),
meta::roles_table::name,
@@ -438,7 +438,7 @@ standard_role_manager::legacy_modify_membership(
const auto modify_role_members = [this, role_name, grantee_name, ch] () -> future<> {
switch (ch) {
case membership_change::add: {
const sstring insert_query = format("INSERT INTO {}.{} (role, member) VALUES (?, ?)",
const sstring insert_query = seastar::format("INSERT INTO {}.{} (role, member) VALUES (?, ?)",
get_auth_ks_name(_qp),
meta::role_members_table::name);
co_return co_await _qp.execute_internal(
@@ -450,7 +450,7 @@ standard_role_manager::legacy_modify_membership(
}
case membership_change::remove: {
const sstring delete_query = format("DELETE FROM {}.{} WHERE role = ? AND member = ?",
const sstring delete_query = seastar::format("DELETE FROM {}.{} WHERE role = ? AND member = ?",
get_auth_ks_name(_qp),
meta::role_members_table::name);
co_return co_await _qp.execute_internal(
@@ -476,7 +476,7 @@ standard_role_manager::modify_membership(
co_return co_await legacy_modify_membership(grantee_name, role_name, ch);
}
const auto modify_roles = format(
const auto modify_roles = seastar::format(
"UPDATE {}.{} SET member_of = member_of {} ? WHERE {} = ?",
get_auth_ks_name(_qp),
meta::roles_table::name,
@@ -488,12 +488,12 @@ standard_role_manager::modify_membership(
sstring modify_role_members;
switch (ch) {
case membership_change::add:
modify_role_members = format("INSERT INTO {}.{} (role, member) VALUES (?, ?)",
modify_role_members = seastar::format("INSERT INTO {}.{} (role, member) VALUES (?, ?)",
get_auth_ks_name(_qp),
meta::role_members_table::name);
break;
case membership_change::remove:
modify_role_members = format("DELETE FROM {}.{} WHERE role = ? AND member = ?",
modify_role_members = seastar::format("DELETE FROM {}.{} WHERE role = ? AND member = ?",
get_auth_ks_name(_qp),
meta::role_members_table::name);
break;
@@ -587,7 +587,7 @@ future<role_set> standard_role_manager::query_granted(std::string_view grantee_n
}
future<role_to_directly_granted_map> standard_role_manager::query_all_directly_granted() {
const sstring query = format("SELECT * FROM {}.{}",
const sstring query = seastar::format("SELECT * FROM {}.{}",
get_auth_ks_name(_qp),
meta::role_members_table::name);
@@ -601,7 +601,7 @@ future<role_to_directly_granted_map> standard_role_manager::query_all_directly_g
}
future<role_set> standard_role_manager::query_all() {
const sstring query = format("SELECT {} FROM {}.{}",
const sstring query = seastar::format("SELECT {} FROM {}.{}",
meta::roles_table::role_col_name,
get_auth_ks_name(_qp),
meta::roles_table::name);
@@ -645,7 +645,7 @@ future<bool> standard_role_manager::can_login(std::string_view role_name) {
}
future<std::optional<sstring>> standard_role_manager::get_attribute(std::string_view role_name, std::string_view attribute_name) {
const sstring query = format("SELECT name, value FROM {}.{} WHERE role = ? AND name = ?",
const sstring query = seastar::format("SELECT name, value FROM {}.{} WHERE role = ? AND name = ?",
get_auth_ks_name(_qp),
meta::role_attributes_table::name);
const auto result_set = co_await _qp.execute_internal(query, {sstring(role_name), sstring(attribute_name)}, cql3::query_processor::cache_internal::yes);
@@ -676,7 +676,7 @@ future<> standard_role_manager::set_attribute(std::string_view role_name, std::s
if (!co_await exists(role_name)) {
throw auth::nonexistant_role(role_name);
}
const sstring query = format("INSERT INTO {}.{} (role, name, value) VALUES (?, ?, ?)",
const sstring query = seastar::format("INSERT INTO {}.{} (role, name, value) VALUES (?, ?, ?)",
get_auth_ks_name(_qp),
meta::role_attributes_table::name);
if (legacy_mode(_qp)) {
@@ -691,7 +691,7 @@ future<> standard_role_manager::remove_attribute(std::string_view role_name, std
if (!co_await exists(role_name)) {
throw auth::nonexistant_role(role_name);
}
const sstring query = format("DELETE FROM {}.{} WHERE role = ? AND name = ?",
const sstring query = seastar::format("DELETE FROM {}.{} WHERE role = ? AND name = ?",
get_auth_ks_name(_qp),
meta::role_attributes_table::name);
if (legacy_mode(_qp)) {

View File

@@ -42,7 +42,7 @@ bytes from_hex(sstring_view s) {
auto half_byte1 = hex_to_int(s[i * 2]);
auto half_byte2 = hex_to_int(s[i * 2 + 1]);
if (half_byte1 == -1 || half_byte2 == -1) {
throw std::invalid_argument(format("Non-hex characters in {}", s));
throw std::invalid_argument(fmt::format("Non-hex characters in {}", s));
}
out[i] = (half_byte1 << 4) | half_byte2;
}

View File

@@ -872,7 +872,7 @@ future<> generation_service::check_and_repair_cdc_streams() {
return;
}
if (!_gossiper.is_normal(addr)) {
throw std::runtime_error(format("All nodes must be in NORMAL or LEFT state while performing check_and_repair_cdc_streams"
throw std::runtime_error(fmt::format("All nodes must be in NORMAL or LEFT state while performing check_and_repair_cdc_streams"
" ({} is in state {})", addr, _gossiper.get_gossip_status(state)));
}
@@ -1111,7 +1111,7 @@ future<bool> generation_service::legacy_do_handle_cdc_generation(cdc::generation
auto sys_dist_ks = get_sys_dist_ks();
auto gen = co_await retrieve_generation_data(gen_id, _sys_ks.local(), *sys_dist_ks, { _token_metadata.get()->count_normal_token_owners() });
if (!gen) {
throw std::runtime_error(format(
throw std::runtime_error(fmt::format(
"Could not find CDC generation {} in distributed system tables (current time: {}),"
" even though some node gossiped about it.",
gen_id, db_clock::now()));

View File

@@ -121,7 +121,7 @@ public:
class no_generation_data_exception : public std::runtime_error {
public:
no_generation_data_exception(cdc::generation_id generation_ts)
: std::runtime_error(format("could not find generation data for timestamp {}", generation_ts))
: std::runtime_error(fmt::format("could not find generation data for timestamp {}", generation_ts))
{}
};

View File

@@ -66,8 +66,8 @@ void cdc::stats::parts_touched_stats::register_metrics(seastar::metrics::metric_
namespace sm = seastar::metrics;
auto register_part = [&] (part_type part, sstring part_name) {
metrics.add_group(cdc_group_name, {
sm::make_total_operations(format("operations_on_{}_performed_{}", part_name, suffix), count[(size_t)part],
sm::description(format("number of {} CDC operations that processed a {}", suffix, part_name)),
sm::make_total_operations(seastar::format("operations_on_{}_performed_{}", part_name, suffix), count[(size_t)part],
sm::description(seastar::format("number of {} CDC operations that processed a {}", suffix, part_name)),
{})
});
};

View File

@@ -69,7 +69,7 @@ bool cdc::metadata::streams_available() const {
cdc::stream_id cdc::metadata::get_stream(api::timestamp_type ts, dht::token tok) {
auto now = api::new_timestamp();
if (ts > now + get_generation_leeway().count()) {
throw exceptions::invalid_request_exception(format(
throw exceptions::invalid_request_exception(seastar::format(
"cdc: attempted to get a stream \"from the future\" ({}; current server time: {})."
" With CDC you cannot send writes with timestamps arbitrarily into the future, because we don't"
" know what streams will be used at that time.\n"
@@ -100,7 +100,7 @@ cdc::stream_id cdc::metadata::get_stream(api::timestamp_type ts, dht::token tok)
// the generation under `it` because that generation was operating at `now - generation_leeway`.
bool is_previous_gen = it != _gens.end() && std::next(it) != _gens.end() && std::next(it)->first <= now;
if (it == _gens.end() || ts < it->first || is_previous_gen) {
throw exceptions::invalid_request_exception(format(
throw exceptions::invalid_request_exception(seastar::format(
"cdc: attempted to get a stream \"from the past\" ({}; current server time: {})."
" With CDC you cannot send writes with timestamps too far into the past, because that would break"
" consistency properties.\n"
@@ -112,7 +112,7 @@ cdc::stream_id cdc::metadata::get_stream(api::timestamp_type ts, dht::token tok)
it = _gens.begin();
if (it == _gens.end() || ts < it->first) {
throw std::runtime_error(format(
throw std::runtime_error(fmt::format(
"cdc::metadata::get_stream: could not find any CDC stream for timestamp {}."
" Are we in the middle of a cluster upgrade?", format_timestamp(ts)));
}
@@ -129,7 +129,7 @@ cdc::stream_id cdc::metadata::get_stream(api::timestamp_type ts, dht::token tok)
// about the current generation in time. We won't be able to prevent it until we introduce transactions.
if (!it->second) {
throw std::runtime_error(format(
throw std::runtime_error(fmt::format(
"cdc: attempted to get a stream from a generation that we know about, but weren't able to retrieve"
" (generation timestamp: {}, write timestamp: {}). Make sure that the replicas which contain"
" this generation's data are alive and reachable from this node.", format_timestamp(it->first), format_timestamp(ts)));

View File

@@ -262,7 +262,7 @@ class cql3_type::raw_tuple : public raw {
std::vector<shared_ptr<raw>> _types;
virtual sstring to_string() const override {
return format("tuple<{}>", fmt::join(_types, ", "));
return seastar::format("tuple<{}>", fmt::join(_types, ", "));
}
public:
raw_tuple(std::vector<shared_ptr<raw>> types)

View File

@@ -2161,7 +2161,7 @@ static cql3::raw_value do_evaluate(const function_call& fun_call, const evaluati
try {
scalar_fun->return_type()->validate(*result);
} catch (marshal_exception&) {
throw runtime_exception(format("Return of function {} ({}) is not a valid value for its declared return type {}",
throw runtime_exception(fmt::format("Return of function {} ({}) is not a valid value for its declared return type {}",
*scalar_fun, to_hex(result),
scalar_fun->return_type()->as_cql3_type()
));
@@ -2407,7 +2407,7 @@ type_of(const expression& e) {
static std::optional<std::reference_wrapper<const column_value>> get_single_column_restriction_column(const expression& e) {
if (find_in_expression<unresolved_identifier>(e, [](const auto&) {return true;})) {
on_internal_error(expr_logger,
format("get_single_column_restriction_column expects a prepared expression, but it's not: {}", e));
seastar::format("get_single_column_restriction_column expects a prepared expression, but it's not: {}", e));
}
const column_value* the_only_column = nullptr;

View File

@@ -110,7 +110,7 @@ void validate_token_relation(const std::vector<const column_definition*> column_
"The token() function must be applied to all partition key components or none of them");
}
throw exceptions::invalid_request_exception(
format("The token function arguments must be in the partition key order: {}",
seastar::format("The token function arguments must be in the partition key order: {}",
fmt::join(boost::adaptors::transform(pk, [](const column_definition& cd) {
return cd.name_as_text();
}), ", ")));

View File

@@ -63,7 +63,7 @@ public:
}
virtual sstring column_name(const std::vector<sstring>& column_names) const override {
return format("{}({})", _name, fmt::join(column_names, ", "));
return seastar::format("{}({})", _name, fmt::join(column_names, ", "));
}
virtual void print(std::ostream& os) const override;

View File

@@ -194,7 +194,7 @@ functions::make_arg_spec(const sstring& receiver_ks, std::optional<const std::st
std::transform(name.begin(), name.end(), name.begin(), ::tolower);
return make_lw_shared<column_specification>(receiver_ks,
receiver_cf,
::make_shared<column_identifier>(format("arg{:d}({})", i, name), true),
::make_shared<column_identifier>(seastar::format("arg{:d}({})", i, name), true),
fun.arg_types()[i]);
}
@@ -329,7 +329,7 @@ functions::get(data_dictionary::database db,
throw exceptions::invalid_request_exception("functions::get for token doesn't have a known column family");
}
if (schema == nullptr) {
throw exceptions::invalid_request_exception(format("functions::get for token cannot find {} table", *receiver_cf));
throw exceptions::invalid_request_exception(seastar::format("functions::get for token cannot find {} table", *receiver_cf));
}
auto fun = ::make_shared<token_fct>(schema);
validate_types(db, keyspace, schema.get(), fun, provided_args, receiver_ks, receiver_cf);
@@ -410,13 +410,13 @@ functions::get(data_dictionary::database db,
if (compatibles.empty()) {
throw exceptions::invalid_request_exception(
format("Invalid call to function {}, none of its type signatures match (known type signatures: {})",
seastar::format("Invalid call to function {}, none of its type signatures match (known type signatures: {})",
name, fmt::join(candidates, ", ")));
}
if (compatibles.size() > 1) {
throw exceptions::invalid_request_exception(
format("Ambiguous call to function {} (can be matched by following signatures: {}): use type casts to disambiguate",
seastar::format("Ambiguous call to function {} (can be matched by following signatures: {}): use type casts to disambiguate",
name, fmt::join(compatibles, ", ")));
}

View File

@@ -720,7 +720,7 @@ query_processor::parse_statement(const sstring_view& query, dialect d) {
throw;
} catch (const std::exception& e) {
log.error("The statement: {} could not be parsed: {}", query, e.what());
throw exceptions::syntax_exception(format("Failed parsing statement: [{}] reason: {}", query, e.what()));
throw exceptions::syntax_exception(seastar::format("Failed parsing statement: [{}] reason: {}", query, e.what()));
}
}
@@ -738,7 +738,7 @@ query_processor::parse_statements(std::string_view queries, dialect d) {
throw;
} catch (const std::exception& e) {
log.error("The statements: {} could not be parsed: {}", queries, e.what());
throw exceptions::syntax_exception(format("Failed parsing statements: [{}] reason: {}", queries, e.what()));
throw exceptions::syntax_exception(seastar::format("Failed parsing statements: [{}] reason: {}", queries, e.what()));
}
}

View File

@@ -678,7 +678,7 @@ void statement_restrictions::add_single_column_parition_key_restriction(const ex
}
if (has_token_restrictions()) {
throw exceptions::invalid_request_exception(
format("Columns \"{}\" cannot be restricted by both a normal relation and a token relation",
seastar::format("Columns \"{}\" cannot be restricted by both a normal relation and a token relation",
fmt::join(expr::get_sorted_column_defs(_partition_key_restrictions) |
boost::adaptors::transformed([](auto* p) {
return maybe_column_definition{p};
@@ -693,7 +693,7 @@ void statement_restrictions::add_single_column_parition_key_restriction(const ex
void statement_restrictions::add_token_partition_key_restriction(const expr::binary_operator& restr) {
if (!partition_key_restrictions_is_empty() && !has_token_restrictions()) {
throw exceptions::invalid_request_exception(
format("Columns \"{}\" cannot be restricted by both a normal relation and a token relation",
seastar::format("Columns \"{}\" cannot be restricted by both a normal relation and a token relation",
fmt::join(expr::get_sorted_column_defs(_partition_key_restrictions) |
boost::adaptors::transformed([](auto* p) {
return maybe_column_definition{p};

View File

@@ -77,7 +77,7 @@ void cql3::statements::alter_keyspace_statement::validate(query_processor& qp, c
throw exceptions::invalid_request_exception("Keyspace storage options not supported in the cluster");
}
if (!current_options.can_update_to(new_options)) {
throw exceptions::invalid_request_exception(format("Cannot alter storage options: {} to {} is not supported",
throw exceptions::invalid_request_exception(seastar::format("Cannot alter storage options: {} to {} is not supported",
current_options.type_string(), new_options.type_string()));
}

View File

@@ -139,11 +139,12 @@ static void validate_column_rename(data_dictionary::database db, const schema& s
if (!schema.indices().empty()) {
auto dependent_indices = db.find_column_family(schema.id()).get_index_manager().get_dependent_indices(*def);
if (!dependent_indices.empty()) {
auto index_names = fmt::join(dependent_indices | boost::adaptors::transformed([](const index_metadata& im) {
return im.name();
}), ", ");
throw exceptions::invalid_request_exception(
format("Cannot rename column {} because it has dependent secondary indexes ({})", from, index_names));
seastar::format("Cannot rename column {} because it has dependent secondary indexes ({})",
from,
fmt::join(dependent_indices | boost::adaptors::transformed([](const index_metadata& im) {
return im.name();
}), ", ")));
}
}
}

View File

@@ -211,7 +211,7 @@ void batch_statement::verify_batch_size(query_processor& qp, const std::vector<m
for (auto&& m : mutations) {
ks_cf_pairs.insert(m.schema()->ks_name() + "." + m.schema()->cf_name());
}
return format("Batch modifying {:d} partitions in {} is of size {:d} bytes, exceeding specified {} threshold of {:d} by {:d}.",
return seastar::format("Batch modifying {:d} partitions in {} is of size {:d} bytes, exceeding specified {} threshold of {:d} by {:d}.",
mutations.size(), fmt::join(ks_cf_pairs, ", "), size, type, threshold, size - threshold);
};
if (size > fail_threshold) {

View File

@@ -41,7 +41,7 @@ seastar::future<shared_ptr<db::functions::function>> create_aggregate_statement:
auto state_func = dynamic_pointer_cast<functions::scalar_function>(functions::instance().find(functions::function_name{_name.keyspace, _sfunc}, acc_types));
if (!state_func) {
auto acc_type_names = acc_types | boost::adaptors::transformed([] (auto&& t) { return t->cql3_type_name(); });
throw exceptions::invalid_request_exception(format("State function {}({}) not found", _sfunc, fmt::join(acc_type_names, ", ")));
throw exceptions::invalid_request_exception(seastar::format("State function {}({}) not found", _sfunc, fmt::join(acc_type_names, ", ")));
}
if (state_func->return_type() != state_type) {
throw exceptions::invalid_request_exception(format("State function '{}' doesn't return state ({})", _sfunc, state_type->cql3_type_name()));

View File

@@ -249,7 +249,7 @@ void create_index_statement::validate_for_collection(const index_target& target,
case index_target::target_type::keys_and_values:
if (!cd.type->is_map()) {
constexpr const char* msg_format = "Cannot create secondary index on {} of column {} with non-map type";
throw exceptions::invalid_request_exception(format(msg_format, to_sstring(target.type), cd.name_as_text()));
throw exceptions::invalid_request_exception(seastar::format(msg_format, to_sstring(target.type), cd.name_as_text()));
}
break;
}

View File

@@ -343,7 +343,7 @@ std::unique_ptr<prepared_statement> create_table_statement::raw_statement::prepa
#endif
} else {
if (stmt->_columns.size() > 1) {
throw exceptions::invalid_request_exception(format("COMPACT STORAGE with composite PRIMARY KEY allows no more than one column not part of the PRIMARY KEY (got: {})",
throw exceptions::invalid_request_exception(seastar::format("COMPACT STORAGE with composite PRIMARY KEY allows no more than one column not part of the PRIMARY KEY (got: {})",
fmt::join(stmt->_columns | boost::adaptors::map_keys, ", ")));
}
#if 0

View File

@@ -246,9 +246,10 @@ std::pair<view_ptr, cql3::cql_warnings_vec> create_view_statement::prepare_view(
}
if (!missing_pk_columns.empty()) {
auto column_names = fmt::join(missing_pk_columns | boost::adaptors::transformed(std::mem_fn(&column_definition::name_as_text)), ", ");
throw exceptions::invalid_request_exception(format("Cannot create Materialized View {} without primary key columns from base {} ({})",
column_family(), _base_name.get_column_family(), column_names));
throw exceptions::invalid_request_exception(seastar::format(
"Cannot create Materialized View {} without primary key columns from base {} ({})",
column_family(), _base_name.get_column_family(),
fmt::join(missing_pk_columns | boost::adaptors::transformed(std::mem_fn(&column_definition::name_as_text)), ", ")));
}
if (_partition_keys.empty()) {
@@ -289,9 +290,9 @@ std::pair<view_ptr, cql3::cql_warnings_vec> create_view_statement::prepare_view(
target_primary_keys.contains(non_pk_restrictions.cbegin()->first)) {
// This case (filter by new PK column of the view) works, as explained above
} else if (!non_pk_restrictions.empty()) {
auto column_names = fmt::join(non_pk_restrictions | boost::adaptors::map_keys | boost::adaptors::transformed(std::mem_fn(&column_definition::name_as_text)), ", ");
throw exceptions::invalid_request_exception(format("Non-primary key columns cannot be restricted in the SELECT statement used for materialized view {} creation (got restrictions on: {})",
column_family(), column_names));
throw exceptions::invalid_request_exception(seastar::format("Non-primary key columns cannot be restricted in the SELECT statement used for materialized view {} creation (got restrictions on: {})",
column_family(),
fmt::join(non_pk_restrictions | boost::adaptors::map_keys | boost::adaptors::transformed(std::mem_fn(&column_definition::name_as_text)), ", ")));
}
// IS NOT NULL restrictions are handled separately from other restrictions.

View File

@@ -141,7 +141,7 @@ seastar::future<shared_ptr<db::functions::function>> drop_function_statement_bas
if (_args_present) {
func = functions::instance().find(_name, _arg_types);
if (!func && !_if_exists) {
throw exceptions::invalid_request_exception(format("User function {}({}) doesn't exist", _name, _arg_types));
throw exceptions::invalid_request_exception(seastar::format("User function {}({}) doesn't exist", _name, _arg_types));
}
} else {
auto funcs = functions::instance().find(_name);

View File

@@ -435,11 +435,11 @@ modification_statement::process_where_clause(data_dictionary::database db, expr:
to_string(_restrictions->get_partition_key_restrictions())));
}
if (!_restrictions->get_non_pk_restriction().empty()) {
auto column_names = fmt::join(_restrictions->get_non_pk_restriction()
throw exceptions::invalid_request_exception(seastar::format("Invalid where clause contains non PRIMARY KEY columns: {}",
fmt::join(_restrictions->get_non_pk_restriction()
| boost::adaptors::map_keys
| boost::adaptors::indirected
| boost::adaptors::transformed(std::mem_fn(&column_definition::name_as_text)), ", ");
throw exceptions::invalid_request_exception(format("Invalid where clause contains non PRIMARY KEY columns: {}", column_names));
| boost::adaptors::transformed(std::mem_fn(&column_definition::name_as_text)), ", ")));
}
const expr::expression& ck_restrictions = _restrictions->get_clustering_columns_restrictions();
if (has_slice(ck_restrictions) && !allow_clustering_key_slices()) {

View File

@@ -1812,7 +1812,7 @@ mutation_fragments_select_statement::do_execute(query_processor& qp, service::qu
auto last_host = state->get_last_replicas().begin()->second.front();
if (last_host != this_node) {
const auto last_node = topo.find_node(last_host);
throw exceptions::invalid_request_exception(format(
throw exceptions::invalid_request_exception(seastar::format(
"Moving between coordinators is not allowed in SELECT FROM MUTATION_FRAGMENTS() statements, last page's coordinator was {}{}",
last_host,
last_node ? fmt::format("({})", last_node->endpoint()) : ""));

View File

@@ -253,7 +253,7 @@ static bytes from_json_object_aux(const user_type_impl& ut, const rjson::value&
}
if (!remaining_names.empty()) {
throw marshal_exception(format(
throw marshal_exception(seastar::format(
"Extraneous field definition for user type {}: {}", ut.get_name_as_string(), *remaining_names.begin()));
}
return ut.build_value(std::move(raw_tuple));

View File

@@ -22,7 +22,7 @@ private:
public:
missing_column(std::string_view column_name)
: bad_variant_access()
, _msg(format("missing column: {}", column_name))
, _msg(seastar::format("missing column: {}", column_name))
{}
const char* what() const noexcept override {

View File

@@ -356,7 +356,7 @@ static std::vector<sstring> experimental_feature_names() {
// created on-the-fly below with format(). Instead, we need to save the
// help string to a static object, and return a string_view to it:
static std::string_view experimental_features_help_string() {
static sstring s = format("Unlock experimental features provided as the "
static sstring s = seastar::format("Unlock experimental features provided as the "
"option arguments (possible values: {}). Can be repeated.",
experimental_feature_names());
return s;

View File

@@ -87,7 +87,7 @@ aggregate_function::column_name(const std::vector<sstring>& column_names) const
if (_agg.column_name_override) {
return *_agg.column_name_override;
}
return format("{}({})", _agg.name, fmt::join(column_names, ", "));
return seastar::format("{}({})", _agg.name, fmt::join(column_names, ", "));
}
}

View File

@@ -153,10 +153,10 @@ future<> cql_table_large_data_handler::try_record(std::string_view large_table,
sstring extra_fields_str;
sstring extra_values;
for (std::string_view field : extra_fields) {
extra_fields_str += format(", {}", field);
extra_fields_str += seastar::format(", {}", field);
extra_values += ", ?";
}
const sstring req = format("INSERT INTO system.large_{}s (keyspace_name, table_name, sstable_name, {}_size, partition_key, compaction_time{}) VALUES (?, ?, ?, ?, ?, ?{}) USING TTL 2592000",
const sstring req = seastar::format("INSERT INTO system.large_{}s (keyspace_name, table_name, sstable_name, {}_size, partition_key, compaction_time{}) VALUES (?, ?, ?, ?, ?, ?{}) USING TTL 2592000",
large_table, large_table, extra_fields_str, extra_values);
const schema &s = *sst.get_schema();
auto ks_name = s.ks_name();
@@ -205,7 +205,7 @@ future<> cql_table_large_data_handler::internal_record_large_cells(const sstable
auto ck_str = key_to_str(*clustering_key, s);
return try_record("cell", sst, partition_key, int64_t(cell_size), cell_type, column_name, extra_fields, ck_str, column_name);
} else {
auto desc = format("static {}", cell_type);
auto desc = seastar::format("static {}", cell_type);
return try_record("cell", sst, partition_key, int64_t(cell_size), desc, column_name, extra_fields, data_value::make_null(utf8_type), column_name);
}
}
@@ -220,7 +220,7 @@ future<> cql_table_large_data_handler::internal_record_large_cells_and_collectio
auto ck_str = key_to_str(*clustering_key, s);
return try_record("cell", sst, partition_key, int64_t(cell_size), cell_type, column_name, extra_fields, ck_str, column_name, data_value((int64_t)collection_elements));
} else {
auto desc = format("static {}", cell_type);
auto desc = seastar::format("static {}", cell_type);
return try_record("cell", sst, partition_key, int64_t(cell_size), desc, column_name, extra_fields, data_value::make_null(utf8_type), column_name, data_value((int64_t)collection_elements));
}
}
@@ -240,7 +240,7 @@ future<> cql_table_large_data_handler::record_large_rows(const sstables::sstable
future<> cql_table_large_data_handler::delete_large_data_entries(const schema& s, sstring sstable_name, std::string_view large_table_name) const {
SCYLLA_ASSERT(_sys_ks);
const sstring req =
format("DELETE FROM system.{} WHERE keyspace_name = ? AND table_name = ? AND sstable_name = ?",
seastar::format("DELETE FROM system.{} WHERE keyspace_name = ? AND table_name = ? AND sstable_name = ?",
large_table_name);
large_data_logger.debug("Dropping entries from {}: ks = {}, table = {}, sst = {}",
large_table_name, s.ks_name(), s.cf_name(), sstable_name);

View File

@@ -43,7 +43,7 @@ per_partition_rate_limit_options::per_partition_rate_limit_options(std::map<sstr
_max_reads_per_second = handle_uint32_arg(max_reads_per_second_key);
if (!map.empty()) {
throw exceptions::configuration_exception(format(
throw exceptions::configuration_exception(seastar::format(
"Unknown keys in map for per_partition_rate_limit extension: {}",
fmt::join(map | boost::adaptors::map_keys, ", ")));
}

View File

@@ -189,7 +189,7 @@ static void check_exists(std::string_view ks_name, std::string_view cf_name, con
// 'upgrade' Scylla from Cassandra work directories (which is an unsupported upgrade path)
// on which this check does not pass. We don't want the node to crash in these dtests,
// but throw an error instead. In production clusters we don't crash on `on_internal_error` anyway.
auto err = format("expected {}.{} to exist but it doesn't", ks_name, cf_name);
auto err = fmt::format("expected {}.{} to exist but it doesn't", ks_name, cf_name);
dlogger.error("{}", err);
throw std::runtime_error{std::move(err)};
}

View File

@@ -166,7 +166,7 @@ db::view::base_dependent_view_info::base_dependent_view_info(bool has_base_non_p
const std::vector<column_id>& db::view::base_dependent_view_info::base_regular_columns_in_view_pk() const {
if (use_only_for_reads) {
on_internal_error(vlogger,
format("base_regular_columns_in_view_pk(): operation unsupported when initialized only for view reads. "
seastar::format("base_regular_columns_in_view_pk(): operation unsupported when initialized only for view reads. "
"Missing column in the base table: {}", to_sstring_view(_column_missing_in_base.value_or(bytes()))));
}
return _base_regular_columns_in_view_pk;
@@ -175,7 +175,7 @@ const std::vector<column_id>& db::view::base_dependent_view_info::base_regular_c
const std::vector<column_id>& db::view::base_dependent_view_info::base_static_columns_in_view_pk() const {
if (use_only_for_reads) {
on_internal_error(vlogger,
format("base_static_columns_in_view_pk(): operation unsupported when initialized only for view reads. "
seastar::format("base_static_columns_in_view_pk(): operation unsupported when initialized only for view reads. "
"Missing column in the base table: {}", to_sstring_view(_column_missing_in_base.value_or(bytes()))));
}
return _base_static_columns_in_view_pk;
@@ -184,7 +184,7 @@ const std::vector<column_id>& db::view::base_dependent_view_info::base_static_co
const schema_ptr& db::view::base_dependent_view_info::base_schema() const {
if (use_only_for_reads) {
on_internal_error(vlogger,
format("base_schema(): operation unsupported when initialized only for view reads. "
seastar::format("base_schema(): operation unsupported when initialized only for view reads. "
"Missing column in the base table: {}", to_sstring_view(_column_missing_in_base.value_or(bytes()))));
}
return _base_schema;

View File

@@ -83,12 +83,13 @@ std::unique_ptr<dht::i_partitioner> make_partitioner(sstring partitioner_name) {
try {
return create_object<i_partitioner>(partitioner_name);
} catch (std::exception& e) {
auto supported_partitioners = fmt::join(
class_registry<i_partitioner>::classes() |
boost::adaptors::map_keys,
", ");
throw std::runtime_error(format("Partitioner {} is not supported, supported partitioners = {{ {} }} : {}",
partitioner_name, supported_partitioners, e.what()));
throw std::runtime_error(fmt::format("Partitioner {} is not supported, supported partitioners = {{ {} }} : {}",
partitioner_name,
fmt::join(
class_registry<i_partitioner>::classes() |
boost::adaptors::map_keys,
", "),
e.what()));
}
}

View File

@@ -47,7 +47,7 @@ const std::unordered_map<exception_code, sstring>& exception_map() {
template<typename... Args>
static inline sstring prepare_message(const char* fmt, Args&&... args) noexcept {
try {
return format(fmt, std::forward<Args>(args)...);
return seastar::format(fmt, std::forward<Args>(args)...);
} catch (...) {
return sstring();
}

View File

@@ -2179,7 +2179,7 @@ future<> gossiper::do_shadow_round(std::unordered_set<gms::inet_address> nodes,
break;
}
if (clk::now() > start_time + std::chrono::milliseconds(_gcfg.shadow_round_ms)) {
throw std::runtime_error(format("Unable to gossip with any nodes={} (ShadowRound).", nodes));
throw std::runtime_error(fmt::format("Unable to gossip with any nodes={} (ShadowRound).", nodes));
}
sleep_abortable(std::chrono::seconds(1), _abort_source).get();
logger.info("Connect nodes={} again ... ({} seconds passed)",
@@ -2280,7 +2280,7 @@ future<> gossiper::add_local_application_state(application_state_map states) {
auto permit = co_await gossiper.lock_endpoint(ep_addr, null_permit_id);
auto ep_state_before = gossiper.get_endpoint_state_ptr(ep_addr);
if (!ep_state_before) {
auto err = format("endpoint_state_map does not contain endpoint = {}, application_states = {}",
auto err = fmt::format("endpoint_state_map does not contain endpoint = {}, application_states = {}",
ep_addr, states);
co_await coroutine::return_exception(std::runtime_error(err));
}
@@ -2451,7 +2451,7 @@ future<> gossiper::wait_alive(noncopyable_function<std::vector<gms::inet_address
break;
}
if (std::chrono::steady_clock::now() > timeout + start_time) {
throw std::runtime_error(format("Failed to mark node as alive in {} ms, nodes={}, live_nodes={}",
throw std::runtime_error(fmt::format("Failed to mark node as alive in {} ms, nodes={}, live_nodes={}",
timeout.count(), nodes, live_nodes));
}
co_await sleep_abortable(std::chrono::milliseconds(100), _abort_source);
@@ -2712,7 +2712,7 @@ void gossiper::check_knows_remote_features(std::set<std::string_view>& local_fea
logger.info("Feature check passed. Local node {} features = {}, Remote common_features = {}",
local_endpoint, local_features, common_features);
} else {
throw std::runtime_error(format("Feature check failed. This node can not join the cluster because it does not understand the feature. Local node {} features = {}, Remote common_features = {}", local_endpoint, local_features, common_features));
throw std::runtime_error(fmt::format("Feature check failed. This node can not join the cluster because it does not understand the feature. Local node {} features = {}, Remote common_features = {}", local_endpoint, local_features, common_features));
}
}

View File

@@ -18,6 +18,7 @@
#include "utils/date.h"
#include <seastar/core/align.hh>
#include <lua.hpp>
#include "seastarx.hh"
#include "db/config.hh"
// Lua 5.4 added an extra parameter to lua_resume
@@ -457,7 +458,7 @@ static sstring get_string(lua_State *l, int index) {
return sstring(p.str());
},
[] (const auto& v) {
return format("{}", v);
return seastar::format("{}", v);
}));
}
@@ -497,17 +498,17 @@ static lua_date_table get_lua_date_table(lua_State* l, int index) {
if (k == "month") {
month = (unsigned char)v;
if (*month != v) {
throw exceptions::invalid_request_exception(format("month is too large: '{}'", v.str()));
throw exceptions::invalid_request_exception(seastar::format("month is too large: '{}'", v.str()));
}
} else if (k == "day") {
day = (unsigned char)v;
if (*day != v) {
throw exceptions::invalid_request_exception(format("day is too large: '{}'", v.str()));
throw exceptions::invalid_request_exception(seastar::format("day is too large: '{}'", v.str()));
}
} else {
int32_t vint(v);
if (vint != v) {
throw exceptions::invalid_request_exception(format("{} is too large: '{}'", k, v.str()));
throw exceptions::invalid_request_exception(seastar::format("{} is too large: '{}'", k, v.str()));
}
if (k == "year") {
year = vint;
@@ -615,17 +616,17 @@ struct from_lua_visitor {
if (k == "months") {
months = int32_t(v);
if (v != months) {
throw exceptions::invalid_request_exception(format("{} months doesn't fit in a 32 bit integer", v.str()));
throw exceptions::invalid_request_exception(seastar::format("{} months doesn't fit in a 32 bit integer", v.str()));
}
} else if (k == "days") {
days = int32_t(v);
if (v != days) {
throw exceptions::invalid_request_exception(format("{} days doesn't fit in a 32 bit integer", v.str()));
throw exceptions::invalid_request_exception(seastar::format("{} days doesn't fit in a 32 bit integer", v.str()));
}
} else if (k == "nanoseconds") {
nanoseconds = int64_t(v);
if (v != nanoseconds) {
throw exceptions::invalid_request_exception(format("{} nanoseconds doesn't fit in a 64 bit integer", v.str()));
throw exceptions::invalid_request_exception(seastar::format("{} nanoseconds doesn't fit in a 64 bit integer", v.str()));
}
} else {
throw exceptions::invalid_request_exception(format("invalid duration field: '{}'", k));
@@ -715,7 +716,7 @@ struct from_lua_visitor {
auto k_varint = get_varint(l, -2);
if (k_varint > num_elements || k_varint < 1) {
throw exceptions::invalid_request_exception(
format("key {} is not valid for a sequence of size {}", k_varint.str(), num_elements));
seastar::format("key {} is not valid for a sequence of size {}", k_varint.str(), num_elements));
}
size_t k = size_t(k_varint);
opt_elements[k - 1] = convert_from_lua(l, t.type(k - 1));

View File

@@ -211,7 +211,7 @@ struct from_val_visitor {
"externref",
};
if (val.kind() != expected) {
throw wasm::exception(format("Incorrect wasm value kind returned. Expected {}, got {}", kind_str[size_t(expected)], kind_str[size_t(val.kind())]));
throw wasm::exception(seastar::format("Incorrect wasm value kind returned. Expected {}, got {}", kind_str[size_t(expected)], kind_str[size_t(val.kind())]));
}
}
};
@@ -253,7 +253,7 @@ seastar::future<bytes_opt> run_script(context& ctx, wasmtime::Store& store, wasm
} else if (param) {
visit(type, init_arg_visitor{param, *argv, store, instance});
} else {
co_await coroutine::return_exception(wasm::exception(format("Function {} cannot be called on null values", ctx.function_name)));
co_await coroutine::return_exception(wasm::exception(seastar::format("Function {} cannot be called on null values", ctx.function_name)));
}
}
auto rets = wasmtime::get_val_vec();

View File

@@ -241,7 +241,7 @@ public:
};
for (const auto& [dc, rf] : dc_rf) {
if (rf > endpoints_in(dc)) {
throw exceptions::configuration_exception(fmt::format(
throw exceptions::configuration_exception(seastar::format(
"Datacenter {} doesn't have enough token-owning nodes for replication_factor={}", dc, rf));
}
}
@@ -471,7 +471,7 @@ future<tablet_replica_set> network_topology_strategy::add_tablets_in_dc(schema_p
if (candidate_racks.empty()) {
on_internal_error(tablet_logger,
format("allocate_replica {}.{}: no candidate racks found for dc={} allocated={} rf={}: existing={}",
seastar::format("allocate_replica {}.{}: no candidate racks found for dc={} allocated={} rf={}: existing={}",
s->ks_name(), s->cf_name(), dc, dc_node_count, dc_rf, replicas_per_rack));
}
@@ -482,7 +482,7 @@ future<tablet_replica_set> network_topology_strategy::add_tablets_in_dc(schema_p
auto& nodes = candidate->nodes;
if (nodes.empty()) {
on_internal_error(tablet_logger,
format("allocate_replica {}.{} tablet_id={}: candidates vector for rack={} is empty for allocating tablet replicas in dc={} allocated={} rf={}",
seastar::format("allocate_replica {}.{} tablet_id={}: candidates vector for rack={} is empty for allocating tablet replicas in dc={} allocated={} rf={}",
s->ks_name(), s->cf_name(), tb.id, rack, dc, dc_node_count, dc_rf));
}
auto host_id = nodes.back().host;
@@ -492,7 +492,7 @@ future<tablet_replica_set> network_topology_strategy::add_tablets_in_dc(schema_p
// Sanity check that a node is not used more than once
if (!inserted) {
on_internal_error(tablet_logger,
format("allocate_replica {}.{} tablet_id={}: allocated replica={} node already used when allocating tablet replicas in dc={} allocated={} rf={}: replicas={}",
seastar::format("allocate_replica {}.{} tablet_id={}: allocated replica={} node already used when allocating tablet replicas in dc={} allocated={} rf={}: replicas={}",
s->ks_name(), s->cf_name(), tb.id, replica, dc, dc_node_count, dc_rf, replicas));
}
nodes.pop_back();

View File

@@ -869,7 +869,7 @@ void token_metadata_impl::del_leaving_endpoint(host_id endpoint) {
void token_metadata_impl::add_replacing_endpoint(host_id existing_node, host_id replacing_node) {
if (existing_node == replacing_node) {
on_internal_error(tlogger, format("Can't replace node {} with itself", existing_node));
on_internal_error(tlogger, seastar::format("Can't replace node {} with itself", existing_node));
}
tlogger.info("Added node {} as pending replacing endpoint which replaces existing node {}",
replacing_node, existing_node);

View File

@@ -171,13 +171,13 @@ const node* topology::add_node(node_holder nptr) {
if (nptr->topology() != this) {
if (nptr->topology()) {
on_fatal_internal_error(tlogger, format("topology[{}]: {} belongs to different topology={}", fmt::ptr(this), node_printer(node), fmt::ptr(node->topology())));
on_fatal_internal_error(tlogger, seastar::format("topology[{}]: {} belongs to different topology={}", fmt::ptr(this), node_printer(node), fmt::ptr(node->topology())));
}
nptr->set_topology(this);
}
if (node->idx() > 0) {
on_internal_error(tlogger, format("topology[{}]: {}: has assigned idx", fmt::ptr(this), node_printer(nptr.get())));
on_internal_error(tlogger, seastar::format("topology[{}]: {}: has assigned idx", fmt::ptr(this), node_printer(nptr.get())));
}
// Note that _nodes contains also the this_node()
@@ -187,7 +187,7 @@ const node* topology::add_node(node_holder nptr) {
try {
if (is_configured_this_node(*node)) {
if (_this_node) {
on_internal_error(tlogger, format("topology[{}]: {}: local node already mapped to {}", fmt::ptr(this), node_printer(node), node_printer(this_node())));
on_internal_error(tlogger, seastar::format("topology[{}]: {}: local node already mapped to {}", fmt::ptr(this), node_printer(node), node_printer(this_node())));
}
locator::node& n = *_nodes.back();
n._is_this_node = node::this_node::yes;
@@ -220,13 +220,13 @@ const node* topology::update_node(node* node, std::optional<host_id> opt_id, std
if (opt_id) {
if (*opt_id != node->host_id()) {
if (!*opt_id) {
on_internal_error(tlogger, format("Updating node host_id to null is disallowed: {}: new host_id={}", node_printer(node), *opt_id));
on_internal_error(tlogger, seastar::format("Updating node host_id to null is disallowed: {}: new host_id={}", node_printer(node), *opt_id));
}
if (node->is_this_node() && node->host_id()) {
on_internal_error(tlogger, format("This node host_id is already set: {}: new host_id={}", node_printer(node), *opt_id));
on_internal_error(tlogger, seastar::format("This node host_id is already set: {}: new host_id={}", node_printer(node), *opt_id));
}
if (_nodes_by_host_id.contains(*opt_id)) {
on_internal_error(tlogger, format("Cannot update node host_id: {}: new host_id already exists: {}", node_printer(node), node_printer(_nodes_by_host_id[*opt_id])));
on_internal_error(tlogger, seastar::format("Cannot update node host_id: {}: new host_id already exists: {}", node_printer(node), node_printer(_nodes_by_host_id[*opt_id])));
}
changed = true;
} else {
@@ -236,7 +236,7 @@ const node* topology::update_node(node* node, std::optional<host_id> opt_id, std
if (opt_ep) {
if (*opt_ep != node->endpoint()) {
if (*opt_ep == inet_address{}) {
on_internal_error(tlogger, format("Updating node endpoint to null is disallowed: {}: new endpoint={}", node_printer(node), *opt_ep));
on_internal_error(tlogger, seastar::format("Updating node endpoint to null is disallowed: {}: new endpoint={}", node_printer(node), *opt_ep));
}
changed = true;
} else {
@@ -311,7 +311,7 @@ void topology::index_node(const node* node) {
tlogger.trace("topology[{}]: index_node: {}, at {}", fmt::ptr(this), node_printer(node), lazy_backtrace());
if (node->idx() < 0) {
on_internal_error(tlogger, format("topology[{}]: {}: must already have a valid idx", fmt::ptr(this), node_printer(node)));
on_internal_error(tlogger, seastar::format("topology[{}]: {}: must already have a valid idx", fmt::ptr(this), node_printer(node)));
}
// FIXME: for now we allow adding nodes with null host_id, for the following cases:
@@ -321,7 +321,7 @@ void topology::index_node(const node* node) {
if (node->host_id()) {
auto [nit, inserted_host_id] = _nodes_by_host_id.emplace(node->host_id(), node);
if (!inserted_host_id) {
on_internal_error(tlogger, format("topology[{}]: {}: node already exists", fmt::ptr(this), node_printer(node)));
on_internal_error(tlogger, seastar::format("topology[{}]: {}: node already exists", fmt::ptr(this), node_printer(node)));
}
}
if (node->endpoint() != inet_address{}) {
@@ -338,7 +338,7 @@ void topology::index_node(const node* node) {
if (node->host_id()) {
_nodes_by_host_id.erase(node->host_id());
}
on_internal_error(tlogger, format("topology[{}]: {}: node endpoint already mapped to {}", fmt::ptr(this), node_printer(node), node_printer(eit->second)));
on_internal_error(tlogger, seastar::format("topology[{}]: {}: node endpoint already mapped to {}", fmt::ptr(this), node_printer(node), node_printer(eit->second)));
}
}
if (!node->left() && !node->is_none()) {

View File

@@ -329,7 +329,7 @@ mutation_reader read_context::create_reader(
auto& rm = _readers[shard];
if (rm.state != reader_state::used && rm.state != reader_state::successful_lookup && rm.state != reader_state::inexistent) {
auto msg = format("Unexpected request to create reader for shard {}."
auto msg = seastar::format("Unexpected request to create reader for shard {}."
" The reader is expected to be in either `used`, `successful_lookup` or `inexistent` state,"
" but is in `{}` state instead.", shard, reader_state_to_string(rm.state));
mmq_log.warn("{}", msg);

View File

@@ -8,6 +8,7 @@
#include "mutation/mutation_fragment_stream_validator.hh"
#include "utils/to_string.hh"
#include "seastarx.hh"
logging::logger validator_log("mutation_fragment_stream_validator");
@@ -262,7 +263,7 @@ bool mutation_fragment_stream_validating_filter::operator()(const dht::decorated
sstring mutation_fragment_stream_validating_filter::full_name() const {
const auto& s = _validator.schema();
return format("{} ({}.{} {})", _name_view, s.ks_name(), s.cf_name(), s.id());
return seastar::format("{} ({}.{} {})", _name_view, s.ks_name(), s.cf_name(), s.id());
}
mutation_fragment_stream_validating_filter::mutation_fragment_stream_validating_filter(const char* name_literal, sstring name_value, const schema& s,

View File

@@ -386,7 +386,7 @@ std::optional<Querier> querier_cache::lookup_querier(
reinterpret_cast<uintptr_t>(&current_sem));
}
else if (can_be_used == can_use::no_fatal_semaphore_mismatch) {
on_internal_error(qlogger, format("semaphore mismatch detected, dropping reader {}: "
on_internal_error(qlogger, seastar::format("semaphore mismatch detected, dropping reader {}: "
"reader belongs to {} (0x{:x}) but the query class appropriate is {} (0x{:x})",
permit.description(),
q_semaphore_name,

View File

@@ -461,7 +461,7 @@ future<> server_impl::wait_for_state_change(seastar::abort_source* as) {
try {
return as ? _state_change_promise->get_shared_future(*as) : _state_change_promise->get_shared_future();
} catch (abort_requested_exception&) {
throw request_aborted(format(
throw request_aborted(fmt::format(
"Aborted while waiting for state change on server: {}, latest applied entry: {}, current state: {}", _id, _applied_idx, _fsm->current_state()));
}
}

View File

@@ -279,7 +279,7 @@ public:
}
if (_need_cpu_branches) {
on_internal_error_noexcept(rcslog, format("reader_permit::impl::~impl(): permit {}.{}:{} destroyed with {} need_cpu branches",
on_internal_error_noexcept(rcslog, seastar::format("reader_permit::impl::~impl(): permit {}.{}:{} destroyed with {} need_cpu branches",
_schema ? _schema->ks_name() : "*",
_schema ? _schema->cf_name() : "*",
_op_name_view,
@@ -288,7 +288,7 @@ public:
}
if (_awaits_branches) {
on_internal_error_noexcept(rcslog, format("reader_permit::impl::~impl(): permit {}.{}:{} destroyed with {} awaits branches",
on_internal_error_noexcept(rcslog, seastar::format("reader_permit::impl::~impl(): permit {}.{}:{} destroyed with {} awaits branches",
_schema ? _schema->ks_name() : "*",
_schema ? _schema->cf_name() : "*",
_op_name_view,
@@ -406,7 +406,7 @@ public:
}
sstring description() const {
return format("{}.{}:{}",
return seastar::format("{}.{}:{}",
_schema ? _schema->ks_name() : "*",
_schema ? _schema->cf_name() : "*",
_op_name_view);
@@ -1286,7 +1286,7 @@ std::exception_ptr reader_concurrency_semaphore::check_queue_size(std::string_vi
if (_stats.waiters >= _max_queue_length) {
_stats.total_reads_shed_due_to_overload++;
maybe_dump_reader_permit_diagnostics(*this, fmt::format("{} queue overload", queue_name));
return std::make_exception_ptr(std::runtime_error(format("{}: {} queue overload", _name, queue_name)));
return std::make_exception_ptr(std::runtime_error(fmt::format("{}: {} queue overload", _name, queue_name)));
}
return {};
}

View File

@@ -199,7 +199,7 @@ mutation_reader make_foreign_reader(schema_ptr schema,
template <typename... Arg>
static void require(bool condition, const char* msg, const Arg&... arg) {
if (!condition) {
on_internal_error(mrlog, format(msg, arg...));
on_internal_error(mrlog, seastar::format(msg, arg...));
}
}

View File

@@ -650,7 +650,7 @@ void repair::shard_repair_task_impl::check_failed_ranges() {
if (!_aborted) {
failed_because = _failed_because ? *_failed_because : "unknown";
}
auto msg = format("repair[{}]: {} out of {} ranges failed, keyspace={}, tables={}, repair_reason={}, nodes_down_during_repair={}, aborted_by_user={}, failed_because={}",
auto msg = seastar::format("repair[{}]: {} out of {} ranges failed, keyspace={}, tables={}, repair_reason={}, nodes_down_during_repair={}, aborted_by_user={}, failed_because={}",
global_repair_id.uuid(), nr_failed_ranges, ranges_size(), _status.keyspace, table_names(), _reason, nodes_down, _aborted, failed_because);
rlogger.warn("{}", msg);
throw std::runtime_error(msg);
@@ -706,7 +706,7 @@ future<> repair::shard_repair_task_impl::repair_range(const dht::token_range& ra
global_repair_id.uuid(), ranges_index, ranges_size(), _status.keyspace, table.name, range, neighbors, live_neighbors, status);
// If the task is aborted, its state will change to failed. One can wait for this with task_manager::task::done().
abort();
co_await coroutine::return_exception(std::runtime_error(format("Repair mandatory neighbor={} is not alive, keyspace={}, mandatory_neighbors={}",
co_await coroutine::return_exception(std::runtime_error(fmt::format("Repair mandatory neighbor={} is not alive, keyspace={}, mandatory_neighbors={}",
node, _status.keyspace, mandatory_neighbors)));
}
}
@@ -794,7 +794,7 @@ sstring repair_stats::get_stats() {
row_from_disk_rows_per_sec[x.first] = 0;
}
}
return format("round_nr={}, round_nr_fast_path_already_synced={}, round_nr_fast_path_same_combined_hashes={}, round_nr_slow_path={}, rpc_call_nr={}, tx_hashes_nr={}, rx_hashes_nr={}, duration={} seconds, tx_row_nr={}, rx_row_nr={}, tx_row_bytes={}, rx_row_bytes={}, row_from_disk_bytes={}, row_from_disk_nr={}, row_from_disk_bytes_per_sec={} MiB/s, row_from_disk_rows_per_sec={} Rows/s, tx_row_nr_peer={}, rx_row_nr_peer={}",
return seastar::format("round_nr={}, round_nr_fast_path_already_synced={}, round_nr_fast_path_same_combined_hashes={}, round_nr_slow_path={}, rpc_call_nr={}, tx_hashes_nr={}, rx_hashes_nr={}, duration={} seconds, tx_row_nr={}, rx_row_nr={}, tx_row_bytes={}, rx_row_bytes={}, row_from_disk_bytes={}, row_from_disk_nr={}, row_from_disk_bytes_per_sec={} MiB/s, row_from_disk_rows_per_sec={} Rows/s, tx_row_nr_peer={}, rx_row_nr_peer={}",
round_nr,
round_nr_fast_path_already_synced,
round_nr_fast_path_same_combined_hashes,
@@ -894,7 +894,7 @@ struct repair_options {
// The parsing code above removed from the map options we have parsed.
// If anything is left there in the end, it's an unsupported option.
if (!options.empty()) {
throw std::runtime_error(format("unsupported repair options: {}",
throw std::runtime_error(fmt::format("unsupported repair options: {}",
options));
}
}
@@ -1194,7 +1194,7 @@ future<int> repair_service::do_repair_start(sstring keyspace, std::unordered_map
auto node = gms::inet_address(n);
hosts.insert(node);
} catch(...) {
throw std::invalid_argument(format("Failed to parse node={} in hosts={} specified by user: {}",
throw std::invalid_argument(fmt::format("Failed to parse node={} in hosts={} specified by user: {}",
n, options.hosts, std::current_exception()));
}
}
@@ -1204,7 +1204,7 @@ future<int> repair_service::do_repair_start(sstring keyspace, std::unordered_map
auto node = gms::inet_address(n);
ignore_nodes.insert(node);
} catch(...) {
throw std::invalid_argument(format("Failed to parse node={} in ignore_nodes={} specified by user: {}",
throw std::invalid_argument(fmt::format("Failed to parse node={} in ignore_nodes={} specified by user: {}",
n, options.ignore_nodes, std::current_exception()));
}
}
@@ -1271,7 +1271,7 @@ future<int> repair_service::do_repair_start(sstring keyspace, std::unordered_map
auto node = gms::inet_address(n);
ignore_nodes.insert(node);
} catch(...) {
throw std::invalid_argument(format("Failed to parse node={} in ignore_nodes={} specified by user: {}",
throw std::invalid_argument(fmt::format("Failed to parse node={} in ignore_nodes={} specified by user: {}",
n, options.ignore_nodes, std::current_exception()));
}
}
@@ -1409,7 +1409,7 @@ future<> repair::user_requested_repair_task_impl::run() {
}
}
if (!errors.empty()) {
return make_exception_future<>(std::runtime_error(format("{}", errors)));
return make_exception_future<>(std::runtime_error(fmt::format("{}", errors)));
}
return make_ready_future<>();
}).get();
@@ -1534,7 +1534,7 @@ future<> repair::data_sync_repair_task_impl::run() {
}
}
if (!errors.empty()) {
return make_exception_future<>(std::runtime_error(format("{}", errors)));
return make_exception_future<>(std::runtime_error(fmt::format("{}", errors)));
}
return make_ready_future<>();
}).get();
@@ -1649,7 +1649,7 @@ future<> repair_service::bootstrap_with_repair(locator::token_metadata_ptr tmptr
auto nodes = boost::copy_range<std::vector<gms::inet_address>>(old_nodes |
boost::adaptors::filtered([&] (const gms::inet_address& node) { return !new_nodes.contains(node); }));
if (nodes.size() != 1) {
throw std::runtime_error(format("bootstrap_with_repair: keyspace={}, range={}, expected 1 node losing range but found {} nodes={}",
throw std::runtime_error(fmt::format("bootstrap_with_repair: keyspace={}, range={}, expected 1 node losing range but found {} nodes={}",
keyspace_name, desired_range, nodes.size(), nodes));
}
return nodes;
@@ -1721,7 +1721,7 @@ future<> repair_service::bootstrap_with_repair(locator::token_metadata_ptr tmptr
}
}
} else {
throw std::runtime_error(format("bootstrap_with_repair: keyspace={}, range={}, wrong number of old_endpoints={}, rf={}",
throw std::runtime_error(fmt::format("bootstrap_with_repair: keyspace={}, range={}, wrong number of old_endpoints={}, rf={}",
keyspace_name, desired_range, old_endpoints, replication_factor));
}
rlogger.debug("bootstrap_with_repair: keyspace={}, range={}, neighbors={}, mandatory_neighbors={}",
@@ -1883,7 +1883,7 @@ future<> repair_service::do_decommission_removenode_with_repair(locator::token_m
neighbors_set = get_neighbors_set(boost::copy_range<std::vector<inet_address>>(new_eps));
}
} else {
throw std::runtime_error(format("{}: keyspace={}, range={}, current_replica_endpoints={}, new_replica_endpoints={}, wrong number of new owner node={}",
throw std::runtime_error(fmt::format("{}: keyspace={}, range={}, current_replica_endpoints={}, new_replica_endpoints={}, wrong number of new owner node={}",
op, keyspace_name, r, current_eps, new_eps, new_owner));
}
neighbors_set.erase(myip);
@@ -2012,7 +2012,7 @@ future<> repair_service::do_rebuild_replace_with_repair(std::unordered_map<sstri
rlogger.debug("lost_nodes_per_dc={}", lost_nodes_per_dc);
if (source_dc) {
if (!topology.get_datacenters().contains(*source_dc)) {
throw std::runtime_error(format("{}: Could not find source_dc={} in datacenters={}", op, *source_dc, topology.get_datacenters()));
throw std::runtime_error(fmt::format("{}: Could not find source_dc={} in datacenters={}", op, *source_dc, topology.get_datacenters()));
}
if (topology.get_datacenters().size() == 1) {
rlogger.info("{}: source_dc={} ignored since the cluster has a single datacenter", op, *source_dc);
@@ -2107,7 +2107,7 @@ future<> repair_service::do_rebuild_replace_with_repair(std::unordered_map<sstri
}
}
if (!source_dc_for_keyspace.empty() && !live_nodes_per_dc.contains(source_dc_for_keyspace)) {
on_internal_error(rlogger, format("do_rebuild_replace_with_repair: cannot find source_dc_for_keyspace={} in live_nodes_per_dc={}", source_dc_for_keyspace, live_nodes_per_dc));
on_internal_error(rlogger, fmt::format("do_rebuild_replace_with_repair: cannot find source_dc_for_keyspace={} in live_nodes_per_dc={}", source_dc_for_keyspace, live_nodes_per_dc));
}
const auto& sync_nodes = source_dc_for_keyspace.empty() ? all_live_nodes : live_nodes_per_dc.at(source_dc_for_keyspace);
rlogger.info("{}: started with keyspace={}, nr_ranges={}, sync_nodes={}, ignore_nodes={} replaced_node={}", op, keyspace_name, ranges.size() * nr_tables, sync_nodes, ignore_nodes, replaced_node);

View File

@@ -2623,7 +2623,7 @@ public:
}
}
if (_all_live_peer_shards.size() != _all_live_peer_nodes.size()) {
on_internal_error(rlogger, format("The size of shards and nodes do not match table={} range={} shards={} nodes={}",
on_internal_error(rlogger, seastar::format("The size of shards and nodes do not match table={} range={} shards={} nodes={}",
_cf_name, _range, _all_live_peer_shards, _all_live_peer_nodes));
}
@@ -3088,7 +3088,7 @@ public:
if (table_dropped) {
throw replica::no_such_column_family(_shard_task.get_keyspace(), _cf_name);
} else {
throw nested_exception(std::make_exception_ptr(std::runtime_error(format("Failed to repair for keyspace={}, cf={}, range={}", _shard_task.get_keyspace(),
throw nested_exception(std::make_exception_ptr(std::runtime_error(fmt::format("Failed to repair for keyspace={}, cf={}, range={}", _shard_task.get_keyspace(),
_cf_name, _range))), std::move(ex));
}
} else {

View File

@@ -297,7 +297,7 @@ tablet_id process_one_row(table_id table, tablet_map& map, tablet_id tid, const
}
std::optional<tablet_replica> pending_replica;
if (pending.size() > 1) {
throw std::runtime_error(format("Too many pending replicas for table {} tablet {}: {}",
throw std::runtime_error(fmt::format("Too many pending replicas for table {} tablet {}: {}",
table, tid, pending));
}
if (pending.size() != 0) {

View File

@@ -1107,11 +1107,11 @@ schema_builder::schema_builder(std::string_view ks_name, std::string_view cf_nam
// avoided this case in the first place.
if (ks_name.find_first_of('/') != std::string_view::npos ||
ks_name.find_first_of('\0') != std::string_view::npos) {
throw std::logic_error(format("Tried to create a schema with illegal characters in keyspace name: {}", ks_name));
throw std::logic_error(fmt::format("Tried to create a schema with illegal characters in keyspace name: {}", ks_name));
}
if (cf_name.find_first_of('/') != std::string_view::npos ||
cf_name.find_first_of('\0') != std::string_view::npos) {
throw std::logic_error(format("Tried to create a schema with illegal characters in table name: {}", cf_name));
throw std::logic_error(fmt::format("Tried to create a schema with illegal characters in table name: {}", cf_name));
}
_raw._ks_name = sstring(ks_name);
_raw._cf_name = sstring(cf_name);

View File

@@ -20,7 +20,6 @@ shared_ptr<T> make_shared(A&&... a);
}
using namespace seastar;
using seastar::shared_ptr;
using seastar::make_shared;

View File

@@ -222,7 +222,7 @@ void service::client_state::set_keyspace(replica::database& db, std::string_view
// Skip keyspace validation for non-authenticated users. Apparently, some client libraries
// call set_keyspace() before calling login(), and we have to handle that.
if (_user && !db.has_keyspace(keyspace)) {
throw exceptions::invalid_request_exception(format("Keyspace '{}' does not exist", keyspace));
throw exceptions::invalid_request_exception(seastar::format("Keyspace '{}' does not exist", keyspace));
}
_keyspace = sstring(keyspace);
}

View File

@@ -778,7 +778,7 @@ future<std::vector<mutation>> prepare_column_family_drop_announcement(storage_pr
auto explicit_view_names = views
| boost::adaptors::filtered([&old_cfm](const view_ptr& v) { return !old_cfm.get_index_manager().is_index(v); })
| boost::adaptors::transformed([](const view_ptr& v) { return v->cf_name(); });
co_await coroutine::return_exception(exceptions::invalid_request_exception(format("Cannot drop table when materialized views still depend on it ({}.{{{}}})",
co_await coroutine::return_exception(exceptions::invalid_request_exception(seastar::format("Cannot drop table when materialized views still depend on it ({}.{{{}}})",
schema->ks_name(), fmt::join(explicit_view_names, ", "))));
}
mlogger.info("Drop table '{}.{}'", schema->ks_name(), schema->cf_name());

View File

@@ -144,7 +144,7 @@ static service_level_options::timeout_type get_duration(const cql3::untyped_resu
};
future<qos::service_levels_info> get_service_levels(cql3::query_processor& qp, std::string_view ks_name, std::string_view cf_name, db::consistency_level cl) {
sstring prepared_query = format("SELECT * FROM {}.{};", ks_name, cf_name);
sstring prepared_query = seastar::format("SELECT * FROM {}.{};", ks_name, cf_name);
auto result_set = co_await qp.execute_internal(prepared_query, cl, qos_query_state(), cql3::query_processor::cache_internal::yes);
qos::service_levels_info service_levels;
@@ -166,7 +166,7 @@ future<qos::service_levels_info> get_service_levels(cql3::query_processor& qp, s
}
future<service_levels_info> get_service_level(cql3::query_processor& qp, std::string_view ks_name, std::string_view cf_name, sstring service_level_name, db::consistency_level cl) {
sstring prepared_query = format("SELECT * FROM {}.{} WHERE service_level = ?;", ks_name, cf_name);
sstring prepared_query = seastar::format("SELECT * FROM {}.{} WHERE service_level = ?;", ks_name, cf_name);
auto result_set = co_await qp.execute_internal(prepared_query, cl, qos_query_state(), {service_level_name}, cql3::query_processor::cache_internal::yes);
qos::service_levels_info service_levels;

View File

@@ -280,7 +280,7 @@ future<> group0_state_machine::transfer_snapshot(raft::server_id from_id, raft::
// This is virtually impossible. We've just received the
// snapshot from the sender and must have updated our
// address map with its IP address.
const auto msg = format("Failed to apply snapshot from {}: ip address of the sender is not found", from_ip);
const auto msg = seastar::format("Failed to apply snapshot from {}: ip address of the sender is not found", from_ip);
co_await coroutine::return_exception(raft::transport_error(msg));
}
try {
@@ -348,7 +348,7 @@ future<> group0_state_machine::transfer_snapshot(raft::server_id from_id, raft::
co_await _sp.mutate_locally({std::move(history_mut)}, nullptr);
} catch (const abort_requested_exception&) {
throw raft::request_aborted(format(
throw raft::request_aborted(fmt::format(
"Abort requested while transferring snapshot from ID/IP: {}/{}, snapshot descriptor id: {}, snapshot index: {}", from_id, from_ip, snp.id, snp.idx));
}
}

View File

@@ -77,7 +77,7 @@ raft_rpc::two_way_rpc(sloc loc, raft::server_id id,
}
return verb(&_messaging, netw::msg_addr(*ip_addr), db::no_timeout, _group_id, _my_id, id, std::forward<Args>(args)...)
.handle_exception_type([loc= std::move(loc), id] (const seastar::rpc::closed_error& e) {;
const auto msg = format("Failed to execute {} on leader {}: {}", loc.function_name(), id, e);
const auto msg = fmt::format("Failed to execute {} on leader {}: {}", loc.function_name(), id, e);
rlogger.trace("{}", msg);
return make_exception_future<Ret>(raft::transport_error(msg));
});

View File

@@ -2797,7 +2797,7 @@ db::system_keyspace::peer_info storage_service::get_peer_info_for_update(inet_ad
try {
field = T(value.value());
} catch (...) {
on_internal_error(slogger, format("failed to parse {} {} for {}: {}", name, value.value(),
on_internal_error(slogger, fmt::format("failed to parse {} {} for {}: {}", name, value.value(),
endpoint, std::current_exception()));
}
};
@@ -6129,14 +6129,14 @@ future<> storage_service::move_tablet(table_id table, dht::token token, locator:
auto gid = locator::global_tablet_id{table, tid};
if (!locator::contains(tinfo.replicas, src)) {
throw std::runtime_error(format("Tablet {} has no replica on {}", gid, src));
throw std::runtime_error(seastar::format("Tablet {} has no replica on {}", gid, src));
}
auto* node = get_token_metadata().get_topology().find_node(dst.host);
if (!node) {
throw std::runtime_error(format("Unknown host: {}", dst.host));
throw std::runtime_error(seastar::format("Unknown host: {}", dst.host));
}
if (dst.shard >= node->get_shard_count()) {
throw std::runtime_error(format("Host {} does not have shard {}", *node, dst.shard));
throw std::runtime_error(seastar::format("Host {} does not have shard {}", *node, dst.shard));
}
if (src == dst) {

View File

@@ -1271,7 +1271,7 @@ public:
}
if (!min_dst) {
on_internal_error(lblogger, format("No destination shards on {}", best_hosts));
on_internal_error(lblogger, fmt::format("No destination shards on {}", best_hosts));
}
auto candidate = migration_candidate{
@@ -1601,7 +1601,7 @@ public:
if (skip) {
if (src_node_info.drained && skip->viable_targets.empty()) {
auto replicas = tmap.get_tablet_info(source_tablet.tablet).replicas;
throw std::runtime_error(format("Unable to find new replica for tablet {} on {} when draining {} (nodes {}, replicas {})",
throw std::runtime_error(fmt::format("Unable to find new replica for tablet {} on {} when draining {} (nodes {}, replicas {})",
source_tablet, src, nodes_to_drain, nodes_by_load_dst, replicas));
}
src_node_info.skipped_candidates.emplace_back(src, source_tablet, std::move(skip->viable_targets));

View File

@@ -834,7 +834,7 @@ class topology_coordinator : public endpoint_lifecycle_subscriber {
}
}
sstring reason = format("ALTER tablets KEYSPACE called with options: {}", saved_ks_props);
sstring reason = seastar::format("ALTER tablets KEYSPACE called with options: {}", saved_ks_props);
rtlogger.trace("do update {} reason {}", updates, reason);
mixed_change change{std::move(updates)};
group0_command g0_cmd = _group0.client().prepare_command(std::move(change), guard, reason);
@@ -2350,7 +2350,7 @@ class topology_coordinator : public endpoint_lifecycle_subscriber {
if (!unsupported_features.empty()) {
rtlogger.warn("node {} does not understand some features: {}", node.id, unsupported_features);
return join_node_response_params::rejected{
.reason = format("Feature check failed. The node does not support some features that are enabled by the cluster: {}",
.reason = seastar::format("Feature check failed. The node does not support some features that are enabled by the cluster: {}",
unsupported_features),
};
}

View File

@@ -365,11 +365,11 @@ static sstring pk_type_to_string(const schema& s) {
if (s.partition_key_size() == 1) {
return s.partition_key_columns().begin()->type->name();
} else {
auto type_params = fmt::join(s.partition_key_columns()
return seastar::format("org.apache.cassandra.db.marshal.CompositeType({})",
fmt::join(s.partition_key_columns()
| boost::adaptors::transformed(std::mem_fn(&column_definition::type))
| boost::adaptors::transformed(std::mem_fn(&abstract_type::name)),
",");
return format("org.apache.cassandra.db.marshal.CompositeType({})", type_params);
","));
}
}

View File

@@ -371,7 +371,7 @@ future<> sstable_directory::filesystem_components_lister::process(sstable_direct
// log and proceed.
for (auto& path : _state->generations_found | boost::adaptors::map_values) {
if (flags.throw_on_missing_toc) {
throw sstables::malformed_sstable_exception(format("At directory: {}: no TOC found for SSTable {}!. Refusing to boot", _directory.native(), path.native()));
throw sstables::malformed_sstable_exception(seastar::format("At directory: {}: no TOC found for SSTable {}!. Refusing to boot", _directory.native(), path.native()));
} else {
dirlog.info("Found incomplete SSTable {} at directory {}. Removing", path.native(), _directory.native());
_state->files_for_removal.insert(path.native());

View File

@@ -162,7 +162,7 @@ inline sstable_state state_from_dir(std::string_view dir) {
return sstable_state::upload;
}
throw std::runtime_error(format("Unknown sstable state dir {}", dir));
throw std::runtime_error(seastar::format("Unknown sstable state dir {}", dir));
}
// FIXME -- temporary, move to fs storage after patching the rest

View File

@@ -167,7 +167,7 @@ future<> table_helper::setup_keyspace(cql3::query_processor& qp, service::migrat
if (!db.has_keyspace(keyspace_name)) {
try {
co_await mm.announce(service::prepare_new_keyspace_announcement(db.real_database(), ksm, ts),
std::move(group0_guard), format("table_helper: create {} keyspace", keyspace_name));
std::move(group0_guard), seastar::format("table_helper: create {} keyspace", keyspace_name));
} catch (service::group0_concurrent_modification&) {
tlogger.info("Concurrent operation is detected while creating {} keyspace, retrying.", keyspace_name);
}
@@ -194,7 +194,7 @@ future<> table_helper::setup_keyspace(cql3::query_processor& qp, service::migrat
try {
co_return co_await mm.announce(std::move(table_mutations), std::move(group0_guard),
format("table_helper: create tables for {} keyspace", keyspace_name));
seastar::format("table_helper: create tables for {} keyspace", keyspace_name));
} catch (service::group0_concurrent_modification&) {
tlogger.info("Concurrent operation is detected while creating tables for {} keyspace, retrying.", keyspace_name);
}

View File

@@ -306,7 +306,7 @@ future<> task_manager::task::add_child(foreign_task_ptr&& child) {
void task_manager::task::start() {
if (_impl->_status.state != task_state::created) {
on_fatal_internal_error(tmlogger, format("{} task with id = {} was started twice", _impl->_module->get_name(), id()));
on_fatal_internal_error(tmlogger, seastar::format("{} task with id = {} was started twice", _impl->_module->get_name(), id()));
}
_impl->_status.start_time = db_clock::now();
@@ -692,7 +692,7 @@ task_manager::module_ptr task_manager::make_module(std::string name) {
task_manager::module_ptr task_manager::find_module(std::string module_name) {
auto it = _modules.find(module_name);
if (it == _modules.end()) {
throw std::runtime_error(format("module {} not found", module_name));
throw std::runtime_error(seastar::format("module {} not found", module_name));
}
return it->second;
}

View File

@@ -148,7 +148,7 @@ BOOST_AUTO_TEST_CASE(test_timeuuid_msb_is_monotonic) {
bool t1 = utils::timeuuid_tri_compare(next, prev) > 0;
bool t2 = utils::timeuuid_tri_compare(next, first) > 0;
if (!t1 || !t2) {
BOOST_CHECK_MESSAGE(t1 && t2, format("a UUID {}{} later is not great than at test start: {} {}", i, str(scale), t1, t2));
BOOST_CHECK_MESSAGE(t1 && t2, seastar::format("a UUID {}{} later is not great than at test start: {} {}", i, str(scale), t1, t2));
}
prev = next;
}
@@ -174,7 +174,7 @@ BOOST_AUTO_TEST_CASE(test_timeuuid_tri_compare_legacy) {
bool t1 = utils::timeuuid_tri_compare(next, prev) == timeuuid_legacy_tri_compare(next, prev);
bool t2 = utils::timeuuid_tri_compare(next, first) == timeuuid_legacy_tri_compare(next, first);
if (!t1 || !t2) {
BOOST_CHECK_MESSAGE(t1 && t2, format("a UUID {}{} later violates compare order", i, str(scale)));
BOOST_CHECK_MESSAGE(t1 && t2, seastar::format("a UUID {}{} later violates compare order", i, str(scale)));
}
prev = next;
}

View File

@@ -147,9 +147,9 @@ BOOST_AUTO_TEST_CASE(test_magnitude_and_precision) {
std::string number = prefix + test.number;
auto res = alternator::internal::get_magnitude_and_precision(number);
BOOST_CHECK_MESSAGE(res.magnitude == test.magnitude,
format("{}: expected magnitude {}, got {}", number, test.magnitude, res.magnitude));
seastar::format("{}: expected magnitude {}, got {}", number, test.magnitude, res.magnitude));
BOOST_CHECK_MESSAGE(res.precision == test.precision,
format("{}: expected precision {}, got {}", number, test.precision, res.precision));
seastar::format("{}: expected precision {}, got {}", number, test.precision, res.precision));
}
}
// Huge exponents like 1e1000000 are not guaranteed to return that
@@ -165,4 +165,4 @@ BOOST_AUTO_TEST_CASE(test_magnitude_and_precision) {
BOOST_CHECK(res.magnitude > 1000);
res = alternator::internal::get_magnitude_and_precision("1e-1000000000000");
BOOST_CHECK(res.magnitude < -1000);
}
}

View File

@@ -274,30 +274,30 @@ SEASTAR_THREAD_TEST_CASE(test_permissions_of_cdc_description) {
BOOST_REQUIRE(e.local_db().has_schema(t.substr(0, dot_pos), t.substr(dot_pos + 1)));
// Disallow DROP
assert_unauthorized(format("DROP TABLE {}", t));
assert_unauthorized(seastar::format("DROP TABLE {}", t));
// Allow SELECT
e.execute_cql(format("SELECT * FROM {}", t)).get();
e.execute_cql(seastar::format("SELECT * FROM {}", t)).get();
}
// Disallow ALTER
for (auto& t : {streams}) {
assert_unauthorized(format("ALTER TABLE {} ALTER time TYPE blob", t));
assert_unauthorized(seastar::format("ALTER TABLE {} ALTER time TYPE blob", t));
}
assert_unauthorized(format("ALTER TABLE {} ALTER id TYPE blob", generations_v2));
assert_unauthorized(format("ALTER TABLE {} ALTER key TYPE blob", timestamps));
assert_unauthorized(seastar::format("ALTER TABLE {} ALTER id TYPE blob", generations_v2));
assert_unauthorized(seastar::format("ALTER TABLE {} ALTER key TYPE blob", timestamps));
// Allow DELETE
for (auto& t : {streams}) {
e.execute_cql(format("DELETE FROM {} WHERE time = toTimeStamp(now())", t)).get();
e.execute_cql(seastar::format("DELETE FROM {} WHERE time = toTimeStamp(now())", t)).get();
}
e.execute_cql(format("DELETE FROM {} WHERE id = uuid()", generations_v2)).get();
e.execute_cql(format("DELETE FROM {} WHERE key = 'timestamps'", timestamps)).get();
e.execute_cql(seastar::format("DELETE FROM {} WHERE id = uuid()", generations_v2)).get();
e.execute_cql(seastar::format("DELETE FROM {} WHERE key = 'timestamps'", timestamps)).get();
// Allow UPDATE, INSERT
e.execute_cql(format("INSERT INTO {} (id, range_end) VALUES (uuid(), 0)", generations_v2)).get();
e.execute_cql(format("INSERT INTO {} (time, range_end) VALUES (toTimeStamp(now()), 0)", streams)).get();
e.execute_cql(format("UPDATE {} SET expired = toTimeStamp(now()) WHERE key = 'timestamps' AND time = toTimeStamp(now())", timestamps)).get();
e.execute_cql(seastar::format("INSERT INTO {} (id, range_end) VALUES (uuid(), 0)", generations_v2)).get();
e.execute_cql(seastar::format("INSERT INTO {} (time, range_end) VALUES (toTimeStamp(now()), 0)", streams)).get();
e.execute_cql(seastar::format("UPDATE {} SET expired = toTimeStamp(now()) WHERE key = 'timestamps' AND time = toTimeStamp(now())", timestamps)).get();
}).get();
}
@@ -782,9 +782,9 @@ SEASTAR_THREAD_TEST_CASE(test_ttls) {
do_with_cql_env_thread([](cql_test_env& e) {
auto test_ttl = [&e] (int ttl_seconds) {
const auto base_tbl_name = "tbl" + std::to_string(ttl_seconds);
cquery_nofail(e, format("CREATE TABLE ks.{} (pk int, ck int, val int, PRIMARY KEY(pk, ck)) WITH cdc = {{'enabled':'true', 'ttl':{}}}", base_tbl_name, ttl_seconds));
cquery_nofail(e, seastar::format("CREATE TABLE ks.{} (pk int, ck int, val int, PRIMARY KEY(pk, ck)) WITH cdc = {{'enabled':'true', 'ttl':{}}}", base_tbl_name, ttl_seconds));
BOOST_REQUIRE_EQUAL(e.local_db().find_schema("ks", base_tbl_name)->cdc_options().ttl(), ttl_seconds);
cquery_nofail(e, format("INSERT INTO ks.{} (pk, ck, val) VALUES(1, 11, 111)", base_tbl_name));
cquery_nofail(e, seastar::format("INSERT INTO ks.{} (pk, ck, val) VALUES(1, 11, 111)", base_tbl_name));
auto log_schema = e.local_db().find_schema("ks", cdc::log_name(base_tbl_name));
@@ -1682,7 +1682,7 @@ static void test_pre_post_image(cql_test_env& e, const std::vector<image_persist
processed_times.insert(time);
}
BOOST_TEST_MESSAGE(format("Returned rows: {}", groups));
BOOST_TEST_MESSAGE(seastar::format("Returned rows: {}", groups));
// Assert that there is the same number of groups differentiated by cdc$time
BOOST_REQUIRE_EQUAL(groups.size(), t.groups.size());
@@ -1708,12 +1708,12 @@ static void test_pre_post_image(cql_test_env& e, const std::vector<image_persist
actual_values.push_back(std::move(actual_value));
}
BOOST_TEST_MESSAGE(format("Looking up corresponding row to {}", actual_values));
BOOST_TEST_MESSAGE(seastar::format("Looking up corresponding row to {}", actual_values));
// Order in pre-postimage is unspecified
const auto it = std::find(expected.begin(), expected.end(), actual_values);
if (it == expected.end()) {
BOOST_FAIL(format("Failed to find corresponding expected row for {}", actual_values));
BOOST_FAIL(seastar::format("Failed to find corresponding expected row for {}", actual_values));
}
expected.erase(it);
}

View File

@@ -47,7 +47,7 @@ static shared_ptr<db::config> db_config_with_auth() {
//
static void create_user_if_not_exists(cql_test_env& env, std::string_view user_name) {
env.execute_cql(format("CREATE USER IF NOT EXISTS {} WITH PASSWORD '{}'", user_name, user_name)).get();
env.execute_cql(seastar::format("CREATE USER IF NOT EXISTS {} WITH PASSWORD '{}'", user_name, user_name)).get();
}
// Invoke `f` as though the user indicated with `user_name` had logged in. The current logged in user is restored after

View File

@@ -4900,7 +4900,7 @@ static future<> test_clustering_filtering_with_compaction_strategy(std::string_v
db_config->sstable_format("me");
return do_with_cql_env_thread([cs] (cql_test_env& e) {
cquery_nofail(e, format("CREATE TABLE cf(pk text, ck int, v text, PRIMARY KEY(pk, ck)) WITH COMPACTION = {{'class': '{}'}}", cs));
cquery_nofail(e, seastar::format("CREATE TABLE cf(pk text, ck int, v text, PRIMARY KEY(pk, ck)) WITH COMPACTION = {{'class': '{}'}}", cs));
cquery_nofail(e, "INSERT INTO cf(pk, ck, v) VALUES ('a', 1, 'a1')");
e.db().invoke_on_all([] (replica::database& db) { return db.flush_all_memtables(); }).get();
e.db().invoke_on_all([] (replica::database& db) { db.row_cache_tracker().clear(); }).get();
@@ -4924,7 +4924,7 @@ static future<> test_clustering_filtering_2_with_compaction_strategy(std::string
db_config->sstable_format("me");
return do_with_cql_env_thread([cs] (cql_test_env& e) {
cquery_nofail(e, format("CREATE TABLE cf(pk text, ck int, v text, PRIMARY KEY(pk, ck)) WITH COMPACTION = {{'class': '{}'}}", cs));
cquery_nofail(e, seastar::format("CREATE TABLE cf(pk text, ck int, v text, PRIMARY KEY(pk, ck)) WITH COMPACTION = {{'class': '{}'}}", cs));
cquery_nofail(e, "INSERT INTO cf(pk, ck, v) VALUES ('a', 1, 'a1')");
cquery_nofail(e, "INSERT INTO cf(pk, ck, v) VALUES ('b', 2, 'b2')");
e.db().invoke_on_all([] (replica::database& db) { return db.flush_all_memtables(); }).get();
@@ -4949,7 +4949,7 @@ static future<> test_clustering_filtering_3_with_compaction_strategy(std::string
db_config->sstable_format("me");
return do_with_cql_env_thread([cs] (cql_test_env& e) {
cquery_nofail(e, format("CREATE TABLE cf(pk text, ck int, v text, PRIMARY KEY(pk, ck)) WITH COMPACTION = {{'class': '{}'}}", cs));
cquery_nofail(e, seastar::format("CREATE TABLE cf(pk text, ck int, v text, PRIMARY KEY(pk, ck)) WITH COMPACTION = {{'class': '{}'}}", cs));
e.db().invoke_on_all([] (replica::database& db) {
auto& table = db.find_column_family("ks", "cf");
return table.disable_auto_compaction();
@@ -5182,15 +5182,15 @@ SEASTAR_TEST_CASE(timeuuid_fcts_prepared_re_evaluation) {
};
for (const auto& t : sub_tests) {
BOOST_TEST_CHECKPOINT(t.first);
e.execute_cql(format("CREATE TABLE test_{} (pk {} PRIMARY KEY)", t.first, t.second)).get();
auto drop_test_table = defer([&e, &t] { e.execute_cql(format("DROP TABLE test_{}", t.first)).get(); });
auto insert_stmt = e.prepare(format("INSERT INTO test_{0} (pk) VALUES ({0}())", t.first)).get();
e.execute_cql(seastar::format("CREATE TABLE test_{} (pk {} PRIMARY KEY)", t.first, t.second)).get();
auto drop_test_table = defer([&e, &t] { e.execute_cql(seastar::format("DROP TABLE test_{}", t.first)).get(); });
auto insert_stmt = e.prepare(seastar::format("INSERT INTO test_{0} (pk) VALUES ({0}())", t.first)).get();
e.execute_prepared(insert_stmt, {}).get();
sleep(1ms).get();
// Check that the second execution is evaluated again and yields a
// different value.
e.execute_prepared(insert_stmt, {}).get();
auto msg = e.execute_cql(format("SELECT * FROM test_{}", t.first)).get();
auto msg = e.execute_cql(seastar::format("SELECT * FROM test_{}", t.first)).get();
assert_that(msg).is_rows().with_size(2);
}
});

View File

@@ -671,11 +671,11 @@ static bool has_sufficient_replicas(const sstring& dc,
const std::unordered_map<sstring, size_t>& datacenters) noexcept {
auto dc_replicas_it = dc_replicas.find(dc);
if (dc_replicas_it == dc_replicas.end()) {
BOOST_TEST_FAIL(format("has_sufficient_replicas: dc {} not found in dc_replicas: {}", dc, dc_replicas));
BOOST_TEST_FAIL(seastar::format("has_sufficient_replicas: dc {} not found in dc_replicas: {}", dc, dc_replicas));
}
auto endpoint_it = all_endpoints.find(dc);
if (endpoint_it == all_endpoints.end()) {
BOOST_TEST_MESSAGE(format("has_sufficient_replicas: dc {} not found in all_endpoints: {}", dc, all_endpoints));
BOOST_TEST_MESSAGE(seastar::format("has_sufficient_replicas: dc {} not found in all_endpoints: {}", dc, all_endpoints));
return true;
}
return dc_replicas_it->second.size()

View File

@@ -2968,7 +2968,7 @@ SEASTAR_TEST_CASE(test_no_misses_when_read_is_repeated) {
auto s2 = tracker.get_stats();
if (s1.reads_with_misses != s2.reads_with_misses) {
BOOST_FAIL(format("Got cache miss when repeating read of {} on {}", ranges, m1));
BOOST_FAIL(seastar::format("Got cache miss when repeating read of {} on {}", ranges, m1));
}
}
});
@@ -3418,7 +3418,7 @@ SEASTAR_TEST_CASE(test_concurrent_reads_and_eviction) {
}
return m2 == actual;
})) {
BOOST_FAIL(format("Mutation read doesn't match any expected version, slice: {}, read: {}\nexpected: [{}]",
BOOST_FAIL(seastar::format("Mutation read doesn't match any expected version, slice: {}, read: {}\nexpected: [{}]",
slice, actual, fmt::join(possible_versions, ",\n")));
}
}

View File

@@ -121,7 +121,7 @@ public:
void assert_toc(const std::set<component_type>& expected_components) {
for (auto& expected : expected_components) {
if(!_sst->_recognized_components.contains(expected)) {
BOOST_FAIL(format("Expected component of TOC missing: {}\n ... in: {}",
BOOST_FAIL(seastar::format("Expected component of TOC missing: {}\n ... in: {}",
expected,
std::set<component_type>(
cbegin(_sst->_recognized_components),
@@ -130,7 +130,7 @@ public:
}
for (auto& present : _sst->_recognized_components) {
if (!expected_components.contains(present)) {
BOOST_FAIL(format("Unexpected component of TOC: {}\n ... when expecting: {}",
BOOST_FAIL(seastar::format("Unexpected component of TOC: {}\n ... when expecting: {}",
present,
expected_components));
}

View File

@@ -41,7 +41,7 @@ SEASTAR_THREAD_TEST_CASE(test_sstable_move) {
generation_type gen{0};
for (auto i = 0; i < 2; i++) {
gen = gen_generator();
auto new_dir = format("{}/gen-{}", fs::path(cur_dir).parent_path().native(), gen);
auto new_dir = seastar::format("{}/gen-{}", fs::path(cur_dir).parent_path().native(), gen);
touch_directory(new_dir).get();
test(sst).move_to_new_dir(new_dir, gen).get();
// the source directory must be empty now
@@ -110,7 +110,7 @@ SEASTAR_THREAD_TEST_CASE(test_sstable_move_replay) {
int count = 0;
do {
auto gen = gen_generator();
auto new_dir = format("{}/gen-{}", fs::path(cur_dir).parent_path().native(), gen);
auto new_dir = seastar::format("{}/gen-{}", fs::path(cur_dir).parent_path().native(), gen);
touch_directory(new_dir).get();
done = partial_create_links(sst, fs::path(new_dir), gen, count++);
test(sst).move_to_new_dir(new_dir, gen).get();

View File

@@ -44,7 +44,7 @@ SEASTAR_TEST_CASE(test_get_restricted_ranges) {
if (!std::equal(actual.begin(), actual.end(), expected.begin(), [&s](auto&& r1, auto&& r2) {
return r1.equal(r2, dht::ring_position_comparator(*s));
})) {
BOOST_FAIL(format("Ranges differ, expected {} but got {}", expected, actual));
BOOST_FAIL(fmt::format("Ranges differ, expected {} but got {}", expected, actual));
}
};

View File

@@ -31,7 +31,7 @@ private:
auto r = _cmp(a, b);
auto actual = r;
if (actual != order) {
BOOST_FAIL(format("Expected cmp({}, {}) == {}, but got {}", a, b, order, actual));
BOOST_FAIL(seastar::format("Expected cmp({}, {}) == {}, but got {}", a, b, order, actual));
}
});
});

View File

@@ -41,7 +41,7 @@ rows_assertions::is_empty() {
auto row_count = rs.size();
if (row_count != 0) {
auto&& first_row = *rs.rows().begin();
fail(format("Expected no rows, but got {:d}. First row: {}", row_count, first_row));
fail(seastar::format("Expected no rows, but got {:d}. First row: {}", row_count, first_row));
}
return {*this};
}
@@ -62,7 +62,7 @@ rows_assertions::rows_assertions::is_null() {
for (auto&& row : rs.rows()) {
for (const managed_bytes_opt& v : row) {
if (v) {
fail(format("Expected null values. Found: {}\n", v));
fail(seastar::format("Expected null values. Found: {}\n", v));
}
}
}
@@ -75,7 +75,7 @@ rows_assertions::rows_assertions::is_not_null() {
for (auto&& row : rs.rows()) {
for (const managed_bytes_opt& v : row) {
if (!v) {
fail(format("Expected non-null values. {}\n", fmt::to_string(row)));
fail(seastar::format("Expected non-null values. {}\n", fmt::to_string(row)));
}
}
}
@@ -110,7 +110,7 @@ rows_assertions::with_row(std::initializer_list<bytes_opt> values) {
return {*this};
}
}
fail(format("Expected row not found: {} not in {}\n", fmt::to_string(expected_row), _rows));
fail(seastar::format("Expected row not found: {} not in {}\n", fmt::to_string(expected_row), _rows));
return {*this};
}
@@ -130,13 +130,13 @@ rows_assertions::with_rows(std::vector<std::vector<bytes_opt>> rows) {
if (!std::equal(
std::begin(expected_row), std::end(expected_row),
std::begin(actual), std::end(actual))) {
fail(format("row {:d} differs, expected {} got {}", row_nr, fmt::to_string(row), fmt::to_string(actual)));
fail(seastar::format("row {:d} differs, expected {} got {}", row_nr, fmt::to_string(row), fmt::to_string(actual)));
}
++actual_i;
++row_nr;
}
if (actual_i != actual_end) {
fail(format("Expected less rows ({:d}), got {:d}. Next row is: {}", rows.size(), rs.size(),
fail(seastar::format("Expected less rows ({:d}), got {:d}. Next row is: {}", rows.size(), rs.size(),
fmt::to_string(*actual_i)));
}
return {*this};
@@ -155,7 +155,7 @@ rows_assertions::with_rows_ignore_order(std::vector<std::vector<bytes_opt>> rows
std::begin(expected_row), std::end(expected_row));
});
if (found == std::end(actual)) {
fail(format("row {} not found in result set ({})", fmt::to_string(expected),
fail(seastar::format("row {} not found in result set ({})", fmt::to_string(expected),
fmt::join(actual | boost::adaptors::transformed([] (auto& r) { return fmt::to_string(r); }), ", ")));
}
}
@@ -198,7 +198,7 @@ shared_ptr<cql_transport::messages::result_message> cquery_nofail(
return env.execute_cql(query).get();
}
} catch (...) {
BOOST_FAIL(format("query '{}' failed: {}\n{}:{}: originally from here",
BOOST_FAIL(seastar::format("query '{}' failed: {}\n{}:{}: originally from here",
query, std::current_exception(), loc.file_name(), loc.line()));
}
return shared_ptr<cql_transport::messages::result_message>(nullptr);
@@ -212,7 +212,7 @@ void require_rows(cql_test_env& e,
assert_that(cquery_nofail(e, qstr, nullptr, loc)).is_rows().with_rows_ignore_order(expected);
}
catch (const std::exception& e) {
BOOST_FAIL(format("query '{}' failed: {}\n{}:{}: originally from here",
BOOST_FAIL(seastar::format("query '{}' failed: {}\n{}:{}: originally from here",
qstr, e.what(), loc.file_name(), loc.line()));
}
}
@@ -224,7 +224,7 @@ void eventually_require_rows(cql_test_env& e, sstring_view qstr, const std::vect
assert_that(cquery_nofail(e, qstr, nullptr, loc)).is_rows().with_rows_ignore_order(expected);
});
} catch (const std::exception& e) {
BOOST_FAIL(format("query '{}' failed: {}\n{}:{}: originally from here",
BOOST_FAIL(seastar::format("query '{}' failed: {}\n{}:{}: originally from here",
qstr, e.what(), loc.file_name(), loc.line()));
}
}

View File

@@ -411,7 +411,7 @@ public:
}
future<> create_keyspace(const cql_test_config& cfg, std::string_view name) {
auto query = format("create keyspace {} with replication = {{ 'class' : 'org.apache.cassandra.locator.NetworkTopologyStrategy', 'replication_factor' : 1}}{};", name,
auto query = seastar::format("create keyspace {} with replication = {{ 'class' : 'org.apache.cassandra.locator.NetworkTopologyStrategy', 'replication_factor' : 1}}{};", name,
cfg.initial_tablets ? format(" and tablets = {{'initial' : {}}}", *cfg.initial_tablets) : "");
return execute_cql(query).discard_result();
}

View File

@@ -377,8 +377,8 @@ std::pair<evaluation_inputs, std::unique_ptr<evaluation_inputs_data>> make_evalu
const column_values& column_vals,
const std::vector<raw_value>& bind_marker_values) {
auto throw_error = [&](const auto&... fmt_args) -> sstring {
sstring error_msg = format(fmt_args...);
sstring final_msg = format("make_evaluation_inputs error: {}. (table_schema: {}, column_vals: {})", error_msg,
sstring error_msg = seastar::format(fmt_args...);
sstring final_msg = seastar::format("make_evaluation_inputs error: {}. (table_schema: {}, column_vals: {})", error_msg,
*table_schema, column_vals);
throw std::runtime_error(final_msg);
};

View File

@@ -48,7 +48,7 @@ public:
while (auto ei_opt = cur->next_entry().get()) {
sstables::clustered_index_cursor::entry_info& ei = *ei_opt;
if (prev_end && pos_cmp(ei.start, sstables::to_view(*prev_end))) {
BOOST_FAIL(format("Index blocks are not monotonic: {} > {}", *prev_end, ei.start));
BOOST_FAIL(seastar::format("Index blocks are not monotonic: {} > {}", *prev_end, ei.start));
}
prev_end = sstables::materialize(ei.end);
}

View File

@@ -48,7 +48,7 @@ row_assertion::matches(const query::result_set_row& row) const {
sstring
row_assertion::describe(schema_ptr schema) const {
return format("{{{}}}", fmt::join(_expected_values | boost::adaptors::transformed([&schema] (auto&& e) {
return seastar::format("{{{}}}", fmt::join(_expected_values | boost::adaptors::transformed([&schema] (auto&& e) {
auto&& name = e.first;
auto&& value = e.second;
const column_definition* def = schema->get_column_definition(name);

View File

@@ -246,7 +246,7 @@ static future<> update_item(const test_config& _, http::experimental::client& cl
}}
}},)", seq, seq);
return make_request(cli, "UpdateItem", prefix + format(update_item_suffix, ""));
return make_request(cli, "UpdateItem", prefix + seastar::format(update_item_suffix, ""));
}
static future<> update_item_gsi(const test_config& _, http::experimental::client& cli, uint64_t seq) {
@@ -401,7 +401,7 @@ void workload_main(const test_config& c) {
auto it = workloads.find(c.workload);
if (it == workloads.end()) {
throw std::runtime_error(format("unknown workload '{}'", c.workload));
throw std::runtime_error(fmt::format("unknown workload '{}'", c.workload));
}
fun_t fun = it->second;

View File

@@ -162,7 +162,7 @@ public:
: _name(name)
, _message(message)
, _table_name(boost::replace_all_copy(name, "-", "_"))
, _create_table_statement(format(create_table_statement_pattern, _table_name))
, _create_table_statement(fmt::format(fmt::runtime(create_table_statement_pattern), _table_name))
{ }
const std::string& name() const { return _name; }
@@ -278,7 +278,7 @@ private:
template <std::size_t... Is>
inline sstring_vec stats_values_to_strings_impl(const stats_values& values, std::index_sequence<Is...> seq) {
static_assert(stats_formats.size() == seq.size());
sstring_vec result {format(stats_formats[Is].c_str(), std::get<Is>(values))...};
sstring_vec result {seastar::format(stats_formats[Is].c_str(), std::get<Is>(values))...};
return result;
}
@@ -301,10 +301,10 @@ public:
void write_test_names(const output_items& param_names, const output_items& stats_names) override {
for (const auto& name: param_names) {
std::cout << format(name.format.c_str(), name.value) << " ";
std::cout << fmt::format(fmt::runtime(name.format.c_str()), name.value) << " ";
}
for (const auto& name: stats_names) {
std::cout << format(name.format.c_str(), name.value) << " ";
std::cout << fmt::format(fmt::runtime(name.format.c_str()), name.value) << " ";
}
std::cout << std::endl;
}
@@ -317,11 +317,11 @@ public:
const output_items& param_names, const output_items& stats_names) override {
for (auto& value : values) {
for (size_t i = 0; i < param_names.size(); ++i) {
std::cout << format(param_names.at(i).format.c_str(), params.at(i)) << " ";
std::cout << fmt::format(fmt::runtime(param_names.at(i).format.c_str()), params.at(i)) << " ";
}
auto stats_strings = stats_values_to_strings(value);
for (size_t i = 0; i < stats_names.size(); ++i) {
std::cout << format(stats_names.at(i).format.c_str(), stats_strings.at(i)) << " ";
std::cout << fmt::format(fmt::runtime(stats_names.at(i).format.c_str()), stats_strings.at(i)) << " ";
}
std::cout << "\n";
}
@@ -330,11 +330,11 @@ public:
void write_test_values(const sstring_vec& params, const stats_values& stats,
const output_items& param_names, const output_items& stats_names) override {
for (size_t i = 0; i < param_names.size(); ++i) {
std::cout << format(param_names.at(i).format.c_str(), params.at(i)) << " ";
std::cout << fmt::format(fmt::runtime(param_names.at(i).format.c_str()), params.at(i)) << " ";
}
sstring_vec stats_strings = stats_values_to_strings(stats);
for (size_t i = 0; i < stats_names.size(); ++i) {
std::cout << format(stats_names.at(i).format.c_str(), stats_strings.at(i)) << " ";
std::cout << fmt::format(fmt::runtime(stats_names.at(i).format.c_str()), stats_strings.at(i)) << " ";
}
std::cout << std::endl;
}
@@ -1727,7 +1727,7 @@ auto make_datasets() {
std::map<std::string, std::unique_ptr<dataset>> dsets;
auto add = [&] (std::unique_ptr<dataset> ds) {
if (dsets.contains(ds->name())) {
throw std::runtime_error(format("Dataset with name '{}' already exists", ds->name()));
throw std::runtime_error(seastar::format("Dataset with name '{}' already exists", ds->name()));
}
auto name = ds->name();
dsets.emplace(std::move(name), std::move(ds));
@@ -1770,7 +1770,7 @@ void populate(const std::vector<dataset*>& datasets, cql_test_env& env, const ta
dataset& ds = *ds_ptr;
output_mgr->add_dataset_population(ds);
env.execute_cql(format("{} WITH compression = {{ 'sstable_compression': '{}' }};",
env.execute_cql(seastar::format("{} WITH compression = {{ 'sstable_compression': '{}' }};",
ds.create_table_statement(), cfg.compressor)).get();
replica::column_family& cf = find_table(db, ds);
@@ -2008,7 +2008,7 @@ int scylla_fast_forward_main(int argc, char** argv) {
auto enabled_datasets = boost::copy_range<std::vector<dataset*>>(enabled_dataset_names
| boost::adaptors::transformed([&](auto&& name) {
if (!datasets.contains(name)) {
throw std::runtime_error(format("No such dataset: {}", name));
throw std::runtime_error(seastar::format("No such dataset: {}", name));
}
return datasets[name].get();
}));

View File

@@ -1459,7 +1459,7 @@ public:
for (const auto& p: *_snapshots) {
snapshot_ids.push_back(p.first);
}
BOOST_TEST_INFO(format("snapshot ids: [{}]", snapshot_ids));
BOOST_TEST_INFO(seastar::format("snapshot ids: [{}]", snapshot_ids));
BOOST_CHECK_LE(snapshot_ids.size(), 2);
}
}
@@ -3088,7 +3088,7 @@ struct append_reg_model {
try {
completion(x, prev);
} catch (inconsistency& e) {
e.what += format("\nwhen completing append: {}\nprev: {}\nmodel: {}", x, prev, seq);
e.what += fmt::format("\nwhen completing append: {}\nprev: {}\nmodel: {}", x, prev, seq);
throw;
}
returned.insert(x);
@@ -3153,14 +3153,14 @@ private:
SCYLLA_ASSERT(idx < seq.size());
if (prev_x != seq[idx - 1].elem) {
throw inconsistency{format(
throw inconsistency{fmt::format(
"elem {} completed again (existing at idx {}), but prev elem does not match existing model"
"\nprev elem: {}\nmodel prev elem: {}\nprev: {} model up to idx: {}",
x, idx, prev_x, seq[idx - 1].elem, prev, std::vector<entry>{seq.begin(), seq.begin()+idx})};
}
if (prev.digest() != seq[idx - 1].digest) {
auto err = format(
auto err = fmt::format(
"elem {} completed again (existing at idx {}), but prev does not match existing model"
"\n prev: {}\nmodel up to idx: {}",
x, idx, prev, std::vector<entry>{seq.begin(), seq.begin()+idx});
@@ -3185,13 +3185,13 @@ private:
// Check that the existing tail matches our tail.
SCYLLA_ASSERT(!seq.empty());
if (prev_x != seq.back().elem) {
throw inconsistency{format(
throw inconsistency{fmt::format(
"new completion (elem: {}) but prev elem does not match existing model"
"\nprev elem: {}\nmodel prev elem: {}\nprev: {}\n model: {}",
x, prev_x, seq.back().elem, prev, seq)};
}
if (prev.digest() != seq.back().digest) {
auto err = format(
auto err = fmt::format(
"new completion (elem: {}) but prev does not match existing model"
"\nprev: {}\n model: {}",
x, prev, seq);

View File

@@ -231,11 +231,11 @@ public:
std::tie(value, t) = _t.s.get_value(*_s, row);
testlog.trace("reader {}: {} @{}, {}", _id, value, t, clustering_row::printer(*_s, row));
if (_value && value != _value) {
throw std::runtime_error(format("Saw values from two different writes in partition {:d}: {} and {}", _key, _value, value));
throw std::runtime_error(fmt::format("Saw values from two different writes in partition {:d}: {} and {}", _key, _value, value));
}
auto lowest_timestamp = _writetimes[_key];
if (t < lowest_timestamp) {
throw std::runtime_error(format("Expected to see the write @{:d}, but saw @{:d} ({}), c_key={}", lowest_timestamp, t, value, row.key()));
throw std::runtime_error(fmt::format("Expected to see the write @{:d}, but saw @{:d} ({}), c_key={}", lowest_timestamp, t, value, row.key()));
}
_value = std::move(value);
return stop_iteration::no;

Some files were not shown because too many files have changed in this diff Show More