Currently, we use std::vector<mutation> to keep
a list of mutations for processing.
This can lead to a large contiguous allocation, e.g. when the vector
size is a function of the number of tables.
Use a chunked vector instead to prevent oversized allocations.
`perf-simple-query --smp 1` results obtained for fixed 400MHz frequency
and PGO disabled:
Before (read path):
```
enable-cache=1
Running test with config: {partitions=10000, concurrency=100, mode=read, query_single_key=no, counters=no}
Disabling auto compaction
Creating 10000 partitions...
89055.97 tps ( 66.1 allocs/op, 0.0 logallocs/op, 14.2 tasks/op, 39417 insns/op, 18003 cycles/op, 0 errors)
103372.72 tps ( 66.1 allocs/op, 0.0 logallocs/op, 14.2 tasks/op, 39380 insns/op, 17300 cycles/op, 0 errors)
98942.27 tps ( 66.1 allocs/op, 0.0 logallocs/op, 14.2 tasks/op, 39413 insns/op, 17336 cycles/op, 0 errors)
103752.93 tps ( 66.1 allocs/op, 0.0 logallocs/op, 14.2 tasks/op, 39407 insns/op, 17252 cycles/op, 0 errors)
102516.77 tps ( 66.1 allocs/op, 0.0 logallocs/op, 14.2 tasks/op, 39403 insns/op, 17288 cycles/op, 0 errors)
throughput:
mean= 99528.13 standard-deviation=6155.71
median= 102516.77 median-absolute-deviation=3844.59
maximum=103752.93 minimum=89055.97
instructions_per_op:
mean= 39403.99 standard-deviation=14.25
median= 39406.75 median-absolute-deviation=9.30
maximum=39416.63 minimum=39380.39
cpu_cycles_per_op:
mean= 17435.81 standard-deviation=318.24
median= 17300.40 median-absolute-deviation=147.59
maximum=18002.53 minimum=17251.75
```
After (read path):
```
enable-cache=1
Running test with config: {partitions=10000, concurrency=100, mode=read, query_single_key=no, counters=no}
Disabling auto compaction
Creating 10000 partitions...
59755.04 tps ( 66.2 allocs/op, 0.0 logallocs/op, 14.2 tasks/op, 39466 insns/op, 22834 cycles/op, 0 errors)
71854.16 tps ( 66.1 allocs/op, 0.0 logallocs/op, 14.2 tasks/op, 39417 insns/op, 17883 cycles/op, 0 errors)
82149.45 tps ( 66.1 allocs/op, 0.0 logallocs/op, 14.2 tasks/op, 39411 insns/op, 17409 cycles/op, 0 errors)
49640.04 tps ( 66.1 allocs/op, 0.0 logallocs/op, 14.3 tasks/op, 39474 insns/op, 19975 cycles/op, 0 errors)
54963.22 tps ( 66.1 allocs/op, 0.0 logallocs/op, 14.3 tasks/op, 39474 insns/op, 18235 cycles/op, 0 errors)
throughput:
mean= 63672.38 standard-deviation=13195.12
median= 59755.04 median-absolute-deviation=8709.16
maximum=82149.45 minimum=49640.04
instructions_per_op:
mean= 39448.38 standard-deviation=31.60
median= 39466.17 median-absolute-deviation=25.75
maximum=39474.12 minimum=39411.42
cpu_cycles_per_op:
mean= 19267.01 standard-deviation=2217.03
median= 18234.80 median-absolute-deviation=1384.25
maximum=22834.26 minimum=17408.67
```
`perf-simple-query --smp 1 --write` results obtained for fixed 400MHz frequency
and PGO disabled:
Before (write path):
```
enable-cache=1
Running test with config: {partitions=10000, concurrency=100, mode=write, query_single_key=no, counters=no}
Disabling auto compaction
63736.96 tps ( 59.4 allocs/op, 16.4 logallocs/op, 14.3 tasks/op, 49667 insns/op, 19924 cycles/op, 0 errors)
64109.41 tps ( 59.3 allocs/op, 16.0 logallocs/op, 14.3 tasks/op, 49992 insns/op, 20084 cycles/op, 0 errors)
56950.47 tps ( 59.3 allocs/op, 16.0 logallocs/op, 14.3 tasks/op, 50005 insns/op, 20501 cycles/op, 0 errors)
44858.42 tps ( 59.3 allocs/op, 16.0 logallocs/op, 14.3 tasks/op, 50014 insns/op, 21947 cycles/op, 0 errors)
28592.87 tps ( 59.3 allocs/op, 16.0 logallocs/op, 14.3 tasks/op, 50027 insns/op, 27659 cycles/op, 0 errors)
throughput:
mean= 51649.63 standard-deviation=15059.74
median= 56950.47 median-absolute-deviation=12087.33
maximum=64109.41 minimum=28592.87
instructions_per_op:
mean= 49941.18 standard-deviation=153.76
median= 50005.24 median-absolute-deviation=73.01
maximum=50027.07 minimum=49667.05
cpu_cycles_per_op:
mean= 22023.01 standard-deviation=3249.92
median= 20500.74 median-absolute-deviation=1938.76
maximum=27658.75 minimum=19924.32
```
After (write path):
```
enable-cache=1
Running test with config: {partitions=10000, concurrency=100, mode=write, query_single_key=no, counters=no}
Disabling auto compaction
53395.93 tps ( 59.4 allocs/op, 16.5 logallocs/op, 14.3 tasks/op, 50326 insns/op, 21252 cycles/op, 0 errors)
46527.83 tps ( 59.3 allocs/op, 16.0 logallocs/op, 14.3 tasks/op, 50704 insns/op, 21555 cycles/op, 0 errors)
55846.30 tps ( 59.3 allocs/op, 16.0 logallocs/op, 14.3 tasks/op, 50731 insns/op, 21060 cycles/op, 0 errors)
55669.30 tps ( 59.3 allocs/op, 16.0 logallocs/op, 14.3 tasks/op, 50735 insns/op, 21521 cycles/op, 0 errors)
52130.17 tps ( 59.3 allocs/op, 16.0 logallocs/op, 14.3 tasks/op, 50757 insns/op, 21334 cycles/op, 0 errors)
throughput:
mean= 52713.91 standard-deviation=3795.38
median= 53395.93 median-absolute-deviation=2955.40
maximum=55846.30 minimum=46527.83
instructions_per_op:
mean= 50650.57 standard-deviation=182.46
median= 50731.38 median-absolute-deviation=84.09
maximum=50756.62 minimum=50325.87
cpu_cycles_per_op:
mean= 21344.42 standard-deviation=202.86
median= 21334.00 median-absolute-deviation=176.37
maximum=21554.61 minimum=21060.24
```
Fixes #24815
Improvement for rare corner cases. No backport required
Signed-off-by: Benny Halevy <bhalevy@scylladb.com>
Closes scylladb/scylladb#24919
249 lines
8.9 KiB
C++
249 lines
8.9 KiB
C++
/*
 * Copyright (C) 2019 pengjian.uestc @ gmail.com
 *
 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
 */
|
|
|
|
#include "utils/assert.hh"
|
|
#include <seastar/core/coroutine.hh>
|
|
#include <seastar/coroutine/parallel_for_each.hh>
|
|
#include "redis/keyspace_utils.hh"
|
|
#include "schema/schema_builder.hh"
|
|
#include "types/types.hh"
|
|
#include "cql3/statements/ks_prop_defs.hh"
|
|
#include <seastar/core/future.hh>
|
|
#include "utils/log.hh"
|
|
#include "auth/service.hh"
|
|
#include "service/migration_manager.hh"
|
|
#include "service/storage_proxy.hh"
|
|
#include "service/client_state.hh"
|
|
#include "transport/server.hh"
|
|
#include "db/system_keyspace.hh"
|
|
#include "schema/schema.hh"
|
|
#include "gms/gossiper.hh"
|
|
#include <seastar/core/format.hh>
|
|
#include "db/config.hh"
|
|
#include "data_dictionary/keyspace_metadata.hh"
|
|
#include "replica/database.hh"
|
|
|
|
using namespace seastar;
|
|
|
|
namespace redis {
|
|
|
|
static logging::logger logger("keyspace_utils");
|
|
// Builds the schema for the redis STRINGs table in keyspace `ks_name`:
// a utf8 partition key ("pkey") with a single regular utf8 column ("data"),
// no clustering key, compact storage.
schema_ptr strings_schema(sstring ks_name) {
    schema_builder b(generate_legacy_id(ks_name, redis::STRINGs), ks_name, redis::STRINGs,
            /* partition key */            {{"pkey", utf8_type}},
            /* clustering key */           {},
            /* regular columns */          {{"data", utf8_type}},
            /* static columns */           {},
            /* regular column name type */ utf8_type,
            /* comment */                  "save STRINGs for redis");
    b.set_gc_grace_seconds(0); // purge tombstones immediately
    b.with(schema_builder::compact_storage::yes);
    b.with_hash_version();
    return b.build(schema_builder::compact_storage::yes);
}
|
|
|
|
// Builds the schema for the redis LISTs table in keyspace `ks_name`:
// a utf8 partition key ("pkey") clustered by a bytes key ("ckey"),
// with one regular utf8 column ("data"), compact storage.
schema_ptr lists_schema(sstring ks_name) {
    schema_builder b(generate_legacy_id(ks_name, redis::LISTs), ks_name, redis::LISTs,
            /* partition key */            {{"pkey", utf8_type}},
            /* clustering key */           {{"ckey", bytes_type}},
            /* regular columns */          {{"data", utf8_type}},
            /* static columns */           {},
            /* regular column name type */ utf8_type,
            /* comment */                  "save LISTs for redis");
    b.set_gc_grace_seconds(0); // purge tombstones immediately
    b.with(schema_builder::compact_storage::yes);
    b.with_hash_version();
    return b.build(schema_builder::compact_storage::yes);
}
|
|
|
|
// Builds the schema for the redis HASHes table in keyspace `ks_name`:
// a utf8 partition key ("pkey") clustered by a utf8 key ("ckey"),
// with one regular utf8 column ("data"), compact storage.
schema_ptr hashes_schema(sstring ks_name) {
    schema_builder b(generate_legacy_id(ks_name, redis::HASHes), ks_name, redis::HASHes,
            /* partition key */            {{"pkey", utf8_type}},
            /* clustering key */           {{"ckey", utf8_type}},
            /* regular columns */          {{"data", utf8_type}},
            /* static columns */           {},
            /* regular column name type */ utf8_type,
            /* comment */                  "save HASHes for redis");
    b.set_gc_grace_seconds(0); // purge tombstones immediately
    b.with(schema_builder::compact_storage::yes);
    b.with_hash_version();
    return b.build(schema_builder::compact_storage::yes);
}
|
|
|
|
// Builds the schema for the redis SETs table in keyspace `ks_name`:
// a utf8 partition key ("pkey") clustered by a utf8 key ("ckey");
// membership is encoded by the clustering row itself, so there are no
// regular columns. Compact storage.
schema_ptr sets_schema(sstring ks_name) {
    schema_builder b(generate_legacy_id(ks_name, redis::SETs), ks_name, redis::SETs,
            /* partition key */            {{"pkey", utf8_type}},
            /* clustering key */           {{"ckey", utf8_type}},
            /* regular columns */          {},
            /* static columns */           {},
            /* regular column name type */ utf8_type,
            /* comment */                  "save SETs for redis");
    b.set_gc_grace_seconds(0); // purge tombstones immediately
    b.with(schema_builder::compact_storage::yes);
    b.with_hash_version();
    return b.build(schema_builder::compact_storage::yes);
}
|
|
|
|
// Builds the schema for the redis ZSETs table in keyspace `ks_name`:
// a utf8 partition key ("pkey") clustered by a double score ("ckey"),
// with one regular utf8 column ("data"), compact storage.
schema_ptr zsets_schema(sstring ks_name) {
    schema_builder b(generate_legacy_id(ks_name, redis::ZSETs), ks_name, redis::ZSETs,
            /* partition key */            {{"pkey", utf8_type}},
            /* clustering key */           {{"ckey", double_type}},
            /* regular columns */          {{"data", utf8_type}},
            /* static columns */           {},
            /* regular column name type */ utf8_type,
            /* comment */                  "save ZSETs for redis");
    b.set_gc_grace_seconds(0); // purge tombstones immediately
    b.with(schema_builder::compact_storage::yes);
    b.with_hash_version();
    return b.build(schema_builder::compact_storage::yes);
}
|
|
|
|
// Creates the redis keyspaces (REDIS_0..REDIS_{N-1}, N = redis_database_count)
// and the five redis tables in each, unless they already exist. Runs a
// group0 (schema) operation and retries from scratch on concurrent schema
// modification. Must be called on shard 0.
//
// `default_replication_factor` is only used when the config does not specify
// a replication strategy class for the redis keyspaces.
future<> create_keyspace_if_not_exists_impl(seastar::sharded<service::storage_proxy>& proxy, data_dictionary::database db, seastar::sharded<service::migration_manager>& mm, db::config& config, int default_replication_factor) {
    SCYLLA_ASSERT(this_shard_id() == 0);
    // Fall back to SimpleStrategy with the caller-chosen RF when the
    // operator did not configure a replication strategy ("class") explicitly.
    auto keyspace_replication_strategy_options = config.redis_keyspace_replication_strategy_options();
    if (!keyspace_replication_strategy_options.contains("class")) {
        keyspace_replication_strategy_options["class"] = "SimpleStrategy";
        keyspace_replication_strategy_options["replication_factor"] = fmt::format("{}", default_replication_factor);
    }

    // Table name paired with the factory producing its schema for a given
    // keyspace name.
    struct table {
        const char* name;
        std::function<schema_ptr(sstring)> schema;
    };

    // The five fixed redis tables; initialized once (function-local static).
    static std::array tables{table{redis::STRINGs, strings_schema},
            table{redis::LISTs, lists_schema},
            table{redis::SETs, sets_schema},
            table{redis::HASHes, hashes_schema},
            table{redis::ZSETs, zsets_schema}};

    // One keyspace per configured redis database: REDIS_0, REDIS_1, ...
    auto ks_names =
        std::views::iota(0u, config.redis_database_count()) |
        std::views::transform([] (unsigned i) { return fmt::format("REDIS_{}", i); }) |
        std::ranges::to<std::vector<sstring>>();

    // Retry loop: re-evaluated from the top after a
    // group0_concurrent_modification (see the catch below).
    while (true) {
        // Schema is complete iff every keyspace exists and contains all
        // five redis tables.
        bool schema_ok = std::ranges::all_of(ks_names, [&] (auto& ks_name) {
            auto check = [&] (table t) {
                return db.has_schema(ks_name, t.name);
            };
            return db.has_keyspace(ks_name) && std::ranges::all_of(tables, check);
        });

        if (schema_ok) {
            logger.info("Redis schema is already up-to-date");
            co_return; // if schema is created already do nothing
        }

        auto& mml = mm.local();
        auto tm = proxy.local().get_token_metadata_ptr();

        // Build keyspace metadata (durable writes + replication options)
        // for every redis keyspace, whether or not it already exists; the
        // existence check happens below, under the group0 guard.
        std::vector<lw_shared_ptr<keyspace_metadata>> ksms;
        for (auto& ks_name: ks_names) {
            cql3::statements::ks_prop_defs attrs;
            attrs.add_property(cql3::statements::ks_prop_defs::KW_DURABLE_WRITES, "true");
            std::map<sstring, sstring> replication_properties;
            for (auto&& option : keyspace_replication_strategy_options) {
                replication_properties.emplace(option.first, option.second);
            }
            attrs.add_property(cql3::statements::ks_prop_defs::KW_REPLICATION, replication_properties);
            attrs.validate();

            ksms.push_back(attrs.as_ks_metadata(ks_name, *tm, proxy.local().features(), proxy.local().local_db().get_config()));
        }

        // Take the group0 guard; the schema state checked from here on is
        // stable until announce() or until a concurrent modification is
        // detected.
        auto group0_guard = co_await mml.start_group0_operation();
        auto ts = group0_guard.write_timestamp();
        // chunked_vector avoids one large contiguous allocation when the
        // number of mutations scales with the number of keyspaces/tables.
        utils::chunked_vector<mutation> mutations;

        // Announce creation of each keyspace that does not exist yet.
        for (auto ksm: ksms) {
            if (db.has_keyspace(ksm->name())) {
                continue;
            }

            auto muts = service::prepare_new_keyspace_announcement(db.real_database(), ksm, ts);
            std::move(muts.begin(), muts.end(), std::back_inserter(mutations));
        }

        // table_gen(ksm, cf_name, schema): appends the mutations creating
        // one table to `mutations`, skipping tables that already exist.
        // `mutations` and the proxy are captured by reference via std::ref.
        auto table_gen = std::bind_front(
            [] (data_dictionary::database db, service::storage_proxy& sp, utils::chunked_vector<mutation>& mutations,
                    api::timestamp_type ts, const keyspace_metadata& ksm, sstring cf_name, schema_ptr schema) -> future<> {
                if (db.has_schema(ksm.name(), cf_name)) {
                    co_return;
                }

                logger.info("Create keyspace: {}, table: {} for redis.", ksm.name(), cf_name);
                co_await service::prepare_new_column_family_announcement(mutations, sp, ksm, schema, ts);
            }, db, std::ref(proxy.local()), std::ref(mutations), ts);

        // Generate the table mutations for all keyspaces in parallel.
        // NOTE(review): `table_gen` is moved into the per-keyspace lambda and
        // then moved again into the per-table lambda; with more than one
        // keyspace/table the later invocations see a moved-from callable —
        // verify this is benign (bind_front over a captureless lambda and
        // reference wrappers) or capture by copy instead.
        co_await coroutine::parallel_for_each(ksms, [table_gen = std::move(table_gen)] (const lw_shared_ptr<keyspace_metadata> ksm) mutable {
            return parallel_for_each(tables, [ksm, table_gen = std::move(table_gen)] (table t) {
                return table_gen(*ksm, t.name, t.schema(ksm->name()));
            }).discard_result();
        });

        // Everything already existed — nothing to announce.
        if (mutations.empty()) {
            co_return;
        }

        try {
            co_return co_await mml.announce(std::move(mutations), std::move(group0_guard),
                "keyspace-utils: create default keyspaces and databases for redis");
        } catch (service::group0_concurrent_modification&) {
            // Someone else changed the schema under us; loop and retry.
            logger.info("Concurrent operation is detected while creating default databases for redis, retrying.");
        }
    }
}
|
|
|
|
// Public entry point: pick a replication factor for the redis keyspaces
// based on how many nodes are currently up (3 normally, 1 — with a warning —
// when fewer than 3 nodes are live), then create the keyspaces and tables
// if they do not already exist.
future<> maybe_create_keyspace(seastar::sharded<service::storage_proxy>& proxy, data_dictionary::database db, seastar::sharded<service::migration_manager>& mm, db::config& config, sharded<gms::gossiper>& gossiper) {
    const auto up_nodes = gossiper.local().get_up_endpoint_count();
    int rf = 3;
    if (up_nodes < rf) {
        // Too few live nodes for the desired RF; degrade to RF=1 and warn.
        rf = 1;
        logger.warn("Creating keyspace for redis with unsafe, live endpoint nodes count: {}.", up_nodes);
    }
    return create_keyspace_if_not_exists_impl(proxy, db, mm, config, rf);
}
|
|
|
|
}
|