mirror of
https://github.com/scylladb/scylladb.git
synced 2026-04-24 02:20:37 +00:00
Currently, we use std::vector<*mutation> to keep
a list of mutations for processing.
This can lead to large allocations, e.g. when the vector
size is a function of the number of tables.
Use a chunked vector instead to prevent oversized allocations.
`perf-simple-query --smp 1` results obtained for fixed 400MHz frequency
and PGO disabled:
Before (read path):
```
enable-cache=1
Running test with config: {partitions=10000, concurrency=100, mode=read, query_single_key=no, counters=no}
Disabling auto compaction
Creating 10000 partitions...
89055.97 tps ( 66.1 allocs/op, 0.0 logallocs/op, 14.2 tasks/op, 39417 insns/op, 18003 cycles/op, 0 errors)
103372.72 tps ( 66.1 allocs/op, 0.0 logallocs/op, 14.2 tasks/op, 39380 insns/op, 17300 cycles/op, 0 errors)
98942.27 tps ( 66.1 allocs/op, 0.0 logallocs/op, 14.2 tasks/op, 39413 insns/op, 17336 cycles/op, 0 errors)
103752.93 tps ( 66.1 allocs/op, 0.0 logallocs/op, 14.2 tasks/op, 39407 insns/op, 17252 cycles/op, 0 errors)
102516.77 tps ( 66.1 allocs/op, 0.0 logallocs/op, 14.2 tasks/op, 39403 insns/op, 17288 cycles/op, 0 errors)
throughput:
mean= 99528.13 standard-deviation=6155.71
median= 102516.77 median-absolute-deviation=3844.59
maximum=103752.93 minimum=89055.97
instructions_per_op:
mean= 39403.99 standard-deviation=14.25
median= 39406.75 median-absolute-deviation=9.30
maximum=39416.63 minimum=39380.39
cpu_cycles_per_op:
mean= 17435.81 standard-deviation=318.24
median= 17300.40 median-absolute-deviation=147.59
maximum=18002.53 minimum=17251.75
```
After (read path):
```
enable-cache=1
Running test with config: {partitions=10000, concurrency=100, mode=read, query_single_key=no, counters=no}
Disabling auto compaction
Creating 10000 partitions...
59755.04 tps ( 66.2 allocs/op, 0.0 logallocs/op, 14.2 tasks/op, 39466 insns/op, 22834 cycles/op, 0 errors)
71854.16 tps ( 66.1 allocs/op, 0.0 logallocs/op, 14.2 tasks/op, 39417 insns/op, 17883 cycles/op, 0 errors)
82149.45 tps ( 66.1 allocs/op, 0.0 logallocs/op, 14.2 tasks/op, 39411 insns/op, 17409 cycles/op, 0 errors)
49640.04 tps ( 66.1 allocs/op, 0.0 logallocs/op, 14.3 tasks/op, 39474 insns/op, 19975 cycles/op, 0 errors)
54963.22 tps ( 66.1 allocs/op, 0.0 logallocs/op, 14.3 tasks/op, 39474 insns/op, 18235 cycles/op, 0 errors)
throughput:
mean= 63672.38 standard-deviation=13195.12
median= 59755.04 median-absolute-deviation=8709.16
maximum=82149.45 minimum=49640.04
instructions_per_op:
mean= 39448.38 standard-deviation=31.60
median= 39466.17 median-absolute-deviation=25.75
maximum=39474.12 minimum=39411.42
cpu_cycles_per_op:
mean= 19267.01 standard-deviation=2217.03
median= 18234.80 median-absolute-deviation=1384.25
maximum=22834.26 minimum=17408.67
```
`perf-simple-query --smp 1 --write` results obtained for fixed 400MHz frequency
and PGO disabled:
Before (write path):
```
enable-cache=1
Running test with config: {partitions=10000, concurrency=100, mode=write, query_single_key=no, counters=no}
Disabling auto compaction
63736.96 tps ( 59.4 allocs/op, 16.4 logallocs/op, 14.3 tasks/op, 49667 insns/op, 19924 cycles/op, 0 errors)
64109.41 tps ( 59.3 allocs/op, 16.0 logallocs/op, 14.3 tasks/op, 49992 insns/op, 20084 cycles/op, 0 errors)
56950.47 tps ( 59.3 allocs/op, 16.0 logallocs/op, 14.3 tasks/op, 50005 insns/op, 20501 cycles/op, 0 errors)
44858.42 tps ( 59.3 allocs/op, 16.0 logallocs/op, 14.3 tasks/op, 50014 insns/op, 21947 cycles/op, 0 errors)
28592.87 tps ( 59.3 allocs/op, 16.0 logallocs/op, 14.3 tasks/op, 50027 insns/op, 27659 cycles/op, 0 errors)
throughput:
mean= 51649.63 standard-deviation=15059.74
median= 56950.47 median-absolute-deviation=12087.33
maximum=64109.41 minimum=28592.87
instructions_per_op:
mean= 49941.18 standard-deviation=153.76
median= 50005.24 median-absolute-deviation=73.01
maximum=50027.07 minimum=49667.05
cpu_cycles_per_op:
mean= 22023.01 standard-deviation=3249.92
median= 20500.74 median-absolute-deviation=1938.76
maximum=27658.75 minimum=19924.32
```
After (write path):
```
enable-cache=1
Running test with config: {partitions=10000, concurrency=100, mode=write, query_single_key=no, counters=no}
Disabling auto compaction
53395.93 tps ( 59.4 allocs/op, 16.5 logallocs/op, 14.3 tasks/op, 50326 insns/op, 21252 cycles/op, 0 errors)
46527.83 tps ( 59.3 allocs/op, 16.0 logallocs/op, 14.3 tasks/op, 50704 insns/op, 21555 cycles/op, 0 errors)
55846.30 tps ( 59.3 allocs/op, 16.0 logallocs/op, 14.3 tasks/op, 50731 insns/op, 21060 cycles/op, 0 errors)
55669.30 tps ( 59.3 allocs/op, 16.0 logallocs/op, 14.3 tasks/op, 50735 insns/op, 21521 cycles/op, 0 errors)
52130.17 tps ( 59.3 allocs/op, 16.0 logallocs/op, 14.3 tasks/op, 50757 insns/op, 21334 cycles/op, 0 errors)
throughput:
mean= 52713.91 standard-deviation=3795.38
median= 53395.93 median-absolute-deviation=2955.40
maximum=55846.30 minimum=46527.83
instructions_per_op:
mean= 50650.57 standard-deviation=182.46
median= 50731.38 median-absolute-deviation=84.09
maximum=50756.62 minimum=50325.87
cpu_cycles_per_op:
mean= 21344.42 standard-deviation=202.86
median= 21334.00 median-absolute-deviation=176.37
maximum=21554.61 minimum=21060.24
```
Fixes #24815
Improvement for rare corner cases. No backport required.
Signed-off-by: Benny Halevy <bhalevy@scylladb.com>
Closes scylladb/scylladb#24919
205 lines
11 KiB
C++
205 lines
11 KiB
C++
/*
|
|
* Copyright (C) 2023-present ScyllaDB
|
|
*/
|
|
|
|
/*
|
|
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
|
*/
|
|
|
|
#include <seastar/core/sstring.hh>
|
|
#include <seastar/core/future-util.hh>
|
|
#include <seastar/core/aligned_buffer.hh>
|
|
#include <seastar/util/closeable.hh>
|
|
|
|
#include "test/lib/scylla_test_case.hh"
|
|
#include "test/lib/test_services.hh"
|
|
#include "test/lib/reader_concurrency_semaphore.hh"
|
|
#include "test/lib/sstable_utils.hh"
|
|
#include "test/lib/random_utils.hh"
|
|
#include "test/lib/key_utils.hh"
|
|
|
|
#include "schema/schema.hh"
|
|
#include "schema/schema_builder.hh"
|
|
|
|
#include "sstables/sstables.hh"
|
|
#include "sstables/compress.hh"
|
|
#include "compaction/compaction.hh"
|
|
#include "compaction/compaction_manager.hh"
|
|
#include "replica/compaction_group.hh"
|
|
|
|
using namespace sstables;
|
|
|
|
// Builds an sstable out of (up to) 100 generated partitions, keeping only
// the partitions whose token passes `token_filter`. Each kept partition gets
// a single clustered row whose clustering key carries a monotonically
// increasing per-shard counter value.
static sstables::shared_sstable generate_sstable(schema_ptr s, std::function<shared_sstable()> sst_gen, noncopyable_function<bool(dht::token)> token_filter) {
    auto build_mutation = [&] (const dht::decorated_key& dk) {
        // Monotonic per-shard counter: every generated row is distinct
        // across all calls on this shard.
        static thread_local int32_t value = 1;

        mutation m(s, dk);
        auto ckey = clustering_key::from_exploded(*s, {int32_type->decompose(value++)});
        m.set_clustered_cell(ckey, bytes("value"), data_value(int32_t(value)), api::timestamp_clock::now().time_since_epoch().count());
        return m;
    };

    auto partition_keys = tests::generate_partition_keys(100, s);

    // chunked_vector avoids a single oversized contiguous allocation when
    // holding many mutations.
    utils::chunked_vector<mutation> mutations;
    mutations.reserve(partition_keys.size());
    for (const auto& dk : partition_keys) {
        if (!token_filter(dk.token())) {
            continue;
        }
        mutations.push_back(build_mutation(dk));
    }
    return make_sstable_containing(sst_gen, std::move(mutations));
}
|
|
|
|
// Builds an sstable spanning partitions from the whole token range (no token
// filtering), so a split classifier with more than one group will require it
// to be split.
static sstables::shared_sstable sstable_that_needs_split(schema_ptr s, std::function<shared_sstable()> sst_gen) {
    auto accept_all_tokens = [] (dht::token) { return true; };
    return generate_sstable(std::move(s), std::move(sst_gen), std::move(accept_all_tokens));
}
|
|
|
|
// A minimal compaction::table_state implementation exposing a single
// compaction group over the full token range. It lets tests drive the
// compaction manager directly and inspect the resulting main sstable set,
// without spinning up a full replica::table.
class single_compaction_group : public compaction::table_state {
private:
    schema_ptr _schema;
    sstables::sstables_manager& _sst_man;
    // sstables owned by this group; split-compaction output is folded back
    // into _main_set via on_compaction_completion().
    sstables::sstable_set _main_set;
    // Kept empty in these tests; exists to satisfy the table_state interface.
    sstables::sstable_set _maintenance_set;
    std::vector<sstables::shared_sstable> _compacted_undeleted_sstables;
    mutable sstables::compaction_strategy _compaction_strategy;
    compaction_strategy_state _compaction_strategy_state;
    // Constructed with a null gc-clock source; tombstone GC is disabled below.
    tombstone_gc_state _tombstone_gc_state;
    compaction_backlog_tracker _backlog_tracker;
    condition_variable _staging_done_condition;
    // Factory used by the compaction to create output sstables.
    std::function<shared_sstable()> _sstable_factory;
    mutable tests::reader_concurrency_semaphore_wrapper _semaphore;
public:
    single_compaction_group(table_for_tests& t, sstables::sstables_manager& sst_man, std::function<shared_sstable()> sstable_factory)
        : _schema(t.schema())
        , _sst_man(sst_man)
        , _main_set(sstables::make_partitioned_sstable_set(_schema, token_range()))
        , _maintenance_set(sstables::make_partitioned_sstable_set(_schema, token_range()))
        , _compaction_strategy(sstables::make_compaction_strategy(_schema->compaction_strategy(), _schema->compaction_strategy_options()))
        , _compaction_strategy_state(compaction::compaction_strategy_state::make(_compaction_strategy))
        , _tombstone_gc_state(nullptr)
        , _backlog_tracker(_compaction_strategy.make_backlog_tracker())
        , _sstable_factory(std::move(sstable_factory))
    {
        // Register with the table's compaction manager; undone by stop().
        t->get_compaction_manager().add(*this);
    }

    // Deregisters this group from the compaction manager. Must be waited on
    // before the group is destroyed.
    future<> stop(table_for_tests& t) {
        return t->get_compaction_manager().remove(*this);
    }

    // Applies a delta to the main sstable set: removals first, then additions.
    void rebuild_main_set(std::vector<shared_sstable> to_add, std::vector<shared_sstable> to_remove) {
        for (auto& sst : to_remove) {
            _main_set.erase(sst);
        }
        for (auto& sst : to_add) {
            _main_set.insert(sst);
        }
    }

    // This single group owns the entire token ring.
    virtual dht::token_range token_range() const noexcept override { return dht::token_range::make(dht::first_token(), dht::last_token()); }
    virtual const schema_ptr& schema() const noexcept override { return _schema; }
    virtual unsigned min_compaction_threshold() const noexcept override { return _schema->min_compaction_threshold(); }
    virtual bool compaction_enforce_min_threshold() const noexcept override { return false; }
    virtual const sstables::sstable_set& main_sstable_set() const override { return _main_set; }
    virtual const sstables::sstable_set& maintenance_sstable_set() const override { return _maintenance_set; }
    virtual lw_shared_ptr<const sstables::sstable_set> sstable_set_for_tombstone_gc() const override { return make_lw_shared<const sstables::sstable_set>(main_sstable_set()); }
    // No sstable ever counts as fully expired in these tests.
    virtual std::unordered_set<sstables::shared_sstable> fully_expired_sstables(const std::vector<sstables::shared_sstable>& sstables, gc_clock::time_point compaction_time) const override { return {}; }
    virtual const std::vector<sstables::shared_sstable>& compacted_undeleted_sstables() const noexcept override { return _compacted_undeleted_sstables; }
    virtual sstables::compaction_strategy& get_compaction_strategy() const noexcept override { return _compaction_strategy; }
    virtual compaction_strategy_state& get_compaction_strategy_state() noexcept override { return _compaction_strategy_state; }
    virtual reader_permit make_compaction_reader_permit() const override { return _semaphore.make_permit(); }
    virtual sstables::sstables_manager& get_sstables_manager() noexcept override { return _sst_man; }
    virtual sstables::shared_sstable make_sstable() const override { return _sstable_factory(); }
    virtual sstables::sstable_writer_config configure_writer(sstring origin) const override { return _sst_man.configure_writer(std::move(origin)); }
    // There are no memtables behind this group, so report values that never
    // constrain compaction.
    virtual api::timestamp_type min_memtable_timestamp() const override { return api::min_timestamp; }
    virtual api::timestamp_type min_memtable_live_timestamp() const override { return api::min_timestamp; }
    virtual api::timestamp_type min_memtable_live_row_marker_timestamp() const override { return api::min_timestamp; }
    virtual bool memtable_has_key(const dht::decorated_key& key) const override { return false; }
    // Folds a finished compaction's output back into the main set.
    virtual future<> on_compaction_completion(sstables::compaction_completion_desc desc, sstables::offstrategy offstrategy) override {
        testlog.info("Adding {} sstable(s), removing {} sstables", desc.new_sstables.size(), desc.old_sstables.size());
        rebuild_main_set(desc.new_sstables, desc.old_sstables);
        return make_ready_future<>();
    }
    virtual bool is_auto_compaction_disabled_by_user() const noexcept override { return false; }
    virtual bool tombstone_gc_enabled() const noexcept override { return false; }
    virtual const tombstone_gc_state& get_tombstone_gc_state() const noexcept override { return _tombstone_gc_state; }
    virtual compaction_backlog_tracker& get_backlog_tracker() override { return _backlog_tracker; }
    virtual const std::string get_group_id() const noexcept override { return "0"; }
    virtual seastar::condition_variable& get_staging_done_condition() noexcept override { return _staging_done_condition; }
    // Splitting is not modeled here; returns an empty range.
    dht::token_range get_token_range_after_split(const dht::token& t) const noexcept override { return dht::token_range(); }
};
|
|
|
|
// Exercises split compaction: sstables whose key range straddles a
// compaction-group boundary must be rewritten into one output per group,
// while sstables already confined to a single group are left untouched.
SEASTAR_TEST_CASE(basic_compaction_group_splitting_test) {
    return test_env::do_with_async([] (test_env& env) {
        auto builder = schema_builder("tests", "compaction_group_splitting")
            .with_column("id", utf8_type, column_kind::partition_key)
            .with_column("cl", int32_type, column_kind::clustering_key)
            .with_column("value", int32_type);
        auto s = builder.build();

        auto t = env.make_table_for_tests(s);
        auto close_table = deferred_stop(t);
        t->start();

        auto sst_factory = env.make_sst_factory(s);
        // Maps a token to its compaction group id. With argument 1,
        // expected_ids below assumes this yields exactly two groups {0, 1}
        // — presumably a split of the token ring in half; confirm against
        // dht::compaction_group_of.
        auto classifier = [] (dht::token t) -> mutation_writer::token_group_id {
            return dht::compaction_group_of(1, t);
        };
        // An sstable needs splitting iff its first and last keys fall into
        // different groups.
        auto sstable_needs_split = [&] (const sstables::shared_sstable& sst) {
            return classifier(sst->get_first_decorated_key().token()) != classifier(sst->get_last_decorated_key().token());
        };

        // Runs a split compaction over `ssts` on a fresh compaction group,
        // checks the reported input size and the number of sstables in the
        // resulting main set, then applies `validate` to each output sstable.
        auto run_test = [&] (std::vector<sstables::shared_sstable> ssts, size_t expected_output, noncopyable_function<void(const sstables::shared_sstable&)> validate) {
            auto compaction_group = std::make_unique<single_compaction_group>(t, env.manager(), sst_factory);

            compaction_group->rebuild_main_set(ssts, {});

            auto& cm = t->get_compaction_manager();
            auto expected_compaction_size = std::ranges::fold_left(ssts | std::views::transform([&] (auto& sst) {
                // sstables that don't need splitting have compaction bypassed,
                // so they contribute nothing to the compaction's input size.
                return sstable_needs_split(sst) ? sst->bytes_on_disk() : size_t(0);
            }), int64_t(0), std::plus{});

            auto ret = cm.perform_split_compaction(*compaction_group, sstables::compaction_type_options::split{classifier}, tasks::task_info{}).get();
            BOOST_REQUIRE_EQUAL(ret->start_size, expected_compaction_size);

            BOOST_REQUIRE(compaction_group->main_sstable_set().size() == expected_output);
            compaction_group->main_sstable_set().for_each_sstable([&] (const sstables::shared_sstable& sst) {
                // No output sstable may straddle a group boundary.
                BOOST_REQUIRE(!sstable_needs_split(sst));
                validate(sst);
            });
            compaction_group->stop(t).get();
        };

        // An sstable that needs a split generates 2 sstables: one for the
        // left group, another for the right.
        {
            auto input = sstable_that_needs_split(s, sst_factory);
            std::unordered_set<mutation_writer::token_group_id> expected_ids { 0, 1 };
            run_test({ input }, 2, [&] (const sstables::shared_sstable& sst) {
                // Each group id must be produced exactly once.
                BOOST_REQUIRE(expected_ids.erase(classifier(sst->get_first_decorated_key().token())) == 1);
            });
            BOOST_REQUIRE(expected_ids.empty());
        }
        // An sstable that doesn't need a split isn't actually compacted.
        {
            auto input = generate_sstable(s, sst_factory, [&] (dht::token t) { return classifier(t) == 0; });
            run_test({ input }, 1, [&] (const sstables::shared_sstable& sst) {
                // Bypassed compaction keeps the original sstable (same
                // generation) rather than rewriting it.
                BOOST_REQUIRE(sst->generation() == input->generation());
                BOOST_REQUIRE_EQUAL(0, classifier(sst->get_first_decorated_key().token()));
            });
        }

        // Combination of both cases: input1 splits into 2 outputs, input2
        // passes through unchanged, for 3 sstables total.
        {
            auto input1 = sstable_that_needs_split(s, sst_factory);
            auto input2 = generate_sstable(s, sst_factory, [&] (dht::token t) { return classifier(t) == 0; });
            bool found_input2 = false;
            run_test({ input1, input2 }, 3, [&] (const sstables::shared_sstable& sst) {
                found_input2 |= sst->generation() == input2->generation();
            });
            BOOST_REQUIRE(found_input2);
        }
    });
}
|