cache: Evict with row granularity

Instead of evicting whole partitions at once, eviction now removes individual rows.

As part of this, invalidation of partition entries was changed so that
data referenced by snapshots is no longer evicted right away; instead,
such versions are unlinked from the entry and left for the reclaimer to
evict.
This commit is contained in:
Tomasz Grabiec
2018-02-27 12:32:46 +01:00
parent dce9185fc9
commit 381bf02f55
11 changed files with 152 additions and 112 deletions

View File

@@ -118,6 +118,7 @@ class cache_flat_mutation_reader final : public flat_mutation_reader::impl {
_end_of_stream = true;
_state = state::end_of_stream;
}
void touch_partition();
public:
cache_flat_mutation_reader(schema_ptr s,
dht::decorated_key dk,
@@ -186,11 +187,20 @@ future<> cache_flat_mutation_reader::process_static_row(db::timeout_clock::time_
}
}
inline
// Marks the partition as recently used by touching its last dummy row,
// so that partitions with no rows read in this pass are still kept warm
// in the cache tracker's LRU.
// Only done when reading from the latest version: rows of older versions
// must not be moved to the front of the LRU (older versions are evicted
// first to keep snapshots consistent).
void cache_flat_mutation_reader::touch_partition() {
    if (_snp->at_latest_version()) {
        // Every evictable partition has a last dummy entry in clustered_rows(),
        // so rbegin() is valid here.
        rows_entry& last_dummy = *_snp->version()->partition().clustered_rows().rbegin();
        _snp->tracker()->touch(last_dummy);
    }
}
inline
future<> cache_flat_mutation_reader::fill_buffer(db::timeout_clock::time_point timeout) {
if (_state == state::before_static_row) {
auto after_static_row = [this, timeout] {
if (_ck_ranges_curr == _ck_ranges_end) {
touch_partition();
finish_reader();
return make_ready_future<>();
}
@@ -300,6 +310,7 @@ future<> cache_flat_mutation_reader::read_from_underlying(db::timeout_clock::tim
auto inserted = insert_result.second;
auto it = insert_result.first;
if (inserted) {
_snp->tracker()->insert(*e);
e.release();
auto next = std::next(it);
it->set_continuous(next->continuous());
@@ -315,6 +326,7 @@ future<> cache_flat_mutation_reader::read_from_underlying(db::timeout_clock::tim
auto inserted = insert_result.second;
if (inserted) {
clogger.trace("csm {}: inserted dummy at {}", this, _upper_bound);
_snp->tracker()->insert(*e);
e.release();
} else {
clogger.trace("csm {}: mark {} as continuous", this, insert_result.first->position());
@@ -359,6 +371,7 @@ bool cache_flat_mutation_reader::ensure_population_lower_bound() {
auto inserted = insert_result.second;
if (inserted) {
clogger.trace("csm {}: inserted lower bound dummy at {}", this, e->position());
_snp->tracker()->insert(*e);
e.release();
}
});
@@ -411,7 +424,7 @@ void cache_flat_mutation_reader::maybe_add_to_cache(const clustering_row& cr) {
: mp.clustered_rows().lower_bound(cr.key(), less);
auto insert_result = mp.clustered_rows().insert_check(it, *new_entry, less);
if (insert_result.second) {
_read_context->cache().on_row_insert();
_snp->tracker()->insert(*new_entry);
new_entry.release();
}
it = insert_result.first;
@@ -438,11 +451,13 @@ inline
void cache_flat_mutation_reader::start_reading_from_underlying() {
clogger.trace("csm {}: start_reading_from_underlying(), range=[{}, {})", this, _lower_bound, _next_row_in_range ? _next_row.position() : _upper_bound);
_state = state::move_to_underlying;
_next_row.touch();
}
inline
void cache_flat_mutation_reader::copy_from_cache_to_buffer() {
clogger.trace("csm {}: copy_from_cache, next={}, next_row_in_range={}", this, _next_row.position(), _next_row_in_range);
_next_row.touch();
position_in_partition_view next_lower_bound = _next_row.dummy() ? _next_row.position() : position_in_partition_view::after_key(_next_row.key());
for (auto &&rts : _snp->range_tombstones(_lower_bound, _next_row_in_range ? next_lower_bound : _upper_bound)) {
// This guarantees that rts starts after any emitted clustering_row
@@ -509,6 +524,7 @@ void cache_flat_mutation_reader::move_to_range(query::clustering_row_ranges::con
auto new_entry = current_allocator().construct<rows_entry>(*_schema, _lower_bound, is_dummy::yes, is_continuous::no);
return rows.insert_before(_next_row.get_iterator_in_latest_version(), *new_entry);
});
_snp->tracker()->insert(*it);
_last_row = partition_snapshot_row_weakref(*_snp, it, true);
} else {
_read_context->cache().on_mispopulate();

View File

@@ -310,6 +310,7 @@ void mutation_partition::apply_monotonically(const schema& s, mutation_partition
auto continuous = i->continuous() || src_e.continuous();
auto dummy = i->dummy() && src_e.dummy();
if (tracker) {
i->_lru_link.swap_nodes(src_e._lru_link);
// Newer evictable versions store complete rows
i->_row = std::move(src_e._row);
} else {
@@ -1361,8 +1362,15 @@ rows_entry::rows_entry(rows_entry&& o) noexcept
: _link(std::move(o._link))
, _key(std::move(o._key))
, _row(std::move(o._row))
, _lru_link()
, _flags(std::move(o._flags))
{ }
{
if (o._lru_link.is_linked()) {
auto prev = o._lru_link.prev_;
o._lru_link.unlink();
cache_tracker::lru_type::node_algorithms::link_after(prev, _lru_link.this_ptr());
}
}
row::row(const row& o)
: _type(o._type)
@@ -2129,20 +2137,6 @@ clustering_interval_set mutation_partition::get_continuity(const schema& s, is_c
return result;
}
void mutation_partition::evict(cache_tracker& tracker) noexcept {
{
assert(!_rows.empty());
auto del = current_deleter<rows_entry>();
auto i = _rows.erase_and_dispose(_rows.begin(), std::prev(_rows.end()), current_deleter<rows_entry>());
rows_entry& e = *i;
assert(e.is_last_dummy());
e.set_continuous(false);
}
_row_tombstones.clear();
_static_row_continuous = false;
_static_row = {};
}
bool
mutation_partition::check_continuity(const schema& s, const position_range& r, is_continuous cont) const {
auto less = rows_entry::compare(s);

View File

@@ -675,10 +675,16 @@ public:
deletable_row difference(const schema&, column_kind, const deletable_row& other) const;
};
class cache_tracker;
class rows_entry {
using lru_link_type = bi::list_member_hook<bi::link_mode<bi::auto_unlink>>;
friend class cache_tracker;
friend class size_calculator;
intrusive_set_external_comparator_member_hook _link;
clustering_key _key;
deletable_row _row;
lru_link_type _lru_link;
struct flags {
// _before_ck and _after_ck encode position_in_partition::weight
bool _before_ck : 1;
@@ -815,6 +821,7 @@ public:
bool equal(const schema& s, const rows_entry& other, const schema& other_schema) const;
size_t memory_usage() const;
void on_evicted(cache_tracker&) noexcept;
};
// Represents a set of writes made to a single partition.
@@ -920,8 +927,6 @@ public:
bool fully_discontinuous(const schema&, const position_range&);
// Returns true iff all keys from given range have continuity membership as specified by is_continuous.
bool check_continuity(const schema&, const position_range&, is_continuous) const;
// Removes all data, marking affected ranges as discontinuous.
void evict(cache_tracker&) noexcept;
// Applies mutation_fragment.
// The fragment must be goverened by the same schema as this object.
void apply(const schema& s, const mutation_fragment&);

View File

@@ -22,6 +22,7 @@
#pragma once
#include "partition_version.hh"
#include "row_cache.hh"
#include <boost/algorithm/cxx11/any_of.hpp>
class partition_snapshot_row_cursor;
@@ -318,12 +319,16 @@ public:
auto latest_i = get_iterator_in_latest_version();
rows_entry& latest = *latest_i;
if (is_in_latest_version()) {
if (_snp.at_latest_version()) {
_snp.tracker()->touch(latest);
}
return latest;
} else {
// Copy row from older version because rows in evictable versions must
// hold values which are independently complete to be consistent on eviction.
auto e = current_allocator().construct<rows_entry>(*_current_row[0].it);
e->set_continuous(latest_i != rows.end() && latest_i->continuous());
_snp.tracker()->insert(*e);
rows.insert_before(latest_i, *e);
return *e;
}
@@ -354,10 +359,22 @@ public:
auto latest_i = get_iterator_in_latest_version();
auto e = current_allocator().construct<rows_entry>(_schema, pos, is_dummy(!pos.is_clustering_row()),
is_continuous(latest_i != rows.end() && latest_i->continuous()));
_snp.tracker()->insert(*e);
rows.insert_before(latest_i, *e);
return e;
}
// Brings the entry pointed to by the cursor to the front of the LRU.
// Cursor must be valid and pointing at a row.
void touch() {
    // We cannot bring entries from non-latest versions to the front, because
    // doing so could violate the LRU ordering invariant, which states that
    // rows from older versions must be evicted first. That invariant is
    // needed to keep snapshots consistent under eviction.
    if (_snp.at_latest_version() && is_in_latest_version()) {
        _snp.tracker()->touch(*get_iterator_in_latest_version());
    }
}
// Can be called when cursor is pointing at a row, even when invalid.
const position_in_partition& position() const {
return _position;

View File

@@ -251,6 +251,9 @@ partition_version& partition_entry::add_version(const schema& s, cache_tracker*
new_version->partition().set_static_row_continuous(_version->partition().static_row_continuous());
new_version->insert_before(*_version);
set_version(new_version);
if (tracker) {
tracker->insert(*new_version);
}
return *new_version;
}
@@ -490,6 +493,9 @@ void partition_entry::upgrade(schema_ptr from, schema_ptr to, cache_tracker* tra
auto new_version = current_allocator().construct<partition_version>(squashed(from, to));
auto old_version = &*_version;
set_version(new_version);
if (tracker) {
tracker->insert(*new_version);
}
remove_or_mark_as_unique_owner(old_version, tracker);
}
@@ -559,9 +565,13 @@ void partition_entry::evict(cache_tracker& tracker) noexcept {
if (!_version) {
return;
}
// Must evict from all versions atomically to keep snapshots consistent.
for (auto&& v : versions()) {
v.partition().evict(tracker);
if (_snapshot) {
_snapshot->_version = std::move(_version);
_snapshot->_version.mark_as_unique_owner();
_snapshot->_entry = nullptr;
} else {
auto v = &*_version;
_version = { };
remove_or_mark_as_unique_owner(v, &tracker);
}
current_allocator().invalidate_references();
}

View File

@@ -407,8 +407,9 @@ public:
return *this;
}
// Removes all data marking affected ranges as discontinuous.
// Includes versions referenced by snapshots.
// Removes data contained by this entry, but not owned by snapshots.
// Snapshots will be unlinked and evicted independently by reclaimer.
// This entry is invalid after this and can only be destroyed.
void evict(cache_tracker&) noexcept;
partition_version_ref& version() {

View File

@@ -97,6 +97,7 @@ cache_tracker::setup_metrics() {
sm::make_derive("row_hits", sm::description("total number of rows needed by reads and found in cache"), _stats.row_hits),
sm::make_derive("row_misses", sm::description("total number of rows needed by reads and missing in cache"), _stats.row_misses),
sm::make_derive("row_insertions", sm::description("total number of rows added to cache"), _stats.row_insertions),
sm::make_derive("row_evictions", sm::description("total number of rows evicted from cache"), _stats.row_evictions),
sm::make_derive("static_row_insertions", sm::description("total number of static rows added to cache"), _stats.static_row_insertions),
sm::make_derive("concurrent_misses_same_key", sm::description("total number of operation with misses same key"), _stats.concurrent_misses_same_key),
sm::make_derive("partition_merges", sm::description("total number of partitions merged"), _stats.partition_merges),
@@ -125,20 +126,36 @@ void cache_tracker::clear() {
allocator().invalidate_references();
}
void cache_tracker::touch(cache_entry& e) {
auto move_to_front = [this] (lru_type& lru, cache_entry& e) {
lru.erase(lru.iterator_to(e));
lru.push_front(e);
};
move_to_front(_lru, e);
// Moves the given row entry to the front of the eviction LRU, making it
// the least likely to be evicted next.
void cache_tracker::touch(rows_entry& e) {
    if (e._lru_link.is_linked()) { // last dummy may not be linked if evicted.
        _lru.erase(_lru.iterator_to(e));
    }
    // Re-link (or link for the first time) at the front of the LRU.
    _lru.push_front(e);
}
void cache_tracker::insert(rows_entry& entry) noexcept {
++_stats.row_insertions;
_lru.push_front(entry);
}
void cache_tracker::insert(partition_version& pv) noexcept {
for (rows_entry& row : pv.partition().clustered_rows()) {
insert(row);
}
}
void cache_tracker::insert(partition_entry& pe) noexcept {
for (partition_version& pv : pe.versions_from_oldest()) {
insert(pv);
}
}
// Starts tracking a newly inserted cache entry: links all of its rows into
// the eviction LRU and updates partition statistics.
void cache_tracker::insert(cache_entry& entry) {
    insert(entry.partition());
    ++_stats.partition_insertions;
    ++_stats.partitions;
    // partition_range_cursor depends on this to detect invalidation of _end
    _region.allocator().invalidate_references();
}
void cache_tracker::on_partition_erase() {
@@ -164,6 +181,10 @@ void cache_tracker::on_partition_eviction() {
++_stats.partition_evictions;
}
// Accounts for a single row being evicted from the cache.
void cache_tracker::on_row_eviction() {
    _stats.row_evictions += 1;
}
// Accounts for a row needed by a read being found in the cache.
void cache_tracker::on_row_hit() {
    _stats.row_hits += 1;
}
@@ -444,10 +465,6 @@ void row_cache::on_row_miss() {
_tracker.on_row_miss();
}
void row_cache::on_row_insert() {
++_tracker._stats.row_insertions;
}
void row_cache::on_static_row_insert() {
++_tracker._stats.static_row_insertions;
}
@@ -553,7 +570,6 @@ class scanning_and_populating_reader final : public flat_mutation_reader::impl {
private:
flat_mutation_reader read_from_entry(cache_entry& ce) {
_cache.upgrade_entry(ce);
_cache._tracker.touch(ce);
_cache.on_partition_hit();
return ce.read(_cache, *_read_context);
}
@@ -736,7 +752,6 @@ row_cache::make_reader(schema_ptr s,
auto i = _partitions.lower_bound(pos, cmp);
if (i != _partitions.end() && !cmp(pos, i->position())) {
cache_entry& e = *i;
_tracker.touch(e);
upgrade_entry(e);
on_partition_hit();
return e.read(*this, *ctx);
@@ -820,7 +835,6 @@ cache_entry& row_cache::find_or_create(const dht::decorated_key& key, tombstone
_tracker.on_miss_already_populated();
cache_entry& e = *i;
e.partition().open_version(*e.schema(), &_tracker, phase).partition().apply(t);
_tracker.touch(e);
upgrade_entry(e);
});
}
@@ -1000,7 +1014,6 @@ future<> row_cache::update(external_updater eu, memtable& m) {
cache_entry& entry = *cache_i;
upgrade_entry(entry);
entry.partition().apply_to_incomplete(*_schema, std::move(mem_e.partition()), *mem_e.schema(), _tracker.region(), _tracker);
_tracker.touch(entry);
_tracker.on_partition_merge();
} else if (cache_i->continuous() || is_present(mem_e.key()) == partition_presence_checker_result::definitely_doesnt_exist) {
// Partition is absent in underlying. First, insert a neutral partition entry.
@@ -1020,12 +1033,10 @@ future<> row_cache::update_invalidating(external_updater eu, memtable& m) {
partition_presence_checker& is_present) {
if (cache_i != partitions_end() && cache_i->key().equal(*_schema, mem_e.key())) {
// FIXME: Invalidate only affected row ranges.
// This invalidates all row ranges and the static row, leaving only the partition tombstone continuous,
// which has to always be continuous.
// This invalidates all information about the partition.
cache_entry& e = *cache_i;
e.evict(_tracker);
upgrade_entry(e);
e.partition().apply_to_incomplete(*_schema, std::move(mem_e.partition()), *mem_e.schema(), _tracker.region(), _tracker);
e.evict(_tracker); // FIXME: evict gradually
e.on_evicted(_tracker);
} else {
_tracker.clear_continuity(*cache_i);
}
@@ -1041,7 +1052,11 @@ void row_cache::touch(const dht::decorated_key& dk) {
with_linearized_managed_bytes([&] {
auto i = _partitions.find(dk, cache_entry::compare(_schema));
if (i != _partitions.end()) {
_tracker.touch(*i);
for (partition_version& pv : i->partition().versions_from_oldest()) {
for (rows_entry& row : pv.partition().clustered_rows()) {
_tracker.touch(row);
}
}
}
});
});
@@ -1123,15 +1138,8 @@ cache_entry::cache_entry(cache_entry&& o) noexcept
, _key(std::move(o._key))
, _pe(std::move(o._pe))
, _flags(o._flags)
, _lru_link()
, _cache_link()
{
if (o._lru_link.is_linked()) {
auto prev = o._lru_link.prev_;
o._lru_link.unlink();
cache_tracker::lru_type::node_algorithms::link_after(prev, _lru_link.this_ptr());
}
{
using container_type = row_cache::partitions_type;
container_type::node_algorithms::replace_node(o._cache_link.this_ptr(), _cache_link.this_ptr());
@@ -1158,6 +1166,32 @@ void cache_entry::on_evicted(cache_tracker& tracker) noexcept {
tracker.on_partition_eviction();
}
// Called by the reclaimer when this row entry is picked for eviction from
// the LRU. Removes the row (or unlinks the last dummy), marks the affected
// range as discontinuous, and — when the partition becomes empty — evicts
// the whole cache entry.
void rows_entry::on_evicted(cache_tracker& tracker) noexcept {
    auto it = mutation_partition::rows_type::iterator_to(*this);
    if (is_last_dummy()) {
        // Every evictable partition entry must have a dummy entry at the end,
        // so don't remove it, just unlink from the LRU.
        // That dummy is linked in the LRU, because there may be partitions
        // with no regular rows, and we need to track them.
        _lru_link.unlink();
    } else {
        // Advance to the successor before deleting this entry; the successor
        // loses knowledge about the range that preceded it, so it must be
        // marked discontinuous.
        ++it;
        it->set_continuous(false);
        current_deleter<rows_entry>()(this);
        tracker.on_row_eviction();
    }
    // If only the last dummy remains, the partition holds no data anymore.
    // When its version is still referenced from a partition_entry (i.e. not
    // owned solely by a snapshot), evict the enclosing cache entry as well.
    if (mutation_partition::rows_type::is_only_member(*it)) {
        assert(it->is_last_dummy());
        partition_version& pv = partition_version::container_of(mutation_partition::container_of(
            mutation_partition::rows_type::container_of_only_member(*it)));
        if (pv.is_referenced_from_entry()) {
            cache_entry& ce = cache_entry::container_of(partition_entry::container_of(pv));
            ce.on_evicted(tracker);
        }
    }
}
flat_mutation_reader cache_entry::read(row_cache& rc, read_context& reader) {
auto source_and_phase = rc.snapshot_of(_key);
reader.enter_partition(_key, source_and_phase.snapshot, source_and_phase.phase);

View File

@@ -62,11 +62,7 @@ class lsa_manager;
class cache_entry {
// We need auto_unlink<> option on the _cache_link because when entry is
// evicted from cache via LRU we don't have a reference to the container
// and don't want to store it with each entry. As for the _lru_link, we
// have a global LRU, so technically we could not use auto_unlink<> on
// _lru_link, but it's convenient to do so too. We may also want to have
// multiple eviction spaces in the future and thus multiple LRUs.
using lru_link_type = bi::list_member_hook<bi::link_mode<bi::auto_unlink>>;
// and don't want to store it with each entry.
using cache_link_type = bi::set_member_hook<bi::link_mode<bi::auto_unlink>>;
schema_ptr _schema;
@@ -77,7 +73,6 @@ class cache_entry {
bool _continuous : 1;
bool _dummy_entry : 1;
} _flags{};
lru_link_type _lru_link;
cache_link_type _cache_link;
friend class size_calculator;
@@ -189,8 +184,8 @@ public:
// Tracks accesses and performs eviction of cache entries.
class cache_tracker final {
public:
using lru_type = bi::list<cache_entry,
bi::member_hook<cache_entry, cache_entry::lru_link_type, &cache_entry::_lru_link>,
using lru_type = bi::list<rows_entry,
bi::member_hook<rows_entry, rows_entry::lru_link_type, &rows_entry::_lru_link>,
bi::constant_time_size<false>>; // we need this to have bi::auto_unlink on hooks.
public:
friend class row_cache;
@@ -210,6 +205,7 @@ public:
uint64_t partition_merges;
uint64_t partition_evictions;
uint64_t partition_removals;
uint64_t row_evictions;
uint64_t partitions;
uint64_t mispopulations;
uint64_t underlying_recreations;
@@ -235,14 +231,18 @@ public:
cache_tracker();
~cache_tracker();
void clear();
void touch(cache_entry&);
void touch(rows_entry&);
void insert(cache_entry&);
void insert(partition_entry&) noexcept;
void insert(partition_version&) noexcept;
void insert(rows_entry&) noexcept;
void clear_continuity(cache_entry& ce);
void on_partition_erase();
void on_partition_merge();
void on_partition_hit();
void on_partition_miss();
void on_partition_eviction();
void on_row_eviction();
void on_row_hit();
void on_row_miss();
void on_miss_already_populated();
@@ -352,7 +352,6 @@ private:
void on_partition_miss();
void on_row_hit();
void on_row_miss();
void on_row_insert();
void on_static_row_insert();
void on_mispopulate();
void upgrade_entry(cache_entry&);

View File

@@ -58,7 +58,6 @@ public:
{
nest n;
std::cout << prefix() << "sizeof(decorated_key) = " << sizeof(dht::decorated_key) << "\n";
std::cout << prefix() << "sizeof(lru_link_type) = " << sizeof(cache_entry::lru_link_type) << "\n";
std::cout << prefix() << "sizeof(cache_link_type) = " << sizeof(cache_entry::cache_link_type) << "\n";
print_mutation_partition_size();
}
@@ -66,6 +65,7 @@ public:
std::cout << "\n";
std::cout << prefix() << "sizeof(rows_entry) = " << sizeof(rows_entry) << "\n";
std::cout << prefix() << "sizeof(lru_link_type) = " << sizeof(rows_entry::lru_link_type) << "\n";
std::cout << prefix() << "sizeof(deletable_row) = " << sizeof(deletable_row) << "\n";
std::cout << prefix() << "sizeof(row) = " << sizeof(row) << "\n";
std::cout << prefix() << "sizeof(atomic_cell_or_collection) = " << sizeof(atomic_cell_or_collection) << "\n";

View File

@@ -275,46 +275,6 @@ SEASTAR_TEST_CASE(test_schema_upgrade_preserves_continuity) {
});
}
SEASTAR_TEST_CASE(test_full_eviction_marks_affected_range_as_discontinuous) {
return seastar::async([] {
cache_tracker tracker;
logalloc::region& r = tracker.region();
with_allocator(r.allocator(), [&] {
logalloc::reclaim_lock l(r);
simple_schema table;
auto&& s = *table.schema();
auto ck1 = table.make_ckey(1);
auto ck2 = table.make_ckey(2);
auto e = partition_entry::make_evictable(s, mutation_partition(table.schema()));
auto t = table.new_tombstone();
auto&& p1 = e.open_version(s, &tracker).partition();
p1.clustered_row(s, ck2);
p1.apply(t);
auto snap1 = e.read(r, table.schema(), &tracker);
auto&& p2 = e.open_version(s, &tracker).partition();
p2.clustered_row(s, ck1);
auto snap2 = e.read(r, table.schema(), &tracker);
e.evict(tracker);
BOOST_REQUIRE(snap1->squashed().fully_discontinuous(s, position_range::all_clustered_rows()));
BOOST_REQUIRE(snap2->squashed().fully_discontinuous(s, position_range::all_clustered_rows()));
BOOST_REQUIRE(!snap1->squashed().static_row_continuous());
BOOST_REQUIRE(!snap2->squashed().static_row_continuous());
BOOST_REQUIRE_EQUAL(snap1->squashed().partition_tombstone(), t);
BOOST_REQUIRE_EQUAL(snap2->squashed().partition_tombstone(), t);
});
});
}
SEASTAR_TEST_CASE(test_eviction_with_active_reader) {
return seastar::async([] {
cache_tracker tracker;
@@ -347,11 +307,12 @@ SEASTAR_TEST_CASE(test_eviction_with_active_reader) {
e.evict(tracker);
cursor.maybe_refresh();
do {
BOOST_REQUIRE(!cursor.continuous());
BOOST_REQUIRE(cursor.dummy());
} while (cursor.next());
{
logalloc::reclaim_lock rl(r);
cursor.maybe_refresh();
auto mp = cursor.read_partition();
assert_that(table.schema(), mp).is_equal_to(s, (m1 + m2).partition());
}
});
});
}
@@ -696,16 +657,17 @@ SEASTAR_TEST_CASE(test_partition_snapshot_row_cursor) {
{
logalloc::reclaim_lock rl(r);
BOOST_REQUIRE(!cur.maybe_refresh());
BOOST_REQUIRE(eq(cur.position(), table.make_ckey(5)));
BOOST_REQUIRE(cur.maybe_refresh());
BOOST_REQUIRE(eq(cur.position(), table.make_ckey(4)));
BOOST_REQUIRE(cur.continuous());
}
{
logalloc::reclaim_lock rl(r);
BOOST_REQUIRE(!cur.advance_to(table.make_ckey(4)));
BOOST_REQUIRE(eq(cur.position(), table.make_ckey(5)));
BOOST_REQUIRE(cur.advance_to(table.make_ckey(4)));
BOOST_REQUIRE(eq(cur.position(), table.make_ckey(4)));
BOOST_REQUIRE(cur.continuous());
BOOST_REQUIRE(cur.next());
}
{

View File

@@ -713,6 +713,8 @@ SEASTAR_TEST_CASE(test_eviction) {
while (tracker.partitions() > 0) {
logalloc::shard_tracker().reclaim(100);
}
BOOST_REQUIRE_EQUAL(tracker.get_stats().partition_evictions, keys.size());
});
}