The latter is pretty popular test/lib header that disseminates the
former one over whole lot of unit tests. The former, in turn, naturally
includes sstables.hh thus making tons of unrelated tests depend on
sstables class unused by them.
However, simple removal doesn't work, because of the local_shard_only bool
class definition in sstable_utils.hh used in simple_schema.hh. This
thing, in turn, is used in keys making helpers that don't belong to
sstable utils, so these are moved into simple_schema as well.
When done, this affects the mutation_source_test.hh, which needs the
local_shard_only bool class (and helps spreading the sstables.hh
throughout more unrelated tests) and a bunch of .cc test sources that
used sstable_utils.hh to indirectly include various headers of their
demand.
After patching, sstables.hh is pulled in by half as many tests. As a side
effect, sstables_manager.hh is also depended on by half as many tests.
Continuation of 9bdea110a6
Signed-off-by: Pavel Emelyanov <xemul@scylladb.com>
Closes #12240
86 lines
4.2 KiB
C++
86 lines
4.2 KiB
C++
/*
|
|
* Copyright (C) 2015-present ScyllaDB
|
|
*/
|
|
|
|
/*
|
|
* SPDX-License-Identifier: AGPL-3.0-or-later
|
|
*/
|
|
|
|
#pragma once
|
|
|
|
#include "readers/flat_mutation_reader_fwd.hh"
|
|
#include "test/lib/simple_schema.hh"
|
|
|
|
// Factory supplied by each storage backend under test: builds a mutation_source
// holding the given mutations for schema `s` (e.g. by writing them to the
// backend and reading them back — confirm against the individual callers).
using populate_fn = std::function<mutation_source(schema_ptr s, const std::vector<mutation>&)>;
// As populate_fn, but additionally takes a gc_clock::time_point
// (presumably the query/compaction time used during population — verify
// against the implementations).
using populate_fn_ex = std::function<mutation_source(schema_ptr s, const std::vector<mutation>&, gc_clock::time_point)>;
|
|
|
|
// Exercises a mutation_source implementation (obtained via `populate`)
// against the common reader-conformance suite.
// `with_partition_range_forwarding` presumably gates the fast-forwarding
// portions of the suite — confirm in the definition.
// Must be run in a seastar thread
void run_mutation_source_tests(populate_fn populate, bool with_partition_range_forwarding = true);
void run_mutation_source_tests(populate_fn_ex populate, bool with_partition_range_forwarding = true);
// Judging by the names, the two variants below split the suite into
// forward-only and reverse-reading runs respectively.
void run_mutation_source_tests_plain(populate_fn_ex populate, bool with_partition_range_forwarding = true);
void run_mutation_source_tests_reverse(populate_fn_ex populate, bool with_partition_range_forwarding = true);
|
|
|
|
// Tag passed to the for_each_mutation_pair() callback telling it whether
// the pair of mutations it receives compares equal.
enum are_equal { no, yes };
|
|
|
|
// Calls the provided function on mutation pairs, equal and not equal. Is supposed
// to exercise all potential ways two mutations may differ. The are_equal
// argument tells the callback whether the given pair is equal.
void for_each_mutation_pair(std::function<void(const mutation&, const mutation&, are_equal)>);
|
|
|
|
// Calls the provided function on mutations. Is supposed to exercise as many differences as possible.
void for_each_mutation(std::function<void(const mutation&)>);
|
|
|
|
// Returns true if mutations in schema s1 can be upgraded to s2.
|
|
inline bool can_upgrade_schema(schema_ptr from, schema_ptr to) {
|
|
return from->is_counter() == to->is_counter();
|
|
}
|
|
|
|
// Merge mutations that have the same key.
// The returned vector has mutations with unique keys.
// run_mutation_source_tests() might pass in multiple mutations for the same key.
// Some tests need these deduplicated, which is what this method does.
std::vector<mutation> squash_mutations(std::vector<mutation> mutations);
|
|
|
|
// Produces randomized mutations for property-style tests.
// Implemented via the pimpl idiom (`class impl` below) so this header stays
// free of the generator's heavy dependencies.
class random_mutation_generator {
    class impl;
    std::unique_ptr<impl> _impl;
public:
    struct generate_counters_tag { };
    // generate_counters::yes makes the generator emit counter mutations.
    using generate_counters = bool_class<generate_counters_tag>;
    using generate_uncompactable = bool_class<class generate_uncompactable_tag>;

    // With generate_uncompactable::yes, the mutation will be uncompactable, that
    // is no higher level tombstone will cover lower level tombstones and no
    // tombstone will cover data, i.e. compacting the mutation will not result
    // in any changes.
    // When seed_opt is not provided, the implementation chooses the seed —
    // presumably randomly; confirm in the definition.
    explicit random_mutation_generator(generate_counters, local_shard_only lso = local_shard_only::yes,
            generate_uncompactable uc = generate_uncompactable::no, std::optional<uint32_t> seed_opt = std::nullopt, const char* ks_name="ks", const char* cf_name="cf");
    // Convenience overload: explicit seed, defaults for everything else.
    random_mutation_generator(generate_counters gc, uint32_t seed)
        : random_mutation_generator(gc, local_shard_only::yes, generate_uncompactable::no, seed) {}
    // Out-of-line destructor is required because `impl` is incomplete here.
    ~random_mutation_generator();
    // Generates a single random mutation.
    mutation operator()();
    // Generates n mutations sharing the same schema and sorted by their decorated keys.
    std::vector<mutation> operator()(size_t n);
    // The schema all generated mutations belong to.
    schema_ptr schema() const;
    clustering_key make_random_key();
    range_tombstone make_random_range_tombstone();
    std::vector<dht::decorated_key> make_partition_keys(size_t n);
    std::vector<query::clustering_range> make_random_ranges(unsigned n_ranges);
    // Sets the number of distinct clustering keys which will be used in generated mutations.
    void set_key_cardinality(size_t);
};
|
|
|
|
// Returns a bytes value of the given size (contents are determined by the
// definition — not visible from this header).
bytes make_blob(size_t blob_size);
|
|
|
|
// Invokes the callback with a "before" (schema, mutations) pair and an
// "after" (schema, mutations) pair — presumably one call per exercised
// schema change; confirm in the definition.
void for_each_schema_change(std::function<void(schema_ptr, const std::vector<mutation>&,
        schema_ptr, const std::vector<mutation>&)>);
|
|
|
|
// Checks that `tested` produces output equivalent to `authority`.
void compare_readers(const schema&, flat_mutation_reader_v2 authority, flat_mutation_reader_v2 tested);
// As above, but fast-forwards both readers through `fwd_ranges` while comparing.
void compare_readers(const schema&, flat_mutation_reader_v2 authority, flat_mutation_reader_v2 tested, const std::vector<position_range>& fwd_ranges);
|
|
|
|
// Forward `r` to each range in `fwd_ranges` and consume all fragments produced by `r` in these ranges.
// Build a mutation out of these fragments.
//
// Assumes that for each subsequent `r1`, `r2` in `fwd_ranges`, `r1.end() <= r2.start()`.
// Must be run in a seastar::thread.
mutation forwardable_reader_to_mutation(flat_mutation_reader_v2 r, const std::vector<position_range>& fwd_ranges);
|