treewide: change assert() to SCYLLA_ASSERT()
assert() is traditionally disabled in release builds (via NDEBUG), but not in
ScyllaDB. This hasn't caused problems so far, but the latest abseil
release includes a commit [1] that causes a regression of roughly 1000
instructions per operation when NDEBUG is not defined.
Clearly, we must move towards a build system where NDEBUG is defined in
release builds. But we can't just define it blindly without vetting
all the assert() calls, as some were written with the expectation that
they are enabled in release mode.
To solve the conundrum, change all assert() calls to a new SCYLLA_ASSERT()
macro in utils/assert.hh. This macro is always defined and is not conditional
on NDEBUG, so we can later (after vetting Seastar) enable NDEBUG in release
mode.
[1] 66ef711d68
Closes scylladb/scylladb#20006
This commit is contained in:
@@ -30,6 +30,7 @@
|
||||
#include "conditions.hh"
|
||||
#include "cql3/util.hh"
|
||||
#include <optional>
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/overloaded_functor.hh"
|
||||
#include <seastar/json/json_elements.hh>
|
||||
#include <boost/algorithm/cxx11/any_of.hpp>
|
||||
@@ -85,7 +86,7 @@ static map_type attrs_type() {
|
||||
|
||||
static const column_definition& attrs_column(const schema& schema) {
|
||||
const column_definition* cdef = schema.get_column_definition(bytes(executor::ATTRS_COLUMN_NAME));
|
||||
assert(cdef);
|
||||
SCYLLA_ASSERT(cdef);
|
||||
return *cdef;
|
||||
}
|
||||
|
||||
@@ -932,7 +933,7 @@ static void validate_attribute_definitions(const rjson::value& attribute_definit
|
||||
}
|
||||
|
||||
static future<executor::request_return_type> create_table_on_shard0(tracing::trace_state_ptr trace_state, rjson::value request, service::storage_proxy& sp, service::migration_manager& mm, gms::gossiper& gossiper) {
|
||||
assert(this_shard_id() == 0);
|
||||
SCYLLA_ASSERT(this_shard_id() == 0);
|
||||
|
||||
// We begin by parsing and validating the content of the CreateTable
|
||||
// command. We can't inspect the current database schema at this point
|
||||
@@ -1678,7 +1679,7 @@ future<executor::request_return_type> rmw_operation::execute(service::storage_pr
|
||||
}
|
||||
} else if (_write_isolation != write_isolation::LWT_ALWAYS) {
|
||||
std::optional<mutation> m = apply(nullptr, api::new_timestamp());
|
||||
assert(m); // !needs_read_before_write, so apply() did not check a condition
|
||||
SCYLLA_ASSERT(m); // !needs_read_before_write, so apply() did not check a condition
|
||||
return proxy.mutate(std::vector<mutation>{std::move(*m)}, db::consistency_level::LOCAL_QUORUM, executor::default_timeout(), trace_state, std::move(permit), db::allow_per_partition_rate_limit::yes).then([this] () mutable {
|
||||
return rmw_operation_return(std::move(_return_attributes));
|
||||
});
|
||||
@@ -3845,7 +3846,7 @@ static future<executor::request_return_type> do_query(service::storage_proxy& pr
|
||||
}
|
||||
|
||||
static dht::token token_for_segment(int segment, int total_segments) {
|
||||
assert(total_segments > 1 && segment >= 0 && segment < total_segments);
|
||||
SCYLLA_ASSERT(total_segments > 1 && segment >= 0 && segment < total_segments);
|
||||
uint64_t delta = std::numeric_limits<uint64_t>::max() / total_segments;
|
||||
return dht::token::from_int64(std::numeric_limits<int64_t>::min() + delta * segment);
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
#include "seastarx.hh"
|
||||
#include "error.hh"
|
||||
#include "service/qos/service_level_controller.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/rjson.hh"
|
||||
#include "auth.hh"
|
||||
#include <cctype>
|
||||
@@ -405,7 +406,7 @@ future<executor::request_return_type> server::handle_api_request(std::unique_ptr
|
||||
++_executor._stats.requests_blocked_memory;
|
||||
}
|
||||
auto units = co_await std::move(units_fut);
|
||||
assert(req->content_stream);
|
||||
SCYLLA_ASSERT(req->content_stream);
|
||||
chunked_content content = co_await util::read_entire_stream(*req->content_stream);
|
||||
auto username = co_await verify_signature(*req, content);
|
||||
|
||||
|
||||
@@ -35,6 +35,7 @@
|
||||
#include "mutation/mutation.hh"
|
||||
#include "types/types.hh"
|
||||
#include "types/map.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/rjson.hh"
|
||||
#include "utils/big_decimal.hh"
|
||||
#include "cql3/selection/selection.hh"
|
||||
@@ -551,7 +552,7 @@ static future<> scan_table_ranges(
|
||||
expiration_service::stats& expiration_stats)
|
||||
{
|
||||
const schema_ptr& s = scan_ctx.s;
|
||||
assert (partition_ranges.size() == 1); // otherwise issue #9167 will cause incorrect results.
|
||||
SCYLLA_ASSERT (partition_ranges.size() == 1); // otherwise issue #9167 will cause incorrect results.
|
||||
auto p = service::pager::query_pagers::pager(proxy, s, scan_ctx.selection, *scan_ctx.query_state_ptr,
|
||||
*scan_ctx.query_options, scan_ctx.command, std::move(partition_ranges), nullptr);
|
||||
while (!p->is_exhausted()) {
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
#include <seastar/http/exception.hh>
|
||||
#include "sstables/sstables.hh"
|
||||
#include "sstables/metadata_collector.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/estimated_histogram.hh"
|
||||
#include <algorithm>
|
||||
#include "db/system_keyspace.hh"
|
||||
@@ -103,7 +104,7 @@ class autocompaction_toggle_guard {
|
||||
replica::database& _db;
|
||||
public:
|
||||
autocompaction_toggle_guard(replica::database& db) : _db(db) {
|
||||
assert(this_shard_id() == 0);
|
||||
SCYLLA_ASSERT(this_shard_id() == 0);
|
||||
if (!_db._enable_autocompaction_toggle) {
|
||||
throw std::runtime_error("Autocompaction toggle is busy");
|
||||
}
|
||||
@@ -112,7 +113,7 @@ public:
|
||||
autocompaction_toggle_guard(const autocompaction_toggle_guard&) = delete;
|
||||
autocompaction_toggle_guard(autocompaction_toggle_guard&&) = default;
|
||||
~autocompaction_toggle_guard() {
|
||||
assert(this_shard_id() == 0);
|
||||
SCYLLA_ASSERT(this_shard_id() == 0);
|
||||
_db._enable_autocompaction_toggle = true;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -16,6 +16,7 @@
|
||||
#include "mutation/canonical_mutation.hh"
|
||||
#include "schema/schema_fwd.hh"
|
||||
#include "timestamp.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/exponential_backoff_retry.hh"
|
||||
#include "cql3/query_processor.hh"
|
||||
#include "cql3/statements/create_table_statement.hh"
|
||||
@@ -68,7 +69,7 @@ static future<> create_legacy_metadata_table_if_missing_impl(
|
||||
cql3::query_processor& qp,
|
||||
std::string_view cql,
|
||||
::service::migration_manager& mm) {
|
||||
assert(this_shard_id() == 0); // once_among_shards makes sure a function is executed on shard 0 only
|
||||
SCYLLA_ASSERT(this_shard_id() == 0); // once_among_shards makes sure a function is executed on shard 0 only
|
||||
|
||||
auto db = qp.db();
|
||||
auto parsed_statement = cql3::query_processor::parse_statement(cql);
|
||||
|
||||
@@ -36,6 +36,7 @@
|
||||
#include "service/migration_manager.hh"
|
||||
#include "service/raft/raft_group0_client.hh"
|
||||
#include "timestamp.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/class_registrator.hh"
|
||||
#include "locator/abstract_replication_strategy.hh"
|
||||
#include "data_dictionary/keyspace_metadata.hh"
|
||||
@@ -194,7 +195,7 @@ service::service(
|
||||
}
|
||||
|
||||
future<> service::create_legacy_keyspace_if_missing(::service::migration_manager& mm) const {
|
||||
assert(this_shard_id() == 0); // once_among_shards makes sure a function is executed on shard 0 only
|
||||
SCYLLA_ASSERT(this_shard_id() == 0); // once_among_shards makes sure a function is executed on shard 0 only
|
||||
auto db = _qp.db();
|
||||
|
||||
while (!db.has_keyspace(meta::legacy::AUTH_KS)) {
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
#include <boost/range/iterator_range.hpp>
|
||||
|
||||
#include "bytes.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/managed_bytes.hh"
|
||||
#include <seastar/core/simple-stream.hh>
|
||||
#include <seastar/core/loop.hh>
|
||||
@@ -269,7 +270,7 @@ public:
|
||||
|
||||
// Call only when is_linearized()
|
||||
bytes_view view() const {
|
||||
assert(is_linearized());
|
||||
SCYLLA_ASSERT(is_linearized());
|
||||
if (!_current) {
|
||||
return bytes_view();
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <vector>
|
||||
#include "row_cache.hh"
|
||||
#include "mutation/mutation_fragment.hh"
|
||||
@@ -283,7 +284,7 @@ future<> cache_mutation_reader::process_static_row() {
|
||||
return ensure_underlying().then([this] {
|
||||
return (*_underlying)().then([this] (mutation_fragment_v2_opt&& sr) {
|
||||
if (sr) {
|
||||
assert(sr->is_static_row());
|
||||
SCYLLA_ASSERT(sr->is_static_row());
|
||||
maybe_add_to_cache(sr->as_static_row());
|
||||
push_mutation_fragment(std::move(*sr));
|
||||
}
|
||||
@@ -382,7 +383,7 @@ future<> cache_mutation_reader::do_fill_buffer() {
|
||||
if (_state == state::reading_from_underlying) {
|
||||
return read_from_underlying();
|
||||
}
|
||||
// assert(_state == state::reading_from_cache)
|
||||
// SCYLLA_ASSERT(_state == state::reading_from_cache)
|
||||
return _lsa_manager.run_in_read_section([this] {
|
||||
auto next_valid = _next_row.iterators_valid();
|
||||
clogger.trace("csm {}: reading_from_cache, range=[{}, {}), next={}, valid={}, rt={}", fmt::ptr(this), _lower_bound,
|
||||
@@ -990,7 +991,7 @@ void cache_mutation_reader::offer_from_underlying(mutation_fragment_v2&& mf) {
|
||||
maybe_add_to_cache(mf.as_clustering_row());
|
||||
add_clustering_row_to_buffer(std::move(mf));
|
||||
} else {
|
||||
assert(mf.is_range_tombstone_change());
|
||||
SCYLLA_ASSERT(mf.is_range_tombstone_change());
|
||||
auto& chg = mf.as_range_tombstone_change();
|
||||
if (maybe_add_to_cache(chg)) {
|
||||
add_to_buffer(std::move(mf).as_range_tombstone_change());
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "mutation/mutation.hh"
|
||||
|
||||
/*
|
||||
@@ -246,7 +247,7 @@ void inspect_mutation(const mutation& m, V& v) {
|
||||
|
||||
if (r.deleted_at()) {
|
||||
auto t = r.deleted_at().tomb();
|
||||
assert(t.timestamp != api::missing_timestamp);
|
||||
SCYLLA_ASSERT(t.timestamp != api::missing_timestamp);
|
||||
v.clustered_row_delete(cr.key(), t);
|
||||
if (v.finished()) {
|
||||
return;
|
||||
@@ -255,7 +256,7 @@ void inspect_mutation(const mutation& m, V& v) {
|
||||
}
|
||||
|
||||
for (auto& rt: p.row_tombstones()) {
|
||||
assert(rt.tombstone().tomb.timestamp != api::missing_timestamp);
|
||||
SCYLLA_ASSERT(rt.tombstone().tomb.timestamp != api::missing_timestamp);
|
||||
v.range_delete(rt.tombstone());
|
||||
if (v.finished()) {
|
||||
return;
|
||||
|
||||
@@ -26,6 +26,7 @@
|
||||
#include "gms/inet_address.hh"
|
||||
#include "gms/gossiper.hh"
|
||||
#include "gms/feature_service.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/error_injection.hh"
|
||||
#include "utils/UUID_gen.hh"
|
||||
#include "utils/to_string.hh"
|
||||
@@ -107,8 +108,8 @@ stream_id::stream_id(dht::token token, size_t vnode_index)
|
||||
copy_int_to_bytes(dht::token::to_int64(token), 0, _value);
|
||||
copy_int_to_bytes(low_qword, sizeof(int64_t), _value);
|
||||
// not a hot code path. make sure we did not mess up the shifts and masks.
|
||||
assert(version() == version_1);
|
||||
assert(index() == vnode_index);
|
||||
SCYLLA_ASSERT(version() == version_1);
|
||||
SCYLLA_ASSERT(index() == vnode_index);
|
||||
}
|
||||
|
||||
stream_id::stream_id(bytes b)
|
||||
@@ -126,7 +127,7 @@ bool stream_id::is_set() const {
|
||||
}
|
||||
|
||||
static int64_t bytes_to_int64(bytes_view b, size_t offset) {
|
||||
assert(b.size() >= offset + sizeof(int64_t));
|
||||
SCYLLA_ASSERT(b.size() >= offset + sizeof(int64_t));
|
||||
int64_t res;
|
||||
std::copy_n(b.begin() + offset, sizeof(int64_t), reinterpret_cast<int8_t *>(&res));
|
||||
return net::ntoh(res);
|
||||
@@ -411,7 +412,7 @@ future<cdc::generation_id> generation_service::legacy_make_new_generation(const
|
||||
|
||||
// Our caller should ensure that there are normal tokens in the token ring.
|
||||
auto normal_token_owners = tmptr->count_normal_token_owners();
|
||||
assert(normal_token_owners);
|
||||
SCYLLA_ASSERT(normal_token_owners);
|
||||
|
||||
if (_feature_service.cdc_generations_v2) {
|
||||
cdc_log.info("Inserting new generation data at UUID {}", uuid);
|
||||
@@ -811,7 +812,7 @@ future<> generation_service::stop() {
|
||||
}
|
||||
|
||||
generation_service::~generation_service() {
|
||||
assert(_stopped);
|
||||
SCYLLA_ASSERT(_stopped);
|
||||
}
|
||||
|
||||
future<> generation_service::after_join(std::optional<cdc::generation_id>&& startup_gen_id) {
|
||||
|
||||
25
cdc/log.cc
25
cdc/log.cc
@@ -32,6 +32,7 @@
|
||||
#include "cql3/statements/select_statement.hh"
|
||||
#include "cql3/untyped_result_set.hh"
|
||||
#include "log.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/rjson.hh"
|
||||
#include "utils/UUID_gen.hh"
|
||||
#include "utils/managed_bytes.hh"
|
||||
@@ -148,7 +149,7 @@ public:
|
||||
_ctxt._migration_notifier.register_listener(this);
|
||||
}
|
||||
~impl() {
|
||||
assert(_stopped);
|
||||
SCYLLA_ASSERT(_stopped);
|
||||
}
|
||||
|
||||
future<> stop() {
|
||||
@@ -455,7 +456,7 @@ schema_ptr get_base_table(const replica::database& db, sstring_view ks_name,std:
|
||||
}
|
||||
|
||||
seastar::sstring base_name(std::string_view log_name) {
|
||||
assert(is_log_name(log_name));
|
||||
SCYLLA_ASSERT(is_log_name(log_name));
|
||||
return sstring(log_name.data(), log_name.size() - cdc_log_suffix.size());
|
||||
}
|
||||
|
||||
@@ -655,7 +656,7 @@ private:
|
||||
|
||||
template<>
|
||||
void collection_iterator<std::pair<managed_bytes_view, managed_bytes_view>>::parse() {
|
||||
assert(_rem > 0);
|
||||
SCYLLA_ASSERT(_rem > 0);
|
||||
_next = _v;
|
||||
auto k = read_collection_key(_next);
|
||||
auto v = read_collection_value_nonnull(_next);
|
||||
@@ -664,7 +665,7 @@ void collection_iterator<std::pair<managed_bytes_view, managed_bytes_view>>::par
|
||||
|
||||
template<>
|
||||
void collection_iterator<managed_bytes_view>::parse() {
|
||||
assert(_rem > 0);
|
||||
SCYLLA_ASSERT(_rem > 0);
|
||||
_next = _v;
|
||||
auto k = read_collection_key(_next);
|
||||
_current = k;
|
||||
@@ -672,7 +673,7 @@ void collection_iterator<managed_bytes_view>::parse() {
|
||||
|
||||
template<>
|
||||
void collection_iterator<managed_bytes_view_opt>::parse() {
|
||||
assert(_rem > 0);
|
||||
SCYLLA_ASSERT(_rem > 0);
|
||||
_next = _v;
|
||||
auto k = read_collection_value_nonnull(_next);
|
||||
_current = k;
|
||||
@@ -1065,7 +1066,7 @@ struct process_row_visitor {
|
||||
void update_row_state(const column_definition& cdef, managed_bytes_opt value) {
|
||||
if (!_row_state) {
|
||||
// static row always has a valid state, so this must be a clustering row missing
|
||||
assert(_base_ck);
|
||||
SCYLLA_ASSERT(_base_ck);
|
||||
auto [it, _] = _clustering_row_states.try_emplace(*_base_ck);
|
||||
_row_state = &it->second;
|
||||
}
|
||||
@@ -1496,12 +1497,12 @@ public:
|
||||
}
|
||||
|
||||
void generate_image(operation op, const clustering_key* ck, const one_kind_column_set* affected_columns) {
|
||||
assert(op == operation::pre_image || op == operation::post_image);
|
||||
SCYLLA_ASSERT(op == operation::pre_image || op == operation::post_image);
|
||||
|
||||
// assert that post_image is always full
|
||||
assert(!(op == operation::post_image && affected_columns));
|
||||
// SCYLLA_ASSERT that post_image is always full
|
||||
SCYLLA_ASSERT(!(op == operation::post_image && affected_columns));
|
||||
|
||||
assert(_builder);
|
||||
SCYLLA_ASSERT(_builder);
|
||||
|
||||
const auto kind = ck ? column_kind::regular_column : column_kind::static_column;
|
||||
|
||||
@@ -1571,7 +1572,7 @@ public:
|
||||
// TODO: is pre-image data based on query enough. We only have actual column data. Do we need
|
||||
// more details like tombstones/ttl? Probably not but keep in mind.
|
||||
void process_change(const mutation& m) override {
|
||||
assert(_builder);
|
||||
SCYLLA_ASSERT(_builder);
|
||||
process_change_visitor v {
|
||||
._touched_parts = _touched_parts,
|
||||
._builder = *_builder,
|
||||
@@ -1584,7 +1585,7 @@ public:
|
||||
}
|
||||
|
||||
void end_record() override {
|
||||
assert(_builder);
|
||||
SCYLLA_ASSERT(_builder);
|
||||
_builder->end_record();
|
||||
}
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
|
||||
#include <boost/intrusive/unordered_set.hpp>
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/small_vector.hh"
|
||||
#include "mutation/mutation_partition.hh"
|
||||
#include "utils/xx_hasher.hh"
|
||||
@@ -342,7 +343,7 @@ public:
|
||||
{ }
|
||||
|
||||
~cell_locker() {
|
||||
assert(_partitions.empty());
|
||||
SCYLLA_ASSERT(_partitions.empty());
|
||||
}
|
||||
|
||||
void set_schema(schema_ptr s) {
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "schema/schema_fwd.hh"
|
||||
#include "mutation/position_in_partition.hh"
|
||||
#include <boost/icl/interval_set.hpp>
|
||||
@@ -87,8 +88,8 @@ public:
|
||||
}
|
||||
};
|
||||
static interval::type make_interval(const schema& s, const position_range& r) {
|
||||
assert(r.start().has_clustering_key());
|
||||
assert(r.end().has_clustering_key());
|
||||
SCYLLA_ASSERT(r.start().has_clustering_key());
|
||||
SCYLLA_ASSERT(r.end().has_clustering_key());
|
||||
return interval::right_open(
|
||||
position_in_partition_with_schema(s.shared_from_this(), r.start()),
|
||||
position_in_partition_with_schema(s.shared_from_this(), r.end()));
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "schema/schema.hh"
|
||||
#include "query-request.hh"
|
||||
#include "mutation/mutation_fragment.hh"
|
||||
@@ -249,7 +250,7 @@ public:
|
||||
auto range_end = position_in_partition_view::for_range_end(rng);
|
||||
if (!less(rt.position(), range_start) && !less(range_end, rt.end_position())) {
|
||||
// Fully enclosed by this range.
|
||||
assert(!first);
|
||||
SCYLLA_ASSERT(!first);
|
||||
return std::move(rt);
|
||||
}
|
||||
auto this_range_rt = rt;
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "types/collection.hh"
|
||||
#include "types/user.hh"
|
||||
#include "concrete_types.hh"
|
||||
@@ -391,7 +392,7 @@ deserialize_collection_mutation(collection_mutation_input_stream& in, F&& read_k
|
||||
ret.cells.push_back(read_kv(in));
|
||||
}
|
||||
|
||||
assert(in.empty());
|
||||
SCYLLA_ASSERT(in.empty());
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
@@ -46,6 +46,7 @@
|
||||
#include "mutation_writer/partition_based_splitting_writer.hh"
|
||||
#include "mutation/mutation_source_metadata.hh"
|
||||
#include "mutation/mutation_fragment_stream_validator.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/error_injection.hh"
|
||||
#include "utils/pretty_printers.hh"
|
||||
#include "readers/multi_range.hh"
|
||||
@@ -283,7 +284,7 @@ private:
|
||||
|
||||
utils::observer<> make_stop_request_observer(utils::observable<>& sro) {
|
||||
return sro.observe([this] () mutable {
|
||||
assert(!_unclosed_partition);
|
||||
SCYLLA_ASSERT(!_unclosed_partition);
|
||||
consume_end_of_stream();
|
||||
});
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@
|
||||
#include <seastar/coroutine/maybe_yield.hh>
|
||||
#include "sstables/exceptions.hh"
|
||||
#include "sstables/sstable_directory.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/error_injection.hh"
|
||||
#include "utils/UUID_gen.hh"
|
||||
#include "db/system_keyspace.hh"
|
||||
@@ -958,7 +959,7 @@ compaction_manager::compaction_manager(tasks::task_manager& tm)
|
||||
compaction_manager::~compaction_manager() {
|
||||
// Assert that compaction manager was explicitly stopped, if started.
|
||||
// Otherwise, fiber(s) will be alive after the object is stopped.
|
||||
assert(_state == state::none || _state == state::stopped);
|
||||
SCYLLA_ASSERT(_state == state::none || _state == state::stopped);
|
||||
}
|
||||
|
||||
future<> compaction_manager::update_throughput(uint32_t value_mbs) {
|
||||
@@ -998,7 +999,7 @@ void compaction_manager::register_metrics() {
|
||||
}
|
||||
|
||||
void compaction_manager::enable() {
|
||||
assert(_state == state::none || _state == state::disabled);
|
||||
SCYLLA_ASSERT(_state == state::none || _state == state::disabled);
|
||||
_state = state::enabled;
|
||||
_compaction_submission_timer.arm_periodic(periodic_compaction_submission_interval());
|
||||
_waiting_reevalution = postponed_compactions_reevaluation();
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "sstables/sstables.hh"
|
||||
#include "size_tiered_compaction_strategy.hh"
|
||||
#include "interval.hh"
|
||||
@@ -311,7 +312,7 @@ public:
|
||||
|
||||
template <typename T>
|
||||
static std::vector<sstables::shared_sstable> overlapping(const schema& s, const std::vector<sstables::shared_sstable>& candidates, const T& others) {
|
||||
assert(!candidates.empty());
|
||||
SCYLLA_ASSERT(!candidates.empty());
|
||||
/*
|
||||
* Picking each sstable from others that overlap one of the sstable of candidates is not enough
|
||||
* because you could have the following situation:
|
||||
@@ -350,7 +351,7 @@ public:
|
||||
*/
|
||||
template <typename T>
|
||||
static std::vector<sstables::shared_sstable> overlapping(const schema& s, dht::token start, dht::token end, const T& sstables) {
|
||||
assert(start <= end);
|
||||
SCYLLA_ASSERT(start <= end);
|
||||
|
||||
std::vector<sstables::shared_sstable> overlapped;
|
||||
auto range = ::wrapping_interval<dht::token>::make(start, end);
|
||||
@@ -459,7 +460,7 @@ private:
|
||||
* for prior failure), will return an empty list. Never returns null.
|
||||
*/
|
||||
candidates_info get_candidates_for(int level, const std::vector<std::optional<dht::decorated_key>>& last_compacted_keys) {
|
||||
assert(!get_level(level).empty());
|
||||
SCYLLA_ASSERT(!get_level(level).empty());
|
||||
|
||||
logger.debug("Choosing candidates for L{}", level);
|
||||
|
||||
@@ -517,7 +518,7 @@ public:
|
||||
new_level = 0;
|
||||
} else {
|
||||
new_level = (minimum_level == maximum_level && can_promote) ? maximum_level + 1 : maximum_level;
|
||||
assert(new_level > 0);
|
||||
SCYLLA_ASSERT(new_level > 0);
|
||||
}
|
||||
return new_level;
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "sstables/sstables.hh"
|
||||
#include "size_tiered_compaction_strategy.hh"
|
||||
#include "cql3/statements/property_definitions.hh"
|
||||
@@ -114,7 +115,7 @@ size_tiered_compaction_strategy::create_sstable_and_length_pairs(const std::vect
|
||||
|
||||
for(auto& sstable : sstables) {
|
||||
auto sstable_size = sstable->data_size();
|
||||
assert(sstable_size != 0);
|
||||
SCYLLA_ASSERT(sstable_size != 0);
|
||||
|
||||
sstable_length_pairs.emplace_back(sstable, sstable_size);
|
||||
}
|
||||
|
||||
11
compound.hh
11
compound.hh
@@ -14,6 +14,7 @@
|
||||
#include <span>
|
||||
#include <boost/range/iterator_range.hpp>
|
||||
#include <boost/range/adaptor/transformed.hpp>
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/serialization.hh"
|
||||
#include <seastar/util/backtrace.hh>
|
||||
|
||||
@@ -65,15 +66,15 @@ private:
|
||||
for (auto&& val : values) {
|
||||
using val_type = std::remove_cvref_t<decltype(val)>;
|
||||
if constexpr (FragmentedView<val_type>) {
|
||||
assert(val.size_bytes() <= std::numeric_limits<size_type>::max());
|
||||
SCYLLA_ASSERT(val.size_bytes() <= std::numeric_limits<size_type>::max());
|
||||
write<size_type>(out, size_type(val.size_bytes()));
|
||||
write_fragmented(out, val);
|
||||
} else if constexpr (std::same_as<val_type, managed_bytes>) {
|
||||
assert(val.size() <= std::numeric_limits<size_type>::max());
|
||||
SCYLLA_ASSERT(val.size() <= std::numeric_limits<size_type>::max());
|
||||
write<size_type>(out, size_type(val.size()));
|
||||
write_fragmented(out, managed_bytes_view(val));
|
||||
} else {
|
||||
assert(val.size() <= std::numeric_limits<size_type>::max());
|
||||
SCYLLA_ASSERT(val.size() <= std::numeric_limits<size_type>::max());
|
||||
write<size_type>(out, size_type(val.size()));
|
||||
write_fragmented(out, single_fragmented_view(val));
|
||||
}
|
||||
@@ -135,7 +136,7 @@ public:
|
||||
partial.reserve(values.size());
|
||||
auto i = _types.begin();
|
||||
for (auto&& component : values) {
|
||||
assert(i != _types.end());
|
||||
SCYLLA_ASSERT(i != _types.end());
|
||||
partial.push_back((*i++)->decompose(component));
|
||||
}
|
||||
return serialize_value(partial);
|
||||
@@ -256,7 +257,7 @@ public:
|
||||
}
|
||||
// Returns true iff given prefix has no missing components
|
||||
bool is_full(managed_bytes_view v) const {
|
||||
assert(AllowPrefixes == allow_prefixes::yes);
|
||||
SCYLLA_ASSERT(AllowPrefixes == allow_prefixes::yes);
|
||||
return std::distance(begin(v), end(v)) == (ssize_t)_types.size();
|
||||
}
|
||||
bool is_empty(managed_bytes_view v) const {
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "converting_mutation_partition_applier.hh"
|
||||
#include "concrete_types.hh"
|
||||
|
||||
@@ -53,7 +54,7 @@ converting_mutation_partition_applier::accept_cell(row& dst, column_kind kind, c
|
||||
|
||||
visit(old_type, make_visitor(
|
||||
[&] (const collection_type_impl& old_ctype) {
|
||||
assert(new_def.type->is_collection()); // because is_compatible
|
||||
SCYLLA_ASSERT(new_def.type->is_collection()); // because is_compatible
|
||||
auto& new_ctype = static_cast<const collection_type_impl&>(*new_def.type);
|
||||
|
||||
auto& new_value_type = *new_ctype.value_comparator();
|
||||
@@ -67,13 +68,13 @@ converting_mutation_partition_applier::accept_cell(row& dst, column_kind kind, c
|
||||
}
|
||||
},
|
||||
[&] (const user_type_impl& old_utype) {
|
||||
assert(new_def.type->is_user_type()); // because is_compatible
|
||||
SCYLLA_ASSERT(new_def.type->is_user_type()); // because is_compatible
|
||||
auto& new_utype = static_cast<const user_type_impl&>(*new_def.type);
|
||||
|
||||
for (auto& c : old_view.cells) {
|
||||
if (c.second.timestamp() > new_def.dropped_at()) {
|
||||
auto idx = deserialize_field_index(c.first);
|
||||
assert(idx < new_utype.size() && idx < old_utype.size());
|
||||
SCYLLA_ASSERT(idx < new_utype.size() && idx < old_utype.size());
|
||||
|
||||
new_view.cells.emplace_back(c.first, upgrade_cell(
|
||||
*new_utype.type(idx), *old_utype.type(idx), c.second, atomic_cell::collection_member::yes));
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "counters.hh"
|
||||
#include "mutation/mutation.hh"
|
||||
#include "combine.hh"
|
||||
@@ -104,8 +105,8 @@ void counter_cell_view::apply(const column_definition& cdef, atomic_cell_or_coll
|
||||
return;
|
||||
}
|
||||
|
||||
assert(!dst_ac.is_counter_update());
|
||||
assert(!src_ac.is_counter_update());
|
||||
SCYLLA_ASSERT(!dst_ac.is_counter_update());
|
||||
SCYLLA_ASSERT(!src_ac.is_counter_update());
|
||||
|
||||
auto src_ccv = counter_cell_view(src_ac);
|
||||
auto dst_ccv = counter_cell_view(dst_ac);
|
||||
@@ -132,8 +133,8 @@ void counter_cell_view::apply(const column_definition& cdef, atomic_cell_or_coll
|
||||
|
||||
std::optional<atomic_cell> counter_cell_view::difference(atomic_cell_view a, atomic_cell_view b)
|
||||
{
|
||||
assert(!a.is_counter_update());
|
||||
assert(!b.is_counter_update());
|
||||
SCYLLA_ASSERT(!a.is_counter_update());
|
||||
SCYLLA_ASSERT(!b.is_counter_update());
|
||||
|
||||
if (!b.is_live() || !a.is_live()) {
|
||||
if (b.is_live() || (!a.is_live() && compare_atomic_cell_for_merge(b, a) < 0)) {
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <boost/range/iterator_range.hpp>
|
||||
#include <boost/range/algorithm/find_if.hpp>
|
||||
#include <boost/range/numeric.hpp>
|
||||
@@ -311,8 +312,8 @@ public:
|
||||
explicit basic_counter_cell_view(basic_atomic_cell_view<is_mutable> ac) noexcept
|
||||
: _cell(ac)
|
||||
{
|
||||
assert(_cell.is_live());
|
||||
assert(!_cell.is_counter_update());
|
||||
SCYLLA_ASSERT(_cell.is_live());
|
||||
SCYLLA_ASSERT(!_cell.is_counter_update());
|
||||
}
|
||||
|
||||
api::timestamp_type timestamp() const { return _cell.timestamp(); }
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "cql3/column_specification.hh"
|
||||
|
||||
namespace cql3 {
|
||||
@@ -22,7 +23,7 @@ column_specification::column_specification(std::string_view ks_name_, std::strin
|
||||
|
||||
bool column_specification::all_in_same_table(const std::vector<lw_shared_ptr<column_specification>>& names)
|
||||
{
|
||||
assert(!names.empty());
|
||||
SCYLLA_ASSERT(!names.empty());
|
||||
|
||||
auto first = names.front();
|
||||
return std::all_of(std::next(names.begin()), names.end(), [first] (auto&& spec) {
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <iterator>
|
||||
#include <boost/regex.hpp>
|
||||
|
||||
@@ -47,8 +48,8 @@ static cql3_type::kind get_cql3_kind(const abstract_type& t) {
|
||||
cql3_type::kind operator()(const uuid_type_impl&) { return cql3_type::kind::UUID; }
|
||||
cql3_type::kind operator()(const varint_type_impl&) { return cql3_type::kind::VARINT; }
|
||||
cql3_type::kind operator()(const reversed_type_impl& r) { return get_cql3_kind(*r.underlying_type()); }
|
||||
cql3_type::kind operator()(const tuple_type_impl&) { assert(0 && "no kind for this type"); }
|
||||
cql3_type::kind operator()(const collection_type_impl&) { assert(0 && "no kind for this type"); }
|
||||
cql3_type::kind operator()(const tuple_type_impl&) { SCYLLA_ASSERT(0 && "no kind for this type"); }
|
||||
cql3_type::kind operator()(const collection_type_impl&) { SCYLLA_ASSERT(0 && "no kind for this type"); }
|
||||
};
|
||||
return visit(t, visitor{});
|
||||
}
|
||||
@@ -147,7 +148,7 @@ public:
|
||||
}
|
||||
|
||||
virtual cql3_type prepare_internal(const sstring& keyspace, const data_dictionary::user_types_metadata& user_types) override {
|
||||
assert(_values); // "Got null values type for a collection";
|
||||
SCYLLA_ASSERT(_values); // "Got null values type for a collection";
|
||||
|
||||
if (_values->is_counter()) {
|
||||
throw exceptions::invalid_request_exception(format("Counters are not allowed inside collections: {}", *this));
|
||||
@@ -187,7 +188,7 @@ private:
|
||||
}
|
||||
return cql3_type(set_type_impl::get_instance(_values->prepare_internal(keyspace, user_types).get_type(), !is_frozen()));
|
||||
} else if (_kind == abstract_type::kind::map) {
|
||||
assert(_keys); // "Got null keys type for a collection";
|
||||
SCYLLA_ASSERT(_keys); // "Got null keys type for a collection";
|
||||
if (_keys->is_duration()) {
|
||||
throw exceptions::invalid_request_exception(format("Durations are not allowed as map keys: {}", *this));
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "functions.hh"
|
||||
#include "token_fct.hh"
|
||||
#include "cql3/ut_name.hh"
|
||||
@@ -450,7 +451,7 @@ functions::get_user_aggregates(const sstring& keyspace) const {
|
||||
|
||||
boost::iterator_range<functions::declared_t::const_iterator>
|
||||
functions::find(const function_name& name) const {
|
||||
assert(name.has_keyspace()); // : "function name not fully qualified";
|
||||
SCYLLA_ASSERT(name.has_keyspace()); // : "function name not fully qualified";
|
||||
auto pair = _declared.equal_range(name);
|
||||
return boost::make_iterator_range(pair.first, pair.second);
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "cql3/keyspace_element_name.hh"
|
||||
|
||||
namespace cql3 {
|
||||
@@ -24,7 +25,7 @@ bool keyspace_element_name::has_keyspace() const
|
||||
|
||||
const sstring& keyspace_element_name::get_keyspace() const
|
||||
{
|
||||
assert(_ks_name);
|
||||
SCYLLA_ASSERT(_ks_name);
|
||||
return *_ks_name;
|
||||
}
|
||||
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
#include "cql3/expr/expr-utils.hh"
|
||||
#include <boost/iterator/transform_iterator.hpp>
|
||||
#include "types/list.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/UUID_gen.hh"
|
||||
#include "mutation/mutation.hh"
|
||||
|
||||
@@ -62,7 +63,7 @@ lists::setter_by_index::fill_prepare_context(prepare_context& ctx) {
|
||||
void
|
||||
lists::setter_by_index::execute(mutation& m, const clustering_key_prefix& prefix, const update_parameters& params) {
|
||||
// we should not get here for frozen lists
|
||||
assert(column.type->is_multi_cell()); // "Attempted to set an individual element on a frozen list";
|
||||
SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to set an individual element on a frozen list";
|
||||
|
||||
auto index = expr::evaluate(_idx, params._options);
|
||||
if (index.is_null()) {
|
||||
@@ -105,7 +106,7 @@ lists::setter_by_uuid::requires_read() const {
|
||||
void
|
||||
lists::setter_by_uuid::execute(mutation& m, const clustering_key_prefix& prefix, const update_parameters& params) {
|
||||
// we should not get here for frozen lists
|
||||
assert(column.type->is_multi_cell()); // "Attempted to set an individual element on a frozen list";
|
||||
SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to set an individual element on a frozen list";
|
||||
|
||||
auto index = expr::evaluate(_idx, params._options);
|
||||
auto value = expr::evaluate(*_e, params._options);
|
||||
@@ -133,7 +134,7 @@ lists::setter_by_uuid::execute(mutation& m, const clustering_key_prefix& prefix,
|
||||
void
|
||||
lists::appender::execute(mutation& m, const clustering_key_prefix& prefix, const update_parameters& params) {
|
||||
const cql3::raw_value value = expr::evaluate(*_e, params._options);
|
||||
assert(column.type->is_multi_cell()); // "Attempted to append to a frozen list";
|
||||
SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to append to a frozen list";
|
||||
do_append(value, m, prefix, column, params);
|
||||
}
|
||||
|
||||
@@ -189,7 +190,7 @@ lists::do_append(const cql3::raw_value& list_value,
|
||||
|
||||
void
|
||||
lists::prepender::execute(mutation& m, const clustering_key_prefix& prefix, const update_parameters& params) {
|
||||
assert(column.type->is_multi_cell()); // "Attempted to prepend to a frozen list";
|
||||
SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to prepend to a frozen list";
|
||||
cql3::raw_value lvalue = expr::evaluate(*_e, params._options);
|
||||
if (lvalue.is_null()) {
|
||||
return;
|
||||
@@ -244,7 +245,7 @@ lists::discarder::requires_read() const {
|
||||
|
||||
void
|
||||
lists::discarder::execute(mutation& m, const clustering_key_prefix& prefix, const update_parameters& params) {
|
||||
assert(column.type->is_multi_cell()); // "Attempted to delete from a frozen list";
|
||||
SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to delete from a frozen list";
|
||||
|
||||
auto&& existing_list = params.get_prefetched_list(m.key(), prefix, column);
|
||||
// We want to call bind before possibly returning to reject queries where the value provided is not a list.
|
||||
@@ -300,7 +301,7 @@ lists::discarder_by_index::requires_read() const {
|
||||
|
||||
void
|
||||
lists::discarder_by_index::execute(mutation& m, const clustering_key_prefix& prefix, const update_parameters& params) {
|
||||
assert(column.type->is_multi_cell()); // "Attempted to delete an item by index from a frozen list";
|
||||
SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to delete an item by index from a frozen list";
|
||||
cql3::raw_value index = expr::evaluate(*_e, params._options);
|
||||
if (index.is_null()) {
|
||||
throw exceptions::invalid_request_exception("Invalid null value for list index");
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "maps.hh"
|
||||
#include "operation.hh"
|
||||
#include "update_parameters.hh"
|
||||
@@ -44,7 +45,7 @@ maps::setter_by_key::fill_prepare_context(prepare_context& ctx) {
|
||||
void
|
||||
maps::setter_by_key::execute(mutation& m, const clustering_key_prefix& prefix, const update_parameters& params) {
|
||||
using exceptions::invalid_request_exception;
|
||||
assert(column.type->is_multi_cell()); // "Attempted to set a value for a single key on a frozen map"m
|
||||
SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to set a value for a single key on a frozen map"m
|
||||
auto key = expr::evaluate(_k, params._options);
|
||||
auto value = expr::evaluate(*_e, params._options);
|
||||
if (key.is_null()) {
|
||||
@@ -62,7 +63,7 @@ maps::setter_by_key::execute(mutation& m, const clustering_key_prefix& prefix, c
|
||||
|
||||
void
|
||||
maps::putter::execute(mutation& m, const clustering_key_prefix& prefix, const update_parameters& params) {
|
||||
assert(column.type->is_multi_cell()); // "Attempted to add items to a frozen map";
|
||||
SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to add items to a frozen map";
|
||||
cql3::raw_value value = expr::evaluate(*_e, params._options);
|
||||
do_put(m, prefix, params, value, column);
|
||||
}
|
||||
@@ -95,7 +96,7 @@ maps::do_put(mutation& m, const clustering_key_prefix& prefix, const update_para
|
||||
|
||||
void
|
||||
maps::discarder_by_key::execute(mutation& m, const clustering_key_prefix& prefix, const update_parameters& params) {
|
||||
assert(column.type->is_multi_cell()); // "Attempted to delete a single key in a frozen map";
|
||||
SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to delete a single key in a frozen map";
|
||||
cql3::raw_value key = expr::evaluate(*_e, params._options);
|
||||
if (key.is_null()) {
|
||||
throw exceptions::invalid_request_exception("Invalid null map key");
|
||||
|
||||
@@ -27,6 +27,7 @@
|
||||
#include "transport/messages/result_message.hh"
|
||||
#include "service/client_state.hh"
|
||||
#include "service/broadcast_tables/experimental/query_result.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/observable.hh"
|
||||
#include "service/raft/raft_group0_client.hh"
|
||||
#include "types/types.hh"
|
||||
@@ -542,7 +543,7 @@ private:
|
||||
bound_terms,
|
||||
std::numeric_limits<uint16_t>::max()));
|
||||
}
|
||||
assert(bound_terms == prepared->bound_names.size());
|
||||
SCYLLA_ASSERT(bound_terms == prepared->bound_names.size());
|
||||
return make_ready_future<std::unique_ptr<statements::prepared_statement>>(std::move(prepared));
|
||||
}).then([&key, &id_getter, &client_state] (auto prep_ptr) {
|
||||
const auto& warnings = prep_ptr->warnings;
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <seastar/core/shared_ptr.hh>
|
||||
#include "index/secondary_index_manager.hh"
|
||||
#include "cql3/expr/expression.hh"
|
||||
@@ -88,10 +89,10 @@ public:
|
||||
*/
|
||||
void merge(const bounds_slice& other) {
|
||||
if (has_bound(statements::bound::START)) {
|
||||
assert(!other.has_bound(statements::bound::START));
|
||||
SCYLLA_ASSERT(!other.has_bound(statements::bound::START));
|
||||
_bounds[get_idx(statements::bound::END)] = other._bounds[get_idx(statements::bound::END)];
|
||||
} else {
|
||||
assert(!other.has_bound(statements::bound::END));
|
||||
SCYLLA_ASSERT(!other.has_bound(statements::bound::END));
|
||||
_bounds[get_idx(statements::bound::START)] = other._bounds[get_idx(statements::bound::START)];
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "cql3/result_set.hh"
|
||||
|
||||
namespace cql3 {
|
||||
@@ -49,7 +50,7 @@ void metadata::set_paging_state(lw_shared_ptr<const service::pager::paging_state
|
||||
}
|
||||
|
||||
void metadata::maybe_set_paging_state(lw_shared_ptr<const service::pager::paging_state> paging_state) {
|
||||
assert(paging_state);
|
||||
SCYLLA_ASSERT(paging_state);
|
||||
if (paging_state->get_remaining() > 0) {
|
||||
set_paging_state(std::move(paging_state));
|
||||
} else {
|
||||
@@ -114,7 +115,7 @@ bool result_set::empty() const {
|
||||
}
|
||||
|
||||
void result_set::add_row(std::vector<managed_bytes_opt> row) {
|
||||
assert(row.size() == _metadata->value_count());
|
||||
SCYLLA_ASSERT(row.size() == _metadata->value_count());
|
||||
_rows.emplace_back(std::move(row));
|
||||
}
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "bytes.hh"
|
||||
#include "schema/schema_fwd.hh"
|
||||
#include "query-result-reader.hh"
|
||||
@@ -331,7 +332,7 @@ public:
|
||||
add_value(*def, static_row_iterator);
|
||||
break;
|
||||
default:
|
||||
assert(0);
|
||||
SCYLLA_ASSERT(0);
|
||||
}
|
||||
}
|
||||
_builder.complete_row();
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "sets.hh"
|
||||
#include "types/set.hh"
|
||||
#include "cql3/expr/evaluate.hh"
|
||||
@@ -32,7 +33,7 @@ sets::setter::execute(mutation& m, const clustering_key_prefix& row_key, const u
|
||||
void
|
||||
sets::adder::execute(mutation& m, const clustering_key_prefix& row_key, const update_parameters& params) {
|
||||
const cql3::raw_value value = expr::evaluate(*_e, params._options);
|
||||
assert(column.type->is_multi_cell()); // "Attempted to add items to a frozen set";
|
||||
SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to add items to a frozen set";
|
||||
do_add(m, row_key, params, value, column);
|
||||
}
|
||||
|
||||
@@ -75,7 +76,7 @@ sets::adder::do_add(mutation& m, const clustering_key_prefix& row_key, const upd
|
||||
|
||||
void
|
||||
sets::discarder::execute(mutation& m, const clustering_key_prefix& row_key, const update_parameters& params) {
|
||||
assert(column.type->is_multi_cell()); // "Attempted to remove items from a frozen set";
|
||||
SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to remove items from a frozen set";
|
||||
|
||||
cql3::raw_value svalue = expr::evaluate(*_e, params._options);
|
||||
if (svalue.is_null()) {
|
||||
@@ -96,7 +97,7 @@ sets::discarder::execute(mutation& m, const clustering_key_prefix& row_key, cons
|
||||
|
||||
void sets::element_discarder::execute(mutation& m, const clustering_key_prefix& row_key, const update_parameters& params)
|
||||
{
|
||||
assert(column.type->is_multi_cell() && "Attempted to remove items from a frozen set");
|
||||
SCYLLA_ASSERT(column.type->is_multi_cell() && "Attempted to remove items from a frozen set");
|
||||
cql3::raw_value elt = expr::evaluate(*_e, params._options);
|
||||
if (elt.is_null()) {
|
||||
throw exceptions::invalid_request_exception("Invalid null set element");
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include "cql3/query_options.hh"
|
||||
#include "cql3/statements/alter_table_statement.hh"
|
||||
@@ -304,7 +305,7 @@ std::pair<schema_builder, std::vector<view_ptr>> alter_table_statement::prepare_
|
||||
|
||||
switch (_type) {
|
||||
case alter_table_statement::type::add:
|
||||
assert(_column_changes.size());
|
||||
SCYLLA_ASSERT(_column_changes.size());
|
||||
if (s->is_dense()) {
|
||||
throw exceptions::invalid_request_exception("Cannot add new column to a COMPACT STORAGE table");
|
||||
}
|
||||
@@ -312,12 +313,12 @@ std::pair<schema_builder, std::vector<view_ptr>> alter_table_statement::prepare_
|
||||
break;
|
||||
|
||||
case alter_table_statement::type::alter:
|
||||
assert(_column_changes.size() == 1);
|
||||
SCYLLA_ASSERT(_column_changes.size() == 1);
|
||||
invoke_column_change_fn(std::mem_fn(&alter_table_statement::alter_column));
|
||||
break;
|
||||
|
||||
case alter_table_statement::type::drop:
|
||||
assert(_column_changes.size());
|
||||
SCYLLA_ASSERT(_column_changes.size());
|
||||
if (!s->is_cql3_table()) {
|
||||
throw exceptions::invalid_request_exception("Cannot drop columns from a non-CQL3 table");
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
|
||||
*/
|
||||
#pragma once
|
||||
#include "utils/assert.hh"
|
||||
#include "service/paxos/cas_request.hh"
|
||||
#include "cql3/statements/modification_statement.hh"
|
||||
|
||||
@@ -49,7 +50,7 @@ public:
|
||||
, _key(std::move(key_arg))
|
||||
, _rows(schema_arg)
|
||||
{
|
||||
assert(_key.size() == 1 && query::is_single_partition(_key.front()));
|
||||
SCYLLA_ASSERT(_key.size() == 1 && query::is_single_partition(_key.front()));
|
||||
}
|
||||
|
||||
dht::partition_range_vector key() const {
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "raw/cf_statement.hh"
|
||||
#include "service/client_state.hh"
|
||||
|
||||
@@ -40,13 +41,13 @@ void cf_statement::prepare_keyspace(std::string_view keyspace)
|
||||
}
|
||||
|
||||
bool cf_statement::has_keyspace() const {
|
||||
assert(_cf_name.has_value());
|
||||
SCYLLA_ASSERT(_cf_name.has_value());
|
||||
return _cf_name->has_keyspace();
|
||||
}
|
||||
|
||||
const sstring& cf_statement::keyspace() const
|
||||
{
|
||||
assert(_cf_name->has_keyspace()); // "The statement hasn't be prepared correctly";
|
||||
SCYLLA_ASSERT(_cf_name->has_keyspace()); // "The statement hasn't be prepared correctly";
|
||||
return _cf_name->get_keyspace();
|
||||
}
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
*/
|
||||
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <inttypes.h>
|
||||
#include <boost/regex.hpp>
|
||||
|
||||
@@ -128,7 +129,7 @@ void create_table_statement::apply_properties_to(schema_builder& builder, const
|
||||
|
||||
void create_table_statement::add_column_metadata_from_aliases(schema_builder& builder, std::vector<bytes> aliases, const std::vector<data_type>& types, column_kind kind) const
|
||||
{
|
||||
assert(aliases.size() == types.size());
|
||||
SCYLLA_ASSERT(aliases.size() == types.size());
|
||||
for (size_t i = 0; i < aliases.size(); i++) {
|
||||
if (!aliases[i].empty()) {
|
||||
builder.with_column(aliases[i], types[i], kind);
|
||||
@@ -212,7 +213,7 @@ std::unique_ptr<prepared_statement> create_table_statement::raw_statement::prepa
|
||||
for (auto&& inner: type->all_types()) {
|
||||
if (inner->is_multi_cell()) {
|
||||
// a nested non-frozen UDT should have already been rejected when defining the type
|
||||
assert(inner->is_collection());
|
||||
SCYLLA_ASSERT(inner->is_collection());
|
||||
throw exceptions::invalid_request_exception("Non-frozen UDTs with nested non-frozen collections are not supported");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <unordered_set>
|
||||
#include <vector>
|
||||
|
||||
@@ -61,7 +62,7 @@ future<> create_view_statement::check_access(query_processor& qp, const service:
|
||||
|
||||
static const column_definition* get_column_definition(const schema& schema, column_identifier::raw& identifier) {
|
||||
auto prepared = identifier.prepare(schema);
|
||||
assert(dynamic_pointer_cast<column_identifier>(prepared));
|
||||
SCYLLA_ASSERT(dynamic_pointer_cast<column_identifier>(prepared));
|
||||
auto id = static_pointer_cast<column_identifier>(prepared);
|
||||
return schema.get_column_definition(id->name());
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <boost/algorithm/cxx11/all_of.hpp>
|
||||
#include <boost/range/adaptors.hpp>
|
||||
|
||||
@@ -102,7 +103,7 @@ delete_statement::delete_statement(cf_name name,
|
||||
, _deletions(std::move(deletions))
|
||||
, _where_clause(std::move(where_clause))
|
||||
{
|
||||
assert(!_attrs->time_to_live.has_value());
|
||||
SCYLLA_ASSERT(!_attrs->time_to_live.has_value());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "cql3/statements/ks_prop_defs.hh"
|
||||
#include "data_dictionary/data_dictionary.hh"
|
||||
#include "data_dictionary/keyspace_metadata.hh"
|
||||
@@ -158,7 +159,7 @@ ks_prop_defs::init_tablets_options ks_prop_defs::get_initial_tablets(const sstri
|
||||
if (enabled == "true") {
|
||||
ret = init_tablets_options{ .enabled = true, .specified_count = 0 }; // even if 'initial' is not set, it'll start with auto-detection
|
||||
} else if (enabled == "false") {
|
||||
assert(!ret.enabled);
|
||||
SCYLLA_ASSERT(!ret.enabled);
|
||||
return ret;
|
||||
} else {
|
||||
throw exceptions::configuration_exception(sstring("Tablets enabled value must be true or false; found: ") + enabled);
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "cql3/cql_statement.hh"
|
||||
#include "cql3/statements/modification_statement.hh"
|
||||
#include "cql3/statements/strongly_consistent_modification_statement.hh"
|
||||
@@ -422,7 +423,7 @@ modification_statement::process_where_clause(data_dictionary::database db, expr:
|
||||
* partition to check conditions.
|
||||
*/
|
||||
if (_if_exists || _if_not_exists) {
|
||||
assert(!_has_static_column_conditions && !_has_regular_column_conditions);
|
||||
SCYLLA_ASSERT(!_has_static_column_conditions && !_has_regular_column_conditions);
|
||||
if (s->has_static_columns() && !_restrictions->has_clustering_columns_restriction()) {
|
||||
_has_static_column_conditions = true;
|
||||
} else {
|
||||
@@ -604,13 +605,13 @@ modification_statement::prepare_conditions(data_dictionary::database db, const s
|
||||
|
||||
if (_if_not_exists) {
|
||||
// To have both 'IF NOT EXISTS' and some other conditions doesn't make sense.
|
||||
// So far this is enforced by the parser, but let's assert it for sanity if ever the parse changes.
|
||||
assert(!_conditions);
|
||||
assert(!_if_exists);
|
||||
// So far this is enforced by the parser, but let's SCYLLA_ASSERT it for sanity if ever the parse changes.
|
||||
SCYLLA_ASSERT(!_conditions);
|
||||
SCYLLA_ASSERT(!_if_exists);
|
||||
stmt.set_if_not_exist_condition();
|
||||
} else if (_if_exists) {
|
||||
assert(!_conditions);
|
||||
assert(!_if_not_exists);
|
||||
SCYLLA_ASSERT(!_conditions);
|
||||
SCYLLA_ASSERT(!_if_not_exists);
|
||||
stmt.set_if_exist_condition();
|
||||
} else {
|
||||
stmt._condition = column_condition_prepare(*_conditions, db, keyspace(), schema);
|
||||
|
||||
@@ -44,6 +44,7 @@
|
||||
#include "test/lib/select_statement_utils.hh"
|
||||
#include <boost/algorithm/cxx11/any_of.hpp>
|
||||
#include "gms/feature_service.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/result_combinators.hh"
|
||||
#include "utils/result_loop.hh"
|
||||
#include "replica/database.hh"
|
||||
@@ -815,7 +816,7 @@ select_statement::execute_without_checking_exception_message_non_aggregate_unpag
|
||||
auto timeout = db::timeout_clock::now() + get_timeout(state.get_client_state(), options);
|
||||
if (needs_post_query_ordering() && _limit) {
|
||||
return do_with(std::forward<dht::partition_range_vector>(partition_ranges), [this, &qp, &state, &options, cmd, timeout](auto& prs) {
|
||||
assert(cmd->partition_limit == query::max_partitions);
|
||||
SCYLLA_ASSERT(cmd->partition_limit == query::max_partitions);
|
||||
query::result_merger merger(cmd->get_row_limit() * prs.size(), query::max_partitions);
|
||||
return utils::result_map_reduce(prs.begin(), prs.end(), [this, &qp, &state, &options, cmd, timeout] (auto& pr) {
|
||||
dht::partition_range_vector prange { pr };
|
||||
@@ -1110,7 +1111,7 @@ indexed_table_select_statement::do_execute(query_processor& qp,
|
||||
? source_selector::INTERNAL : source_selector::USER;
|
||||
++_stats.query_cnt(src_sel, _ks_sel, cond_selector::NO_CONDITIONS, statement_type::SELECT);
|
||||
|
||||
assert(_restrictions->uses_secondary_indexing());
|
||||
SCYLLA_ASSERT(_restrictions->uses_secondary_indexing());
|
||||
|
||||
_stats.unpaged_select_queries(_ks_sel) += options.get_page_size() <= 0;
|
||||
|
||||
@@ -1842,8 +1843,8 @@ mutation_fragments_select_statement::do_execute(query_processor& qp, service::qu
|
||||
namespace raw {
|
||||
|
||||
static void validate_attrs(const cql3::attributes::raw& attrs) {
|
||||
assert(!attrs.timestamp.has_value());
|
||||
assert(!attrs.time_to_live.has_value());
|
||||
SCYLLA_ASSERT(!attrs.timestamp.has_value());
|
||||
SCYLLA_ASSERT(!attrs.time_to_live.has_value());
|
||||
}
|
||||
|
||||
select_statement::select_statement(cf_name cf_name,
|
||||
@@ -1975,7 +1976,7 @@ std::unique_ptr<prepared_statement> select_statement::prepare(data_dictionary::d
|
||||
bool is_reversed_ = false;
|
||||
|
||||
if (!_parameters->orderings().empty()) {
|
||||
assert(!for_view);
|
||||
SCYLLA_ASSERT(!for_view);
|
||||
verify_ordering_is_allowed(*_parameters, *restrictions);
|
||||
prepared_orderings_type prepared_orderings = prepare_orderings(*schema);
|
||||
verify_ordering_is_valid(prepared_orderings, *schema, *restrictions);
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "cql3/statements/raw/truncate_statement.hh"
|
||||
#include "cql3/statements/truncate_statement.hh"
|
||||
#include "cql3/statements/prepared_statement.hh"
|
||||
@@ -30,8 +31,8 @@ truncate_statement::truncate_statement(cf_name name, std::unique_ptr<attributes:
|
||||
{
|
||||
// Validate the attributes.
|
||||
// Currently, TRUNCATE supports only USING TIMEOUT
|
||||
assert(!_attrs->timestamp.has_value());
|
||||
assert(!_attrs->time_to_live.has_value());
|
||||
SCYLLA_ASSERT(!_attrs->timestamp.has_value());
|
||||
SCYLLA_ASSERT(!_attrs->time_to_live.has_value());
|
||||
}
|
||||
|
||||
std::unique_ptr<prepared_statement> truncate_statement::prepare(data_dictionary::database db, cql_stats& stats) {
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "update_statement.hh"
|
||||
#include "cql3/expr/expression.hh"
|
||||
#include "cql3/expr/evaluate.hh"
|
||||
@@ -121,7 +122,7 @@ void update_statement::add_update_for_key(mutation& m, const query::clustering_r
|
||||
auto rb = s->regular_begin();
|
||||
if (rb->name().empty() || rb->type == empty_type) {
|
||||
// There is no column outside the PK. So no operation could have passed through validation
|
||||
assert(_column_operations.empty());
|
||||
SCYLLA_ASSERT(_column_operations.empty());
|
||||
constants::setter(*s->regular_begin(), expr::constant(cql3::raw_value::make_value(bytes()), empty_type)).execute(m, prefix, params);
|
||||
} else {
|
||||
// dense means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648.
|
||||
@@ -438,7 +439,7 @@ insert_json_statement::prepare_internal(data_dictionary::database db, schema_ptr
|
||||
{
|
||||
// FIXME: handle _if_not_exists. For now, mark it used to quiet the compiler. #8682
|
||||
(void)_if_not_exists;
|
||||
assert(expr::is<cql3::expr::untyped_constant>(_json_value) || expr::is<cql3::expr::bind_variable>(_json_value));
|
||||
SCYLLA_ASSERT(expr::is<cql3::expr::untyped_constant>(_json_value) || expr::is<cql3::expr::bind_variable>(_json_value));
|
||||
auto json_column_placeholder = ::make_shared<column_identifier>("", true);
|
||||
auto prepared_json_value = prepare_expression(_json_value, db, "", nullptr, make_lw_shared<column_specification>("", "", json_column_placeholder, utf8_type));
|
||||
expr::verify_no_aggregate_functions(prepared_json_value, "JSON clause");
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "cql3/update_parameters.hh"
|
||||
#include "cql3/selection/selection.hh"
|
||||
#include "cql3/expr/expression.hh"
|
||||
@@ -50,7 +51,7 @@ update_parameters::get_prefetched_list(const partition_key& pkey, const clusteri
|
||||
}
|
||||
|
||||
// Ensured by collections_as_maps flag in read_command flags
|
||||
assert(type->is_map());
|
||||
SCYLLA_ASSERT(type->is_map());
|
||||
|
||||
auto cell = type->deserialize(managed_bytes_view(*val));
|
||||
const map_type_impl& map_type = static_cast<const map_type_impl&>(*cell.type());
|
||||
@@ -104,7 +105,7 @@ public:
|
||||
}
|
||||
|
||||
void accept_new_partition(uint64_t row_count) {
|
||||
assert(0);
|
||||
SCYLLA_ASSERT(0);
|
||||
}
|
||||
|
||||
void accept_new_row(const clustering_key& key, const query::result_row_view& static_row,
|
||||
@@ -118,7 +119,7 @@ public:
|
||||
}
|
||||
|
||||
void accept_new_row(const query::result_row_view& static_row, const query::result_row_view& row) {
|
||||
assert(0);
|
||||
SCYLLA_ASSERT(0);
|
||||
}
|
||||
|
||||
void accept_partition_end(const query::result_row_view& static_row) {
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "cql3/user_types.hh"
|
||||
|
||||
#include "cql3/expr/evaluate.hh"
|
||||
@@ -49,7 +50,7 @@ void user_types::setter::execute(mutation& m, const clustering_key_prefix& row_k
|
||||
const auto& elems = expr::get_user_type_elements(ut_value, type);
|
||||
// There might be fewer elements given than fields in the type
|
||||
// (e.g. when the user uses a short tuple literal), but never more.
|
||||
assert(elems.size() <= type.size());
|
||||
SCYLLA_ASSERT(elems.size() <= type.size());
|
||||
|
||||
for (size_t i = 0; i < elems.size(); ++i) {
|
||||
if (!elems[i]) {
|
||||
@@ -73,7 +74,7 @@ void user_types::setter::execute(mutation& m, const clustering_key_prefix& row_k
|
||||
}
|
||||
|
||||
void user_types::setter_by_field::execute(mutation& m, const clustering_key_prefix& row_key, const update_parameters& params) {
|
||||
assert(column.type->is_user_type() && column.type->is_multi_cell());
|
||||
SCYLLA_ASSERT(column.type->is_user_type() && column.type->is_multi_cell());
|
||||
|
||||
auto value = expr::evaluate(*_e, params._options);
|
||||
|
||||
@@ -88,7 +89,7 @@ void user_types::setter_by_field::execute(mutation& m, const clustering_key_pref
|
||||
}
|
||||
|
||||
void user_types::deleter_by_field::execute(mutation& m, const clustering_key_prefix& row_key, const update_parameters& params) {
|
||||
assert(column.type->is_user_type() && column.type->is_multi_cell());
|
||||
SCYLLA_ASSERT(column.type->is_user_type() && column.type->is_multi_cell());
|
||||
|
||||
collection_mutation_description mut;
|
||||
mut.cells.emplace_back(serialize_field_index(_field_idx), params.make_dead_cell());
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
|
||||
/* Copyright 2020-present ScyllaDB */
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "util.hh"
|
||||
#include "cql3/expr/expr-utils.hh"
|
||||
|
||||
@@ -88,7 +89,7 @@ void do_with_parser_impl(const sstring_view& cql, noncopyable_function<void (cql
|
||||
};
|
||||
ucontext_t uc;
|
||||
auto r = getcontext(&uc);
|
||||
assert(r == 0);
|
||||
SCYLLA_ASSERT(r == 0);
|
||||
if (stack.get() <= (char*)&uc && (char*)&uc < stack.get() + stack_size) {
|
||||
// We are already running on the large stack, so just call the
|
||||
// parser directly.
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <unordered_map>
|
||||
|
||||
#include "bytes.hh"
|
||||
@@ -26,7 +27,7 @@ public:
|
||||
}
|
||||
void add_type(user_type type) {
|
||||
auto i = _user_types.find(type->_name);
|
||||
assert(i == _user_types.end() || type->is_compatible_with(*i->second));
|
||||
SCYLLA_ASSERT(i == _user_types.end() || type->is_compatible_with(*i->second));
|
||||
_user_types.insert_or_assign(i, type->_name, type);
|
||||
}
|
||||
void remove_type(user_type type) {
|
||||
|
||||
@@ -47,6 +47,7 @@
|
||||
#include "rp_set.hh"
|
||||
#include "db/config.hh"
|
||||
#include "db/extensions.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/crc.hh"
|
||||
#include "utils/runtime.hh"
|
||||
#include "utils/flush_queue.hh"
|
||||
@@ -520,7 +521,7 @@ private:
|
||||
};
|
||||
|
||||
future<> db::commitlog::segment_manager::named_file::open(open_flags flags, file_open_options opt, std::optional<uint64_t> size_in) noexcept {
|
||||
assert(!*this);
|
||||
SCYLLA_ASSERT(!*this);
|
||||
auto f = co_await open_file_dma(_name, flags, opt);
|
||||
// bypass roundtrip to disk if caller knows size, or open flags truncated file
|
||||
auto existing_size = size_in
|
||||
@@ -533,7 +534,7 @@ future<> db::commitlog::segment_manager::named_file::open(open_flags flags, file
|
||||
}
|
||||
|
||||
future<> db::commitlog::segment_manager::named_file::rename(std::string_view to) {
|
||||
assert(!*this);
|
||||
SCYLLA_ASSERT(!*this);
|
||||
try {
|
||||
auto s = sstring(to);
|
||||
auto dir = std::filesystem::path(to).parent_path();
|
||||
@@ -647,7 +648,7 @@ detail::sector_split_iterator::sector_split_iterator(base_iterator i, base_itera
|
||||
{}
|
||||
|
||||
detail::sector_split_iterator& detail::sector_split_iterator::operator++() {
|
||||
assert(_iter != _end);
|
||||
SCYLLA_ASSERT(_iter != _end);
|
||||
_ptr += _sector_size;
|
||||
// check if we have more pages in this temp-buffer (in out case they are always aligned + sized in page units)
|
||||
auto rem = _iter->size() - std::distance(_iter->get(), const_cast<const char*>(_ptr));
|
||||
@@ -658,7 +659,7 @@ detail::sector_split_iterator& detail::sector_split_iterator::operator++() {
|
||||
return *this;
|
||||
}
|
||||
rem = _iter->size();
|
||||
assert(rem >= _sector_size);
|
||||
SCYLLA_ASSERT(rem >= _sector_size);
|
||||
// booh. ugly.
|
||||
_ptr = const_cast<char*>(_iter->get());
|
||||
}
|
||||
@@ -926,7 +927,7 @@ public:
|
||||
// See class comment for info
|
||||
future<sseg_ptr> flush() {
|
||||
auto me = shared_from_this();
|
||||
assert(me.use_count() > 1);
|
||||
SCYLLA_ASSERT(me.use_count() > 1);
|
||||
uint64_t pos = _file_pos;
|
||||
|
||||
clogger.trace("Syncing {} {} -> {}", *this, _flush_pos, pos);
|
||||
@@ -937,13 +938,13 @@ public:
|
||||
|
||||
// Run like this to ensure flush ordering, and making flushes "waitable"
|
||||
co_await _pending_ops.run_with_ordered_post_op(rp, [] {}, [&] {
|
||||
assert(_pending_ops.has_operation(rp));
|
||||
SCYLLA_ASSERT(_pending_ops.has_operation(rp));
|
||||
return do_flush(pos);
|
||||
});
|
||||
co_return me;
|
||||
}
|
||||
future<sseg_ptr> terminate() {
|
||||
assert(_closed);
|
||||
SCYLLA_ASSERT(_closed);
|
||||
if (!std::exchange(_terminated, true)) {
|
||||
// write a terminating zero block iff we are ending (a reused)
|
||||
// block before actual file end.
|
||||
@@ -1000,7 +1001,7 @@ public:
|
||||
* Allocate a new buffer
|
||||
*/
|
||||
void new_buffer(size_t s) {
|
||||
assert(_buffer.empty());
|
||||
SCYLLA_ASSERT(_buffer.empty());
|
||||
|
||||
auto overhead = segment_overhead_size;
|
||||
if (_file_pos == 0) {
|
||||
@@ -1018,7 +1019,7 @@ public:
|
||||
// the amount of data we can actually write into.
|
||||
auto useable_size = size - n_blocks * detail::sector_overhead_size;
|
||||
|
||||
assert(useable_size >= s);
|
||||
SCYLLA_ASSERT(useable_size >= s);
|
||||
|
||||
_buffer_ostream = frag_ostream_type(detail::sector_split_iterator(_buffer.begin(), _buffer.end(), _alignment), useable_size);
|
||||
// #16298 - keep track of ostream initial size.
|
||||
@@ -1031,7 +1032,7 @@ public:
|
||||
// we should be in a allocate or terminate call. In either case, account for overhead now already.
|
||||
_segment_manager->account_memory_usage(overhead);
|
||||
|
||||
assert(buffer_position() == overhead);
|
||||
SCYLLA_ASSERT(buffer_position() == overhead);
|
||||
}
|
||||
|
||||
bool buffer_is_empty() const {
|
||||
@@ -1063,7 +1064,7 @@ public:
|
||||
_buffer_ostream_size = 0;
|
||||
_num_allocs = 0;
|
||||
|
||||
assert(me.use_count() > 1);
|
||||
SCYLLA_ASSERT(me.use_count() > 1);
|
||||
|
||||
auto out = buf.get_ostream();
|
||||
|
||||
@@ -1098,8 +1099,8 @@ public:
|
||||
|
||||
clogger.trace("Writing {} entries, {} k in {} -> {}", num, size, off, off + size);
|
||||
} else {
|
||||
assert(num == 0);
|
||||
assert(_closed);
|
||||
SCYLLA_ASSERT(num == 0);
|
||||
SCYLLA_ASSERT(_closed);
|
||||
clogger.trace("Terminating {} at pos {}", *this, _file_pos);
|
||||
write(out, uint64_t(0));
|
||||
}
|
||||
@@ -1114,7 +1115,7 @@ public:
|
||||
auto* p = const_cast<char*>(tbuf.get());
|
||||
auto* e = p + tbuf.size();
|
||||
while (p != e) {
|
||||
assert(align_up(p, _alignment) == p);
|
||||
SCYLLA_ASSERT(align_up(p, _alignment) == p);
|
||||
|
||||
// include segment id in crc:ed data
|
||||
auto be = p + ss;
|
||||
@@ -1137,7 +1138,7 @@ public:
|
||||
co_await _pending_ops.run_with_ordered_post_op(rp, [&]() -> future<> {
|
||||
auto view = fragmented_temporary_buffer::view(buf);
|
||||
view.remove_suffix(buf.size_bytes() - size);
|
||||
assert(size == view.size_bytes());
|
||||
SCYLLA_ASSERT(size == view.size_bytes());
|
||||
|
||||
if (view.empty()) {
|
||||
co_return;
|
||||
@@ -1179,7 +1180,7 @@ public:
|
||||
}
|
||||
}
|
||||
}, [&]() -> future<> {
|
||||
assert(_pending_ops.has_operation(rp));
|
||||
SCYLLA_ASSERT(_pending_ops.has_operation(rp));
|
||||
if (flush_after) {
|
||||
co_await do_flush(top);
|
||||
}
|
||||
@@ -1209,7 +1210,7 @@ public:
|
||||
replay_position rp(_desc.id, position_type(fp));
|
||||
co_await _pending_ops.wait_for_pending(rp, timeout);
|
||||
|
||||
assert(_segment_manager->cfg.mode != sync_mode::BATCH || _flush_pos > fp);
|
||||
SCYLLA_ASSERT(_segment_manager->cfg.mode != sync_mode::BATCH || _flush_pos > fp);
|
||||
if (_flush_pos <= fp) {
|
||||
// previous op we were waiting for was not sync one, so it did not flush
|
||||
// force flush here
|
||||
@@ -1372,7 +1373,7 @@ public:
|
||||
auto fill_size = size - buf_pos;
|
||||
if (fill_size > 0) {
|
||||
// we want to fill to a sector boundary, must leave room for metadata
|
||||
assert((fill_size - detail::sector_overhead_size) <= _buffer_ostream.size());
|
||||
SCYLLA_ASSERT((fill_size - detail::sector_overhead_size) <= _buffer_ostream.size());
|
||||
_buffer_ostream.fill('\0', fill_size - detail::sector_overhead_size);
|
||||
_segment_manager->totals.bytes_slack += fill_size;
|
||||
_segment_manager->account_memory_usage(fill_size);
|
||||
@@ -1382,7 +1383,7 @@ public:
|
||||
void mark_clean(const cf_id_type& id, uint64_t count) noexcept {
|
||||
auto i = _cf_dirty.find(id);
|
||||
if (i != _cf_dirty.end()) {
|
||||
assert(i->second >= count);
|
||||
SCYLLA_ASSERT(i->second >= count);
|
||||
i->second -= count;
|
||||
if (i->second == 0) {
|
||||
_cf_dirty.erase(i);
|
||||
@@ -1518,8 +1519,8 @@ db::commitlog::segment_manager::segment_manager(config c)
|
||||
, _reserve_replenisher(make_ready_future<>())
|
||||
, _background_sync(make_ready_future<>())
|
||||
{
|
||||
assert(max_size > 0);
|
||||
assert(max_mutation_size < segment::multi_entry_size_magic);
|
||||
SCYLLA_ASSERT(max_size > 0);
|
||||
SCYLLA_ASSERT(max_mutation_size < segment::multi_entry_size_magic);
|
||||
|
||||
clogger.trace("Commitlog {} maximum disk size: {} MB / cpu ({} cpus)",
|
||||
cfg.commit_log_location, max_disk_size / (1024 * 1024),
|
||||
@@ -1627,7 +1628,7 @@ gc_clock::time_point db::commitlog::segment_manager::min_gc_time(const cf_id_typ
|
||||
future<> db::commitlog::segment_manager::init() {
|
||||
auto descs = co_await list_descriptors(cfg.commit_log_location);
|
||||
|
||||
assert(_reserve_segments.empty()); // _segments_to_replay must not pick them up
|
||||
SCYLLA_ASSERT(_reserve_segments.empty()); // _segments_to_replay must not pick them up
|
||||
segment_id_type id = *cfg.base_segment_id;
|
||||
for (auto& d : descs) {
|
||||
id = std::max(id, replay_position(d.id).base_id());
|
||||
@@ -2325,7 +2326,7 @@ future<> db::commitlog::segment_manager::delete_segments(std::vector<sstring> fi
|
||||
|
||||
void db::commitlog::segment_manager::abort_recycled_list(std::exception_ptr ep) {
|
||||
// may not call here with elements in list. that would leak files.
|
||||
assert(_recycled_segments.empty());
|
||||
SCYLLA_ASSERT(_recycled_segments.empty());
|
||||
_recycled_segments.abort(ep);
|
||||
// and ensure next lap(s) still has a queue
|
||||
_recycled_segments = queue<named_file>(std::numeric_limits<size_t>::max());
|
||||
@@ -2424,7 +2425,7 @@ future<> db::commitlog::segment_manager::do_pending_deletes() {
|
||||
try {
|
||||
co_await f.rename(dst);
|
||||
auto b = _recycled_segments.push(std::move(f));
|
||||
assert(b); // we set this to max_size_t so...
|
||||
SCYLLA_ASSERT(b); // we set this to max_size_t so...
|
||||
continue;
|
||||
} catch (...) {
|
||||
clogger.error("Could not recycle segment {}: {}", f.name(), std::current_exception());
|
||||
@@ -2628,7 +2629,7 @@ future<db::rp_handle> db::commitlog::add(const cf_id_type& id,
|
||||
|
||||
future<db::rp_handle> db::commitlog::add_entry(const cf_id_type& id, const commitlog_entry_writer& cew, timeout_clock::time_point timeout)
|
||||
{
|
||||
assert(id == cew.schema()->id());
|
||||
SCYLLA_ASSERT(id == cew.schema()->id());
|
||||
|
||||
class cl_entry_writer final : public entry_writer {
|
||||
commitlog_entry_writer _writer;
|
||||
@@ -2716,7 +2717,7 @@ db::commitlog::add_entries(std::vector<commitlog_entry_writer> entry_writers, db
|
||||
w.write(out);
|
||||
}
|
||||
void result(size_t i, rp_handle h) override {
|
||||
assert(i == res.size());
|
||||
SCYLLA_ASSERT(i == res.size());
|
||||
res.emplace_back(std::move(h));
|
||||
}
|
||||
|
||||
@@ -2907,7 +2908,7 @@ db::commitlog::read_log_file(sstring filename, sstring pfx, commit_load_reader_f
|
||||
co_return;
|
||||
}
|
||||
// must be on page boundary now!
|
||||
assert(align_down(pos, alignment) == pos);
|
||||
SCYLLA_ASSERT(align_down(pos, alignment) == pos);
|
||||
|
||||
// this is in full sectors. no need to fiddle with overhead here.
|
||||
auto bytes = seek_to_pos - pos;
|
||||
@@ -3083,7 +3084,7 @@ db::commitlog::read_log_file(sstring filename, sstring pfx, commit_load_reader_f
|
||||
// #16298 - adjust position here, based on data returned.
|
||||
advance_pos(size);
|
||||
|
||||
assert(((filepos_to_datapos(pos) + buffer.size_bytes()) % (alignment - detail::sector_overhead_size)) == 0);
|
||||
SCYLLA_ASSERT(((filepos_to_datapos(pos) + buffer.size_bytes()) % (alignment - detail::sector_overhead_size)) == 0);
|
||||
|
||||
co_return res;
|
||||
}
|
||||
@@ -3161,7 +3162,7 @@ db::commitlog::read_log_file(sstring filename, sstring pfx, commit_load_reader_f
|
||||
* If not, this is small slack space in the chunk end, and we should just go
|
||||
* to the next.
|
||||
*/
|
||||
assert(pos <= next);
|
||||
SCYLLA_ASSERT(pos <= next);
|
||||
if (next_pos(entry_header_size) >= next) {
|
||||
co_await skip_to_chunk(next);
|
||||
co_return;
|
||||
@@ -3182,7 +3183,7 @@ db::commitlog::read_log_file(sstring filename, sstring pfx, commit_load_reader_f
|
||||
auto actual_size = checksum;
|
||||
auto end = pos + actual_size - entry_header_size - sizeof(uint32_t);
|
||||
|
||||
assert(end <= next);
|
||||
SCYLLA_ASSERT(end <= next);
|
||||
// really small read...
|
||||
buf = co_await read_data(sizeof(uint32_t));
|
||||
in = buf.get_istream();
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <optional>
|
||||
|
||||
#include "commitlog_types.hh"
|
||||
@@ -111,7 +112,7 @@ public:
|
||||
}
|
||||
|
||||
size_t size() const {
|
||||
assert(_size != std::numeric_limits<size_t>::max());
|
||||
SCYLLA_ASSERT(_size != std::numeric_limits<size_t>::max());
|
||||
return _size;
|
||||
}
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
#include <algorithm>
|
||||
@@ -69,7 +70,7 @@ public:
|
||||
};
|
||||
|
||||
// move start/stop of the thread local bookkeep to "top level"
|
||||
// and also make sure to assert on it actually being started.
|
||||
// and also make sure to SCYLLA_ASSERT on it actually being started.
|
||||
future<> start() {
|
||||
return _column_mappings.start();
|
||||
}
|
||||
@@ -164,7 +165,7 @@ future<> db::commitlog_replayer::impl::init() {
|
||||
|
||||
future<db::commitlog_replayer::impl::stats>
|
||||
db::commitlog_replayer::impl::recover(sstring file, const sstring& fname_prefix) const {
|
||||
assert(_column_mappings.local_is_initialized());
|
||||
SCYLLA_ASSERT(_column_mappings.local_is_initialized());
|
||||
|
||||
replay_position rp{commitlog::descriptor(file, fname_prefix)};
|
||||
auto gp = min_pos(rp.shard_id());
|
||||
|
||||
@@ -27,6 +27,7 @@
|
||||
* uniformly, and we need to choose K nodes and forward the request
|
||||
* to them).
|
||||
*/
|
||||
#include "utils/assert.hh"
|
||||
#include <vector>
|
||||
#include <cassert>
|
||||
#include <fmt/ranges.h>
|
||||
@@ -70,7 +71,7 @@ public:
|
||||
std::vector<Node> get() {
|
||||
auto n = _pp.size();
|
||||
auto ke = _k + (_extra ? 1 : 0);
|
||||
assert(ke <= n);
|
||||
SCYLLA_ASSERT(ke <= n);
|
||||
std::vector<Node> ret;
|
||||
ret.reserve(ke);
|
||||
std::vector<int> r = ssample(_k, _pp);
|
||||
@@ -97,7 +98,7 @@ public:
|
||||
}
|
||||
}
|
||||
}
|
||||
assert(ret.size() == ke);
|
||||
SCYLLA_ASSERT(ret.size() == ke);
|
||||
return ret;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -24,6 +24,7 @@
|
||||
#include "db/hints/manager.hh"
|
||||
#include "db/timeout_clock.hh"
|
||||
#include "replica/database.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/disk-error-handler.hh"
|
||||
#include "utils/error_injection.hh"
|
||||
#include "utils/runtime.hh"
|
||||
@@ -173,7 +174,7 @@ hint_endpoint_manager::hint_endpoint_manager(hint_endpoint_manager&& other)
|
||||
{}
|
||||
|
||||
hint_endpoint_manager::~hint_endpoint_manager() {
|
||||
assert(stopped());
|
||||
SCYLLA_ASSERT(stopped());
|
||||
}
|
||||
|
||||
future<hints_store_ptr> hint_endpoint_manager::get_or_load() {
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
#pragma once
|
||||
|
||||
// Seastar features.
|
||||
#include "utils/assert.hh"
|
||||
#include <seastar/core/abort_source.hh>
|
||||
#include <seastar/core/gate.hh>
|
||||
#include <seastar/core/lowres_clock.hh>
|
||||
@@ -167,7 +168,7 @@ public:
|
||||
manager& operator=(manager&&) = delete;
|
||||
|
||||
~manager() noexcept {
|
||||
assert(_ep_managers.empty());
|
||||
SCYLLA_ASSERT(_ep_managers.empty());
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <seastar/core/print.hh>
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include "db/system_keyspace.hh"
|
||||
@@ -36,7 +37,7 @@ large_data_handler::large_data_handler(uint64_t partition_threshold_bytes, uint6
|
||||
}
|
||||
|
||||
future<large_data_handler::partition_above_threshold> large_data_handler::maybe_record_large_partitions(const sstables::sstable& sst, const sstables::key& key, uint64_t partition_size, uint64_t rows, uint64_t range_tombstones, uint64_t dead_rows) {
|
||||
assert(running());
|
||||
SCYLLA_ASSERT(running());
|
||||
partition_above_threshold above_threshold{partition_size > _partition_threshold_bytes, rows > _rows_count_threshold};
|
||||
static_assert(std::is_same_v<decltype(above_threshold.size), bool>);
|
||||
_stats.partitions_bigger_than_threshold += above_threshold.size; // increment if true
|
||||
@@ -79,7 +80,7 @@ sstring large_data_handler::sst_filename(const sstables::sstable& sst) {
|
||||
}
|
||||
|
||||
future<> large_data_handler::maybe_delete_large_data_entries(sstables::shared_sstable sst) {
|
||||
assert(running());
|
||||
SCYLLA_ASSERT(running());
|
||||
auto schema = sst->get_schema();
|
||||
auto filename = sst_filename(*sst);
|
||||
using ldt = sstables::large_data_type;
|
||||
@@ -237,7 +238,7 @@ future<> cql_table_large_data_handler::record_large_rows(const sstables::sstable
|
||||
}
|
||||
|
||||
future<> cql_table_large_data_handler::delete_large_data_entries(const schema& s, sstring sstable_name, std::string_view large_table_name) const {
|
||||
assert(_sys_ks);
|
||||
SCYLLA_ASSERT(_sys_ks);
|
||||
const sstring req =
|
||||
format("DELETE FROM system.{} WHERE keyspace_name = ? AND table_name = ? AND sstable_name = ?",
|
||||
large_table_name);
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
#include "schema/schema_fwd.hh"
|
||||
#include "system_keyspace.hh"
|
||||
#include "sstables/shared_sstable.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/updateable_value.hh"
|
||||
|
||||
namespace sstables {
|
||||
@@ -78,7 +79,7 @@ public:
|
||||
|
||||
future<bool> maybe_record_large_rows(const sstables::sstable& sst, const sstables::key& partition_key,
|
||||
const clustering_key_prefix* clustering_key, uint64_t row_size) {
|
||||
assert(running());
|
||||
SCYLLA_ASSERT(running());
|
||||
if (__builtin_expect(row_size > _row_threshold_bytes, false)) {
|
||||
return with_sem([&sst, &partition_key, clustering_key, row_size, this] {
|
||||
return record_large_rows(sst, partition_key, clustering_key, row_size);
|
||||
@@ -98,7 +99,7 @@ public:
|
||||
|
||||
future<bool> maybe_record_large_cells(const sstables::sstable& sst, const sstables::key& partition_key,
|
||||
const clustering_key_prefix* clustering_key, const column_definition& cdef, uint64_t cell_size, uint64_t collection_elements) {
|
||||
assert(running());
|
||||
SCYLLA_ASSERT(running());
|
||||
if (__builtin_expect(cell_size > _cell_threshold_bytes || collection_elements > _collection_elements_count_threshold, false)) {
|
||||
return with_sem([&sst, &partition_key, clustering_key, &cdef, cell_size, collection_elements, this] {
|
||||
return record_large_cells(sst, partition_key, clustering_key, cdef, cell_size, collection_elements);
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
#include "query-result-writer.hh"
|
||||
#include "schema/schema_builder.hh"
|
||||
#include "map_difference.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/UUID_gen.hh"
|
||||
#include "utils/to_string.hh"
|
||||
#include <seastar/coroutine/all.hh>
|
||||
@@ -452,9 +453,9 @@ const std::unordered_set<table_id>& schema_tables_holding_schema_mutations() {
|
||||
db::system_keyspace::legacy::column_families(),
|
||||
db::system_keyspace::legacy::columns(),
|
||||
db::system_keyspace::legacy::triggers()}) {
|
||||
assert(s->clustering_key_size() > 0);
|
||||
SCYLLA_ASSERT(s->clustering_key_size() > 0);
|
||||
auto&& first_column_name = s->clustering_column_at(0).name_as_text();
|
||||
assert(first_column_name == "table_name"
|
||||
SCYLLA_ASSERT(first_column_name == "table_name"
|
||||
|| first_column_name == "view_name"
|
||||
|| first_column_name == "columnfamily_name");
|
||||
ids.emplace(s->id());
|
||||
@@ -904,7 +905,7 @@ read_schema_partition_for_keyspace(distributed<service::storage_proxy>& proxy, s
|
||||
future<mutation>
|
||||
read_schema_partition_for_table(distributed<service::storage_proxy>& proxy, schema_ptr schema, const sstring& keyspace_name, const sstring& table_name)
|
||||
{
|
||||
assert(schema_tables_holding_schema_mutations().contains(schema->id()));
|
||||
SCYLLA_ASSERT(schema_tables_holding_schema_mutations().contains(schema->id()));
|
||||
auto keyspace_key = partition_key::from_singular(*schema, keyspace_name);
|
||||
auto clustering_range = query::clustering_range(clustering_key_prefix::from_clustering_prefix(
|
||||
*schema, exploded_clustering_prefix({utf8_type->decompose(table_name)})));
|
||||
@@ -942,7 +943,7 @@ future<> merge_unlock() {
|
||||
}
|
||||
|
||||
future<semaphore_units<>> hold_merge_lock() noexcept {
|
||||
assert(this_shard_id() == 0);
|
||||
SCYLLA_ASSERT(this_shard_id() == 0);
|
||||
|
||||
if (slogger.is_enabled(log_level::trace)) {
|
||||
slogger.trace("hold_merge_lock at {}", current_backtrace());
|
||||
@@ -2074,7 +2075,7 @@ template<typename K, typename Map>
|
||||
static void store_map(mutation& m, const K& ckey, const bytes& name, api::timestamp_type timestamp, const Map& map) {
|
||||
auto s = m.schema();
|
||||
auto column = s->get_column_definition(name);
|
||||
assert(column);
|
||||
SCYLLA_ASSERT(column);
|
||||
set_cell_or_clustered(m, ckey, *column, make_map_mutation(map, *column, timestamp));
|
||||
}
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <boost/range/adaptor/indirected.hpp>
|
||||
#include <boost/range/adaptor/map.hpp>
|
||||
#include <boost/range/adaptor/transformed.hpp>
|
||||
@@ -188,7 +189,7 @@ static future<std::vector<token_range>> get_local_ranges(replica::database& db,
|
||||
auto ranges = db.get_token_metadata().get_primary_ranges_for(std::move(tokens));
|
||||
std::vector<token_range> local_ranges;
|
||||
auto to_bytes = [](const std::optional<dht::token_range::bound>& b) {
|
||||
assert(b);
|
||||
SCYLLA_ASSERT(b);
|
||||
return utf8_type->decompose(b->value().to_sstring());
|
||||
};
|
||||
// We merge the ranges to be compatible with how Cassandra shows it's size estimates table.
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include "sstables-format-selector.hh"
|
||||
#include "log.hh"
|
||||
@@ -82,7 +83,7 @@ future<> sstables_format_listener::maybe_select_format(sstables::sstable_version
|
||||
}
|
||||
|
||||
future<> sstables_format_listener::start() {
|
||||
assert(this_shard_id() == 0);
|
||||
SCYLLA_ASSERT(this_shard_id() == 0);
|
||||
// The listener may fire immediately, create a thread for that case.
|
||||
co_await seastar::async([this] {
|
||||
_me_feature_listener.on_enabled();
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "db/system_distributed_keyspace.hh"
|
||||
|
||||
#include "cql3/untyped_result_set.hh"
|
||||
@@ -220,7 +221,7 @@ static schema_ptr get_current_service_levels(data_dictionary::database db) {
|
||||
}
|
||||
|
||||
static schema_ptr get_updated_service_levels(data_dictionary::database db) {
|
||||
assert(this_shard_id() == 0);
|
||||
SCYLLA_ASSERT(this_shard_id() == 0);
|
||||
auto schema = get_current_service_levels(db);
|
||||
schema_builder b(schema);
|
||||
for (const auto& col : new_columns) {
|
||||
|
||||
@@ -24,6 +24,7 @@
|
||||
#include "gms/feature_service.hh"
|
||||
#include "system_keyspace_view_types.hh"
|
||||
#include "schema/schema_builder.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/hashers.hh"
|
||||
#include "log.hh"
|
||||
#include <seastar/core/enum.hh>
|
||||
@@ -1580,7 +1581,7 @@ struct local_cache {
|
||||
};
|
||||
|
||||
future<> system_keyspace::peers_table_read_fixup() {
|
||||
assert(this_shard_id() == 0);
|
||||
SCYLLA_ASSERT(this_shard_id() == 0);
|
||||
if (_peers_table_read_fixup_done) {
|
||||
co_return;
|
||||
}
|
||||
@@ -1839,7 +1840,7 @@ std::unordered_set<dht::token> decode_tokens(const set_type_impl::native_type& t
|
||||
std::unordered_set<dht::token> tset;
|
||||
for (auto& t: tokens) {
|
||||
auto str = value_cast<sstring>(t);
|
||||
assert(str == dht::token::from_sstring(str).to_sstring());
|
||||
SCYLLA_ASSERT(str == dht::token::from_sstring(str).to_sstring());
|
||||
tset.insert(dht::token::from_sstring(str));
|
||||
}
|
||||
return tset;
|
||||
@@ -1945,7 +1946,7 @@ future<std::vector<gms::inet_address>> system_keyspace::load_peers() {
|
||||
co_await peers_table_read_fixup();
|
||||
|
||||
const auto res = co_await execute_cql(format("SELECT peer, tokens FROM system.{}", PEERS));
|
||||
assert(res);
|
||||
SCYLLA_ASSERT(res);
|
||||
|
||||
std::vector<gms::inet_address> ret;
|
||||
for (const auto& row: *res) {
|
||||
@@ -2709,7 +2710,7 @@ future<utils::UUID> system_keyspace::get_last_group0_state_id() {
|
||||
format(
|
||||
"SELECT state_id FROM system.{} WHERE key = '{}' LIMIT 1",
|
||||
GROUP0_HISTORY, GROUP0_HISTORY_KEY));
|
||||
assert(rs);
|
||||
SCYLLA_ASSERT(rs);
|
||||
if (rs->empty()) {
|
||||
co_return utils::UUID{};
|
||||
}
|
||||
@@ -2722,7 +2723,7 @@ future<bool> system_keyspace::group0_history_contains(utils::UUID state_id) {
|
||||
"SELECT state_id FROM system.{} WHERE key = '{}' AND state_id = ?",
|
||||
GROUP0_HISTORY, GROUP0_HISTORY_KEY),
|
||||
state_id);
|
||||
assert(rs);
|
||||
SCYLLA_ASSERT(rs);
|
||||
co_return !rs->empty();
|
||||
}
|
||||
|
||||
@@ -2735,16 +2736,16 @@ mutation system_keyspace::make_group0_history_state_id_mutation(
|
||||
row.apply(row_marker(ts));
|
||||
if (!description.empty()) {
|
||||
auto cdef = s->get_column_definition("description");
|
||||
assert(cdef);
|
||||
SCYLLA_ASSERT(cdef);
|
||||
row.cells().apply(*cdef, atomic_cell::make_live(*cdef->type, ts, cdef->type->decompose(description)));
|
||||
}
|
||||
if (gc_older_than) {
|
||||
using namespace std::chrono;
|
||||
assert(*gc_older_than >= gc_clock::duration{0});
|
||||
SCYLLA_ASSERT(*gc_older_than >= gc_clock::duration{0});
|
||||
|
||||
auto ts_micros = microseconds{ts};
|
||||
auto gc_older_than_micros = duration_cast<microseconds>(*gc_older_than);
|
||||
assert(gc_older_than_micros < ts_micros);
|
||||
SCYLLA_ASSERT(gc_older_than_micros < ts_micros);
|
||||
|
||||
auto tomb_upper_bound = utils::UUID_gen::min_time_UUID(ts_micros - gc_older_than_micros);
|
||||
// We want to delete all entries with IDs smaller than `tomb_upper_bound`
|
||||
@@ -2761,7 +2762,7 @@ mutation system_keyspace::make_group0_history_state_id_mutation(
|
||||
future<mutation> system_keyspace::get_group0_history(distributed<replica::database>& db) {
|
||||
auto s = group0_history();
|
||||
auto rs = co_await db::system_keyspace::query_mutations(db, db::system_keyspace::NAME, db::system_keyspace::GROUP0_HISTORY);
|
||||
assert(rs);
|
||||
SCYLLA_ASSERT(rs);
|
||||
auto& ps = rs->partitions();
|
||||
for (auto& p: ps) {
|
||||
auto mut = p.mut().unfreeze(s);
|
||||
@@ -2783,7 +2784,7 @@ static future<std::optional<mutation>> get_scylla_local_mutation(replica::databa
|
||||
dht::partition_range pr = dht::partition_range::make_singular(dht::decorate_key(*s, pk));
|
||||
|
||||
auto rs = co_await replica::query_mutations(db.container(), s, pr, s->full_slice(), db::no_timeout);
|
||||
assert(rs);
|
||||
SCYLLA_ASSERT(rs);
|
||||
auto& ps = rs->partitions();
|
||||
for (auto& p: ps) {
|
||||
auto mut = p.mut().unfreeze(s);
|
||||
@@ -2906,7 +2907,7 @@ static bool must_have_tokens(service::node_state nst) {
|
||||
future<service::topology> system_keyspace::load_topology_state(const std::unordered_set<locator::host_id>& force_load_hosts) {
|
||||
auto rs = co_await execute_cql(
|
||||
format("SELECT * FROM system.{} WHERE key = '{}'", TOPOLOGY, TOPOLOGY));
|
||||
assert(rs);
|
||||
SCYLLA_ASSERT(rs);
|
||||
|
||||
service::topology_state_machine::topology_type ret;
|
||||
|
||||
@@ -3087,7 +3088,7 @@ future<service::topology> system_keyspace::load_topology_state(const std::unorde
|
||||
format("SELECT count(range_end) as cnt FROM {}.{} WHERE key = '{}' AND id = ?",
|
||||
NAME, CDC_GENERATIONS_V3, cdc::CDC_GENERATIONS_V3_KEY),
|
||||
gen_id.id);
|
||||
assert(gen_rows);
|
||||
SCYLLA_ASSERT(gen_rows);
|
||||
if (gen_rows->empty()) {
|
||||
on_internal_error(slogger, format(
|
||||
"load_topology_state: last committed CDC generation time UUID ({}) present, but data missing", gen_id.id));
|
||||
@@ -3146,7 +3147,7 @@ future<service::topology> system_keyspace::load_topology_state(const std::unorde
|
||||
future<std::optional<service::topology_features>> system_keyspace::load_topology_features_state() {
|
||||
auto rs = co_await execute_cql(
|
||||
format("SELECT host_id, node_state, supported_features, enabled_features FROM system.{} WHERE key = '{}'", TOPOLOGY, TOPOLOGY));
|
||||
assert(rs);
|
||||
SCYLLA_ASSERT(rs);
|
||||
|
||||
co_return decode_topology_features_state(std::move(rs));
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "row_locking.hh"
|
||||
#include "log.hh"
|
||||
|
||||
@@ -152,14 +153,14 @@ row_locker::unlock(const dht::decorated_key* pk, bool partition_exclusive,
|
||||
mylog.error("column_family::local_base_lock_holder::~local_base_lock_holder() can't find lock for partition", *pk);
|
||||
return;
|
||||
}
|
||||
assert(&pli->first == pk);
|
||||
SCYLLA_ASSERT(&pli->first == pk);
|
||||
if (cpk) {
|
||||
auto rli = pli->second._row_locks.find(*cpk);
|
||||
if (rli == pli->second._row_locks.end()) {
|
||||
mylog.error("column_family::local_base_lock_holder::~local_base_lock_holder() can't find lock for row", *cpk);
|
||||
return;
|
||||
}
|
||||
assert(&rli->first == cpk);
|
||||
SCYLLA_ASSERT(&rli->first == cpk);
|
||||
mylog.debug("releasing {} lock for row {} in partition {}", (row_exclusive ? "exclusive" : "shared"), *cpk, *pk);
|
||||
auto& lock = rli->second;
|
||||
if (row_exclusive) {
|
||||
|
||||
@@ -53,6 +53,7 @@
|
||||
#include "service/migration_manager.hh"
|
||||
#include "service/storage_proxy.hh"
|
||||
#include "compaction/compaction_manager.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/small_vector.hh"
|
||||
#include "view_info.hh"
|
||||
#include "view_update_checks.hh"
|
||||
@@ -1490,7 +1491,7 @@ future<stop_iteration> view_update_builder::on_results() {
|
||||
existing.apply(std::max(_existing_partition_tombstone, _existing_current_tombstone));
|
||||
auto tombstone = std::max(_update_partition_tombstone, _update_current_tombstone);
|
||||
// The way we build the read command used for existing rows, we should always have a non-empty
|
||||
// tombstone, since we wouldn't have read the existing row otherwise. We don't assert that in case the
|
||||
// tombstone, since we wouldn't have read the existing row otherwise. We don't SCYLLA_ASSERT that in case the
|
||||
// read method ever changes.
|
||||
if (tombstone) {
|
||||
auto update = clustering_row(existing.key(), row_tombstone(std::move(tombstone)), row_marker(), ::row());
|
||||
@@ -1516,11 +1517,11 @@ future<stop_iteration> view_update_builder::on_results() {
|
||||
}
|
||||
// We're updating a row that had pre-existing data
|
||||
if (_update->is_range_tombstone_change()) {
|
||||
assert(_existing->is_range_tombstone_change());
|
||||
SCYLLA_ASSERT(_existing->is_range_tombstone_change());
|
||||
_existing_current_tombstone = std::move(*_existing).as_range_tombstone_change().tombstone();
|
||||
_update_current_tombstone = std::move(*_update).as_range_tombstone_change().tombstone();
|
||||
} else if (_update->is_clustering_row()) {
|
||||
assert(_existing->is_clustering_row());
|
||||
SCYLLA_ASSERT(_existing->is_clustering_row());
|
||||
_update->mutate_as_clustering_row(*_schema, [&] (clustering_row& cr) mutable {
|
||||
cr.apply(std::max(_update_partition_tombstone, _update_current_tombstone));
|
||||
});
|
||||
@@ -1752,7 +1753,7 @@ get_view_natural_endpoint(
|
||||
}
|
||||
}
|
||||
|
||||
assert(base_endpoints.size() == view_endpoints.size());
|
||||
SCYLLA_ASSERT(base_endpoints.size() == view_endpoints.size());
|
||||
auto base_it = std::find(base_endpoints.begin(), base_endpoints.end(), me);
|
||||
if (base_it == base_endpoints.end()) {
|
||||
// This node is not a base replica of this key, so we return empty
|
||||
@@ -2700,7 +2701,7 @@ future<> view_builder::mark_as_built(view_ptr view) {
|
||||
}
|
||||
|
||||
future<> view_builder::mark_existing_views_as_built() {
|
||||
assert(this_shard_id() == 0);
|
||||
SCYLLA_ASSERT(this_shard_id() == 0);
|
||||
auto views = _db.get_views();
|
||||
co_await coroutine::parallel_for_each(views, [this] (view_ptr& view) {
|
||||
return mark_as_built(view);
|
||||
@@ -2890,7 +2891,7 @@ delete_ghost_rows_visitor::delete_ghost_rows_visitor(service::storage_proxy& pro
|
||||
{}
|
||||
|
||||
void delete_ghost_rows_visitor::accept_new_partition(const partition_key& key, uint32_t row_count) {
|
||||
assert(thread::running_in_thread());
|
||||
SCYLLA_ASSERT(thread::running_in_thread());
|
||||
_view_pk = key;
|
||||
}
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
#include <seastar/coroutine/maybe_yield.hh>
|
||||
#include "dht/ring_position.hh"
|
||||
#include "dht/token-sharding.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/class_registrator.hh"
|
||||
#include <boost/range/adaptor/map.hpp>
|
||||
#include <boost/range/irange.hpp>
|
||||
@@ -423,7 +424,7 @@ future<dht::partition_range_vector> subtract_ranges(const schema& schema, const
|
||||
++range_to_subtract;
|
||||
break;
|
||||
default:
|
||||
assert(size <= 2);
|
||||
SCYLLA_ASSERT(size <= 2);
|
||||
}
|
||||
co_await coroutine::maybe_yield();
|
||||
}
|
||||
@@ -442,7 +443,7 @@ dht::token_range_vector split_token_range_msb(unsigned most_significant_bits) {
|
||||
}
|
||||
uint64_t number_of_ranges = 1 << most_significant_bits;
|
||||
ret.reserve(number_of_ranges);
|
||||
assert(most_significant_bits < 64);
|
||||
SCYLLA_ASSERT(most_significant_bits < 64);
|
||||
dht::token prev_last_token;
|
||||
for (uint64_t i = 0; i < number_of_ranges; i++) {
|
||||
std::optional<dht::token_range::bound> start_bound;
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
#include <fmt/ranges.h>
|
||||
#include <seastar/core/semaphore.hh>
|
||||
#include <seastar/core/sleep.hh>
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/stall_free.hh"
|
||||
|
||||
namespace dht {
|
||||
@@ -118,7 +119,7 @@ range_streamer::get_all_ranges_with_sources_for(const sstring& keyspace_name, lo
|
||||
std::unordered_map<dht::token_range, std::vector<inet_address>>
|
||||
range_streamer::get_all_ranges_with_strict_sources_for(const sstring& keyspace_name, locator::vnode_effective_replication_map_ptr erm, dht::token_range_vector desired_ranges, gms::gossiper& gossiper) {
|
||||
logger.debug("{} ks={}", __func__, keyspace_name);
|
||||
assert (_tokens.empty() == false);
|
||||
SCYLLA_ASSERT (_tokens.empty() == false);
|
||||
|
||||
auto& strat = erm->get_replication_strategy();
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "dht/token.hh"
|
||||
#include <seastar/core/smp.hh>
|
||||
#include <boost/container/static_vector.hpp>
|
||||
@@ -62,7 +63,7 @@ public:
|
||||
*
|
||||
* [] (const token& t) {
|
||||
* auto shards = shard_for_writes();
|
||||
* assert(shards.size() <= 1);
|
||||
* SCYLLA_ASSERT(shards.size() <= 1);
|
||||
* return shards.empty() ? 0 : shards[0];
|
||||
* }
|
||||
*
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <unordered_set>
|
||||
|
||||
#include <seastar/core/abort_source.hh>
|
||||
@@ -192,7 +193,7 @@ failure_detector::impl::impl(
|
||||
}
|
||||
|
||||
void failure_detector::impl::send_update_endpoint(pinger::endpoint_id ep, endpoint_update update) {
|
||||
assert(this_shard_id() == 0);
|
||||
SCYLLA_ASSERT(this_shard_id() == 0);
|
||||
|
||||
auto it = _endpoint_updates.find(ep);
|
||||
if (it == _endpoint_updates.end()) {
|
||||
@@ -205,7 +206,7 @@ void failure_detector::impl::send_update_endpoint(pinger::endpoint_id ep, endpoi
|
||||
}
|
||||
|
||||
future<> failure_detector::impl::update_endpoint_fiber() {
|
||||
assert(this_shard_id() == 0);
|
||||
SCYLLA_ASSERT(this_shard_id() == 0);
|
||||
|
||||
while (true) {
|
||||
co_await _endpoint_changed.wait([this] { return !_endpoint_updates.empty(); });
|
||||
@@ -246,7 +247,7 @@ future<> failure_detector::impl::update_endpoint_fiber() {
|
||||
}
|
||||
|
||||
future<> failure_detector::impl::add_endpoint(pinger::endpoint_id ep) {
|
||||
assert(this_shard_id() == 0);
|
||||
SCYLLA_ASSERT(this_shard_id() == 0);
|
||||
|
||||
if (_workers.contains(ep)) {
|
||||
co_return;
|
||||
@@ -254,7 +255,7 @@ future<> failure_detector::impl::add_endpoint(pinger::endpoint_id ep) {
|
||||
|
||||
// Pick a shard with the smallest number of workers to create a new worker.
|
||||
auto shard = std::distance(_num_workers.begin(), std::min_element(_num_workers.begin(), _num_workers.end()));
|
||||
assert(_num_workers.size() == smp::count);
|
||||
SCYLLA_ASSERT(_num_workers.size() == smp::count);
|
||||
|
||||
++_num_workers[shard];
|
||||
auto [it, _] = _workers.emplace(ep, shard);
|
||||
@@ -269,7 +270,7 @@ future<> failure_detector::impl::add_endpoint(pinger::endpoint_id ep) {
|
||||
}
|
||||
|
||||
future<> failure_detector::impl::remove_endpoint(pinger::endpoint_id ep) {
|
||||
assert(this_shard_id() == 0);
|
||||
SCYLLA_ASSERT(this_shard_id() == 0);
|
||||
|
||||
auto it = _workers.find(ep);
|
||||
if (it == _workers.end()) {
|
||||
@@ -279,8 +280,8 @@ future<> failure_detector::impl::remove_endpoint(pinger::endpoint_id ep) {
|
||||
auto shard = it->second;
|
||||
co_await _parent.container().invoke_on(shard, [ep] (failure_detector& fd) { return fd._impl->destroy_worker(ep); });
|
||||
|
||||
assert(_num_workers.size() == smp::count);
|
||||
assert(shard < _num_workers.size());
|
||||
SCYLLA_ASSERT(_num_workers.size() == smp::count);
|
||||
SCYLLA_ASSERT(shard < _num_workers.size());
|
||||
--_num_workers[shard];
|
||||
_workers.erase(it);
|
||||
|
||||
@@ -374,8 +375,8 @@ endpoint_worker::endpoint_worker(failure_detector::impl& fd, pinger::endpoint_id
|
||||
}
|
||||
|
||||
endpoint_worker::~endpoint_worker() {
|
||||
assert(_ping_fiber.available());
|
||||
assert(_notify_fiber.available());
|
||||
SCYLLA_ASSERT(_ping_fiber.available());
|
||||
SCYLLA_ASSERT(_notify_fiber.available());
|
||||
}
|
||||
|
||||
future<subscription> failure_detector::register_listener(listener& l, clock::interval_t threshold) {
|
||||
@@ -624,7 +625,7 @@ future<> endpoint_worker::notify_fiber() noexcept {
|
||||
auto& listeners = it->second.listeners;
|
||||
auto& endpoint_liveness = it->second.endpoint_liveness[_id];
|
||||
bool alive = endpoint_liveness.alive;
|
||||
assert(alive != endpoint_liveness.marked_alive);
|
||||
SCYLLA_ASSERT(alive != endpoint_liveness.marked_alive);
|
||||
endpoint_liveness.marked_alive = alive;
|
||||
|
||||
try {
|
||||
@@ -680,7 +681,7 @@ future<> failure_detector::stop() {
|
||||
|
||||
co_await container().invoke_on_all([] (failure_detector& fd) -> future<> {
|
||||
// All subscriptions must be destroyed before stopping the fd.
|
||||
assert(fd._impl->_registered.empty());
|
||||
SCYLLA_ASSERT(fd._impl->_registered.empty());
|
||||
|
||||
// There are no concurrent `{create,destroy}_worker` calls running since we waited for `update_endpoint_fiber` to finish.
|
||||
while (!fd._impl->_shard_workers.empty()) {
|
||||
@@ -697,13 +698,13 @@ future<> failure_detector::stop() {
|
||||
}
|
||||
|
||||
failure_detector::impl::~impl() {
|
||||
assert(_shard_workers.empty());
|
||||
assert(_destroy_subscriptions.available());
|
||||
assert(_update_endpoint_fiber.available());
|
||||
SCYLLA_ASSERT(_shard_workers.empty());
|
||||
SCYLLA_ASSERT(_destroy_subscriptions.available());
|
||||
SCYLLA_ASSERT(_update_endpoint_fiber.available());
|
||||
}
|
||||
|
||||
failure_detector::~failure_detector() {
|
||||
assert(!_impl);
|
||||
SCYLLA_ASSERT(!_impl);
|
||||
}
|
||||
|
||||
} // namespace direct_failure_detector
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <boost/iterator/transform_iterator.hpp>
|
||||
#include <seastar/core/bitset-iter.hh>
|
||||
|
||||
@@ -31,9 +32,9 @@
|
||||
*
|
||||
* static_assert(my_enumset::frozen<x::A, x::B>::contains<x::A>(), "it should...");
|
||||
*
|
||||
* assert(my_enumset::frozen<x::A, x::B>::contains(my_enumset::prepare<x::A>()));
|
||||
* SCYLLA_ASSERT(my_enumset::frozen<x::A, x::B>::contains(my_enumset::prepare<x::A>()));
|
||||
*
|
||||
* assert(my_enumset::frozen<x::A, x::B>::contains(x::A));
|
||||
* SCYLLA_ASSERT(my_enumset::frozen<x::A, x::B>::contains(x::A));
|
||||
*
|
||||
*/
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
#include <boost/range/adaptor/map.hpp>
|
||||
#include "gms/gossiper.hh"
|
||||
#include "gms/i_endpoint_state_change_subscriber.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/error_injection.hh"
|
||||
#include "service/storage_service.hh"
|
||||
|
||||
@@ -59,7 +60,7 @@ feature_config feature_config_from_db_config(const db::config& cfg, std::set<sst
|
||||
case sstables::sstable_version_types::me:
|
||||
break;
|
||||
default:
|
||||
assert(false && "Invalid sstable_format");
|
||||
SCYLLA_ASSERT(false && "Invalid sstable_format");
|
||||
}
|
||||
|
||||
if (!cfg.enable_user_defined_functions()) {
|
||||
@@ -101,7 +102,7 @@ future<> feature_service::stop() {
|
||||
|
||||
void feature_service::register_feature(feature& f) {
|
||||
auto i = _registered_features.emplace(f.name(), f);
|
||||
assert(i.second);
|
||||
SCYLLA_ASSERT(i.second);
|
||||
}
|
||||
|
||||
void feature_service::unregister_feature(feature& f) {
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <cassert>
|
||||
#include <chrono>
|
||||
#include <utility>
|
||||
@@ -22,7 +23,7 @@ generation_type get_generation_number() {
|
||||
int generation_number = duration_cast<seconds>(now).count();
|
||||
auto ret = generation_type(generation_number);
|
||||
// Make sure the clock didn't overflow the 32 bits value
|
||||
assert(ret.value() == generation_number);
|
||||
SCYLLA_ASSERT(ret.value() == generation_number);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
@@ -43,6 +43,7 @@
|
||||
#include "gms/generation-number.hh"
|
||||
#include "locator/token_metadata.hh"
|
||||
#include "seastar/rpc/rpc_types.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/exceptions.hh"
|
||||
#include "utils/error_injection.hh"
|
||||
#include "utils/to_string.hh"
|
||||
@@ -842,7 +843,7 @@ gossiper::endpoint_permit::~endpoint_permit() {
|
||||
|
||||
bool gossiper::endpoint_permit::release() noexcept {
|
||||
if (auto ptr = std::exchange(_ptr, nullptr)) {
|
||||
assert(ptr->pid == _permit_id);
|
||||
SCYLLA_ASSERT(ptr->pid == _permit_id);
|
||||
logger.debug("{}: lock_endpoint {}: released: permit_id={} holders={}", _caller.function_name(), _addr, _permit_id, ptr->holders);
|
||||
if (!--ptr->holders) {
|
||||
logger.debug("{}: lock_endpoint {}: released: permit_id={}", _caller.function_name(), _addr, _permit_id);
|
||||
@@ -885,7 +886,7 @@ future<gossiper::endpoint_permit> gossiper::lock_endpoint(inet_address ep, permi
|
||||
auto sub = _abort_source.subscribe([&aoe] () noexcept {
|
||||
aoe.abort_source().request_abort();
|
||||
});
|
||||
assert(sub); // due to check() above
|
||||
SCYLLA_ASSERT(sub); // due to check() above
|
||||
try {
|
||||
eptr->units = co_await get_units(eptr->sem, 1, aoe.abort_source());
|
||||
break;
|
||||
@@ -1043,7 +1044,7 @@ future<> gossiper::failure_detector_loop() {
|
||||
// This needs to be run with a lock
|
||||
future<> gossiper::replicate_live_endpoints_on_change(foreign_ptr<std::unique_ptr<live_and_unreachable_endpoints>> data0, uint64_t new_version) {
|
||||
auto coordinator = this_shard_id();
|
||||
assert(coordinator == 0);
|
||||
SCYLLA_ASSERT(coordinator == 0);
|
||||
//
|
||||
// Gossiper task runs only on CPU0:
|
||||
//
|
||||
@@ -1721,7 +1722,7 @@ future<> gossiper::real_mark_alive(inet_address addr) {
|
||||
|
||||
locator::host_id id(utils::UUID(app_state_ptr->value()));
|
||||
auto second_node_ip = handler.get("second_node_ip");
|
||||
assert(second_node_ip);
|
||||
SCYLLA_ASSERT(second_node_ip);
|
||||
|
||||
logger.info("real_mark_alive {}/{} second_node_ip={}", id, endpoint, *second_node_ip);
|
||||
if (endpoint == gms::inet_address(sstring{*second_node_ip})) {
|
||||
@@ -1888,7 +1889,7 @@ bool gossiper::is_silent_shutdown_state(const endpoint_state& ep_state) const{
|
||||
}
|
||||
|
||||
future<> gossiper::apply_new_states(inet_address addr, endpoint_state local_state, const endpoint_state& remote_state, permit_id pid) {
|
||||
// don't assert here, since if the node restarts the version will go back to zero
|
||||
// don't SCYLLA_ASSERT here, since if the node restarts the version will go back to zero
|
||||
//int oldVersion = local_state.get_heart_beat_state().get_heart_beat_version();
|
||||
|
||||
verify_permit(addr, pid);
|
||||
|
||||
17
interval.hh
17
interval.hh
@@ -8,6 +8,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <list>
|
||||
#include <vector>
|
||||
#include <optional>
|
||||
@@ -138,7 +139,7 @@ public:
|
||||
// the point is before the interval (works only for non wrapped intervals)
|
||||
// Comparator must define a total ordering on T.
|
||||
bool before(const T& point, IntervalComparatorFor<T> auto&& cmp) const {
|
||||
assert(!is_wrap_around(cmp));
|
||||
SCYLLA_ASSERT(!is_wrap_around(cmp));
|
||||
if (!start()) {
|
||||
return false; //open start, no points before
|
||||
}
|
||||
@@ -154,8 +155,8 @@ public:
|
||||
// the other interval is before this interval (works only for non wrapped intervals)
|
||||
// Comparator must define a total ordering on T.
|
||||
bool other_is_before(const wrapping_interval<T>& o, IntervalComparatorFor<T> auto&& cmp) const {
|
||||
assert(!is_wrap_around(cmp));
|
||||
assert(!o.is_wrap_around(cmp));
|
||||
SCYLLA_ASSERT(!is_wrap_around(cmp));
|
||||
SCYLLA_ASSERT(!o.is_wrap_around(cmp));
|
||||
if (!start() || !o.end()) {
|
||||
return false;
|
||||
}
|
||||
@@ -181,7 +182,7 @@ public:
|
||||
// the point is after the interval (works only for non wrapped intervals)
|
||||
// Comparator must define a total ordering on T.
|
||||
bool after(const T& point, IntervalComparatorFor<T> auto&& cmp) const {
|
||||
assert(!is_wrap_around(cmp));
|
||||
SCYLLA_ASSERT(!is_wrap_around(cmp));
|
||||
if (!end()) {
|
||||
return false; //open end, no points after
|
||||
}
|
||||
@@ -211,8 +212,8 @@ public:
|
||||
}
|
||||
|
||||
// No interval should reach this point as wrap around.
|
||||
assert(!this_wraps);
|
||||
assert(!other_wraps);
|
||||
SCYLLA_ASSERT(!this_wraps);
|
||||
SCYLLA_ASSERT(!other_wraps);
|
||||
|
||||
// if both this and other have an open start, the two intervals will overlap.
|
||||
if (!start() && !other.start()) {
|
||||
@@ -377,7 +378,7 @@ public:
|
||||
// split_point will belong to first interval
|
||||
// Comparator must define a total ordering on T.
|
||||
std::pair<wrapping_interval<T>, wrapping_interval<T>> split(const T& split_point, IntervalComparatorFor<T> auto&& cmp) const {
|
||||
assert(contains(split_point, std::forward<decltype(cmp)>(cmp)));
|
||||
SCYLLA_ASSERT(contains(split_point, std::forward<decltype(cmp)>(cmp)));
|
||||
wrapping_interval left(start(), bound(split_point));
|
||||
wrapping_interval right(bound(split_point, false), end());
|
||||
return std::make_pair(std::move(left), std::move(right));
|
||||
@@ -584,7 +585,7 @@ public:
|
||||
// split_point will belong to first interval
|
||||
// Comparator must define a total ordering on T.
|
||||
std::pair<interval<T>, interval<T>> split(const T& split_point, IntervalComparatorFor<T> auto&& cmp) const {
|
||||
assert(contains(split_point, std::forward<decltype(cmp)>(cmp)));
|
||||
SCYLLA_ASSERT(contains(split_point, std::forward<decltype(cmp)>(cmp)));
|
||||
interval left(start(), bound(split_point));
|
||||
interval right(bound(split_point, false), end());
|
||||
return std::make_pair(std::move(left), std::move(right));
|
||||
|
||||
15
lang/lua.cc
15
lang/lua.cc
@@ -12,6 +12,7 @@
|
||||
#include "lang/lua_scylla_types.hh"
|
||||
#include "exceptions/exceptions.hh"
|
||||
#include "concrete_types.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/utf8.hh"
|
||||
#include "utils/ascii.hh"
|
||||
#include "utils/date.h"
|
||||
@@ -41,7 +42,7 @@ struct alloc_state {
|
||||
: max(max)
|
||||
, max_contiguous(max_contiguous) {
|
||||
// The max and max_contiguous limits are responsible for avoiding overflows.
|
||||
assert(max + max_contiguous >= max);
|
||||
SCYLLA_ASSERT(max + max_contiguous >= max);
|
||||
}
|
||||
};
|
||||
|
||||
@@ -79,7 +80,7 @@ static void* lua_alloc(void* ud, void* ptr, size_t osize, size_t nsize) {
|
||||
size_t next = s->allocated + nsize;
|
||||
|
||||
// The max and max_contiguous limits should be small enough to avoid overflows.
|
||||
assert(next >= s->allocated);
|
||||
SCYLLA_ASSERT(next >= s->allocated);
|
||||
|
||||
if (ptr) {
|
||||
next -= osize;
|
||||
@@ -119,7 +120,7 @@ static void debug_hook(lua_State* l, lua_Debug* ar) {
|
||||
return;
|
||||
}
|
||||
if (lua_yield(l, 0)) {
|
||||
assert(0 && "lua_yield failed");
|
||||
SCYLLA_ASSERT(0 && "lua_yield failed");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -223,7 +224,7 @@ requires CanHandleRawLuaTypes<Func>
|
||||
static auto visit_lua_raw_value(lua_State* l, int index, Func&& f) {
|
||||
switch (lua_type(l, index)) {
|
||||
case LUA_TNONE:
|
||||
assert(0 && "Invalid index");
|
||||
SCYLLA_ASSERT(0 && "Invalid index");
|
||||
case LUA_TNUMBER:
|
||||
if (lua_isinteger(l, index)) {
|
||||
return f(lua_tointeger(l, index));
|
||||
@@ -244,9 +245,9 @@ static auto visit_lua_raw_value(lua_State* l, int index, Func&& f) {
|
||||
return f(*get_decimal(l, index));
|
||||
case LUA_TTHREAD:
|
||||
case LUA_TLIGHTUSERDATA:
|
||||
assert(0 && "We never make thread or light user data visible to scripts");
|
||||
SCYLLA_ASSERT(0 && "We never make thread or light user data visible to scripts");
|
||||
}
|
||||
assert(0 && "invalid lua type");
|
||||
SCYLLA_ASSERT(0 && "invalid lua type");
|
||||
}
|
||||
|
||||
template <typename Func>
|
||||
@@ -362,7 +363,7 @@ static const big_decimal& get_decimal_in_binary_op(lua_State* l) {
|
||||
if (a == nullptr) {
|
||||
lua_insert(l, 1);
|
||||
a = get_decimal(l, 1);
|
||||
assert(a);
|
||||
SCYLLA_ASSERT(a);
|
||||
}
|
||||
return *a;
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
#include <boost/algorithm/string/classification.hpp>
|
||||
#include <boost/algorithm/string/split.hpp>
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/class_registrator.hh"
|
||||
|
||||
namespace locator {
|
||||
@@ -29,13 +30,13 @@ future<> ec2_snitch::load_config(bool prefer_local) {
|
||||
if (this_shard_id() == io_cpu_id()) {
|
||||
auto token = co_await aws_api_call(AWS_QUERY_SERVER_ADDR, AWS_QUERY_SERVER_PORT, TOKEN_REQ_ENDPOINT, std::nullopt);
|
||||
auto az = co_await aws_api_call(AWS_QUERY_SERVER_ADDR, AWS_QUERY_SERVER_PORT, ZONE_NAME_QUERY_REQ, token);
|
||||
assert(az.size());
|
||||
SCYLLA_ASSERT(az.size());
|
||||
|
||||
std::vector<std::string> splits;
|
||||
|
||||
// Split "us-east-1a" or "asia-1a" into "us-east"/"1a" and "asia"/"1a".
|
||||
split(splits, az, is_any_of("-"));
|
||||
assert(splits.size() > 1);
|
||||
SCYLLA_ASSERT(splits.size() > 1);
|
||||
|
||||
sstring my_rack = splits[splits.size() - 1];
|
||||
|
||||
|
||||
@@ -22,6 +22,7 @@
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <boost/range/adaptors.hpp>
|
||||
#include "exceptions/exceptions.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/class_registrator.hh"
|
||||
#include "utils/hash.hh"
|
||||
|
||||
@@ -195,7 +196,7 @@ public:
|
||||
, _racks(_tp.get_datacenter_racks())
|
||||
{
|
||||
// not aware of any cluster members
|
||||
assert(!_all_endpoints.empty() && !_racks.empty());
|
||||
SCYLLA_ASSERT(!_all_endpoints.empty() && !_racks.empty());
|
||||
|
||||
auto size_for = [](auto& map, auto& k) {
|
||||
auto i = map.find(k);
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <unordered_map>
|
||||
#include <unordered_set>
|
||||
|
||||
@@ -73,17 +74,17 @@ protected:
|
||||
std::unordered_map<sstring, sstring> _prop_values;
|
||||
|
||||
sharded<snitch_ptr>& container() noexcept {
|
||||
assert(_backreference != nullptr);
|
||||
SCYLLA_ASSERT(_backreference != nullptr);
|
||||
return _backreference->container();
|
||||
}
|
||||
|
||||
snitch_ptr& local() noexcept {
|
||||
assert(_backreference != nullptr);
|
||||
SCYLLA_ASSERT(_backreference != nullptr);
|
||||
return *_backreference;
|
||||
}
|
||||
|
||||
const snitch_ptr& local() const noexcept {
|
||||
assert(_backreference != nullptr);
|
||||
SCYLLA_ASSERT(_backreference != nullptr);
|
||||
return *_backreference;
|
||||
}
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
|
||||
#include "simple_strategy.hh"
|
||||
#include "exceptions/exceptions.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/class_registrator.hh"
|
||||
#include <boost/algorithm/string.hpp>
|
||||
|
||||
@@ -50,7 +51,7 @@ future<host_id_set> simple_strategy::calculate_natural_endpoints(const token& t,
|
||||
}
|
||||
|
||||
auto ep = tm.get_endpoint(token);
|
||||
assert(ep);
|
||||
SCYLLA_ASSERT(ep);
|
||||
|
||||
endpoints.push_back(*ep);
|
||||
co_await coroutine::maybe_yield();
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <boost/signals2.hpp>
|
||||
#include <boost/signals2/dummy_mutex.hpp>
|
||||
|
||||
@@ -81,7 +82,7 @@ public:
|
||||
*/
|
||||
virtual gms::application_state_map get_app_states() const = 0;
|
||||
|
||||
virtual ~i_endpoint_snitch() { assert(_state == snitch_state::stopped); };
|
||||
virtual ~i_endpoint_snitch() { SCYLLA_ASSERT(_state == snitch_state::stopped); };
|
||||
|
||||
// noop by default
|
||||
virtual future<> stop() {
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
#include <seastar/coroutine/maybe_yield.hh>
|
||||
#include <boost/range/adaptors.hpp>
|
||||
#include <seastar/core/smp.hh>
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/stall_free.hh"
|
||||
|
||||
namespace locator {
|
||||
@@ -1216,7 +1217,7 @@ future<> shared_token_metadata::mutate_token_metadata(seastar::noncopyable_funct
|
||||
|
||||
future<> shared_token_metadata::mutate_on_all_shards(sharded<shared_token_metadata>& stm, seastar::noncopyable_function<future<> (token_metadata&)> func) {
|
||||
auto base_shard = this_shard_id();
|
||||
assert(base_shard == 0);
|
||||
SCYLLA_ASSERT(base_shard == 0);
|
||||
auto lk = co_await stm.local().get_lock();
|
||||
|
||||
std::vector<mutable_token_metadata_ptr> pending_token_metadata_ptr;
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
#include "log.hh"
|
||||
#include "locator/topology.hh"
|
||||
#include "locator/production_snitch_base.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/stall_free.hh"
|
||||
#include "utils/to_string.hh"
|
||||
|
||||
@@ -117,7 +118,7 @@ topology::topology(topology&& o) noexcept
|
||||
, _sort_by_proximity(o._sort_by_proximity)
|
||||
, _datacenters(std::move(o._datacenters))
|
||||
{
|
||||
assert(_shard == this_shard_id());
|
||||
SCYLLA_ASSERT(_shard == this_shard_id());
|
||||
tlogger.trace("topology[{}]: move from [{}]", fmt::ptr(this), fmt::ptr(&o));
|
||||
|
||||
for (auto& n : _nodes) {
|
||||
|
||||
3
main.cc
3
main.cc
@@ -21,6 +21,7 @@
|
||||
#include <seastar/core/timer.hh>
|
||||
#include "service/qos/raft_service_level_distributed_data_accessor.hh"
|
||||
#include "tasks/task_manager.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/build_id.hh"
|
||||
#include "supervisor.hh"
|
||||
#include "replica/database.hh"
|
||||
@@ -528,7 +529,7 @@ static auto defer_verbose_shutdown(const char* what, Func&& func) {
|
||||
|
||||
// Call _exit() rather than exit() to exit immediately
|
||||
// without calling exit handlers, avoiding
|
||||
// boost::intrusive::detail::destructor_impl assert failure
|
||||
// boost::intrusive::detail::destructor_impl SCYLLA_ASSERT failure
|
||||
// from ~segment_pool exit handler.
|
||||
_exit(255);
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <fmt/ranges.h>
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/coroutine/as_future.hh>
|
||||
@@ -661,9 +662,9 @@ static constexpr std::array<uint8_t, static_cast<size_t>(messaging_verb::LAST)>
|
||||
for (size_t i = 0; i < tab.size(); ++i) {
|
||||
tab[i] = do_get_rpc_client_idx(messaging_verb(i));
|
||||
|
||||
// This assert guards against adding new connection types without
|
||||
// This SCYLLA_ASSERT guards against adding new connection types without
|
||||
// updating *_CONNECTION_COUNT constants.
|
||||
assert(tab[i] < PER_TENANT_CONNECTION_COUNT + PER_SHARD_CONNECTION_COUNT);
|
||||
SCYLLA_ASSERT(tab[i] < PER_TENANT_CONNECTION_COUNT + PER_SHARD_CONNECTION_COUNT);
|
||||
}
|
||||
return tab;
|
||||
}
|
||||
@@ -709,7 +710,7 @@ messaging_service::initial_scheduling_info() const {
|
||||
}
|
||||
}
|
||||
|
||||
assert(sched_infos.size() == PER_SHARD_CONNECTION_COUNT +
|
||||
SCYLLA_ASSERT(sched_infos.size() == PER_SHARD_CONNECTION_COUNT +
|
||||
_scheduling_config.statement_tenants.size() * PER_TENANT_CONNECTION_COUNT);
|
||||
return sched_infos;
|
||||
};
|
||||
@@ -798,7 +799,7 @@ gms::inet_address messaging_service::get_public_endpoint_for(const gms::inet_add
|
||||
}
|
||||
|
||||
shared_ptr<messaging_service::rpc_protocol_client_wrapper> messaging_service::get_rpc_client(messaging_verb verb, msg_addr id) {
|
||||
assert(!_shutting_down);
|
||||
SCYLLA_ASSERT(!_shutting_down);
|
||||
if (_cfg.maintenance_mode) {
|
||||
on_internal_error(mlogger, "This node is in maintenance mode, it shouldn't contact other nodes");
|
||||
}
|
||||
@@ -900,7 +901,7 @@ shared_ptr<messaging_service::rpc_protocol_client_wrapper> messaging_service::ge
|
||||
opts.isolation_cookie = _scheduling_info_for_connection_index[idx].isolation_cookie;
|
||||
opts.metrics_domain = client_metrics_domain(idx, id.addr); // not just `addr` as the latter may be internal IP
|
||||
|
||||
assert(!must_encrypt || _credentials);
|
||||
SCYLLA_ASSERT(!must_encrypt || _credentials);
|
||||
|
||||
auto client = must_encrypt ?
|
||||
::make_shared<rpc_protocol_client_wrapper>(_rpc->protocol(), std::move(opts),
|
||||
@@ -916,7 +917,7 @@ shared_ptr<messaging_service::rpc_protocol_client_wrapper> messaging_service::ge
|
||||
// the topology (so we always set `topology_ignored` to `false` in that case).
|
||||
bool topology_ignored = idx != TOPOLOGY_INDEPENDENT_IDX && topology_status.has_value() && *topology_status == false;
|
||||
auto res = _clients[idx].emplace(id, shard_info(std::move(client), topology_ignored));
|
||||
assert(res.second);
|
||||
SCYLLA_ASSERT(res.second);
|
||||
it = res.first;
|
||||
uint32_t src_cpu_id = this_shard_id();
|
||||
// No reply is received, nothing to wait for.
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
#include "timestamp.hh"
|
||||
#include "mutation/tombstone.hh"
|
||||
#include "gc_clock.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/managed_bytes.hh"
|
||||
#include <seastar/net//byteorder.hh>
|
||||
#include <seastar/util/bool_class.hh>
|
||||
@@ -126,18 +127,18 @@ public:
|
||||
}
|
||||
// Can be called only when is_dead() is true.
|
||||
static gc_clock::time_point deletion_time(atomic_cell_value_view cell) {
|
||||
assert(is_dead(cell));
|
||||
SCYLLA_ASSERT(is_dead(cell));
|
||||
return gc_clock::time_point(gc_clock::duration(get_field<int64_t>(cell, deletion_time_offset)));
|
||||
}
|
||||
// Can be called only when is_live_and_has_ttl() is true.
|
||||
static gc_clock::time_point expiry(atomic_cell_value_view cell) {
|
||||
assert(is_live_and_has_ttl(cell));
|
||||
SCYLLA_ASSERT(is_live_and_has_ttl(cell));
|
||||
auto expiry = get_field<int64_t>(cell, expiry_offset);
|
||||
return gc_clock::time_point(gc_clock::duration(expiry));
|
||||
}
|
||||
// Can be called only when is_live_and_has_ttl() is true.
|
||||
static gc_clock::duration ttl(atomic_cell_value_view cell) {
|
||||
assert(is_live_and_has_ttl(cell));
|
||||
SCYLLA_ASSERT(is_live_and_has_ttl(cell));
|
||||
return gc_clock::duration(get_field<int32_t>(cell, ttl_offset));
|
||||
}
|
||||
static managed_bytes make_dead(api::timestamp_type timestamp, gc_clock::time_point deletion_time) {
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
#include "mutation_partition.hh"
|
||||
#include "keys.hh"
|
||||
#include "schema/schema_fwd.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/hashing.hh"
|
||||
#include "mutation_fragment_v2.hh"
|
||||
#include "mutation_consumer.hh"
|
||||
@@ -302,7 +303,7 @@ std::optional<stop_iteration> consume_clustering_fragments(schema_ptr s, mutatio
|
||||
if (crs_it == crs_end && rts_it == rts_end) {
|
||||
flush_tombstones(position_in_partition::after_all_clustered_rows());
|
||||
} else {
|
||||
assert(preempt && need_preempt());
|
||||
SCYLLA_ASSERT(preempt && need_preempt());
|
||||
return std::nullopt;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
#include "mutation_fragment.hh"
|
||||
#include "mutation_fragment_v2.hh"
|
||||
#include "clustering_interval_set.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/hashing.hh"
|
||||
#include "utils/xx_hasher.hh"
|
||||
|
||||
@@ -172,13 +173,13 @@ struct get_key_visitor {
|
||||
|
||||
const clustering_key_prefix& mutation_fragment::key() const
|
||||
{
|
||||
assert(has_key());
|
||||
SCYLLA_ASSERT(has_key());
|
||||
return visit(get_key_visitor());
|
||||
}
|
||||
|
||||
void mutation_fragment::apply(const schema& s, mutation_fragment&& mf)
|
||||
{
|
||||
assert(mergeable_with(mf));
|
||||
SCYLLA_ASSERT(mergeable_with(mf));
|
||||
switch (_kind) {
|
||||
case mutation_fragment::kind::partition_start:
|
||||
_data->_partition_start.partition_tombstone().apply(mf._data->_partition_start.partition_tombstone());
|
||||
@@ -257,13 +258,13 @@ auto fmt::formatter<mutation_fragment::printer>::format(const mutation_fragment:
|
||||
|
||||
const clustering_key_prefix& mutation_fragment_v2::key() const
|
||||
{
|
||||
assert(has_key());
|
||||
SCYLLA_ASSERT(has_key());
|
||||
return visit(get_key_visitor());
|
||||
}
|
||||
|
||||
void mutation_fragment_v2::apply(const schema& s, mutation_fragment_v2&& mf)
|
||||
{
|
||||
assert(mergeable_with(mf));
|
||||
SCYLLA_ASSERT(mergeable_with(mf));
|
||||
switch (_kind) {
|
||||
case mutation_fragment_v2::kind::partition_start:
|
||||
_data->_partition_start.partition_tombstone().apply(mf._data->_partition_start.partition_tombstone());
|
||||
|
||||
@@ -28,6 +28,7 @@
|
||||
#include "clustering_key_filter.hh"
|
||||
#include "mutation_partition_view.hh"
|
||||
#include "tombstone_gc.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/unconst.hh"
|
||||
#include "mutation/async_utils.hh"
|
||||
|
||||
@@ -144,7 +145,7 @@ mutation_partition::mutation_partition(const schema& s, const mutation_partition
|
||||
#endif
|
||||
{
|
||||
#ifdef SEASTAR_DEBUG
|
||||
assert(x._schema_version == _schema_version);
|
||||
SCYLLA_ASSERT(x._schema_version == _schema_version);
|
||||
#endif
|
||||
auto cloner = [&s] (const rows_entry* x) -> rows_entry* {
|
||||
return current_allocator().construct<rows_entry>(s, *x);
|
||||
@@ -164,7 +165,7 @@ mutation_partition::mutation_partition(const mutation_partition& x, const schema
|
||||
#endif
|
||||
{
|
||||
#ifdef SEASTAR_DEBUG
|
||||
assert(x._schema_version == _schema_version);
|
||||
SCYLLA_ASSERT(x._schema_version == _schema_version);
|
||||
#endif
|
||||
try {
|
||||
for(auto&& r : ck_ranges) {
|
||||
@@ -194,7 +195,7 @@ mutation_partition::mutation_partition(mutation_partition&& x, const schema& sch
|
||||
#endif
|
||||
{
|
||||
#ifdef SEASTAR_DEBUG
|
||||
assert(x._schema_version == _schema_version);
|
||||
SCYLLA_ASSERT(x._schema_version == _schema_version);
|
||||
#endif
|
||||
{
|
||||
auto deleter = current_deleter<rows_entry>();
|
||||
@@ -280,8 +281,8 @@ mutation_partition::apply(const schema& s, const mutation_fragment& mf) {
|
||||
stop_iteration mutation_partition::apply_monotonically(const schema& s, mutation_partition&& p, cache_tracker* tracker,
|
||||
mutation_application_stats& app_stats, is_preemptible preemptible, apply_resume& res) {
|
||||
#ifdef SEASTAR_DEBUG
|
||||
assert(s.version() == _schema_version);
|
||||
assert(p._schema_version == _schema_version);
|
||||
SCYLLA_ASSERT(s.version() == _schema_version);
|
||||
SCYLLA_ASSERT(p._schema_version == _schema_version);
|
||||
#endif
|
||||
_tombstone.apply(p._tombstone);
|
||||
_static_row.apply_monotonically(s, column_kind::static_column, std::move(p._static_row));
|
||||
@@ -531,7 +532,7 @@ mutation_partition::tombstone_for_row(const schema& schema, const rows_entry& e)
|
||||
void
|
||||
mutation_partition::apply_row_tombstone(const schema& schema, clustering_key_prefix prefix, tombstone t) {
|
||||
check_schema(schema);
|
||||
assert(!prefix.is_full(schema));
|
||||
SCYLLA_ASSERT(!prefix.is_full(schema));
|
||||
auto start = prefix;
|
||||
_row_tombstones.apply(schema, {std::move(start), std::move(prefix), std::move(t)});
|
||||
}
|
||||
@@ -748,7 +749,7 @@ void mutation_partition::for_each_row(const schema& schema, const query::cluster
|
||||
|
||||
template<typename RowWriter>
|
||||
void write_cell(RowWriter& w, const query::partition_slice& slice, ::atomic_cell_view c) {
|
||||
assert(c.is_live());
|
||||
SCYLLA_ASSERT(c.is_live());
|
||||
auto wr = w.add().write();
|
||||
auto after_timestamp = [&, wr = std::move(wr)] () mutable {
|
||||
if (slice.options.contains<query::partition_slice::option::send_timestamp>()) {
|
||||
@@ -789,7 +790,7 @@ void write_cell(RowWriter& w, const query::partition_slice& slice, data_type typ
|
||||
|
||||
template<typename RowWriter>
|
||||
void write_counter_cell(RowWriter& w, const query::partition_slice& slice, ::atomic_cell_view c) {
|
||||
assert(c.is_live());
|
||||
SCYLLA_ASSERT(c.is_live());
|
||||
auto ccv = counter_cell_view(c);
|
||||
auto wr = w.add().write();
|
||||
[&, wr = std::move(wr)] () mutable {
|
||||
@@ -1179,8 +1180,8 @@ bool mutation_partition::equal(const schema& s, const mutation_partition& p) con
|
||||
|
||||
bool mutation_partition::equal(const schema& this_schema, const mutation_partition& p, const schema& p_schema) const {
|
||||
#ifdef SEASTAR_DEBUG
|
||||
assert(_schema_version == this_schema.version());
|
||||
assert(p._schema_version == p_schema.version());
|
||||
SCYLLA_ASSERT(_schema_version == this_schema.version());
|
||||
SCYLLA_ASSERT(p._schema_version == p_schema.version());
|
||||
#endif
|
||||
if (_tombstone != p._tombstone) {
|
||||
return false;
|
||||
@@ -1375,7 +1376,7 @@ uint32_t mutation_partition::do_compact(const schema& s,
|
||||
const tombstone_gc_state& gc_state)
|
||||
{
|
||||
check_schema(s);
|
||||
assert(row_limit > 0);
|
||||
SCYLLA_ASSERT(row_limit > 0);
|
||||
|
||||
auto gc_before = drop_tombstones_unconditionally ? gc_clock::time_point::max() :
|
||||
gc_state.get_gc_before_for_key(s.shared_from_this(), dk, query_time);
|
||||
@@ -2383,7 +2384,7 @@ void mutation_partition::set_continuity(const schema& s, const position_range& p
|
||||
i = _rows.insert_before(i, std::move(e));
|
||||
}
|
||||
|
||||
assert(i != end);
|
||||
SCYLLA_ASSERT(i != end);
|
||||
++i;
|
||||
|
||||
while (1) {
|
||||
|
||||
@@ -24,6 +24,7 @@
|
||||
#include "atomic_cell_or_collection.hh"
|
||||
#include "hashing_partition_visitor.hh"
|
||||
#include "range_tombstone_list.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/intrusive_btree.hh"
|
||||
#include "utils/preempt.hh"
|
||||
#include "utils/lru.hh"
|
||||
@@ -1486,7 +1487,7 @@ private:
|
||||
|
||||
void check_schema(const schema& s) const {
|
||||
#ifdef SEASTAR_DEBUG
|
||||
assert(s.version() == _schema_version);
|
||||
SCYLLA_ASSERT(s.version() == _schema_version);
|
||||
#endif
|
||||
}
|
||||
};
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#include <seastar/core/execution_stage.hh>
|
||||
#include "compaction/compaction_garbage_collector.hh"
|
||||
#include "mutation_partition_view.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/unconst.hh"
|
||||
|
||||
extern logging::logger mplog;
|
||||
@@ -34,7 +35,7 @@ mutation_partition_v2::mutation_partition_v2(const schema& s, const mutation_par
|
||||
#endif
|
||||
{
|
||||
#ifdef SEASTAR_DEBUG
|
||||
assert(x._schema_version == _schema_version);
|
||||
SCYLLA_ASSERT(x._schema_version == _schema_version);
|
||||
#endif
|
||||
auto cloner = [&s] (const rows_entry* x) -> rows_entry* {
|
||||
return current_allocator().construct<rows_entry>(s, *x);
|
||||
@@ -117,8 +118,8 @@ void mutation_partition_v2::apply(const schema& s, mutation_partition_v2&& p, ca
|
||||
stop_iteration mutation_partition_v2::apply_monotonically(const schema& s, const schema& p_s, mutation_partition_v2&& p, cache_tracker* tracker,
|
||||
mutation_application_stats& app_stats, preemption_check need_preempt, apply_resume& res, is_evictable evictable) {
|
||||
#ifdef SEASTAR_DEBUG
|
||||
assert(_schema_version == s.version());
|
||||
assert(p._schema_version == p_s.version());
|
||||
SCYLLA_ASSERT(_schema_version == s.version());
|
||||
SCYLLA_ASSERT(p._schema_version == p_s.version());
|
||||
#endif
|
||||
bool same_schema = s.version() == p_s.version();
|
||||
_tombstone.apply(p._tombstone);
|
||||
@@ -217,7 +218,7 @@ stop_iteration mutation_partition_v2::apply_monotonically(const schema& s, const
|
||||
// some memory for the new tree nodes. This is done by the `hold_reserve`
|
||||
// constructed after the lambda.
|
||||
if (this_sentinel) {
|
||||
assert(p_i != p._rows.end());
|
||||
SCYLLA_ASSERT(p_i != p._rows.end());
|
||||
auto rt = this_sentinel->range_tombstone();
|
||||
auto insert_result = _rows.insert_before_hint(i, std::move(this_sentinel), cmp);
|
||||
auto i2 = insert_result.first;
|
||||
@@ -233,10 +234,10 @@ stop_iteration mutation_partition_v2::apply_monotonically(const schema& s, const
|
||||
}
|
||||
}
|
||||
if (p_sentinel) {
|
||||
assert(p_i != p._rows.end());
|
||||
SCYLLA_ASSERT(p_i != p._rows.end());
|
||||
if (cmp(p_i->position(), p_sentinel->position()) == 0) {
|
||||
mplog.trace("{}: clearing attributes on {}", fmt::ptr(&p), p_i->position());
|
||||
assert(p_i->dummy());
|
||||
SCYLLA_ASSERT(p_i->dummy());
|
||||
p_i->set_continuous(false);
|
||||
p_i->set_range_tombstone({});
|
||||
} else {
|
||||
@@ -409,7 +410,7 @@ stop_iteration mutation_partition_v2::apply_monotonically(const schema& s, const
|
||||
lb_i->set_continuous(true);
|
||||
}
|
||||
} else {
|
||||
assert(i->dummy() == src_e.dummy());
|
||||
SCYLLA_ASSERT(i->dummy() == src_e.dummy());
|
||||
alloc_strategy_unique_ptr<rows_entry> s1;
|
||||
alloc_strategy_unique_ptr<rows_entry> s2;
|
||||
|
||||
@@ -521,7 +522,7 @@ stop_iteration mutation_partition_v2::apply_monotonically(const schema& s, const
|
||||
void
|
||||
mutation_partition_v2::apply_row_tombstone(const schema& schema, clustering_key_prefix prefix, tombstone t) {
|
||||
check_schema(schema);
|
||||
assert(!prefix.is_full(schema));
|
||||
SCYLLA_ASSERT(!prefix.is_full(schema));
|
||||
auto start = prefix;
|
||||
apply_row_tombstone(schema, range_tombstone{std::move(start), std::move(prefix), std::move(t)});
|
||||
}
|
||||
@@ -842,8 +843,8 @@ bool mutation_partition_v2::equal(const schema& s, const mutation_partition_v2&
|
||||
|
||||
bool mutation_partition_v2::equal(const schema& this_schema, const mutation_partition_v2& p, const schema& p_schema) const {
|
||||
#ifdef SEASTAR_DEBUG
|
||||
assert(_schema_version == this_schema.version());
|
||||
assert(p._schema_version == p_schema.version());
|
||||
SCYLLA_ASSERT(_schema_version == this_schema.version());
|
||||
SCYLLA_ASSERT(p._schema_version == p_schema.version());
|
||||
#endif
|
||||
if (_tombstone != p._tombstone) {
|
||||
return false;
|
||||
@@ -1010,7 +1011,7 @@ void mutation_partition_v2::set_continuity(const schema& s, const position_range
|
||||
i = _rows.insert_before(i, std::move(e));
|
||||
}
|
||||
|
||||
assert(i != end);
|
||||
SCYLLA_ASSERT(i != end);
|
||||
++i;
|
||||
|
||||
while (1) {
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <iosfwd>
|
||||
#include <boost/intrusive/set.hpp>
|
||||
#include <boost/range/iterator_range.hpp>
|
||||
@@ -268,7 +269,7 @@ private:
|
||||
|
||||
void check_schema(const schema& s) const {
|
||||
#ifdef SEASTAR_DEBUG
|
||||
assert(s.version() == _schema_version);
|
||||
SCYLLA_ASSERT(s.version() == _schema_version);
|
||||
#endif
|
||||
}
|
||||
};
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <seastar/core/simple-stream.hh>
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/coroutine/maybe_yield.hh>
|
||||
@@ -103,7 +104,7 @@ collection_mutation read_collection_cell(const abstract_type& type, ser::collect
|
||||
for (auto&& e : elements) {
|
||||
bytes key = e.key();
|
||||
auto idx = deserialize_field_index(key);
|
||||
assert(idx < utype.size());
|
||||
SCYLLA_ASSERT(idx < utype.size());
|
||||
|
||||
mut.cells.emplace_back(key, read_atomic_cell(*utype.type(idx), e.value(), atomic_cell::collection_member::yes));
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "mutation.hh"
|
||||
#include "range_tombstone_assembler.hh"
|
||||
|
||||
@@ -20,31 +21,31 @@ public:
|
||||
|
||||
// Returned reference is valid until consume_end_of_stream() or flush() is called.
|
||||
const mutation& consume_new_partition(const dht::decorated_key& dk) {
|
||||
assert(!_m);
|
||||
SCYLLA_ASSERT(!_m);
|
||||
_m = mutation(_s, dk);
|
||||
return *_m;
|
||||
}
|
||||
|
||||
stop_iteration consume(tombstone t) {
|
||||
assert(_m);
|
||||
SCYLLA_ASSERT(_m);
|
||||
_m->partition().apply(t);
|
||||
return stop_iteration::no;
|
||||
}
|
||||
|
||||
stop_iteration consume(range_tombstone&& rt) {
|
||||
assert(_m);
|
||||
SCYLLA_ASSERT(_m);
|
||||
_m->partition().apply_row_tombstone(*_s, std::move(rt));
|
||||
return stop_iteration::no;
|
||||
}
|
||||
|
||||
stop_iteration consume(static_row&& sr) {
|
||||
assert(_m);
|
||||
SCYLLA_ASSERT(_m);
|
||||
_m->partition().static_row().apply(*_s, column_kind::static_column, std::move(sr.cells()));
|
||||
return stop_iteration::no;
|
||||
}
|
||||
|
||||
stop_iteration consume(clustering_row&& cr) {
|
||||
assert(_m);
|
||||
SCYLLA_ASSERT(_m);
|
||||
auto& dr = _m->partition().clustered_row(*_s, std::move(cr.key()));
|
||||
dr.apply(cr.tomb());
|
||||
dr.apply(cr.marker());
|
||||
@@ -53,7 +54,7 @@ public:
|
||||
}
|
||||
|
||||
stop_iteration consume_end_of_partition() {
|
||||
assert(_m);
|
||||
SCYLLA_ASSERT(_m);
|
||||
return stop_iteration::yes;
|
||||
}
|
||||
|
||||
@@ -64,7 +65,7 @@ public:
|
||||
// Can be used to split the processing of a large mutation into
|
||||
// multiple smaller `mutation` objects (which add up to the full mutation).
|
||||
mutation flush() {
|
||||
assert(_m);
|
||||
SCYLLA_ASSERT(_m);
|
||||
return std::exchange(*_m, mutation(_s, _m->decorated_key()));
|
||||
}
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
#include "partition_version.hh"
|
||||
#include "row_cache.hh"
|
||||
#include "partition_snapshot_row_cursor.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/coroutine.hh"
|
||||
#include "real_dirty_memory_accounter.hh"
|
||||
|
||||
@@ -342,7 +343,7 @@ partition_entry::~partition_entry() {
|
||||
return;
|
||||
}
|
||||
if (_snapshot) {
|
||||
assert(!_snapshot->is_locked());
|
||||
SCYLLA_ASSERT(!_snapshot->is_locked());
|
||||
_snapshot->_version = std::move(_version);
|
||||
_snapshot->_version.mark_as_unique_owner();
|
||||
_snapshot->_entry = nullptr;
|
||||
@@ -359,7 +360,7 @@ stop_iteration partition_entry::clear_gently(cache_tracker* tracker) noexcept {
|
||||
}
|
||||
|
||||
if (_snapshot) {
|
||||
assert(!_snapshot->is_locked());
|
||||
SCYLLA_ASSERT(!_snapshot->is_locked());
|
||||
_snapshot->_version = std::move(_version);
|
||||
_snapshot->_version.mark_as_unique_owner();
|
||||
_snapshot->_entry = nullptr;
|
||||
@@ -387,7 +388,7 @@ stop_iteration partition_entry::clear_gently(cache_tracker* tracker) noexcept {
|
||||
void partition_entry::set_version(partition_version* new_version)
|
||||
{
|
||||
if (_snapshot) {
|
||||
assert(!_snapshot->is_locked());
|
||||
SCYLLA_ASSERT(!_snapshot->is_locked());
|
||||
_snapshot->_version = std::move(_version);
|
||||
_snapshot->_entry = nullptr;
|
||||
}
|
||||
@@ -552,7 +553,7 @@ utils::coroutine partition_entry::apply_to_incomplete(const schema& s,
|
||||
do {
|
||||
auto size = src_cur.memory_usage();
|
||||
// Range tombstones in memtables are bounded by dummy entries on both sides.
|
||||
assert(src_cur.range_tombstone_for_row() == src_cur.range_tombstone());
|
||||
SCYLLA_ASSERT(src_cur.range_tombstone_for_row() == src_cur.range_tombstone());
|
||||
if (src_cur.range_tombstone()) {
|
||||
// Apply the tombstone to (lb, src_cur.position())
|
||||
// FIXME: Avoid if before all rows
|
||||
@@ -564,11 +565,11 @@ utils::coroutine partition_entry::apply_to_incomplete(const schema& s,
|
||||
cur.next();
|
||||
}
|
||||
position_in_partition::less_compare less(s);
|
||||
assert(less(lb, cur.position()));
|
||||
SCYLLA_ASSERT(less(lb, cur.position()));
|
||||
while (less(cur.position(), src_cur.position())) {
|
||||
auto res = cur.ensure_entry_in_latest();
|
||||
if (cur.continuous()) {
|
||||
assert(cur.dummy() || cur.range_tombstone_for_row() == cur.range_tombstone());
|
||||
SCYLLA_ASSERT(cur.dummy() || cur.range_tombstone_for_row() == cur.range_tombstone());
|
||||
res.row.set_continuous(is_continuous::yes);
|
||||
}
|
||||
res.row.set_range_tombstone(cur.range_tombstone_for_row() + src_cur.range_tombstone());
|
||||
@@ -600,7 +601,7 @@ utils::coroutine partition_entry::apply_to_incomplete(const schema& s,
|
||||
// only then the lower bound of the range is ensured in the latest version earlier.
|
||||
if (src_cur.range_tombstone()) {
|
||||
if (cur.continuous()) {
|
||||
assert(cur.dummy() || cur.range_tombstone_for_row() == cur.range_tombstone());
|
||||
SCYLLA_ASSERT(cur.dummy() || cur.range_tombstone_for_row() == cur.range_tombstone());
|
||||
e.set_continuous(is_continuous::yes);
|
||||
}
|
||||
e.set_range_tombstone(cur.range_tombstone_for_row() + src_cur.range_tombstone());
|
||||
@@ -666,9 +667,9 @@ partition_snapshot_ptr partition_entry::read(logalloc::region& r,
|
||||
// If entry is being updated, we will get reads for non-latest phase, and
|
||||
// they must attach to the non-current version.
|
||||
partition_version* second = _version->next();
|
||||
assert(second && second->is_referenced());
|
||||
SCYLLA_ASSERT(second && second->is_referenced());
|
||||
auto snp = partition_snapshot::container_of(second->_backref).shared_from_this();
|
||||
assert(phase == snp->_phase);
|
||||
SCYLLA_ASSERT(phase == snp->_phase);
|
||||
return snp;
|
||||
} else { // phase > _snapshot->_phase
|
||||
with_allocator(r.allocator(), [&] {
|
||||
@@ -687,9 +688,9 @@ void partition_snapshot::touch() noexcept {
|
||||
// can be touched.
|
||||
if (_tracker && at_latest_version()) {
|
||||
auto&& rows = version()->partition().clustered_rows();
|
||||
assert(!rows.empty());
|
||||
SCYLLA_ASSERT(!rows.empty());
|
||||
rows_entry& last_dummy = *rows.rbegin();
|
||||
assert(last_dummy.is_last_dummy());
|
||||
SCYLLA_ASSERT(last_dummy.is_last_dummy());
|
||||
_tracker->touch(last_dummy);
|
||||
}
|
||||
}
|
||||
@@ -732,7 +733,7 @@ void partition_entry::evict(mutation_cleaner& cleaner) noexcept {
|
||||
return;
|
||||
}
|
||||
if (_snapshot) {
|
||||
assert(!_snapshot->is_locked());
|
||||
SCYLLA_ASSERT(!_snapshot->is_locked());
|
||||
_snapshot->_version = std::move(_version);
|
||||
_snapshot->_version.mark_as_unique_owner();
|
||||
_snapshot->_entry = nullptr;
|
||||
@@ -756,14 +757,14 @@ partition_snapshot_ptr::~partition_snapshot_ptr() {
|
||||
void partition_snapshot::lock() noexcept {
|
||||
// partition_entry::is_locked() assumes that if there is a locked snapshot,
|
||||
// it can be found attached directly to it.
|
||||
assert(at_latest_version());
|
||||
SCYLLA_ASSERT(at_latest_version());
|
||||
_locked = true;
|
||||
}
|
||||
|
||||
void partition_snapshot::unlock() noexcept {
|
||||
// Locked snapshots must always be latest, is_locked() assumes that.
|
||||
// Also, touch() is only effective when this snapshot is latest.
|
||||
assert(at_latest_version());
|
||||
SCYLLA_ASSERT(at_latest_version());
|
||||
_locked = false;
|
||||
touch(); // Make the entry evictable again in case it was fully unlinked by eviction attempt.
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
|
||||
#include "mutation_partition.hh"
|
||||
#include "mutation_partition_v2.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/anchorless_list.hh"
|
||||
#include "utils/logalloc.hh"
|
||||
#include "utils/coroutine.hh"
|
||||
@@ -208,13 +209,13 @@ public:
|
||||
: _schema(std::move(s))
|
||||
, _partition(*_schema)
|
||||
{
|
||||
assert(_schema);
|
||||
SCYLLA_ASSERT(_schema);
|
||||
}
|
||||
explicit partition_version(mutation_partition_v2 mp, schema_ptr s) noexcept
|
||||
: _schema(std::move(s))
|
||||
, _partition(std::move(mp))
|
||||
{
|
||||
assert(_schema);
|
||||
SCYLLA_ASSERT(_schema);
|
||||
}
|
||||
|
||||
partition_version(partition_version&& pv) noexcept;
|
||||
@@ -251,7 +252,7 @@ public:
|
||||
: _version(&pv)
|
||||
, _unique_owner(unique_owner)
|
||||
{
|
||||
assert(!_version->_backref);
|
||||
SCYLLA_ASSERT(!_version->_backref);
|
||||
_version->_backref = this;
|
||||
}
|
||||
~partition_version_ref() {
|
||||
@@ -279,19 +280,19 @@ public:
|
||||
explicit operator bool() const { return _version; }
|
||||
|
||||
partition_version& operator*() {
|
||||
assert(_version);
|
||||
SCYLLA_ASSERT(_version);
|
||||
return *_version;
|
||||
}
|
||||
const partition_version& operator*() const {
|
||||
assert(_version);
|
||||
SCYLLA_ASSERT(_version);
|
||||
return *_version;
|
||||
}
|
||||
partition_version* operator->() {
|
||||
assert(_version);
|
||||
SCYLLA_ASSERT(_version);
|
||||
return _version;
|
||||
}
|
||||
const partition_version* operator->() const {
|
||||
assert(_version);
|
||||
SCYLLA_ASSERT(_version);
|
||||
return _version;
|
||||
}
|
||||
|
||||
@@ -669,9 +670,9 @@ public:
|
||||
// If entry is being updated, we will get reads for non-latest phase, and
|
||||
// they must attach to the non-current version.
|
||||
partition_version* second = _version->next();
|
||||
assert(second && second->is_referenced());
|
||||
SCYLLA_ASSERT(second && second->is_referenced());
|
||||
auto&& snp = partition_snapshot::referer_of(*second);
|
||||
assert(phase == snp._phase);
|
||||
SCYLLA_ASSERT(phase == snp._phase);
|
||||
return *second;
|
||||
} else { // phase > _snapshot->_phase
|
||||
add_version(s, t);
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "partition_version.hh"
|
||||
|
||||
// Double-ended chained list of partition_version objects
|
||||
@@ -29,13 +30,13 @@ public:
|
||||
}
|
||||
_head = partition_version_ref(v, true);
|
||||
#ifdef SEASTAR_DEBUG
|
||||
assert(!_head->is_referenced_from_entry());
|
||||
SCYLLA_ASSERT(!_head->is_referenced_from_entry());
|
||||
#endif
|
||||
} else {
|
||||
v.insert_after(*_tail);
|
||||
_tail = partition_version_ref(v, true);
|
||||
#ifdef SEASTAR_DEBUG
|
||||
assert(!_tail->is_referenced_from_entry());
|
||||
SCYLLA_ASSERT(!_tail->is_referenced_from_entry());
|
||||
#endif
|
||||
}
|
||||
}
|
||||
@@ -63,7 +64,7 @@ public:
|
||||
if (next) {
|
||||
_head = partition_version_ref(*next, true);
|
||||
#ifdef SEASTAR_DEBUG
|
||||
assert(!_head->is_referenced_from_entry());
|
||||
SCYLLA_ASSERT(!_head->is_referenced_from_entry());
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "types/types.hh"
|
||||
#include "keys.hh"
|
||||
#include "clustering_bounds_comparator.hh"
|
||||
@@ -238,12 +239,12 @@ public:
|
||||
|
||||
// Can be called only when !is_static_row && !is_clustering_row().
|
||||
bound_view as_start_bound_view() const {
|
||||
assert(_bound_weight != bound_weight::equal);
|
||||
SCYLLA_ASSERT(_bound_weight != bound_weight::equal);
|
||||
return bound_view(*_ck, _bound_weight == bound_weight::before_all_prefixed ? bound_kind::incl_start : bound_kind::excl_start);
|
||||
}
|
||||
|
||||
bound_view as_end_bound_view() const {
|
||||
assert(_bound_weight != bound_weight::equal);
|
||||
SCYLLA_ASSERT(_bound_weight != bound_weight::equal);
|
||||
return bound_view(*_ck, _bound_weight == bound_weight::before_all_prefixed ? bound_kind::excl_end : bound_kind::incl_end);
|
||||
}
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
|
||||
#include <boost/range/adaptor/reversed.hpp>
|
||||
#include "range_tombstone_list.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/allocation_strategy.hh"
|
||||
#include <seastar/util/variant_utils.hh>
|
||||
|
||||
@@ -409,7 +410,7 @@ void range_tombstone_list::nop_reverter::update(range_tombstones_type::iterator
|
||||
|
||||
void range_tombstone_list::insert_undo_op::undo(const schema& s, range_tombstone_list& rt_list) noexcept {
|
||||
auto it = rt_list.find(s, _new_rt);
|
||||
assert (it != rt_list.end());
|
||||
SCYLLA_ASSERT (it != rt_list.end());
|
||||
rt_list._tombstones.erase_and_dispose(it, current_deleter<range_tombstone_entry>());
|
||||
}
|
||||
|
||||
@@ -419,7 +420,7 @@ void range_tombstone_list::erase_undo_op::undo(const schema& s, range_tombstone_
|
||||
|
||||
void range_tombstone_list::update_undo_op::undo(const schema& s, range_tombstone_list& rt_list) noexcept {
|
||||
auto it = rt_list.find(s, _new_rt);
|
||||
assert (it != rt_list.end());
|
||||
SCYLLA_ASSERT (it != rt_list.end());
|
||||
*it = std::move(_old_rt);
|
||||
}
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
#include <seastar/util/defer.hh>
|
||||
#include "range_tombstone.hh"
|
||||
#include "query-request.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/preempt.hh"
|
||||
#include "utils/chunked_vector.hh"
|
||||
#include <variant>
|
||||
@@ -238,7 +239,7 @@ public:
|
||||
// The list is assumed not to be empty
|
||||
range_tombstone pop_front_and_lock() {
|
||||
range_tombstone_entry* rt = _tombstones.unlink_leftmost_without_rebalance();
|
||||
assert(rt != nullptr);
|
||||
SCYLLA_ASSERT(rt != nullptr);
|
||||
auto _ = seastar::defer([rt] () noexcept { current_deleter<range_tombstone_entry>()(rt); });
|
||||
return std::move(rt->tombstone());
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include "mutation_writer/multishard_writer.hh"
|
||||
#include "mutation/mutation_fragment_v2.hh"
|
||||
#include "schema/schema_registry.hh"
|
||||
@@ -150,7 +151,7 @@ future<stop_iteration> multishard_writer::handle_mutation_fragment(mutation_frag
|
||||
}
|
||||
}
|
||||
return f.then([this, mf = std::move(mf)] () mutable {
|
||||
assert(!_current_shards.empty());
|
||||
SCYLLA_ASSERT(!_current_shards.empty());
|
||||
if (_current_shards.size() == 1) [[likely]] {
|
||||
return _queue_reader_handles[_current_shards[0]]->push(std::move(mf));
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
|
||||
#include "mutation/partition_version.hh"
|
||||
#include "row_cache.hh"
|
||||
#include "utils/assert.hh"
|
||||
#include "utils/small_vector.hh"
|
||||
#include <boost/algorithm/cxx11/any_of.hpp>
|
||||
#include <boost/range/algorithm/find_if.hpp>
|
||||
@@ -314,7 +315,7 @@ class partition_snapshot_row_cursor final {
|
||||
bool advance(bool keep) {
|
||||
memory::on_alloc_point();
|
||||
version_heap_less_compare heap_less(*this);
|
||||
assert(iterators_valid());
|
||||
SCYLLA_ASSERT(iterators_valid());
|
||||
for (auto&& curr : _current_row) {
|
||||
if (!keep && curr.unique_owner) {
|
||||
mutation_partition::rows_type::key_grabber kg(curr.it);
|
||||
@@ -382,7 +383,7 @@ public:
|
||||
|
||||
// If is_in_latest_version() then this returns an iterator to the entry under cursor in the latest version.
|
||||
mutation_partition::rows_type::iterator get_iterator_in_latest_version() const {
|
||||
assert(_latest_it);
|
||||
SCYLLA_ASSERT(_latest_it);
|
||||
return *_latest_it;
|
||||
}
|
||||
|
||||
@@ -688,7 +689,7 @@ public:
|
||||
position_in_partition::less_compare less(_schema);
|
||||
if (!iterators_valid() || less(position(), pos)) {
|
||||
auto has_entry = maybe_advance_to(pos);
|
||||
assert(has_entry); // evictable snapshots must have a dummy after all rows.
|
||||
SCYLLA_ASSERT(has_entry); // evictable snapshots must have a dummy after all rows.
|
||||
}
|
||||
auto&& rows = _snp.version()->partition().mutable_clustered_rows();
|
||||
auto latest_i = get_iterator_in_latest_version();
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user