diff --git a/alternator/executor.cc b/alternator/executor.cc index 1dd6b7338f..c25a222853 100644 --- a/alternator/executor.cc +++ b/alternator/executor.cc @@ -30,6 +30,7 @@ #include "conditions.hh" #include "cql3/util.hh" #include +#include "utils/assert.hh" #include "utils/overloaded_functor.hh" #include #include @@ -85,7 +86,7 @@ static map_type attrs_type() { static const column_definition& attrs_column(const schema& schema) { const column_definition* cdef = schema.get_column_definition(bytes(executor::ATTRS_COLUMN_NAME)); - assert(cdef); + SCYLLA_ASSERT(cdef); return *cdef; } @@ -932,7 +933,7 @@ static void validate_attribute_definitions(const rjson::value& attribute_definit } static future create_table_on_shard0(tracing::trace_state_ptr trace_state, rjson::value request, service::storage_proxy& sp, service::migration_manager& mm, gms::gossiper& gossiper) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); // We begin by parsing and validating the content of the CreateTable // command. 
We can't inspect the current database schema at this point @@ -1678,7 +1679,7 @@ future rmw_operation::execute(service::storage_pr } } else if (_write_isolation != write_isolation::LWT_ALWAYS) { std::optional m = apply(nullptr, api::new_timestamp()); - assert(m); // !needs_read_before_write, so apply() did not check a condition + SCYLLA_ASSERT(m); // !needs_read_before_write, so apply() did not check a condition return proxy.mutate(std::vector{std::move(*m)}, db::consistency_level::LOCAL_QUORUM, executor::default_timeout(), trace_state, std::move(permit), db::allow_per_partition_rate_limit::yes).then([this] () mutable { return rmw_operation_return(std::move(_return_attributes)); }); @@ -3845,7 +3846,7 @@ static future do_query(service::storage_proxy& pr } static dht::token token_for_segment(int segment, int total_segments) { - assert(total_segments > 1 && segment >= 0 && segment < total_segments); + SCYLLA_ASSERT(total_segments > 1 && segment >= 0 && segment < total_segments); uint64_t delta = std::numeric_limits::max() / total_segments; return dht::token::from_int64(std::numeric_limits::min() + delta * segment); } diff --git a/alternator/server.cc b/alternator/server.cc index 7eac6744a7..48f642d249 100644 --- a/alternator/server.cc +++ b/alternator/server.cc @@ -18,6 +18,7 @@ #include "seastarx.hh" #include "error.hh" #include "service/qos/service_level_controller.hh" +#include "utils/assert.hh" #include "utils/rjson.hh" #include "auth.hh" #include @@ -405,7 +406,7 @@ future server::handle_api_request(std::unique_ptr ++_executor._stats.requests_blocked_memory; } auto units = co_await std::move(units_fut); - assert(req->content_stream); + SCYLLA_ASSERT(req->content_stream); chunked_content content = co_await util::read_entire_stream(*req->content_stream); auto username = co_await verify_signature(*req, content); diff --git a/alternator/ttl.cc b/alternator/ttl.cc index 888eadf315..9e2b4e9e7c 100644 --- a/alternator/ttl.cc +++ b/alternator/ttl.cc @@ -35,6 +35,7 @@ 
#include "mutation/mutation.hh" #include "types/types.hh" #include "types/map.hh" +#include "utils/assert.hh" #include "utils/rjson.hh" #include "utils/big_decimal.hh" #include "cql3/selection/selection.hh" @@ -551,7 +552,7 @@ static future<> scan_table_ranges( expiration_service::stats& expiration_stats) { const schema_ptr& s = scan_ctx.s; - assert (partition_ranges.size() == 1); // otherwise issue #9167 will cause incorrect results. + SCYLLA_ASSERT (partition_ranges.size() == 1); // otherwise issue #9167 will cause incorrect results. auto p = service::pager::query_pagers::pager(proxy, s, scan_ctx.selection, *scan_ctx.query_state_ptr, *scan_ctx.query_options, scan_ctx.command, std::move(partition_ranges), nullptr); while (!p->is_exhausted()) { diff --git a/api/column_family.cc b/api/column_family.cc index 20e721e907..699f09c7bb 100644 --- a/api/column_family.cc +++ b/api/column_family.cc @@ -15,6 +15,7 @@ #include #include "sstables/sstables.hh" #include "sstables/metadata_collector.hh" +#include "utils/assert.hh" #include "utils/estimated_histogram.hh" #include #include "db/system_keyspace.hh" @@ -103,7 +104,7 @@ class autocompaction_toggle_guard { replica::database& _db; public: autocompaction_toggle_guard(replica::database& db) : _db(db) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); if (!_db._enable_autocompaction_toggle) { throw std::runtime_error("Autocompaction toggle is busy"); } @@ -112,7 +113,7 @@ public: autocompaction_toggle_guard(const autocompaction_toggle_guard&) = delete; autocompaction_toggle_guard(autocompaction_toggle_guard&&) = default; ~autocompaction_toggle_guard() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); _db._enable_autocompaction_toggle = true; } }; diff --git a/auth/common.cc b/auth/common.cc index 2bb6dc5e6a..1cf3a6a200 100644 --- a/auth/common.cc +++ b/auth/common.cc @@ -16,6 +16,7 @@ #include "mutation/canonical_mutation.hh" #include "schema/schema_fwd.hh" #include "timestamp.hh" 
+#include "utils/assert.hh" #include "utils/exponential_backoff_retry.hh" #include "cql3/query_processor.hh" #include "cql3/statements/create_table_statement.hh" @@ -68,7 +69,7 @@ static future<> create_legacy_metadata_table_if_missing_impl( cql3::query_processor& qp, std::string_view cql, ::service::migration_manager& mm) { - assert(this_shard_id() == 0); // once_among_shards makes sure a function is executed on shard 0 only + SCYLLA_ASSERT(this_shard_id() == 0); // once_among_shards makes sure a function is executed on shard 0 only auto db = qp.db(); auto parsed_statement = cql3::query_processor::parse_statement(cql); diff --git a/auth/service.cc b/auth/service.cc index f2137bd715..38ce58d146 100644 --- a/auth/service.cc +++ b/auth/service.cc @@ -36,6 +36,7 @@ #include "service/migration_manager.hh" #include "service/raft/raft_group0_client.hh" #include "timestamp.hh" +#include "utils/assert.hh" #include "utils/class_registrator.hh" #include "locator/abstract_replication_strategy.hh" #include "data_dictionary/keyspace_metadata.hh" @@ -194,7 +195,7 @@ service::service( } future<> service::create_legacy_keyspace_if_missing(::service::migration_manager& mm) const { - assert(this_shard_id() == 0); // once_among_shards makes sure a function is executed on shard 0 only + SCYLLA_ASSERT(this_shard_id() == 0); // once_among_shards makes sure a function is executed on shard 0 only auto db = _qp.db(); while (!db.has_keyspace(meta::legacy::AUTH_KS)) { diff --git a/bytes_ostream.hh b/bytes_ostream.hh index 613e0c621d..34e51e5310 100644 --- a/bytes_ostream.hh +++ b/bytes_ostream.hh @@ -11,6 +11,7 @@ #include #include "bytes.hh" +#include "utils/assert.hh" #include "utils/managed_bytes.hh" #include #include @@ -269,7 +270,7 @@ public: // Call only when is_linearized() bytes_view view() const { - assert(is_linearized()); + SCYLLA_ASSERT(is_linearized()); if (!_current) { return bytes_view(); } diff --git a/cache_mutation_reader.hh b/cache_mutation_reader.hh index 
c1c48dc2bb..cbe28e8f23 100644 --- a/cache_mutation_reader.hh +++ b/cache_mutation_reader.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include "row_cache.hh" #include "mutation/mutation_fragment.hh" @@ -283,7 +284,7 @@ future<> cache_mutation_reader::process_static_row() { return ensure_underlying().then([this] { return (*_underlying)().then([this] (mutation_fragment_v2_opt&& sr) { if (sr) { - assert(sr->is_static_row()); + SCYLLA_ASSERT(sr->is_static_row()); maybe_add_to_cache(sr->as_static_row()); push_mutation_fragment(std::move(*sr)); } @@ -382,7 +383,7 @@ future<> cache_mutation_reader::do_fill_buffer() { if (_state == state::reading_from_underlying) { return read_from_underlying(); } - // assert(_state == state::reading_from_cache) + // SCYLLA_ASSERT(_state == state::reading_from_cache) return _lsa_manager.run_in_read_section([this] { auto next_valid = _next_row.iterators_valid(); clogger.trace("csm {}: reading_from_cache, range=[{}, {}), next={}, valid={}, rt={}", fmt::ptr(this), _lower_bound, @@ -990,7 +991,7 @@ void cache_mutation_reader::offer_from_underlying(mutation_fragment_v2&& mf) { maybe_add_to_cache(mf.as_clustering_row()); add_clustering_row_to_buffer(std::move(mf)); } else { - assert(mf.is_range_tombstone_change()); + SCYLLA_ASSERT(mf.is_range_tombstone_change()); auto& chg = mf.as_range_tombstone_change(); if (maybe_add_to_cache(chg)) { add_to_buffer(std::move(mf).as_range_tombstone_change()); diff --git a/cdc/change_visitor.hh b/cdc/change_visitor.hh index 32c1861caa..97c18ffad1 100644 --- a/cdc/change_visitor.hh +++ b/cdc/change_visitor.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include "mutation/mutation.hh" /* @@ -246,7 +247,7 @@ void inspect_mutation(const mutation& m, V& v) { if (r.deleted_at()) { auto t = r.deleted_at().tomb(); - assert(t.timestamp != api::missing_timestamp); + SCYLLA_ASSERT(t.timestamp != api::missing_timestamp); v.clustered_row_delete(cr.key(), t); if (v.finished()) { return; @@ 
-255,7 +256,7 @@ void inspect_mutation(const mutation& m, V& v) { } for (auto& rt: p.row_tombstones()) { - assert(rt.tombstone().tomb.timestamp != api::missing_timestamp); + SCYLLA_ASSERT(rt.tombstone().tomb.timestamp != api::missing_timestamp); v.range_delete(rt.tombstone()); if (v.finished()) { return; diff --git a/cdc/generation.cc b/cdc/generation.cc index 524f78f4ac..c29113343a 100644 --- a/cdc/generation.cc +++ b/cdc/generation.cc @@ -26,6 +26,7 @@ #include "gms/inet_address.hh" #include "gms/gossiper.hh" #include "gms/feature_service.hh" +#include "utils/assert.hh" #include "utils/error_injection.hh" #include "utils/UUID_gen.hh" #include "utils/to_string.hh" @@ -107,8 +108,8 @@ stream_id::stream_id(dht::token token, size_t vnode_index) copy_int_to_bytes(dht::token::to_int64(token), 0, _value); copy_int_to_bytes(low_qword, sizeof(int64_t), _value); // not a hot code path. make sure we did not mess up the shifts and masks. - assert(version() == version_1); - assert(index() == vnode_index); + SCYLLA_ASSERT(version() == version_1); + SCYLLA_ASSERT(index() == vnode_index); } stream_id::stream_id(bytes b) @@ -126,7 +127,7 @@ bool stream_id::is_set() const { } static int64_t bytes_to_int64(bytes_view b, size_t offset) { - assert(b.size() >= offset + sizeof(int64_t)); + SCYLLA_ASSERT(b.size() >= offset + sizeof(int64_t)); int64_t res; std::copy_n(b.begin() + offset, sizeof(int64_t), reinterpret_cast(&res)); return net::ntoh(res); @@ -411,7 +412,7 @@ future generation_service::legacy_make_new_generation(const // Our caller should ensure that there are normal tokens in the token ring. 
auto normal_token_owners = tmptr->count_normal_token_owners(); - assert(normal_token_owners); + SCYLLA_ASSERT(normal_token_owners); if (_feature_service.cdc_generations_v2) { cdc_log.info("Inserting new generation data at UUID {}", uuid); @@ -811,7 +812,7 @@ future<> generation_service::stop() { } generation_service::~generation_service() { - assert(_stopped); + SCYLLA_ASSERT(_stopped); } future<> generation_service::after_join(std::optional&& startup_gen_id) { diff --git a/cdc/log.cc b/cdc/log.cc index d90fd396a8..f431c7a3bb 100644 --- a/cdc/log.cc +++ b/cdc/log.cc @@ -32,6 +32,7 @@ #include "cql3/statements/select_statement.hh" #include "cql3/untyped_result_set.hh" #include "log.hh" +#include "utils/assert.hh" #include "utils/rjson.hh" #include "utils/UUID_gen.hh" #include "utils/managed_bytes.hh" @@ -148,7 +149,7 @@ public: _ctxt._migration_notifier.register_listener(this); } ~impl() { - assert(_stopped); + SCYLLA_ASSERT(_stopped); } future<> stop() { @@ -455,7 +456,7 @@ schema_ptr get_base_table(const replica::database& db, sstring_view ks_name,std: } seastar::sstring base_name(std::string_view log_name) { - assert(is_log_name(log_name)); + SCYLLA_ASSERT(is_log_name(log_name)); return sstring(log_name.data(), log_name.size() - cdc_log_suffix.size()); } @@ -655,7 +656,7 @@ private: template<> void collection_iterator>::parse() { - assert(_rem > 0); + SCYLLA_ASSERT(_rem > 0); _next = _v; auto k = read_collection_key(_next); auto v = read_collection_value_nonnull(_next); @@ -664,7 +665,7 @@ void collection_iterator>::par template<> void collection_iterator::parse() { - assert(_rem > 0); + SCYLLA_ASSERT(_rem > 0); _next = _v; auto k = read_collection_key(_next); _current = k; @@ -672,7 +673,7 @@ void collection_iterator::parse() { template<> void collection_iterator::parse() { - assert(_rem > 0); + SCYLLA_ASSERT(_rem > 0); _next = _v; auto k = read_collection_value_nonnull(_next); _current = k; @@ -1065,7 +1066,7 @@ struct process_row_visitor { void 
update_row_state(const column_definition& cdef, managed_bytes_opt value) { if (!_row_state) { // static row always has a valid state, so this must be a clustering row missing - assert(_base_ck); + SCYLLA_ASSERT(_base_ck); auto [it, _] = _clustering_row_states.try_emplace(*_base_ck); _row_state = &it->second; } @@ -1496,12 +1497,12 @@ public: } void generate_image(operation op, const clustering_key* ck, const one_kind_column_set* affected_columns) { - assert(op == operation::pre_image || op == operation::post_image); + SCYLLA_ASSERT(op == operation::pre_image || op == operation::post_image); - // assert that post_image is always full - assert(!(op == operation::post_image && affected_columns)); + // Assert that post_image is always full + SCYLLA_ASSERT(!(op == operation::post_image && affected_columns)); - assert(_builder); + SCYLLA_ASSERT(_builder); const auto kind = ck ? column_kind::regular_column : column_kind::static_column; @@ -1571,7 +1572,7 @@ public: // TODO: is pre-image data based on query enough. We only have actual column data. Do we need // more details like tombstones/ttl? Probably not but keep in mind.
void process_change(const mutation& m) override { - assert(_builder); + SCYLLA_ASSERT(_builder); process_change_visitor v { ._touched_parts = _touched_parts, ._builder = *_builder, @@ -1584,7 +1585,7 @@ public: } void end_record() override { - assert(_builder); + SCYLLA_ASSERT(_builder); _builder->end_record(); } diff --git a/cell_locking.hh b/cell_locking.hh index 25590d6c37..1e596f641f 100644 --- a/cell_locking.hh +++ b/cell_locking.hh @@ -10,6 +10,7 @@ #include +#include "utils/assert.hh" #include "utils/small_vector.hh" #include "mutation/mutation_partition.hh" #include "utils/xx_hasher.hh" @@ -342,7 +343,7 @@ public: { } ~cell_locker() { - assert(_partitions.empty()); + SCYLLA_ASSERT(_partitions.empty()); } void set_schema(schema_ptr s) { diff --git a/clustering_interval_set.hh b/clustering_interval_set.hh index 9b9f4c26b1..977478db33 100644 --- a/clustering_interval_set.hh +++ b/clustering_interval_set.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include "schema/schema_fwd.hh" #include "mutation/position_in_partition.hh" #include @@ -87,8 +88,8 @@ public: } }; static interval::type make_interval(const schema& s, const position_range& r) { - assert(r.start().has_clustering_key()); - assert(r.end().has_clustering_key()); + SCYLLA_ASSERT(r.start().has_clustering_key()); + SCYLLA_ASSERT(r.end().has_clustering_key()); return interval::right_open( position_in_partition_with_schema(s.shared_from_this(), r.start()), position_in_partition_with_schema(s.shared_from_this(), r.end())); diff --git a/clustering_ranges_walker.hh b/clustering_ranges_walker.hh index f976824553..82120d7cfe 100644 --- a/clustering_ranges_walker.hh +++ b/clustering_ranges_walker.hh @@ -10,6 +10,7 @@ #pragma once +#include "utils/assert.hh" #include "schema/schema.hh" #include "query-request.hh" #include "mutation/mutation_fragment.hh" @@ -249,7 +250,7 @@ public: auto range_end = position_in_partition_view::for_range_end(rng); if (!less(rt.position(), range_start) && 
!less(range_end, rt.end_position())) { // Fully enclosed by this range. - assert(!first); + SCYLLA_ASSERT(!first); return std::move(rt); } auto this_range_rt = rt; diff --git a/collection_mutation.cc b/collection_mutation.cc index b355cea760..009097994c 100644 --- a/collection_mutation.cc +++ b/collection_mutation.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "types/collection.hh" #include "types/user.hh" #include "concrete_types.hh" @@ -391,7 +392,7 @@ deserialize_collection_mutation(collection_mutation_input_stream& in, F&& read_k ret.cells.push_back(read_kv(in)); } - assert(in.empty()); + SCYLLA_ASSERT(in.empty()); return ret; } diff --git a/compaction/compaction.cc b/compaction/compaction.cc index 1cd2c62d01..900c08d252 100644 --- a/compaction/compaction.cc +++ b/compaction/compaction.cc @@ -46,6 +46,7 @@ #include "mutation_writer/partition_based_splitting_writer.hh" #include "mutation/mutation_source_metadata.hh" #include "mutation/mutation_fragment_stream_validator.hh" +#include "utils/assert.hh" #include "utils/error_injection.hh" #include "utils/pretty_printers.hh" #include "readers/multi_range.hh" @@ -283,7 +284,7 @@ private: utils::observer<> make_stop_request_observer(utils::observable<>& sro) { return sro.observe([this] () mutable { - assert(!_unclosed_partition); + SCYLLA_ASSERT(!_unclosed_partition); consume_end_of_stream(); }); } diff --git a/compaction/compaction_manager.cc b/compaction/compaction_manager.cc index 4f542c1148..3994ab5bd5 100644 --- a/compaction/compaction_manager.cc +++ b/compaction/compaction_manager.cc @@ -22,6 +22,7 @@ #include #include "sstables/exceptions.hh" #include "sstables/sstable_directory.hh" +#include "utils/assert.hh" #include "utils/error_injection.hh" #include "utils/UUID_gen.hh" #include "db/system_keyspace.hh" @@ -958,7 +959,7 @@ compaction_manager::compaction_manager(tasks::task_manager& tm) compaction_manager::~compaction_manager() { // Assert that 
compaction manager was explicitly stopped, if started. // Otherwise, fiber(s) will be alive after the object is stopped. - assert(_state == state::none || _state == state::stopped); + SCYLLA_ASSERT(_state == state::none || _state == state::stopped); } future<> compaction_manager::update_throughput(uint32_t value_mbs) { @@ -998,7 +999,7 @@ void compaction_manager::register_metrics() { } void compaction_manager::enable() { - assert(_state == state::none || _state == state::disabled); + SCYLLA_ASSERT(_state == state::none || _state == state::disabled); _state = state::enabled; _compaction_submission_timer.arm_periodic(periodic_compaction_submission_interval()); _waiting_reevalution = postponed_compactions_reevaluation(); diff --git a/compaction/leveled_manifest.hh b/compaction/leveled_manifest.hh index 5b1d8d7b9b..4b1d861ae4 100644 --- a/compaction/leveled_manifest.hh +++ b/compaction/leveled_manifest.hh @@ -10,6 +10,7 @@ #pragma once +#include "utils/assert.hh" #include "sstables/sstables.hh" #include "size_tiered_compaction_strategy.hh" #include "interval.hh" @@ -311,7 +312,7 @@ public: template static std::vector overlapping(const schema& s, const std::vector& candidates, const T& others) { - assert(!candidates.empty()); + SCYLLA_ASSERT(!candidates.empty()); /* * Picking each sstable from others that overlap one of the sstable of candidates is not enough * because you could have the following situation: @@ -350,7 +351,7 @@ public: */ template static std::vector overlapping(const schema& s, dht::token start, dht::token end, const T& sstables) { - assert(start <= end); + SCYLLA_ASSERT(start <= end); std::vector overlapped; auto range = ::wrapping_interval::make(start, end); @@ -459,7 +460,7 @@ private: * for prior failure), will return an empty list. Never returns null. 
*/ candidates_info get_candidates_for(int level, const std::vector>& last_compacted_keys) { - assert(!get_level(level).empty()); + SCYLLA_ASSERT(!get_level(level).empty()); logger.debug("Choosing candidates for L{}", level); @@ -517,7 +518,7 @@ public: new_level = 0; } else { new_level = (minimum_level == maximum_level && can_promote) ? maximum_level + 1 : maximum_level; - assert(new_level > 0); + SCYLLA_ASSERT(new_level > 0); } return new_level; } diff --git a/compaction/size_tiered_compaction_strategy.cc b/compaction/size_tiered_compaction_strategy.cc index 3fbc189f77..3fc1ef50ce 100644 --- a/compaction/size_tiered_compaction_strategy.cc +++ b/compaction/size_tiered_compaction_strategy.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "sstables/sstables.hh" #include "size_tiered_compaction_strategy.hh" #include "cql3/statements/property_definitions.hh" @@ -114,7 +115,7 @@ size_tiered_compaction_strategy::create_sstable_and_length_pairs(const std::vect for(auto& sstable : sstables) { auto sstable_size = sstable->data_size(); - assert(sstable_size != 0); + SCYLLA_ASSERT(sstable_size != 0); sstable_length_pairs.emplace_back(sstable, sstable_size); } diff --git a/compound.hh b/compound.hh index 041d45c576..820e7c78b9 100644 --- a/compound.hh +++ b/compound.hh @@ -14,6 +14,7 @@ #include #include #include +#include "utils/assert.hh" #include "utils/serialization.hh" #include @@ -65,15 +66,15 @@ private: for (auto&& val : values) { using val_type = std::remove_cvref_t; if constexpr (FragmentedView) { - assert(val.size_bytes() <= std::numeric_limits::max()); + SCYLLA_ASSERT(val.size_bytes() <= std::numeric_limits::max()); write(out, size_type(val.size_bytes())); write_fragmented(out, val); } else if constexpr (std::same_as) { - assert(val.size() <= std::numeric_limits::max()); + SCYLLA_ASSERT(val.size() <= std::numeric_limits::max()); write(out, size_type(val.size())); write_fragmented(out, managed_bytes_view(val)); } 
else { - assert(val.size() <= std::numeric_limits::max()); + SCYLLA_ASSERT(val.size() <= std::numeric_limits::max()); write(out, size_type(val.size())); write_fragmented(out, single_fragmented_view(val)); } @@ -135,7 +136,7 @@ public: partial.reserve(values.size()); auto i = _types.begin(); for (auto&& component : values) { - assert(i != _types.end()); + SCYLLA_ASSERT(i != _types.end()); partial.push_back((*i++)->decompose(component)); } return serialize_value(partial); @@ -256,7 +257,7 @@ public: } // Returns true iff given prefix has no missing components bool is_full(managed_bytes_view v) const { - assert(AllowPrefixes == allow_prefixes::yes); + SCYLLA_ASSERT(AllowPrefixes == allow_prefixes::yes); return std::distance(begin(v), end(v)) == (ssize_t)_types.size(); } bool is_empty(managed_bytes_view v) const { diff --git a/converting_mutation_partition_applier.cc b/converting_mutation_partition_applier.cc index 5ec8ed7f12..0ba50235a6 100644 --- a/converting_mutation_partition_applier.cc +++ b/converting_mutation_partition_applier.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "converting_mutation_partition_applier.hh" #include "concrete_types.hh" @@ -53,7 +54,7 @@ converting_mutation_partition_applier::accept_cell(row& dst, column_kind kind, c visit(old_type, make_visitor( [&] (const collection_type_impl& old_ctype) { - assert(new_def.type->is_collection()); // because is_compatible + SCYLLA_ASSERT(new_def.type->is_collection()); // because is_compatible auto& new_ctype = static_cast(*new_def.type); auto& new_value_type = *new_ctype.value_comparator(); @@ -67,13 +68,13 @@ converting_mutation_partition_applier::accept_cell(row& dst, column_kind kind, c } }, [&] (const user_type_impl& old_utype) { - assert(new_def.type->is_user_type()); // because is_compatible + SCYLLA_ASSERT(new_def.type->is_user_type()); // because is_compatible auto& new_utype = static_cast(*new_def.type); for (auto& c : old_view.cells) { 
if (c.second.timestamp() > new_def.dropped_at()) { auto idx = deserialize_field_index(c.first); - assert(idx < new_utype.size() && idx < old_utype.size()); + SCYLLA_ASSERT(idx < new_utype.size() && idx < old_utype.size()); new_view.cells.emplace_back(c.first, upgrade_cell( *new_utype.type(idx), *old_utype.type(idx), c.second, atomic_cell::collection_member::yes)); diff --git a/counters.cc b/counters.cc index a42b7415ef..92e6461877 100644 --- a/counters.cc +++ b/counters.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "counters.hh" #include "mutation/mutation.hh" #include "combine.hh" @@ -104,8 +105,8 @@ void counter_cell_view::apply(const column_definition& cdef, atomic_cell_or_coll return; } - assert(!dst_ac.is_counter_update()); - assert(!src_ac.is_counter_update()); + SCYLLA_ASSERT(!dst_ac.is_counter_update()); + SCYLLA_ASSERT(!src_ac.is_counter_update()); auto src_ccv = counter_cell_view(src_ac); auto dst_ccv = counter_cell_view(dst_ac); @@ -132,8 +133,8 @@ void counter_cell_view::apply(const column_definition& cdef, atomic_cell_or_coll std::optional counter_cell_view::difference(atomic_cell_view a, atomic_cell_view b) { - assert(!a.is_counter_update()); - assert(!b.is_counter_update()); + SCYLLA_ASSERT(!a.is_counter_update()); + SCYLLA_ASSERT(!b.is_counter_update()); if (!b.is_live() || !a.is_live()) { if (b.is_live() || (!a.is_live() && compare_atomic_cell_for_merge(b, a) < 0)) { diff --git a/counters.hh b/counters.hh index 5b08064859..4b95d465df 100644 --- a/counters.hh +++ b/counters.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include #include @@ -311,8 +312,8 @@ public: explicit basic_counter_cell_view(basic_atomic_cell_view ac) noexcept : _cell(ac) { - assert(_cell.is_live()); - assert(!_cell.is_counter_update()); + SCYLLA_ASSERT(_cell.is_live()); + SCYLLA_ASSERT(!_cell.is_counter_update()); } api::timestamp_type timestamp() const { return _cell.timestamp(); } diff --git 
a/cql3/column_specification.cc b/cql3/column_specification.cc index bfbb4bfdb7..12d0418485 100644 --- a/cql3/column_specification.cc +++ b/cql3/column_specification.cc @@ -8,6 +8,7 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ +#include "utils/assert.hh" #include "cql3/column_specification.hh" namespace cql3 { @@ -22,7 +23,7 @@ column_specification::column_specification(std::string_view ks_name_, std::strin bool column_specification::all_in_same_table(const std::vector>& names) { - assert(!names.empty()); + SCYLLA_ASSERT(!names.empty()); auto first = names.front(); return std::all_of(std::next(names.begin()), names.end(), [first] (auto&& spec) { diff --git a/cql3/cql3_type.cc b/cql3/cql3_type.cc index ee39d689dd..ff9bbbb559 100644 --- a/cql3/cql3_type.cc +++ b/cql3/cql3_type.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include @@ -47,8 +48,8 @@ static cql3_type::kind get_cql3_kind(const abstract_type& t) { cql3_type::kind operator()(const uuid_type_impl&) { return cql3_type::kind::UUID; } cql3_type::kind operator()(const varint_type_impl&) { return cql3_type::kind::VARINT; } cql3_type::kind operator()(const reversed_type_impl& r) { return get_cql3_kind(*r.underlying_type()); } - cql3_type::kind operator()(const tuple_type_impl&) { assert(0 && "no kind for this type"); } - cql3_type::kind operator()(const collection_type_impl&) { assert(0 && "no kind for this type"); } + cql3_type::kind operator()(const tuple_type_impl&) { SCYLLA_ASSERT(0 && "no kind for this type"); } + cql3_type::kind operator()(const collection_type_impl&) { SCYLLA_ASSERT(0 && "no kind for this type"); } }; return visit(t, visitor{}); } @@ -147,7 +148,7 @@ public: } virtual cql3_type prepare_internal(const sstring& keyspace, const data_dictionary::user_types_metadata& user_types) override { - assert(_values); // "Got null values type for a collection"; + SCYLLA_ASSERT(_values); // "Got null values type for a 
collection"; if (_values->is_counter()) { throw exceptions::invalid_request_exception(format("Counters are not allowed inside collections: {}", *this)); @@ -187,7 +188,7 @@ private: } return cql3_type(set_type_impl::get_instance(_values->prepare_internal(keyspace, user_types).get_type(), !is_frozen())); } else if (_kind == abstract_type::kind::map) { - assert(_keys); // "Got null keys type for a collection"; + SCYLLA_ASSERT(_keys); // "Got null keys type for a collection"; if (_keys->is_duration()) { throw exceptions::invalid_request_exception(format("Durations are not allowed as map keys: {}", *this)); } diff --git a/cql3/functions/functions.cc b/cql3/functions/functions.cc index 1e795b3129..7dc7b4495d 100644 --- a/cql3/functions/functions.cc +++ b/cql3/functions/functions.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "functions.hh" #include "token_fct.hh" #include "cql3/ut_name.hh" @@ -450,7 +451,7 @@ functions::get_user_aggregates(const sstring& keyspace) const { boost::iterator_range functions::find(const function_name& name) const { - assert(name.has_keyspace()); // : "function name not fully qualified"; + SCYLLA_ASSERT(name.has_keyspace()); // : "function name not fully qualified"; auto pair = _declared.equal_range(name); return boost::make_iterator_range(pair.first, pair.second); } diff --git a/cql3/keyspace_element_name.cc b/cql3/keyspace_element_name.cc index 3a6f71bb9e..ba47e2e8b6 100644 --- a/cql3/keyspace_element_name.cc +++ b/cql3/keyspace_element_name.cc @@ -8,6 +8,7 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ +#include "utils/assert.hh" #include "cql3/keyspace_element_name.hh" namespace cql3 { @@ -24,7 +25,7 @@ bool keyspace_element_name::has_keyspace() const const sstring& keyspace_element_name::get_keyspace() const { - assert(_ks_name); + SCYLLA_ASSERT(_ks_name); return *_ks_name; } diff --git a/cql3/lists.cc b/cql3/lists.cc index cfb62b25e2..f838e673cf 100644 --- 
a/cql3/lists.cc +++ b/cql3/lists.cc @@ -13,6 +13,7 @@ #include "cql3/expr/expr-utils.hh" #include #include "types/list.hh" +#include "utils/assert.hh" #include "utils/UUID_gen.hh" #include "mutation/mutation.hh" @@ -62,7 +63,7 @@ lists::setter_by_index::fill_prepare_context(prepare_context& ctx) { void lists::setter_by_index::execute(mutation& m, const clustering_key_prefix& prefix, const update_parameters& params) { // we should not get here for frozen lists - assert(column.type->is_multi_cell()); // "Attempted to set an individual element on a frozen list"; + SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to set an individual element on a frozen list"; auto index = expr::evaluate(_idx, params._options); if (index.is_null()) { @@ -105,7 +106,7 @@ lists::setter_by_uuid::requires_read() const { void lists::setter_by_uuid::execute(mutation& m, const clustering_key_prefix& prefix, const update_parameters& params) { // we should not get here for frozen lists - assert(column.type->is_multi_cell()); // "Attempted to set an individual element on a frozen list"; + SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to set an individual element on a frozen list"; auto index = expr::evaluate(_idx, params._options); auto value = expr::evaluate(*_e, params._options); @@ -133,7 +134,7 @@ lists::setter_by_uuid::execute(mutation& m, const clustering_key_prefix& prefix, void lists::appender::execute(mutation& m, const clustering_key_prefix& prefix, const update_parameters& params) { const cql3::raw_value value = expr::evaluate(*_e, params._options); - assert(column.type->is_multi_cell()); // "Attempted to append to a frozen list"; + SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to append to a frozen list"; do_append(value, m, prefix, column, params); } @@ -189,7 +190,7 @@ lists::do_append(const cql3::raw_value& list_value, void lists::prepender::execute(mutation& m, const clustering_key_prefix& prefix, const update_parameters& params) { - 
assert(column.type->is_multi_cell()); // "Attempted to prepend to a frozen list"; + SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to prepend to a frozen list"; cql3::raw_value lvalue = expr::evaluate(*_e, params._options); if (lvalue.is_null()) { return; @@ -244,7 +245,7 @@ lists::discarder::requires_read() const { void lists::discarder::execute(mutation& m, const clustering_key_prefix& prefix, const update_parameters& params) { - assert(column.type->is_multi_cell()); // "Attempted to delete from a frozen list"; + SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to delete from a frozen list"; auto&& existing_list = params.get_prefetched_list(m.key(), prefix, column); // We want to call bind before possibly returning to reject queries where the value provided is not a list. @@ -300,7 +301,7 @@ lists::discarder_by_index::requires_read() const { void lists::discarder_by_index::execute(mutation& m, const clustering_key_prefix& prefix, const update_parameters& params) { - assert(column.type->is_multi_cell()); // "Attempted to delete an item by index from a frozen list"; + SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to delete an item by index from a frozen list"; cql3::raw_value index = expr::evaluate(*_e, params._options); if (index.is_null()) { throw exceptions::invalid_request_exception("Invalid null value for list index"); diff --git a/cql3/maps.cc b/cql3/maps.cc index e27aff4b15..7f8c36723e 100644 --- a/cql3/maps.cc +++ b/cql3/maps.cc @@ -8,6 +8,7 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ +#include "utils/assert.hh" #include "maps.hh" #include "operation.hh" #include "update_parameters.hh" @@ -44,7 +45,7 @@ maps::setter_by_key::fill_prepare_context(prepare_context& ctx) { void maps::setter_by_key::execute(mutation& m, const clustering_key_prefix& prefix, const update_parameters& params) { using exceptions::invalid_request_exception; - assert(column.type->is_multi_cell()); // "Attempted to set a value 
for a single key on a frozen map"m + SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to set a value for a single key on a frozen map"; auto key = expr::evaluate(_k, params._options); auto value = expr::evaluate(*_e, params._options); if (key.is_null()) { @@ -62,7 +63,7 @@ maps::setter_by_key::execute(mutation& m, const clustering_key_prefix& prefix, c void maps::putter::execute(mutation& m, const clustering_key_prefix& prefix, const update_parameters& params) { - assert(column.type->is_multi_cell()); // "Attempted to add items to a frozen map"; + SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to add items to a frozen map"; cql3::raw_value value = expr::evaluate(*_e, params._options); do_put(m, prefix, params, value, column); } @@ -95,7 +96,7 @@ maps::do_put(mutation& m, const clustering_key_prefix& prefix, const update_para void maps::discarder_by_key::execute(mutation& m, const clustering_key_prefix& prefix, const update_parameters& params) { - assert(column.type->is_multi_cell()); // "Attempted to delete a single key in a frozen map"; + SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to delete a single key in a frozen map"; cql3::raw_value key = expr::evaluate(*_e, params._options); if (key.is_null()) { throw exceptions::invalid_request_exception("Invalid null map key"); diff --git a/cql3/query_processor.hh b/cql3/query_processor.hh index ac898797b4..9a86e80a52 100644 --- a/cql3/query_processor.hh +++ b/cql3/query_processor.hh @@ -27,6 +27,7 @@ #include "transport/messages/result_message.hh" #include "service/client_state.hh" #include "service/broadcast_tables/experimental/query_result.hh" +#include "utils/assert.hh" #include "utils/observable.hh" #include "service/raft/raft_group0_client.hh" #include "types/types.hh" @@ -542,7 +543,7 @@ private: bound_terms, std::numeric_limits::max())); } - assert(bound_terms == prepared->bound_names.size()); + SCYLLA_ASSERT(bound_terms == prepared->bound_names.size()); return 
make_ready_future>(std::move(prepared)); }).then([&key, &id_getter, &client_state] (auto prep_ptr) { const auto& warnings = prep_ptr->warnings; diff --git a/cql3/restrictions/bounds_slice.hh b/cql3/restrictions/bounds_slice.hh index e39c50da34..233287f3db 100644 --- a/cql3/restrictions/bounds_slice.hh +++ b/cql3/restrictions/bounds_slice.hh @@ -10,6 +10,7 @@ #pragma once +#include "utils/assert.hh" #include #include "index/secondary_index_manager.hh" #include "cql3/expr/expression.hh" @@ -88,10 +89,10 @@ public: */ void merge(const bounds_slice& other) { if (has_bound(statements::bound::START)) { - assert(!other.has_bound(statements::bound::START)); + SCYLLA_ASSERT(!other.has_bound(statements::bound::START)); _bounds[get_idx(statements::bound::END)] = other._bounds[get_idx(statements::bound::END)]; } else { - assert(!other.has_bound(statements::bound::END)); + SCYLLA_ASSERT(!other.has_bound(statements::bound::END)); _bounds[get_idx(statements::bound::START)] = other._bounds[get_idx(statements::bound::START)]; } } diff --git a/cql3/result_set.cc b/cql3/result_set.cc index 06ae31c901..093e37fea5 100644 --- a/cql3/result_set.cc +++ b/cql3/result_set.cc @@ -8,6 +8,7 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ +#include "utils/assert.hh" #include "cql3/result_set.hh" namespace cql3 { @@ -49,7 +50,7 @@ void metadata::set_paging_state(lw_shared_ptr paging_state) { - assert(paging_state); + SCYLLA_ASSERT(paging_state); if (paging_state->get_remaining() > 0) { set_paging_state(std::move(paging_state)); } else { @@ -114,7 +115,7 @@ bool result_set::empty() const { } void result_set::add_row(std::vector row) { - assert(row.size() == _metadata->value_count()); + SCYLLA_ASSERT(row.size() == _metadata->value_count()); _rows.emplace_back(std::move(row)); } diff --git a/cql3/selection/selection.hh b/cql3/selection/selection.hh index d1060bf599..6c3a75c3f7 100644 --- a/cql3/selection/selection.hh +++ b/cql3/selection/selection.hh @@ -10,6 +10,7 @@ #pragma 
once +#include "utils/assert.hh" #include "bytes.hh" #include "schema/schema_fwd.hh" #include "query-result-reader.hh" @@ -331,7 +332,7 @@ public: add_value(*def, static_row_iterator); break; default: - assert(0); + SCYLLA_ASSERT(0); } } _builder.complete_row(); diff --git a/cql3/sets.cc b/cql3/sets.cc index 700901eee4..c09a1e435a 100644 --- a/cql3/sets.cc +++ b/cql3/sets.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "sets.hh" #include "types/set.hh" #include "cql3/expr/evaluate.hh" @@ -32,7 +33,7 @@ sets::setter::execute(mutation& m, const clustering_key_prefix& row_key, const u void sets::adder::execute(mutation& m, const clustering_key_prefix& row_key, const update_parameters& params) { const cql3::raw_value value = expr::evaluate(*_e, params._options); - assert(column.type->is_multi_cell()); // "Attempted to add items to a frozen set"; + SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to add items to a frozen set"; do_add(m, row_key, params, value, column); } @@ -75,7 +76,7 @@ sets::adder::do_add(mutation& m, const clustering_key_prefix& row_key, const upd void sets::discarder::execute(mutation& m, const clustering_key_prefix& row_key, const update_parameters& params) { - assert(column.type->is_multi_cell()); // "Attempted to remove items from a frozen set"; + SCYLLA_ASSERT(column.type->is_multi_cell()); // "Attempted to remove items from a frozen set"; cql3::raw_value svalue = expr::evaluate(*_e, params._options); if (svalue.is_null()) { @@ -96,7 +97,7 @@ sets::discarder::execute(mutation& m, const clustering_key_prefix& row_key, cons void sets::element_discarder::execute(mutation& m, const clustering_key_prefix& row_key, const update_parameters& params) { - assert(column.type->is_multi_cell() && "Attempted to remove items from a frozen set"); + SCYLLA_ASSERT(column.type->is_multi_cell() && "Attempted to remove items from a frozen set"); cql3::raw_value elt = expr::evaluate(*_e, 
params._options); if (elt.is_null()) { throw exceptions::invalid_request_exception("Invalid null set element"); diff --git a/cql3/statements/alter_table_statement.cc b/cql3/statements/alter_table_statement.cc index 7f6648580b..e5f05f1c14 100644 --- a/cql3/statements/alter_table_statement.cc +++ b/cql3/statements/alter_table_statement.cc @@ -8,6 +8,7 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ +#include "utils/assert.hh" #include #include "cql3/query_options.hh" #include "cql3/statements/alter_table_statement.hh" @@ -304,7 +305,7 @@ std::pair> alter_table_statement::prepare_ switch (_type) { case alter_table_statement::type::add: - assert(_column_changes.size()); + SCYLLA_ASSERT(_column_changes.size()); if (s->is_dense()) { throw exceptions::invalid_request_exception("Cannot add new column to a COMPACT STORAGE table"); } @@ -312,12 +313,12 @@ std::pair> alter_table_statement::prepare_ break; case alter_table_statement::type::alter: - assert(_column_changes.size() == 1); + SCYLLA_ASSERT(_column_changes.size() == 1); invoke_column_change_fn(std::mem_fn(&alter_table_statement::alter_column)); break; case alter_table_statement::type::drop: - assert(_column_changes.size()); + SCYLLA_ASSERT(_column_changes.size()); if (!s->is_cql3_table()) { throw exceptions::invalid_request_exception("Cannot drop columns from a non-CQL3 table"); } diff --git a/cql3/statements/cas_request.hh b/cql3/statements/cas_request.hh index fc087b6305..d511a6880a 100644 --- a/cql3/statements/cas_request.hh +++ b/cql3/statements/cas_request.hh @@ -8,6 +8,7 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ #pragma once +#include "utils/assert.hh" #include "service/paxos/cas_request.hh" #include "cql3/statements/modification_statement.hh" @@ -49,7 +50,7 @@ public: , _key(std::move(key_arg)) , _rows(schema_arg) { - assert(_key.size() == 1 && query::is_single_partition(_key.front())); + SCYLLA_ASSERT(_key.size() == 1 && query::is_single_partition(_key.front())); 
} dht::partition_range_vector key() const { diff --git a/cql3/statements/cf_statement.cc b/cql3/statements/cf_statement.cc index 1fb86c7a8a..33409e3df4 100644 --- a/cql3/statements/cf_statement.cc +++ b/cql3/statements/cf_statement.cc @@ -8,6 +8,7 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ +#include "utils/assert.hh" #include "raw/cf_statement.hh" #include "service/client_state.hh" @@ -40,13 +41,13 @@ void cf_statement::prepare_keyspace(std::string_view keyspace) } bool cf_statement::has_keyspace() const { - assert(_cf_name.has_value()); + SCYLLA_ASSERT(_cf_name.has_value()); return _cf_name->has_keyspace(); } const sstring& cf_statement::keyspace() const { - assert(_cf_name->has_keyspace()); // "The statement hasn't be prepared correctly"; + SCYLLA_ASSERT(_cf_name->has_keyspace()); // "The statement hasn't be prepared correctly"; return _cf_name->get_keyspace(); } diff --git a/cql3/statements/create_table_statement.cc b/cql3/statements/create_table_statement.cc index b512d4bad5..1a6fbf8036 100644 --- a/cql3/statements/create_table_statement.cc +++ b/cql3/statements/create_table_statement.cc @@ -9,6 +9,7 @@ */ +#include "utils/assert.hh" #include #include @@ -128,7 +129,7 @@ void create_table_statement::apply_properties_to(schema_builder& builder, const void create_table_statement::add_column_metadata_from_aliases(schema_builder& builder, std::vector aliases, const std::vector& types, column_kind kind) const { - assert(aliases.size() == types.size()); + SCYLLA_ASSERT(aliases.size() == types.size()); for (size_t i = 0; i < aliases.size(); i++) { if (!aliases[i].empty()) { builder.with_column(aliases[i], types[i], kind); @@ -212,7 +213,7 @@ std::unique_ptr create_table_statement::raw_statement::prepa for (auto&& inner: type->all_types()) { if (inner->is_multi_cell()) { // a nested non-frozen UDT should have already been rejected when defining the type - assert(inner->is_collection()); + SCYLLA_ASSERT(inner->is_collection()); throw 
exceptions::invalid_request_exception("Non-frozen UDTs with nested non-frozen collections are not supported"); } } diff --git a/cql3/statements/create_view_statement.cc b/cql3/statements/create_view_statement.cc index 39846fc2b8..2f25358801 100644 --- a/cql3/statements/create_view_statement.cc +++ b/cql3/statements/create_view_statement.cc @@ -8,6 +8,7 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ +#include "utils/assert.hh" #include #include @@ -61,7 +62,7 @@ future<> create_view_statement::check_access(query_processor& qp, const service: static const column_definition* get_column_definition(const schema& schema, column_identifier::raw& identifier) { auto prepared = identifier.prepare(schema); - assert(dynamic_pointer_cast(prepared)); + SCYLLA_ASSERT(dynamic_pointer_cast(prepared)); auto id = static_pointer_cast(prepared); return schema.get_column_definition(id->name()); } diff --git a/cql3/statements/delete_statement.cc b/cql3/statements/delete_statement.cc index bdf56a1c48..c152d1ea9c 100644 --- a/cql3/statements/delete_statement.cc +++ b/cql3/statements/delete_statement.cc @@ -8,6 +8,7 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ +#include "utils/assert.hh" #include #include @@ -102,7 +103,7 @@ delete_statement::delete_statement(cf_name name, , _deletions(std::move(deletions)) , _where_clause(std::move(where_clause)) { - assert(!_attrs->time_to_live.has_value()); + SCYLLA_ASSERT(!_attrs->time_to_live.has_value()); } } diff --git a/cql3/statements/ks_prop_defs.cc b/cql3/statements/ks_prop_defs.cc index e985020471..6bd4628e39 100644 --- a/cql3/statements/ks_prop_defs.cc +++ b/cql3/statements/ks_prop_defs.cc @@ -8,6 +8,7 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ +#include "utils/assert.hh" #include "cql3/statements/ks_prop_defs.hh" #include "data_dictionary/data_dictionary.hh" #include "data_dictionary/keyspace_metadata.hh" @@ -158,7 +159,7 @@ ks_prop_defs::init_tablets_options 
ks_prop_defs::get_initial_tablets(const sstri if (enabled == "true") { ret = init_tablets_options{ .enabled = true, .specified_count = 0 }; // even if 'initial' is not set, it'll start with auto-detection } else if (enabled == "false") { - assert(!ret.enabled); + SCYLLA_ASSERT(!ret.enabled); return ret; } else { throw exceptions::configuration_exception(sstring("Tablets enabled value must be true or false; found: ") + enabled); diff --git a/cql3/statements/modification_statement.cc b/cql3/statements/modification_statement.cc index aa23f4303e..782208f610 100644 --- a/cql3/statements/modification_statement.cc +++ b/cql3/statements/modification_statement.cc @@ -8,6 +8,7 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ +#include "utils/assert.hh" #include "cql3/cql_statement.hh" #include "cql3/statements/modification_statement.hh" #include "cql3/statements/strongly_consistent_modification_statement.hh" @@ -422,7 +423,7 @@ modification_statement::process_where_clause(data_dictionary::database db, expr: * partition to check conditions. */ if (_if_exists || _if_not_exists) { - assert(!_has_static_column_conditions && !_has_regular_column_conditions); + SCYLLA_ASSERT(!_has_static_column_conditions && !_has_regular_column_conditions); if (s->has_static_columns() && !_restrictions->has_clustering_columns_restriction()) { _has_static_column_conditions = true; } else { @@ -604,13 +605,13 @@ modification_statement::prepare_conditions(data_dictionary::database db, const s if (_if_not_exists) { // To have both 'IF NOT EXISTS' and some other conditions doesn't make sense. - // So far this is enforced by the parser, but let's assert it for sanity if ever the parse changes. - assert(!_conditions); - assert(!_if_exists); + // So far this is enforced by the parser, but let's assert it for sanity if ever the parse changes. 
+ SCYLLA_ASSERT(!_conditions); + SCYLLA_ASSERT(!_if_exists); stmt.set_if_not_exist_condition(); } else if (_if_exists) { - assert(!_conditions); - assert(!_if_not_exists); + SCYLLA_ASSERT(!_conditions); + SCYLLA_ASSERT(!_if_not_exists); stmt.set_if_exist_condition(); } else { stmt._condition = column_condition_prepare(*_conditions, db, keyspace(), schema); diff --git a/cql3/statements/select_statement.cc b/cql3/statements/select_statement.cc index 2e29bac1de..c3473f4075 100644 --- a/cql3/statements/select_statement.cc +++ b/cql3/statements/select_statement.cc @@ -44,6 +44,7 @@ #include "test/lib/select_statement_utils.hh" #include #include "gms/feature_service.hh" +#include "utils/assert.hh" #include "utils/result_combinators.hh" #include "utils/result_loop.hh" #include "replica/database.hh" @@ -815,7 +816,7 @@ select_statement::execute_without_checking_exception_message_non_aggregate_unpag auto timeout = db::timeout_clock::now() + get_timeout(state.get_client_state(), options); if (needs_post_query_ordering() && _limit) { return do_with(std::forward(partition_ranges), [this, &qp, &state, &options, cmd, timeout](auto& prs) { - assert(cmd->partition_limit == query::max_partitions); + SCYLLA_ASSERT(cmd->partition_limit == query::max_partitions); query::result_merger merger(cmd->get_row_limit() * prs.size(), query::max_partitions); return utils::result_map_reduce(prs.begin(), prs.end(), [this, &qp, &state, &options, cmd, timeout] (auto& pr) { dht::partition_range_vector prange { pr }; @@ -1110,7 +1111,7 @@ indexed_table_select_statement::do_execute(query_processor& qp, ? 
source_selector::INTERNAL : source_selector::USER; ++_stats.query_cnt(src_sel, _ks_sel, cond_selector::NO_CONDITIONS, statement_type::SELECT); - assert(_restrictions->uses_secondary_indexing()); + SCYLLA_ASSERT(_restrictions->uses_secondary_indexing()); _stats.unpaged_select_queries(_ks_sel) += options.get_page_size() <= 0; @@ -1842,8 +1843,8 @@ mutation_fragments_select_statement::do_execute(query_processor& qp, service::qu namespace raw { static void validate_attrs(const cql3::attributes::raw& attrs) { - assert(!attrs.timestamp.has_value()); - assert(!attrs.time_to_live.has_value()); + SCYLLA_ASSERT(!attrs.timestamp.has_value()); + SCYLLA_ASSERT(!attrs.time_to_live.has_value()); } select_statement::select_statement(cf_name cf_name, @@ -1975,7 +1976,7 @@ std::unique_ptr select_statement::prepare(data_dictionary::d bool is_reversed_ = false; if (!_parameters->orderings().empty()) { - assert(!for_view); + SCYLLA_ASSERT(!for_view); verify_ordering_is_allowed(*_parameters, *restrictions); prepared_orderings_type prepared_orderings = prepare_orderings(*schema); verify_ordering_is_valid(prepared_orderings, *schema, *restrictions); diff --git a/cql3/statements/truncate_statement.cc b/cql3/statements/truncate_statement.cc index bb5d9d0148..98728212ab 100644 --- a/cql3/statements/truncate_statement.cc +++ b/cql3/statements/truncate_statement.cc @@ -8,6 +8,7 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ +#include "utils/assert.hh" #include "cql3/statements/raw/truncate_statement.hh" #include "cql3/statements/truncate_statement.hh" #include "cql3/statements/prepared_statement.hh" @@ -30,8 +31,8 @@ truncate_statement::truncate_statement(cf_name name, std::unique_ptrtimestamp.has_value()); - assert(!_attrs->time_to_live.has_value()); + SCYLLA_ASSERT(!_attrs->timestamp.has_value()); + SCYLLA_ASSERT(!_attrs->time_to_live.has_value()); } std::unique_ptr truncate_statement::prepare(data_dictionary::database db, cql_stats& stats) { diff --git 
a/cql3/statements/update_statement.cc b/cql3/statements/update_statement.cc index e8e551a746..603ef81c52 100644 --- a/cql3/statements/update_statement.cc +++ b/cql3/statements/update_statement.cc @@ -8,6 +8,7 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ +#include "utils/assert.hh" #include "update_statement.hh" #include "cql3/expr/expression.hh" #include "cql3/expr/evaluate.hh" @@ -121,7 +122,7 @@ void update_statement::add_update_for_key(mutation& m, const query::clustering_r auto rb = s->regular_begin(); if (rb->name().empty() || rb->type == empty_type) { // There is no column outside the PK. So no operation could have passed through validation - assert(_column_operations.empty()); + SCYLLA_ASSERT(_column_operations.empty()); constants::setter(*s->regular_begin(), expr::constant(cql3::raw_value::make_value(bytes()), empty_type)).execute(m, prefix, params); } else { // dense means we don't have a row marker, so don't accept to set only the PK. See CASSANDRA-5648. @@ -438,7 +439,7 @@ insert_json_statement::prepare_internal(data_dictionary::database db, schema_ptr { // FIXME: handle _if_not_exists. For now, mark it used to quiet the compiler. 
#8682 (void)_if_not_exists; - assert(expr::is(_json_value) || expr::is(_json_value)); + SCYLLA_ASSERT(expr::is(_json_value) || expr::is(_json_value)); auto json_column_placeholder = ::make_shared("", true); auto prepared_json_value = prepare_expression(_json_value, db, "", nullptr, make_lw_shared("", "", json_column_placeholder, utf8_type)); expr::verify_no_aggregate_functions(prepared_json_value, "JSON clause"); diff --git a/cql3/update_parameters.cc b/cql3/update_parameters.cc index b38bd2f45b..e85ad26192 100644 --- a/cql3/update_parameters.cc +++ b/cql3/update_parameters.cc @@ -8,6 +8,7 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ +#include "utils/assert.hh" #include "cql3/update_parameters.hh" #include "cql3/selection/selection.hh" #include "cql3/expr/expression.hh" @@ -50,7 +51,7 @@ update_parameters::get_prefetched_list(const partition_key& pkey, const clusteri } // Ensured by collections_as_maps flag in read_command flags - assert(type->is_map()); + SCYLLA_ASSERT(type->is_map()); auto cell = type->deserialize(managed_bytes_view(*val)); const map_type_impl& map_type = static_cast(*cell.type()); @@ -104,7 +105,7 @@ public: } void accept_new_partition(uint64_t row_count) { - assert(0); + SCYLLA_ASSERT(0); } void accept_new_row(const clustering_key& key, const query::result_row_view& static_row, @@ -118,7 +119,7 @@ public: } void accept_new_row(const query::result_row_view& static_row, const query::result_row_view& row) { - assert(0); + SCYLLA_ASSERT(0); } void accept_partition_end(const query::result_row_view& static_row) { diff --git a/cql3/user_types.cc b/cql3/user_types.cc index 0675fafc0d..c0e5831e5d 100644 --- a/cql3/user_types.cc +++ b/cql3/user_types.cc @@ -8,6 +8,7 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ +#include "utils/assert.hh" #include "cql3/user_types.hh" #include "cql3/expr/evaluate.hh" @@ -49,7 +50,7 @@ void user_types::setter::execute(mutation& m, const clustering_key_prefix& row_k const auto& 
elems = expr::get_user_type_elements(ut_value, type); // There might be fewer elements given than fields in the type // (e.g. when the user uses a short tuple literal), but never more. - assert(elems.size() <= type.size()); + SCYLLA_ASSERT(elems.size() <= type.size()); for (size_t i = 0; i < elems.size(); ++i) { if (!elems[i]) { @@ -73,7 +74,7 @@ void user_types::setter::execute(mutation& m, const clustering_key_prefix& row_k } void user_types::setter_by_field::execute(mutation& m, const clustering_key_prefix& row_key, const update_parameters& params) { - assert(column.type->is_user_type() && column.type->is_multi_cell()); + SCYLLA_ASSERT(column.type->is_user_type() && column.type->is_multi_cell()); auto value = expr::evaluate(*_e, params._options); @@ -88,7 +89,7 @@ void user_types::setter_by_field::execute(mutation& m, const clustering_key_pref } void user_types::deleter_by_field::execute(mutation& m, const clustering_key_prefix& row_key, const update_parameters& params) { - assert(column.type->is_user_type() && column.type->is_multi_cell()); + SCYLLA_ASSERT(column.type->is_user_type() && column.type->is_multi_cell()); collection_mutation_description mut; mut.cells.emplace_back(serialize_field_index(_field_idx), params.make_dead_cell()); diff --git a/cql3/util.cc b/cql3/util.cc index 4d2da1a340..88d6d5a626 100644 --- a/cql3/util.cc +++ b/cql3/util.cc @@ -4,6 +4,7 @@ /* Copyright 2020-present ScyllaDB */ +#include "utils/assert.hh" #include "util.hh" #include "cql3/expr/expr-utils.hh" @@ -88,7 +89,7 @@ void do_with_parser_impl(const sstring_view& cql, noncopyable_function #include "bytes.hh" @@ -26,7 +27,7 @@ public: } void add_type(user_type type) { auto i = _user_types.find(type->_name); - assert(i == _user_types.end() || type->is_compatible_with(*i->second)); + SCYLLA_ASSERT(i == _user_types.end() || type->is_compatible_with(*i->second)); _user_types.insert_or_assign(i, type->_name, type); } void remove_type(user_type type) { diff --git 
a/db/commitlog/commitlog.cc b/db/commitlog/commitlog.cc index e8904a28d2..cd52151098 100644 --- a/db/commitlog/commitlog.cc +++ b/db/commitlog/commitlog.cc @@ -47,6 +47,7 @@ #include "rp_set.hh" #include "db/config.hh" #include "db/extensions.hh" +#include "utils/assert.hh" #include "utils/crc.hh" #include "utils/runtime.hh" #include "utils/flush_queue.hh" @@ -520,7 +521,7 @@ private: }; future<> db::commitlog::segment_manager::named_file::open(open_flags flags, file_open_options opt, std::optional size_in) noexcept { - assert(!*this); + SCYLLA_ASSERT(!*this); auto f = co_await open_file_dma(_name, flags, opt); // bypass roundtrip to disk if caller knows size, or open flags truncated file auto existing_size = size_in @@ -533,7 +534,7 @@ future<> db::commitlog::segment_manager::named_file::open(open_flags flags, file } future<> db::commitlog::segment_manager::named_file::rename(std::string_view to) { - assert(!*this); + SCYLLA_ASSERT(!*this); try { auto s = sstring(to); auto dir = std::filesystem::path(to).parent_path(); @@ -647,7 +648,7 @@ detail::sector_split_iterator::sector_split_iterator(base_iterator i, base_itera {} detail::sector_split_iterator& detail::sector_split_iterator::operator++() { - assert(_iter != _end); + SCYLLA_ASSERT(_iter != _end); _ptr += _sector_size; // check if we have more pages in this temp-buffer (in out case they are always aligned + sized in page units) auto rem = _iter->size() - std::distance(_iter->get(), const_cast(_ptr)); @@ -658,7 +659,7 @@ detail::sector_split_iterator& detail::sector_split_iterator::operator++() { return *this; } rem = _iter->size(); - assert(rem >= _sector_size); + SCYLLA_ASSERT(rem >= _sector_size); // booh. ugly. 
_ptr = const_cast(_iter->get()); } @@ -926,7 +927,7 @@ public: // See class comment for info future flush() { auto me = shared_from_this(); - assert(me.use_count() > 1); + SCYLLA_ASSERT(me.use_count() > 1); uint64_t pos = _file_pos; clogger.trace("Syncing {} {} -> {}", *this, _flush_pos, pos); @@ -937,13 +938,13 @@ public: // Run like this to ensure flush ordering, and making flushes "waitable" co_await _pending_ops.run_with_ordered_post_op(rp, [] {}, [&] { - assert(_pending_ops.has_operation(rp)); + SCYLLA_ASSERT(_pending_ops.has_operation(rp)); return do_flush(pos); }); co_return me; } future terminate() { - assert(_closed); + SCYLLA_ASSERT(_closed); if (!std::exchange(_terminated, true)) { // write a terminating zero block iff we are ending (a reused) // block before actual file end. @@ -1000,7 +1001,7 @@ public: * Allocate a new buffer */ void new_buffer(size_t s) { - assert(_buffer.empty()); + SCYLLA_ASSERT(_buffer.empty()); auto overhead = segment_overhead_size; if (_file_pos == 0) { @@ -1018,7 +1019,7 @@ public: // the amount of data we can actually write into. auto useable_size = size - n_blocks * detail::sector_overhead_size; - assert(useable_size >= s); + SCYLLA_ASSERT(useable_size >= s); _buffer_ostream = frag_ostream_type(detail::sector_split_iterator(_buffer.begin(), _buffer.end(), _alignment), useable_size); // #16298 - keep track of ostream initial size. @@ -1031,7 +1032,7 @@ public: // we should be in a allocate or terminate call. In either case, account for overhead now already. 
_segment_manager->account_memory_usage(overhead); - assert(buffer_position() == overhead); + SCYLLA_ASSERT(buffer_position() == overhead); } bool buffer_is_empty() const { @@ -1063,7 +1064,7 @@ public: _buffer_ostream_size = 0; _num_allocs = 0; - assert(me.use_count() > 1); + SCYLLA_ASSERT(me.use_count() > 1); auto out = buf.get_ostream(); @@ -1098,8 +1099,8 @@ public: clogger.trace("Writing {} entries, {} k in {} -> {}", num, size, off, off + size); } else { - assert(num == 0); - assert(_closed); + SCYLLA_ASSERT(num == 0); + SCYLLA_ASSERT(_closed); clogger.trace("Terminating {} at pos {}", *this, _file_pos); write(out, uint64_t(0)); } @@ -1114,7 +1115,7 @@ public: auto* p = const_cast(tbuf.get()); auto* e = p + tbuf.size(); while (p != e) { - assert(align_up(p, _alignment) == p); + SCYLLA_ASSERT(align_up(p, _alignment) == p); // include segment id in crc:ed data auto be = p + ss; @@ -1137,7 +1138,7 @@ public: co_await _pending_ops.run_with_ordered_post_op(rp, [&]() -> future<> { auto view = fragmented_temporary_buffer::view(buf); view.remove_suffix(buf.size_bytes() - size); - assert(size == view.size_bytes()); + SCYLLA_ASSERT(size == view.size_bytes()); if (view.empty()) { co_return; @@ -1179,7 +1180,7 @@ public: } } }, [&]() -> future<> { - assert(_pending_ops.has_operation(rp)); + SCYLLA_ASSERT(_pending_ops.has_operation(rp)); if (flush_after) { co_await do_flush(top); } @@ -1209,7 +1210,7 @@ public: replay_position rp(_desc.id, position_type(fp)); co_await _pending_ops.wait_for_pending(rp, timeout); - assert(_segment_manager->cfg.mode != sync_mode::BATCH || _flush_pos > fp); + SCYLLA_ASSERT(_segment_manager->cfg.mode != sync_mode::BATCH || _flush_pos > fp); if (_flush_pos <= fp) { // previous op we were waiting for was not sync one, so it did not flush // force flush here @@ -1372,7 +1373,7 @@ public: auto fill_size = size - buf_pos; if (fill_size > 0) { // we want to fill to a sector boundary, must leave room for metadata - assert((fill_size - 
detail::sector_overhead_size) <= _buffer_ostream.size()); + SCYLLA_ASSERT((fill_size - detail::sector_overhead_size) <= _buffer_ostream.size()); _buffer_ostream.fill('\0', fill_size - detail::sector_overhead_size); _segment_manager->totals.bytes_slack += fill_size; _segment_manager->account_memory_usage(fill_size); @@ -1382,7 +1383,7 @@ public: void mark_clean(const cf_id_type& id, uint64_t count) noexcept { auto i = _cf_dirty.find(id); if (i != _cf_dirty.end()) { - assert(i->second >= count); + SCYLLA_ASSERT(i->second >= count); i->second -= count; if (i->second == 0) { _cf_dirty.erase(i); @@ -1518,8 +1519,8 @@ db::commitlog::segment_manager::segment_manager(config c) , _reserve_replenisher(make_ready_future<>()) , _background_sync(make_ready_future<>()) { - assert(max_size > 0); - assert(max_mutation_size < segment::multi_entry_size_magic); + SCYLLA_ASSERT(max_size > 0); + SCYLLA_ASSERT(max_mutation_size < segment::multi_entry_size_magic); clogger.trace("Commitlog {} maximum disk size: {} MB / cpu ({} cpus)", cfg.commit_log_location, max_disk_size / (1024 * 1024), @@ -1627,7 +1628,7 @@ gc_clock::time_point db::commitlog::segment_manager::min_gc_time(const cf_id_typ future<> db::commitlog::segment_manager::init() { auto descs = co_await list_descriptors(cfg.commit_log_location); - assert(_reserve_segments.empty()); // _segments_to_replay must not pick them up + SCYLLA_ASSERT(_reserve_segments.empty()); // _segments_to_replay must not pick them up segment_id_type id = *cfg.base_segment_id; for (auto& d : descs) { id = std::max(id, replay_position(d.id).base_id()); @@ -2325,7 +2326,7 @@ future<> db::commitlog::segment_manager::delete_segments(std::vector fi void db::commitlog::segment_manager::abort_recycled_list(std::exception_ptr ep) { // may not call here with elements in list. that would leak files. 
- assert(_recycled_segments.empty()); + SCYLLA_ASSERT(_recycled_segments.empty()); _recycled_segments.abort(ep); // and ensure next lap(s) still has a queue _recycled_segments = queue(std::numeric_limits::max()); @@ -2424,7 +2425,7 @@ future<> db::commitlog::segment_manager::do_pending_deletes() { try { co_await f.rename(dst); auto b = _recycled_segments.push(std::move(f)); - assert(b); // we set this to max_size_t so... + SCYLLA_ASSERT(b); // we set this to max_size_t so... continue; } catch (...) { clogger.error("Could not recycle segment {}: {}", f.name(), std::current_exception()); @@ -2628,7 +2629,7 @@ future db::commitlog::add(const cf_id_type& id, future db::commitlog::add_entry(const cf_id_type& id, const commitlog_entry_writer& cew, timeout_clock::time_point timeout) { - assert(id == cew.schema()->id()); + SCYLLA_ASSERT(id == cew.schema()->id()); class cl_entry_writer final : public entry_writer { commitlog_entry_writer _writer; @@ -2716,7 +2717,7 @@ db::commitlog::add_entries(std::vector entry_writers, db w.write(out); } void result(size_t i, rp_handle h) override { - assert(i == res.size()); + SCYLLA_ASSERT(i == res.size()); res.emplace_back(std::move(h)); } @@ -2907,7 +2908,7 @@ db::commitlog::read_log_file(sstring filename, sstring pfx, commit_load_reader_f co_return; } // must be on page boundary now! - assert(align_down(pos, alignment) == pos); + SCYLLA_ASSERT(align_down(pos, alignment) == pos); // this is in full sectors. no need to fiddle with overhead here. auto bytes = seek_to_pos - pos; @@ -3083,7 +3084,7 @@ db::commitlog::read_log_file(sstring filename, sstring pfx, commit_load_reader_f // #16298 - adjust position here, based on data returned. 
advance_pos(size); - assert(((filepos_to_datapos(pos) + buffer.size_bytes()) % (alignment - detail::sector_overhead_size)) == 0); + SCYLLA_ASSERT(((filepos_to_datapos(pos) + buffer.size_bytes()) % (alignment - detail::sector_overhead_size)) == 0); co_return res; } @@ -3161,7 +3162,7 @@ db::commitlog::read_log_file(sstring filename, sstring pfx, commit_load_reader_f * If not, this is small slack space in the chunk end, and we should just go * to the next. */ - assert(pos <= next); + SCYLLA_ASSERT(pos <= next); if (next_pos(entry_header_size) >= next) { co_await skip_to_chunk(next); co_return; } @@ -3182,7 +3183,7 @@ db::commitlog::read_log_file(sstring filename, sstring pfx, commit_load_reader_f auto actual_size = checksum; auto end = pos + actual_size - entry_header_size - sizeof(uint32_t); - assert(end <= next); + SCYLLA_ASSERT(end <= next); // really small read... buf = co_await read_data(sizeof(uint32_t)); in = buf.get_istream(); diff --git a/db/commitlog/commitlog_entry.hh b/db/commitlog/commitlog_entry.hh index b980afebb3..7943457ede 100644 --- a/db/commitlog/commitlog_entry.hh +++ b/db/commitlog/commitlog_entry.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include "commitlog_types.hh" @@ -111,7 +112,7 @@ public: } size_t size() const { - assert(_size != std::numeric_limits::max()); + SCYLLA_ASSERT(_size != std::numeric_limits::max()); return _size; } diff --git a/db/commitlog/commitlog_replayer.cc b/db/commitlog/commitlog_replayer.cc index 51d7ce59e5..654bc78fbf 100644 --- a/db/commitlog/commitlog_replayer.cc +++ b/db/commitlog/commitlog_replayer.cc @@ -7,6 +7,7 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ +#include "utils/assert.hh" #include #include #include @@ -69,7 +70,7 @@ public: }; // move start/stop of the thread local bookkeep to "top level" - // and also make sure to assert on it actually being started. + // and also make sure to assert on it actually being started. 
future<> start() { return _column_mappings.start(); } @@ -164,7 +165,7 @@ future<> db::commitlog_replayer::impl::init() { future db::commitlog_replayer::impl::recover(sstring file, const sstring& fname_prefix) const { - assert(_column_mappings.local_is_initialized()); + SCYLLA_ASSERT(_column_mappings.local_is_initialized()); replay_position rp{commitlog::descriptor(file, fname_prefix)}; auto gp = min_pos(rp.shard_id()); diff --git a/db/heat_load_balance.hh b/db/heat_load_balance.hh index dc21c76175..e490628877 100644 --- a/db/heat_load_balance.hh +++ b/db/heat_load_balance.hh @@ -27,6 +27,7 @@ * uniformly, and we need to choose K nodes and forward the request * to them). */ +#include "utils/assert.hh" #include #include #include @@ -70,7 +71,7 @@ public: std::vector get() { auto n = _pp.size(); auto ke = _k + (_extra ? 1 : 0); - assert(ke <= n); + SCYLLA_ASSERT(ke <= n); std::vector ret; ret.reserve(ke); std::vector r = ssample(_k, _pp); @@ -97,7 +98,7 @@ public: } } } - assert(ret.size() == ke); + SCYLLA_ASSERT(ret.size() == ke); return ret; } }; diff --git a/db/hints/internal/hint_endpoint_manager.cc b/db/hints/internal/hint_endpoint_manager.cc index ea2a2a6ff3..8e08d4fb18 100644 --- a/db/hints/internal/hint_endpoint_manager.cc +++ b/db/hints/internal/hint_endpoint_manager.cc @@ -24,6 +24,7 @@ #include "db/hints/manager.hh" #include "db/timeout_clock.hh" #include "replica/database.hh" +#include "utils/assert.hh" #include "utils/disk-error-handler.hh" #include "utils/error_injection.hh" #include "utils/runtime.hh" @@ -173,7 +174,7 @@ hint_endpoint_manager::hint_endpoint_manager(hint_endpoint_manager&& other) {} hint_endpoint_manager::~hint_endpoint_manager() { - assert(stopped()); + SCYLLA_ASSERT(stopped()); } future hint_endpoint_manager::get_or_load() { diff --git a/db/hints/manager.hh b/db/hints/manager.hh index c589f68046..02e30b00df 100644 --- a/db/hints/manager.hh +++ b/db/hints/manager.hh @@ -10,6 +10,7 @@ #pragma once // Seastar features. 
+#include "utils/assert.hh" #include #include #include @@ -167,7 +168,7 @@ public: manager& operator=(manager&&) = delete; ~manager() noexcept { - assert(_ep_managers.empty()); + SCYLLA_ASSERT(_ep_managers.empty()); } public: diff --git a/db/large_data_handler.cc b/db/large_data_handler.cc index 416ee9862b..4e916b8a48 100644 --- a/db/large_data_handler.cc +++ b/db/large_data_handler.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include #include "db/system_keyspace.hh" @@ -36,7 +37,7 @@ large_data_handler::large_data_handler(uint64_t partition_threshold_bytes, uint6 } future large_data_handler::maybe_record_large_partitions(const sstables::sstable& sst, const sstables::key& key, uint64_t partition_size, uint64_t rows, uint64_t range_tombstones, uint64_t dead_rows) { - assert(running()); + SCYLLA_ASSERT(running()); partition_above_threshold above_threshold{partition_size > _partition_threshold_bytes, rows > _rows_count_threshold}; static_assert(std::is_same_v); _stats.partitions_bigger_than_threshold += above_threshold.size; // increment if true @@ -79,7 +80,7 @@ sstring large_data_handler::sst_filename(const sstables::sstable& sst) { } future<> large_data_handler::maybe_delete_large_data_entries(sstables::shared_sstable sst) { - assert(running()); + SCYLLA_ASSERT(running()); auto schema = sst->get_schema(); auto filename = sst_filename(*sst); using ldt = sstables::large_data_type; @@ -237,7 +238,7 @@ future<> cql_table_large_data_handler::record_large_rows(const sstables::sstable } future<> cql_table_large_data_handler::delete_large_data_entries(const schema& s, sstring sstable_name, std::string_view large_table_name) const { - assert(_sys_ks); + SCYLLA_ASSERT(_sys_ks); const sstring req = format("DELETE FROM system.{} WHERE keyspace_name = ? AND table_name = ? 
AND sstable_name = ?", large_table_name); diff --git a/db/large_data_handler.hh b/db/large_data_handler.hh index d813467633..645d933016 100644 --- a/db/large_data_handler.hh +++ b/db/large_data_handler.hh @@ -12,6 +12,7 @@ #include "schema/schema_fwd.hh" #include "system_keyspace.hh" #include "sstables/shared_sstable.hh" +#include "utils/assert.hh" #include "utils/updateable_value.hh" namespace sstables { @@ -78,7 +79,7 @@ public: future maybe_record_large_rows(const sstables::sstable& sst, const sstables::key& partition_key, const clustering_key_prefix* clustering_key, uint64_t row_size) { - assert(running()); + SCYLLA_ASSERT(running()); if (__builtin_expect(row_size > _row_threshold_bytes, false)) { return with_sem([&sst, &partition_key, clustering_key, row_size, this] { return record_large_rows(sst, partition_key, clustering_key, row_size); @@ -98,7 +99,7 @@ public: future maybe_record_large_cells(const sstables::sstable& sst, const sstables::key& partition_key, const clustering_key_prefix* clustering_key, const column_definition& cdef, uint64_t cell_size, uint64_t collection_elements) { - assert(running()); + SCYLLA_ASSERT(running()); if (__builtin_expect(cell_size > _cell_threshold_bytes || collection_elements > _collection_elements_count_threshold, false)) { return with_sem([&sst, &partition_key, clustering_key, &cdef, cell_size, collection_elements, this] { return record_large_cells(sst, partition_key, clustering_key, cdef, cell_size, collection_elements); diff --git a/db/schema_tables.cc b/db/schema_tables.cc index c132df913e..4b537a9ec7 100644 --- a/db/schema_tables.cc +++ b/db/schema_tables.cc @@ -19,6 +19,7 @@ #include "query-result-writer.hh" #include "schema/schema_builder.hh" #include "map_difference.hh" +#include "utils/assert.hh" #include "utils/UUID_gen.hh" #include "utils/to_string.hh" #include @@ -452,9 +453,9 @@ const std::unordered_set& schema_tables_holding_schema_mutations() { db::system_keyspace::legacy::column_families(), 
db::system_keyspace::legacy::columns(), db::system_keyspace::legacy::triggers()}) { - assert(s->clustering_key_size() > 0); + SCYLLA_ASSERT(s->clustering_key_size() > 0); auto&& first_column_name = s->clustering_column_at(0).name_as_text(); - assert(first_column_name == "table_name" + SCYLLA_ASSERT(first_column_name == "table_name" || first_column_name == "view_name" || first_column_name == "columnfamily_name"); ids.emplace(s->id()); @@ -904,7 +905,7 @@ read_schema_partition_for_keyspace(distributed& proxy, s future read_schema_partition_for_table(distributed& proxy, schema_ptr schema, const sstring& keyspace_name, const sstring& table_name) { - assert(schema_tables_holding_schema_mutations().contains(schema->id())); + SCYLLA_ASSERT(schema_tables_holding_schema_mutations().contains(schema->id())); auto keyspace_key = partition_key::from_singular(*schema, keyspace_name); auto clustering_range = query::clustering_range(clustering_key_prefix::from_clustering_prefix( *schema, exploded_clustering_prefix({utf8_type->decompose(table_name)}))); @@ -942,7 +943,7 @@ future<> merge_unlock() { } future> hold_merge_lock() noexcept { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); if (slogger.is_enabled(log_level::trace)) { slogger.trace("hold_merge_lock at {}", current_backtrace()); @@ -2074,7 +2075,7 @@ template static void store_map(mutation& m, const K& ckey, const bytes& name, api::timestamp_type timestamp, const Map& map) { auto s = m.schema(); auto column = s->get_column_definition(name); - assert(column); + SCYLLA_ASSERT(column); set_cell_or_clustered(m, ckey, *column, make_map_mutation(map, *column, timestamp)); } diff --git a/db/size_estimates_virtual_reader.cc b/db/size_estimates_virtual_reader.cc index b9fd4cecf4..fa4af0a788 100644 --- a/db/size_estimates_virtual_reader.cc +++ b/db/size_estimates_virtual_reader.cc @@ -8,6 +8,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include #include @@ -188,7 
+189,7 @@ static future> get_local_ranges(replica::database& db, auto ranges = db.get_token_metadata().get_primary_ranges_for(std::move(tokens)); std::vector local_ranges; auto to_bytes = [](const std::optional& b) { - assert(b); + SCYLLA_ASSERT(b); return utf8_type->decompose(b->value().to_sstring()); }; // We merge the ranges to be compatible with how Cassandra shows it's size estimates table. diff --git a/db/sstables-format-selector.cc b/db/sstables-format-selector.cc index 97f3fd9550..6331396411 100644 --- a/db/sstables-format-selector.cc +++ b/db/sstables-format-selector.cc @@ -7,6 +7,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include "sstables-format-selector.hh" #include "log.hh" @@ -82,7 +83,7 @@ future<> sstables_format_listener::maybe_select_format(sstables::sstable_version } future<> sstables_format_listener::start() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); // The listener may fire immediately, create a thread for that case. 
co_await seastar::async([this] { _me_feature_listener.on_enabled(); diff --git a/db/system_distributed_keyspace.cc b/db/system_distributed_keyspace.cc index 9edaf039fb..4d690e6125 100644 --- a/db/system_distributed_keyspace.cc +++ b/db/system_distributed_keyspace.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "db/system_distributed_keyspace.hh" #include "cql3/untyped_result_set.hh" @@ -220,7 +221,7 @@ static schema_ptr get_current_service_levels(data_dictionary::database db) { } static schema_ptr get_updated_service_levels(data_dictionary::database db) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); auto schema = get_current_service_levels(db); schema_builder b(schema); for (const auto& col : new_columns) { diff --git a/db/system_keyspace.cc b/db/system_keyspace.cc index 8029d75f92..c504185f5b 100644 --- a/db/system_keyspace.cc +++ b/db/system_keyspace.cc @@ -24,6 +24,7 @@ #include "gms/feature_service.hh" #include "system_keyspace_view_types.hh" #include "schema/schema_builder.hh" +#include "utils/assert.hh" #include "utils/hashers.hh" #include "log.hh" #include @@ -1580,7 +1581,7 @@ struct local_cache { }; future<> system_keyspace::peers_table_read_fixup() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); if (_peers_table_read_fixup_done) { co_return; } @@ -1839,7 +1840,7 @@ std::unordered_set decode_tokens(const set_type_impl::native_type& t std::unordered_set tset; for (auto& t: tokens) { auto str = value_cast(t); - assert(str == dht::token::from_sstring(str).to_sstring()); + SCYLLA_ASSERT(str == dht::token::from_sstring(str).to_sstring()); tset.insert(dht::token::from_sstring(str)); } return tset; @@ -1945,7 +1946,7 @@ future> system_keyspace::load_peers() { co_await peers_table_read_fixup(); const auto res = co_await execute_cql(format("SELECT peer, tokens FROM system.{}", PEERS)); - assert(res); + SCYLLA_ASSERT(res); std::vector ret; for (const auto& row: 
*res) { @@ -2709,7 +2710,7 @@ future system_keyspace::get_last_group0_state_id() { format( "SELECT state_id FROM system.{} WHERE key = '{}' LIMIT 1", GROUP0_HISTORY, GROUP0_HISTORY_KEY)); - assert(rs); + SCYLLA_ASSERT(rs); if (rs->empty()) { co_return utils::UUID{}; } @@ -2722,7 +2723,7 @@ future system_keyspace::group0_history_contains(utils::UUID state_id) { "SELECT state_id FROM system.{} WHERE key = '{}' AND state_id = ?", GROUP0_HISTORY, GROUP0_HISTORY_KEY), state_id); - assert(rs); + SCYLLA_ASSERT(rs); co_return !rs->empty(); } @@ -2735,16 +2736,16 @@ mutation system_keyspace::make_group0_history_state_id_mutation( row.apply(row_marker(ts)); if (!description.empty()) { auto cdef = s->get_column_definition("description"); - assert(cdef); + SCYLLA_ASSERT(cdef); row.cells().apply(*cdef, atomic_cell::make_live(*cdef->type, ts, cdef->type->decompose(description))); } if (gc_older_than) { using namespace std::chrono; - assert(*gc_older_than >= gc_clock::duration{0}); + SCYLLA_ASSERT(*gc_older_than >= gc_clock::duration{0}); auto ts_micros = microseconds{ts}; auto gc_older_than_micros = duration_cast(*gc_older_than); - assert(gc_older_than_micros < ts_micros); + SCYLLA_ASSERT(gc_older_than_micros < ts_micros); auto tomb_upper_bound = utils::UUID_gen::min_time_UUID(ts_micros - gc_older_than_micros); // We want to delete all entries with IDs smaller than `tomb_upper_bound` @@ -2761,7 +2762,7 @@ mutation system_keyspace::make_group0_history_state_id_mutation( future system_keyspace::get_group0_history(distributed& db) { auto s = group0_history(); auto rs = co_await db::system_keyspace::query_mutations(db, db::system_keyspace::NAME, db::system_keyspace::GROUP0_HISTORY); - assert(rs); + SCYLLA_ASSERT(rs); auto& ps = rs->partitions(); for (auto& p: ps) { auto mut = p.mut().unfreeze(s); @@ -2783,7 +2784,7 @@ static future> get_scylla_local_mutation(replica::databa dht::partition_range pr = dht::partition_range::make_singular(dht::decorate_key(*s, pk)); auto rs = co_await 
replica::query_mutations(db.container(), s, pr, s->full_slice(), db::no_timeout); - assert(rs); + SCYLLA_ASSERT(rs); auto& ps = rs->partitions(); for (auto& p: ps) { auto mut = p.mut().unfreeze(s); @@ -2906,7 +2907,7 @@ static bool must_have_tokens(service::node_state nst) { future system_keyspace::load_topology_state(const std::unordered_set& force_load_hosts) { auto rs = co_await execute_cql( format("SELECT * FROM system.{} WHERE key = '{}'", TOPOLOGY, TOPOLOGY)); - assert(rs); + SCYLLA_ASSERT(rs); service::topology_state_machine::topology_type ret; @@ -3087,7 +3088,7 @@ future system_keyspace::load_topology_state(const std::unorde format("SELECT count(range_end) as cnt FROM {}.{} WHERE key = '{}' AND id = ?", NAME, CDC_GENERATIONS_V3, cdc::CDC_GENERATIONS_V3_KEY), gen_id.id); - assert(gen_rows); + SCYLLA_ASSERT(gen_rows); if (gen_rows->empty()) { on_internal_error(slogger, format( "load_topology_state: last committed CDC generation time UUID ({}) present, but data missing", gen_id.id)); @@ -3146,7 +3147,7 @@ future system_keyspace::load_topology_state(const std::unorde future> system_keyspace::load_topology_features_state() { auto rs = co_await execute_cql( format("SELECT host_id, node_state, supported_features, enabled_features FROM system.{} WHERE key = '{}'", TOPOLOGY, TOPOLOGY)); - assert(rs); + SCYLLA_ASSERT(rs); co_return decode_topology_features_state(std::move(rs)); } diff --git a/db/view/row_locking.cc b/db/view/row_locking.cc index 5310c66520..c68fa2faef 100644 --- a/db/view/row_locking.cc +++ b/db/view/row_locking.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "row_locking.hh" #include "log.hh" @@ -152,14 +153,14 @@ row_locker::unlock(const dht::decorated_key* pk, bool partition_exclusive, mylog.error("column_family::local_base_lock_holder::~local_base_lock_holder() can't find lock for partition", *pk); return; } - assert(&pli->first == pk); + SCYLLA_ASSERT(&pli->first == pk); if (cpk) { auto 
rli = pli->second._row_locks.find(*cpk); if (rli == pli->second._row_locks.end()) { mylog.error("column_family::local_base_lock_holder::~local_base_lock_holder() can't find lock for row", *cpk); return; } - assert(&rli->first == cpk); + SCYLLA_ASSERT(&rli->first == cpk); mylog.debug("releasing {} lock for row {} in partition {}", (row_exclusive ? "exclusive" : "shared"), *cpk, *pk); auto& lock = rli->second; if (row_exclusive) { diff --git a/db/view/view.cc b/db/view/view.cc index eea715613b..166b5271e3 100644 --- a/db/view/view.cc +++ b/db/view/view.cc @@ -53,6 +53,7 @@ #include "service/migration_manager.hh" #include "service/storage_proxy.hh" #include "compaction/compaction_manager.hh" +#include "utils/assert.hh" #include "utils/small_vector.hh" #include "view_info.hh" #include "view_update_checks.hh" @@ -1490,7 +1491,7 @@ future view_update_builder::on_results() { existing.apply(std::max(_existing_partition_tombstone, _existing_current_tombstone)); auto tombstone = std::max(_update_partition_tombstone, _update_current_tombstone); // The way we build the read command used for existing rows, we should always have a non-empty - // tombstone, since we wouldn't have read the existing row otherwise. We don't assert that in case the + // tombstone, since we wouldn't have read the existing row otherwise. We don't assert that in case the // read method ever changes.
if (tombstone) { auto update = clustering_row(existing.key(), row_tombstone(std::move(tombstone)), row_marker(), ::row()); @@ -1516,11 +1517,11 @@ future view_update_builder::on_results() { } // We're updating a row that had pre-existing data if (_update->is_range_tombstone_change()) { - assert(_existing->is_range_tombstone_change()); + SCYLLA_ASSERT(_existing->is_range_tombstone_change()); _existing_current_tombstone = std::move(*_existing).as_range_tombstone_change().tombstone(); _update_current_tombstone = std::move(*_update).as_range_tombstone_change().tombstone(); } else if (_update->is_clustering_row()) { - assert(_existing->is_clustering_row()); + SCYLLA_ASSERT(_existing->is_clustering_row()); _update->mutate_as_clustering_row(*_schema, [&] (clustering_row& cr) mutable { cr.apply(std::max(_update_partition_tombstone, _update_current_tombstone)); }); @@ -1752,7 +1753,7 @@ get_view_natural_endpoint( } } - assert(base_endpoints.size() == view_endpoints.size()); + SCYLLA_ASSERT(base_endpoints.size() == view_endpoints.size()); auto base_it = std::find(base_endpoints.begin(), base_endpoints.end(), me); if (base_it == base_endpoints.end()) { // This node is not a base replica of this key, so we return empty @@ -2700,7 +2701,7 @@ future<> view_builder::mark_as_built(view_ptr view) { } future<> view_builder::mark_existing_views_as_built() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); auto views = _db.get_views(); co_await coroutine::parallel_for_each(views, [this] (view_ptr& view) { return mark_as_built(view); @@ -2890,7 +2891,7 @@ delete_ghost_rows_visitor::delete_ghost_rows_visitor(service::storage_proxy& pro {} void delete_ghost_rows_visitor::accept_new_partition(const partition_key& key, uint32_t row_count) { - assert(thread::running_in_thread()); + SCYLLA_ASSERT(thread::running_in_thread()); _view_pk = key; } diff --git a/dht/i_partitioner.cc b/dht/i_partitioner.cc index e7a5e3e6fb..f0786b20dc 100644 --- a/dht/i_partitioner.cc +++ 
b/dht/i_partitioner.cc @@ -12,6 +12,7 @@ #include #include "dht/ring_position.hh" #include "dht/token-sharding.hh" +#include "utils/assert.hh" #include "utils/class_registrator.hh" #include #include @@ -423,7 +424,7 @@ future subtract_ranges(const schema& schema, const ++range_to_subtract; break; default: - assert(size <= 2); + SCYLLA_ASSERT(size <= 2); } co_await coroutine::maybe_yield(); } @@ -442,7 +443,7 @@ dht::token_range_vector split_token_range_msb(unsigned most_significant_bits) { } uint64_t number_of_ranges = 1 << most_significant_bits; ret.reserve(number_of_ranges); - assert(most_significant_bits < 64); + SCYLLA_ASSERT(most_significant_bits < 64); dht::token prev_last_token; for (uint64_t i = 0; i < number_of_ranges; i++) { std::optional start_bound; diff --git a/dht/range_streamer.cc b/dht/range_streamer.cc index a76352d4c3..6ae7bf316b 100644 --- a/dht/range_streamer.cc +++ b/dht/range_streamer.cc @@ -18,6 +18,7 @@ #include #include #include +#include "utils/assert.hh" #include "utils/stall_free.hh" namespace dht { @@ -118,7 +119,7 @@ range_streamer::get_all_ranges_with_sources_for(const sstring& keyspace_name, lo std::unordered_map> range_streamer::get_all_ranges_with_strict_sources_for(const sstring& keyspace_name, locator::vnode_effective_replication_map_ptr erm, dht::token_range_vector desired_ranges, gms::gossiper& gossiper) { logger.debug("{} ks={}", __func__, keyspace_name); - assert (_tokens.empty() == false); + SCYLLA_ASSERT (_tokens.empty() == false); auto& strat = erm->get_replication_strategy(); diff --git a/dht/token-sharding.hh b/dht/token-sharding.hh index 4e5e897f41..915914cabd 100644 --- a/dht/token-sharding.hh +++ b/dht/token-sharding.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include "dht/token.hh" #include #include @@ -62,7 +63,7 @@ public: * * [] (const token& t) { * auto shards = shard_for_writes(); - * assert(shards.size() <= 1); + * SCYLLA_ASSERT(shards.size() <= 1); * return shards.empty() ? 
0 : shards[0]; * } * diff --git a/direct_failure_detector/failure_detector.cc b/direct_failure_detector/failure_detector.cc index 680974dbff..c60d97f1fe 100644 --- a/direct_failure_detector/failure_detector.cc +++ b/direct_failure_detector/failure_detector.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include @@ -192,7 +193,7 @@ failure_detector::impl::impl( } void failure_detector::impl::send_update_endpoint(pinger::endpoint_id ep, endpoint_update update) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); auto it = _endpoint_updates.find(ep); if (it == _endpoint_updates.end()) { @@ -205,7 +206,7 @@ void failure_detector::impl::send_update_endpoint(pinger::endpoint_id ep, endpoi } future<> failure_detector::impl::update_endpoint_fiber() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); while (true) { co_await _endpoint_changed.wait([this] { return !_endpoint_updates.empty(); }); @@ -246,7 +247,7 @@ future<> failure_detector::impl::update_endpoint_fiber() { } future<> failure_detector::impl::add_endpoint(pinger::endpoint_id ep) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); if (_workers.contains(ep)) { co_return; @@ -254,7 +255,7 @@ future<> failure_detector::impl::add_endpoint(pinger::endpoint_id ep) { // Pick a shard with the smallest number of workers to create a new worker. 
auto shard = std::distance(_num_workers.begin(), std::min_element(_num_workers.begin(), _num_workers.end())); - assert(_num_workers.size() == smp::count); + SCYLLA_ASSERT(_num_workers.size() == smp::count); ++_num_workers[shard]; auto [it, _] = _workers.emplace(ep, shard); @@ -269,7 +270,7 @@ future<> failure_detector::impl::add_endpoint(pinger::endpoint_id ep) { } future<> failure_detector::impl::remove_endpoint(pinger::endpoint_id ep) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); auto it = _workers.find(ep); if (it == _workers.end()) { @@ -279,8 +280,8 @@ future<> failure_detector::impl::remove_endpoint(pinger::endpoint_id ep) { auto shard = it->second; co_await _parent.container().invoke_on(shard, [ep] (failure_detector& fd) { return fd._impl->destroy_worker(ep); }); - assert(_num_workers.size() == smp::count); - assert(shard < _num_workers.size()); + SCYLLA_ASSERT(_num_workers.size() == smp::count); + SCYLLA_ASSERT(shard < _num_workers.size()); --_num_workers[shard]; _workers.erase(it); @@ -374,8 +375,8 @@ endpoint_worker::endpoint_worker(failure_detector::impl& fd, pinger::endpoint_id } endpoint_worker::~endpoint_worker() { - assert(_ping_fiber.available()); - assert(_notify_fiber.available()); + SCYLLA_ASSERT(_ping_fiber.available()); + SCYLLA_ASSERT(_notify_fiber.available()); } future failure_detector::register_listener(listener& l, clock::interval_t threshold) { @@ -624,7 +625,7 @@ future<> endpoint_worker::notify_fiber() noexcept { auto& listeners = it->second.listeners; auto& endpoint_liveness = it->second.endpoint_liveness[_id]; bool alive = endpoint_liveness.alive; - assert(alive != endpoint_liveness.marked_alive); + SCYLLA_ASSERT(alive != endpoint_liveness.marked_alive); endpoint_liveness.marked_alive = alive; try { @@ -680,7 +681,7 @@ future<> failure_detector::stop() { co_await container().invoke_on_all([] (failure_detector& fd) -> future<> { // All subscriptions must be destroyed before stopping the fd. 
- assert(fd._impl->_registered.empty()); + SCYLLA_ASSERT(fd._impl->_registered.empty()); // There are no concurrent `{create,destroy}_worker` calls running since we waited for `update_endpoint_fiber` to finish. while (!fd._impl->_shard_workers.empty()) { @@ -697,13 +698,13 @@ future<> failure_detector::stop() { } failure_detector::impl::~impl() { - assert(_shard_workers.empty()); - assert(_destroy_subscriptions.available()); - assert(_update_endpoint_fiber.available()); + SCYLLA_ASSERT(_shard_workers.empty()); + SCYLLA_ASSERT(_destroy_subscriptions.available()); + SCYLLA_ASSERT(_update_endpoint_fiber.available()); } failure_detector::~failure_detector() { - assert(!_impl); + SCYLLA_ASSERT(!_impl); } } // namespace direct_failure_detector diff --git a/enum_set.hh b/enum_set.hh index c17d6637a0..69d4d53585 100644 --- a/enum_set.hh +++ b/enum_set.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include @@ -31,9 +32,9 @@ * * static_assert(my_enumset::frozen::contains(), "it should..."); * - * assert(my_enumset::frozen::contains(my_enumset::prepare())); + * SCYLLA_ASSERT(my_enumset::frozen::contains(my_enumset::prepare())); * - * assert(my_enumset::frozen::contains(x::A)); + * SCYLLA_ASSERT(my_enumset::frozen::contains(x::A)); * */ diff --git a/gms/feature_service.cc b/gms/feature_service.cc index 0509a5d130..e6963b94b6 100644 --- a/gms/feature_service.cc +++ b/gms/feature_service.cc @@ -17,6 +17,7 @@ #include #include "gms/gossiper.hh" #include "gms/i_endpoint_state_change_subscriber.hh" +#include "utils/assert.hh" #include "utils/error_injection.hh" #include "service/storage_service.hh" @@ -59,7 +60,7 @@ feature_config feature_config_from_db_config(const db::config& cfg, std::set feature_service::stop() { void feature_service::register_feature(feature& f) { auto i = _registered_features.emplace(f.name(), f); - assert(i.second); + SCYLLA_ASSERT(i.second); } void feature_service::unregister_feature(feature& f) { diff --git a/gms/generation-number.cc 
b/gms/generation-number.cc index 5c7e3dc8c5..8c6231728d 100644 --- a/gms/generation-number.cc +++ b/gms/generation-number.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include #include @@ -22,7 +23,7 @@ generation_type get_generation_number() { int generation_number = duration_cast(now).count(); auto ret = generation_type(generation_number); // Make sure the clock didn't overflow the 32 bits value - assert(ret.value() == generation_number); + SCYLLA_ASSERT(ret.value() == generation_number); return ret; } diff --git a/gms/gossiper.cc b/gms/gossiper.cc index c2908cd81e..e9ff9d48a8 100644 --- a/gms/gossiper.cc +++ b/gms/gossiper.cc @@ -43,6 +43,7 @@ #include "gms/generation-number.hh" #include "locator/token_metadata.hh" #include "seastar/rpc/rpc_types.hh" +#include "utils/assert.hh" #include "utils/exceptions.hh" #include "utils/error_injection.hh" #include "utils/to_string.hh" @@ -842,7 +843,7 @@ gossiper::endpoint_permit::~endpoint_permit() { bool gossiper::endpoint_permit::release() noexcept { if (auto ptr = std::exchange(_ptr, nullptr)) { - assert(ptr->pid == _permit_id); + SCYLLA_ASSERT(ptr->pid == _permit_id); logger.debug("{}: lock_endpoint {}: released: permit_id={} holders={}", _caller.function_name(), _addr, _permit_id, ptr->holders); if (!--ptr->holders) { logger.debug("{}: lock_endpoint {}: released: permit_id={}", _caller.function_name(), _addr, _permit_id); @@ -885,7 +886,7 @@ future gossiper::lock_endpoint(inet_address ep, permi auto sub = _abort_source.subscribe([&aoe] () noexcept { aoe.abort_source().request_abort(); }); - assert(sub); // due to check() above + SCYLLA_ASSERT(sub); // due to check() above try { eptr->units = co_await get_units(eptr->sem, 1, aoe.abort_source()); break; @@ -1043,7 +1044,7 @@ future<> gossiper::failure_detector_loop() { // This needs to be run with a lock future<> gossiper::replicate_live_endpoints_on_change(foreign_ptr> data0, uint64_t new_version) { auto 
coordinator = this_shard_id(); - assert(coordinator == 0); + SCYLLA_ASSERT(coordinator == 0); // // Gossiper task runs only on CPU0: // @@ -1721,7 +1722,7 @@ future<> gossiper::real_mark_alive(inet_address addr) { locator::host_id id(utils::UUID(app_state_ptr->value())); auto second_node_ip = handler.get("second_node_ip"); - assert(second_node_ip); + SCYLLA_ASSERT(second_node_ip); logger.info("real_mark_alive {}/{} second_node_ip={}", id, endpoint, *second_node_ip); if (endpoint == gms::inet_address(sstring{*second_node_ip})) { @@ -1888,7 +1889,7 @@ bool gossiper::is_silent_shutdown_state(const endpoint_state& ep_state) const{ } future<> gossiper::apply_new_states(inet_address addr, endpoint_state local_state, const endpoint_state& remote_state, permit_id pid) { - // don't assert here, since if the node restarts the version will go back to zero + // don't assert here, since if the node restarts the version will go back to zero //int oldVersion = local_state.get_heart_beat_state().get_heart_beat_version(); verify_permit(addr, pid); diff --git a/interval.hh b/interval.hh index 861753e9b3..43871b8a68 100644 --- a/interval.hh +++ b/interval.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include #include @@ -138,7 +139,7 @@ public: // the point is before the interval (works only for non wrapped intervals) // Comparator must define a total ordering on T. bool before(const T& point, IntervalComparatorFor auto&& cmp) const { - assert(!is_wrap_around(cmp)); + SCYLLA_ASSERT(!is_wrap_around(cmp)); if (!start()) { return false; //open start, no points before } @@ -154,8 +155,8 @@ public: // the other interval is before this interval (works only for non wrapped intervals) // Comparator must define a total ordering on T.
bool other_is_before(const wrapping_interval& o, IntervalComparatorFor auto&& cmp) const { - assert(!is_wrap_around(cmp)); - assert(!o.is_wrap_around(cmp)); + SCYLLA_ASSERT(!is_wrap_around(cmp)); + SCYLLA_ASSERT(!o.is_wrap_around(cmp)); if (!start() || !o.end()) { return false; } @@ -181,7 +182,7 @@ public: // the point is after the interval (works only for non wrapped intervals) // Comparator must define a total ordering on T. bool after(const T& point, IntervalComparatorFor auto&& cmp) const { - assert(!is_wrap_around(cmp)); + SCYLLA_ASSERT(!is_wrap_around(cmp)); if (!end()) { return false; //open end, no points after } @@ -211,8 +212,8 @@ public: } // No interval should reach this point as wrap around. - assert(!this_wraps); - assert(!other_wraps); + SCYLLA_ASSERT(!this_wraps); + SCYLLA_ASSERT(!other_wraps); // if both this and other have an open start, the two intervals will overlap. if (!start() && !other.start()) { @@ -377,7 +378,7 @@ public: // split_point will belong to first interval // Comparator must define a total ordering on T. std::pair, wrapping_interval> split(const T& split_point, IntervalComparatorFor auto&& cmp) const { - assert(contains(split_point, std::forward(cmp))); + SCYLLA_ASSERT(contains(split_point, std::forward(cmp))); wrapping_interval left(start(), bound(split_point)); wrapping_interval right(bound(split_point, false), end()); return std::make_pair(std::move(left), std::move(right)); @@ -584,7 +585,7 @@ public: // split_point will belong to first interval // Comparator must define a total ordering on T. 
std::pair, interval> split(const T& split_point, IntervalComparatorFor auto&& cmp) const { - assert(contains(split_point, std::forward(cmp))); + SCYLLA_ASSERT(contains(split_point, std::forward(cmp))); interval left(start(), bound(split_point)); interval right(bound(split_point, false), end()); return std::make_pair(std::move(left), std::move(right)); diff --git a/lang/lua.cc b/lang/lua.cc index 5e360f06a0..af06e342bb 100644 --- a/lang/lua.cc +++ b/lang/lua.cc @@ -12,6 +12,7 @@ #include "lang/lua_scylla_types.hh" #include "exceptions/exceptions.hh" #include "concrete_types.hh" +#include "utils/assert.hh" #include "utils/utf8.hh" #include "utils/ascii.hh" #include "utils/date.h" @@ -41,7 +42,7 @@ struct alloc_state { : max(max) , max_contiguous(max_contiguous) { // The max and max_contiguous limits are responsible for avoiding overflows. - assert(max + max_contiguous >= max); + SCYLLA_ASSERT(max + max_contiguous >= max); } }; @@ -79,7 +80,7 @@ static void* lua_alloc(void* ud, void* ptr, size_t osize, size_t nsize) { size_t next = s->allocated + nsize; // The max and max_contiguous limits should be small enough to avoid overflows. 
- assert(next >= s->allocated); + SCYLLA_ASSERT(next >= s->allocated); if (ptr) { next -= osize; @@ -119,7 +120,7 @@ static void debug_hook(lua_State* l, lua_Debug* ar) { return; } if (lua_yield(l, 0)) { - assert(0 && "lua_yield failed"); + SCYLLA_ASSERT(0 && "lua_yield failed"); } } @@ -223,7 +224,7 @@ requires CanHandleRawLuaTypes static auto visit_lua_raw_value(lua_State* l, int index, Func&& f) { switch (lua_type(l, index)) { case LUA_TNONE: - assert(0 && "Invalid index"); + SCYLLA_ASSERT(0 && "Invalid index"); case LUA_TNUMBER: if (lua_isinteger(l, index)) { return f(lua_tointeger(l, index)); @@ -244,9 +245,9 @@ static auto visit_lua_raw_value(lua_State* l, int index, Func&& f) { return f(*get_decimal(l, index)); case LUA_TTHREAD: case LUA_TLIGHTUSERDATA: - assert(0 && "We never make thread or light user data visible to scripts"); + SCYLLA_ASSERT(0 && "We never make thread or light user data visible to scripts"); } - assert(0 && "invalid lua type"); + SCYLLA_ASSERT(0 && "invalid lua type"); } template @@ -362,7 +363,7 @@ static const big_decimal& get_decimal_in_binary_op(lua_State* l) { if (a == nullptr) { lua_insert(l, 1); a = get_decimal(l, 1); - assert(a); + SCYLLA_ASSERT(a); } return *a; } diff --git a/locator/ec2_snitch.cc b/locator/ec2_snitch.cc index b3b39fface..2cd899a0c8 100644 --- a/locator/ec2_snitch.cc +++ b/locator/ec2_snitch.cc @@ -8,6 +8,7 @@ #include #include +#include "utils/assert.hh" #include "utils/class_registrator.hh" namespace locator { @@ -29,13 +30,13 @@ future<> ec2_snitch::load_config(bool prefer_local) { if (this_shard_id() == io_cpu_id()) { auto token = co_await aws_api_call(AWS_QUERY_SERVER_ADDR, AWS_QUERY_SERVER_PORT, TOKEN_REQ_ENDPOINT, std::nullopt); auto az = co_await aws_api_call(AWS_QUERY_SERVER_ADDR, AWS_QUERY_SERVER_PORT, ZONE_NAME_QUERY_REQ, token); - assert(az.size()); + SCYLLA_ASSERT(az.size()); std::vector splits; // Split "us-east-1a" or "asia-1a" into "us-east"/"1a" and "asia"/"1a". 
split(splits, az, is_any_of("-")); - assert(splits.size() > 1); + SCYLLA_ASSERT(splits.size() > 1); sstring my_rack = splits[splits.size() - 1]; diff --git a/locator/network_topology_strategy.cc b/locator/network_topology_strategy.cc index 7a9560dfec..cb3218fde2 100644 --- a/locator/network_topology_strategy.cc +++ b/locator/network_topology_strategy.cc @@ -22,6 +22,7 @@ #include #include #include "exceptions/exceptions.hh" +#include "utils/assert.hh" #include "utils/class_registrator.hh" #include "utils/hash.hh" @@ -195,7 +196,7 @@ public: , _racks(_tp.get_datacenter_racks()) { // not aware of any cluster members - assert(!_all_endpoints.empty() && !_racks.empty()); + SCYLLA_ASSERT(!_all_endpoints.empty() && !_racks.empty()); auto size_for = [](auto& map, auto& k) { auto i = map.find(k); diff --git a/locator/production_snitch_base.hh b/locator/production_snitch_base.hh index 63c8c89f59..95d4d8e0c7 100644 --- a/locator/production_snitch_base.hh +++ b/locator/production_snitch_base.hh @@ -10,6 +10,7 @@ #pragma once +#include "utils/assert.hh" #include #include @@ -73,17 +74,17 @@ protected: std::unordered_map _prop_values; sharded& container() noexcept { - assert(_backreference != nullptr); + SCYLLA_ASSERT(_backreference != nullptr); return _backreference->container(); } snitch_ptr& local() noexcept { - assert(_backreference != nullptr); + SCYLLA_ASSERT(_backreference != nullptr); return *_backreference; } const snitch_ptr& local() const noexcept { - assert(_backreference != nullptr); + SCYLLA_ASSERT(_backreference != nullptr); return *_backreference; } diff --git a/locator/simple_strategy.cc b/locator/simple_strategy.cc index 0aece018e3..45d0ac743c 100644 --- a/locator/simple_strategy.cc +++ b/locator/simple_strategy.cc @@ -11,6 +11,7 @@ #include "simple_strategy.hh" #include "exceptions/exceptions.hh" +#include "utils/assert.hh" #include "utils/class_registrator.hh" #include @@ -50,7 +51,7 @@ future simple_strategy::calculate_natural_endpoints(const token& t, } 
auto ep = tm.get_endpoint(token); - assert(ep); + SCYLLA_ASSERT(ep); endpoints.push_back(*ep); co_await coroutine::maybe_yield(); diff --git a/locator/snitch_base.hh b/locator/snitch_base.hh index 4963b9b1e1..13906d5a79 100644 --- a/locator/snitch_base.hh +++ b/locator/snitch_base.hh @@ -10,6 +10,7 @@ #pragma once +#include "utils/assert.hh" #include #include @@ -81,7 +82,7 @@ public: */ virtual gms::application_state_map get_app_states() const = 0; - virtual ~i_endpoint_snitch() { assert(_state == snitch_state::stopped); }; + virtual ~i_endpoint_snitch() { SCYLLA_ASSERT(_state == snitch_state::stopped); }; // noop by default virtual future<> stop() { diff --git a/locator/token_metadata.cc b/locator/token_metadata.cc index 14fd0dfb20..562686d6b5 100644 --- a/locator/token_metadata.cc +++ b/locator/token_metadata.cc @@ -21,6 +21,7 @@ #include #include #include +#include "utils/assert.hh" #include "utils/stall_free.hh" namespace locator { @@ -1216,7 +1217,7 @@ future<> shared_token_metadata::mutate_token_metadata(seastar::noncopyable_funct future<> shared_token_metadata::mutate_on_all_shards(sharded& stm, seastar::noncopyable_function (token_metadata&)> func) { auto base_shard = this_shard_id(); - assert(base_shard == 0); + SCYLLA_ASSERT(base_shard == 0); auto lk = co_await stm.local().get_lock(); std::vector pending_token_metadata_ptr; diff --git a/locator/topology.cc b/locator/topology.cc index 3aab35bb77..e61fe9da01 100644 --- a/locator/topology.cc +++ b/locator/topology.cc @@ -15,6 +15,7 @@ #include "log.hh" #include "locator/topology.hh" #include "locator/production_snitch_base.hh" +#include "utils/assert.hh" #include "utils/stall_free.hh" #include "utils/to_string.hh" @@ -117,7 +118,7 @@ topology::topology(topology&& o) noexcept , _sort_by_proximity(o._sort_by_proximity) , _datacenters(std::move(o._datacenters)) { - assert(_shard == this_shard_id()); + SCYLLA_ASSERT(_shard == this_shard_id()); tlogger.trace("topology[{}]: move from [{}]", fmt::ptr(this), 
fmt::ptr(&o)); for (auto& n : _nodes) { diff --git a/main.cc b/main.cc index 83750cc038..46b57643af 100644 --- a/main.cc +++ b/main.cc @@ -21,6 +21,7 @@ #include #include "service/qos/raft_service_level_distributed_data_accessor.hh" #include "tasks/task_manager.hh" +#include "utils/assert.hh" #include "utils/build_id.hh" #include "supervisor.hh" #include "replica/database.hh" @@ -528,7 +529,7 @@ static auto defer_verbose_shutdown(const char* what, Func&& func) { // Call _exit() rather than exit() to exit immediately // without calling exit handlers, avoiding - // boost::intrusive::detail::destructor_impl assert failure + // boost::intrusive::detail::destructor_impl SCYLLA_ASSERT failure // from ~segment_pool exit handler. _exit(255); } diff --git a/message/messaging_service.cc b/message/messaging_service.cc index d2b367cbb0..7bb4cf64ba 100644 --- a/message/messaging_service.cc +++ b/message/messaging_service.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include #include @@ -661,9 +662,9 @@ static constexpr std::array(messaging_verb::LAST)> for (size_t i = 0; i < tab.size(); ++i) { tab[i] = do_get_rpc_client_idx(messaging_verb(i)); - // This assert guards against adding new connection types without + // This SCYLLA_ASSERT guards against adding new connection types without // updating *_CONNECTION_COUNT constants. 
- assert(tab[i] < PER_TENANT_CONNECTION_COUNT + PER_SHARD_CONNECTION_COUNT); + SCYLLA_ASSERT(tab[i] < PER_TENANT_CONNECTION_COUNT + PER_SHARD_CONNECTION_COUNT); } return tab; } @@ -709,7 +710,7 @@ messaging_service::initial_scheduling_info() const { } } - assert(sched_infos.size() == PER_SHARD_CONNECTION_COUNT + + SCYLLA_ASSERT(sched_infos.size() == PER_SHARD_CONNECTION_COUNT + _scheduling_config.statement_tenants.size() * PER_TENANT_CONNECTION_COUNT); return sched_infos; }; @@ -798,7 +799,7 @@ gms::inet_address messaging_service::get_public_endpoint_for(const gms::inet_add } shared_ptr messaging_service::get_rpc_client(messaging_verb verb, msg_addr id) { - assert(!_shutting_down); + SCYLLA_ASSERT(!_shutting_down); if (_cfg.maintenance_mode) { on_internal_error(mlogger, "This node is in maintenance mode, it shouldn't contact other nodes"); } @@ -900,7 +901,7 @@ shared_ptr messaging_service::ge opts.isolation_cookie = _scheduling_info_for_connection_index[idx].isolation_cookie; opts.metrics_domain = client_metrics_domain(idx, id.addr); // not just `addr` as the latter may be internal IP - assert(!must_encrypt || _credentials); + SCYLLA_ASSERT(!must_encrypt || _credentials); auto client = must_encrypt ? ::make_shared(_rpc->protocol(), std::move(opts), @@ -916,7 +917,7 @@ shared_ptr messaging_service::ge // the topology (so we always set `topology_ignored` to `false` in that case). bool topology_ignored = idx != TOPOLOGY_INDEPENDENT_IDX && topology_status.has_value() && *topology_status == false; auto res = _clients[idx].emplace(id, shard_info(std::move(client), topology_ignored)); - assert(res.second); + SCYLLA_ASSERT(res.second); it = res.first; uint32_t src_cpu_id = this_shard_id(); // No reply is received, nothing to wait for. 
diff --git a/mutation/atomic_cell.hh b/mutation/atomic_cell.hh index 7b1b13a30e..b5d7b36abe 100644 --- a/mutation/atomic_cell.hh +++ b/mutation/atomic_cell.hh @@ -12,6 +12,7 @@ #include "timestamp.hh" #include "mutation/tombstone.hh" #include "gc_clock.hh" +#include "utils/assert.hh" #include "utils/managed_bytes.hh" #include #include @@ -126,18 +127,18 @@ public: } // Can be called only when is_dead() is true. static gc_clock::time_point deletion_time(atomic_cell_value_view cell) { - assert(is_dead(cell)); + SCYLLA_ASSERT(is_dead(cell)); return gc_clock::time_point(gc_clock::duration(get_field(cell, deletion_time_offset))); } // Can be called only when is_live_and_has_ttl() is true. static gc_clock::time_point expiry(atomic_cell_value_view cell) { - assert(is_live_and_has_ttl(cell)); + SCYLLA_ASSERT(is_live_and_has_ttl(cell)); auto expiry = get_field(cell, expiry_offset); return gc_clock::time_point(gc_clock::duration(expiry)); } // Can be called only when is_live_and_has_ttl() is true. 
static gc_clock::duration ttl(atomic_cell_value_view cell) { - assert(is_live_and_has_ttl(cell)); + SCYLLA_ASSERT(is_live_and_has_ttl(cell)); return gc_clock::duration(get_field(cell, ttl_offset)); } static managed_bytes make_dead(api::timestamp_type timestamp, gc_clock::time_point deletion_time) { diff --git a/mutation/mutation.hh b/mutation/mutation.hh index eed1f7dd2f..9e3e10a878 100644 --- a/mutation/mutation.hh +++ b/mutation/mutation.hh @@ -13,6 +13,7 @@ #include "mutation_partition.hh" #include "keys.hh" #include "schema/schema_fwd.hh" +#include "utils/assert.hh" #include "utils/hashing.hh" #include "mutation_fragment_v2.hh" #include "mutation_consumer.hh" @@ -302,7 +303,7 @@ std::optional consume_clustering_fragments(schema_ptr s, mutatio if (crs_it == crs_end && rts_it == rts_end) { flush_tombstones(position_in_partition::after_all_clustered_rows()); } else { - assert(preempt && need_preempt()); + SCYLLA_ASSERT(preempt && need_preempt()); return std::nullopt; } } diff --git a/mutation/mutation_fragment.cc b/mutation/mutation_fragment.cc index a59c423e12..85af133415 100644 --- a/mutation/mutation_fragment.cc +++ b/mutation/mutation_fragment.cc @@ -11,6 +11,7 @@ #include "mutation_fragment.hh" #include "mutation_fragment_v2.hh" #include "clustering_interval_set.hh" +#include "utils/assert.hh" #include "utils/hashing.hh" #include "utils/xx_hasher.hh" @@ -172,13 +173,13 @@ struct get_key_visitor { const clustering_key_prefix& mutation_fragment::key() const { - assert(has_key()); + SCYLLA_ASSERT(has_key()); return visit(get_key_visitor()); } void mutation_fragment::apply(const schema& s, mutation_fragment&& mf) { - assert(mergeable_with(mf)); + SCYLLA_ASSERT(mergeable_with(mf)); switch (_kind) { case mutation_fragment::kind::partition_start: _data->_partition_start.partition_tombstone().apply(mf._data->_partition_start.partition_tombstone()); @@ -257,13 +258,13 @@ auto fmt::formatter::format(const mutation_fragment: const clustering_key_prefix& 
mutation_fragment_v2::key() const { - assert(has_key()); + SCYLLA_ASSERT(has_key()); return visit(get_key_visitor()); } void mutation_fragment_v2::apply(const schema& s, mutation_fragment_v2&& mf) { - assert(mergeable_with(mf)); + SCYLLA_ASSERT(mergeable_with(mf)); switch (_kind) { case mutation_fragment_v2::kind::partition_start: _data->_partition_start.partition_tombstone().apply(mf._data->_partition_start.partition_tombstone()); diff --git a/mutation/mutation_partition.cc b/mutation/mutation_partition.cc index 92437bc37f..0215c1f8fe 100644 --- a/mutation/mutation_partition.cc +++ b/mutation/mutation_partition.cc @@ -28,6 +28,7 @@ #include "clustering_key_filter.hh" #include "mutation_partition_view.hh" #include "tombstone_gc.hh" +#include "utils/assert.hh" #include "utils/unconst.hh" #include "mutation/async_utils.hh" @@ -144,7 +145,7 @@ mutation_partition::mutation_partition(const schema& s, const mutation_partition #endif { #ifdef SEASTAR_DEBUG - assert(x._schema_version == _schema_version); + SCYLLA_ASSERT(x._schema_version == _schema_version); #endif auto cloner = [&s] (const rows_entry* x) -> rows_entry* { return current_allocator().construct(s, *x); @@ -164,7 +165,7 @@ mutation_partition::mutation_partition(const mutation_partition& x, const schema #endif { #ifdef SEASTAR_DEBUG - assert(x._schema_version == _schema_version); + SCYLLA_ASSERT(x._schema_version == _schema_version); #endif try { for(auto&& r : ck_ranges) { @@ -194,7 +195,7 @@ mutation_partition::mutation_partition(mutation_partition&& x, const schema& sch #endif { #ifdef SEASTAR_DEBUG - assert(x._schema_version == _schema_version); + SCYLLA_ASSERT(x._schema_version == _schema_version); #endif { auto deleter = current_deleter(); @@ -280,8 +281,8 @@ mutation_partition::apply(const schema& s, const mutation_fragment& mf) { stop_iteration mutation_partition::apply_monotonically(const schema& s, mutation_partition&& p, cache_tracker* tracker, mutation_application_stats& app_stats, is_preemptible 
preemptible, apply_resume& res) { #ifdef SEASTAR_DEBUG - assert(s.version() == _schema_version); - assert(p._schema_version == _schema_version); + SCYLLA_ASSERT(s.version() == _schema_version); + SCYLLA_ASSERT(p._schema_version == _schema_version); #endif _tombstone.apply(p._tombstone); _static_row.apply_monotonically(s, column_kind::static_column, std::move(p._static_row)); @@ -531,7 +532,7 @@ mutation_partition::tombstone_for_row(const schema& schema, const rows_entry& e) void mutation_partition::apply_row_tombstone(const schema& schema, clustering_key_prefix prefix, tombstone t) { check_schema(schema); - assert(!prefix.is_full(schema)); + SCYLLA_ASSERT(!prefix.is_full(schema)); auto start = prefix; _row_tombstones.apply(schema, {std::move(start), std::move(prefix), std::move(t)}); } @@ -748,7 +749,7 @@ void mutation_partition::for_each_row(const schema& schema, const query::cluster template void write_cell(RowWriter& w, const query::partition_slice& slice, ::atomic_cell_view c) { - assert(c.is_live()); + SCYLLA_ASSERT(c.is_live()); auto wr = w.add().write(); auto after_timestamp = [&, wr = std::move(wr)] () mutable { if (slice.options.contains()) { @@ -789,7 +790,7 @@ void write_cell(RowWriter& w, const query::partition_slice& slice, data_type typ template void write_counter_cell(RowWriter& w, const query::partition_slice& slice, ::atomic_cell_view c) { - assert(c.is_live()); + SCYLLA_ASSERT(c.is_live()); auto ccv = counter_cell_view(c); auto wr = w.add().write(); [&, wr = std::move(wr)] () mutable { @@ -1179,8 +1180,8 @@ bool mutation_partition::equal(const schema& s, const mutation_partition& p) con bool mutation_partition::equal(const schema& this_schema, const mutation_partition& p, const schema& p_schema) const { #ifdef SEASTAR_DEBUG - assert(_schema_version == this_schema.version()); - assert(p._schema_version == p_schema.version()); + SCYLLA_ASSERT(_schema_version == this_schema.version()); + SCYLLA_ASSERT(p._schema_version == p_schema.version()); #endif 
if (_tombstone != p._tombstone) { return false; @@ -1375,7 +1376,7 @@ uint32_t mutation_partition::do_compact(const schema& s, const tombstone_gc_state& gc_state) { check_schema(s); - assert(row_limit > 0); + SCYLLA_ASSERT(row_limit > 0); auto gc_before = drop_tombstones_unconditionally ? gc_clock::time_point::max() : gc_state.get_gc_before_for_key(s.shared_from_this(), dk, query_time); @@ -2383,7 +2384,7 @@ void mutation_partition::set_continuity(const schema& s, const position_range& p i = _rows.insert_before(i, std::move(e)); } - assert(i != end); + SCYLLA_ASSERT(i != end); ++i; while (1) { diff --git a/mutation/mutation_partition.hh b/mutation/mutation_partition.hh index 640166c323..ae86bf5f67 100644 --- a/mutation/mutation_partition.hh +++ b/mutation/mutation_partition.hh @@ -24,6 +24,7 @@ #include "atomic_cell_or_collection.hh" #include "hashing_partition_visitor.hh" #include "range_tombstone_list.hh" +#include "utils/assert.hh" #include "utils/intrusive_btree.hh" #include "utils/preempt.hh" #include "utils/lru.hh" @@ -1486,7 +1487,7 @@ private: void check_schema(const schema& s) const { #ifdef SEASTAR_DEBUG - assert(s.version() == _schema_version); + SCYLLA_ASSERT(s.version() == _schema_version); #endif } }; diff --git a/mutation/mutation_partition_v2.cc b/mutation/mutation_partition_v2.cc index 4db9ed8bbd..917c9ab209 100644 --- a/mutation/mutation_partition_v2.cc +++ b/mutation/mutation_partition_v2.cc @@ -20,6 +20,7 @@ #include #include "compaction/compaction_garbage_collector.hh" #include "mutation_partition_view.hh" +#include "utils/assert.hh" #include "utils/unconst.hh" extern logging::logger mplog; @@ -34,7 +35,7 @@ mutation_partition_v2::mutation_partition_v2(const schema& s, const mutation_par #endif { #ifdef SEASTAR_DEBUG - assert(x._schema_version == _schema_version); + SCYLLA_ASSERT(x._schema_version == _schema_version); #endif auto cloner = [&s] (const rows_entry* x) -> rows_entry* { return current_allocator().construct(s, *x); @@ -117,8 +118,8 
@@ void mutation_partition_v2::apply(const schema& s, mutation_partition_v2&& p, ca stop_iteration mutation_partition_v2::apply_monotonically(const schema& s, const schema& p_s, mutation_partition_v2&& p, cache_tracker* tracker, mutation_application_stats& app_stats, preemption_check need_preempt, apply_resume& res, is_evictable evictable) { #ifdef SEASTAR_DEBUG - assert(_schema_version == s.version()); - assert(p._schema_version == p_s.version()); + SCYLLA_ASSERT(_schema_version == s.version()); + SCYLLA_ASSERT(p._schema_version == p_s.version()); #endif bool same_schema = s.version() == p_s.version(); _tombstone.apply(p._tombstone); @@ -217,7 +218,7 @@ stop_iteration mutation_partition_v2::apply_monotonically(const schema& s, const // some memory for the new tree nodes. This is done by the `hold_reserve` // constructed after the lambda. if (this_sentinel) { - assert(p_i != p._rows.end()); + SCYLLA_ASSERT(p_i != p._rows.end()); auto rt = this_sentinel->range_tombstone(); auto insert_result = _rows.insert_before_hint(i, std::move(this_sentinel), cmp); auto i2 = insert_result.first; @@ -233,10 +234,10 @@ stop_iteration mutation_partition_v2::apply_monotonically(const schema& s, const } } if (p_sentinel) { - assert(p_i != p._rows.end()); + SCYLLA_ASSERT(p_i != p._rows.end()); if (cmp(p_i->position(), p_sentinel->position()) == 0) { mplog.trace("{}: clearing attributes on {}", fmt::ptr(&p), p_i->position()); - assert(p_i->dummy()); + SCYLLA_ASSERT(p_i->dummy()); p_i->set_continuous(false); p_i->set_range_tombstone({}); } else { @@ -409,7 +410,7 @@ stop_iteration mutation_partition_v2::apply_monotonically(const schema& s, const lb_i->set_continuous(true); } } else { - assert(i->dummy() == src_e.dummy()); + SCYLLA_ASSERT(i->dummy() == src_e.dummy()); alloc_strategy_unique_ptr s1; alloc_strategy_unique_ptr s2; @@ -521,7 +522,7 @@ stop_iteration mutation_partition_v2::apply_monotonically(const schema& s, const void mutation_partition_v2::apply_row_tombstone(const schema& 
schema, clustering_key_prefix prefix, tombstone t) { check_schema(schema); - assert(!prefix.is_full(schema)); + SCYLLA_ASSERT(!prefix.is_full(schema)); auto start = prefix; apply_row_tombstone(schema, range_tombstone{std::move(start), std::move(prefix), std::move(t)}); } @@ -842,8 +843,8 @@ bool mutation_partition_v2::equal(const schema& s, const mutation_partition_v2& bool mutation_partition_v2::equal(const schema& this_schema, const mutation_partition_v2& p, const schema& p_schema) const { #ifdef SEASTAR_DEBUG - assert(_schema_version == this_schema.version()); - assert(p._schema_version == p_schema.version()); + SCYLLA_ASSERT(_schema_version == this_schema.version()); + SCYLLA_ASSERT(p._schema_version == p_schema.version()); #endif if (_tombstone != p._tombstone) { return false; @@ -1010,7 +1011,7 @@ void mutation_partition_v2::set_continuity(const schema& s, const position_range i = _rows.insert_before(i, std::move(e)); } - assert(i != end); + SCYLLA_ASSERT(i != end); ++i; while (1) { diff --git a/mutation/mutation_partition_v2.hh b/mutation/mutation_partition_v2.hh index 733c6dd59d..62f273516d 100644 --- a/mutation/mutation_partition_v2.hh +++ b/mutation/mutation_partition_v2.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include #include @@ -268,7 +269,7 @@ private: void check_schema(const schema& s) const { #ifdef SEASTAR_DEBUG - assert(s.version() == _schema_version); + SCYLLA_ASSERT(s.version() == _schema_version); #endif } }; diff --git a/mutation/mutation_partition_view.cc b/mutation/mutation_partition_view.cc index 7481c17046..e70c50a62d 100644 --- a/mutation/mutation_partition_view.cc +++ b/mutation/mutation_partition_view.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include #include @@ -103,7 +104,7 @@ collection_mutation read_collection_cell(const abstract_type& type, ser::collect for (auto&& e : elements) { bytes key = e.key(); auto idx = deserialize_field_index(key); 
- assert(idx < utype.size()); + SCYLLA_ASSERT(idx < utype.size()); mut.cells.emplace_back(key, read_atomic_cell(*utype.type(idx), e.value(), atomic_cell::collection_member::yes)); } diff --git a/mutation/mutation_rebuilder.hh b/mutation/mutation_rebuilder.hh index a0987f6e78..5519c5d7a4 100644 --- a/mutation/mutation_rebuilder.hh +++ b/mutation/mutation_rebuilder.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include "mutation.hh" #include "range_tombstone_assembler.hh" @@ -20,31 +21,31 @@ public: // Returned reference is valid until consume_end_of_stream() or flush() is called. const mutation& consume_new_partition(const dht::decorated_key& dk) { - assert(!_m); + SCYLLA_ASSERT(!_m); _m = mutation(_s, dk); return *_m; } stop_iteration consume(tombstone t) { - assert(_m); + SCYLLA_ASSERT(_m); _m->partition().apply(t); return stop_iteration::no; } stop_iteration consume(range_tombstone&& rt) { - assert(_m); + SCYLLA_ASSERT(_m); _m->partition().apply_row_tombstone(*_s, std::move(rt)); return stop_iteration::no; } stop_iteration consume(static_row&& sr) { - assert(_m); + SCYLLA_ASSERT(_m); _m->partition().static_row().apply(*_s, column_kind::static_column, std::move(sr.cells())); return stop_iteration::no; } stop_iteration consume(clustering_row&& cr) { - assert(_m); + SCYLLA_ASSERT(_m); auto& dr = _m->partition().clustered_row(*_s, std::move(cr.key())); dr.apply(cr.tomb()); dr.apply(cr.marker()); @@ -53,7 +54,7 @@ public: } stop_iteration consume_end_of_partition() { - assert(_m); + SCYLLA_ASSERT(_m); return stop_iteration::yes; } @@ -64,7 +65,7 @@ public: // Can be used to split the processing of a large mutation into // multiple smaller `mutation` objects (which add up to the full mutation). 
mutation flush() { - assert(_m); + SCYLLA_ASSERT(_m); return std::exchange(*_m, mutation(_s, _m->decorated_key())); } diff --git a/mutation/partition_version.cc b/mutation/partition_version.cc index c6313cc554..7e09b7ddf7 100644 --- a/mutation/partition_version.cc +++ b/mutation/partition_version.cc @@ -11,6 +11,7 @@ #include "partition_version.hh" #include "row_cache.hh" #include "partition_snapshot_row_cursor.hh" +#include "utils/assert.hh" #include "utils/coroutine.hh" #include "real_dirty_memory_accounter.hh" @@ -342,7 +343,7 @@ partition_entry::~partition_entry() { return; } if (_snapshot) { - assert(!_snapshot->is_locked()); + SCYLLA_ASSERT(!_snapshot->is_locked()); _snapshot->_version = std::move(_version); _snapshot->_version.mark_as_unique_owner(); _snapshot->_entry = nullptr; @@ -359,7 +360,7 @@ stop_iteration partition_entry::clear_gently(cache_tracker* tracker) noexcept { } if (_snapshot) { - assert(!_snapshot->is_locked()); + SCYLLA_ASSERT(!_snapshot->is_locked()); _snapshot->_version = std::move(_version); _snapshot->_version.mark_as_unique_owner(); _snapshot->_entry = nullptr; @@ -387,7 +388,7 @@ stop_iteration partition_entry::clear_gently(cache_tracker* tracker) noexcept { void partition_entry::set_version(partition_version* new_version) { if (_snapshot) { - assert(!_snapshot->is_locked()); + SCYLLA_ASSERT(!_snapshot->is_locked()); _snapshot->_version = std::move(_version); _snapshot->_entry = nullptr; } @@ -552,7 +553,7 @@ utils::coroutine partition_entry::apply_to_incomplete(const schema& s, do { auto size = src_cur.memory_usage(); // Range tombstones in memtables are bounded by dummy entries on both sides. 
- assert(src_cur.range_tombstone_for_row() == src_cur.range_tombstone()); + SCYLLA_ASSERT(src_cur.range_tombstone_for_row() == src_cur.range_tombstone()); if (src_cur.range_tombstone()) { // Apply the tombstone to (lb, src_cur.position()) // FIXME: Avoid if before all rows @@ -564,11 +565,11 @@ utils::coroutine partition_entry::apply_to_incomplete(const schema& s, cur.next(); } position_in_partition::less_compare less(s); - assert(less(lb, cur.position())); + SCYLLA_ASSERT(less(lb, cur.position())); while (less(cur.position(), src_cur.position())) { auto res = cur.ensure_entry_in_latest(); if (cur.continuous()) { - assert(cur.dummy() || cur.range_tombstone_for_row() == cur.range_tombstone()); + SCYLLA_ASSERT(cur.dummy() || cur.range_tombstone_for_row() == cur.range_tombstone()); res.row.set_continuous(is_continuous::yes); } res.row.set_range_tombstone(cur.range_tombstone_for_row() + src_cur.range_tombstone()); @@ -600,7 +601,7 @@ utils::coroutine partition_entry::apply_to_incomplete(const schema& s, // only then the lower bound of the range is ensured in the latest version earlier. if (src_cur.range_tombstone()) { if (cur.continuous()) { - assert(cur.dummy() || cur.range_tombstone_for_row() == cur.range_tombstone()); + SCYLLA_ASSERT(cur.dummy() || cur.range_tombstone_for_row() == cur.range_tombstone()); e.set_continuous(is_continuous::yes); } e.set_range_tombstone(cur.range_tombstone_for_row() + src_cur.range_tombstone()); @@ -666,9 +667,9 @@ partition_snapshot_ptr partition_entry::read(logalloc::region& r, // If entry is being updated, we will get reads for non-latest phase, and // they must attach to the non-current version. 
partition_version* second = _version->next(); - assert(second && second->is_referenced()); + SCYLLA_ASSERT(second && second->is_referenced()); auto snp = partition_snapshot::container_of(second->_backref).shared_from_this(); - assert(phase == snp->_phase); + SCYLLA_ASSERT(phase == snp->_phase); return snp; } else { // phase > _snapshot->_phase with_allocator(r.allocator(), [&] { @@ -687,9 +688,9 @@ void partition_snapshot::touch() noexcept { // can be touched. if (_tracker && at_latest_version()) { auto&& rows = version()->partition().clustered_rows(); - assert(!rows.empty()); + SCYLLA_ASSERT(!rows.empty()); rows_entry& last_dummy = *rows.rbegin(); - assert(last_dummy.is_last_dummy()); + SCYLLA_ASSERT(last_dummy.is_last_dummy()); _tracker->touch(last_dummy); } } @@ -732,7 +733,7 @@ void partition_entry::evict(mutation_cleaner& cleaner) noexcept { return; } if (_snapshot) { - assert(!_snapshot->is_locked()); + SCYLLA_ASSERT(!_snapshot->is_locked()); _snapshot->_version = std::move(_version); _snapshot->_version.mark_as_unique_owner(); _snapshot->_entry = nullptr; @@ -756,14 +757,14 @@ partition_snapshot_ptr::~partition_snapshot_ptr() { void partition_snapshot::lock() noexcept { // partition_entry::is_locked() assumes that if there is a locked snapshot, // it can be found attached directly to it. - assert(at_latest_version()); + SCYLLA_ASSERT(at_latest_version()); _locked = true; } void partition_snapshot::unlock() noexcept { // Locked snapshots must always be latest, is_locked() assumes that. // Also, touch() is only effective when this snapshot is latest. - assert(at_latest_version()); + SCYLLA_ASSERT(at_latest_version()); _locked = false; touch(); // Make the entry evictable again in case it was fully unlinked by eviction attempt. 
} diff --git a/mutation/partition_version.hh b/mutation/partition_version.hh index 7d70763958..7071d88053 100644 --- a/mutation/partition_version.hh +++ b/mutation/partition_version.hh @@ -10,6 +10,7 @@ #include "mutation_partition.hh" #include "mutation_partition_v2.hh" +#include "utils/assert.hh" #include "utils/anchorless_list.hh" #include "utils/logalloc.hh" #include "utils/coroutine.hh" @@ -208,13 +209,13 @@ public: : _schema(std::move(s)) , _partition(*_schema) { - assert(_schema); + SCYLLA_ASSERT(_schema); } explicit partition_version(mutation_partition_v2 mp, schema_ptr s) noexcept : _schema(std::move(s)) , _partition(std::move(mp)) { - assert(_schema); + SCYLLA_ASSERT(_schema); } partition_version(partition_version&& pv) noexcept; @@ -251,7 +252,7 @@ public: : _version(&pv) , _unique_owner(unique_owner) { - assert(!_version->_backref); + SCYLLA_ASSERT(!_version->_backref); _version->_backref = this; } ~partition_version_ref() { @@ -279,19 +280,19 @@ public: explicit operator bool() const { return _version; } partition_version& operator*() { - assert(_version); + SCYLLA_ASSERT(_version); return *_version; } const partition_version& operator*() const { - assert(_version); + SCYLLA_ASSERT(_version); return *_version; } partition_version* operator->() { - assert(_version); + SCYLLA_ASSERT(_version); return _version; } const partition_version* operator->() const { - assert(_version); + SCYLLA_ASSERT(_version); return _version; } @@ -669,9 +670,9 @@ public: // If entry is being updated, we will get reads for non-latest phase, and // they must attach to the non-current version. 
partition_version* second = _version->next(); - assert(second && second->is_referenced()); + SCYLLA_ASSERT(second && second->is_referenced()); auto&& snp = partition_snapshot::referer_of(*second); - assert(phase == snp._phase); + SCYLLA_ASSERT(phase == snp._phase); return *second; } else { // phase > _snapshot->_phase add_version(s, t); diff --git a/mutation/partition_version_list.hh b/mutation/partition_version_list.hh index 382a4081fb..cca07dff13 100644 --- a/mutation/partition_version_list.hh +++ b/mutation/partition_version_list.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include "partition_version.hh" // Double-ended chained list of partition_version objects @@ -29,13 +30,13 @@ public: } _head = partition_version_ref(v, true); #ifdef SEASTAR_DEBUG - assert(!_head->is_referenced_from_entry()); + SCYLLA_ASSERT(!_head->is_referenced_from_entry()); #endif } else { v.insert_after(*_tail); _tail = partition_version_ref(v, true); #ifdef SEASTAR_DEBUG - assert(!_tail->is_referenced_from_entry()); + SCYLLA_ASSERT(!_tail->is_referenced_from_entry()); #endif } } @@ -63,7 +64,7 @@ public: if (next) { _head = partition_version_ref(*next, true); #ifdef SEASTAR_DEBUG - assert(!_head->is_referenced_from_entry()); + SCYLLA_ASSERT(!_head->is_referenced_from_entry()); #endif } } diff --git a/mutation/position_in_partition.hh b/mutation/position_in_partition.hh index e3dd4efe42..fcb9160439 100644 --- a/mutation/position_in_partition.hh +++ b/mutation/position_in_partition.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include "types/types.hh" #include "keys.hh" #include "clustering_bounds_comparator.hh" @@ -238,12 +239,12 @@ public: // Can be called only when !is_static_row && !is_clustering_row(). bound_view as_start_bound_view() const { - assert(_bound_weight != bound_weight::equal); + SCYLLA_ASSERT(_bound_weight != bound_weight::equal); return bound_view(*_ck, _bound_weight == bound_weight::before_all_prefixed ? 
bound_kind::incl_start : bound_kind::excl_start); } bound_view as_end_bound_view() const { - assert(_bound_weight != bound_weight::equal); + SCYLLA_ASSERT(_bound_weight != bound_weight::equal); return bound_view(*_ck, _bound_weight == bound_weight::before_all_prefixed ? bound_kind::excl_end : bound_kind::incl_end); } diff --git a/mutation/range_tombstone_list.cc b/mutation/range_tombstone_list.cc index 8762ac4718..781ca48bf3 100644 --- a/mutation/range_tombstone_list.cc +++ b/mutation/range_tombstone_list.cc @@ -8,6 +8,7 @@ #include #include "range_tombstone_list.hh" +#include "utils/assert.hh" #include "utils/allocation_strategy.hh" #include @@ -409,7 +410,7 @@ void range_tombstone_list::nop_reverter::update(range_tombstones_type::iterator void range_tombstone_list::insert_undo_op::undo(const schema& s, range_tombstone_list& rt_list) noexcept { auto it = rt_list.find(s, _new_rt); - assert (it != rt_list.end()); + SCYLLA_ASSERT (it != rt_list.end()); rt_list._tombstones.erase_and_dispose(it, current_deleter()); } @@ -419,7 +420,7 @@ void range_tombstone_list::erase_undo_op::undo(const schema& s, range_tombstone_ void range_tombstone_list::update_undo_op::undo(const schema& s, range_tombstone_list& rt_list) noexcept { auto it = rt_list.find(s, _new_rt); - assert (it != rt_list.end()); + SCYLLA_ASSERT (it != rt_list.end()); *it = std::move(_old_rt); } diff --git a/mutation/range_tombstone_list.hh b/mutation/range_tombstone_list.hh index 46546d2ecb..348723ca18 100644 --- a/mutation/range_tombstone_list.hh +++ b/mutation/range_tombstone_list.hh @@ -11,6 +11,7 @@ #include #include "range_tombstone.hh" #include "query-request.hh" +#include "utils/assert.hh" #include "utils/preempt.hh" #include "utils/chunked_vector.hh" #include @@ -238,7 +239,7 @@ public: // The list is assumed not to be empty range_tombstone pop_front_and_lock() { range_tombstone_entry* rt = _tombstones.unlink_leftmost_without_rebalance(); - assert(rt != nullptr); + SCYLLA_ASSERT(rt != nullptr); auto _ 
= seastar::defer([rt] () noexcept { current_deleter()(rt); }); return std::move(rt->tombstone()); } diff --git a/mutation_writer/multishard_writer.cc b/mutation_writer/multishard_writer.cc index a2918f75cc..6c8dcf2472 100644 --- a/mutation_writer/multishard_writer.cc +++ b/mutation_writer/multishard_writer.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "mutation_writer/multishard_writer.hh" #include "mutation/mutation_fragment_v2.hh" #include "schema/schema_registry.hh" @@ -150,7 +151,7 @@ future multishard_writer::handle_mutation_fragment(mutation_frag } } return f.then([this, mf = std::move(mf)] () mutable { - assert(!_current_shards.empty()); + SCYLLA_ASSERT(!_current_shards.empty()); if (_current_shards.size() == 1) [[likely]] { return _queue_reader_handles[_current_shards[0]]->push(std::move(mf)); } diff --git a/partition_snapshot_row_cursor.hh b/partition_snapshot_row_cursor.hh index 208274df96..95044aa119 100644 --- a/partition_snapshot_row_cursor.hh +++ b/partition_snapshot_row_cursor.hh @@ -10,6 +10,7 @@ #include "mutation/partition_version.hh" #include "row_cache.hh" +#include "utils/assert.hh" #include "utils/small_vector.hh" #include #include @@ -314,7 +315,7 @@ class partition_snapshot_row_cursor final { bool advance(bool keep) { memory::on_alloc_point(); version_heap_less_compare heap_less(*this); - assert(iterators_valid()); + SCYLLA_ASSERT(iterators_valid()); for (auto&& curr : _current_row) { if (!keep && curr.unique_owner) { mutation_partition::rows_type::key_grabber kg(curr.it); @@ -382,7 +383,7 @@ public: // If is_in_latest_version() then this returns an iterator to the entry under cursor in the latest version. 
mutation_partition::rows_type::iterator get_iterator_in_latest_version() const { - assert(_latest_it); + SCYLLA_ASSERT(_latest_it); return *_latest_it; } @@ -688,7 +689,7 @@ public: position_in_partition::less_compare less(_schema); if (!iterators_valid() || less(position(), pos)) { auto has_entry = maybe_advance_to(pos); - assert(has_entry); // evictable snapshots must have a dummy after all rows. + SCYLLA_ASSERT(has_entry); // evictable snapshots must have a dummy after all rows. } auto&& rows = _snp.version()->partition().mutable_clustered_rows(); auto latest_i = get_iterator_in_latest_version(); diff --git a/query-result-reader.hh b/query-result-reader.hh index 45f0422f95..3fc7095f02 100644 --- a/query-result-reader.hh +++ b/query-result-reader.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include @@ -186,7 +187,7 @@ public: full_position calculate_last_position() const { auto ps = _v.partitions(); - assert(!ps.empty()); + SCYLLA_ASSERT(!ps.empty()); auto pit = ps.begin(); auto pnext = pit; while (++pnext != ps.end()) { diff --git a/query.cc b/query.cc index 3cd52d860c..3dfb686179 100644 --- a/query.cc +++ b/query.cc @@ -22,6 +22,7 @@ #include "query_result_merger.hh" #include "partition_slice_builder.hh" #include "schema/schema_registry.hh" +#include "utils/assert.hh" #include "utils/overloaded_functor.hh" namespace query { @@ -115,7 +116,7 @@ void trim_clustering_row_ranges_to(const schema& s, clustering_row_ranges& range it = ranges.erase(it); continue; } else if (cmp(start_bound(*it), pos) <= 0) { - assert(cmp(pos, end_bound(*it)) < 0); + SCYLLA_ASSERT(cmp(pos, end_bound(*it)) < 0); auto r = reversed ? 
clustering_range(it->start(), clustering_range::bound(pos.key(), pos.get_bound_weight() != bound_weight::before_all_prefixed)) : clustering_range(clustering_range::bound(pos.key(), pos.get_bound_weight() != bound_weight::after_all_prefixed), it->end()); @@ -266,7 +267,7 @@ void partition_slice::clear_range(const schema& s, const partition_key& k) { // just in case someone changes the impl above, // we should do actual remove if specific_ranges suddenly // becomes an actual map - assert(_specific_ranges->size() == 1); + SCYLLA_ASSERT(_specific_ranges->size() == 1); _specific_ranges = nullptr; } } @@ -426,12 +427,12 @@ std::ostream& operator<<(std::ostream& out, const query::mapreduce_result::print } std::optional position_range_to_clustering_range(const position_range& r, const schema& s) { - assert(r.start().get_type() == partition_region::clustered); - assert(r.end().get_type() == partition_region::clustered); + SCYLLA_ASSERT(r.start().get_type() == partition_region::clustered); + SCYLLA_ASSERT(r.end().get_type() == partition_region::clustered); if (r.start().has_key() && r.end().has_key() && clustering_key_prefix::equality(s)(r.start().key(), r.end().key())) { - assert(r.start().get_bound_weight() != r.end().get_bound_weight()); + SCYLLA_ASSERT(r.start().get_bound_weight() != r.end().get_bound_weight()); if (r.end().get_bound_weight() == bound_weight::after_all_prefixed && r.start().get_bound_weight() != bound_weight::after_all_prefixed) { @@ -452,16 +453,16 @@ std::optional position_range_to_clustering_range(const auto to_bound = [&s] (const position_in_partition& p, bool left) -> std::optional { if (p.is_before_all_clustered_rows(s)) { - assert(left); + SCYLLA_ASSERT(left); return {}; } if (p.is_after_all_clustered_rows(s)) { - assert(!left); + SCYLLA_ASSERT(!left); return {}; } - assert(p.has_key()); + SCYLLA_ASSERT(p.has_key()); auto bw = p.get_bound_weight(); bool inclusive = left diff --git a/raft/fsm.cc b/raft/fsm.cc index 2a9713a95c..f7577b177c 100644 --- 
a/raft/fsm.cc +++ b/raft/fsm.cc @@ -8,6 +8,7 @@ #include "fsm.hh" #include #include +#include "utils/assert.hh" #include "utils/error_injection.hh" namespace raft { @@ -140,7 +141,7 @@ void fsm::advance_commit_idx(index_t leader_commit_idx) { void fsm::update_current_term(term_t current_term) { - assert(_current_term < current_term); + SCYLLA_ASSERT(_current_term < current_term); _current_term = current_term; _voted_for = server_id{}; } @@ -156,7 +157,7 @@ void fsm::reset_election_timeout() { } void fsm::become_leader() { - assert(!std::holds_alternative(_state)); + SCYLLA_ASSERT(!std::holds_alternative(_state)); _output.state_changed = true; _state.emplace(_config.max_log_size, *this); @@ -258,7 +259,7 @@ void fsm::become_candidate(bool is_prevote, bool is_leadership_transfer) { // This means we must still have access to the previous configuration. // Become a candidate only if we were previously a voter. auto prev_cfg = _log.get_prev_configuration(); - assert(prev_cfg); + SCYLLA_ASSERT(prev_cfg); if (!prev_cfg->can_vote(_my_id)) { // We weren't a voter before. become_follower(server_id{}); @@ -628,7 +629,7 @@ void fsm::append_entries(server_id from, append_request&& request) { _my_id, request.current_term, request.prev_log_idx, request.prev_log_term, request.leader_commit_idx, request.entries.size() ? request.entries[0]->idx : index_t(0), request.entries.size()); - assert(is_follower()); + SCYLLA_ASSERT(is_follower()); // Ensure log matching property, even if we append no entries. 
// 3.5 @@ -663,7 +664,7 @@ void fsm::append_entries(server_id from, append_request&& request) { } void fsm::append_entries_reply(server_id from, append_reply&& reply) { - assert(is_leader()); + SCYLLA_ASSERT(is_leader()); follower_progress* opt_progress = leader_state().tracker.find(from); if (opt_progress == nullptr) { @@ -754,7 +755,7 @@ void fsm::append_entries_reply(server_id from, append_reply&& reply) { // By `is_stray_reject(rejected) == false` we know that `rejected.non_matching_idx > progress.match_idx` // and `rejected.last_idx + 1 > progress.match_idx`. By the assignment to `progress.next_idx` above, we get: - assert(progress.next_idx > progress.match_idx); + SCYLLA_ASSERT(progress.next_idx > progress.match_idx); } // We may have just applied a configuration that removes this @@ -773,7 +774,7 @@ void fsm::request_vote(server_id from, vote_request&& request) { // We can cast a vote in any state. If the candidate's term is // lower than ours, we ignore the request. Otherwise we first // update our current term and convert to a follower. - assert(request.is_prevote || _current_term == request.current_term); + SCYLLA_ASSERT(request.is_prevote || _current_term == request.current_term); bool can_vote = // We can vote if this is a repeat of a vote we've already cast... @@ -824,7 +825,7 @@ void fsm::request_vote(server_id from, vote_request&& request) { } void fsm::request_vote_reply(server_id from, vote_reply&& reply) { - assert(is_candidate()); + SCYLLA_ASSERT(is_candidate()); logger.trace("request_vote_reply[{}] received a {} vote from {}", _my_id, reply.vote_granted ? 
"yes" : "no", from); @@ -961,7 +962,7 @@ void fsm::replicate_to(follower_progress& progress, bool allow_empty) { } void fsm::replicate() { - assert(is_leader()); + SCYLLA_ASSERT(is_leader()); for (auto& [id, progress] : leader_state().tracker) { if (progress.id != _my_id) { replicate_to(progress, false); @@ -999,7 +1000,7 @@ bool fsm::apply_snapshot(snapshot_descriptor snp, size_t max_trailing_entries, s // If the snapshot is locally generated, all entries up to its index must have been locally applied, // so in particular they must have been observed as committed. // Remote snapshots are only applied if we're a follower. - assert((local && snp.idx <= _observed._commit_idx) || (!local && is_follower())); + SCYLLA_ASSERT((local && snp.idx <= _observed._commit_idx) || (!local && is_follower())); // We don't apply snapshots older than the last applied one. // Furthermore, for remote snapshots, we can *only* apply them if they are fresher than our commit index. @@ -1074,7 +1075,7 @@ void fsm::broadcast_read_quorum(read_id id) { } void fsm::handle_read_quorum_reply(server_id from, const read_quorum_reply& reply) { - assert(is_leader()); + SCYLLA_ASSERT(is_leader()); logger.trace("handle_read_quorum_reply[{}] got reply from {} for id {}", _my_id, from, reply.id); auto& state = leader_state(); follower_progress* progress = state.tracker.find(from); @@ -1114,7 +1115,7 @@ std::optional> fsm::start_read_barrier(server_id req } auto term_for_commit_idx = _log.term_for(_commit_idx); - assert(term_for_commit_idx); + SCYLLA_ASSERT(term_for_commit_idx); if (*term_for_commit_idx != _current_term) { return {}; diff --git a/raft/fsm.hh b/raft/fsm.hh index 0e8153a043..544a320b8f 100644 --- a/raft/fsm.hh +++ b/raft/fsm.hh @@ -9,6 +9,7 @@ #include #include +#include "utils/assert.hh" #include "utils/small_vector.hh" #include "raft.hh" #include "tracker.hh" @@ -314,7 +315,7 @@ private: // Issue the next read identifier read_id next_read_id() { - assert(is_leader()); + 
SCYLLA_ASSERT(is_leader()); ++leader_state().last_read_id; leader_state().last_read_id_changed = true; _sm_events.signal(); @@ -399,7 +400,7 @@ public: // Ask to search for a leader if one is not known. void ping_leader() { - assert(!current_leader()); + SCYLLA_ASSERT(!current_leader()); _ping_leader = true; } diff --git a/raft/log.cc b/raft/log.cc index cf38c4bf77..82f311c95e 100644 --- a/raft/log.cc +++ b/raft/log.cc @@ -5,6 +5,7 @@ /* * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "log.hh" namespace raft { @@ -26,7 +27,7 @@ size_t log::range_memory_usage(log_entries::iterator first, log_entries::iterato } log_entry_ptr& log::operator[](size_t i) { - assert(!_log.empty() && index_t(i) >= _first_idx); + SCYLLA_ASSERT(!_log.empty() && index_t(i) >= _first_idx); return get_entry(index_t(i)); } @@ -62,7 +63,7 @@ index_t log::next_idx() const { } void log::truncate_uncommitted(index_t idx) { - assert(idx >= _first_idx); + SCYLLA_ASSERT(idx >= _first_idx); auto it = _log.begin() + (idx - _first_idx); const auto released_memory = range_memory_usage(it, _log.end()); _log.erase(it, _log.end()); @@ -73,7 +74,7 @@ void log::truncate_uncommitted(index_t idx) { // If _prev_conf_idx is 0, this log does not contain any // other configuration changes, since no two uncommitted // configuration changes can be in progress. 
- assert(_prev_conf_idx < _last_conf_idx); + SCYLLA_ASSERT(_prev_conf_idx < _last_conf_idx); _last_conf_idx = _prev_conf_idx; _prev_conf_idx = index_t{0}; } @@ -100,7 +101,7 @@ term_t log::last_term() const { } void log::stable_to(index_t idx) { - assert(idx <= last_idx()); + SCYLLA_ASSERT(idx <= last_idx()); _stable_idx = idx; } @@ -150,11 +151,11 @@ const configuration& log::get_configuration() const { } const configuration& log::last_conf_for(index_t idx) const { - assert(last_idx() >= idx); - assert(idx >= _snapshot.idx); + SCYLLA_ASSERT(last_idx() >= idx); + SCYLLA_ASSERT(idx >= _snapshot.idx); if (!_last_conf_idx) { - assert(!_prev_conf_idx); + SCYLLA_ASSERT(!_prev_conf_idx); return _snapshot.config; } @@ -181,7 +182,7 @@ const configuration& log::last_conf_for(index_t idx) const { } index_t log::maybe_append(std::vector&& entries) { - assert(!entries.empty()); + SCYLLA_ASSERT(!entries.empty()); index_t last_new_idx = entries.back()->idx; @@ -203,11 +204,11 @@ index_t log::maybe_append(std::vector&& entries) { // If an existing entry conflicts with a new one (same // index but different terms), delete the existing // entry and all that follow it (§5.3). 
- assert(e->idx > _snapshot.idx); + SCYLLA_ASSERT(e->idx > _snapshot.idx); truncate_uncommitted(e->idx); } // Assert log monotonicity - assert(e->idx == next_idx()); + SCYLLA_ASSERT(e->idx == next_idx()); emplace_back(std::move(e)); } @@ -228,7 +229,7 @@ const configuration* log::get_prev_configuration() const { } size_t log::apply_snapshot(snapshot_descriptor&& snp, size_t max_trailing_entries, size_t max_trailing_bytes) { - assert (snp.idx > _snapshot.idx); + SCYLLA_ASSERT (snp.idx > _snapshot.idx); size_t released_memory; auto idx = snp.idx; diff --git a/raft/log.hh b/raft/log.hh index 245d68815e..b1f1b1fb3b 100644 --- a/raft/log.hh +++ b/raft/log.hh @@ -7,6 +7,7 @@ */ #pragma once +#include "utils/assert.hh" #include "raft.hh" namespace raft { @@ -78,12 +79,12 @@ public: // All log entries following the snapshot must // be present, otherwise we will not be able to // perform an initial state transfer. - assert(_first_idx <= _snapshot.idx + 1); + SCYLLA_ASSERT(_first_idx <= _snapshot.idx + 1); } _memory_usage = range_memory_usage(_log.begin(), _log.end()); // The snapshot index is at least 0, so _first_idx // is at least 1 - assert(_first_idx > 0); + SCYLLA_ASSERT(_first_idx > 0); stable_to(last_idx()); init_last_conf_idx(); } diff --git a/raft/raft.hh b/raft/raft.hh index e831ee431e..069bd5211a 100644 --- a/raft/raft.hh +++ b/raft/raft.hh @@ -7,6 +7,7 @@ */ #pragma once +#include "utils/assert.hh" #include #include #include @@ -228,7 +229,7 @@ struct configuration { // Transition from C_old + C_new to C_new. 
void leave_joint() { - assert(is_joint()); + SCYLLA_ASSERT(is_joint()); previous.clear(); } }; diff --git a/raft/server.cc b/raft/server.cc index 6eaec6f9d4..008902abeb 100644 --- a/raft/server.cc +++ b/raft/server.cc @@ -7,6 +7,7 @@ */ #include "server.hh" +#include "utils/assert.hh" #include "utils/error_injection.hh" #include #include @@ -487,7 +488,7 @@ future server_impl::trigger_snapshot(seastar::abort_source* as) { if (as) { as->check(); sub = as->subscribe([this] () noexcept { _snapshot_desc_idx_changed.broadcast(); }); - assert(sub); // due to `check()` above + SCYLLA_ASSERT(sub); // due to `check()` above } co_await _snapshot_desc_idx_changed.when([this, as, awaited_idx] { return (as && as->abort_requested()) || awaited_idx <= _snapshot_desc_idx; @@ -536,8 +537,8 @@ future<> server_impl::wait_for_entry(entry_id eid, wait_type type, seastar::abor // was created, it included the entry `eid`. auto snap_idx = _fsm->log_last_snapshot_idx(); auto snap_term = _fsm->log_term_for(snap_idx); - assert(snap_term); - assert(snap_idx >= eid.idx); + SCYLLA_ASSERT(snap_term); + SCYLLA_ASSERT(snap_idx >= eid.idx); if (type == wait_type::committed && snap_term == eid.term) { logger.trace("[{}] wait_for_entry {}.{}: entry got truncated away, but has the snapshot's term" " (snapshot index: {})", id(), eid.term, eid.idx, snap_idx); @@ -585,7 +586,7 @@ future<> server_impl::wait_for_entry(entry_id eid, wait_type type, seastar::abor auto [it, inserted] = container.emplace(eid.idx, op_status{eid.term, promise<>()}); if (!inserted) { // No two leaders can exist with the same term. 
- assert(it->second.term != eid.term); + SCYLLA_ASSERT(it->second.term != eid.term); auto term_of_commit_idx = *_fsm->log_term_for(_fsm->commit_idx()); if (it->second.term > eid.term) { @@ -621,13 +622,13 @@ future<> server_impl::wait_for_entry(entry_id eid, wait_type type, seastar::abor _stats.waiters_dropped++; } } - assert(inserted); + SCYLLA_ASSERT(inserted); if (as) { it->second.abort = as->subscribe([it = it, &container] () noexcept { it->second.done.set_exception(request_aborted()); container.erase(it); }); - assert(it->second.abort); + SCYLLA_ASSERT(it->second.abort); } co_await it->second.done.get_future(); logger.trace("[{}] done waiting for {}.{}", id(), eid.term, eid.idx); @@ -929,7 +930,7 @@ void server_impl::notify_waiters(std::map& waiters, // if there is a waiter entry with an index smaller than first entry // it means that notification is out of order which is prohibited - assert(entry_idx >= first_idx); + SCYLLA_ASSERT(entry_idx >= first_idx); waiters.erase(it); if (status.term == entries[entry_idx - first_idx]->term) { @@ -1033,7 +1034,7 @@ void server_impl::send_message(server_id id, Message m) { send_snapshot(id, std::move(m)); } else if constexpr (std::is_same_v) { _stats.snapshot_reply_sent++; - assert(_snapshot_application_done.contains(id)); + SCYLLA_ASSERT(_snapshot_application_done.contains(id)); // Send a reply to install_snapshot after // snapshot application is done. 
_snapshot_application_done[id].set_value(std::move(m)); @@ -1273,7 +1274,7 @@ void server_impl::send_snapshot(server_id dst, install_snapshot&& snp) { }); }); auto res = _snapshot_transfers.emplace(dst, snapshot_transfer{std::move(f), std::move(as), id}); - assert(res.second); + SCYLLA_ASSERT(res.second); } future server_impl::apply_snapshot(server_id from, install_snapshot snp) { @@ -1320,7 +1321,7 @@ future<> server_impl::applier_fiber() { index_t last_idx = batch.back()->idx; term_t last_term = batch.back()->term; - assert(last_idx == _applied_idx + batch.size()); + SCYLLA_ASSERT(last_idx == _applied_idx + batch.size()); boost::range::copy( batch | @@ -1379,7 +1380,7 @@ future<> server_impl::applier_fiber() { } }, [this] (snapshot_descriptor& snp) -> future<> { - assert(snp.idx >= _applied_idx); + SCYLLA_ASSERT(snp.idx >= _applied_idx); // Apply snapshot it to the state machine logger.trace("[{}] apply_fiber applying snapshot {}", _id, snp.id); co_await _state_machine->load_snapshot(snp.id); @@ -1397,7 +1398,7 @@ future<> server_impl::applier_fiber() { [this] (const trigger_snapshot_msg&) -> future<> { auto applied_term = _fsm->log_term_for(_applied_idx); // last truncation index <= snapshot index <= applied index - assert(applied_term); + SCYLLA_ASSERT(applied_term); snapshot_descriptor snp; snp.term = *applied_term; @@ -1443,7 +1444,7 @@ future<> server_impl::wait_for_apply(index_t idx, abort_source* as) { it->second.promise.set_exception(request_aborted()); _awaited_indexes.erase(it); }); - assert(it->second.abort); + SCYLLA_ASSERT(it->second.abort); } co_await it->second.promise.get_future(); } @@ -1476,7 +1477,7 @@ future server_impl::execute_read_barrier(server_id from, sea read->promise.set_exception(request_aborted()); _reads.erase(read); }); - assert(read->abort); + SCYLLA_ASSERT(read->abort); } return read->promise.get_future(); } @@ -1679,7 +1680,7 @@ future<> server_impl::set_configuration(config_member_set c_new, seastar::abort_ 
_non_joint_conf_commit_promise->abort = as->subscribe([this] () noexcept { // If we're inside this callback, the subscription wasn't destroyed yet. // The subscription is destroyed when the field is reset, so if we're here, the field must be engaged. - assert(_non_joint_conf_commit_promise); + SCYLLA_ASSERT(_non_joint_conf_commit_promise); // Whoever resolves the promise must reset the field. Thus, if we're here, the promise is not resolved. std::exchange(_non_joint_conf_commit_promise, std::nullopt)->promise.set_exception(request_aborted{}); }); @@ -1877,7 +1878,7 @@ size_t server_impl::max_command_size() const { std::unique_ptr create_server(server_id uuid, std::unique_ptr rpc, std::unique_ptr state_machine, std::unique_ptr persistence, seastar::shared_ptr failure_detector, server::configuration config) { - assert(uuid != raft::server_id{utils::UUID(0, 0)}); + SCYLLA_ASSERT(uuid != raft::server_id{utils::UUID(0, 0)}); return std::make_unique(uuid, std::move(rpc), std::move(state_machine), std::move(persistence), failure_detector, config); } diff --git a/raft/tracker.cc b/raft/tracker.cc index 8e7d34279f..d9c913071d 100644 --- a/raft/tracker.cc +++ b/raft/tracker.cc @@ -5,6 +5,7 @@ /* * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "tracker.hh" #include @@ -45,7 +46,7 @@ bool follower_progress::is_stray_reject(const append_reply::rejected& rejected) // any reject during snapshot transfer is stray one return true; default: - assert(false); + SCYLLA_ASSERT(false); } return false; } @@ -86,7 +87,7 @@ bool follower_progress::can_send_to() { // before starting to sync the log. 
return false; } - assert(false); + SCYLLA_ASSERT(false); return false; } diff --git a/raft/tracker.hh b/raft/tracker.hh index b3364dd3af..1f69d1d280 100644 --- a/raft/tracker.hh +++ b/raft/tracker.hh @@ -7,6 +7,7 @@ */ #pragma once +#include "utils/assert.hh" #include #include #include "raft.hh" @@ -89,7 +90,7 @@ class tracker: private progress { // Hide size() function we inherited from progress since // it is never right to use it directly in case of joint config size_t size() const { - assert(false); + SCYLLA_ASSERT(false); } public: using progress::begin, progress::end, progress::cbegin, progress::cend, progress::size; @@ -177,7 +178,7 @@ public: if (_granted >= quorum) { return vote_result::WON; } - assert(_responded.size() <= _suffrage.size()); + SCYLLA_ASSERT(_responded.size() <= _suffrage.size()); auto unknown = _suffrage.size() - _responded.size(); return _granted + unknown >= quorum ? vote_result::UNKNOWN : vote_result::LOST; } diff --git a/read_context.hh b/read_context.hh index 84744f5622..34463a49b4 100644 --- a/read_context.hh +++ b/read_context.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include "schema/schema_fwd.hh" #include "query-request.hh" #include "mutation/mutation_fragment.hh" @@ -73,7 +74,7 @@ public: } auto mfopt = co_await (*_reader)(); if (mfopt) { - assert(mfopt->is_partition_start()); + SCYLLA_ASSERT(mfopt->is_partition_start()); _new_last_key = mfopt->as_partition_start().key(); } co_return std::move(mfopt); diff --git a/reader_concurrency_semaphore.cc b/reader_concurrency_semaphore.cc index 38592bae8b..cbb415cd05 100644 --- a/reader_concurrency_semaphore.cc +++ b/reader_concurrency_semaphore.cc @@ -19,6 +19,7 @@ #include "reader_concurrency_semaphore.hh" #include "query-result.hh" #include "readers/mutation_reader.hh" +#include "utils/assert.hh" #include "utils/exceptions.hh" #include "schema/schema.hh" #include "utils/human_readable.hh" @@ -107,7 +108,7 @@ reader_permit::resource_units& 
reader_permit::resource_units::operator=(resource } void reader_permit::resource_units::add(resource_units&& o) { - assert(_permit == o._permit); + SCYLLA_ASSERT(_permit == o._permit); _resources += std::exchange(o._resources, {}); } @@ -335,7 +336,7 @@ public: } void on_admission() { - assert(_state != reader_permit::state::active_await); + SCYLLA_ASSERT(_state != reader_permit::state::active_await); on_permit_active(); consume(_base_resources); _base_resources_consumed = true; @@ -353,17 +354,17 @@ public: } void on_register_as_inactive() { - assert(_state == reader_permit::state::active || _state == reader_permit::state::active_need_cpu || _state == reader_permit::state::waiting_for_memory); + SCYLLA_ASSERT(_state == reader_permit::state::active || _state == reader_permit::state::active_need_cpu || _state == reader_permit::state::waiting_for_memory); on_permit_inactive(reader_permit::state::inactive); } void on_unregister_as_inactive() { - assert(_state == reader_permit::state::inactive); + SCYLLA_ASSERT(_state == reader_permit::state::inactive); on_permit_active(); } void on_evicted() { - assert(_state == reader_permit::state::inactive); + SCYLLA_ASSERT(_state == reader_permit::state::inactive); _state = reader_permit::state::evicted; if (_base_resources_consumed) { signal(_base_resources); @@ -424,7 +425,7 @@ public: } void mark_not_need_cpu() noexcept { - assert(_need_cpu_branches); + SCYLLA_ASSERT(_need_cpu_branches); --_need_cpu_branches; if (_marked_as_need_cpu && !_need_cpu_branches) { // When an exception is thrown, need_cpu and awaits guards might be @@ -447,7 +448,7 @@ public: } void mark_not_awaits() noexcept { - assert(_awaits_branches); + SCYLLA_ASSERT(_awaits_branches); --_awaits_branches; if (_marked_as_awaits && !_awaits_branches) { _state = reader_permit::state::active_need_cpu; @@ -1071,7 +1072,7 @@ reader_concurrency_semaphore::reader_concurrency_semaphore(no_limits, sstring na metrics) {} 
reader_concurrency_semaphore::~reader_concurrency_semaphore() { - assert(!_stats.waiters); + SCYLLA_ASSERT(!_stats.waiters); if (!_stats.total_permits) { // We allow destroy without stop() when the semaphore wasn't used at all yet. return; @@ -1080,7 +1081,7 @@ reader_concurrency_semaphore::~reader_concurrency_semaphore() { on_internal_error_noexcept(rcslog, format("~reader_concurrency_semaphore(): semaphore {} not stopped before destruction", _name)); // With the below conditions, we can get away with the semaphore being // unstopped. In this case don't force an abort. - assert(_inactive_reads.empty() && !_close_readers_gate.get_count() && !_permit_gate.get_count() && !_execution_loop_future); + SCYLLA_ASSERT(_inactive_reads.empty() && !_close_readers_gate.get_count() && !_permit_gate.get_count() && !_execution_loop_future); broken(); } } @@ -1210,7 +1211,7 @@ std::runtime_error reader_concurrency_semaphore::stopped_exception() { } future<> reader_concurrency_semaphore::stop() noexcept { - assert(!_stopped); + SCYLLA_ASSERT(!_stopped); _stopped = true; co_await stop_ext_pre(); clear_inactive_reads(); @@ -1534,20 +1535,20 @@ void reader_concurrency_semaphore::on_permit_need_cpu() noexcept { } void reader_concurrency_semaphore::on_permit_not_need_cpu() noexcept { - assert(_stats.need_cpu_permits); + SCYLLA_ASSERT(_stats.need_cpu_permits); --_stats.need_cpu_permits; - assert(_stats.need_cpu_permits >= _stats.awaits_permits); + SCYLLA_ASSERT(_stats.need_cpu_permits >= _stats.awaits_permits); maybe_admit_waiters(); } void reader_concurrency_semaphore::on_permit_awaits() noexcept { ++_stats.awaits_permits; - assert(_stats.need_cpu_permits >= _stats.awaits_permits); + SCYLLA_ASSERT(_stats.need_cpu_permits >= _stats.awaits_permits); maybe_admit_waiters(); } void reader_concurrency_semaphore::on_permit_not_awaits() noexcept { - assert(_stats.awaits_permits); + SCYLLA_ASSERT(_stats.awaits_permits); --_stats.awaits_permits; } diff --git a/readers/multishard.cc 
b/readers/multishard.cc index b5e92041d7..c2a3034022 100644 --- a/readers/multishard.cc +++ b/readers/multishard.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include #include @@ -281,7 +282,7 @@ public: }; void evictable_reader_v2::do_pause(mutation_reader reader) noexcept { - assert(!_irh); + SCYLLA_ASSERT(!_irh); _irh = _permit.semaphore().register_inactive_read(std::move(reader)); } diff --git a/readers/mutation_readers.cc b/readers/mutation_readers.cc index 0e9ed4fd8b..85529a2122 100644 --- a/readers/mutation_readers.cc +++ b/readers/mutation_readers.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "clustering_key_filter.hh" #include "clustering_ranges_walker.hh" #include "mutation/mutation.hh" @@ -58,7 +59,7 @@ public: switch (mf.mutation_fragment_kind()) { case mutation_fragment_v2::kind::partition_start: // can't happen - assert(false); + SCYLLA_ASSERT(false); break; case mutation_fragment_v2::kind::static_row: break; @@ -1130,7 +1131,7 @@ make_mutation_reader_from_fragments(schema_ptr schema, reader_permit permit, std for (auto it = fragments.begin(); it != fragments.end(); ) { auto&& mf = *it++; auto kind = mf.mutation_fragment_kind(); - assert(kind == mutation_fragment_v2::kind::partition_start); + SCYLLA_ASSERT(kind == mutation_fragment_v2::kind::partition_start); partition_slicer slicer(schema, permit, slice.row_ranges(*schema, mf.as_partition_start().key().key()), [&filtered] (mutation_fragment_v2 mf) { filtered.push_back(std::move(mf)); diff --git a/readers/mutation_source.hh b/readers/mutation_source.hh index 50626baffd..634e22209e 100644 --- a/readers/mutation_source.hh +++ b/readers/mutation_source.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include "query-request.hh" #include "tracing/trace_state.hh" #include "readers/mutation_reader_fwd.hh" @@ -81,7 +82,7 @@ public: tracing::trace_state_ptr, 
streamed_mutation::forwarding fwd, mutation_reader::forwarding) { - assert(!fwd); + SCYLLA_ASSERT(!fwd); return fn(std::move(s), std::move(permit), range, slice); }) {} mutation_source(std::function fn) @@ -92,7 +93,7 @@ public: tracing::trace_state_ptr, streamed_mutation::forwarding fwd, mutation_reader::forwarding) { - assert(!fwd); + SCYLLA_ASSERT(!fwd); return fn(std::move(s), std::move(permit), range); }) {} diff --git a/redis/keyspace_utils.cc b/redis/keyspace_utils.cc index 830688d2ff..26e619aa0d 100644 --- a/redis/keyspace_utils.cc +++ b/redis/keyspace_utils.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include #include "redis/keyspace_utils.hh" @@ -138,7 +139,7 @@ schema_ptr zsets_schema(sstring ks_name) { } future<> create_keyspace_if_not_exists_impl(seastar::sharded& proxy, data_dictionary::database db, seastar::sharded& mm, db::config& config, int default_replication_factor) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); auto keyspace_replication_strategy_options = config.redis_keyspace_replication_strategy_options(); if (!keyspace_replication_strategy_options.contains("class")) { keyspace_replication_strategy_options["class"] = "SimpleStrategy"; diff --git a/release.cc b/release.cc index f475a4750e..596e750d5b 100644 --- a/release.cc +++ b/release.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "version.hh" #include "build_mode.hh" @@ -40,7 +41,7 @@ std::string doc_link(std::string_view url_tail) { std::vector components; boost::split(components, version, boost::algorithm::is_any_of(".")); // Version is compiled into the binary, testing will step on this immediately. 
- assert(components.size() >= 2); + SCYLLA_ASSERT(components.size() >= 2); branch = fmt::format("branch-{}.{}", components[0], components[1]); } diff --git a/repair/repair.cc b/repair/repair.cc index faac8bff62..080fc9dc31 100644 --- a/repair/repair.cc +++ b/repair/repair.cc @@ -21,6 +21,7 @@ #include "service/storage_service.hh" #include "sstables/sstables.hh" #include "partition_range_compat.hh" +#include "utils/assert.hh" #include "utils/error_injection.hh" #include @@ -1014,7 +1015,7 @@ void repair::shard_repair_task_impl::release_resources() noexcept { future<> repair::shard_repair_task_impl::do_repair_ranges() { // Repair tables in the keyspace one after another - assert(table_names().size() == table_ids.size()); + SCYLLA_ASSERT(table_names().size() == table_ids.size()); for (size_t idx = 0; idx < table_ids.size(); idx++) { table_info table_info{ .name = table_names()[idx], @@ -1475,7 +1476,7 @@ future<> repair_service::sync_data_using_repair( co_return; } - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); auto task = co_await _repair_module->make_and_start_task({}, _repair_module->new_repair_uniq_id(), std::move(keyspace), "", std::move(ranges), std::move(neighbors), reason, ops_info); co_await task->done(); } @@ -1559,7 +1560,7 @@ std::optional repair::data_sync_repair_task_impl::expected_children_numb } future<> repair_service::bootstrap_with_repair(locator::token_metadata_ptr tmptr, std::unordered_set bootstrap_tokens) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); using inet_address = gms::inet_address; return seastar::async([this, tmptr = std::move(tmptr), tokens = std::move(bootstrap_tokens)] () mutable { auto& db = get_db().local(); @@ -1736,7 +1737,7 @@ future<> repair_service::bootstrap_with_repair(locator::token_metadata_ptr tmptr } future<> repair_service::do_decommission_removenode_with_repair(locator::token_metadata_ptr tmptr, gms::inet_address leaving_node, shared_ptr ops) { - assert(this_shard_id() 
== 0); + SCYLLA_ASSERT(this_shard_id() == 0); using inet_address = gms::inet_address; return seastar::async([this, tmptr = std::move(tmptr), leaving_node = std::move(leaving_node), ops] () mutable { auto& db = get_db().local(); @@ -1932,13 +1933,13 @@ future<> repair_service::do_decommission_removenode_with_repair(locator::token_m } future<> repair_service::decommission_with_repair(locator::token_metadata_ptr tmptr) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); auto my_address = tmptr->get_topology().my_address(); return do_decommission_removenode_with_repair(std::move(tmptr), my_address, {}); } future<> repair_service::removenode_with_repair(locator::token_metadata_ptr tmptr, gms::inet_address leaving_node, shared_ptr ops) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); return do_decommission_removenode_with_repair(std::move(tmptr), std::move(leaving_node), std::move(ops)).then([this] { rlogger.debug("Triggering off-strategy compaction for all non-system tables on removenode completion"); seastar::sharded& db = get_db(); @@ -1951,7 +1952,7 @@ future<> repair_service::removenode_with_repair(locator::token_metadata_ptr tmpt } future<> repair_service::do_rebuild_replace_with_repair(locator::token_metadata_ptr tmptr, sstring op, sstring source_dc, streaming::stream_reason reason, std::unordered_set ignore_nodes) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); return seastar::async([this, tmptr = std::move(tmptr), source_dc = std::move(source_dc), op = std::move(op), reason, ignore_nodes = std::move(ignore_nodes)] () mutable { auto& db = get_db().local(); auto ks_erms = db.get_non_local_strategy_keyspaces_erms(); @@ -2036,7 +2037,7 @@ future<> repair_service::do_rebuild_replace_with_repair(locator::token_metadata_ } future<> repair_service::rebuild_with_repair(locator::token_metadata_ptr tmptr, sstring source_dc) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); auto op 
= sstring("rebuild_with_repair"); if (source_dc.empty()) { auto& topology = tmptr->get_topology(); @@ -2052,7 +2053,7 @@ future<> repair_service::rebuild_with_repair(locator::token_metadata_ptr tmptr, } future<> repair_service::replace_with_repair(locator::token_metadata_ptr tmptr, std::unordered_set replacing_tokens, std::unordered_set ignore_nodes) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); auto cloned_tm = co_await tmptr->clone_async(); auto op = sstring("replace_with_repair"); auto& topology = tmptr->get_topology(); diff --git a/repair/row_level.cc b/repair/row_level.cc index fb1c0506eb..f1ccb41f74 100644 --- a/repair/row_level.cc +++ b/repair/row_level.cc @@ -20,6 +20,7 @@ #include "mutation_writer/multishard_writer.hh" #include "dht/i_partitioner.hh" #include "dht/sharder.hh" +#include "utils/assert.hh" #include "utils/xx_hasher.hh" #include "utils/UUID.hh" #include "replica/database.hh" @@ -889,7 +890,7 @@ public: } else { add_to_repair_meta_for_followers(*this); } - assert(all_live_peer_shards.size() == all_live_peer_nodes.size()); + SCYLLA_ASSERT(all_live_peer_shards.size() == all_live_peer_nodes.size()); _all_node_states.push_back(repair_node_state(myip(), this_shard_id())); for (unsigned i = 0; i < all_live_peer_nodes.size(); i++) { _all_node_states.push_back(repair_node_state(all_live_peer_nodes[i], all_live_peer_shards[i].value_or(repair_unspecified_shard))); @@ -926,7 +927,7 @@ public: public: std::optional get_peer_node_dst_cpu_id(uint32_t peer_node_idx) { - assert(peer_node_idx + 1 < all_nodes().size()); + SCYLLA_ASSERT(peer_node_idx + 1 < all_nodes().size()); return all_nodes()[peer_node_idx + 1].shard; } @@ -3229,7 +3230,7 @@ future<> repair_service::stop() { } repair_service::~repair_service() { - assert(_stopped); + SCYLLA_ASSERT(_stopped); } static shard_id repair_id_to_shard(tasks::task_id& repair_id) { diff --git a/replica/database.cc b/replica/database.cc index 4c84b00aca..e74c70dba8 100644 --- 
a/replica/database.cc +++ b/replica/database.cc @@ -10,6 +10,7 @@ #include #include "log.hh" #include "replica/database_fwd.hh" +#include "utils/assert.hh" #include "utils/lister.hh" #include "replica/database.hh" #include @@ -388,7 +389,7 @@ database::database(const db::config& cfg, database_config dbcfg, service::migrat , _update_memtable_flush_static_shares_action([this, &cfg] { return _memtable_controller.update_static_shares(cfg.memtable_flush_static_shares()); }) , _memtable_flush_static_shares_observer(cfg.memtable_flush_static_shares.observe(_update_memtable_flush_static_shares_action.make_observer())) { - assert(dbcfg.available_memory != 0); // Detect misconfigured unit tests, see #7544 + SCYLLA_ASSERT(dbcfg.available_memory != 0); // Detect misconfigured unit tests, see #7544 local_schema_registry().init(*this); // TODO: we're never unbound. setup_metrics(); @@ -828,7 +829,7 @@ static bool is_system_table(const schema& s) { } void database::init_schema_commitlog() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); db::commitlog::config c; c.sched_group = _dbcfg.schema_commitlog_scheduling_group; @@ -2587,15 +2588,15 @@ future<> database::truncate(db::system_keyspace& sys_ks, column_family& cf, cons db::replay_position rp = co_await cf.discard_sstables(truncated_at); // TODO: indexes. // Note: since discard_sstables was changed to only count tables owned by this shard, - // we can get zero rp back. Changed assert, and ensure we save at least low_mark. - // #6995 - the assert below was broken in c2c6c71 and remained so for many years. + // we can get zero rp back. Changed SCYLLA_ASSERT, and ensure we save at least low_mark. + // #6995 - the SCYLLA_ASSERT below was broken in c2c6c71 and remained so for many years. // We nowadays do not flush tables with sstables but autosnapshot=false. This means // the low_mark assertion does not hold, because we maybe/probably never got around to // creating the sstables that would create them. 
// If truncated_at is earlier than the time low_mark was taken // then the replay_position returned by discard_sstables may be // smaller than low_mark. - assert(!st.did_flush || rp == db::replay_position() || (truncated_at <= st.low_mark_at ? rp <= st.low_mark : st.low_mark <= rp)); + SCYLLA_ASSERT(!st.did_flush || rp == db::replay_position() || (truncated_at <= st.low_mark_at ? rp <= st.low_mark : st.low_mark <= rp)); if (rp == db::replay_position()) { rp = st.low_mark; } diff --git a/replica/database.hh b/replica/database.hh index 4fd3c275bf..528114c8bf 100644 --- a/replica/database.hh +++ b/replica/database.hh @@ -14,6 +14,7 @@ #include #include #include +#include "utils/assert.hh" #include "utils/hash.hh" #include "db_clock.hh" #include "gc_clock.hh" @@ -1767,12 +1768,12 @@ public: const db::extensions& extensions() const; sstables::sstables_manager& get_user_sstables_manager() const noexcept { - assert(_user_sstables_manager); + SCYLLA_ASSERT(_user_sstables_manager); return *_user_sstables_manager; } sstables::sstables_manager& get_system_sstables_manager() const noexcept { - assert(_system_sstables_manager); + SCYLLA_ASSERT(_system_sstables_manager); return *_system_sstables_manager; } diff --git a/replica/dirty_memory_manager.cc b/replica/dirty_memory_manager.cc index ce37ab7f2c..a6db865c54 100644 --- a/replica/dirty_memory_manager.cc +++ b/replica/dirty_memory_manager.cc @@ -2,6 +2,7 @@ // SPDX-License-Identifier: AGPL-3.0-or-later +#include "utils/assert.hh" #include "dirty_memory_manager.hh" #include "database.hh" // for memtable_list #include @@ -43,7 +44,7 @@ region_group_binomial_group_sanity_check(const region_group::region_heap& bh) { auto t = r->evictable_occupancy().total_space(); fmt::print(" r = {} (id={}), occupancy = {}\n", fmt::ptr(r), r->id(), t); } - assert(0); + SCYLLA_ASSERT(0); #endif } @@ -63,7 +64,7 @@ dirty_memory_manager_logalloc::size_tracked_region* region_group::get_largest_re void region_group::add(logalloc::region* child_r) { 
auto child = static_cast(child_r); - assert(!child->_heap_handle); + SCYLLA_ASSERT(!child->_heap_handle); child->_heap_handle = std::make_optional(_regions.push(child)); region_group_binomial_group_sanity_check(_regions); update_unspooled(child_r->occupancy().total_space()); diff --git a/replica/dirty_memory_manager.hh b/replica/dirty_memory_manager.hh index 10599e2cfa..79350b3160 100644 --- a/replica/dirty_memory_manager.hh +++ b/replica/dirty_memory_manager.hh @@ -15,6 +15,7 @@ #include #include #include "replica/database_fwd.hh" +#include "utils/assert.hh" #include "utils/logalloc.hh" class test_region_group; @@ -257,7 +258,7 @@ public: // If we set a throttle threshold, we'd be postponing many operations. So shutdown must be // called. if (reclaimer_can_block()) { - assert(_shutdown_requested); + SCYLLA_ASSERT(_shutdown_requested); } } region_group& operator=(const region_group&) = delete; diff --git a/replica/distributed_loader.cc b/replica/distributed_loader.cc index 8a82e4c126..e0f22e619c 100644 --- a/replica/distributed_loader.cc +++ b/replica/distributed_loader.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include #include @@ -43,7 +44,7 @@ static std::unordered_set load_prio_keyspaces; static bool population_started = false; void replica::distributed_loader::mark_keyspace_as_load_prio(const sstring& ks) { - assert(!population_started); + SCYLLA_ASSERT(!population_started); load_prio_keyspaces.insert(ks); } @@ -288,11 +289,11 @@ public: ~table_populator() { // All directories must have been stopped // using table_populator::stop() - assert(_sstable_directories.empty()); + SCYLLA_ASSERT(_sstable_directories.empty()); } future<> start() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); for (auto state : { sstables::sstable_state::normal, sstables::sstable_state::staging, sstables::sstable_state::quarantine }) { co_await start_subdir(state); diff --git a/replica/memtable.cc 
b/replica/memtable.cc index 21a47a5d0e..3b68aae07f 100644 --- a/replica/memtable.cc +++ b/replica/memtable.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "memtable.hh" #include "replica/database.hh" #include "mutation/frozen_mutation.hh" @@ -205,7 +206,7 @@ future<> memtable::clear_gently() noexcept { partition_entry& memtable::find_or_create_partition_slow(partition_key_view key) { - assert(!reclaiming_enabled()); + SCYLLA_ASSERT(!reclaiming_enabled()); // FIXME: Perform lookup using std::pair // to avoid unconditional copy of the partition key. @@ -223,7 +224,7 @@ memtable::find_or_create_partition_slow(partition_key_view key) { partition_entry& memtable::find_or_create_partition(const dht::decorated_key& key) { - assert(!reclaiming_enabled()); + SCYLLA_ASSERT(!reclaiming_enabled()); // call lower_bound so we have a hint for the insert, just in case. partitions_type::bound_hint hint; @@ -555,7 +556,7 @@ public: : _mt(mt) {} ~flush_memory_accounter() { - assert(_mt._flushed_memory <= _mt.occupancy().total_space()); + SCYLLA_ASSERT(_mt._flushed_memory <= _mt.occupancy().total_space()); } uint64_t compute_size(memtable_entry& e, partition_snapshot& snp) { return e.size_in_allocator_without_rows(_mt.allocator()) @@ -851,7 +852,7 @@ void memtable_entry::upgrade_schema(logalloc::region& r, const schema_ptr& s, mu void memtable::upgrade_entry(memtable_entry& e) { if (e.schema() != _schema) { - assert(!reclaiming_enabled()); + SCYLLA_ASSERT(!reclaiming_enabled()); e.upgrade_schema(region(), _schema, cleaner()); } } diff --git a/replica/table.cc b/replica/table.cc index 65aa18c824..a1a7f73cd4 100644 --- a/replica/table.cc +++ b/replica/table.cc @@ -25,6 +25,7 @@ #include "sstables/sstables_manager.hh" #include "db/schema_tables.hh" #include "cell_locking.hh" +#include "utils/assert.hh" #include "utils/logalloc.hh" #include "checked-file-impl.hh" #include "view_info.hh" @@ -84,7 +85,7 @@ void 
table::update_sstables_known_generation(sstables::generation_type generatio } sstables::generation_type table::calculate_generation_for_new_table() { - assert(_sstable_generation_generator); + SCYLLA_ASSERT(_sstable_generation_generator); auto ret = std::invoke(*_sstable_generation_generator, sstables::uuid_identifiers{_sstables_manager.uuid_sstable_identifiers()}); tlogger.debug("{}.{} new sstable generation {}", schema()->ks_name(), schema()->cf_name(), ret); @@ -368,7 +369,7 @@ mutation_reader table::make_nonpopulating_cache_reader(schema_ptr schema, reader } future> table::lock_counter_cells(const mutation& m, db::timeout_clock::time_point timeout) { - assert(m.schema() == _counter_cell_locks->schema()); + SCYLLA_ASSERT(m.schema() == _counter_cell_locks->schema()); return _counter_cell_locks->lock_cells(m.decorated_key(), partition_cells_range(m.partition()), timeout); } @@ -2826,7 +2827,7 @@ db::commitlog* table::commitlog() const { } void table::set_schema(schema_ptr s) { - assert(s->is_counter() == _schema->is_counter()); + SCYLLA_ASSERT(s->is_counter() == _schema->is_counter()); tlogger.debug("Changing schema version of {}.{} ({}) from {} to {}", _schema->ks_name(), _schema->cf_name(), _schema->id(), _schema->version(), s->version()); diff --git a/row_cache.cc b/row_cache.cc index 51e76c2a0e..cf7f86cd40 100644 --- a/row_cache.cc +++ b/row_cache.cc @@ -25,6 +25,7 @@ #include "cache_mutation_reader.hh" #include "partition_snapshot_reader.hh" #include "clustering_key_filter.hh" +#include "utils/assert.hh" #include "utils/updateable_value.hh" namespace cache { @@ -1116,7 +1117,7 @@ future<> row_cache::update(external_updater eu, replica::memtable& m, preemption if (cache_i != partitions_end() && hint.match) { cache_entry& entry = *cache_i; upgrade_entry(entry); - assert(entry.schema() == _schema); + SCYLLA_ASSERT(entry.schema() == _schema); _tracker.on_partition_merge(); mem_e.upgrade_schema(_tracker.region(), _schema, _tracker.memtable_cleaner()); return 
entry.partition().apply_to_incomplete(*_schema, std::move(mem_e.partition()), _tracker.memtable_cleaner(), @@ -1247,7 +1248,7 @@ future<> row_cache::invalidate(external_updater eu, dht::partition_range_vector& break; } } - assert(it != _partitions.end()); + SCYLLA_ASSERT(it != _partitions.end()); _tracker.clear_continuity(*it); return stop_iteration(it == end); }); @@ -1351,7 +1352,7 @@ void rows_entry::on_evicted(cache_tracker& tracker) noexcept { mutation_partition_v2::rows_type* rows = it.tree_if_singular(); if (rows != nullptr) { - assert(it->is_last_dummy()); + SCYLLA_ASSERT(it->is_last_dummy()); partition_version& pv = partition_version::container_of(mutation_partition_v2::container_of(*rows)); if (pv.is_referenced_from_entry()) { partition_entry& pe = partition_entry::container_of(pv); @@ -1423,7 +1424,7 @@ const schema_ptr& row_cache::schema() const { void row_cache::upgrade_entry(cache_entry& e) { if (e.schema() != _schema && !e.partition().is_locked()) { auto& r = _tracker.region(); - assert(!r.reclaiming_enabled()); + SCYLLA_ASSERT(!r.reclaiming_enabled()); e.partition().upgrade(r, _schema, _tracker.cleaner(), &_tracker); } } diff --git a/schema/schema.cc b/schema/schema.cc index 710d91f59b..f2b9a2709d 100644 --- a/schema/schema.cc +++ b/schema/schema.cc @@ -10,6 +10,7 @@ #include #include "db/view/view.hh" #include "timestamp.hh" +#include "utils/assert.hh" #include "utils/UUID_gen.hh" #include "cql3/column_identifier.hh" #include "cql3/util.hh" @@ -89,7 +90,7 @@ bool operator==(const column_mapping& lhs, const column_mapping& rhs) { } const column_mapping_entry& column_mapping::column_at(column_kind kind, column_id id) const { - assert(kind == column_kind::regular_column || kind == column_kind::static_column); + SCYLLA_ASSERT(kind == column_kind::regular_column || kind == column_kind::static_column); return kind == column_kind::regular_column ? 
regular_column_at(id) : static_column_at(id); } @@ -404,7 +405,7 @@ schema::schema(private_tag, const raw_schema& raw, std::optional column_id id = 0; for (auto& def : _raw._columns) { def.column_specification = make_column_specification(def); - assert(!def.id || def.id == id - column_offset(def.kind)); + SCYLLA_ASSERT(!def.id || def.id == id - column_offset(def.kind)); def.ordinal_id = static_cast(id); def.id = id - column_offset(def.kind); @@ -1236,7 +1237,7 @@ schema_builder& schema_builder::rename_column(bytes from, bytes to) auto it = std::find_if(_raw._columns.begin(), _raw._columns.end(), [&] (auto& col) { return col.name() == from; }); - assert(it != _raw._columns.end()); + SCYLLA_ASSERT(it != _raw._columns.end()); auto& def = *it; column_definition new_def(to, def.type, def.kind, def.component_index()); _raw._columns.erase(it); @@ -1246,12 +1247,12 @@ schema_builder& schema_builder::rename_column(bytes from, bytes to) schema_builder& schema_builder::alter_column_type(bytes name, data_type new_type) { auto it = boost::find_if(_raw._columns, [&name] (auto& c) { return c.name() == name; }); - assert(it != _raw._columns.end()); + SCYLLA_ASSERT(it != _raw._columns.end()); it->type = new_type; if (new_type->is_multi_cell()) { auto c_it = _raw._collections.find(name); - assert(c_it != _raw._collections.end()); + SCYLLA_ASSERT(c_it != _raw._collections.end()); c_it->second = new_type; } return *this; @@ -1259,7 +1260,7 @@ schema_builder& schema_builder::alter_column_type(bytes name, data_type new_type schema_builder& schema_builder::mark_column_computed(bytes name, column_computation_ptr computation) { auto it = boost::find_if(_raw._columns, [&name] (const column_definition& c) { return c.name() == name; }); - assert(it != _raw._columns.end()); + SCYLLA_ASSERT(it != _raw._columns.end()); it->set_computed(std::move(computation)); return *this; @@ -1556,7 +1557,7 @@ sstring to_sstring(const schema& s) { if (s.is_compound()) { return compound_name(s); } else if 
(s.clustering_key_size() == 1) { - assert(s.is_dense() || s.is_static_compact_table()); + SCYLLA_ASSERT(s.is_dense() || s.is_static_compact_table()); return s.clustering_key_columns().front().type->name(); } else { return s.regular_column_name_type()->name(); diff --git a/schema/schema.hh b/schema/schema.hh index b9162ec6b8..61349467d7 100644 --- a/schema/schema.hh +++ b/schema/schema.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include #include @@ -356,7 +357,7 @@ public: return is_primary_key(); } uint32_t component_index() const { - assert(has_component_index()); + SCYLLA_ASSERT(has_component_index()); return id; } uint32_t position() const { @@ -962,8 +963,8 @@ public: // // auto schema = make_schema(); // auto reverse_schema = schema->get_reversed(); - // assert(reverse_schema->get_reversed().get() == schema.get()); - // assert(schema->get_reversed().get() == reverse_schema.get()); + // SCYLLA_ASSERT(reverse_schema->get_reversed().get() == schema.get()); + // SCYLLA_ASSERT(schema->get_reversed().get() == reverse_schema.get()); // schema_ptr get_reversed() const; }; @@ -982,7 +983,7 @@ class view_ptr final { public: explicit view_ptr(schema_ptr schema) noexcept : _schema(schema) { if (schema) { - assert(_schema->is_view()); + SCYLLA_ASSERT(_schema->is_view()); } } diff --git a/schema/schema_registry.cc b/schema/schema_registry.cc index f8bb9f8e32..b0076d28f9 100644 --- a/schema/schema_registry.cc +++ b/schema/schema_registry.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include "schema_registry.hh" @@ -39,7 +40,7 @@ schema_registry_entry::schema_registry_entry(table_schema_version v, schema_regi { _erase_timer.set_callback([this] { slogger.debug("Dropping {}", _version); - assert(!_schema); + SCYLLA_ASSERT(!_schema); try { _registry._entries.erase(_version); } catch (...) 
{ @@ -246,7 +247,7 @@ void schema_registry_entry::detach_schema() noexcept { } frozen_schema schema_registry_entry::frozen() const { - assert(_state >= state::LOADED); + SCYLLA_ASSERT(_state >= state::LOADED); return *_frozen_schema; } @@ -313,7 +314,7 @@ global_schema_ptr::global_schema_ptr(const global_schema_ptr& o) global_schema_ptr::global_schema_ptr(global_schema_ptr&& o) noexcept { auto current = this_shard_id(); - assert(o._cpu_of_origin == current); + SCYLLA_ASSERT(o._cpu_of_origin == current); _ptr = std::move(o._ptr); _cpu_of_origin = current; _base_schema = std::move(o._base_schema); diff --git a/serializer.hh b/serializer.hh index 1a78755aba..1a4317097b 100644 --- a/serializer.hh +++ b/serializer.hh @@ -9,6 +9,7 @@ #include #include +#include "utils/assert.hh" #include "utils/managed_bytes.hh" #include "bytes_ostream.hh" #include @@ -377,7 +378,7 @@ serialize_gc_clock_duration_value(Output& out, int64_t v) { if (!gc_clock_using_3_1_0_serialization) { // This should have been caught by the CQL layer, so this is just // for extra safety. - assert(int32_t(v) == v); + SCYLLA_ASSERT(int32_t(v) == v); serializer::write(out, v); } else { serializer::write(out, v); diff --git a/service/broadcast_tables/experimental/lang.cc b/service/broadcast_tables/experimental/lang.cc index 997c0e4ace..910984c697 100644 --- a/service/broadcast_tables/experimental/lang.cc +++ b/service/broadcast_tables/experimental/lang.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "lang.hh" #include @@ -73,7 +74,7 @@ future execute_broadcast_table_query( co_return query_result_select{}; } - assert(rs->partitions().size() == 1); // In this version only one value per partition key is allowed. + SCYLLA_ASSERT(rs->partitions().size() == 1); // In this version only one value per partition key is allowed. 
const auto& p = rs->partitions()[0]; auto mutation = p.mut().unfreeze(schema); @@ -92,7 +93,7 @@ future execute_broadcast_table_query( bool found = !rs->partitions().empty(); - assert(!found || rs->partitions().size() == 1); // In this version at most one value per partition key is allowed. + SCYLLA_ASSERT(!found || rs->partitions().size() == 1); // In this version at most one value per partition key is allowed. auto new_mutation = found ? rs->partitions()[0].mut().unfreeze(schema) diff --git a/service/load_broadcaster.hh b/service/load_broadcaster.hh index bfe8e17365..5e705bc525 100644 --- a/service/load_broadcaster.hh +++ b/service/load_broadcaster.hh @@ -9,6 +9,7 @@ #pragma once +#include "utils/assert.hh" #include "replica/database_fwd.hh" #include "gms/i_endpoint_state_change_subscriber.hh" #include "gms/gossiper.hh" @@ -32,7 +33,7 @@ public: _gossiper.register_(shared_from_this()); } ~load_broadcaster() { - assert(_stopped); + SCYLLA_ASSERT(_stopped); } virtual future<> on_change(gms::inet_address endpoint, const gms::application_state_map& states, gms::permit_id pid) override { diff --git a/service/migration_manager.cc b/service/migration_manager.cc index 882da48f57..bf9ea7ba0b 100644 --- a/service/migration_manager.cc +++ b/service/migration_manager.cc @@ -21,6 +21,7 @@ #include "service/migration_listener.hh" #include "message/messaging_service.hh" #include "gms/feature_service.hh" +#include "utils/assert.hh" #include "utils/runtime.hh" #include "gms/gossiper.hh" #include "view_info.hh" @@ -896,7 +897,7 @@ future<> migration_manager::push_schema_mutation(const gms::inet_address& endpoi template future<> migration_manager::announce_with_raft(std::vector schema, group0_guard guard, std::string_view description) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); auto schema_features = _feat.cluster_schema_features(); auto adjusted_schema = db::schema_tables::adjust_schema_for_schema_features(std::move(schema), schema_features); @@ -933,7 
+934,7 @@ future<> migration_manager::announce_without_raft(std::vector schema, static mutation make_group0_schema_version_mutation(const data_dictionary::database db, const group0_guard& guard) { auto s = db.find_schema(db::system_keyspace::NAME, db::system_keyspace::SCYLLA_LOCAL); auto* cdef = s->get_column_definition("value"); - assert(cdef); + SCYLLA_ASSERT(cdef); mutation m(s, partition_key::from_singular(*s, "group0_schema_version")); auto cell = guard.with_raft() @@ -958,7 +959,7 @@ static void add_committed_by_group0_flag(std::vector& schema, const gr auto& scylla_tables_schema = *mut.schema(); auto cdef = scylla_tables_schema.get_column_definition("committed_by_group0"); - assert(cdef); + SCYLLA_ASSERT(cdef); for (auto& cr: mut.partition().clustered_rows()) { cr.row().cells().apply(*cdef, atomic_cell::make_live( @@ -992,7 +993,7 @@ template future<> migration_manager::announce(std::vector schema, group0_guard, std::string_view description); future migration_manager::start_group0_operation() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); return _group0_client.start_operation(_as, raft_timeout{}); } @@ -1010,7 +1011,7 @@ void migration_manager::passive_announce(table_schema_version version) { } future<> migration_manager::passive_announce() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); mlogger.info("Gossiping my schema version {}", _schema_version_to_publish); return _gossiper.add_local_application_state(gms::application_state::SCHEMA, gms::versioned_value::schema(_schema_version_to_publish)); } diff --git a/service/qos/service_level_controller.cc b/service/qos/service_level_controller.cc index 1cc13f3dd5..90c482837a 100644 --- a/service/qos/service_level_controller.cc +++ b/service/qos/service_level_controller.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include @@ -222,7 +223,7 @@ future<> 
service_level_controller::update_service_levels_from_distributed_data() } void service_level_controller::stop_legacy_update_from_distributed_data() { - assert(this_shard_id() == global_controller); + SCYLLA_ASSERT(this_shard_id() == global_controller); if (_global_controller_db->dist_data_update_aborter.abort_requested()) { return; diff --git a/service/raft/discovery.cc b/service/raft/discovery.cc index e64571c787..8bef0889bd 100644 --- a/service/raft/discovery.cc +++ b/service/raft/discovery.cc @@ -5,6 +5,7 @@ /* * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "service/raft/discovery.hh" namespace service { @@ -61,7 +62,7 @@ void discovery::step(const peer_list& peers) { // If we have this peer, its ID must be the // same as we know (with the exceptions of seeds, // for which servers might not know ids at first). - assert(it == _peers.end() || it->id == addr.id || addr.id == raft::server_id{}); + SCYLLA_ASSERT(it == _peers.end() || it->id == addr.id || addr.id == raft::server_id{}); } } if (refresh_peer_list) { @@ -114,7 +115,7 @@ std::optional discovery::request(const peer_list& peers) { } void discovery::response(discovery_peer from, const peer_list& peers) { - assert(_peers.contains(from)); + SCYLLA_ASSERT(_peers.contains(from)); _responded.emplace(from); step(peers); } diff --git a/service/raft/raft_address_map.hh b/service/raft/raft_address_map.hh index f1692b7db4..f6a42c163f 100644 --- a/service/raft/raft_address_map.hh +++ b/service/raft/raft_address_map.hh @@ -7,6 +7,7 @@ */ #pragma once +#include "utils/assert.hh" #include "gms/inet_address.hh" #include "gms/generation-number.hh" #include "raft/raft.hh" @@ -277,12 +278,12 @@ public: {} future<> stop() { - assert(_replication_fiber); + SCYLLA_ASSERT(_replication_fiber); co_await *std::exchange(_replication_fiber, std::nullopt); } ~raft_address_map_t() { - assert(!_replication_fiber); + SCYLLA_ASSERT(!_replication_fiber); } // Find a mapping with a given id. 
diff --git a/service/raft/raft_group0.cc b/service/raft/raft_group0.cc index 5e6bf7cbe2..b967a11b3d 100644 --- a/service/raft/raft_group0.cc +++ b/service/raft/raft_group0.cc @@ -27,6 +27,7 @@ #include "gms/feature_service.hh" #include "db/system_keyspace.hh" #include "replica/database.hh" +#include "utils/assert.hh" #include "utils/error_injection.hh" #include @@ -269,7 +270,7 @@ static future load_discovered_peers(cql3::query_processor& "SELECT ip_addr, raft_server_id FROM system.{} WHERE key = '{}'", db::system_keyspace::DISCOVERY, DISCOVERY_KEY); auto rs = co_await qp.execute_internal(load_cql, cql3::query_processor::cache_internal::yes); - assert(rs); + SCYLLA_ASSERT(rs); discovery::peer_list peers; for (auto& r: *rs) { @@ -286,7 +287,7 @@ static mutation make_discovery_mutation(discovery::peer_list peers) { auto s = db::system_keyspace::discovery(); auto ts = api::new_timestamp(); auto raft_id_cdef = s->get_column_definition("raft_server_id"); - assert(raft_id_cdef); + SCYLLA_ASSERT(raft_id_cdef); mutation m(s, partition_key::from_singular(*s, DISCOVERY_KEY)); for (auto& p: peers) { @@ -389,7 +390,7 @@ future<> raft_group0::abort() { } future<> raft_group0::start_server_for_group0(raft::group_id group0_id, service::storage_service& ss, cql3::query_processor& qp, service::migration_manager& mm, bool topology_change_enabled) { - assert(group0_id != raft::group_id{}); + SCYLLA_ASSERT(group0_id != raft::group_id{}); // The address map may miss our own id in case we connect // to an existing Raft Group 0 leader. 
auto my_id = load_my_id(); @@ -442,8 +443,8 @@ future<> raft_group0::leadership_monitor_fiber() { future<> raft_group0::join_group0(std::vector seeds, shared_ptr handshaker, service::storage_service& ss, cql3::query_processor& qp, service::migration_manager& mm, db::system_keyspace& sys_ks, bool topology_change_enabled) { - assert(this_shard_id() == 0); - assert(!joined_group0()); + SCYLLA_ASSERT(this_shard_id() == 0); + SCYLLA_ASSERT(!joined_group0()); auto group0_id = raft::group_id{co_await sys_ks.get_raft_group0_id()}; if (group0_id) { @@ -501,7 +502,7 @@ future<> raft_group0::join_group0(std::vector seeds, shared_p // state is present, and if it is, do nothing. } - assert(server); + SCYLLA_ASSERT(server); if (server->get_configuration().contains(my_id)) { // True if we started a new group or completed a configuration change initiated earlier. group0_log.info("server {} already in group 0 (id {}) as {}", my_id, group0_id, @@ -614,7 +615,7 @@ static future synchronize_schema( abort_source&); future raft_group0::use_raft() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); if (((co_await _client.get_group0_upgrade_state()).second) == group0_upgrade_state::recovery) { group0_log.warn("setup_group0: Raft RECOVERY mode, skipping group 0 setup."); @@ -868,7 +869,7 @@ future<> raft_group0::remove_from_group0(raft::server_id node) { } future raft_group0::wait_for_raft() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); auto upgrade_state = (co_await _client.get_group0_upgrade_state()).second; if (upgrade_state == group0_upgrade_state::recovery) { @@ -1609,7 +1610,7 @@ static auto warn_if_upgrade_takes_too_long() { } future<> raft_group0::upgrade_to_group0(service::storage_service& ss, cql3::query_processor& qp, service::migration_manager& mm, bool topology_change_enabled) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); auto start_state = (co_await _client.get_group0_upgrade_state()).second; switch 
(start_state) { @@ -1648,7 +1649,7 @@ future<> raft_group0::upgrade_to_group0(service::storage_service& ss, cql3::quer // `start_state` is either `use_pre_raft_procedures` or `synchronize`. future<> raft_group0::do_upgrade_to_group0(group0_upgrade_state start_state, service::storage_service& ss, cql3::query_processor& qp, service::migration_manager& mm, bool topology_change_enabled) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); // Check if every peer knows about the upgrade procedure. // diff --git a/service/raft/raft_group0_client.cc b/service/raft/raft_group0_client.cc index c97cc1b9bc..2a1440876e 100644 --- a/service/raft/raft_group0_client.cc +++ b/service/raft/raft_group0_client.cc @@ -22,6 +22,7 @@ #include "idl/group0_state_machine.dist.impl.hh" #include "service/raft/group0_state_machine.hh" #include "replica/database.hh" +#include "utils/assert.hh" #include "utils/to_string.hh" @@ -116,7 +117,7 @@ struct group0_guard::impl { {} void release_read_apply_mutex() { - assert(_read_apply_mutex_holder.count() == 1); + SCYLLA_ASSERT(_read_apply_mutex_holder.count() == 1); _read_apply_mutex_holder.return_units(1); } }; diff --git a/service/storage_proxy.cc b/service/storage_proxy.cc index cb28db9912..1caa006566 100644 --- a/service/storage_proxy.cc +++ b/service/storage_proxy.cc @@ -52,6 +52,7 @@ #include #include #include +#include "utils/assert.hh" #include "utils/latency.hh" #include "schema/schema.hh" #include "query_ranges_to_vnodes.hh" @@ -233,7 +234,7 @@ public: } ~remote() { - assert(_stopped); + SCYLLA_ASSERT(_stopped); } // Must call before destroying the `remote` object. 
@@ -734,7 +735,7 @@ private: co_await utils::get_local_injector().inject("storage_proxy::handle_read", [s] (auto& handler) -> future<> { const auto cf_name = handler.get("cf_name"); - assert(cf_name); + SCYLLA_ASSERT(cf_name); if (s->cf_name() != cf_name) { co_return; } @@ -1898,7 +1899,7 @@ paxos_response_handler::begin_and_repair_paxos(client_state& cs, unsigned& conte co_await sleep_approx_50ms(); continue; } - assert(true); // no fall through + SCYLLA_ASSERT(true); // no fall through } // To be able to propose our value on a new round, we need a quorum of replica to have learn @@ -2414,13 +2415,13 @@ void storage_proxy::unthrottle() { storage_proxy::response_id_type storage_proxy::register_response_handler(shared_ptr&& h) { auto id = h->id(); auto e = _response_handlers.emplace(id, std::move(h)); - assert(e.second); + SCYLLA_ASSERT(e.second); return id; } void storage_proxy::remove_response_handler(storage_proxy::response_id_type id) { auto entry = _response_handlers.find(id); - assert(entry != _response_handlers.end()); + SCYLLA_ASSERT(entry != _response_handlers.end()); remove_response_handler_entry(std::move(entry)); } @@ -2922,7 +2923,7 @@ namespace service { using namespace std::literals::chrono_literals; storage_proxy::~storage_proxy() { - assert(!_remote); + SCYLLA_ASSERT(!_remote); } storage_proxy::storage_proxy(distributed& db, storage_proxy::config cfg, db::view::node_update_backlog& max_view_update_backlog, @@ -3405,7 +3406,7 @@ future> storage_proxy::mutate_begin(unique_response_handler_vector ids, // this function should be called with a future that holds result of mutation attempt (usually // future returned by mutate_begin()). The future should be ready when function is called. 
future> storage_proxy::mutate_end(future> mutate_result, utils::latency_counter lc, write_stats& stats, tracing::trace_state_ptr trace_state) { - assert(mutate_result.available()); + SCYLLA_ASSERT(mutate_result.available()); stats.write.mark(lc.stop().latency()); return utils::result_futurize_try([&] { @@ -3704,7 +3705,7 @@ storage_proxy::mutate_with_triggers(std::vector mutations, db::consist bool should_mutate_atomically, tracing::trace_state_ptr tr_state, service_permit permit, db::allow_per_partition_rate_limit allow_limit, bool raw_counters) { warn(unimplemented::cause::TRIGGERS); if (should_mutate_atomically) { - assert(!raw_counters); + SCYLLA_ASSERT(!raw_counters); return mutate_atomically_result(std::move(mutations), cl, timeout, std::move(tr_state), std::move(permit)); } return mutate_result(std::move(mutations), cl, timeout, std::move(tr_state), std::move(permit), allow_limit, raw_counters); @@ -4387,7 +4388,7 @@ public: } } bool digests_match() const { - assert(response_count()); + SCYLLA_ASSERT(response_count()); if (response_count() == 1) { return true; } @@ -4599,7 +4600,7 @@ private: break; } } - assert(last_partition); + SCYLLA_ASSERT(last_partition); return get_last_row(s, *last_partition, is_reversed); } @@ -4791,7 +4792,7 @@ public: } future> resolve(schema_ptr schema, const query::read_command& cmd, uint64_t original_row_limit, uint64_t original_per_partition_limit, uint32_t original_partition_limit) { - assert(_data_results.size()); + SCYLLA_ASSERT(_data_results.size()); if (_data_results.size() == 1) { // if there is a result only from one node there is nothing to reconcile @@ -6232,8 +6233,8 @@ future storage_proxy::cas(schema_ptr schema, shared_ptr reque co_await coroutine::return_exception(exceptions::invalid_request_exception(msg)); } - assert(partition_ranges.size() == 1); - assert(query::is_single_partition(partition_ranges[0])); + SCYLLA_ASSERT(partition_ranges.size() == 1); + 
SCYLLA_ASSERT(query::is_single_partition(partition_ranges[0])); db::validate_for_cas(cl_for_paxos); db::validate_for_cas_learn(cl_for_learn, schema->ks_name()); @@ -6672,7 +6673,7 @@ void storage_proxy::on_leave_cluster(const gms::inet_address& endpoint, const lo void storage_proxy::on_up(const gms::inet_address& endpoint) {}; void storage_proxy::cancel_write_handlers(noncopyable_function filter_fun) { - assert(thread::running_in_thread()); + SCYLLA_ASSERT(thread::running_in_thread()); auto it = _cancellable_write_handlers_list->begin(); while (it != _cancellable_write_handlers_list->end()) { auto guard = it->shared_from_this(); diff --git a/service/storage_service.cc b/service/storage_service.cc index f5346857a8..9d6b994349 100644 --- a/service/storage_service.cc +++ b/service/storage_service.cc @@ -46,6 +46,7 @@ #include "service/raft/group0_state_machine.hh" #include "service/raft/raft_group0_client.hh" #include "service/topology_state_machine.hh" +#include "utils/assert.hh" #include "utils/UUID.hh" #include "utils/to_string.hh" #include "gms/inet_address.hh" @@ -301,7 +302,7 @@ bool storage_service::should_bootstrap() { */ static future<> set_gossip_tokens(gms::gossiper& g, const std::unordered_set& tokens, std::optional cdc_gen_id) { - assert(!tokens.empty()); + SCYLLA_ASSERT(!tokens.empty()); // Order is important: both the CDC streams timestamp and tokens must be known when a node handles our status. return g.add_local_application_state( @@ -563,14 +564,14 @@ future storage_service::sync_raft_t co_await update_topology_change_info(tmptr, ::format("{} {}/{}", rs.state, id, ip)); break; case node_state::replacing: { - assert(_topology_state_machine._topology.req_param.contains(id)); + SCYLLA_ASSERT(_topology_state_machine._topology.req_param.contains(id)); auto replaced_id = std::get(_topology_state_machine._topology.req_param[id]).replaced_id; auto existing_ip = am.find(replaced_id); if (!existing_ip) { // FIXME: What if not known? 
on_fatal_internal_error(rtlogger, ::format("Cannot map id of a node being replaced {} to its ip", replaced_id)); } - assert(existing_ip); + SCYLLA_ASSERT(existing_ip); const auto replaced_host_id = locator::host_id(replaced_id.uuid()); tmptr->update_topology(replaced_host_id, std::nullopt, locator::node::state::being_replaced); update_topology(host_id, ip, rs); @@ -649,7 +650,7 @@ future<> storage_service::notify_nodes_after_sync(nodes_to_notify_after_sync&& n future<> storage_service::topology_state_load() { #ifdef SEASTAR_DEBUG static bool running = false; - assert(!running); // The function is not re-entrant + SCYLLA_ASSERT(!running); // The function is not re-entrant auto d = defer([] { running = false; }); @@ -817,7 +818,7 @@ future<> storage_service::topology_state_load() { } future<> storage_service::topology_transition() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); co_await topology_state_load(); // reload new state _topology_state_machine.event.broadcast(); @@ -873,7 +874,7 @@ future<> storage_service::merge_topology_snapshot(raft_snapshot snp) { } future<> storage_service::update_service_levels_cache() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); co_await _sl_controller.local().update_service_levels_from_distributed_data(); } @@ -1414,7 +1415,7 @@ future<> storage_service::update_topology_with_local_metadata(raft::server& raft } future<> storage_service::start_upgrade_to_raft_topology() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); if (_topology_state_machine._topology.upgrade_state != topology::upgrade_state_type::not_upgraded) { co_return; @@ -1466,7 +1467,7 @@ future<> storage_service::start_upgrade_to_raft_topology() { } topology::upgrade_state_type storage_service::get_topology_upgrade_state() const { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); return _topology_state_machine._topology.upgrade_state; } @@ -1730,7 +1731,7 @@ future<> 
storage_service::join_token_ring(shardedload_my_id(), @@ -1974,7 +1975,7 @@ future<> storage_service::join_token_ring(sharded storage_service::join_token_ring(shardedfinish_setup_after_join(*this, _qp, _migration_manager.local(), false); co_await _cdc_gens.local().after_join(std::move(cdc_gen_id)); @@ -2054,7 +2055,7 @@ future<> storage_service::join_token_ring(sharded storage_service::track_upgrade_progress_to_topology_coordinator(sharded& sys_dist_ks, sharded& proxy) { - assert(_group0); + SCYLLA_ASSERT(_group0); while (true) { _group0_as.check(); @@ -2190,7 +2191,7 @@ future<> storage_service::bootstrap(std::unordered_set& bootstrap_tokens, // After we pick a generation timestamp, we start gossiping it, and we stick with it. // We don't do any other generation switches (unless we crash before complecting bootstrap). - assert(!cdc_gen_id); + SCYLLA_ASSERT(!cdc_gen_id); cdc_gen_id = _cdc_gens.local().legacy_make_new_generation(bootstrap_tokens, !is_first_node()).get(); @@ -2223,9 +2224,9 @@ future<> storage_service::bootstrap(std::unordered_set& bootstrap_tokens, slogger.debug("Removing replaced endpoint {} from system.peers", replace_addr); _sys_ks.local().remove_endpoint(replace_addr).get(); - assert(replaced_host_id); + SCYLLA_ASSERT(replaced_host_id); auto raft_id = raft::server_id{replaced_host_id.uuid()}; - assert(_group0); + SCYLLA_ASSERT(_group0); bool raft_available = _group0->wait_for_raft().get(); if (raft_available) { slogger.info("Replace: removing {}/{} from group 0...", replace_addr, raft_id); @@ -2842,7 +2843,7 @@ future<> storage_service::stop_transport() { } future<> storage_service::drain_on_shutdown() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); return (_operation_mode == mode::DRAINING || _operation_mode == mode::DRAINED) ? 
_drain_finished.get_future() : do_drain(); } @@ -2874,7 +2875,7 @@ bool storage_service::is_topology_coordinator_enabled() const { future<> storage_service::join_cluster(sharded& sys_dist_ks, sharded& proxy, start_hint_manager start_hm, gms::generation_type new_generation) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); if (_sys_ks.local().was_decommissioned()) { auto msg = sstring("This node was decommissioned and will not rejoin the ring unless " @@ -2993,7 +2994,7 @@ future<> storage_service::join_cluster(sharded& } future<> storage_service::replicate_to_all_cores(mutable_token_metadata_ptr tmptr) noexcept { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); slogger.debug("Replicating token_metadata to all cores"); std::exception_ptr ex; @@ -3118,8 +3119,8 @@ future<> storage_service::replicate_to_all_cores(mutable_token_metadata_ptr tmpt auto& ss_ = ss; const auto ks_name = handler.get("ks_name"); const auto cf_name = handler.get("cf_name"); - assert(ks_name); - assert(cf_name); + SCYLLA_ASSERT(ks_name); + SCYLLA_ASSERT(cf_name); if (cf.schema()->ks_name() != *ks_name || cf.schema()->cf_name() != *cf_name) { co_return; } @@ -3464,7 +3465,7 @@ void storage_service::set_mode(mode m) { slogger.info("entering {} mode", m); _operation_mode = m; } else { - // This shouldn't happen, but it's too much for an assert, + // This shouldn't happen, but it's too much for an SCYLLA_ASSERT, // so -- just emit a warning in the hope that it will be // noticed, reported and fixed slogger.warn("re-entering {} mode", m); @@ -3686,7 +3687,7 @@ future<> storage_service::decommission() { slogger.info("DECOMMISSIONING: starts"); ctl.req.leaving_nodes = std::list{endpoint}; - assert(ss._group0); + SCYLLA_ASSERT(ss._group0); bool raft_available = ss._group0->wait_for_raft().get(); try { @@ -3738,7 +3739,7 @@ future<> storage_service::decommission() { if (raft_available && left_token_ring) { slogger.info("decommission[{}]: leaving Raft group 
0", uuid); - assert(ss._group0); + SCYLLA_ASSERT(ss._group0); ss._group0->leave_group0().get(); slogger.info("decommission[{}]: left Raft group 0", uuid); } @@ -4013,7 +4014,7 @@ future<> storage_service::removenode(locator::host_id host_id, std::listget_endpoint_for_host_id_if_known(host_id); - assert(ss._group0); + SCYLLA_ASSERT(ss._group0); auto raft_id = raft::server_id{host_id.uuid()}; bool raft_available = ss._group0->wait_for_raft().get(); bool is_group0_member = raft_available && ss._group0->is_member(raft_id, false); @@ -4130,7 +4131,7 @@ future<> storage_service::removenode(locator::host_id host_id, std::list storage_service::check_and_repair_cdc_streams() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); if (!_cdc_gens.local_is_initialized()) { return make_exception_future<>(std::runtime_error("CDC generation service not initialized yet")); @@ -5157,7 +5158,7 @@ std::chrono::milliseconds storage_service::get_ring_delay() { } future storage_service::get_token_metadata_lock() noexcept { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); return _shared_token_metadata.get_lock(); } @@ -5172,7 +5173,7 @@ future storage_service::get_token_metadata_lock() // // Note: must be called on shard 0. 
future<> storage_service::mutate_token_metadata(std::function (mutable_token_metadata_ptr)> func, acquire_merge_lock acquire_merge_lock) noexcept { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); std::optional tmlock; if (acquire_merge_lock) { @@ -5184,7 +5185,7 @@ future<> storage_service::mutate_token_metadata(std::function (mutable_ } future<> storage_service::update_topology_change_info(mutable_token_metadata_ptr tmptr, sstring reason) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); try { locator::dc_rack_fn get_dc_rack_by_host_id([this, &tm = *tmptr] (locator::host_id host_id) -> std::optional { @@ -5334,7 +5335,7 @@ void storage_service::start_tablet_split_monitor() { } future<> storage_service::snitch_reconfigured() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); auto& snitch = _snitch.local(); co_await mutate_token_metadata([&snitch] (mutable_token_metadata_ptr tmptr) -> future<> { // re-read local rack and DC info @@ -5532,7 +5533,7 @@ future storage_service::raft_topology_cmd_handler(raft locator::endpoint_dc_rack{rs.datacenter, rs.rack}, rs.ring.value().tokens, get_token_metadata_ptr()); auto replaced_id = std::get(_topology_state_machine._topology.req_param[raft_server.id()]).replaced_id; auto existing_ip = _group0->address_map().find(replaced_id); - assert(existing_ip); + SCYLLA_ASSERT(existing_ip); co_await bs.bootstrap(streaming::stream_reason::replace, _gossiper, _topology_state_machine._topology.session, *existing_ip); } })); @@ -5571,7 +5572,7 @@ future storage_service::raft_topology_cmd_handler(raft rtlogger.debug("streaming to remove node {}", id); const auto& am = _group0->address_map(); auto ip = am.find(id); // map node id to ip - assert (ip); // what to do if address is unknown? + SCYLLA_ASSERT (ip); // what to do if address is unknown? 
tasks::task_info parent_info{tasks::task_id{it->second.request_id}, 0}; auto task = co_await get_task_manager_module().make_and_start_task(parent_info, parent_info.id, streaming::stream_reason::removenode, _remove_result[id], coroutine::lambda([this, ip] () { @@ -6494,7 +6495,7 @@ future storage_service::join_node_request_handler(join } future storage_service::join_node_response_handler(join_node_response_params params) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); // Usually this handler will only run once, but there are some cases where we might get more than one RPC, // possibly happening at the same time, e.g.: @@ -6855,7 +6856,7 @@ future<> storage_service::force_remove_completion() { co_await ss.excise(tokens_set, *endpoint, host_id, pid); slogger.info("force_remove_completion: removing endpoint {} from group 0", *endpoint); - assert(ss._group0); + SCYLLA_ASSERT(ss._group0); bool raft_available = co_await ss._group0->wait_for_raft(); if (raft_available) { co_await ss._group0->remove_from_group0(raft::server_id{host_id.uuid()}); diff --git a/service/tablet_allocator.cc b/service/tablet_allocator.cc index 61787712f9..5acecc618f 100644 --- a/service/tablet_allocator.cc +++ b/service/tablet_allocator.cc @@ -12,6 +12,7 @@ #include "replica/database.hh" #include "service/migration_listener.hh" #include "service/tablet_allocator.hh" +#include "utils/assert.hh" #include "utils/error_injection.hh" #include "utils/stall_free.hh" #include "db/config.hh" @@ -1945,7 +1946,7 @@ public: tablet_allocator_impl(tablet_allocator_impl&&) = delete; // "this" captured. 
~tablet_allocator_impl() { - assert(_stopped); + SCYLLA_ASSERT(_stopped); } future<> stop() { diff --git a/service/topology_coordinator.cc b/service/topology_coordinator.cc index 1e40d7d188..0bd63facee 100644 --- a/service/topology_coordinator.cc +++ b/service/topology_coordinator.cc @@ -42,6 +42,7 @@ #include "service/tablet_allocator.hh" #include "service/topology_state_machine.hh" #include "topology_mutation.hh" +#include "utils/assert.hh" #include "utils/error_injection.hh" #include "utils/stall_free.hh" #include "utils/to_string.hh" @@ -311,7 +312,7 @@ class topology_coordinator : public endpoint_lifecycle_subscriber { auto& topo = _topo_sm._topology; auto it = topo.find(id); - assert(it); + SCYLLA_ASSERT(it); std::optional req; auto rit = topo.requests.find(id); @@ -1581,7 +1582,7 @@ class topology_coordinator : public endpoint_lifecycle_subscriber { switch (node.rs->state) { case node_state::bootstrapping: { - assert(!node.rs->ring); + SCYLLA_ASSERT(!node.rs->ring); auto num_tokens = std::get(node.req_param.value()).num_tokens; auto tokens_string = std::get(node.req_param.value()).tokens_string; // A node have just been accepted and does not have tokens assigned yet @@ -1610,7 +1611,7 @@ class topology_coordinator : public endpoint_lifecycle_subscriber { } break; case node_state::replacing: { - assert(!node.rs->ring); + SCYLLA_ASSERT(!node.rs->ring); // Make sure all nodes are no longer trying to write to a node being replaced. 
This is important if the new node have the same IP, so that old write will not // go to the new node by mistake try { @@ -1629,8 +1630,8 @@ class topology_coordinator : public endpoint_lifecycle_subscriber { auto replaced_id = std::get(node.req_param.value()).replaced_id; auto it = _topo_sm._topology.normal_nodes.find(replaced_id); - assert(it != _topo_sm._topology.normal_nodes.end()); - assert(it->second.ring && it->second.state == node_state::normal); + SCYLLA_ASSERT(it != _topo_sm._topology.normal_nodes.end()); + SCYLLA_ASSERT(it->second.ring && it->second.state == node_state::normal); topology_mutation_builder builder(node.guard.write_timestamp()); @@ -2152,7 +2153,7 @@ class topology_coordinator : public endpoint_lifecycle_subscriber { rtbuilder.set("start_time", db_clock::now()); switch (node.request.value()) { case topology_request::join: { - assert(!node.rs->ring); + SCYLLA_ASSERT(!node.rs->ring); // Write chosen tokens through raft. builder.set_transition_state(topology::transition_state::join_group0) .with_node(node.id) @@ -2163,7 +2164,7 @@ class topology_coordinator : public endpoint_lifecycle_subscriber { break; } case topology_request::leave: - assert(node.rs->ring); + SCYLLA_ASSERT(node.rs->ring); // start decommission and put tokens of decommissioning nodes into write_both_read_old state // meaning that reads will go to the replica being decommissioned // but writes will go to new owner as well @@ -2176,7 +2177,7 @@ class topology_coordinator : public endpoint_lifecycle_subscriber { "start decommission"); break; case topology_request::remove: { - assert(node.rs->ring); + SCYLLA_ASSERT(node.rs->ring); builder.set_transition_state(topology::transition_state::tablet_draining) .set_version(_topo_sm._topology.version + 1) @@ -2188,7 +2189,7 @@ class topology_coordinator : public endpoint_lifecycle_subscriber { break; } case topology_request::replace: { - assert(!node.rs->ring); + SCYLLA_ASSERT(!node.rs->ring); 
builder.set_transition_state(topology::transition_state::join_group0) .with_node(node.id) @@ -2289,7 +2290,7 @@ class topology_coordinator : public endpoint_lifecycle_subscriber { auto id = node.id; - assert(!_topo_sm._topology.transition_nodes.empty()); + SCYLLA_ASSERT(!_topo_sm._topology.transition_nodes.empty()); release_node(std::move(node)); @@ -2924,7 +2925,7 @@ future<> topology_coordinator::stop() { // but let's check all of them because we never reset these holders // once they are added as barriers for (auto& [stage, barrier]: tablet_state.barriers) { - assert(barrier.has_value()); + SCYLLA_ASSERT(barrier.has_value()); try { co_await std::move(*barrier); } catch (...) { diff --git a/service/topology_mutation.cc b/service/topology_mutation.cc index c892e67674..3be5273645 100644 --- a/service/topology_mutation.cc +++ b/service/topology_mutation.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "db/system_keyspace.hh" #include "topology_mutation.hh" #include "types/tuple.hh" @@ -34,7 +35,7 @@ topology_node_mutation_builder::topology_node_mutation_builder(topology_mutation template Builder& topology_mutation_builder_base::apply_atomic(const char* cell, const data_value& value) { const column_definition* cdef = self().schema().get_column_definition(cell); - assert(cdef); + SCYLLA_ASSERT(cdef); self().row().apply(*cdef, atomic_cell::make_live(*cdef->type, self().timestamp(), cdef->type->decompose(value), self().ttl())); return self(); } @@ -44,7 +45,7 @@ template requires std::convertible_to, data_value> Builder& topology_mutation_builder_base::apply_set(const char* cell, collection_apply_mode apply_mode, const C& c) { const column_definition* cdef = self().schema().get_column_definition(cell); - assert(cdef); + SCYLLA_ASSERT(cdef); auto vtype = static_pointer_cast(cdef->type)->get_elements_type(); std::set cset(vtype->as_less_comparator()); @@ -69,7 +70,7 @@ Builder& 
topology_mutation_builder_base::apply_set(const char* cell, co template Builder& topology_mutation_builder_base::del(const char* cell) { auto cdef = self().schema().get_column_definition(cell); - assert(cdef); + SCYLLA_ASSERT(cdef); if (!cdef->type->is_multi_cell()) { self().row().apply(*cdef, atomic_cell::make_dead(self().timestamp(), gc_clock::now())); } else { diff --git a/sstables/compress.cc b/sstables/compress.cc index 7b55f9dc32..9e62250c41 100644 --- a/sstables/compress.cc +++ b/sstables/compress.cc @@ -20,6 +20,7 @@ #include "exceptions.hh" #include "unimplemented.hh" #include "segmented_compress_params.hh" +#include "utils/assert.hh" #include "utils/class_registrator.hh" #include "reader_permit.hh" @@ -155,7 +156,7 @@ void compression::segmented_offsets::state::update_position_trackers(std::size_t } void compression::segmented_offsets::init(uint32_t chunk_size) { - assert(chunk_size != 0); + SCYLLA_ASSERT(chunk_size != 0); _chunk_size = chunk_size; @@ -436,7 +437,7 @@ public: virtual future> skip(uint64_t n) override { _pos += n; - assert(_pos <= _end_pos); + SCYLLA_ASSERT(_pos <= _end_pos); if (_pos == _end_pos) { return make_ready_future>(); } diff --git a/sstables/compress.hh b/sstables/compress.hh index a7fd95ca2f..a9ef380534 100644 --- a/sstables/compress.hh +++ b/sstables/compress.hh @@ -33,6 +33,7 @@ // are read using O_DIRECT), nor uncompressed data. We intend to cache high- // level Cassandra rows, not disk blocks. 
+#include "utils/assert.hh" #include #include #include @@ -171,7 +172,7 @@ struct compression { const_iterator(const const_iterator& other) = default; const_iterator& operator=(const const_iterator& other) { - assert(&_offsets == &other._offsets); + SCYLLA_ASSERT(&_offsets == &other._offsets); _index = other._index; return *this; } diff --git a/sstables/consumer.hh b/sstables/consumer.hh index 576c6dcd32..94ac9aacbf 100644 --- a/sstables/consumer.hh +++ b/sstables/consumer.hh @@ -17,6 +17,7 @@ #include #include "bytes.hh" #include "reader_permit.hh" +#include "utils/assert.hh" #include "utils/fragmented_temporary_buffer.hh" #include "utils/small_vector.hh" @@ -307,7 +308,7 @@ private: // Reads bytes belonging to an integer of size len. Returns true // if a full integer is now available. bool process_int(temporary_buffer& data, unsigned len) { - assert(_pos < len); + SCYLLA_ASSERT(_pos < len); auto n = std::min((size_t)(len - _pos), data.size()); std::copy(data.begin(), data.begin() + n, _read_int.bytes + _pos); data.trim_front(n); @@ -534,7 +535,7 @@ public: while (data || (!primitive_consumer::active() && non_consuming())) { // The primitive_consumer must finish before the enclosing state machine can continue. 
if (__builtin_expect(primitive_consumer::consume(data) == read_status::waiting, false)) { - assert(data.size() == 0); + SCYLLA_ASSERT(data.size() == 0); return proceed::yes; } auto ret = state_processor().process_state(data); @@ -584,7 +585,7 @@ public: }, [this, &data, orig_data_size](skip_bytes skip) { // we only expect skip_bytes to be used if reader needs to skip beyond the provided buffer // otherwise it should just trim_front and proceed as usual - assert(data.size() == 0); + SCYLLA_ASSERT(data.size() == 0); _remain -= orig_data_size; if (skip.get_value() >= _remain) { skip_bytes skip_remaining(_remain); @@ -602,11 +603,11 @@ public: } future<> fast_forward_to(size_t begin, size_t end) { - assert(begin >= _stream_position.position); + SCYLLA_ASSERT(begin >= _stream_position.position); auto n = begin - _stream_position.position; _stream_position.position = begin; - assert(end >= _stream_position.position); + SCYLLA_ASSERT(end >= _stream_position.position); _remain = end - _stream_position.position; primitive_consumer::reset(); diff --git a/sstables/downsampling.hh b/sstables/downsampling.hh index 6b63c42d31..20efe4e2f7 100644 --- a/sstables/downsampling.hh +++ b/sstables/downsampling.hh @@ -10,6 +10,7 @@ #pragma once +#include "utils/assert.hh" #include #include #include @@ -43,14 +44,14 @@ public: * @return A list of `sampling_level` unique indices between 0 and `sampling_level` */ static const std::vector& get_sampling_pattern(int sampling_level) { - assert(sampling_level > 0 && sampling_level <= BASE_SAMPLING_LEVEL); + SCYLLA_ASSERT(sampling_level > 0 && sampling_level <= BASE_SAMPLING_LEVEL); auto& entry = _sample_pattern_cache[sampling_level-1]; if (!entry.empty()) { return entry; } if (sampling_level <= 1) { - assert(_sample_pattern_cache[0].empty()); + SCYLLA_ASSERT(_sample_pattern_cache[0].empty()); _sample_pattern_cache[0].push_back(0); return _sample_pattern_cache[0]; } @@ -95,7 +96,7 @@ public: * @return a list of original indexes for current 
summary entries */ static const std::vector& get_original_indexes(int sampling_level) { - assert(sampling_level > 0 && sampling_level <= BASE_SAMPLING_LEVEL); + SCYLLA_ASSERT(sampling_level > 0 && sampling_level <= BASE_SAMPLING_LEVEL); auto& entry = _original_index_cache[sampling_level-1]; if (!entry.empty()) { return entry; @@ -127,7 +128,7 @@ public: * @return the number of partitions before the next index summary entry, inclusive on one end */ static int get_effective_index_interval_after_index(int index, int sampling_level, int min_index_interval) { - assert(index >= -1); + SCYLLA_ASSERT(index >= -1); const std::vector& original_indexes = get_original_indexes(sampling_level); if (index == -1) { return original_indexes[0] * min_index_interval; diff --git a/sstables/generation_type.hh b/sstables/generation_type.hh index 3f101889f5..4885ad8cd7 100644 --- a/sstables/generation_type.hh +++ b/sstables/generation_type.hh @@ -21,6 +21,7 @@ #include #include #include "types/types.hh" +#include "utils/assert.hh" #include "utils/UUID_gen.hh" #include "log.hh" @@ -180,7 +181,7 @@ public: /// way to determine that is overlapping its partition-ranges with the shard's /// owned ranges. static bool maybe_owned_by_this_shard(const sstables::generation_type& gen) { - assert(bool(gen)); + SCYLLA_ASSERT(bool(gen)); int64_t hint = 0; if (gen.is_uuid_based()) { hint = std::hash{}(gen.as_uuid()); diff --git a/sstables/index_reader.hh b/sstables/index_reader.hh index 21e7a8d77b..c18324e027 100644 --- a/sstables/index_reader.hh +++ b/sstables/index_reader.hh @@ -7,6 +7,7 @@ */ #pragma once +#include "utils/assert.hh" #include "sstables.hh" #include "consumer.hh" #include "downsampling.hh" @@ -520,7 +521,7 @@ private: // Must be called for non-decreasing summary_idx. 
future<> advance_to_page(index_bound& bound, uint64_t summary_idx) { sstlog.trace("index {}: advance_to_page({}), bound {}", fmt::ptr(this), summary_idx, fmt::ptr(&bound)); - assert(!bound.current_list || bound.current_summary_idx <= summary_idx); + SCYLLA_ASSERT(!bound.current_list || bound.current_summary_idx <= summary_idx); if (bound.current_list && bound.current_summary_idx == summary_idx) { sstlog.trace("index {}: same page", fmt::ptr(this)); return make_ready_future<>(); @@ -626,7 +627,7 @@ private: // Valid if partition_data_ready(bound) index_entry& current_partition_entry(index_bound& bound) { - assert(bound.current_list); + SCYLLA_ASSERT(bound.current_list); return *bound.current_list->_entries[bound.current_index_idx]; } @@ -691,7 +692,7 @@ private: // is no G in that bucket so we read the following one to get the // position (see the advance_to_page() call below). After we've got it, it's time to // get J] position. Again, summary points us to the first bucket and we - // hit an assert since the reader is already at the second bucket and we + // hit an SCYLLA_ASSERT since the reader is already at the second bucket and we // cannot go backward. // The solution is this condition above. If our lookup requires reading // the previous bucket we assume that the entry doesn't exist and return @@ -739,7 +740,7 @@ private: // So need to make sure first that it is read if (!partition_data_ready(_lower_bound)) { return read_partition_data().then([this, pos] { - assert(partition_data_ready()); + SCYLLA_ASSERT(partition_data_ready()); return advance_upper_past(pos); }); } @@ -816,12 +817,12 @@ public: // Ensures that partition_data_ready() returns true. 
// Can be called only when !eof() future<> read_partition_data() { - assert(!eof()); + SCYLLA_ASSERT(!eof()); if (partition_data_ready(_lower_bound)) { return make_ready_future<>(); } // The only case when _current_list may be missing is when the cursor is at the beginning - assert(_lower_bound.current_summary_idx == 0); + SCYLLA_ASSERT(_lower_bound.current_summary_idx == 0); return advance_to_page(_lower_bound, 0); } @@ -920,7 +921,7 @@ public: if (!partition_data_ready()) { return read_partition_data().then([this, pos] { sstlog.trace("index {}: page done", fmt::ptr(this)); - assert(partition_data_ready(_lower_bound)); + SCYLLA_ASSERT(partition_data_ready(_lower_bound)); return advance_to(pos); }); } @@ -1008,7 +1009,7 @@ public: // so need to make sure first that the lower bound partition data is in memory. if (!partition_data_ready(_lower_bound)) { return read_partition_data().then([this, pos] { - assert(partition_data_ready()); + SCYLLA_ASSERT(partition_data_ready()); return advance_reverse(pos); }); } @@ -1053,7 +1054,7 @@ public: // // Preconditions: sstable version >= mc, partition_data_ready(). 
future> last_block_offset() { - assert(partition_data_ready()); + SCYLLA_ASSERT(partition_data_ready()); auto cur = current_clustered_cursor(); if (!cur) { diff --git a/sstables/kl/reader.cc b/sstables/kl/reader.cc index 16a4065db5..d709354931 100644 --- a/sstables/kl/reader.cc +++ b/sstables/kl/reader.cc @@ -15,6 +15,7 @@ #include "clustering_key_filter.hh" #include "clustering_ranges_walker.hh" #include "concrete_types.hh" +#include "utils/assert.hh" #include "utils/to_string.hh" namespace sstables { @@ -266,7 +267,7 @@ private: std::optional _pending_collection = {}; collection_mutation& pending_collection(const column_definition *cdef) { - assert(cdef->is_multi_cell() && "frozen set should behave like a cell\n"); + SCYLLA_ASSERT(cdef->is_multi_cell() && "frozen set should behave like a cell\n"); if (!_pending_collection || _pending_collection->is_new_collection(cdef)) { flush_pending_collection(*_schema); _pending_collection = collection_mutation(cdef); @@ -434,7 +435,7 @@ public: flush_pending_collection(*_schema); // If _ready is already set we have a bug: get_mutation_fragment() // was not called, and below we will lose one clustering row! 
- assert(!_ready); + SCYLLA_ASSERT(!_ready); if (!_skip_in_progress) { _ready = std::exchange(_in_progress, { }); return push_ready_fragments_with_ready_set(); @@ -1122,7 +1123,7 @@ public: _state = state::ATOM_START; break; default: - assert(0); + SCYLLA_ASSERT(0); } _consumer.reset(el); _gen = do_process_state(); @@ -1212,7 +1213,7 @@ private: _read_enabled = false; return make_ready_future<>(); } - assert(_index_reader->element_kind() == indexable_element::partition); + SCYLLA_ASSERT(_index_reader->element_kind() == indexable_element::partition); return skip_to(_index_reader->element_kind(), start).then([this] { _sst->get_stats().on_partition_seek(); }); @@ -1292,7 +1293,7 @@ private: if (!pos || pos->is_before_all_fragments(*_schema)) { return make_ready_future<>(); } - assert (_current_partition_key); + SCYLLA_ASSERT (_current_partition_key); return [this] { if (!_index_in_current_partition) { _index_in_current_partition = true; @@ -1334,7 +1335,7 @@ private: } auto [begin, end] = _index_reader->data_file_positions(); - assert(end); + SCYLLA_ASSERT(end); if (_single_partition_read) { _read_enabled = (begin != *end); @@ -1383,11 +1384,11 @@ public: _partition_finished = true; _before_partition = true; _end_of_stream = false; - assert(_index_reader); + SCYLLA_ASSERT(_index_reader); auto f1 = _index_reader->advance_to(pr); return f1.then([this] { auto [start, end] = _index_reader->data_file_positions(); - assert(end); + SCYLLA_ASSERT(end); if (start != *end) { _read_enabled = true; _index_in_current_partition = true; diff --git a/sstables/mutation_fragment_filter.hh b/sstables/mutation_fragment_filter.hh index 62f66e6efa..5b7d90e87b 100644 --- a/sstables/mutation_fragment_filter.hh +++ b/sstables/mutation_fragment_filter.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include "mutation/mutation_fragment.hh" #include "clustering_ranges_walker.hh" #include "clustering_key_filter.hh" @@ -129,7 +130,7 @@ public: * query ranges tracked by _walker. 
*/ std::optional fast_forward_to(position_range r) { - assert(_fwd); + SCYLLA_ASSERT(_fwd); _fwd_end = std::move(r).end(); _out_of_range = !_walker.advance_to(r.start(), _fwd_end); diff --git a/sstables/mx/bsearch_clustered_cursor.hh b/sstables/mx/bsearch_clustered_cursor.hh index 286f71b26b..c19e164dbd 100644 --- a/sstables/mx/bsearch_clustered_cursor.hh +++ b/sstables/mx/bsearch_clustered_cursor.hh @@ -12,6 +12,7 @@ #include "sstables/column_translation.hh" #include "parsers.hh" #include "schema/schema.hh" +#include "utils/assert.hh" #include "utils/cached_file.hh" #include "utils/to_string.hh" @@ -106,13 +107,13 @@ public: const schema& _s; bool operator()(const promoted_index_block& lhs, position_in_partition_view rhs) const { - assert(lhs.start); + SCYLLA_ASSERT(lhs.start); position_in_partition::less_compare less(_s); return less(*lhs.start, rhs); } bool operator()(position_in_partition_view lhs, const promoted_index_block& rhs) const { - assert(rhs.start); + SCYLLA_ASSERT(rhs.start); position_in_partition::less_compare less(_s); return less(lhs, *rhs.start); } diff --git a/sstables/mx/parsers.hh b/sstables/mx/parsers.hh index dfd3cefaed..8bf2943124 100644 --- a/sstables/mx/parsers.hh +++ b/sstables/mx/parsers.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include "sstables/consumer.hh" #include "sstables/types.hh" #include "sstables/column_translation.hh" @@ -282,7 +283,7 @@ public: } [[fallthrough]]; case state::END_OPEN_MARKER_FLAG: - assert(_primitive._i64 + width_base > 0); + SCYLLA_ASSERT(_primitive._i64 + width_base > 0); _width = (_primitive._i64 + width_base); if (_primitive.read_8(data) != read_status::ready) { _state = state::END_OPEN_MARKER_LOCAL_DELETION_TIME; diff --git a/sstables/mx/reader.cc b/sstables/mx/reader.cc index 667a338df3..8903eeed38 100644 --- a/sstables/mx/reader.cc +++ b/sstables/mx/reader.cc @@ -14,6 +14,7 @@ #include "sstables/m_format_read_helpers.hh" #include "sstables/sstable_mutation_reader.hh" #include 
"sstables/processing_result_generator.hh" +#include "utils/assert.hh" #include "utils/to_string.hh" namespace sstables { @@ -516,7 +517,7 @@ public: return consume_range_tombstone_boundary(std::move(pos), end_tombstone, start_tombstone); } default: - assert(false && "Invalid boundary type"); + SCYLLA_ASSERT(false && "Invalid boundary type"); } } @@ -1330,7 +1331,7 @@ public: " partition range: {}", pr)); } // FIXME: if only the defaults were better... - //assert(fwd_mr == mutation_reader::forwarding::no); + //SCYLLA_ASSERT(fwd_mr == mutation_reader::forwarding::no); } } @@ -1375,7 +1376,7 @@ private: _read_enabled = false; return make_ready_future<>(); } - assert(_index_reader->element_kind() == indexable_element::partition); + SCYLLA_ASSERT(_index_reader->element_kind() == indexable_element::partition); return skip_to(_index_reader->element_kind(), start).then([this] { _sst->get_stats().on_partition_seek(); }); @@ -1455,7 +1456,7 @@ private: if (!pos || pos->is_before_all_fragments(*_schema)) { return make_ready_future<>(); } - assert (_current_partition_key); + SCYLLA_ASSERT (_current_partition_key); return [this] { if (!_index_in_current_partition) { _index_in_current_partition = true; @@ -1473,7 +1474,7 @@ private: // The reversing data source will notice the skip and update the data ranges // from which it prepares the data given to us. 
- assert(_reversed_read_sstable_position); + SCYLLA_ASSERT(_reversed_read_sstable_position); auto ip = _index_reader->data_file_positions(); if (ip.end >= *_reversed_read_sstable_position) { // The reversing data source was already ahead (in reverse - its position was smaller) @@ -1542,7 +1543,7 @@ private: } auto [begin, end] = _index_reader->data_file_positions(); - assert(end); + SCYLLA_ASSERT(end); if (_single_partition_read) { _read_enabled = (begin != *end); @@ -1601,11 +1602,11 @@ public: _partition_finished = true; _before_partition = true; _end_of_stream = false; - assert(_index_reader); + SCYLLA_ASSERT(_index_reader); auto f1 = _index_reader->advance_to(pr); return f1.then([this] { auto [start, end] = _index_reader->data_file_positions(); - assert(end); + SCYLLA_ASSERT(end); if (start != *end) { _read_enabled = true; _index_in_current_partition = true; @@ -2029,7 +2030,7 @@ public: case bound_kind_m::excl_end_incl_start: return consume_range_tombstone(ecp, bound_kind::incl_start, start_tombstone); default: - assert(false && "Invalid boundary type"); + SCYLLA_ASSERT(false && "Invalid boundary type"); } } diff --git a/sstables/mx/types.hh b/sstables/mx/types.hh index df875328a3..2f80c1f18b 100644 --- a/sstables/mx/types.hh +++ b/sstables/mx/types.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include "clustering_bounds_comparator.hh" #include @@ -62,7 +63,7 @@ inline bool is_start(bound_kind_m kind) { } inline bound_kind to_bound_kind(bound_kind_m kind) { - assert(is_bound_kind(kind)); + SCYLLA_ASSERT(is_bound_kind(kind)); using underlying_type = std::underlying_type_t; return bound_kind{static_cast(kind)}; } @@ -73,12 +74,12 @@ inline bound_kind_m to_bound_kind_m(bound_kind kind) { } inline bound_kind boundary_to_start_bound(bound_kind_m kind) { - assert(is_boundary_between_adjacent_intervals(kind)); + SCYLLA_ASSERT(is_boundary_between_adjacent_intervals(kind)); return (kind == bound_kind_m::incl_end_excl_start) ? 
bound_kind::excl_start : bound_kind::incl_start; } inline bound_kind boundary_to_end_bound(bound_kind_m kind) { - assert(is_boundary_between_adjacent_intervals(kind)); + SCYLLA_ASSERT(is_boundary_between_adjacent_intervals(kind)); return (kind == bound_kind_m::incl_end_excl_start) ? bound_kind::incl_end : bound_kind::excl_end; } diff --git a/sstables/mx/writer.cc b/sstables/mx/writer.cc index 8ec6a3f048..d30f53a984 100644 --- a/sstables/mx/writer.cc +++ b/sstables/mx/writer.cc @@ -16,6 +16,7 @@ #include "sstables/mx/types.hh" #include "db/config.hh" #include "mutation/atomic_cell.hh" +#include "utils/assert.hh" #include "utils/exceptions.hh" #include "db/large_data_handler.hh" @@ -91,7 +92,7 @@ public: {} void increment() { - assert(_range); + SCYLLA_ASSERT(_range); if (!_range->next()) { _range = nullptr; } @@ -102,7 +103,7 @@ public: } const ValueType dereference() const { - assert(_range); + SCYLLA_ASSERT(_range); return _range->get_value(); } @@ -153,7 +154,7 @@ public: auto limit = std::min(_serialization_limit_size, _offset + clustering_block::max_block_size); _current_block = {}; - assert (_offset % clustering_block::max_block_size == 0); + SCYLLA_ASSERT (_offset % clustering_block::max_block_size == 0); while (_offset < limit) { auto shift = _offset % clustering_block::max_block_size; if (_offset < _prefix.size(_schema)) { @@ -280,7 +281,7 @@ public: ++_current_index; } } else { - assert(_mode == encoding_mode::large_encode_missing); + SCYLLA_ASSERT(_mode == encoding_mode::large_encode_missing); while (_current_index < total_size) { auto cell = _row.find_cell(_columns[_current_index].get().id); if (!cell) { @@ -1062,7 +1063,7 @@ void writer::write_cell(bytes_ostream& writer, const clustering_key_prefix* clus if (cdef.is_counter()) { if (!is_deleted) { - assert(!cell.is_counter_update()); + SCYLLA_ASSERT(!cell.is_counter_update()); auto ccv = counter_cell_view(cell); write_counter_value(ccv, writer, _sst.get_version(), [] (bytes_ostream& out, uint32_t value) 
{ return write_vint(out, value); @@ -1334,7 +1335,7 @@ template requires Writer static void write_clustering_prefix(sstable_version_types v, W& writer, bound_kind_m kind, const schema& s, const clustering_key_prefix& clustering) { - assert(kind != bound_kind_m::static_clustering); + SCYLLA_ASSERT(kind != bound_kind_m::static_clustering); write(v, writer, kind); auto is_ephemerally_full = ephemerally_full_prefix{s.is_compact_table()}; if (kind != bound_kind_m::clustering) { diff --git a/sstables/partition_index_cache.hh b/sstables/partition_index_cache.hh index 582bd94a3a..e7aa7dfa20 100644 --- a/sstables/partition_index_cache.hh +++ b/sstables/partition_index_cache.hh @@ -13,6 +13,7 @@ #include #include #include +#include "utils/assert.hh" #include "utils/bptree.hh" #include "utils/lru.hh" #include "utils/lsa/weak_ptr.hh" @@ -57,7 +58,7 @@ private: // Live entry_ptr should keep the entry alive, except when the entry failed on loading. // In that case, entry_ptr holders are not supposed to use the pointer, so it's safe // to nullify those entry_ptrs. - assert(!ready()); + SCYLLA_ASSERT(!ready()); } } @@ -206,7 +207,7 @@ public: return with_allocator(_region.allocator(), [&] { auto it_and_flag = _cache.emplace(key, this, key); entry &cp = *it_and_flag.first; - assert(it_and_flag.second); + SCYLLA_ASSERT(it_and_flag.second); try { return share(cp); } catch (...) 
{ diff --git a/sstables/promoted_index_blocks_reader.hh b/sstables/promoted_index_blocks_reader.hh index 9c10a7c328..4238086c80 100644 --- a/sstables/promoted_index_blocks_reader.hh +++ b/sstables/promoted_index_blocks_reader.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include "consumer.hh" #include "column_translation.hh" #include "sstables/mx/parsers.hh" @@ -172,7 +173,7 @@ public: std::visit([this, &data] (auto& ctx) mutable { return process_state(data, ctx); }, _ctx); if (_mode == consuming_mode::consume_until) { - assert(_pos); + SCYLLA_ASSERT(_pos); auto cmp_with_start = [this, pos_cmp = promoted_index_block_compare(_s)] (position_in_partition_view pos, const promoted_index_block& block) -> bool { return pos_cmp(pos, block.start(_s)); diff --git a/sstables/random_access_reader.hh b/sstables/random_access_reader.hh index 97982d5905..c240c5a7e6 100644 --- a/sstables/random_access_reader.hh +++ b/sstables/random_access_reader.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include #include @@ -25,7 +26,7 @@ protected: virtual input_stream open_at(uint64_t pos) = 0; void set(input_stream in) { - assert(!_in); + SCYLLA_ASSERT(!_in); _in = std::make_unique>(std::move(in)); } diff --git a/sstables/scanning_clustered_index_cursor.hh b/sstables/scanning_clustered_index_cursor.hh index e98af95e39..ab88c019be 100644 --- a/sstables/scanning_clustered_index_cursor.hh +++ b/sstables/scanning_clustered_index_cursor.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include "sstables/index_entry.hh" #include "sstables/promoted_index_blocks_reader.hh" #include "schema/schema.hh" @@ -95,7 +96,7 @@ private: // End open marker can be only engaged in SSTables 3.x ('mc' format) and never in ka/la auto end_pos = prev->end(_s); position_in_partition_view* open_rt_pos = std::get_if(&end_pos); - assert(open_rt_pos); + SCYLLA_ASSERT(open_rt_pos); return skip_info{offset, tombstone(*prev->end_open_marker()), 
position_in_partition{*open_rt_pos}}; } } diff --git a/sstables/sstable_directory.cc b/sstables/sstable_directory.cc index 32e62fe3c9..923fd810bb 100644 --- a/sstables/sstable_directory.cc +++ b/sstables/sstable_directory.cc @@ -20,6 +20,7 @@ #include "compaction/compaction_manager.hh" #include "log.hh" #include "sstable_directory.hh" +#include "utils/assert.hh" #include "utils/lister.hh" #include "utils/overloaded_functor.hh" #include "utils/directories.hh" @@ -433,7 +434,7 @@ sstable_directory::move_foreign_sstables(sharded& source_dire return make_ready_future<>(); } // Should be empty, since an SSTable that belongs to this shard is not remote. - assert(shard_id != this_shard_id()); + SCYLLA_ASSERT(shard_id != this_shard_id()); dirlog.debug("Moving {} unshared SSTables to shard {} ", info_vec.size(), shard_id); return source_directory.invoke_on(shard_id, &sstables::sstable_directory::load_foreign_sstables, std::move(info_vec)); }); @@ -469,7 +470,7 @@ sstable_directory::collect_output_unshared_sstables(std::vectorget_shards_for_this_sstable(); - assert(shards.size() == 1); + SCYLLA_ASSERT(shards.size() == 1); auto shard = shards[0]; if (shard == this_shard_id()) { diff --git a/sstables/sstable_set.cc b/sstables/sstable_set.cc index aac5774469..c6ccb9af32 100644 --- a/sstables/sstable_set.cc +++ b/sstables/sstable_set.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include @@ -693,8 +694,8 @@ public: // by !empty(bound) and `_it` invariant: // _it != _end, _it->first <= bound, and filter(*_it->second) == true - assert(_cmp(_it->first, bound) <= 0); - // we don't assert(filter(*_it->second)) due to the requirement that `filter` is called at most once for each sstable + SCYLLA_ASSERT(_cmp(_it->first, bound) <= 0); + // we don't SCYLLA_ASSERT(filter(*_it->second)) due to the requirement that `filter` is called at most once for each sstable // Find all sstables with the same position as `_it` (they form a 
contiguous range in the container). auto next = std::find_if(std::next(_it), _end, [this] (const value_t& v) { return _cmp(v.first, _it->first) != 0; }); @@ -1264,7 +1265,7 @@ sstable_set::create_single_key_sstable_reader( streamed_mutation::forwarding fwd, mutation_reader::forwarding fwd_mr, const sstable_predicate& predicate) const { - assert(pr.is_singular() && pr.start()->value().has_key()); + SCYLLA_ASSERT(pr.is_singular() && pr.start()->value().has_key()); return _impl->create_single_key_sstable_reader(cf, std::move(schema), std::move(permit), sstable_histogram, pr, slice, std::move(trace_state), fwd, fwd_mr, predicate); } @@ -1368,7 +1369,7 @@ sstable_set::make_local_shard_sstable_reader( { auto reader_factory_fn = [s, permit, &slice, trace_state, fwd, fwd_mr, &monitor_generator, &predicate] (shared_sstable& sst, const dht::partition_range& pr) mutable { - assert(!sst->is_shared()); + SCYLLA_ASSERT(!sst->is_shared()); if (!predicate(*sst)) { return make_empty_flat_reader_v2(s, permit); } diff --git a/sstables/sstables.cc b/sstables/sstables.cc index fbff3bf293..2a6f564a08 100644 --- a/sstables/sstables.cc +++ b/sstables/sstables.cc @@ -33,6 +33,7 @@ #include #include +#include "utils/assert.hh" #include "utils/error_injection.hh" #include "utils/to_string.hh" #include "data_dictionary/storage_options.hh" @@ -903,7 +904,7 @@ void file_writer::close() { // to work, because file stream would step on unaligned IO and S3 upload // stream would send completion message to the server and would lose any // subsequent write. 
- assert(!_closed && "file_writer already closed"); + SCYLLA_ASSERT(!_closed && "file_writer already closed"); std::exception_ptr ex; try { _out.flush().get(); @@ -1337,7 +1338,7 @@ future<> sstable::open_or_create_data(open_flags oflags, file_open_options optio future<> sstable::open_data(sstable_open_config cfg) noexcept { co_await open_or_create_data(open_flags::ro); co_await update_info_for_opened_data(cfg); - assert(!_shards.empty()); + SCYLLA_ASSERT(!_shards.empty()); auto* sm = _components->scylla_metadata->data.get(); if (sm) { // Sharding information uses a lot of memory and once we're doing with this computation we will no longer use it. @@ -1370,7 +1371,7 @@ future<> sstable::update_info_for_opened_data(sstable_open_config cfg) { auto size = co_await _index_file.size(); _index_file_size = size; - assert(!_cached_index_file); + SCYLLA_ASSERT(!_cached_index_file); _cached_index_file = seastar::make_shared(_index_file, _manager.get_cache_tracker().get_index_cached_file_stats(), _manager.get_cache_tracker().get_lru(), @@ -1646,7 +1647,7 @@ sstable::load_owner_shards(const dht::sharder& sharder) { } void prepare_summary(summary& s, uint64_t expected_partition_count, uint32_t min_index_interval) { - assert(expected_partition_count >= 1); + SCYLLA_ASSERT(expected_partition_count >= 1); s.header.min_index_interval = min_index_interval; s.header.sampling_level = downsampling::BASE_SAMPLING_LEVEL; @@ -1668,7 +1669,7 @@ future<> seal_summary(summary& s, s.header.size = s.entries.size(); s.header.size_at_full_sampling = sstable::get_size_at_full_sampling(state.partition_count, s.header.min_index_interval); - assert(first_key); // assume non-empty sstable + SCYLLA_ASSERT(first_key); // assume non-empty sstable s.first_key.value = first_key->get_bytes(); if (last_key) { @@ -2204,7 +2205,7 @@ sstring sstable::component_basename(const sstring& ks, const sstring& cf, versio case sstable::version_types::me: return v + "-" + g + "-" + f + "-" + component; } - assert(0 && 
"invalid version"); + SCYLLA_ASSERT(0 && "invalid version"); } sstring sstable::component_basename(const sstring& ks, const sstring& cf, version_types version, generation_type generation, diff --git a/sstables/sstables_manager.cc b/sstables/sstables_manager.cc index cbc8de3e12..83ddd33cd1 100644 --- a/sstables/sstables_manager.cc +++ b/sstables/sstables_manager.cc @@ -16,6 +16,7 @@ #include "db/config.hh" #include "gms/feature.hh" #include "gms/feature_service.hh" +#include "utils/assert.hh" #include "utils/s3/client.hh" #include "exceptions/exceptions.hh" @@ -46,9 +47,9 @@ sstables_manager::sstables_manager( } sstables_manager::~sstables_manager() { - assert(_closing); - assert(_active.empty()); - assert(_undergoing_close.empty()); + SCYLLA_ASSERT(_closing); + SCYLLA_ASSERT(_active.empty()); + SCYLLA_ASSERT(_undergoing_close.empty()); } storage_manager::storage_manager(const db::config& cfg, config stm_cfg) diff --git a/sstables/sstables_manager.hh b/sstables/sstables_manager.hh index 376a6b9283..442092fdec 100644 --- a/sstables/sstables_manager.hh +++ b/sstables/sstables_manager.hh @@ -12,6 +12,7 @@ #include #include +#include "utils/assert.hh" #include "utils/disk-error-handler.hh" #include "gc_clock.hh" #include "sstables/sstables.hh" @@ -143,12 +144,12 @@ public: size_t buffer_size = default_sstable_buffer_size); shared_ptr get_endpoint_client(sstring endpoint) const { - assert(_storage != nullptr); + SCYLLA_ASSERT(_storage != nullptr); return _storage->get_endpoint_client(std::move(endpoint)); } bool is_known_endpoint(sstring endpoint) const { - assert(_storage != nullptr); + SCYLLA_ASSERT(_storage != nullptr); return _storage->is_known_endpoint(std::move(endpoint)); } @@ -180,7 +181,7 @@ public: // Only for sstable::storage usage sstables::sstables_registry& sstables_registry() const noexcept { - assert(_sstables_registry && "sstables_registry is not plugged"); + SCYLLA_ASSERT(_sstables_registry && "sstables_registry is not plugged"); return 
*_sstables_registry; } diff --git a/sstables/storage.cc b/sstables/storage.cc index 52582667eb..aae41bc76e 100644 --- a/sstables/storage.cc +++ b/sstables/storage.cc @@ -25,6 +25,7 @@ #include "sstables/sstable_version.hh" #include "sstables/integrity_checked_file_impl.hh" #include "sstables/writer.hh" +#include "utils/assert.hh" #include "utils/lister.hh" #include "utils/overloaded_functor.hh" #include "utils/memory_data_sink.hh" @@ -127,7 +128,7 @@ future filesystem_storage::make_data_or_index_sink(sstable& sst, comp options.buffer_size = sst.sstable_buffer_size; options.write_behind = 10; - assert(type == component_type::Data || type == component_type::Index); + SCYLLA_ASSERT(type == component_type::Data || type == component_type::Index); return make_file_data_sink(type == component_type::Data ? std::move(sst._data_file) : std::move(sst._index_file), options); } @@ -606,7 +607,7 @@ future s3_storage::open_component(const sstable& sst, component_type type, } future s3_storage::make_data_or_index_sink(sstable& sst, component_type type) { - assert(type == component_type::Data || type == component_type::Index); + SCYLLA_ASSERT(type == component_type::Data || type == component_type::Index); // FIXME: if we have file size upper bound upfront, it's better to use make_upload_sink() instead co_return _client->make_upload_jumbo_sink(make_s3_object_name(sst, type)); } diff --git a/sstables/storage.hh b/sstables/storage.hh index d17cb55e2f..1760ba3992 100644 --- a/sstables/storage.hh +++ b/sstables/storage.hh @@ -9,6 +9,7 @@ #pragma once +#include "utils/assert.hh" #include #include @@ -40,13 +41,13 @@ class storage { // Internal, but can also be used by tests virtual future<> change_dir_for_test(sstring nd) { - assert(false && "Changing directory not implemented"); + SCYLLA_ASSERT(false && "Changing directory not implemented"); } virtual future<> create_links(const sstable& sst, const std::filesystem::path& dir) const { - assert(false && "Direct links creation not 
implemented"); + SCYLLA_ASSERT(false && "Direct links creation not implemented"); } virtual future<> move(const sstable& sst, sstring new_dir, generation_type generation, delayed_commit_changes* delay) { - assert(false && "Direct move not implemented"); + SCYLLA_ASSERT(false && "Direct move not implemented"); } public: diff --git a/streaming/session_info.cc b/streaming/session_info.cc index 314ea15e1b..54e127ae54 100644 --- a/streaming/session_info.cc +++ b/streaming/session_info.cc @@ -8,12 +8,13 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ +#include "utils/assert.hh" #include "streaming/session_info.hh" namespace streaming { void session_info::update_progress(progress_info new_progress) { - assert(peer == new_progress.peer); + SCYLLA_ASSERT(peer == new_progress.peer); auto& current_files = new_progress.dir == progress_info::direction::IN ? receiving_files : sending_files; current_files[new_progress.file_name] = new_progress; diff --git a/streaming/stream_session.cc b/streaming/stream_session.cc index b1c98f584f..475c12cb97 100644 --- a/streaming/stream_session.cc +++ b/streaming/stream_session.cc @@ -30,6 +30,7 @@ #include "consumer.hh" #include "readers/generating_v2.hh" #include "service/topology_guard.hh" +#include "utils/assert.hh" #include "utils/error_injection.hh" namespace streaming { @@ -543,7 +544,7 @@ void stream_session::add_transfer_ranges(sstring keyspace, dht::token_range_vect if (it == _transfers.end()) { stream_transfer_task task(shared_from_this(), cf_id, ranges); auto inserted = _transfers.emplace(cf_id, std::move(task)).second; - assert(inserted); + SCYLLA_ASSERT(inserted); } else { it->second.append_ranges(ranges); } diff --git a/table_helper.cc b/table_helper.cc index db78fd6ebc..8409963402 100644 --- a/table_helper.cc +++ b/table_helper.cc @@ -7,6 +7,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include #include "table_helper.hh" @@ -24,7 +25,7 @@ static schema_ptr 
parse_new_cf_statement(cql3::query_processor& qp, const sstrin auto parsed = cql3::query_processor::parse_statement(create_cql); cql3::statements::raw::cf_statement* parsed_cf_stmt = static_cast(parsed.get()); - (void)parsed_cf_stmt->keyspace(); // This will assert if cql statement did not contain keyspace + (void)parsed_cf_stmt->keyspace(); // This will SCYLLA_ASSERT if cql statement did not contain keyspace ::shared_ptr statement = static_pointer_cast( parsed_cf_stmt->prepare(db, qp.get_cql_stats())->statement); diff --git a/tasks/task_manager.cc b/tasks/task_manager.cc index c6a64044f0..7fc235fad3 100644 --- a/tasks/task_manager.cc +++ b/tasks/task_manager.cc @@ -20,6 +20,7 @@ #include "db/timeout_clock.hh" #include "message/messaging_service.hh" +#include "utils/assert.hh" #include "utils/overloaded_functor.hh" #include "tasks/task_handler.hh" #include "task_manager.hh" @@ -50,7 +51,7 @@ future<> task_manager::task::children::add_child(foreign_task_ptr task) { auto id = task->id(); auto inserted = _children.emplace(id, std::move(task)).second; - assert(inserted); + SCYLLA_ASSERT(inserted); } future<> task_manager::task::children::mark_as_finished(task_id id, task_essentials essentials) const { @@ -70,7 +71,7 @@ future task_manager::task::children::get_progress( co_await coroutine::parallel_for_each(_children, [&] (const auto& child_entry) -> future<> { const auto& child = child_entry.second; auto local_progress = co_await smp::submit_to(child.get_owner_shard(), [&child, &progress_units] { - assert(child->get_status().progress_units == progress_units); + SCYLLA_ASSERT(child->get_status().progress_units == progress_units); return child->get_progress(); }); progress += local_progress; @@ -431,7 +432,7 @@ future task_manager::virtual_task::impl::is_abortable() con task_manager::virtual_task::virtual_task(virtual_task_impl_ptr&& impl) noexcept : _impl(std::move(impl)) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); } future> 
task_manager::virtual_task::get_ids() const { @@ -562,7 +563,7 @@ void task_manager::module::register_task(task_ptr task) { } void task_manager::module::register_virtual_task(virtual_task_ptr task) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); auto group = task->get_group(); get_virtual_tasks()[group] = task; try { diff --git a/test/boost/bptree_test.cc b/test/boost/bptree_test.cc index 2a3f30b1e1..5dbb966ac7 100644 --- a/test/boost/bptree_test.cc +++ b/test/boost/bptree_test.cc @@ -12,6 +12,7 @@ #include #include +#include "utils/assert.hh" #include "utils/bptree.hh" #include "test/unit/tree_test_key.hh" @@ -253,7 +254,7 @@ public: tree_data(int key, int cookie) : _key(key), _cookie(cookie) {} int cookie() const { return _cookie; } int key() const { - assert(_key != -1); + SCYLLA_ASSERT(_key != -1); return _key; } }; diff --git a/test/boost/bytes_ostream_test.cc b/test/boost/bytes_ostream_test.cc index 132ad88d22..34ab3b39a6 100644 --- a/test/boost/bytes_ostream_test.cc +++ b/test/boost/bytes_ostream_test.cc @@ -8,6 +8,7 @@ #define BOOST_TEST_MODULE core +#include "utils/assert.hh" #include #include @@ -27,7 +28,7 @@ void append_sequence(bytes_ostream& buf, int count) { void assert_sequence(bytes_ostream& buf, int count) { auto in = ser::as_input_stream(buf.linearize()); - assert(buf.size() == count * sizeof(int)); + SCYLLA_ASSERT(buf.size() == count * sizeof(int)); for (int i = 0; i < count; i++) { auto val = ser::deserialize(in, boost::type()); BOOST_REQUIRE_EQUAL(val, i); @@ -163,7 +164,7 @@ BOOST_AUTO_TEST_CASE(test_fragment_iteration) { // If this fails, we will only have one fragment, and the test will be weak. // Bump up the 'count' if this is triggered. 
- assert(!buf2.is_linearized()); + SCYLLA_ASSERT(!buf2.is_linearized()); assert_sequence(buf2, count); } diff --git a/test/boost/cache_mutation_reader_test.cc b/test/boost/cache_mutation_reader_test.cc index 6f8ddf6360..b6182fe3ef 100644 --- a/test/boost/cache_mutation_reader_test.cc +++ b/test/boost/cache_mutation_reader_test.cc @@ -8,6 +8,7 @@ */ +#include "utils/assert.hh" #include #include "test/lib/scylla_test_case.hh" @@ -72,7 +73,7 @@ static void add_tombstone(mutation& m, range_tombstone rt) { static void set_row_continuous(mutation_partition& mp, int ck, is_continuous value) { auto it = mp.clustered_rows().find(make_ck(ck), rows_entry::tri_compare(*SCHEMA)); - assert(it != mp.clustered_rows().end()); + SCYLLA_ASSERT(it != mp.clustered_rows().end()); it->set_continuous(value); } diff --git a/test/boost/cdc_generation_test.cc b/test/boost/cdc_generation_test.cc index f2847d8f5e..7c4d38a5f3 100644 --- a/test/boost/cdc_generation_test.cc +++ b/test/boost/cdc_generation_test.cc @@ -8,6 +8,7 @@ #define BOOST_TEST_MODULE core +#include "utils/assert.hh" #include #include @@ -139,7 +140,7 @@ BOOST_AUTO_TEST_CASE(test_cdc_generation_limitting_multiple_vnodes_should_limit) cdc::topology_description result = cdc::limit_number_of_streams_if_needed(std::move(given)); - assert(streams_count_per_vnode.size() <= cdc::limit_of_streams_in_topology_description()); + SCYLLA_ASSERT(streams_count_per_vnode.size() <= cdc::limit_of_streams_in_topology_description()); size_t per_vnode_limit = cdc::limit_of_streams_in_topology_description() / streams_count_per_vnode.size(); for (auto& count : streams_count_per_vnode) { count = std::min(count, per_vnode_limit); diff --git a/test/boost/cdc_test.cc b/test/boost/cdc_test.cc index c8fd138af3..b8c1482cb1 100644 --- a/test/boost/cdc_test.cc +++ b/test/boost/cdc_test.cc @@ -31,6 +31,7 @@ #include "cql3/column_identifier.hh" +#include "utils/assert.hh" #include "utils/UUID_gen.hh" #include "utils/to_string.hh" @@ -269,7 +270,7 @@ 
SEASTAR_THREAD_TEST_CASE(test_permissions_of_cdc_description) { for (auto& t : {generations_v2, streams, timestamps}) { auto dot_pos = t.find_first_of('.'); - assert(dot_pos != std::string_view::npos && dot_pos != 0 && dot_pos != t.size() - 1); + SCYLLA_ASSERT(dot_pos != std::string_view::npos && dot_pos != 0 && dot_pos != t.size() - 1); BOOST_REQUIRE(e.local_db().has_schema(t.substr(0, dot_pos), t.substr(dot_pos + 1))); // Disallow DROP diff --git a/test/boost/commitlog_test.cc b/test/boost/commitlog_test.cc index 2e8e4abe2e..0a6344198c 100644 --- a/test/boost/commitlog_test.cc +++ b/test/boost/commitlog_test.cc @@ -29,6 +29,7 @@ #include #include +#include "utils/assert.hh" #include "utils/UUID_gen.hh" #include "test/lib/tmpdir.hh" #include "db/commitlog/commitlog.hh" @@ -802,7 +803,7 @@ SEASTAR_TEST_CASE(test_commitlog_chunk_truncation) { // Reading this segment will now get corruption at the above position, // right before where we have truncated the file. It will try to skip // to next chunk, which is past actual EOF. If #15269 is broken, this // will assert and crash in file_data_source_impl. If not, we should // get a corruption exception and no more entries past the corrupt one.
db::position_type pos = 0; try { @@ -888,7 +889,7 @@ SEASTAR_TEST_CASE(test_allocation_failure){ // Use us loads of memory so we can OOM at the appropriate place try { - assert(fragmented_temporary_buffer::default_fragment_size < size); + SCYLLA_ASSERT(fragmented_temporary_buffer::default_fragment_size < size); for (;;) { junk->emplace_back(new char[fragmented_temporary_buffer::default_fragment_size]); } diff --git a/test/boost/cql_functions_test.cc b/test/boost/cql_functions_test.cc index 9458ebb5a6..3f35143d81 100644 --- a/test/boost/cql_functions_test.cc +++ b/test/boost/cql_functions_test.cc @@ -22,6 +22,7 @@ #include #include "transport/messages/result_message.hh" +#include "utils/assert.hh" #include "utils/big_decimal.hh" #include "types/map.hh" #include "types/list.hh" @@ -316,7 +317,7 @@ SEASTAR_TEST_CASE(test_aggregate_functions_timeuuid_type) { timeuuid_native_type{utils::UUID("00000000-0000-1000-0000-000000000000")}, timeuuid_native_type{utils::UUID("00000000-0000-1000-0000-000000000001")}, timeuuid_native_type{utils::UUID("00000000-0000-1000-0000-000000000002")} ).test_count(); // min and max will fail, because we assert using UUID order, not timestamp order.
}); } diff --git a/test/boost/cql_query_test.cc b/test/boost/cql_query_test.cc index 34e0144b0a..765f121baf 100644 --- a/test/boost/cql_query_test.cc +++ b/test/boost/cql_query_test.cc @@ -29,6 +29,7 @@ #include "transport/messages/result_message.hh" #include "transport/messages/result_message_base.hh" #include "types/types.hh" +#include "utils/assert.hh" #include "utils/big_decimal.hh" #include "types/map.hh" #include "types/list.hh" @@ -3026,10 +3027,10 @@ SEASTAR_TEST_CASE(test_query_with_range_tombstones) { SEASTAR_TEST_CASE(test_alter_table_validation) { return do_with_cql_env([] (cql_test_env& e) { return e.execute_cql("create table tatv (p1 int, c1 int, c2 int, r1 int, r2 set, PRIMARY KEY (p1, c1, c2));").discard_result().then_wrapped([&e] (future<> f) { - assert(!f.failed()); + SCYLLA_ASSERT(!f.failed()); return e.execute_cql("alter table tatv drop r2;").discard_result(); }).then_wrapped([&e] (future<> f) { - assert(!f.failed()); + SCYLLA_ASSERT(!f.failed()); return e.execute_cql("alter table tatv add r2 list;").discard_result(); }).then_wrapped([&e] (future<> f) { assert_that_failed(f); @@ -3038,7 +3039,7 @@ SEASTAR_TEST_CASE(test_alter_table_validation) { assert_that_failed(f); return e.execute_cql("alter table tatv add r2 set;").discard_result(); }).then_wrapped([&e] (future<> f) { - assert(!f.failed()); + SCYLLA_ASSERT(!f.failed()); return e.execute_cql("alter table tatv rename r2 to r3;").discard_result(); }).then_wrapped([&e] (future<> f) { assert_that_failed(f); @@ -3050,16 +3051,16 @@ SEASTAR_TEST_CASE(test_alter_table_validation) { assert_that_failed(f); return e.execute_cql("alter table tatv add r3 map;").discard_result(); }).then_wrapped([&e] (future<> f) { - assert(!f.failed()); + SCYLLA_ASSERT(!f.failed()); return e.execute_cql("alter table tatv add r4 set;").discard_result(); }).then_wrapped([&e] (future<> f) { - assert(!f.failed()); + SCYLLA_ASSERT(!f.failed()); return e.execute_cql("alter table tatv drop r3;").discard_result(); 
}).then_wrapped([&e] (future<> f) { - assert(!f.failed()); + SCYLLA_ASSERT(!f.failed()); return e.execute_cql("alter table tatv drop r4;").discard_result(); }).then_wrapped([&e] (future<> f) { - assert(!f.failed()); + SCYLLA_ASSERT(!f.failed()); return e.execute_cql("alter table tatv add r3 map;").discard_result(); }).then_wrapped([&e] (future<> f) { assert_that_failed(f); @@ -3068,10 +3069,10 @@ SEASTAR_TEST_CASE(test_alter_table_validation) { assert_that_failed(f); return e.execute_cql("alter table tatv add r3 map;").discard_result(); }).then_wrapped([&e] (future<> f) { - assert(!f.failed()); + SCYLLA_ASSERT(!f.failed()); return e.execute_cql("alter table tatv add r4 set;").discard_result(); }).then_wrapped([] (future<> f) { - assert(!f.failed()); + SCYLLA_ASSERT(!f.failed()); }); }); } @@ -4706,7 +4707,7 @@ SEASTAR_TEST_CASE(test_select_serial_consistency) { auto check_fails = [&e] (const sstring& query, const source_location& loc = source_location::current()) { try { prepared_on_shard(e, query, {}, {}, db::consistency_level::SERIAL); - assert(false); + SCYLLA_ASSERT(false); } catch (const exceptions::invalid_request_exception& e) { testlog.info("Query '{}' failed as expected with error: {}", query, e); } catch (...) 
{ diff --git a/test/boost/database_test.cc b/test/boost/database_test.cc index b5bdc4e8a9..c2852296ba 100644 --- a/test/boost/database_test.cc +++ b/test/boost/database_test.cc @@ -27,6 +27,7 @@ #include "test/lib/key_utils.hh" #include "replica/database.hh" +#include "utils/assert.hh" #include "utils/lister.hh" #include "partition_slice_builder.hh" #include "mutation/frozen_mutation.hh" @@ -788,7 +789,7 @@ SEASTAR_TEST_CASE(clear_multiple_snapshots) { testlog.debug("Clearing all snapshots in {}.{} after it had been dropped", ks_name, table_name); e.local_db().clear_snapshot("", {ks_name}, table_name).get(); - assert(!fs::exists(table_dir)); + SCYLLA_ASSERT(!fs::exists(table_dir)); // after all snapshots had been cleared, // the dropped table directory is expected to be removed. diff --git a/test/boost/exceptions_test.inc.cc b/test/boost/exceptions_test.inc.cc index de75ab7ac5..4cdfa15d96 100644 --- a/test/boost/exceptions_test.inc.cc +++ b/test/boost/exceptions_test.inc.cc @@ -23,6 +23,7 @@ #include #include +#include "utils/assert.hh" #include "utils/exceptions.hh" class base_exception : public std::exception {}; @@ -67,7 +68,7 @@ static void check_catch(Throw&& ex) { BOOST_CHECK_EQUAL(typed_eptr, &t); } catch (...) 
{ // Can happen if the first check fails, just skip - assert(typed_eptr == nullptr); + SCYLLA_ASSERT(typed_eptr == nullptr); } } diff --git a/test/boost/fragmented_temporary_buffer_test.cc b/test/boost/fragmented_temporary_buffer_test.cc index c1d57eb6c7..1551e643c4 100644 --- a/test/boost/fragmented_temporary_buffer_test.cc +++ b/test/boost/fragmented_temporary_buffer_test.cc @@ -9,6 +9,7 @@ #include #include "test/lib/scylla_test_case.hh" +#include "utils/assert.hh" #include "utils/fragmented_temporary_buffer.hh" #include "test/lib/random_utils.hh" @@ -245,7 +246,7 @@ SEASTAR_THREAD_TEST_CASE(test_read_pod) { SEASTAR_THREAD_TEST_CASE(test_read_to) { auto test = [&] (bytes_view expected_value1, bytes_view expected_value2, fragmented_temporary_buffer& ftb) { - assert(expected_value2.size() < expected_value1.size()); + SCYLLA_ASSERT(expected_value2.size() < expected_value1.size()); bytes actual_value; @@ -281,7 +282,7 @@ SEASTAR_THREAD_TEST_CASE(test_read_to) { SEASTAR_THREAD_TEST_CASE(test_read_view) { auto test = [&] (bytes_view expected_value1, bytes_view expected_value2, fragmented_temporary_buffer& ftb) { - assert(expected_value2.size() < expected_value1.size()); + SCYLLA_ASSERT(expected_value2.size() < expected_value1.size()); auto in = ftb.get_istream(); BOOST_CHECK_EQUAL(in.bytes_left(), expected_value1.size() + expected_value2.size()); @@ -312,7 +313,7 @@ SEASTAR_THREAD_TEST_CASE(test_read_view) { SEASTAR_THREAD_TEST_CASE(test_read_bytes_view) { auto linearization_buffer = bytes_ostream(); auto test = [&] (bytes_view expected_value1, bytes_view expected_value2, fragmented_temporary_buffer& ftb) { - assert(expected_value2.size() < expected_value1.size()); + SCYLLA_ASSERT(expected_value2.size() < expected_value1.size()); auto in = ftb.get_istream(); BOOST_CHECK_EQUAL(in.read_bytes_view(0, linearization_buffer), bytes_view()); diff --git a/test/boost/idl_test.cc b/test/boost/idl_test.cc index 6f5af9a0e7..e7b989f296 100644 --- a/test/boost/idl_test.cc +++ 
b/test/boost/idl_test.cc @@ -8,6 +8,7 @@ #define BOOST_TEST_MODULE core +#include "utils/assert.hh" #include #include @@ -241,7 +242,7 @@ BOOST_AUTO_TEST_CASE(test_vector) BOOST_REQUIRE_EQUAL(vec1.size(), first_view.size()); for (size_t i = 0; i < first_view.size(); i++) { auto fv = first_view[i]; - assert(vec1[i].foo == fv.foo()); + SCYLLA_ASSERT(vec1[i].foo == fv.foo()); BOOST_REQUIRE_EQUAL(vec1[i].foo, first_view[i].foo()); BOOST_REQUIRE_EQUAL(vec1[i].bar, first_view[i].bar()); } diff --git a/test/boost/large_paging_state_test.cc b/test/boost/large_paging_state_test.cc index 0e6ab14515..1f743d7701 100644 --- a/test/boost/large_paging_state_test.cc +++ b/test/boost/large_paging_state_test.cc @@ -7,6 +7,7 @@ */ +#include "utils/assert.hh" #include #include "test/lib/scylla_test_case.hh" @@ -69,7 +70,7 @@ SEASTAR_TEST_CASE(test_use_high_bits_of_remaining_rows_in_paging_state) { test_remaining = test_remaining - rows_fetched; if (has_more_pages(msg)) { paging_state = extract_paging_state(msg); - assert(paging_state); + SCYLLA_ASSERT(paging_state); BOOST_REQUIRE_EQUAL(test_remaining, paging_state->get_remaining()); } } @@ -107,7 +108,7 @@ SEASTAR_TEST_CASE(test_use_high_bits_of_remaining_rows_in_paging_state_filtering test_remaining = test_remaining - rows_fetched; if (has_more_pages(msg)) { paging_state = extract_paging_state(msg); - assert(paging_state); + SCYLLA_ASSERT(paging_state); BOOST_REQUIRE_EQUAL(test_remaining, paging_state->get_remaining()); } } diff --git a/test/boost/limiting_data_source_test.cc b/test/boost/limiting_data_source_test.cc index 106e484199..2358f7d283 100644 --- a/test/boost/limiting_data_source_test.cc +++ b/test/boost/limiting_data_source_test.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "utils/limiting_data_source.hh" #include @@ -79,7 +80,7 @@ data_source prepare_test_skip() { } SEASTAR_THREAD_TEST_CASE(test_get_smaller_than_limit) { - assert(test_data_source_impl::chunk_limit 
> 1); + SCYLLA_ASSERT(test_data_source_impl::chunk_limit > 1); test_get(test_data_source_impl::chunk_limit - 1); } diff --git a/test/boost/lister_test.cc b/test/boost/lister_test.cc index 2ab4a35edf..d0bdd7cb53 100644 --- a/test/boost/lister_test.cc +++ b/test/boost/lister_test.cc @@ -20,6 +20,7 @@ #include "test/lib/random_utils.hh" #include "test/lib/test_utils.hh" +#include "utils/assert.hh" #include "utils/lister.hh" class expected_exception : public std::exception { @@ -82,7 +83,7 @@ SEASTAR_TEST_CASE(test_lister_abort) { std::unordered_set dir_names; auto count = co_await generate_random_content(tmp, file_names, dir_names, 1, tests::random::get_int(100, 1000)); - assert(count > 0); + SCYLLA_ASSERT(count > 0); BOOST_TEST_MESSAGE(fmt::format("Generated {} dir entries", count)); size_t initial = tests::random::get_int(1, count); diff --git a/test/boost/locator_topology_test.cc b/test/boost/locator_topology_test.cc index 826f3ebd62..5f4e0c89a5 100644 --- a/test/boost/locator_topology_test.cc +++ b/test/boost/locator_topology_test.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include @@ -182,7 +183,7 @@ SEASTAR_THREAD_TEST_CASE(test_add_or_update_by_host_id) { // In this test we check that add_or_update_endpoint searches by host_id first. // We create two nodes, one matches by id, another - by ip, // and assert that add_or_update_endpoint updates the first. // We need to make the second node 'being_decommissioned', so that // it gets removed from ip index and we don't get the non-unique IP error.
diff --git a/test/boost/log_heap_test.cc b/test/boost/log_heap_test.cc index a15a110404..baa6f8d93c 100644 --- a/test/boost/log_heap_test.cc +++ b/test/boost/log_heap_test.cc @@ -11,6 +11,7 @@ #include +#include "utils/assert.hh" #include "utils/log_heap.hh" template @@ -52,7 +53,7 @@ void test_with_options() { ++count; auto key = t.v; if (prev_key) { - assert(key > prev_key); + SCYLLA_ASSERT(key > prev_key); } max_key = std::max(max_key, key); } diff --git a/test/boost/logalloc_test.cc b/test/boost/logalloc_test.cc index fd40313ae9..6de6c8dd09 100644 --- a/test/boost/logalloc_test.cc +++ b/test/boost/logalloc_test.cc @@ -24,6 +24,7 @@ #include #include +#include "utils/assert.hh" #include "utils/logalloc.hh" #include "utils/managed_ref.hh" #include "utils/managed_bytes.hh" @@ -544,7 +545,7 @@ SEASTAR_THREAD_TEST_CASE(test_hold_reserve) { as.with_reserve(region, [&] { with_allocator(region.allocator(), [&] { - assert(sizeof(entry) + 128 < current_allocator().preferred_max_contiguous_allocation()); + SCYLLA_ASSERT(sizeof(entry) + 128 < current_allocator().preferred_max_contiguous_allocation()); logalloc::reclaim_lock rl(region); // Reserve a segment. 
diff --git a/test/boost/memtable_test.cc b/test/boost/memtable_test.cc index 6ba7774463..3c4133ff97 100644 --- a/test/boost/memtable_test.cc +++ b/test/boost/memtable_test.cc @@ -9,6 +9,7 @@ #include #include "replica/database.hh" #include "db/config.hh" +#include "utils/assert.hh" #include "utils/UUID_gen.hh" #include "test/lib/scylla_test_case.hh" #include @@ -52,7 +53,7 @@ static bytes make_unique_bytes() { } static void set_column(mutation& m, const sstring& column_name) { - assert(m.schema()->get_column_definition(to_bytes(column_name))->type == bytes_type); + SCYLLA_ASSERT(m.schema()->get_column_definition(to_bytes(column_name))->type == bytes_type); auto value = data_value(make_unique_bytes()); m.set_clustered_cell(clustering_key::make_empty(), to_bytes(column_name), value, next_timestamp()); } @@ -169,7 +170,7 @@ SEASTAR_TEST_CASE(test_memtable_flush_reader) { tests::reader_concurrency_semaphore_wrapper semaphore; auto make_memtable = [] (replica::dirty_memory_manager& mgr, replica::memtable_table_shared_data& table_shared_data, replica::table_stats& tbl_stats, std::vector muts) { - assert(!muts.empty()); + SCYLLA_ASSERT(!muts.empty()); auto mt = make_lw_shared(muts.front().schema(), mgr, table_shared_data, tbl_stats); for (auto& m : muts) { mt->apply(m); @@ -959,7 +960,7 @@ SEASTAR_TEST_CASE(memtable_flush_compresses_mutations) { // Flush to make sure all the modifications make it to disk t.flush().get(); // Treat the table as mutation_source and assert we get the expected mutation and end of stream mutation_source ms = t.as_mutation_source(); assert_that(ms.make_reader_v2(s, semaphore.make_permit())) .produces(m2) diff --git a/test/boost/multishard_mutation_query_test.cc b/test/boost/multishard_mutation_query_test.cc index 3307486695..14230f5354 100644 --- a/test/boost/multishard_mutation_query_test.cc +++
b/test/boost/multishard_mutation_query_test.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "multishard_mutation_query.hh" #include "schema/schema_registry.hh" #include "db/config.hh" @@ -303,7 +304,7 @@ read_partitions_with_generic_paged_scan(distributed& db, sche while (!ranges->front().contains(res_builder.last_pkey(), cmp)) { ranges->erase(ranges->begin()); } - assert(!ranges->empty()); + SCYLLA_ASSERT(!ranges->empty()); const auto pkrange_begin_inclusive = res_builder.last_ckey() && res_builder.last_pkey_rows() < slice.partition_row_limit(); @@ -896,7 +897,7 @@ namespace { template static interval generate_range(RandomEngine& rnd_engine, int start, int end, bool allow_open_ended_start = true) { - assert(start < end); + SCYLLA_ASSERT(start < end); std::uniform_int_distribution defined_bound_dist(0, 7); std::uniform_int_distribution inclusive_dist(0, 1); diff --git a/test/boost/mutation_query_test.cc b/test/boost/mutation_query_test.cc index 8934b0dfa6..c346f9b678 100644 --- a/test/boost/mutation_query_test.cc +++ b/test/boost/mutation_query_test.cc @@ -7,6 +7,7 @@ */ +#include "utils/assert.hh" #include #include @@ -50,19 +51,19 @@ static schema_ptr make_schema() { struct mutation_less_cmp { bool operator()(const mutation& m1, const mutation& m2) const { - assert(m1.schema() == m2.schema()); + SCYLLA_ASSERT(m1.schema() == m2.schema()); return m1.decorated_key().less_compare(*m1.schema(), m2.decorated_key()); } }; static mutation_source make_source(std::vector mutations) { return mutation_source([mutations = std::move(mutations)] (schema_ptr s, reader_permit permit, const dht::partition_range& range, const query::partition_slice& slice, tracing::trace_state_ptr, streamed_mutation::forwarding fwd, mutation_reader::forwarding fwd_mr) { - assert(range.is_full()); // slicing not implemented yet + SCYLLA_ASSERT(range.is_full()); // slicing not implemented yet for (auto&& m : mutations) { if (slice.is_reversed()) 
{ - assert(m.schema()->make_reversed()->version() == s->version()); + SCYLLA_ASSERT(m.schema()->make_reversed()->version() == s->version()); } else { - assert(m.schema() == s); + SCYLLA_ASSERT(m.schema() == s); } } return make_mutation_reader_from_mutations_v2(s, std::move(permit), mutations, slice, fwd); diff --git a/test/boost/mutation_reader_another_test.cc b/test/boost/mutation_reader_another_test.cc index b5b8baaa59..8a5eca90c0 100644 --- a/test/boost/mutation_reader_another_test.cc +++ b/test/boost/mutation_reader_another_test.cc @@ -24,6 +24,7 @@ #include "replica/memtable.hh" #include "row_cache.hh" #include "mutation/mutation_rebuilder.hh" +#include "utils/assert.hh" #include "utils/to_string.hh" #include "test/lib/simple_schema.hh" @@ -574,7 +575,7 @@ void test_flat_stream(schema_ptr s, std::vector muts, reversed_partiti auto consume_fn = [&] (mutation_reader& fmr, flat_stream_consumer fsc) { if (thread) { - assert(bool(!reversed)); + SCYLLA_ASSERT(bool(!reversed)); return fmr.consume_in_thread(std::move(fsc)); } else { if (reversed) { diff --git a/test/boost/mutation_reader_test.cc b/test/boost/mutation_reader_test.cc index c508596359..f936ff187e 100644 --- a/test/boost/mutation_reader_test.cc +++ b/test/boost/mutation_reader_test.cc @@ -48,6 +48,7 @@ #include "replica/database.hh" #include "partition_slice_builder.hh" #include "schema/schema_registry.hh" +#include "utils/assert.hh" #include "utils/ranges.hh" #include "mutation/mutation_rebuilder.hh" @@ -695,7 +696,7 @@ public: return ret; } virtual std::vector fast_forward_to(const dht::partition_range& pr) override { - assert(false); // Fast forward not supported by this reader + SCYLLA_ASSERT(false); // Fast forward not supported by this reader return {}; } }; @@ -3310,7 +3311,7 @@ SEASTAR_THREAD_TEST_CASE(test_evictable_reader_drop_flags) { } size_t add_mutation_fragment(mutation_fragment_v2&& mf, bool only_to_frags = false) { if (!only_to_frags) { - assert(mut_rebuilder); + 
SCYLLA_ASSERT(mut_rebuilder); mut_rebuilder->consume(mutation_fragment_v2(*s.schema(), permit, mf)); } size += frags.emplace_back(*s.schema(), permit, std::move(mf)).memory_usage(); @@ -3803,7 +3804,7 @@ struct clustering_order_merger_test_generator { std::vector fwd_ranges; for (size_t i = 0; i < num_ranges; ++i) { - assert(2*i+1 < positions.size()); + SCYLLA_ASSERT(2*i+1 < positions.size()); fwd_ranges.push_back(position_range(std::move(positions[2*i]), std::move(positions[2*i+1]))); } @@ -3935,7 +3936,7 @@ static future<> do_test_clustering_order_merger_sstable_set(bool reversed) { // for our partition (not even `partition_start`). For that we create an sstable // with a different partition. auto pk = pkeys[1]; - assert(!pk.equal(*g._s, g._pk)); + SCYLLA_ASSERT(!pk.equal(*g._s, g._pk)); sst = make_sstable_containing(sst_factory, {mutation(table_schema, pk)}); sst_set.insert(sst); @@ -4009,7 +4010,7 @@ SEASTAR_THREAD_TEST_CASE(clustering_combined_reader_mutation_source_test) { , _it(std::partition_point(_readers.begin(), _readers.end(), [this, cmp = dht::ring_position_comparator(*_schema)] (auto& r) { return _range.get().before(r.first, cmp); })) { - assert(!_readers.empty()); + SCYLLA_ASSERT(!_readers.empty()); } virtual future<> fill_buffer() override { @@ -4033,7 +4034,7 @@ SEASTAR_THREAD_TEST_CASE(clustering_combined_reader_mutation_source_test) { // => current partition is _it, we need to move forward // _it might be the end of current forwarding range, but that's no problem; // in that case we'll go into eos mode until forwarded - assert(_it != _readers.end()); + SCYLLA_ASSERT(_it != _readers.end()); _inside_partition = false; ++_it; } else { @@ -4062,7 +4063,7 @@ SEASTAR_THREAD_TEST_CASE(clustering_combined_reader_mutation_source_test) { // while inside partition. 
But if it happens for whatever reason just do nothing return make_ready_future<>(); } - assert(_it != _readers.end()); + SCYLLA_ASSERT(_it != _readers.end()); // all fragments currently in the buffer come from the current position range // and pr must be strictly greater, so just clear the buffer clear_buffer(); diff --git a/test/boost/mutation_test.cc b/test/boost/mutation_test.cc index d436345d51..d601d08d25 100644 --- a/test/boost/mutation_test.cc +++ b/test/boost/mutation_test.cc @@ -13,6 +13,7 @@ #include #include #include "mutation_query.hh" +#include "utils/assert.hh" #include "utils/hashers.hh" #include "utils/preempt.hh" #include "utils/xx_hasher.hh" @@ -2927,13 +2928,13 @@ SEASTAR_THREAD_TEST_CASE(test_compaction_data_stream_split) { if (destination == tests::timestamp_destination::partition_tombstone || destination == tests::timestamp_destination::row_tombstone || destination == tests::timestamp_destination::range_tombstone) { - assert(min_timestamp < tomb_ts_max); + SCYLLA_ASSERT(min_timestamp < tomb_ts_max); return tests::random::get_int(tomb_ts_min, tomb_ts_max, engine); } else if (destination == tests::timestamp_destination::collection_tombstone) { - assert(min_timestamp < collection_tomb_ts_max); + SCYLLA_ASSERT(min_timestamp < collection_tomb_ts_max); return tests::random::get_int(collection_tomb_ts_min, collection_tomb_ts_max, engine); } else { - assert(min_timestamp < other_ts_max); + SCYLLA_ASSERT(min_timestamp < other_ts_max); return tests::random::get_int(other_ts_min, other_ts_max, engine); } }; @@ -2962,13 +2963,13 @@ SEASTAR_THREAD_TEST_CASE(test_compaction_data_stream_split) { if (destination == tests::timestamp_destination::partition_tombstone || destination == tests::timestamp_destination::row_tombstone || destination == tests::timestamp_destination::range_tombstone) { - assert(min_timestamp < tomb_ts_max); + SCYLLA_ASSERT(min_timestamp < tomb_ts_max); return tests::random::get_int(tomb_ts_min, tomb_ts_max, engine); } else if 
(destination == tests::timestamp_destination::collection_tombstone) { - assert(min_timestamp < tomb_ts_max); + SCYLLA_ASSERT(min_timestamp < tomb_ts_max); return tests::random::get_int(collection_tomb_ts_min, collection_tomb_ts_max, engine); } else { - assert(min_timestamp < other_ts_max); + SCYLLA_ASSERT(min_timestamp < other_ts_max); return tests::random::get_int(other_ts_min, other_ts_max, engine); } }; diff --git a/test/boost/mvcc_test.cc b/test/boost/mvcc_test.cc index ad820feeee..0e4903a32c 100644 --- a/test/boost/mvcc_test.cc +++ b/test/boost/mvcc_test.cc @@ -7,6 +7,7 @@ */ +#include "utils/assert.hh" #include #include #include @@ -207,7 +208,7 @@ void mvcc_partition::apply_to_evictable(partition_entry&& src, schema_ptr src_sc } mvcc_partition& mvcc_partition::operator+=(mvcc_partition&& src) { - assert(_evictable); + SCYLLA_ASSERT(_evictable); apply_to_evictable(std::move(src.entry()), src.schema()); return *this; } diff --git a/test/boost/network_topology_strategy_test.cc b/test/boost/network_topology_strategy_test.cc index b12ea8443d..7ac5bea5ac 100644 --- a/test/boost/network_topology_strategy_test.cc +++ b/test/boost/network_topology_strategy_test.cc @@ -11,6 +11,7 @@ #include #include "gms/inet_address.hh" #include "locator/types.hh" +#include "utils/assert.hh" #include "utils/UUID_gen.hh" #include "utils/sequenced_set.hh" #include "utils/to_string.hh" @@ -747,7 +748,7 @@ static locator::host_id_set calculate_natural_endpoints( racks = tp.get_datacenter_racks(); // not aware of any cluster members - assert(!all_endpoints.empty() && !racks.empty()); + SCYLLA_ASSERT(!all_endpoints.empty() && !racks.empty()); for (auto& next : tm.ring_range(search_token)) { diff --git a/test/boost/partitioner_test.cc b/test/boost/partitioner_test.cc index c762c33e6a..700893619f 100644 --- a/test/boost/partitioner_test.cc +++ b/test/boost/partitioner_test.cc @@ -21,6 +21,7 @@ #include "schema/schema.hh" #include "types/types.hh" #include "schema/schema_builder.hh" 
+#include "utils/assert.hh" #include "utils/to_string.hh" #include "test/lib/simple_schema.hh" @@ -631,11 +632,11 @@ SEASTAR_THREAD_TEST_CASE(test_find_first_token_for_shard) { auto second_boundary = sharder.token_for_next_shard_for_reads(dht::minimum_token(), 2); auto third_boundary = sharder.token_for_next_shard_for_reads(dht::minimum_token(), 0); auto next_token = [] (dht::token t) { - assert(dht::token::to_int64(t) < std::numeric_limits::max()); + SCYLLA_ASSERT(dht::token::to_int64(t) < std::numeric_limits::max()); return dht::token::from_int64(dht::token::to_int64(t) + 1); }; auto prev_token = [] (dht::token t) { - assert(dht::token::to_int64(t) > std::numeric_limits::min() + 1); + SCYLLA_ASSERT(dht::token::to_int64(t) > std::numeric_limits::min() + 1); return dht::token::from_int64(dht::token::to_int64(t) - 1); }; diff --git a/test/boost/reader_concurrency_semaphore_test.cc b/test/boost/reader_concurrency_semaphore_test.cc index a7e00bc17c..50c8474c30 100644 --- a/test/boost/reader_concurrency_semaphore_test.cc +++ b/test/boost/reader_concurrency_semaphore_test.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include #include "reader_concurrency_semaphore.hh" @@ -121,7 +122,7 @@ SEASTAR_THREAD_TEST_CASE(test_reader_concurrency_semaphore_abandoned_handle_clos { auto handle = semaphore.register_inactive_read(make_empty_flat_reader_v2(s.schema(), permit)); // The handle is destroyed here, triggering the destrution of the inactive read. // If the test fails an assert() is triggered due to the reader being // destroyed without having been closed before.
} } diff --git a/test/boost/reusable_buffer_test.cc b/test/boost/reusable_buffer_test.cc index a275b73845..b54f04c244 100644 --- a/test/boost/reusable_buffer_test.cc +++ b/test/boost/reusable_buffer_test.cc @@ -11,6 +11,7 @@ #include +#include "utils/assert.hh" #include "utils/reusable_buffer.hh" #include #include @@ -139,7 +140,7 @@ SEASTAR_TEST_CASE(test_decay) { // It isn't strictly required from the implementation to use // power-of-2 sizes, just sizes coarse enough to limit the number // of allocations. // If the implementation is modified, this assert can be freely changed. BOOST_REQUIRE_EQUAL(buffer.size(), std::bit_ceil(size_t(1'000'001))); co_await advance_clock(1500ms); get_buffer(1'000); diff --git a/test/boost/row_cache_test.cc b/test/boost/row_cache_test.cc index 3cfb66e450..c0b89bbf0a 100644 --- a/test/boost/row_cache_test.cc +++ b/test/boost/row_cache_test.cc @@ -34,6 +34,7 @@ #include "test/lib/reader_concurrency_semaphore.hh" #include "test/lib/random_utils.hh" #include "test/lib/sstable_utils.hh" +#include "utils/assert.hh" #include "utils/throttle.hh" #include @@ -87,7 +88,7 @@ snapshot_source make_decorated_snapshot_source(snapshot_source src, std::functio mutation_source make_source_with(mutation m) { return mutation_source([m] (schema_ptr s, reader_permit permit, const dht::partition_range&, const query::partition_slice&, tracing::trace_state_ptr, streamed_mutation::forwarding fwd) { - assert(m.schema() == s); + SCYLLA_ASSERT(m.schema() == s); return make_mutation_reader_from_mutations_v2(s, std::move(permit), m, std::move(fwd)); }); } @@ -285,7 +286,7 @@ void test_cache_delegates_to_underlying_only_once_with_single_partition(schema_p const query::partition_slice&, tracing::trace_state_ptr, streamed_mutation::forwarding fwd) { - assert(m.schema() == s); + SCYLLA_ASSERT(m.schema() == s); if (range.contains(dht::ring_position(m.decorated_key()),
dht::ring_position_comparator(*s))) { return make_counting_reader(make_mutation_reader_from_mutations_v2(s, std::move(permit), m, std::move(fwd)), secondary_calls_count); } else { @@ -1571,7 +1572,7 @@ SEASTAR_TEST_CASE(test_mvcc) { assert_that(std::move(rd3)).has_monotonic_positions(); if (with_active_memtable_reader) { - assert(mt1_reader_opt); + SCYLLA_ASSERT(mt1_reader_opt); auto mt1_reader_mutation = read_mutation_from_mutation_reader(*mt1_reader_opt).get(); BOOST_REQUIRE(mt1_reader_mutation); assert_that(*mt1_reader_mutation).is_equal_to_compacted(m2); @@ -1699,7 +1700,7 @@ SEASTAR_TEST_CASE(test_slicing_mutation_reader) { static void evict_one_partition(cache_tracker& tracker) { auto initial = tracker.partitions(); - assert(initial > 0); + SCYLLA_ASSERT(initial > 0); while (tracker.partitions() == initial) { auto ret = tracker.region().evict_some(); BOOST_REQUIRE(ret == memory::reclaiming_result::reclaimed_something); @@ -1708,7 +1709,7 @@ static void evict_one_row(cache_tracker& tracker) { auto initial = tracker.get_stats().rows; - assert(initial > 0); + SCYLLA_ASSERT(initial > 0); while (tracker.get_stats().rows == initial) { auto ret = tracker.region().evict_some(); BOOST_REQUIRE(ret == memory::reclaiming_result::reclaimed_something); @@ -4461,7 +4462,7 @@ SEASTAR_THREAD_TEST_CASE(test_population_of_subrange_of_expired_partition) { // Reproducer for #14110. // Forces a scenario where digest is calculated for rows in old MVCC // versions, incompatible with the current schema. // In the original issue, this crashed the node with an assert failure, // because the digest calculation was passed the current schema, // instead of the row's actual old schema.
SEASTAR_THREAD_TEST_CASE(test_digest_read_during_schema_upgrade) { @@ -4517,7 +4518,7 @@ SEASTAR_THREAD_TEST_CASE(test_digest_read_during_schema_upgrade) { auto close_rd = deferred_close(rd); // In the original issue reproduced by this test, the read would crash // on an assert. // So what we are really testing below is that the read doesn't crash. // The comparison with m2 is just a sanity check. auto m2 = m1; diff --git a/test/boost/s3_test.cc b/test/boost/s3_test.cc index e7c6568659..e8dad6ada4 100644 --- a/test/boost/s3_test.cc +++ b/test/boost/s3_test.cc @@ -23,6 +23,7 @@ #include "test/lib/random_utils.hh" #include "test/lib/test_utils.hh" #include "test/lib/tmpdir.hh" +#include "utils/assert.hh" #include "utils/s3/client.hh" #include "utils/s3/creds.hh" #include "utils/exceptions.hh" @@ -209,7 +210,7 @@ future<> test_client_upload_file(std::string_view test_name, size_t total_size, auto output = co_await make_file_output_stream(std::move(f)); std::string_view data = "1234567890ABCDEF"; // so we can test !with_remainder case properly with multiple writes - assert(total_size % data.size() == 0); + SCYLLA_ASSERT(total_size % data.size() == 0); for (size_t bytes_written = 0; bytes_written < total_size; diff --git a/test/boost/secondary_index_test.cc b/test/boost/secondary_index_test.cc index 42c1db338e..caf21ba2b3 100644 --- a/test/boost/secondary_index_test.cc +++ b/test/boost/secondary_index_test.cc @@ -19,6 +19,7 @@ #include "types/list.hh" #include "types/set.hh" #include "cql3/statements/select_statement.hh" +#include "utils/assert.hh" #include "utils/error_injection.hh" using namespace std::chrono_literals; @@ -445,7 +446,7 @@ SEASTAR_TEST_CASE(test_simple_index_paging) { auto extract_paging_state = [] (::shared_ptr res) { auto rows = dynamic_pointer_cast(res); auto paging_state = rows->rs().get_metadata().paging_state(); - assert(paging_state); + SCYLLA_ASSERT(paging_state); return make_lw_shared(*paging_state); }; @@ -829,7
+830,7 @@ SEASTAR_TEST_CASE(test_local_index_paging) { auto extract_paging_state = [] (::shared_ptr res) { auto rows = dynamic_pointer_cast(res); auto paging_state = rows->rs().get_metadata().paging_state(); - assert(paging_state); + SCYLLA_ASSERT(paging_state); return make_lw_shared(*paging_state); }; diff --git a/test/boost/sstable_3_x_test.cc b/test/boost/sstable_3_x_test.cc index 618e212a01..28004e2b19 100644 --- a/test/boost/sstable_3_x_test.cc +++ b/test/boost/sstable_3_x_test.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include @@ -5609,7 +5610,7 @@ SEASTAR_TEST_CASE(test_compression_premature_eof) { // Creates an sstable with a newer schema, and populates // it with a reader created with an older schema. // -// Before the fixes, it would have resulted in an assert violation. +// Before the fixes, it would have resulted in a SCYLLA_ASSERT violation. SEASTAR_TEST_CASE(test_alter_bloom_fp_chance_during_write) { return test_env::do_with_async([] (test_env& env) { auto s1 = schema_builder("ks", "t") diff --git a/test/boost/sstable_compaction_test.cc b/test/boost/sstable_compaction_test.cc index 9b2111e351..578cbaac5f 100644 --- a/test/boost/sstable_compaction_test.cc +++ b/test/boost/sstable_compaction_test.cc @@ -69,6 +69,7 @@ #include "readers/from_mutations_v2.hh" #include "readers/from_fragments_v2.hh" #include "readers/combined.hh" +#include "utils/assert.hh" #include "utils/pretty_printers.hh" namespace fs = std::filesystem; @@ -395,7 +396,7 @@ static future compact_sstables(test_env& env, std::vect static future create_and_compact_sstables(test_env& env, size_t create_sstables) { uint64_t min_sstable_size = 50; auto res = co_await compact_sstables(env, {}, create_sstables, min_sstable_size, compaction_strategy_type::size_tiered); - // size tiered compaction will output at most one sstable, let's assert that. + // size tiered compaction will output at most one sstable, let's check that.
BOOST_REQUIRE(res.output_sstables.size() == 1); co_return res; } @@ -403,7 +404,7 @@ static future create_and_compact_sstables(test_env& env static future compact_sstables(test_env& env, std::vector sstables_to_compact) { uint64_t min_sstable_size = 50; auto res = co_await compact_sstables(env, std::move(sstables_to_compact), 0, min_sstable_size, compaction_strategy_type::size_tiered); - // size tiered compaction will output at most one sstable, let's assert that. + // size tiered compaction will output at most one sstable, let's check that. BOOST_REQUIRE(res.output_sstables.size() == 1); co_return res; } @@ -492,9 +493,9 @@ static sstables::shared_sstable add_sstable_for_leveled_test(test_env& env, lw_s uint32_t sstable_level, const partition_key& first_key, const partition_key& last_key, int64_t max_timestamp = 0) { auto sst = env.make_sstable(cf->schema()); sstables::test(sst).set_values_for_leveled_strategy(fake_data_size, sstable_level, max_timestamp, first_key, last_key); - assert(sst->data_size() == fake_data_size); - assert(sst->get_sstable_level() == sstable_level); - assert(sst->get_stats_metadata().max_timestamp == max_timestamp); + SCYLLA_ASSERT(sst->data_size() == fake_data_size); + SCYLLA_ASSERT(sst->get_sstable_level() == sstable_level); + SCYLLA_ASSERT(sst->get_stats_metadata().max_timestamp == max_timestamp); column_family_test(cf).add_sstable(sst).get(); return sst; } @@ -2873,7 +2874,7 @@ SEASTAR_TEST_CASE(backlog_tracker_correctness_after_changing_compaction_strategy } // Start compaction, then stop tracking compaction, switch to TWCS, wait for compaction to finish and check for backlog. - // That's done to assert backlog will work for compaction that is finished and was stopped tracking. + // That's done to check that backlog will work for compaction that is finished and was stopped tracking.
auto fut = compact_sstables(env, sstables::compaction_descriptor(ssts), cf, sst_gen); @@ -3492,7 +3493,7 @@ SEASTAR_TEST_CASE(test_twcs_partition_estimate) { }; auto ret = compact_sstables(env, sstables::compaction_descriptor(sstables_spanning_many_windows), cf, sst_gen, replacer_fn_no_op()).get(); - // The real test here is that we don't assert() in + // The real test here is that we don't SCYLLA_ASSERT() in // sstables::prepare_summary() with the compact_sstables() call above, // this is only here as a sanity check. BOOST_REQUIRE_EQUAL(ret.new_sstables.size(), std::min(sstables_spanning_many_windows.size() * rows_per_partition, @@ -3755,7 +3756,7 @@ SEASTAR_TEST_CASE(twcs_reshape_with_disjoint_set_test) { constexpr auto window_size_in_minutes = 8 * 60; forward_jump_clocks(minutes(window_size_in_minutes - now_in_minutes.count() % window_size_in_minutes)); now = gc_clock::now().time_since_epoch() + offset_duration; - assert(std::chrono::duration_cast(now).count() % window_size_in_minutes == 0); + SCYLLA_ASSERT(std::chrono::duration_cast(now).count() % window_size_in_minutes == 0); auto next_timestamp = [now](auto step) { return (now + duration_cast(step)).count(); @@ -4427,7 +4428,7 @@ future<> run_controller_test(sstables::compaction_strategy_type compaction_strat auto sst = env.make_sstable(t.schema()); auto key = tests::generate_partition_key(t.schema()).key(); sstables::test(sst).set_values_for_leveled_strategy(data_size, level, 0 /*max ts*/, key, key); - assert(sst->data_size() == data_size); + SCYLLA_ASSERT(sst->data_size() == data_size); auto backlog_before = t.as_table_state().get_backlog_tracker().backlog(); t->add_sstable_and_update_cache(sst).get(); testlog.debug("\tNew sstable of size={} level={}; Backlog diff={};", diff --git a/test/boost/sstable_conforms_to_mutation_source_test.cc b/test/boost/sstable_conforms_to_mutation_source_test.cc index 01ba640beb..5838610dc5 100644 --- a/test/boost/sstable_conforms_to_mutation_source_test.cc +++ 
b/test/boost/sstable_conforms_to_mutation_source_test.cc @@ -7,6 +7,7 @@ */ +#include "utils/assert.hh" #include #include "test/lib/scylla_test_case.hh" #include @@ -137,7 +138,7 @@ SEASTAR_TEST_CASE(test_sstable_conforms_to_mutation_source_md_large) { return test_sstable_conforms_to_mutation_source(writable_sstable_versions[1], block_sizes[2]); } -// This assert makes sure we don't miss writable vertions +// This static_assert makes sure we don't miss writable versions static_assert(writable_sstable_versions.size() == 3); // `keys` may contain repetitions. @@ -219,7 +220,7 @@ SEASTAR_THREAD_TEST_CASE(test_sstable_reversing_reader_random_schema) { std::vector ranges; for (auto& r: fwd_ranges) { - assert(position_in_partition::less_compare(*query_schema)(r.start(), r.end())); + SCYLLA_ASSERT(position_in_partition::less_compare(*query_schema)(r.start(), r.end())); auto cr_opt = position_range_to_clustering_range(r, *query_schema); if (!cr_opt) { continue; diff --git a/test/boost/sstable_datafile_test.cc b/test/boost/sstable_datafile_test.cc index db8d1d4395..f81848f512 100644 --- a/test/boost/sstable_datafile_test.cc +++ b/test/boost/sstable_datafile_test.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include @@ -2057,7 +2058,7 @@ SEASTAR_TEST_CASE(sstable_owner_shards) { }; auto assert_sstable_owners = [&] (std::unordered_set expected_owners, unsigned ignore_msb, unsigned smp_count) { - assert(expected_owners.size() <= smp_count); + SCYLLA_ASSERT(expected_owners.size() <= smp_count); auto sst = make_shared_sstable(expected_owners, ignore_msb, smp_count); auto owners = boost::copy_range>(sst->get_shards_for_this_sstable()); BOOST_REQUIRE(boost::algorithm::all_of(expected_owners, [&] (unsigned expected_owner) { @@ -2596,7 +2597,7 @@ SEASTAR_TEST_CASE(test_zero_estimated_partitions) { auto close_mr = deferred_close(sst_mr); auto sst_mut = read_mutation_from_mutation_reader(sst_mr).get(); - // The real test here
is that we don't assert() in + // The real test here is that we don't SCYLLA_ASSERT() in // sstables::prepare_summary() with the write_components() call above, // this is only here as a sanity check. BOOST_REQUIRE(sst_mr.is_buffer_empty()); diff --git a/test/boost/sstable_test.hh b/test/boost/sstable_test.hh index 92e7b83c88..e45392aea0 100644 --- a/test/boost/sstable_test.hh +++ b/test/boost/sstable_test.hh @@ -9,6 +9,7 @@ #pragma once +#include "utils/assert.hh" #include "types/map.hh" #include "sstables/sstables.hh" #include "replica/database.hh" @@ -378,7 +379,7 @@ inline void match(const row& row, const schema& s, bytes col, const data_value& auto expected = cdef->type->decompose(value); auto val = c.value().linearize(); - assert(val == expected); + SCYLLA_ASSERT(val == expected); BOOST_REQUIRE(c.value().linearize() == expected); if (timestamp) { BOOST_REQUIRE(c.timestamp() == timestamp); diff --git a/test/lib/cql_assertions.cc b/test/lib/cql_assertions.cc index 2ba6464b1a..f440966d07 100644 --- a/test/lib/cql_assertions.cc +++ b/test/lib/cql_assertions.cc @@ -13,6 +13,7 @@ #include "test/lib/cql_assertions.hh" #include "test/lib/eventually.hh" #include "transport/messages/result_message.hh" +#include "utils/assert.hh" #include "utils/to_string.hh" #include "bytes.hh" @@ -260,23 +261,23 @@ future<> require_column_has_value(cql_test_env& e, const sstring& table_name, auto& cf = db.find_column_family("ks", table_name); auto schema = cf.schema(); return cf.find_row(schema, make_reader_permit(e), dk, ckey).then([schema, column_name, exp] (auto row) { - assert(row != nullptr); + SCYLLA_ASSERT(row != nullptr); auto col_def = schema->get_column_definition(utf8_type->decompose(column_name)); - assert(col_def != nullptr); + SCYLLA_ASSERT(col_def != nullptr); const atomic_cell_or_collection* cell = row->find_cell(col_def->id); if (!cell) { - assert(((void)"column not set", 0)); + SCYLLA_ASSERT(((void)"column not set", 0)); } bytes actual; if 
(!col_def->type->is_multi_cell()) { auto c = cell->as_atomic_cell(*col_def); - assert(c.is_live()); + SCYLLA_ASSERT(c.is_live()); actual = c.value().linearize(); } else { actual = linearized(serialize_for_cql(*col_def->type, cell->as_collection_mutation())); } - assert(col_def->type->equal(actual, exp)); + SCYLLA_ASSERT(col_def->type->equal(actual, exp)); }); }); } diff --git a/test/lib/cql_assertions.hh b/test/lib/cql_assertions.hh index 18ecf9f6a6..ff583c8268 100644 --- a/test/lib/cql_assertions.hh +++ b/test/lib/cql_assertions.hh @@ -9,6 +9,7 @@ #pragma once +#include "utils/assert.hh" #include "test/lib/cql_test_env.hh" #include "transport/messages/result_message_base.hh" #include "bytes.hh" @@ -50,7 +51,7 @@ void assert_that_failed(future& f) { try { f.get(); - assert(f.failed()); + SCYLLA_ASSERT(f.failed()); } catch (...) { } @@ -61,7 +62,7 @@ void assert_that_failed(future&& f) { try { f.get(); - assert(f.failed()); + SCYLLA_ASSERT(f.failed()); } catch (...) { } diff --git a/test/lib/cql_test_env.cc b/test/lib/cql_test_env.cc index d7623b0bf4..b8c9a42c7f 100644 --- a/test/lib/cql_test_env.cc +++ b/test/lib/cql_test_env.cc @@ -57,6 +57,7 @@ #include "db/system_distributed_keyspace.hh" #include "db/sstables-format-selector.hh" #include "repair/row_level.hh" +#include "utils/assert.hh" #include "utils/class_registrator.hh" #include "utils/cross-shard-barrier.hh" #include "streaming/stream_manager.hh" @@ -270,7 +271,7 @@ public: } auto stmt = prepared->statement; - assert(stmt->get_bound_terms() == qo->get_values_count()); + SCYLLA_ASSERT(stmt->get_bound_terms() == qo->get_values_count()); qo->prepare(prepared->bound_names); auto qs = make_query_state(); @@ -415,7 +416,7 @@ public: auto deactivate = defer([] { bool old_active = true; auto success = active.compare_exchange_strong(old_active, false); - assert(success); + SCYLLA_ASSERT(success); }); // FIXME: make the function storage non static @@ -691,7 +692,7 @@ private: // Normally the auth server is already 
stopped in here, // but if there is an initialization failure we have to - // make sure to stop it now or ~sharded will assert. + // make sure to stop it now or ~sharded will abort. auto stop_auth_server = defer([this] { _auth_service.stop().get(); }); diff --git a/test/lib/data_model.cc b/test/lib/data_model.cc index b1093e418f..d399b1fe9b 100644 --- a/test/lib/data_model.cc +++ b/test/lib/data_model.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "test/lib/data_model.hh" #include @@ -94,10 +95,10 @@ mutation mutation_description::build(schema_ptr s) const { m.partition().apply(_partition_tombstone); for (auto& [ column, value_or_collection ] : _static_row) { auto cdef = s->get_column_definition(utf8_type->decompose(column)); - assert(cdef); + SCYLLA_ASSERT(cdef); std::visit(make_visitor( [&] (const atomic_value& v) { - assert(cdef->is_atomic()); + SCYLLA_ASSERT(cdef->is_atomic()); if (!v.expiring) { m.set_static_cell(*cdef, atomic_cell::make_live(*cdef->type, v.timestamp, v.value)); } else { @@ -106,7 +107,7 @@ mutation mutation_description::build(schema_ptr s) const { } }, [&] (const collection& c) { - assert(!cdef->is_atomic()); + SCYLLA_ASSERT(!cdef->is_atomic()); auto get_value_type = visit(*cdef->type, make_visitor( [] (const collection_type_impl& ctype) -> std::function { @@ -116,7 +117,7 @@ mutation mutation_description::build(schema_ptr s) const { return [&] (bytes_view key) -> const abstract_type& { return *utype.type(deserialize_field_index(key)); }; }, [] (const abstract_type& o) -> std::function { - assert(false); + SCYLLA_ASSERT(false); } )); @@ -144,10 +145,10 @@ mutation mutation_description::build(schema_ptr s) const { auto ck = clustering_key::from_exploded(*s, ckey); for (auto& [ column, value_or_collection ] : cells) { auto cdef = s->get_column_definition(utf8_type->decompose(column)); - assert(cdef); + SCYLLA_ASSERT(cdef); std::visit(make_visitor( [&] (const atomic_value& v) { -
assert(cdef->is_atomic()); + SCYLLA_ASSERT(cdef->is_atomic()); if (!v.expiring) { m.set_clustered_cell(ck, *cdef, atomic_cell::make_live(*cdef->type, v.timestamp, v.value)); } else { @@ -156,7 +157,7 @@ mutation mutation_description::build(schema_ptr s) const { } }, [&] (const collection& c) { - assert(!cdef->is_atomic()); + SCYLLA_ASSERT(!cdef->is_atomic()); auto get_value_type = visit(*cdef->type, make_visitor( [] (const collection_type_impl& ctype) -> std::function { @@ -166,7 +167,7 @@ mutation mutation_description::build(schema_ptr s) const { return [&] (bytes_view key) -> const abstract_type& { return *utype.type(deserialize_field_index(key)); }; }, [] (const abstract_type& o) -> std::function { - assert(false); + SCYLLA_ASSERT(false); } )); @@ -229,7 +230,7 @@ std::vector::iterator table_description::find_column( } void table_description::add_column(std::vector& columns, const sstring& name, data_type type) { - assert(find_column(columns, name) == columns.end()); + SCYLLA_ASSERT(find_column(columns, name) == columns.end()); columns.emplace_back(name, type); } @@ -239,14 +240,14 @@ void table_description::add_old_column(const sstring& name, data_type type) { void table_description::remove_column(std::vector& columns, const sstring& name) { auto it = find_column(columns, name); - assert(it != columns.end()); + SCYLLA_ASSERT(it != columns.end()); _removed_columns.emplace_back(removed_column { name, std::get(*it), column_removal_timestamp }); columns.erase(it); } void table_description::alter_column_type(std::vector& columns, const sstring& name, data_type new_type) { auto it = find_column(columns, name); - assert(it != columns.end()); + SCYLLA_ASSERT(it != columns.end()); std::get(*it) = new_type; } @@ -344,13 +345,13 @@ void table_description::alter_regular_column_type(const sstring& name, data_type void table_description::rename_partition_column(const sstring& from, const sstring& to) { _change_log.emplace_back(format("renamed partition column \'{}\' to 
\'{}\'", from, to)); auto it = find_column(_partition_key, from); - assert(it != _partition_key.end()); + SCYLLA_ASSERT(it != _partition_key.end()); std::get(*it) = to; } void table_description::rename_clustering_column(const sstring& from, const sstring& to) { _change_log.emplace_back(format("renamed clustering column \'{}\' to \'{}\'", from, to)); auto it = find_column(_clustering_key, from); - assert(it != _clustering_key.end()); + SCYLLA_ASSERT(it != _clustering_key.end()); std::get(*it) = to; } diff --git a/test/lib/mutation_reader_assertions.hh b/test/lib/mutation_reader_assertions.hh index 2f4d06f840..fc84f70dd4 100644 --- a/test/lib/mutation_reader_assertions.hh +++ b/test/lib/mutation_reader_assertions.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include #include "readers/mutation_reader.hh" @@ -261,7 +262,7 @@ public: BOOST_FAIL(format("Expected row with column {}, but it is not present", columns[i].name)); } auto& cdef = _reader.schema()->regular_column_at(columns[i].id); - assert (!cdef.is_multi_cell()); + SCYLLA_ASSERT (!cdef.is_multi_cell()); auto cmp = compare_unsigned(columns[i].value, cell->as_atomic_cell(cdef).value().linearize()); if (cmp != 0) { BOOST_FAIL(format("Expected row with column {} having value {}, but it has value {}", diff --git a/test/lib/mutation_source_test.cc b/test/lib/mutation_source_test.cc index e49bc66260..40f7471ba0 100644 --- a/test/lib/mutation_source_test.cc +++ b/test/lib/mutation_source_test.cc @@ -34,6 +34,7 @@ #include "types/list.hh" #include "types/set.hh" #include +#include "utils/assert.hh" #include "utils/UUID_gen.hh" // partitions must be sorted by decorated key @@ -67,7 +68,7 @@ public: void fast_forward_if_needed(flat_reader_assertions_v2& mr, const mutation& expected, bool verify_eos = true) { while (!current_range().contains(expected.decorated_key(), dht::ring_position_comparator(*expected.schema()))) { _current_position++; - assert(_current_position < _ranges.size()); + 
SCYLLA_ASSERT(_current_position < _ranges.size()); if (verify_eos) { mr.produces_end_of_stream(); } @@ -409,7 +410,7 @@ static void test_streamed_mutation_forwarding_guarantees(tests::reader_concurren }; const int n_keys = 1001; - assert(!contains_key(n_keys - 1)); // so that we can form a range with position greater than all keys + SCYLLA_ASSERT(!contains_key(n_keys - 1)); // so that we can form a range with position greater than all keys mutation m(s, table.make_pkey()); std::vector keys; @@ -1959,7 +1960,7 @@ void for_each_mutation_pair(std::function{0, n_keys - 1}; } @@ -2308,7 +2309,7 @@ public: case 1: return row_marker(random_tombstone(timestamp_level::row_marker_tombstone)); case 2: return row_marker(gen_timestamp(timestamp_level::data)); case 3: return row_marker(gen_timestamp(timestamp_level::data), std::chrono::seconds(1), new_expiry()); - default: assert(0); + default: SCYLLA_ASSERT(0); } abort(); }; @@ -2769,33 +2770,33 @@ mutation forwardable_reader_to_mutation(mutation_reader r, const std::vectorconsume_new_partition(dk); } stop_iteration consume(tombstone t) { - assert(_builder); + SCYLLA_ASSERT(_builder); return _builder->consume(t); } stop_iteration consume(range_tombstone_change&& rt) { - assert(_builder); + SCYLLA_ASSERT(_builder); return _builder->consume(std::move(rt)); } stop_iteration consume(static_row&& sr) { - assert(_builder); + SCYLLA_ASSERT(_builder); return _builder->consume(std::move(sr)); } stop_iteration consume(clustering_row&& cr) { - assert(_builder); + SCYLLA_ASSERT(_builder); return _builder->consume(std::move(cr)); } stop_iteration consume_end_of_partition() { - assert(_builder); + SCYLLA_ASSERT(_builder); return stop_iteration::yes; } diff --git a/test/lib/random_schema.cc b/test/lib/random_schema.cc index b21dbff004..0e1d0a7ec5 100644 --- a/test/lib/random_schema.cc +++ b/test/lib/random_schema.cc @@ -22,6 +22,7 @@ #include "types/set.hh" #include "types/tuple.hh" #include "types/user.hh" +#include "utils/assert.hh" 
#include "utils/big_decimal.hh" #include "utils/UUID_gen.hh" @@ -178,7 +179,7 @@ public: , _regular_column_count_dist(regular_column_count_dist) , _static_column_count_dist(static_column_count_dist) , _type_generator(*this) { - assert(_partition_column_count_dist.a() > 0); + SCYLLA_ASSERT(_partition_column_count_dist.a() > 0); } virtual sstring table_name(std::mt19937& engine) override { return format("table{}", generate_unique_id(engine, _used_table_ids)); @@ -447,7 +448,7 @@ data_value generate_utf8_value(std::mt19937& engine, size_t min_size_in_bytes, s char* to_next; std::mbstate_t mb{}; auto res = f.out(mb, &wstr[0], &wstr[wstr.size()], from_next, &utf8_str[0], &utf8_str[utf8_str.size()], to_next); - assert(res == codec::ok); + SCYLLA_ASSERT(res == codec::ok); utf8_str.resize(to_next - &utf8_str[0]); return data_value(std::move(utf8_str)); @@ -513,44 +514,44 @@ data_value generate_duration_value(std::mt19937& engine, size_t, size_t) { } data_value generate_frozen_tuple_value(std::mt19937& engine, const tuple_type_impl& type, value_generator& val_gen, size_t min_size_in_bytes, size_t max_size_in_bytes) { - assert(!type.is_multi_cell()); + SCYLLA_ASSERT(!type.is_multi_cell()); return make_tuple_value(type.shared_from_this(), generate_frozen_tuple_values(engine, val_gen, type.all_types(), min_size_in_bytes, max_size_in_bytes)); } data_value generate_frozen_user_value(std::mt19937& engine, const user_type_impl& type, value_generator& val_gen, size_t min_size_in_bytes, size_t max_size_in_bytes) { - assert(!type.is_multi_cell()); + SCYLLA_ASSERT(!type.is_multi_cell()); return make_user_value(type.shared_from_this(), generate_frozen_tuple_values(engine, val_gen, type.all_types(), min_size_in_bytes, max_size_in_bytes)); } data_model::mutation_description::collection generate_list_value(std::mt19937& engine, const list_type_impl& type, value_generator& val_gen) { - assert(type.is_multi_cell()); + SCYLLA_ASSERT(type.is_multi_cell()); return generate_collection(engine, 
*type.name_comparator(), *type.value_comparator(), val_gen); } data_value generate_frozen_list_value(std::mt19937& engine, const list_type_impl& type, value_generator& val_gen, size_t min_size_in_bytes, size_t max_size_in_bytes) { - assert(!type.is_multi_cell()); + SCYLLA_ASSERT(!type.is_multi_cell()); return make_list_value(type.shared_from_this(), generate_frozen_list(engine, *type.get_elements_type(), val_gen, min_size_in_bytes, max_size_in_bytes)); } data_model::mutation_description::collection generate_set_value(std::mt19937& engine, const set_type_impl& type, value_generator& val_gen) { - assert(type.is_multi_cell()); + SCYLLA_ASSERT(type.is_multi_cell()); return generate_collection(engine, *type.name_comparator(), *type.value_comparator(), val_gen); } data_value generate_frozen_set_value(std::mt19937& engine, const set_type_impl& type, value_generator& val_gen, size_t min_size_in_bytes, size_t max_size_in_bytes) { - assert(!type.is_multi_cell()); + SCYLLA_ASSERT(!type.is_multi_cell()); return make_set_value(type.shared_from_this(), generate_frozen_set(engine, *type.get_elements_type(), val_gen, min_size_in_bytes, max_size_in_bytes)); } data_model::mutation_description::collection generate_map_value(std::mt19937& engine, const map_type_impl& type, value_generator& val_gen) { - assert(type.is_multi_cell()); + SCYLLA_ASSERT(type.is_multi_cell()); return generate_collection(engine, *type.name_comparator(), *type.value_comparator(), val_gen); } data_value generate_frozen_map_value(std::mt19937& engine, const map_type_impl& type, value_generator& val_gen, size_t min_size_in_bytes, size_t max_size_in_bytes) { - assert(!type.is_multi_cell()); + SCYLLA_ASSERT(!type.is_multi_cell()); return make_map_value(type.shared_from_this(), generate_frozen_map(engine, *type.get_keys_type(), *type.get_values_type(), val_gen, min_size_in_bytes, max_size_in_bytes)); } @@ -562,7 +563,7 @@ data_value value_generator::generate_atomic_value(std::mt19937& engine, const ab } data_value 
value_generator::generate_atomic_value(std::mt19937& engine, const abstract_type& type, size_t min_size_in_bytes, size_t max_size_in_bytes) { - assert(!type.is_multi_cell()); + SCYLLA_ASSERT(!type.is_multi_cell()); return get_atomic_value_generator(type)(engine, min_size_in_bytes, max_size_in_bytes); } @@ -596,7 +597,7 @@ value_generator::value_generator() } size_t value_generator::min_size(const abstract_type& type) { - assert(!type.is_multi_cell()); + SCYLLA_ASSERT(!type.is_multi_cell()); auto it = _regular_value_min_sizes.find(&type); if (it != _regular_value_min_sizes.end()) { @@ -633,7 +634,7 @@ size_t value_generator::min_size(const abstract_type& type) { } value_generator::atomic_value_generator value_generator::get_atomic_value_generator(const abstract_type& type) { - assert(!type.is_multi_cell()); + SCYLLA_ASSERT(!type.is_multi_cell()); auto it = _regular_value_generators.find(&type); if (it != _regular_value_generators.end()) { @@ -807,7 +808,7 @@ schema_ptr build_random_schema(uint32_t seed, random_schema_specification& spec) auto builder = schema_builder(spec.keyspace_name(), spec.table_name(engine)); auto pk_columns = spec.partition_key_columns(engine); - assert(!pk_columns.empty()); // Let's not pull in boost::test here + SCYLLA_ASSERT(!pk_columns.empty()); // Let's not pull in boost::test here for (size_t pk = 0; pk < pk_columns.size(); ++pk) { builder.with_column(to_bytes(format("pk{}", pk)), std::move(pk_columns[pk]), column_kind::partition_key); } @@ -947,7 +948,7 @@ void decorate_with_timestamps(const schema& schema, std::mt19937& engine, timest } for (auto& [ key, value ] : c.elements) { value.timestamp = ts_gen(engine, timestamp_destination::collection_cell_timestamp, c.tomb.timestamp); - assert(!c.tomb || value.timestamp > c.tomb.timestamp); + SCYLLA_ASSERT(!c.tomb || value.timestamp > c.tomb.timestamp); if (auto expiry_opt = exp_gen(engine, timestamp_destination::collection_cell_timestamp)) { value.expiring = 
data_model::mutation_description::expiry_info{expiry_opt->ttl, expiry_opt->expiry_point}; } @@ -977,7 +978,7 @@ data_model::mutation_description::key random_schema::make_partition_key(uint32_t } data_model::mutation_description::key random_schema::make_clustering_key(uint32_t n, value_generator& gen) const { - assert(_schema->clustering_key_size() > 0); + SCYLLA_ASSERT(_schema->clustering_key_size() > 0); return make_key(n, gen, _schema->clustering_key_columns(), std::numeric_limits::max()); } diff --git a/test/lib/random_utils.hh b/test/lib/random_utils.hh index 84d1659595..99eefa3b89 100644 --- a/test/lib/random_utils.hh +++ b/test/lib/random_utils.hh @@ -16,6 +16,7 @@ #include #include "bytes.hh" +#include "utils/assert.hh" #include "utils/preempt.hh" namespace tests::random { @@ -164,7 +165,7 @@ inline sstring get_sstring() { // Picks a random subset of size `m` from the given vector. template std::vector random_subset(std::vector v, unsigned m, std::mt19937& engine) { - assert(m <= v.size()); + SCYLLA_ASSERT(m <= v.size()); std::shuffle(v.begin(), v.end(), engine); return {v.begin(), v.begin() + m}; } @@ -172,7 +173,7 @@ std::vector random_subset(std::vector v, unsigned m, std::mt19937& engine) // Picks a random subset of size `m` from the set {0, ..., `n` - 1}. 
template std::vector random_subset(unsigned n, unsigned m, std::mt19937& engine) { - assert(m <= n); + SCYLLA_ASSERT(m <= n); std::vector the_set(n); std::iota(the_set.begin(), the_set.end(), T{}); diff --git a/test/lib/reader_lifecycle_policy.hh b/test/lib/reader_lifecycle_policy.hh index bd9b1ea570..1abef8b8f7 100644 --- a/test/lib/reader_lifecycle_policy.hh +++ b/test/lib/reader_lifecycle_policy.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include "readers/multishard.hh" #include @@ -62,12 +63,12 @@ public: } virtual const dht::partition_range* get_read_range() const override { const auto shard = this_shard_id(); - assert(_contexts[shard]); + SCYLLA_ASSERT(_contexts[shard]); return _contexts[shard]->range.get(); } void update_read_range(lw_shared_ptr range) override { const auto shard = this_shard_id(); - assert(_contexts[shard]); + SCYLLA_ASSERT(_contexts[shard]); _contexts[shard]->range = std::move(range); } virtual future<> destroy_reader(stopped_reader reader) noexcept override { diff --git a/test/lib/test_services.cc b/test/lib/test_services.cc index 80e5c3e989..e7ced2b69a 100644 --- a/test/lib/test_services.cc +++ b/test/lib/test_services.cc @@ -17,6 +17,7 @@ #include "gms/feature_service.hh" #include "repair/row_level.hh" #include "replica/compaction_group.hh" +#include "utils/assert.hh" #include "utils/overloaded_functor.hh" #include #include @@ -224,7 +225,7 @@ test_env::impl::impl(test_env_config cfg, sstables::storage_manager* sstm) } if (!storage.is_local_type()) { // remote storage requires uuid-based identifier for naming sstables - assert(use_uuid == uuid_identifiers::yes); + SCYLLA_ASSERT(use_uuid == uuid_identifiers::yes); } } diff --git a/test/manual/enormous_table_scan_test.cc b/test/manual/enormous_table_scan_test.cc index 7a605bc85e..4bcf5a8fc7 100644 --- a/test/manual/enormous_table_scan_test.cc +++ b/test/manual/enormous_table_scan_test.cc @@ -7,6 +7,7 @@ */ +#include "utils/assert.hh" #include #include @@ -52,7 +53,7 @@ 
public: auto ck_to_int = [] (const clustering_key& ck) -> int64_t { auto exploded = ck.explode(); - assert(exploded.size() == 1); + SCYLLA_ASSERT(exploded.size() == 1); return value_cast(long_type->deserialize(exploded[0])); }; diff --git a/test/perf/memory_footprint_test.cc b/test/perf/memory_footprint_test.cc index acf0904a3b..e37466ee51 100644 --- a/test/perf/memory_footprint_test.cc +++ b/test/perf/memory_footprint_test.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include @@ -186,7 +187,7 @@ static sizes calculate_sizes(cache_tracker& tracker, const mutation_settings& se auto cache_initial_occupancy = tracker.region().occupancy().used_space(); - assert(mt->occupancy().used_space() == 0); + SCYLLA_ASSERT(mt->occupancy().used_space() == 0); std::vector muts; for (size_t i = 0; i < settings.partition_count; ++i) { diff --git a/test/perf/perf_collection.cc b/test/perf/perf_collection.cc index 6433de50b0..7196b7a286 100644 --- a/test/perf/perf_collection.cc +++ b/test/perf/perf_collection.cc @@ -29,6 +29,7 @@ struct perf_key_tri_compare { } }; +#include "utils/assert.hh" #include "utils/bptree.hh" using namespace seastar; @@ -98,7 +99,7 @@ public: virtual void insert(per_key_t k) override { _t.emplace(k, 0); } virtual void lower_bound(per_key_t k) override { auto i = _t.lower_bound(k); - assert(i != _t.end()); + SCYLLA_ASSERT(i != _t.end()); } virtual void scan(int batch) override { scan_collection(_t, batch); @@ -118,7 +119,7 @@ public: virtual void clone() override { } virtual void insert_and_erase(per_key_t k) override { auto i = _t.emplace(k, 0); - assert(i.second); + SCYLLA_ASSERT(i.second); i.first.erase(perf_key_compare{}); } virtual void show_stats() override { @@ -149,7 +150,7 @@ public: virtual void insert(per_key_t k) override { _t.emplace(k, 0); } virtual void lower_bound(per_key_t k) override { auto i = _t.get(k); - assert(i != nullptr); + SCYLLA_ASSERT(i != nullptr); } virtual void scan(int 
batch) override { scan_collection(_t, batch); @@ -194,7 +195,7 @@ public: virtual void insert(per_key_t k) override { _t.insert(std::make_unique(k), _cmp); } virtual void lower_bound(per_key_t k) override { auto i = _t.lower_bound(k, _cmp); - assert(i != _t.end()); + SCYLLA_ASSERT(i != _t.end()); } virtual void erase(per_key_t k) override { _t.erase_and_dispose(k, _cmp, [] (perf_intrusive_key* k) noexcept { delete k; }); } virtual void drain(int batch) override { @@ -240,7 +241,7 @@ public: virtual void insert(per_key_t k) override { _s.insert(k); } virtual void lower_bound(per_key_t k) override { auto i = _s.lower_bound(k); - assert(i != _s.end()); + SCYLLA_ASSERT(i != _s.end()); } virtual void scan(int batch) override { scan_collection(_s, batch); @@ -260,7 +261,7 @@ public: virtual void clone() override { } virtual void insert_and_erase(per_key_t k) override { auto i = _s.insert(k); - assert(i.second); + SCYLLA_ASSERT(i.second); _s.erase(i.first); } virtual void show_stats() override { } @@ -273,7 +274,7 @@ public: virtual void insert(per_key_t k) override { _m[k] = 0; } virtual void lower_bound(per_key_t k) override { auto i = _m.lower_bound(k); - assert(i != _m.end()); + SCYLLA_ASSERT(i != _m.end()); } virtual void scan(int batch) override { scan_collection(_m, batch); @@ -293,7 +294,7 @@ public: virtual void clone() override { } virtual void insert_and_erase(per_key_t k) override { auto i = _m.insert({k, 0}); - assert(i.second); + SCYLLA_ASSERT(i.second); _m.erase(i.first); } virtual void show_stats() override { } diff --git a/test/perf/perf_commitlog.cc b/test/perf/perf_commitlog.cc index cd881f4f45..88e991715f 100644 --- a/test/perf/perf_commitlog.cc +++ b/test/perf/perf_commitlog.cc @@ -30,6 +30,7 @@ #include "db/config.hh" #include "db/extensions.hh" #include "db/commitlog/commitlog.hh" +#include "utils/assert.hh" #include "utils/UUID_gen.hh" struct test_config { @@ -120,7 +121,7 @@ struct commitlog_service { {} future<> init(const db::commitlog::config& 
cfg) { - assert(!log); + SCYLLA_ASSERT(!log); log.emplace(co_await db::commitlog::create_commitlog(cfg)); fa.emplace(log->add_flush_handler(std::bind(&commitlog_service::flush_handler, this, std::placeholders::_1, std::placeholders::_2))); } diff --git a/test/perf/perf_fast_forward.cc b/test/perf/perf_fast_forward.cc index 2c3e2b1775..375bdeb174 100644 --- a/test/perf/perf_fast_forward.cc +++ b/test/perf/perf_fast_forward.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include #include @@ -502,7 +503,7 @@ public: Json::Value stats_value; if (summary_result) { - assert(values.size() == 1); + SCYLLA_ASSERT(values.size() == 1); for (size_t i = 0; i < stats_names.size(); ++i) { write_test_values_impl(stats_value, stats_names, values.front()); } @@ -781,8 +782,8 @@ uint64_t consume_all_with_next_partition(mutation_reader& rd) { static void assert_partition_start(mutation_reader& rd) { auto mfopt = rd().get(); - assert(mfopt); - assert(mfopt->is_partition_start()); + SCYLLA_ASSERT(mfopt); + SCYLLA_ASSERT(mfopt->is_partition_start()); } // A dataset with one large partition with many clustered fragments. 
@@ -1194,8 +1195,8 @@ table_config read_config(cql_test_env& env, const sstring& name) { } static unsigned cardinality(int_range r) { - assert(r.start()); - assert(r.end()); + SCYLLA_ASSERT(r.start()); + SCYLLA_ASSERT(r.end()); return r.end()->value() - r.start()->value() + r.start()->is_inclusive() + r.end()->is_inclusive() - 1; } @@ -1281,7 +1282,7 @@ public: results.resize(rs.size()); } { - assert(rs.size() == results.size()); + SCYLLA_ASSERT(rs.size() == results.size()); for (auto j = 0u; j < rs.size(); j++) { results[j].emplace_back(rs[j]); } @@ -1387,7 +1388,7 @@ void test_large_partition_single_key_slice(app_template &app, replica::column_fa }; }); - assert(n_rows > 200); // assumed below + SCYLLA_ASSERT(n_rows > 200); // assumed below run_test_case(app, [&] { // adjacent, no overlap return test_result_vector { diff --git a/test/perf/perf_simple_query.cc b/test/perf/perf_simple_query.cc index dc2eaaa0c5..f4951bb58b 100644 --- a/test/perf/perf_simple_query.cc +++ b/test/perf/perf_simple_query.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include #include @@ -384,7 +385,7 @@ static std::vector do_alternator_test(std::string isolation_level, sharded& mm, sharded& gossiper, test_config& cfg) { - assert(cfg.frontend == test_config::frontend_type::alternator); + SCYLLA_ASSERT(cfg.frontend == test_config::frontend_type::alternator); std::cout << "Running test with config: " << cfg << std::endl; alternator_test_env env(gossiper, qp.local().proxy().container(), mm, qp); @@ -417,7 +418,7 @@ static std::vector do_alternator_test(std::string isolation_level, } static std::vector do_cql_test(cql_test_env& env, test_config& cfg) { - assert(cfg.frontend == test_config::frontend_type::cql); + SCYLLA_ASSERT(cfg.frontend == test_config::frontend_type::cql); std::cout << "Running test with config: " << cfg << std::endl; env.create_table([&cfg] (auto ks_name) { diff --git a/test/perf/perf_sstable.hh 
b/test/perf/perf_sstable.hh index 927afe3173..64d3fc44f3 100644 --- a/test/perf/perf_sstable.hh +++ b/test/perf/perf_sstable.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include "sstables/sstables.hh" @@ -245,12 +246,12 @@ public: auto partitions_per_sstable = _cfg.partitions / _cfg.sstables; if (_cfg.compaction_strategy != sstables::compaction_strategy_type::time_window) { - assert(ret.new_sstables.size() == 1); + SCYLLA_ASSERT(ret.new_sstables.size() == 1); } auto total_keys_written = std::accumulate(ret.new_sstables.begin(), ret.new_sstables.end(), uint64_t(0), [] (uint64_t n, const sstables::shared_sstable& sst) { return n + sst->get_estimated_key_count(); }); - assert(total_keys_written >= partitions_per_sstable); + SCYLLA_ASSERT(total_keys_written >= partitions_per_sstable); auto duration = std::chrono::duration(end - start).count(); return total_keys_written / duration; diff --git a/test/perf/perf_tablets.cc b/test/perf/perf_tablets.cc index 8c63720387..1125c1b906 100644 --- a/test/perf/perf_tablets.cc +++ b/test/perf/perf_tablets.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include @@ -82,7 +83,7 @@ static future<> test_basic_operations(app_template& app) { for (int k = 0; k < rf; ++k) { replicas.push_back({h1, 0}); } - assert(std::cmp_equal(replicas.size(), rf)); + SCYLLA_ASSERT(std::cmp_equal(replicas.size(), rf)); tmap.set_tablet(j, tablet_info{std::move(replicas)}); ++total_tablets; } @@ -117,7 +118,7 @@ static future<> test_basic_operations(app_template& app) { auto time_to_read = duration_in_seconds([&] { tm2 = read_tablet_metadata(e.local_qp()).get(); }); - assert(tm == tm2); + SCYLLA_ASSERT(tm == tm2); testlog.info("Read in {:.6f} [ms]", time_to_read.count() * 1000); diff --git a/test/raft/future_set.hh b/test/raft/future_set.hh index 9502e93907..f95ebc6c58 100644 --- a/test/raft/future_set.hh +++ b/test/raft/future_set.hh @@ -8,6 +8,7 @@ #pragma once +#include 
"utils/assert.hh" #include #include #include @@ -53,7 +54,7 @@ public: co_await _container.v.wait(wake_condition); } - assert(wake_condition()); + SCYLLA_ASSERT(wake_condition()); for (auto& f : _futures) { if (f.available()) { @@ -65,7 +66,7 @@ public: } // No future was available, so `wake_condition()` implies: - assert(timer.now() >= timeout); + SCYLLA_ASSERT(timer.now() >= timeout); co_return std::nullopt; } @@ -88,6 +89,6 @@ public: } ~future_set() { - assert(_futures.empty()); + SCYLLA_ASSERT(_futures.empty()); } }; diff --git a/test/raft/generator.hh b/test/raft/generator.hh index 2f3e5a93e8..dfd00cbd94 100644 --- a/test/raft/generator.hh +++ b/test/raft/generator.hh @@ -18,6 +18,7 @@ #include #include +#include "utils/assert.hh" #include "utils/chunked_vector.hh" #include "test/raft/future_set.hh" @@ -46,7 +47,7 @@ namespace operation { using thread_set = std::unordered_set; thread_id some(const thread_set& s) { - assert(!s.empty()); + SCYLLA_ASSERT(!s.empty()); return *s.begin(); } @@ -57,7 +58,7 @@ auto take_impl(const std::vector& vec, std::index_sequence) { template auto take(const thread_set& s) { - assert(N <= s.size()); + SCYLLA_ASSERT(N <= s.size()); auto end = s.begin(); std::advance(end, N); std::vector vec{s.begin(), end}; @@ -303,8 +304,8 @@ public: , _timer(timer) , _record(std::move(record)) { - assert(!_all_threads.empty()); - assert(_max_pending_interval > raft::logical_clock::duration{0}); + SCYLLA_ASSERT(!_all_threads.empty()); + SCYLLA_ASSERT(_max_pending_interval > raft::logical_clock::duration{0}); } // Run the interpreter and record all operation invocations and completions. 
@@ -317,8 +318,8 @@ public: if (auto r = co_await _invocations.poll(_timer, _poll_timeout)) { auto [res, tid] = std::move(*r); - assert(_all_threads.contains(tid)); - assert(!_free_threads.contains(tid)); + SCYLLA_ASSERT(_all_threads.contains(tid)); + SCYLLA_ASSERT(!_free_threads.contains(tid)); _free_threads.insert(tid); _record(operation::completion { @@ -370,7 +371,7 @@ public: op.thread = some(_free_threads); } - assert(_free_threads.contains(*op.thread)); + SCYLLA_ASSERT(_free_threads.contains(*op.thread)); _free_threads.erase(*op.thread); _record(op); @@ -393,7 +394,7 @@ public: ~interpreter() { // Ensured by `exit()`. - assert(_invocations.empty()); + SCYLLA_ASSERT(_invocations.empty()); } private: @@ -632,7 +633,7 @@ struct on_threads_gen { if (auto i = std::get_if(&op)) { if (i->thread) { - assert(masked_free_threads.contains(*i->thread)); + SCYLLA_ASSERT(masked_free_threads.contains(*i->thread)); } else { // The underlying generator didn't assign a thread so we do it. i->thread = some(masked_free_threads); diff --git a/test/raft/helpers.cc b/test/raft/helpers.cc index 06a68ee6db..4427df36dc 100644 --- a/test/raft/helpers.cc +++ b/test/raft/helpers.cc @@ -10,6 +10,7 @@ // Helper functions for raft tests // +#include "utils/assert.hh" #include #include "helpers.hh" @@ -35,7 +36,7 @@ void election_timeout(raft::fsm& fsm) { } void make_candidate(raft::fsm& fsm) { - assert(fsm.is_follower()); + SCYLLA_ASSERT(fsm.is_follower()); // NOTE: single node skips candidate state while (fsm.is_follower()) { fsm.tick(); @@ -55,8 +56,8 @@ bool compare_log_entry(raft::log_entry_ptr le1, raft::log_entry_ptr le2) { } bool compare_log_entries(raft::log& log1, raft::log& log2, size_t from, size_t to) { - assert(to <= log1.last_idx()); - assert(to <= log2.last_idx()); + SCYLLA_ASSERT(to <= log1.last_idx()); + SCYLLA_ASSERT(to <= log2.last_idx()); for (size_t i = from; i <= to; ++i) { if (!compare_log_entry(log1[i], log2[i])) { return false; diff --git a/test/raft/helpers.hh 
b/test/raft/helpers.hh index 01810f7d27..c7ab3c8156 100644 --- a/test/raft/helpers.hh +++ b/test/raft/helpers.hh @@ -12,6 +12,7 @@ #pragma once +#include "utils/assert.hh" #include #include "test/lib/log.hh" #include "test/lib/random_utils.hh" @@ -89,7 +90,7 @@ public: } bool leadership_transfer_active() const { - assert(is_leader()); + SCYLLA_ASSERT(is_leader()); return bool(leader_state().stepdown); } }; diff --git a/test/raft/randomized_nemesis_test.cc b/test/raft/randomized_nemesis_test.cc index 921764311c..df5806cb3b 100644 --- a/test/raft/randomized_nemesis_test.cc +++ b/test/raft/randomized_nemesis_test.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include #include @@ -159,7 +160,7 @@ public: future take_snapshot() override { auto id = raft::snapshot_id::create_random_id(); - assert(_snapshots.emplace(id, _val).second); + SCYLLA_ASSERT(_snapshots.emplace(id, _val).second); tlogger.trace("{}: took snapshot id {} val {}", _id, id, _val); co_return id; } @@ -170,7 +171,7 @@ public: future<> load_snapshot(raft::snapshot_id id) override { auto it = _snapshots.find(id); - assert(it != _snapshots.end()); // dunno if the snapshot can actually be missing + SCYLLA_ASSERT(it != _snapshots.end()); // dunno if the snapshot can actually be missing tlogger.trace("{}: loading snapshot id {} prev val {} new val {}", _id, id, _val, it->second); _val = it->second; co_return; @@ -192,7 +193,7 @@ public: promise p; auto fut = p.get_future(); auto cmd_id = utils::make_random_uuid(); - assert(_output_channels.emplace(cmd_id, std::move(p)).second); + SCYLLA_ASSERT(_output_channels.emplace(cmd_id, std::move(p)).second); auto guard = defer([this, cmd_id] { auto it = _output_channels.find(cmd_id); @@ -231,7 +232,7 @@ future wait(F f) { auto impl = [] (F f) -> future { struct container { F f; }; container c = co_await f.then_wrapped([] (F f) { return container{std::move(f)}; }); - assert(c.f.available()); + 
SCYLLA_ASSERT(c.f.available()); co_return std::move(c.f); }; @@ -344,7 +345,7 @@ future> call( return make_ready_future>(raft::stopped_error{}); } catch (...) { tlogger.error("unexpected exception from call: {}", std::current_exception()); - assert(false); + SCYLLA_ASSERT(false); } }); } @@ -378,7 +379,7 @@ future> read( co_return timed_out_error{}; } catch (...) { tlogger.error("unexpected exception from `read`: {}", std::current_exception()); - assert(false); + SCYLLA_ASSERT(false); } }; @@ -520,8 +521,8 @@ public: // Message is delivered to us. // The caller must ensure that `abort()` wasn't called yet. void receive(raft::server_id src, message_t payload) { - assert(!_gate.is_closed()); - assert(_client); + SCYLLA_ASSERT(!_gate.is_closed()); + SCYLLA_ASSERT(_client); auto& c = *_client; std::visit(make_visitor( @@ -934,7 +935,7 @@ public: void store_snapshot(const raft::snapshot_descriptor& snap, State snap_data, size_t preserve_log_entries) { // The snapshot's index cannot be smaller than the index of the first stored entry minus one; // that would create a ``gap'' in the log. - assert(_stored_entries.empty() || snap.idx + 1 >= _stored_entries.front()->idx); + SCYLLA_ASSERT(_stored_entries.empty() || snap.idx + 1 >= _stored_entries.front()->idx); _stored_snapshot = {snap, std::move(snap_data)}; @@ -960,14 +961,14 @@ public: // The raft server is supposed to provide entries in strictly increasing order, // hence the following assertions. 
if (_stored_entries.empty()) { - assert(entries.front()->idx == _stored_snapshot.first.idx + 1); + SCYLLA_ASSERT(entries.front()->idx == _stored_snapshot.first.idx + 1); } else { - assert(entries.front()->idx == _stored_entries.back()->idx + 1); + SCYLLA_ASSERT(entries.front()->idx == _stored_entries.back()->idx + 1); } _stored_entries.push_back(entries[0]); for (size_t i = 1; i < entries.size(); ++i) { - assert(entries[i]->idx == entries[i-1]->idx + 1); + SCYLLA_ASSERT(entries[i]->idx == entries[i-1]->idx + 1); _stored_entries.push_back(entries[i]); } } @@ -1012,7 +1013,7 @@ public: // Stores not only the snapshot descriptor but also the corresponding snapshot. virtual future<> store_snapshot_descriptor(const raft::snapshot_descriptor& snap, size_t preserve_log_entries) override { auto it = _snapshots.find(snap.id); - assert(it != _snapshots.end()); + SCYLLA_ASSERT(it != _snapshots.end()); _persistence->store_snapshot(snap, it->second, preserve_log_entries); co_return; @@ -1054,7 +1055,7 @@ class direct_fd_pinger final : public direct_failure_detector::pinger { public: direct_fd_pinger(::rpc& rpc) : _rpc(rpc) { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); } // Can be called on any shard. @@ -1080,7 +1081,7 @@ class direct_fd_clock final : public direct_failure_detector::clock { public: direct_fd_clock() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); } void tick() { @@ -1274,7 +1275,7 @@ future reconfigure( co_return timed_out_error{}; } catch (...) { tlogger.error("unexpected exception from set_configuration: {}", std::current_exception()); - assert(false); + SCYLLA_ASSERT(false); } } @@ -1312,7 +1313,7 @@ future modify_config( co_return e; } catch (...) 
{ tlogger.error("unexpected exception from modify_config: {}", std::current_exception()); - assert(false); + SCYLLA_ASSERT(false); } } @@ -1411,7 +1412,7 @@ public: } ~raft_server() { - assert(!_started || _stopped); + SCYLLA_ASSERT(!_started || _stopped); } raft_server(const raft_server&&) = delete; @@ -1423,7 +1424,7 @@ public: static const raft::logical_clock::duration fd_ping_period = 10_t; static const raft::logical_clock::duration fd_ping_timeout = 30_t; - assert(!_started); + SCYLLA_ASSERT(!_started); _started = true; // _fd_service must be started before raft server, @@ -1462,7 +1463,7 @@ public: } void tick() { - assert(_started); + SCYLLA_ASSERT(_started); _rpc.tick(); _server->tick(); _fd_clock->tick(); @@ -1472,7 +1473,7 @@ public: typename M::input_t input, raft::logical_clock::time_point timeout, logical_timer& timer) { - assert(_started); + SCYLLA_ASSERT(_started); try { co_return co_await with_gate(_gate, [this, input = std::move(input), timeout, &timer] { return ::call(std::move(input), timeout, timer, *_server, _sm); @@ -1485,7 +1486,7 @@ public: future> read( raft::logical_clock::time_point timeout, logical_timer& timer) { - assert(_started); + SCYLLA_ASSERT(_started); try { co_return co_await with_gate(_gate, [this, timeout, &timer] { return ::read(timeout, timer, *_server, _sm); @@ -1499,7 +1500,7 @@ public: const std::vector>& ids, raft::logical_clock::time_point timeout, logical_timer& timer) { - assert(_started); + SCYLLA_ASSERT(_started); try { co_return co_await with_gate(_gate, [this, &ids, timeout, &timer] { return ::reconfigure(ids, timeout, timer, *_server); @@ -1514,7 +1515,7 @@ public: std::vector deleted, raft::logical_clock::time_point timeout, logical_timer& timer) { - assert(_started); + SCYLLA_ASSERT(_started); try { co_return co_await with_gate(_gate, [this, &added, deleted = std::move(deleted), timeout, &timer] { return ::modify_config(added, std::move(deleted), timeout, timer, *_server); @@ -1541,7 +1542,7 @@ public: } void 
deliver(raft::server_id src, const typename rpc::message_t& m) { - assert(_started); + SCYLLA_ASSERT(_started); if (!_gate.is_closed()) { _rpc.receive(src, m); } @@ -1655,7 +1656,7 @@ public: , _network(std::move(cfg.network_delay), std::move(cfg.rnd), [this] (raft::server_id src, raft::server_id dst, const message_t& m) { auto& n = _routes.at(dst); - assert(n._persistence); + SCYLLA_ASSERT(n._persistence); if (n._server) { n._server->deliver(src, m); @@ -1664,7 +1665,7 @@ public: } ~environment() { - assert(_routes.empty() || _stopped); + SCYLLA_ASSERT(_routes.empty() || _stopped); } environment(const environment&) = delete; @@ -1717,7 +1718,7 @@ public: ._persistence = make_lw_shared>(first ? std::optional{id} : std::nullopt, M::init), ._server = nullptr, }); - assert(inserted); + SCYLLA_ASSERT(inserted); return id; } @@ -1727,8 +1728,8 @@ public: future<> start_server(raft::server_id id) { return with_gate(_gate, [this, id] () -> future<> { auto& n = _routes.at(id); - assert(n._persistence); - assert(!n._server); + SCYLLA_ASSERT(n._persistence); + SCYLLA_ASSERT(!n._server); lw_shared_ptr*> this_srv_addr = make_lw_shared*>(nullptr); auto srv = raft_server::create(id, n._persistence, _fd_convict_threshold, n._cfg, @@ -1765,8 +1766,8 @@ public: future<> stop(raft::server_id id) { return with_gate(_gate, [this, id] () -> future<> { auto& n = _routes.at(id); - assert(n._persistence); - assert(n._server); + SCYLLA_ASSERT(n._persistence); + SCYLLA_ASSERT(n._server); co_await n._server->abort(); n._server = nullptr; @@ -1781,8 +1782,8 @@ public: _gate.check(); auto& n = _routes.at(id); - assert(n._persistence); - assert(n._server); + SCYLLA_ASSERT(n._persistence); + SCYLLA_ASSERT(n._server); // Let the 'crashed' server continue working on its copy of persistence; // none of that work will be seen by later servers restarted on this node @@ -2086,7 +2087,7 @@ struct wait_for_leader { } }(env.weak_from_this(), std::move(nodes))); - assert(l != raft::server_id{}); + 
SCYLLA_ASSERT(l != raft::server_id{}); // Note: `l` may no longer be a leader at this point if there was a yield at the `co_await` above // and `l` decided to step down, was restarted, or just got removed from the configuration. @@ -2127,7 +2128,7 @@ SEASTAR_TEST_CASE(basic_test) { auto leader_id = co_await env.new_server(true); // Wait at most 1000 ticks for the server to elect itself as a leader. - assert(co_await wait_for_leader{}(env, {leader_id}, timer, timer.now() + 1000_t) == leader_id); + SCYLLA_ASSERT(co_await wait_for_leader{}(env, {leader_id}, timer, timer.now() + 1000_t) == leader_id); auto call = [&] (ExReg::input_t input, raft::logical_clock::duration timeout) { return env.call(leader_id, std::move(input), timer.now() + timeout, timer); @@ -2138,7 +2139,7 @@ SEASTAR_TEST_CASE(basic_test) { }; for (int i = 1; i <= 100; ++i) { - assert(eq(co_await call(ExReg::exchange{i}, 100_t), ExReg::ret{i - 1})); + SCYLLA_ASSERT(eq(co_await call(ExReg::exchange{i}, 100_t), ExReg::ret{i - 1})); } tlogger.debug("100 exchanges - single server - passed"); @@ -2148,14 +2149,14 @@ SEASTAR_TEST_CASE(basic_test) { tlogger.debug("Started 2 more servers, changing configuration"); - assert(std::holds_alternative( + SCYLLA_ASSERT(std::holds_alternative( co_await env.reconfigure(leader_id, {leader_id, id2, id3}, timer.now() + 100_t, timer))); tlogger.debug("Configuration changed"); co_await call(ExReg::exchange{0}, 100_t); for (int i = 1; i <= 100; ++i) { - assert(eq(co_await call(ExReg::exchange{i}, 100_t), ExReg::ret{i - 1})); + SCYLLA_ASSERT(eq(co_await call(ExReg::exchange{i}, 100_t), ExReg::ret{i - 1})); } tlogger.debug("100 exchanges - three servers - passed"); @@ -2167,7 +2168,7 @@ SEASTAR_TEST_CASE(basic_test) { co_await timer.sleep(2_t); } for (int i = 0; i < 100; ++i) { - assert(eq(co_await std::move(futs[i]), ExReg::ret{100})); + SCYLLA_ASSERT(eq(co_await std::move(futs[i]), ExReg::ret{100})); } tlogger.debug("100 concurrent reads - three servers - passed"); @@ 
-2218,7 +2219,7 @@ SEASTAR_TEST_CASE(test_frequent_snapshotting) { }; // Wait at most 1000 ticks for the server to elect itself as a leader. - assert(co_await wait_for_leader{}(env, {leader_id}, timer, timer.now() + 1000_t) == leader_id); + SCYLLA_ASSERT(co_await wait_for_leader{}(env, {leader_id}, timer, timer.now() + 1000_t) == leader_id); auto id2 = co_await env.new_server(false, server_config); auto id3 = co_await env.new_server(false, server_config); @@ -2229,14 +2230,14 @@ SEASTAR_TEST_CASE(test_frequent_snapshotting) { tlogger.debug("Started 2 more servers, changing configuration"); - assert(std::holds_alternative( + SCYLLA_ASSERT(std::holds_alternative( co_await env.reconfigure(leader_id, {leader_id, id2, id3}, timer.now() + 100_t, timer))); tlogger.debug("Configuration changed"); co_await call(ExReg::exchange{0}, 100_t); for (int i = 1; i <= 100; ++i) { - assert(eq(co_await call(ExReg::exchange{i}, 100_t), ExReg::ret{i - 1})); + SCYLLA_ASSERT(eq(co_await call(ExReg::exchange{i}, 100_t), ExReg::ret{i - 1})); } tlogger.debug("100 exchanges - three servers - passed"); @@ -2248,7 +2249,7 @@ SEASTAR_TEST_CASE(test_frequent_snapshotting) { co_await timer.sleep(2_t); } for (int i = 0; i < 100; ++i) { - assert(eq(co_await std::move(futs[i]), ExReg::ret{100})); + SCYLLA_ASSERT(eq(co_await std::move(futs[i]), ExReg::ret{100})); } tlogger.debug("100 concurrent reads - three servers - passed"); @@ -2281,23 +2282,23 @@ SEASTAR_TEST_CASE(snapshot_uses_correct_term_test) { // It's easier to catch the problem when we send entries one by one, not in batches. 
.append_request_threshold = 1, }); - assert(co_await wait_for_leader{}(env, {id1}, timer, timer.now() + 1000_t) == id1); + SCYLLA_ASSERT(co_await wait_for_leader{}(env, {id1}, timer, timer.now() + 1000_t) == id1); auto id2 = co_await env.new_server(false, raft::server::configuration{ .append_request_threshold = 1, }); - assert(std::holds_alternative( + SCYLLA_ASSERT(std::holds_alternative( co_await env.reconfigure(id1, {id1, id2}, timer.now() + 100_t, timer))); // Append a bunch of entries for (int i = 1; i <= 10; ++i) { - assert(std::holds_alternative( + SCYLLA_ASSERT(std::holds_alternative( co_await env.call(id1, ExReg::exchange{0}, timer.now() + 100_t, timer))); } - assert(env.is_leader(id1)); + SCYLLA_ASSERT(env.is_leader(id1)); // Force a term increase by partitioning the network and waiting for the leader to step down tlogger.trace("add grudge"); @@ -2332,7 +2333,7 @@ SEASTAR_TEST_CASE(snapshot_uses_correct_term_test) { .snapshot_threshold = 5, .snapshot_trailing = 2, }); - assert(std::holds_alternative( + SCYLLA_ASSERT(std::holds_alternative( co_await env.reconfigure(l, {l, id3}, timer.now() + 1000_t, timer))); }); } @@ -2361,7 +2362,7 @@ SEASTAR_TEST_CASE(snapshotting_preserves_config_test) { .snapshot_threshold = 5, .snapshot_trailing = 1, }); - assert(co_await wait_for_leader{}(env, {id1}, timer, timer.now() + 1000_t) == id1); + SCYLLA_ASSERT(co_await wait_for_leader{}(env, {id1}, timer, timer.now() + 1000_t) == id1); auto id2 = co_await env.new_server(false, raft::server::configuration{ @@ -2369,16 +2370,16 @@ SEASTAR_TEST_CASE(snapshotting_preserves_config_test) { .snapshot_trailing = 1, }); - assert(std::holds_alternative( + SCYLLA_ASSERT(std::holds_alternative( co_await env.reconfigure(id1, {id1, id2}, timer.now() + 100_t, timer))); // Append a bunch of entries for (int i = 1; i <= 10; ++i) { - assert(std::holds_alternative( + SCYLLA_ASSERT(std::holds_alternative( co_await env.call(id1, ExReg::exchange{0}, timer.now() + 100_t, timer))); } - 
assert(env.is_leader(id1)); + SCYLLA_ASSERT(env.is_leader(id1)); // Partition the network, forcing the leader to step down. tlogger.trace("add grudge"); @@ -2422,16 +2423,16 @@ SEASTAR_TEST_CASE(removed_follower_with_forwarding_learns_about_removal) { }; auto id1 = co_await env.new_server(true, cfg); - assert(co_await wait_for_leader{}(env, {id1}, timer, timer.now() + 1000_t) == id1); + SCYLLA_ASSERT(co_await wait_for_leader{}(env, {id1}, timer, timer.now() + 1000_t) == id1); auto id2 = co_await env.new_server(false, cfg); - assert(std::holds_alternative( + SCYLLA_ASSERT(std::holds_alternative( co_await env.reconfigure(id1, {id1, id2}, timer.now() + 100_t, timer))); // Server 2 forwards the entry that removes it to server 1. // We want server 2 to eventually learn from server 1 that it was removed, // so the call finishes (no timeout). - assert(std::holds_alternative( + SCYLLA_ASSERT(std::holds_alternative( co_await env.modify_config(id2, std::vector{}, {id2}, timer.now() + 100_t, timer))); }); } @@ -2468,16 +2469,16 @@ SEASTAR_TEST_CASE(remove_leader_with_forwarding_finishes) { }; auto id1 = co_await env.new_server(true, cfg); - assert(co_await wait_for_leader{}(env, {id1}, timer, timer.now() + 1000_t) == id1); + SCYLLA_ASSERT(co_await wait_for_leader{}(env, {id1}, timer, timer.now() + 1000_t) == id1); auto id2 = co_await env.new_server(false, cfg); - assert(std::holds_alternative( + SCYLLA_ASSERT(std::holds_alternative( co_await env.reconfigure(id1, {id1, id2}, timer.now() + 200_t, timer))); // Server 2 forwards the entry that removes server 1 to server 1. // We want server 2 to either learn from server 1 about the removal, // or become a leader and learn from itself; in both cases the call should finish (no timeout). 
auto result = co_await env.modify_config(id2, std::vector{}, {id1}, timer.now() + 200_t, timer); tlogger.info("env.modify_config result {}", result); - assert(std::holds_alternative(result)); + SCYLLA_ASSERT(std::holds_alternative(result)); }); } @@ -2573,7 +2574,7 @@ struct raft_call { // TODO a stable contact point used by a given thread would be preferable; // the thread would switch only if necessary (the contact point left the configuration). // Currently we choose the contact point randomly each time. - assert(s.known.size() > 0); + SCYLLA_ASSERT(s.known.size() > 0); static std::mt19937 engine{0}; auto it = s.known.begin(); @@ -2614,7 +2615,7 @@ struct raft_read { }; future execute(state_type& s, const operation::context& ctx) { - assert(s.known.size() > 0); + SCYLLA_ASSERT(s.known.size() > 0); static std::mt19937 engine{0}; auto it = s.known.begin(); @@ -2745,7 +2746,7 @@ struct reconfiguration { tlogger.debug("reconfig modify_config start add {} remove {} start tid {} start time {} current time {} contact {}", added, removed, ctx.thread, ctx.start, s.timer.now(), contact); - assert(s.known.size() > 0); + SCYLLA_ASSERT(s.known.size() > 0); auto [res, last] = co_await bouncing{ [&added, &removed, timeout = s.timer.now() + timeout, &timer = s.timer, &env = s.env] (raft::server_id id) { return env.modify_config(id, added, removed, timeout, timer); @@ -2790,7 +2791,7 @@ struct reconfiguration { tlogger.debug("reconfig set_configuration start nodes {} start tid {} start time {} current time {} contact {}", nodes_voters, ctx.thread, ctx.start, s.timer.now(), contact); - assert(s.known.size() > 0); + SCYLLA_ASSERT(s.known.size() > 0); auto [res, last] = co_await bouncing{[&nodes_voters, timeout = s.timer.now() + timeout, &timer = s.timer, &env = s.env] (raft::server_id id) { return env.reconfigure(id, nodes_voters, timeout, timer); }}(s.timer, s.known, contact, 10, 10_t, 10_t); @@ -2820,7 +2821,7 @@ struct reconfiguration { future execute(state_type& s, const 
operation::context& ctx) { static std::bernoulli_distribution bdist{0.5}; - assert(s.all_servers.size() > 1); + SCYLLA_ASSERT(s.all_servers.size() > 1); std::vector nodes{s.all_servers.begin(), s.all_servers.end()}; std::shuffle(nodes.begin(), nodes.end(), s.rnd); @@ -2867,7 +2868,7 @@ struct stop_crash { using result_type = stop_crash_result; future execute(state_type& s, const operation::context& ctx) { - assert(s.known.size() > 0); + SCYLLA_ASSERT(s.known.size() > 0); auto it = s.known.begin(); std::advance(it, std::uniform_int_distribution{0, s.known.size() - 1}(s.rnd)); auto srv = *it; @@ -2931,7 +2932,7 @@ public: BOOST_REQUIRE_LT(d, magic); auto y = (d + x) % magic; - assert(digest_remove(y, x) == d); + SCYLLA_ASSERT(digest_remove(y, x) == d); return y; } @@ -2948,8 +2949,8 @@ public: } append_seq append(elem_t x) const { - assert(_seq); - assert(_end <= _seq->size()); + SCYLLA_ASSERT(_seq); + SCYLLA_ASSERT(_end <= _seq->size()); auto seq = _seq; if (_end < seq->size()) { @@ -2963,9 +2964,9 @@ public: } elem_t operator[](size_t idx) const { - assert(_seq); - assert(idx < _end); - assert(_end <= _seq->size()); + SCYLLA_ASSERT(_seq); + SCYLLA_ASSERT(idx < _end); + SCYLLA_ASSERT(_end <= _seq->size()); return (*_seq)[idx]; } @@ -2974,14 +2975,14 @@ public: } size_t size() const { - assert(_end <= _seq->size()); + SCYLLA_ASSERT(_end <= _seq->size()); return _end; } std::pair pop() const { - assert(_seq); - assert(_end <= _seq->size()); - assert(0 < _end); + SCYLLA_ASSERT(_seq); + SCYLLA_ASSERT(_end <= _seq->size()); + SCYLLA_ASSERT(0 < _end); return {{_seq, _end - 1, digest_remove(_digest, (*_seq)[_end - 1])}, (*_seq)[_end - 1]}; } @@ -3070,15 +3071,15 @@ struct append_reg_model { std::unordered_map reads; void invocation(elem_t x) { - assert(!index.contains(x)); - assert(!in_progress.contains(x)); + SCYLLA_ASSERT(!index.contains(x)); + SCYLLA_ASSERT(!in_progress.contains(x)); in_progress.insert(x); } void return_success(elem_t x, append_seq prev) { - 
assert(!returned.contains(x)); - assert(x != 0); - assert(!prev.empty()); + SCYLLA_ASSERT(!returned.contains(x)); + SCYLLA_ASSERT(x != 0); + SCYLLA_ASSERT(!prev.empty()); try { completion(x, prev); } catch (inconsistency& e) { @@ -3089,20 +3090,20 @@ struct append_reg_model { } void return_failure(elem_t x) { - assert(!index.contains(x)); - assert(in_progress.contains(x)); + SCYLLA_ASSERT(!index.contains(x)); + SCYLLA_ASSERT(in_progress.contains(x)); banned.insert(x); in_progress.erase(x); } void start_read(int32_t id) { auto [_, inserted] = reads.emplace(id, seq.back().elem); - assert(inserted); + SCYLLA_ASSERT(inserted); } void read_success(int32_t id, append_seq result) { auto read = reads.find(id); - assert(read != reads.end()); + SCYLLA_ASSERT(read != reads.end()); size_t idx = 0; for (; idx < result.size(); ++idx) { @@ -3130,21 +3131,21 @@ struct append_reg_model { private: void completion(elem_t x, append_seq prev) { if (prev.empty()) { - assert(x == 0); + SCYLLA_ASSERT(x == 0); return; } - assert(x != 0); - assert(!banned.contains(x)); - assert(in_progress.contains(x) || index.contains(x)); + SCYLLA_ASSERT(x != 0); + SCYLLA_ASSERT(!banned.contains(x)); + SCYLLA_ASSERT(in_progress.contains(x) || index.contains(x)); auto [prev_prev, prev_x] = prev.pop(); if (auto it = index.find(x); it != index.end()) { // This element was already completed. auto idx = it->second; - assert(0 < idx); - assert(idx < seq.size()); + SCYLLA_ASSERT(0 < idx); + SCYLLA_ASSERT(idx < seq.size()); if (prev_x != seq[idx - 1].elem) { throw inconsistency{format( @@ -3177,7 +3178,7 @@ private: completion(prev_x, std::move(prev_prev)); // Check that the existing tail matches our tail. 
- assert(!seq.empty()); + SCYLLA_ASSERT(!seq.empty()); if (prev_x != seq.back().elem) { throw inconsistency{format( "new completion (elem: {}) but prev elem does not match existing model" @@ -3293,7 +3294,7 @@ SEASTAR_TEST_CASE(basic_generator_test) { auto leader_id = co_await env.new_server(true, srv_cfg); // Wait for the server to elect itself as a leader. - assert(co_await wait_for_leader{}(env, {leader_id}, timer, timer.now() + 1000_t) == leader_id); + SCYLLA_ASSERT(co_await wait_for_leader{}(env, {leader_id}, timer, timer.now() + 1000_t) == leader_id); size_t no_all_servers = 10; std::vector all_servers{leader_id}; @@ -3326,7 +3327,7 @@ SEASTAR_TEST_CASE(basic_generator_test) { known_config.insert(all_servers[i]); } - assert(std::holds_alternative( + SCYLLA_ASSERT(std::holds_alternative( co_await env.reconfigure(leader_id, std::vector{known_config.begin(), known_config.end()}, timer.now() + 100_t, timer))); @@ -3418,7 +3419,7 @@ SEASTAR_TEST_CASE(basic_generator_test) { either( stagger(seed, timer.now(), 0_t, 50_t, sequence(1, [] (int32_t i) { - assert(i > 0); + SCYLLA_ASSERT(i > 0); return op_type{raft_call{AppendReg::append{i}, 200_t}}; }) ), @@ -3462,7 +3463,7 @@ SEASTAR_TEST_CASE(basic_generator_test) { void operator()(operation::completion c) { auto res = std::get_if(&c.result); - assert(res); + SCYLLA_ASSERT(res); if (auto call_res = std::get_if::result_type>(res)) { std::visit(make_visitor( @@ -3478,8 +3479,8 @@ SEASTAR_TEST_CASE(basic_generator_test) { ++_stats.failures; }, [this] (raft::commit_status_unknown& e) { - // TODO assert: only allowed if reconfigurations happen? - // assert(false); TODO debug this + // TODO SCYLLA_ASSERT: only allowed if reconfigurations happen? 
+ // SCYLLA_ASSERT(false); TODO debug this ++_stats.failures; }, [this] (auto&) { @@ -3525,7 +3526,7 @@ SEASTAR_TEST_CASE(basic_generator_test) { } }); - assert(false); + SCYLLA_ASSERT(false); } tlogger.info("Finished generator run, time: {}, invocations: {}, successes: {}, failures: {}, total: {}", @@ -3546,7 +3547,7 @@ SEASTAR_TEST_CASE(basic_generator_test) { std::vector{all_servers.begin(), all_servers.end()}, timer, limit) .handle_exception_type([&timer, now] (logical_timer::timed_out) -> raft::server_id { tlogger.error("Failed to find a leader after {} ticks at the end of test.", timer.now() - now); - assert(false); + SCYLLA_ASSERT(false); }); if (env.is_leader(leader)) { @@ -3557,13 +3558,13 @@ SEASTAR_TEST_CASE(basic_generator_test) { } auto config = env.get_configuration(leader); - assert(config); + SCYLLA_ASSERT(config); tlogger.info("Leader {} configuration: current {} previous {}", leader, config->current, config->previous); for (auto& s: all_servers) { if (env.is_leader(s) && s != leader) { auto conf = env.get_configuration(s); - assert(conf); + SCYLLA_ASSERT(conf); tlogger.info("There is another leader: {}, configuration: current {} previous {}", s, conf->current, conf->previous); } } @@ -3584,6 +3585,6 @@ SEASTAR_TEST_CASE(basic_generator_test) { } tlogger.error("Failed to obtain a final successful response at the end of the test. 
Number of attempts: {}", cnt); - assert(false); + SCYLLA_ASSERT(false); }); } diff --git a/test/raft/replication.hh b/test/raft/replication.hh index d53ce426bd..8cc24685bd 100644 --- a/test/raft/replication.hh +++ b/test/raft/replication.hh @@ -26,6 +26,7 @@ #include "raft/server.hh" #include "serializer.hh" #include "serializer_impl.hh" +#include "utils/assert.hh" #include "utils/xx_hasher.hh" #include "utils/to_string.hh" #include "test/raft/helpers.hh" @@ -1287,7 +1288,7 @@ future<> raft_cluster::partition(::partition p) { } else if (std::holds_alternative(s)) { auto range = std::get(s); for (size_t id = range.start; id <= range.end; id++) { - assert(id < _servers.size()); + SCYLLA_ASSERT(id < _servers.size()); partition_servers.insert(id); } } else { diff --git a/test/raft/ticker.hh b/test/raft/ticker.hh index 9c9afa7086..e6432d1fac 100644 --- a/test/raft/ticker.hh +++ b/test/raft/ticker.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include #include @@ -29,13 +30,13 @@ public: ticker(ticker&&) = delete; ~ticker() { - assert(!_ticker); + SCYLLA_ASSERT(!_ticker); } using on_tick_t = noncopyable_function(uint64_t)>; void start(on_tick_t fun, uint64_t limit = std::numeric_limits::max()) { - assert(!_ticker); + SCYLLA_ASSERT(!_ticker); _ticker = tick(std::move(fun), limit); } @@ -58,6 +59,6 @@ private: } _logger.error("ticker: limit reached"); - assert(false); + SCYLLA_ASSERT(false); } }; diff --git a/test/unit/bptree_compaction_test.cc b/test/unit/bptree_compaction_test.cc index 40aece75e4..3190077f17 100644 --- a/test/unit/bptree_compaction_test.cc +++ b/test/unit/bptree_compaction_test.cc @@ -16,6 +16,7 @@ constexpr int TEST_NODE_SIZE = 7; #include "tree_test_key.hh" +#include "utils/assert.hh" #include "utils/bptree.hh" #include "bptree_validation.hh" #include "collection_stress.hh" @@ -63,7 +64,7 @@ int main(int argc, char **argv) { /* insert */ [&] (int key) { test_key k(key); auto ti = t->emplace(copy_key(k), k); - assert(ti.second); + 
SCYLLA_ASSERT(ti.second); }, /* erase */ [&] (int key) { t->erase(test_key(key)); diff --git a/test/unit/bptree_stress_test.cc b/test/unit/bptree_stress_test.cc index bb6f4b90a1..36c3c6e17a 100644 --- a/test/unit/bptree_stress_test.cc +++ b/test/unit/bptree_stress_test.cc @@ -14,6 +14,7 @@ constexpr int TEST_NODE_SIZE = 16; #include "tree_test_key.hh" +#include "utils/assert.hh" #include "utils/bptree.hh" #include "bptree_validation.hh" #include "collection_stress.hh" @@ -78,7 +79,7 @@ int main(int argc, char **argv) { if (rep % 2 != 1) { auto ir = t->emplace(copy_key(k), k); - assert(ir.second); + SCYLLA_ASSERT(ir.second); } else { auto ir = t->lower_bound(k); ir.emplace_before(copy_key(k), test_key_compare{}, k); @@ -107,7 +108,7 @@ int main(int argc, char **argv) { auto ni = ri; ni++; auto eni = ri.erase(test_key_compare{}); - assert(ni == eni); + SCYLLA_ASSERT(ni == eni); } oracle.erase(key); diff --git a/test/unit/btree_compaction_test.cc b/test/unit/btree_compaction_test.cc index dd8d978b22..1d9cc00011 100644 --- a/test/unit/btree_compaction_test.cc +++ b/test/unit/btree_compaction_test.cc @@ -14,6 +14,7 @@ constexpr int TEST_NODE_SIZE = 7; constexpr int TEST_LINEAR_THRESHOLD = 19; #include "tree_test_key.hh" +#include "utils/assert.hh" #include "utils/intrusive_btree.hh" #include "btree_validation.hh" #include "collection_stress.hh" @@ -58,7 +59,7 @@ int main(int argc, char **argv) { /* insert */ [&] (int key) { auto k = alloc_strategy_unique_ptr(current_allocator().construct(key)); auto ti = t->insert(std::move(k), test_key_tri_compare{}); - assert(ti.second); + SCYLLA_ASSERT(ti.second); }, /* erase */ [&] (int key) { auto deleter = current_deleter(); diff --git a/test/unit/btree_stress_test.cc b/test/unit/btree_stress_test.cc index 2da5a0c716..3c4131da79 100644 --- a/test/unit/btree_stress_test.cc +++ b/test/unit/btree_stress_test.cc @@ -16,6 +16,7 @@ constexpr int TEST_NODE_SIZE = 8; constexpr int TEST_LINEAR_THRESH = 21; +#include "utils/assert.hh" 
#include "utils/intrusive_btree.hh" #include "btree_validation.hh" #include "test/unit/tree_test_key.hh" @@ -68,7 +69,7 @@ int main(int argc, char **argv) { stress_collection(cfg, /* insert */ [&] (int key) { auto ir = t->insert(std::make_unique(key), cmp); - assert(ir.second); + SCYLLA_ASSERT(ir.second); oracle[key] = key; if (itv++ % 7 == 0) { diff --git a/test/unit/collection_stress.hh b/test/unit/collection_stress.hh index 085982b26b..c014f4bcc6 100644 --- a/test/unit/collection_stress.hh +++ b/test/unit/collection_stress.hh @@ -12,6 +12,7 @@ #include #include #include +#include "utils/assert.hh" #include "utils/logalloc.hh" struct stress_config { @@ -112,13 +113,13 @@ public: } void link(reference& other) { - assert(_ref == nullptr); + SCYLLA_ASSERT(_ref == nullptr); _ref = &other; other._ref = this; } reference* get() { - assert(_ref != nullptr); + SCYLLA_ASSERT(_ref != nullptr); return _ref; } }; diff --git a/test/unit/radix_tree_compaction_test.cc b/test/unit/radix_tree_compaction_test.cc index 300374815b..2652c1b5f5 100644 --- a/test/unit/radix_tree_compaction_test.cc +++ b/test/unit/radix_tree_compaction_test.cc @@ -11,6 +11,7 @@ #include #include +#include "utils/assert.hh" #include "utils/compact-radix-tree.hh" #include "radix_tree_printer.hh" #include "collection_stress.hh" @@ -86,11 +87,11 @@ int main(int argc, char **argv) { unsigned nr = 0; auto ti = t->begin(); while (ti != t->end()) { - assert(ti->value() == ti.key()); + SCYLLA_ASSERT(ti->value() == ti.key()); nr++; ti++; } - assert(nr == col_size); + SCYLLA_ASSERT(nr == col_size); }, /* clear */ [&] { t->clear(); diff --git a/test/unit/radix_tree_stress_test.cc b/test/unit/radix_tree_stress_test.cc index 2435d02453..8f49be9d92 100644 --- a/test/unit/radix_tree_stress_test.cc +++ b/test/unit/radix_tree_stress_test.cc @@ -14,6 +14,7 @@ #include #include +#include "utils/assert.hh" #include "utils/compact-radix-tree.hh" #include "radix_tree_printer.hh" #include "collection_stress.hh" @@ -97,30 
+98,30 @@ int main(int argc, char **argv) { if (vld == validate::oracle) { for (auto&& d : oracle) { test_data* td = t->get(d.first); - assert(td != nullptr); - assert(td->value() == d.second.value()); + SCYLLA_ASSERT(td != nullptr); + SCYLLA_ASSERT(td->value() == d.second.value()); } vld = validate::iterator; } else if (vld == validate::iterator) { unsigned nr = 0; auto ti = t->begin(); while (ti != t->end()) { - assert(ti->value() == ti.key()); + SCYLLA_ASSERT(ti->value() == ti.key()); nr++; ti++; - assert(nr <= col_size); + SCYLLA_ASSERT(nr <= col_size); } - assert(nr == col_size); + SCYLLA_ASSERT(nr == col_size); vld = validate::walk; } else if (vld == validate::walk) { unsigned nr = 0; t->walk([&nr, col_size] (unsigned idx, test_data& td) { - assert(idx == td.value()); + SCYLLA_ASSERT(idx == td.value()); nr++; - assert(nr <= col_size); + SCYLLA_ASSERT(nr <= col_size); return true; }); - assert(nr == col_size); + SCYLLA_ASSERT(nr == col_size); vld = validate::lower_bound; } else if (vld == validate::lower_bound) { unsigned nr = 0; @@ -130,12 +131,12 @@ int main(int argc, char **argv) { if (td == nullptr) { break; } - assert(td->value() >= idx); + SCYLLA_ASSERT(td->value() >= idx); nr++; idx = td->value() + 1; - assert(nr <= col_size); + SCYLLA_ASSERT(nr <= col_size); } - assert(nr == col_size); + SCYLLA_ASSERT(nr == col_size); vld = validate::oracle; } }, diff --git a/test/unit/row_cache_alloc_stress_test.cc b/test/unit/row_cache_alloc_stress_test.cc index 7e8d83e537..0c46e34025 100644 --- a/test/unit/row_cache_alloc_stress_test.cc +++ b/test/unit/row_cache_alloc_stress_test.cc @@ -12,6 +12,7 @@ #include #include +#include "utils/assert.hh" #include "utils/logalloc.hh" #include "row_cache.hh" #include "log.hh" @@ -109,7 +110,7 @@ int main(int argc, char** argv) { // We need to have enough Free memory to copy memtable into cache // When this assertion fails, increase amount of memory - assert(mt->occupancy().used_space() < reclaimable_memory()); + 
SCYLLA_ASSERT(mt->occupancy().used_space() < reclaimable_memory()); std::deque cache_stuffing; auto fill_cache_to_the_top = [&] { @@ -179,8 +180,8 @@ int main(int argc, char** argv) { auto reader = cache.make_reader(s, semaphore.make_permit(), range); auto close_reader = deferred_close(reader); auto mo = read_mutation_from_mutation_reader(reader).get(); - assert(mo); - assert(mo->partition().live_row_count(*s) == + SCYLLA_ASSERT(mo); + SCYLLA_ASSERT(mo->partition().live_row_count(*s) == row_count + 1 /* one row was already in cache before update()*/); } @@ -197,8 +198,8 @@ int main(int argc, char** argv) { auto reader = cache.make_reader(s, semaphore.make_permit(), range); auto close_reader = deferred_close(reader); auto mfopt = reader().get(); - assert(mfopt); - assert(mfopt->is_partition_start()); + SCYLLA_ASSERT(mfopt); + SCYLLA_ASSERT(mfopt->is_partition_start()); } std::cout << "Testing reading when memory can't be reclaimed.\n"; @@ -235,12 +236,12 @@ int main(int argc, char** argv) { try { auto reader = cache.make_reader(s, semaphore.make_permit(), range); auto close_reader = deferred_close(reader); - assert(!reader().get()); + SCYLLA_ASSERT(!reader().get()); auto evicted_from_cache = logalloc::segment_size + large_cell_size; // GCC's -fallocation-dce can remove dead calls to new and malloc, so // assign the result to a global variable to disable it. 
leak = new char[evicted_from_cache + logalloc::segment_size]; - assert(false); // The test is not invoking the case which it's supposed to test + SCYLLA_ASSERT(false); // The test is not invoking the case which it's supposed to test } catch (const std::bad_alloc&) { // expected } diff --git a/test/unit/row_cache_stress_test.cc b/test/unit/row_cache_stress_test.cc index 900dc92dce..506aca9cf1 100644 --- a/test/unit/row_cache_stress_test.cc +++ b/test/unit/row_cache_stress_test.cc @@ -14,6 +14,7 @@ #include "replica/memtable.hh" #include "row_cache.hh" #include "partition_slice_builder.hh" +#include "utils/assert.hh" #include "utils/int_range.hh" #include "utils/div_ceil.hh" #include "utils/to_string.hh" @@ -411,8 +412,8 @@ int main(int argc, char** argv) { t.tracker.cleaner().drain().get(); t.tracker.memtable_cleaner().drain().get(); - assert(t.tracker.get_stats().partitions == 0); - assert(t.tracker.get_stats().rows == 0); + SCYLLA_ASSERT(t.tracker.get_stats().partitions == 0); + SCYLLA_ASSERT(t.tracker.get_stats().rows == 0); }); }); } diff --git a/test/unit/tree_test_key.hh b/test/unit/tree_test_key.hh index 53bbf47ab7..8d886e5603 100644 --- a/test/unit/tree_test_key.hh +++ b/test/unit/tree_test_key.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include @@ -98,7 +99,7 @@ public: if (_cookie != nullptr) { delete _cookie; } - assert(_p_cookie != nullptr); + SCYLLA_ASSERT(_p_cookie != nullptr); delete _p_cookie; } }; diff --git a/tools/lua_sstable_consumer.cc b/tools/lua_sstable_consumer.cc index e9e339495b..c02a35d0f5 100644 --- a/tools/lua_sstable_consumer.cc +++ b/tools/lua_sstable_consumer.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include #include @@ -64,7 +65,7 @@ template <> struct type_to_metatable { static constexpr const char* template const char* get_metatable_name() { const auto metatable_name = type_to_metatable>::metatable_name; - assert(metatable_name); + 
SCYLLA_ASSERT(metatable_name); return metatable_name; } diff --git a/tools/schema_loader.cc b/tools/schema_loader.cc index b53ec2bc7e..41f38a0f65 100644 --- a/tools/schema_loader.cc +++ b/tools/schema_loader.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include #include #include @@ -270,7 +271,7 @@ std::vector do_load_schemas(const db::config& cfg, std::string_view auto prepared_statement = raw_statement->prepare(db, cql_stats); auto* statement = prepared_statement->statement.get(); auto p = dynamic_cast(statement); - assert(p); + SCYLLA_ASSERT(p); real_db.keyspaces.emplace_back(p->get_keyspace_metadata(*token_metadata.local().get(), feature_service)); return db.find_keyspace(name); }; diff --git a/tools/scylla-nodetool.cc b/tools/scylla-nodetool.cc index 4ab24f883e..61f13b36ce 100644 --- a/tools/scylla-nodetool.cc +++ b/tools/scylla-nodetool.cc @@ -43,6 +43,7 @@ #include "release.hh" #include "tools/format_printers.hh" #include "tools/utils.hh" +#include "utils/assert.hh" #include "utils/estimated_histogram.hh" #include "utils/http.hh" #include "utils/pretty_printers.hh" @@ -1533,7 +1534,7 @@ std::string last_token_in_hosts(const std::vectorfirst; } diff --git a/tools/utils.cc b/tools/utils.cc index 4851342db6..62e0f631ec 100644 --- a/tools/utils.cc +++ b/tools/utils.cc @@ -15,6 +15,7 @@ #include "db/config.hh" #include "db/extensions.hh" #include "tools/utils.hh" +#include "utils/assert.hh" #include "utils/logalloc.hh" #include "init.hh" @@ -220,7 +221,7 @@ int tool_app_template::run_async(int argc, char** argv, noncopyable_function trace_keyspace_helper::flush_one_session_mutations(lw_shared_ptr #include "transport/controller.hh" #include @@ -272,7 +273,7 @@ future<> controller::do_start_server() { } future<> controller::stop_server() { - assert(this_shard_id() == 0); + SCYLLA_ASSERT(this_shard_id() == 0); if (!_stopped) { co_await _ops_sem.wait(); diff --git a/transport/event.cc b/transport/event.cc index 
f1a7b523ed..b15994d1da 100644 --- a/transport/event.cc +++ b/transport/event.cc @@ -8,6 +8,7 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ +#include "utils/assert.hh" #include "transport/event.hh" namespace cql_transport { @@ -57,17 +58,17 @@ event::schema_change::schema_change(change_type change, target_type target, sstr { switch (target) { case event::schema_change::target_type::KEYSPACE: - assert(this->arguments.empty()); + SCYLLA_ASSERT(this->arguments.empty()); break; case event::schema_change::target_type::TYPE: case event::schema_change::target_type::TABLE: // just the name - assert(this->arguments.size() == 1); + SCYLLA_ASSERT(this->arguments.size() == 1); break; case event::schema_change::target_type::FUNCTION: case event::schema_change::target_type::AGGREGATE: // at least the name - assert(this->arguments.size() >= 1); + SCYLLA_ASSERT(this->arguments.size() >= 1); break; } } diff --git a/transport/messages/result_message.hh b/transport/messages/result_message.hh index 2d3b477146..467930bc2d 100644 --- a/transport/messages/result_message.hh +++ b/transport/messages/result_message.hh @@ -9,6 +9,7 @@ #pragma once +#include "utils/assert.hh" #include #include "cql3/result_set.hh" @@ -69,7 +70,7 @@ public: void visit(const result_message::prepared::cql&) override {}; void visit(const result_message::schema_change&) override {}; void visit(const result_message::rows&) override {}; - void visit(const result_message::bounce_to_shard&) override { assert(false); }; + void visit(const result_message::bounce_to_shard&) override { SCYLLA_ASSERT(false); }; void visit(const result_message::exception&) override; }; diff --git a/transport/server.cc b/transport/server.cc index aad5783df1..b742784d31 100644 --- a/transport/server.cc +++ b/transport/server.cc @@ -36,6 +36,7 @@ #include #include #include +#include "utils/assert.hh" #include "utils/result_try.hh" #include "utils/result_combinators.hh" #include "db/operation_type.hh" @@ -160,7 +161,7 @@ 
sstring to_string(const event::schema_change::change_type t) { case event::schema_change::change_type::UPDATED: return "UPDATED"; case event::schema_change::change_type::DROPPED: return "DROPPED"; } - assert(false && "unreachable"); + SCYLLA_ASSERT(false && "unreachable"); } sstring to_string(const event::schema_change::target_type t) { @@ -171,7 +172,7 @@ sstring to_string(const event::schema_change::target_type t) { case event::schema_change::target_type::FUNCTION: return "FUNCTION"; case event::schema_change::target_type::AGGREGATE:return "AGGREGATE"; } - assert(false && "unreachable"); + SCYLLA_ASSERT(false && "unreachable"); } event::event_type parse_event_type(const sstring& value) @@ -1499,7 +1500,7 @@ public: break; } default: - assert(0); + SCYLLA_ASSERT(0); } } diff --git a/types/types.cc b/types/types.cc index 170a51eea1..5490e8faec 100644 --- a/types/types.cc +++ b/types/types.cc @@ -19,6 +19,7 @@ #include #include #include "types/types.hh" +#include "utils/assert.hh" #include "utils/serialization.hh" #include "vint-serialization.hh" #include @@ -518,7 +519,7 @@ listlike_collection_type_impl::listlike_collection_type_impl( std::strong_ordering listlike_collection_type_impl::compare_with_map(const map_type_impl& map_type, bytes_view list, bytes_view map) const { - assert((is_set() && map_type.get_keys_type() == _elements) || (!is_set() && map_type.get_values_type() == _elements)); + SCYLLA_ASSERT((is_set() && map_type.get_keys_type() == _elements) || (!is_set() && map_type.get_values_type() == _elements)); if (list.empty()) { return map.empty() ? 
std::strong_ordering::equal : std::strong_ordering::less; @@ -558,7 +559,7 @@ std::strong_ordering listlike_collection_type_impl::compare_with_map(const map_t bytes listlike_collection_type_impl::serialize_map(const map_type_impl& map_type, const data_value& value) const { - assert((is_set() && map_type.get_keys_type() == _elements) || (!is_set() && map_type.get_values_type() == _elements)); + SCYLLA_ASSERT((is_set() && map_type.get_keys_type() == _elements) || (!is_set() && map_type.get_values_type() == _elements)); const std::vector>& map = map_type.from_value(value); // Lists are represented as vector>, sets are vector> bool first = is_set(); @@ -1112,7 +1113,7 @@ map_type_impl::freeze() const { bool map_type_impl::is_compatible_with_frozen(const collection_type_impl& previous) const { - assert(!_is_multi_cell); + SCYLLA_ASSERT(!_is_multi_cell); auto* p = dynamic_cast(&previous); if (!p) { return false; @@ -1123,7 +1124,7 @@ map_type_impl::is_compatible_with_frozen(const collection_type_impl& previous) c bool map_type_impl::is_value_compatible_with_frozen(const collection_type_impl& previous) const { - assert(!_is_multi_cell); + SCYLLA_ASSERT(!_is_multi_cell); auto* p = dynamic_cast(&previous); if (!p) { return false; @@ -1315,7 +1316,7 @@ set_type_impl::freeze() const { bool set_type_impl::is_compatible_with_frozen(const collection_type_impl& previous) const { - assert(!_is_multi_cell); + SCYLLA_ASSERT(!_is_multi_cell); auto* p = dynamic_cast(&previous); if (!p) { return false; @@ -1459,7 +1460,7 @@ list_type_impl::freeze() const { bool list_type_impl::is_compatible_with_frozen(const collection_type_impl& previous) const { - assert(!_is_multi_cell); + SCYLLA_ASSERT(!_is_multi_cell); auto* p = dynamic_cast(&previous); if (!p) { return false; @@ -1800,10 +1801,10 @@ void abstract_type::validate(bytes_view v) const { } static void serialize_aux(const tuple_type_impl& type, const tuple_type_impl::native_type* val, bytes::iterator& out) { - assert(val); + 
SCYLLA_ASSERT(val); auto& elems = *val; - assert(elems.size() <= type.size()); + SCYLLA_ASSERT(elems.size() <= type.size()); for (size_t i = 0; i < elems.size(); ++i) { const abstract_type& t = type.type(i)->without_reversed(); @@ -3353,15 +3354,15 @@ static bytes_ostream serialize_for_cql_aux(const list_type_impl&, collection_mut } static bytes_ostream serialize_for_cql_aux(const user_type_impl& type, collection_mutation_view_description mut) { - assert(type.is_multi_cell()); - assert(mut.cells.size() <= type.size()); + SCYLLA_ASSERT(type.is_multi_cell()); + SCYLLA_ASSERT(mut.cells.size() <= type.size()); bytes_ostream out; size_t curr_field_pos = 0; for (auto&& e : mut.cells) { auto field_pos = deserialize_field_index(e.first); - assert(field_pos < type.size()); + SCYLLA_ASSERT(field_pos < type.size()); // Some fields don't have corresponding cells -- these fields are null. while (curr_field_pos < field_pos) { @@ -3391,7 +3392,7 @@ static bytes_ostream serialize_for_cql_aux(const user_type_impl& type, collectio } bytes_ostream serialize_for_cql(const abstract_type& type, collection_mutation_view v) { - assert(type.is_multi_cell()); + SCYLLA_ASSERT(type.is_multi_cell()); return v.with_deserialized(type, [&] (collection_mutation_view_description mv) { return visit(type, make_visitor( @@ -3418,12 +3419,12 @@ bytes serialize_field_index(size_t idx) { } size_t deserialize_field_index(const bytes_view& b) { - assert(b.size() == sizeof(int16_t)); + SCYLLA_ASSERT(b.size() == sizeof(int16_t)); return read_be(reinterpret_cast(b.data())); } size_t deserialize_field_index(managed_bytes_view b) { - assert(b.size_bytes() == sizeof(int16_t)); + SCYLLA_ASSERT(b.size_bytes() == sizeof(int16_t)); return be_to_cpu(read_simple_native(b)); } diff --git a/utils/UUID.hh b/utils/UUID.hh index 0b5d3c9a6c..f95ae854de 100644 --- a/utils/UUID.hh +++ b/utils/UUID.hh @@ -20,6 +20,7 @@ #include #include #include "bytes.hh" +#include "utils/assert.hh" #include "utils/hashing.hh" #include 
"utils/serialization.hh" @@ -57,7 +58,7 @@ public: //if (version() != 1) { // throw new UnsupportedOperationException("Not a time-based UUID"); //} - assert(is_timestamp()); + SCYLLA_ASSERT(is_timestamp()); return ((most_sig_bits & 0xFFF) << 48) | (((most_sig_bits >> 16) & 0xFFFF) << 32) | diff --git a/utils/UUID_gen.hh b/utils/UUID_gen.hh index 0ab954a89c..aea7fb7f9d 100644 --- a/utils/UUID_gen.hh +++ b/utils/UUID_gen.hh @@ -9,6 +9,7 @@ * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0) */ +#include "utils/assert.hh" #include #include @@ -83,7 +84,7 @@ private: UUID_gen() { // make sure someone didn't whack the clockSeqAndNode by changing the order of instantiation. - assert(clock_seq_and_node != 0); + SCYLLA_ASSERT(clock_seq_and_node != 0); } // Return decimicrosecond time based on the system time, @@ -118,7 +119,7 @@ public: static UUID get_time_UUID() { auto uuid = UUID(_instance.create_time_safe(), clock_seq_and_node); - assert(uuid.is_timestamp()); + SCYLLA_ASSERT(uuid.is_timestamp()); return uuid; } @@ -130,7 +131,7 @@ public: static UUID get_time_UUID(std::chrono::system_clock::time_point tp) { auto uuid = UUID(create_time(from_unix_timestamp(tp.time_since_epoch())), clock_seq_and_node); - assert(uuid.is_timestamp()); + SCYLLA_ASSERT(uuid.is_timestamp()); return uuid; } @@ -142,14 +143,14 @@ public: static UUID get_time_UUID(milliseconds when, int64_t clock_seq_and_node = UUID_gen::clock_seq_and_node) { auto uuid = UUID(create_time(from_unix_timestamp(when)), clock_seq_and_node); - assert(uuid.is_timestamp()); + SCYLLA_ASSERT(uuid.is_timestamp()); return uuid; } static UUID get_time_UUID_raw(decimicroseconds when, int64_t clock_seq_and_node) { auto uuid = UUID(create_time(when), clock_seq_and_node); - assert(uuid.is_timestamp()); + SCYLLA_ASSERT(uuid.is_timestamp()); return uuid; } @@ -169,7 +170,7 @@ public: static thread_local std::uniform_int_distribution rand_dist(std::numeric_limits::min()); auto uuid = 
UUID(create_time(from_unix_timestamp(when_in_micros)), rand_dist(rand_gen)); - assert(uuid.is_timestamp()); + SCYLLA_ASSERT(uuid.is_timestamp()); return uuid; } // Generate a time-based (Version 1) UUID using @@ -230,7 +231,7 @@ public: /** creates uuid from raw bytes. */ static UUID get_UUID(bytes raw) { - assert(raw.size() == 16); + SCYLLA_ASSERT(raw.size() == 16); return get_UUID(raw.begin()); } @@ -294,7 +295,7 @@ public: static UUID min_time_UUID(decimicroseconds timestamp = decimicroseconds{0}) { auto uuid = UUID(create_time(from_unix_timestamp(timestamp)), MIN_CLOCK_SEQ_AND_NODE); - assert(uuid.is_timestamp()); + SCYLLA_ASSERT(uuid.is_timestamp()); return uuid; } @@ -312,7 +313,7 @@ public: // precision by taking 10000, but rather 19999. decimicroseconds uuid_tstamp = from_unix_timestamp(timestamp + milliseconds(1)) - decimicroseconds(1); auto uuid = UUID(create_time(uuid_tstamp), MAX_CLOCK_SEQ_AND_NODE); - assert(uuid.is_timestamp()); + SCYLLA_ASSERT(uuid.is_timestamp()); return uuid; } @@ -386,7 +387,7 @@ public: // timeuuid time must fit in 60 bits if ((0xf000000000000000UL & msb)) { // We hope callers would try to avoid this case, but they don't - // always do, so assert() would be bad here - and caused #17035. + // always do, so SCYLLA_ASSERT() would be bad here - and caused #17035. 
utils::on_internal_error("timeuuid time must fit in 60 bits"); } return ((0x00000000ffffffffL & msb) << 32 | @@ -401,8 +402,8 @@ public: // // auto original_uuid = UUID_gen::get_time_UUID(); // auto negated_uuid = UUID_gen::negate(original_uuid); - // assert(original_uuid != negated_uuid); - // assert(original_uuid == UUID_gen::negate(negated_uuid)); + // SCYLLA_ASSERT(original_uuid != negated_uuid); + // SCYLLA_ASSERT(original_uuid == UUID_gen::negate(negated_uuid)); static UUID negate(UUID); }; diff --git a/utils/assert.hh b/utils/assert.hh new file mode 100644 index 0000000000..9b733ec403 --- /dev/null +++ b/utils/assert.hh @@ -0,0 +1,9 @@ +// Copyright 2024-present ScyllaDB +// SPDX-License-Identifier: AGPL-3.0-or-later + +#pragma once + +#include + +/// Like assert(), but independent of NDEBUG. Active in all build modes. +#define SCYLLA_ASSERT(x) do { if (!(x)) { __assert_fail(#x, __FILE__, __LINE__, __PRETTY_FUNCTION__); } } while (0) diff --git a/utils/big_decimal.cc b/utils/big_decimal.cc index 8b8cf8a36c..9f313d56ad 100644 --- a/utils/big_decimal.cc +++ b/utils/big_decimal.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "big_decimal.hh" #include #include "marshal_exception.hh" @@ -186,7 +187,7 @@ big_decimal big_decimal::operator-(const big_decimal& other) const { big_decimal big_decimal::div(const ::uint64_t y, const rounding_mode mode) const { if (mode != rounding_mode::HALF_EVEN) { - assert(0); + SCYLLA_ASSERT(0); } // Implementation of Division with Half to Even (aka Bankers) Rounding diff --git a/utils/bloom_calculations.hh b/utils/bloom_calculations.hh index d82de4582b..e487d302ad 100644 --- a/utils/bloom_calculations.hh +++ b/utils/bloom_calculations.hh @@ -10,6 +10,7 @@ #pragma once +#include "utils/assert.hh" #include #include "exceptions/exceptions.hh" @@ -56,8 +57,8 @@ namespace bloom_calculations { * @return A spec that minimizes the false positive rate. 
*/ inline bloom_specification compute_bloom_spec(int buckets_per_element) { - assert(buckets_per_element >= 1); - assert(buckets_per_element <= int(probs.size()) - 1); + SCYLLA_ASSERT(buckets_per_element >= 1); + SCYLLA_ASSERT(buckets_per_element <= int(probs.size()) - 1); return bloom_specification(opt_k_per_buckets[buckets_per_element], buckets_per_element); } @@ -76,8 +77,8 @@ namespace bloom_calculations { * @throws unsupported_operation_exception if a filter satisfying the parameters cannot be met */ inline bloom_specification compute_bloom_spec(int max_buckets_per_element, double max_false_pos_prob) { - assert(max_buckets_per_element >= 1); - assert(max_buckets_per_element <= int(probs.size()) - 1); + SCYLLA_ASSERT(max_buckets_per_element >= 1); + SCYLLA_ASSERT(max_buckets_per_element <= int(probs.size()) - 1); auto max_k = int(probs[max_buckets_per_element].size()) - 1; @@ -219,8 +220,8 @@ class BloomCalculations { */ public static BloomSpecification computeBloomSpec(int bucketsPerElement) { - assert bucketsPerElement >= 1; - assert bucketsPerElement <= probs.length - 1; + SCYLLA_ASSERT bucketsPerElement >= 1; + SCYLLA_ASSERT bucketsPerElement <= probs.length - 1; return new BloomSpecification(optKPerBuckets[bucketsPerElement], bucketsPerElement); } @@ -261,8 +262,8 @@ class BloomCalculations { */ public static BloomSpecification computeBloomSpec(int maxBucketsPerElement, double maxFalsePosProb) { - assert maxBucketsPerElement >= 1; - assert maxBucketsPerElement <= probs.length - 1; + SCYLLA_ASSERT maxBucketsPerElement >= 1; + SCYLLA_ASSERT maxBucketsPerElement <= probs.length - 1; int maxK = probs[maxBucketsPerElement].length - 1; // Handle the trivial cases diff --git a/utils/bptree.hh b/utils/bptree.hh index 2c80ea1e10..b03324f5c4 100644 --- a/utils/bptree.hh +++ b/utils/bptree.hh @@ -12,6 +12,7 @@ #include #include #include +#include "utils/assert.hh" #include "utils/allocation_strategy.hh" #include "utils/collection-concepts.hh" #include 
"utils/neat-object-id.hh" @@ -285,7 +286,7 @@ private: */ if (i == 0) { - assert(n.is_leftmost()); + SCYLLA_ASSERT(n.is_leftmost()); return begin(); } else if (i <= n._num_keys) { const_iterator cur = const_iterator(n._kids[i].d, i); @@ -297,7 +298,7 @@ private: return cur; } else { - assert(n.is_rightmost()); + SCYLLA_ASSERT(n.is_rightmost()); return end(); } } @@ -339,7 +340,7 @@ public: size_t ret = 0; const node* leaf = _left; while (1) { - assert(leaf->is_leaf()); + SCYLLA_ASSERT(leaf->is_leaf()); ret += leaf->_num_keys; if (leaf == _right) { break; @@ -430,7 +431,7 @@ public: data* d = data::create(std::forward(args)...); auto x = seastar::defer([&d] { data::destroy(*d, default_dispose); }); n.insert(i, std::move(k), d, _less); - assert(d->attached()); + SCYLLA_ASSERT(d->attached()); x.cancel(); return std::pair(iterator(d, i + 1), true); } @@ -449,7 +450,7 @@ public: return end(); } - assert(n._num_keys > 0); + SCYLLA_ASSERT(n._num_keys > 0); if (_less(n._keys[i - 1].v, k)) { return end(); @@ -501,19 +502,19 @@ public: private: void do_set_left(node *n) noexcept { - assert(n->is_leftmost()); + SCYLLA_ASSERT(n->is_leftmost()); _left = n; n->_kids[0]._leftmost_tree = this; } void do_set_right(node *n) noexcept { - assert(n->is_rightmost()); + SCYLLA_ASSERT(n->is_rightmost()); _right = n; n->_rightmost_tree = this; } void do_set_root(node *n) noexcept { - assert(n->is_root()); + SCYLLA_ASSERT(n->is_root()); n->_root_tree = this; _root = n; } @@ -553,7 +554,7 @@ public: explicit iterator_base(tree_ptr t) noexcept : _tree(t), _idx(npos) { } iterator_base(data_ptr d, kid_index idx) noexcept : _data(d), _idx(idx) { - assert(!is_end()); + SCYLLA_ASSERT(!is_end()); } iterator_base() noexcept : iterator_base(static_cast(nullptr)) {} @@ -562,7 +563,7 @@ public: * and returns back the leaf that points to it. 
*/ node_ptr revalidate() noexcept { - assert(!is_end()); + SCYLLA_ASSERT(!is_end()); node_ptr leaf = _data->_leaf; @@ -609,7 +610,7 @@ public: iterator_base& operator--() noexcept { if (is_end()) { node* n = _tree->_right; - assert(n->_num_keys > 0); + SCYLLA_ASSERT(n->_num_keys > 0); _data = n->_kids[n->_num_keys].d; _idx = n->_num_keys; return *this; @@ -732,12 +733,12 @@ public: i = leaf->_num_keys; } - assert(i >= 0); + SCYLLA_ASSERT(i >= 0); data* d = data::create(std::forward(args)...); auto x = seastar::defer([&d] { data::destroy(*d, default_dispose); }); leaf->insert(i, std::move(key(d)), d, less); - assert(d->attached()); + SCYLLA_ASSERT(d->attached()); x.cancel(); /* * XXX -- if the node was not split we can ++ it index @@ -829,7 +830,7 @@ public: return end(); } - assert(_left->_num_keys > 0); + SCYLLA_ASSERT(_left->_num_keys > 0); // Leaf nodes have data pointers starting from index 1 return const_iterator(_left->_kids[1].d, 1); } @@ -922,8 +923,8 @@ struct searcher { static size_t gt(const K& k, const maybe_key* keys, size_t nr, Less less) noexcept { size_t rl = searcher::gt(k, keys, nr, less); size_t rb = searcher::gt(k, keys, nr, less); - assert(rl == rb); - assert(rl <= nr); + SCYLLA_ASSERT(rl == rb); + SCYLLA_ASSERT(rl <= nr); return rl; } }; @@ -1033,22 +1034,22 @@ class node final { }; node* get_next() const noexcept { - assert(is_leaf()); + SCYLLA_ASSERT(is_leaf()); return __next; } void set_next(node *n) noexcept { - assert(is_leaf()); + SCYLLA_ASSERT(is_leaf()); __next = n; } node* get_prev() const noexcept { - assert(is_leaf()); + SCYLLA_ASSERT(is_leaf()); return _kids[0].n; } void set_prev(node* n) noexcept { - assert(is_leaf()); + SCYLLA_ASSERT(is_leaf()); _kids[0].n = n; } @@ -1058,7 +1059,7 @@ class node final { _flags &= ~NODE_RIGHTMOST; n._flags |= node::NODE_RIGHTMOST; tree* t = _rightmost_tree; - assert(t->_right == this); + SCYLLA_ASSERT(t->_right == this); t->do_set_right(&n); } else { n.set_next(get_next()); @@ -1079,7 +1080,7 @@ 
class node final { _flags &= ~node::NODE_LEFTMOST; x->_flags |= node::NODE_LEFTMOST; t = _kids[0]._leftmost_tree; - assert(t->_left == this); + SCYLLA_ASSERT(t->_left == this); t->do_set_left(x); break; case node::NODE_RIGHTMOST: @@ -1087,7 +1088,7 @@ class node final { _flags &= ~node::NODE_RIGHTMOST; x->_flags |= node::NODE_RIGHTMOST; t = _rightmost_tree; - assert(t->_right == this); + SCYLLA_ASSERT(t->_right == this); t->do_set_right(x); break; case 0: @@ -1099,7 +1100,7 @@ class node final { * Right- and left-most at the same time can only be root, * otherwise this would mean we have root with 0 keys. */ - assert(false); + SCYLLA_ASSERT(false); } set_next(this); @@ -1149,7 +1150,7 @@ class node final { break; } } - assert(i <= _num_keys); + SCYLLA_ASSERT(i <= _num_keys); return i; } @@ -1208,7 +1209,7 @@ class node final { } void move_to(node& to, size_t off) noexcept { - assert(off <= _num_keys); + SCYLLA_ASSERT(off <= _num_keys); to._num_keys = 0; move_keys_and_kids(off, to, _num_keys - off); } @@ -1232,7 +1233,7 @@ class node final { * kids: [A012] -> [B56] = [A01] [2B56] */ - assert(from._num_keys > 0); + SCYLLA_ASSERT(from._num_keys > 0); key_index i = from._num_keys - 1; shift_right(0); @@ -1337,7 +1338,7 @@ class node final { return true; } - // Helper for assert(). See comment for do_insert for details. + // Helper for SCYLLA_ASSERT(). See comment for do_insert for details. 
bool left_kid_sorted(const Key& k, Less less) const noexcept { if (Debug == with_debug::yes && !is_leaf() && _num_keys > 0) { node* x = _kids[0].n; @@ -1385,7 +1386,7 @@ class node final { } void drop() noexcept { - assert(!is_root()); + SCYLLA_ASSERT(!is_root()); if (is_leaf()) { unlink(); } @@ -1446,7 +1447,7 @@ class node final { } void split_and_insert(kid_index idx, Key k, node_or_data nd, Less less, prealloc& nodes) noexcept { - assert(_num_keys == NodeSize); + SCYLLA_ASSERT(_num_keys == NodeSize); node* nn = nodes.pop(); maybe_key sep; @@ -1548,7 +1549,7 @@ class node final { } } - assert(equally_split(*nn)); + SCYLLA_ASSERT(equally_split(*nn)); if (is_root()) { insert_into_root(*nn, std::move(sep.v), nodes); @@ -1559,7 +1560,7 @@ class node final { } void do_insert(kid_index i, Key k, node_or_data nd, Less less) noexcept { - assert(_num_keys < NodeSize); + SCYLLA_ASSERT(_num_keys < NodeSize); /* * The new k:nd pair should be put into the given index and @@ -1573,7 +1574,7 @@ class node final { * Said that, if we're inserting a new pair, the newbie can * only get to the right of the left-most kid. */ - assert(i != 0 || left_kid_sorted(k, less)); + SCYLLA_ASSERT(i != 0 || left_kid_sorted(k, less)); shift_right(i); @@ -1643,7 +1644,7 @@ class node final { } insert(i, std::move(k), node_or_data{.d = d}, less, nodes); - assert(nodes.empty()); + SCYLLA_ASSERT(nodes.empty()); } void remove_from(key_index i, Less less) noexcept { @@ -1680,9 +1681,9 @@ class node final { * the 0th key, so make sure it exists. We can go even without * it, but since we don't let's be on the safe side. */ - assert(_num_keys > 0); + SCYLLA_ASSERT(_num_keys > 0); kid_index i = p.index_for(_keys[0].v, less); - assert(p._kids[i].n == this); + SCYLLA_ASSERT(p._kids[i].n == this); /* * The node is "underflown" (see comment near NodeHalf @@ -1747,7 +1748,7 @@ class node final { * be able to find this node's index at parent (the call for * index_for() above). 
*/ - assert(_num_keys > 1); + SCYLLA_ASSERT(_num_keys > 1); } void remove(kid_index ki, Less less) noexcept { @@ -1784,8 +1785,8 @@ public: explicit node() noexcept : _num_keys(0) , _flags(0) , _parent(nullptr) { } ~node() { - assert(_num_keys == 0); - assert(is_root() || !is_leaf() || (get_prev() == this && get_next() == this)); + SCYLLA_ASSERT(_num_keys == 0); + SCYLLA_ASSERT(is_root() || !is_leaf() || (get_prev() == this && get_next() == this)); } node(node&& other) noexcept : _flags(other._flags) { @@ -1817,7 +1818,7 @@ public: if (!is_root()) { _parent = other._parent; kid_index i = _parent->index_for(&other); - assert(_parent->_kids[i].n == &other); + SCYLLA_ASSERT(_parent->_kids[i].n == &other); _parent->_kids[i].n = this; } else { other._root_tree->do_set_root(this); @@ -1837,7 +1838,7 @@ public: break; } } - assert(i <= _num_keys); + SCYLLA_ASSERT(i <= _num_keys); return i; } @@ -1852,7 +1853,7 @@ private: } node* pop() noexcept { - assert(!_nodes.empty()); + SCYLLA_ASSERT(!_nodes.empty()); node* ret = _nodes.back(); _nodes.pop_back(); return ret; @@ -1929,17 +1930,17 @@ public: } } - ~data() { assert(!attached()); } + ~data() { SCYLLA_ASSERT(!attached()); } bool attached() const noexcept { return _leaf != nullptr; } void attach(node& to) noexcept { - assert(!attached()); + SCYLLA_ASSERT(!attached()); _leaf = &to; } void reattach(node* to) noexcept { - assert(attached()); + SCYLLA_ASSERT(attached()); _leaf = to; } diff --git a/utils/build_id.cc b/utils/build_id.cc index 7a71b542cf..d009b91e55 100644 --- a/utils/build_id.cc +++ b/utils/build_id.cc @@ -2,6 +2,7 @@ * Copyright (C) 2019-present ScyllaDB */ +#include "utils/assert.hh" #include "build_id.hh" #include #include @@ -38,7 +39,7 @@ static const Elf64_Nhdr* get_nt_build_id(dl_phdr_info* info) { } } - assert(0 && "no NT_GNU_BUILD_ID note"); + SCYLLA_ASSERT(0 && "no NT_GNU_BUILD_ID note"); } static int callback(dl_phdr_info* info, size_t size, void* data) { @@ -46,7 +47,7 @@ static int 
callback(dl_phdr_info* info, size_t size, void* data) { std::ostringstream os; // The first DSO is always the main program, which has an empty name. - assert(strlen(info->dlpi_name) == 0); + SCYLLA_ASSERT(strlen(info->dlpi_name) == 0); auto* n = get_nt_build_id(info); auto* p = reinterpret_cast(n); @@ -68,7 +69,7 @@ static int callback(dl_phdr_info* info, size_t size, void* data) { static std::string really_get_build_id() { std::string ret; int r = dl_iterate_phdr(callback, &ret); - assert(r == 1); + SCYLLA_ASSERT(r == 1); return ret; } diff --git a/utils/cached_file.hh b/utils/cached_file.hh index 50034f97ed..d0da064cbe 100644 --- a/utils/cached_file.hh +++ b/utils/cached_file.hh @@ -9,6 +9,7 @@ #pragma once #include "reader_permit.hh" +#include "utils/assert.hh" #include "utils/div_ceil.hh" #include "utils/bptree.hh" #include "utils/logalloc.hh" @@ -97,7 +98,7 @@ private: } ~cached_page() { - assert(!_use_count); + SCYLLA_ASSERT(!_use_count); } void on_evicted() noexcept override; @@ -364,7 +365,7 @@ public: ~cached_file() { evict_range(_cache.begin(), _cache.end()); - assert(_cache.empty()); + SCYLLA_ASSERT(_cache.empty()); } /// \brief Invalidates [start, end) or less. 
diff --git a/utils/compact-radix-tree.hh b/utils/compact-radix-tree.hh index bcb5dab4f5..61e9b0b986 100644 --- a/utils/compact-radix-tree.hh +++ b/utils/compact-radix-tree.hh @@ -12,6 +12,7 @@ #include #include #include +#include "utils/assert.hh" #include "utils/allocation_strategy.hh" #include "utils/array-search.hh" #include @@ -297,7 +298,7 @@ private: } node_head(const node_head&) = delete; - ~node_head() { assert(_size == 0); } + ~node_head() { SCYLLA_ASSERT(_size == 0); } /* * Helpers to cast header to the actual node class or to the @@ -1022,8 +1023,8 @@ private: } void append(node_head& head, node_index_t ni, Slot&& val) noexcept { - assert(check_capacity(head, ni)); - assert(!_data.has(ni)); + SCYLLA_ASSERT(check_capacity(head, ni)); + SCYLLA_ASSERT(!_data.has(ni)); _data.add(head, ni); new (&_data._slots[ni]) Slot(std::move(val)); } @@ -1079,7 +1080,7 @@ private: while (want_ni >= next_cap) { next_cap <<= 1; } - assert(next_cap > head._capacity); + SCYLLA_ASSERT(next_cap > head._capacity); NT* nn = NT::allocate(head._prefix, layout::direct_dynamic, next_cap); move_slots(_data._slots, head._capacity, head._capacity + 1, nn->_base, @@ -1145,7 +1146,7 @@ private: if constexpr (this_layout == layout::direct_static) { return sizeof(direct_layout) + node_index_limit * sizeof(Slot); } else { - assert(capacity != 0); + SCYLLA_ASSERT(capacity != 0); return sizeof(direct_layout) + capacity * sizeof(Slot); } } @@ -1235,8 +1236,8 @@ private: void append(node_head& head, node_index_t ni, Slot&& val) noexcept { unsigned i = head._size++; - assert(i < Size); - assert(_idx[i] == unused_node_index); + SCYLLA_ASSERT(i < Size); + SCYLLA_ASSERT(_idx[i] == unused_node_index); _idx[i] = ni; new (&_slots[i]) Slot(std::move(val)); } @@ -1444,10 +1445,10 @@ private: * index still coincide */ unsigned plen = common_prefix_len(key, n_prefix); - assert(plen >= depth); + SCYLLA_ASSERT(plen >= depth); plen -= depth; depth += plen; - assert(n.prefix_len() > plen); + 
SCYLLA_ASSERT(n.prefix_len() > plen); node_index_t ni = node_index(n_prefix, depth); node_head* nn = inner_node::allocate_initial(make_prefix(key, plen), ni); @@ -1466,7 +1467,7 @@ private: static node_head* squash(node_head* n, unsigned depth) noexcept { const node_head_ptr np = n->pop_lower(); node_head* kid = np.raw(); - assert(kid != nullptr); + SCYLLA_ASSERT(kid != nullptr); // Kid has n and it's prefix squashed kid->bump_prefix(n->prefix_len() + 1); return kid; @@ -1537,7 +1538,7 @@ private: n->free(depth); n = nn; ret = nn->alloc(key, depth); - assert(ret.first != nullptr); + SCYLLA_ASSERT(ret.first != nullptr); } return ret; } @@ -1551,7 +1552,7 @@ private: * len big enough to cover all skipped node * up to the current depth */ - assert(leaf_depth >= depth); + SCYLLA_ASSERT(leaf_depth >= depth); np = leaf_node::allocate_initial(make_prefix(key, leaf_depth - depth)); } @@ -1610,7 +1611,7 @@ private: static bool erase_from_slot(node_head_ptr* np, key_t key, unsigned depth, erase_mode erm) noexcept { node_head* n = np->raw(); - assert(n->check_prefix(key, depth)); + SCYLLA_ASSERT(n->check_prefix(key, depth)); erase_result er = n->erase(key, depth, erm); if (erm == erase_mode::cleanup) { @@ -1846,7 +1847,7 @@ public: template requires std::is_invocable_r::value void clone_from(const tree& tree, Cloner&& cloner) { - assert(_root.is(nil_root)); + SCYLLA_ASSERT(_root.is(nil_root)); if (!tree._root.is(nil_root)) { clone_res cres = tree._root->clone(cloner, 0); if (cres.first != nullptr) { diff --git a/utils/composite_abort_source.hh b/utils/composite_abort_source.hh index e709f819c7..0b19a77d63 100644 --- a/utils/composite_abort_source.hh +++ b/utils/composite_abort_source.hh @@ -1,4 +1,5 @@ #include +#include "utils/assert.hh" #include "utils/small_vector.hh" #include "seastarx.hh" @@ -14,7 +15,7 @@ public: void add(abort_source& as) { as.check(); auto sub = as.subscribe([this]() noexcept { _as.request_abort(); }); - assert(sub); + SCYLLA_ASSERT(sub); 
_subscriptions.push_back(std::move(*sub)); } abort_source& abort_source() noexcept { diff --git a/utils/cross-shard-barrier.hh b/utils/cross-shard-barrier.hh index d76078ba9e..e058d61283 100644 --- a/utils/cross-shard-barrier.hh +++ b/utils/cross-shard-barrier.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include #include @@ -99,7 +100,7 @@ public: // barrier, because it will likely be copied between sharded // users on sharded::start. The best check in this situation // is to make sure the local promise is not set up. - assert(!_b->wakeup[this_shard_id()].has_value()); + SCYLLA_ASSERT(!_b->wakeup[this_shard_id()].has_value()); auto i = _b->counter.fetch_add(-1); return i == 1 ? complete() : wait(); } @@ -130,7 +131,7 @@ private: if (this_shard_id() != sid) { std::optional>& w = b->wakeup[this_shard_id()]; if (alive) { - assert(w.has_value()); + SCYLLA_ASSERT(w.has_value()); w->set_value(); w.reset(); } else if (w.has_value()) { diff --git a/utils/double-decker.hh b/utils/double-decker.hh index 9f07de00ee..7e3850ba1f 100644 --- a/utils/double-decker.hh +++ b/utils/double-decker.hh @@ -10,6 +10,7 @@ #include #include +#include "utils/assert.hh" #include "utils/bptree.hh" #include "utils/intrusive-array.hh" #include "utils/collection-concepts.hh" @@ -197,7 +198,7 @@ public: template iterator emplace_before(iterator i, Key k, const bound_hint& hint, Args&&... args) { - assert(!hint.match); + SCYLLA_ASSERT(!hint.match); outer_iterator& bucket = i._bucket; if (!hint.key_match) { @@ -363,7 +364,7 @@ public: arr->for_each(disp); }); - assert(nb == end._bucket); + SCYLLA_ASSERT(nb == end._bucket); /* * Drop the head of the ending bucket. 
Every erased element is the 0th diff --git a/utils/entangled.hh b/utils/entangled.hh index a44f2d15cf..bf8cdfcc52 100644 --- a/utils/entangled.hh +++ b/utils/entangled.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include @@ -33,7 +34,7 @@ class entangled final { private: struct init_tag {}; entangled(init_tag, entangled& other) { - assert(!other._ref); + SCYLLA_ASSERT(!other._ref); _ref = &other; other._ref = this; } diff --git a/utils/error_injection.hh b/utils/error_injection.hh index e615dfaeb0..aaf2efcdaf 100644 --- a/utils/error_injection.hh +++ b/utils/error_injection.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include #include @@ -274,7 +275,7 @@ private: , shared_data(make_lw_shared(std::move(parameters), injection_name)) {} void receive_message() { - assert(shared_data); + SCYLLA_ASSERT(shared_data); ++shared_data->received_message_count; shared_data->received_message_cv.broadcast(); diff --git a/utils/estimated_histogram.hh b/utils/estimated_histogram.hh index c601069007..e6eefb0049 100644 --- a/utils/estimated_histogram.hh +++ b/utils/estimated_histogram.hh @@ -10,6 +10,7 @@ #pragma once +#include "utils/assert.hh" #include #include #include @@ -557,7 +558,7 @@ public: * @return estimated value at given percentile */ int64_t percentile(double perc) const { - assert(perc >= 0 && perc <= 1.0); + SCYLLA_ASSERT(perc >= 0 && perc <= 1.0); auto last_bucket = buckets.size() - 1; auto c = count(); diff --git a/utils/file_lock.cc b/utils/file_lock.cc index 2d7272a647..56cc14bce7 100644 --- a/utils/file_lock.cc +++ b/utils/file_lock.cc @@ -7,6 +7,7 @@ */ +#include "utils/assert.hh" #include #include #include @@ -30,9 +31,9 @@ public: if (!_path.empty()) { ::unlink(_path.c_str()); } - assert(_fd.get() != -1); + SCYLLA_ASSERT(_fd.get() != -1); auto r = ::lockf(_fd.get(), F_ULOCK, 0); - assert(r == 0); + SCYLLA_ASSERT(r == 0); } fs::path _path; file_desc _fd; diff --git a/utils/flush_queue.hh b/utils/flush_queue.hh 
index 22751c121f..f8910873df 100644 --- a/utils/flush_queue.hh +++ b/utils/flush_queue.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include #include @@ -77,7 +78,7 @@ private: template static future handle_failed_future(future f, promise_type& pr) { - assert(f.failed()); + SCYLLA_ASSERT(f.failed()); auto ep = std::move(f).get_exception(); pr.set_exception(ep); return make_exception_future(ep); @@ -120,16 +121,16 @@ public: return futurator::invoke(std::forward(func)).then_wrapped([this, rp, post = std::forward(post)](typename futurator::type f) mutable { auto i = _map.find(rp); - assert(i != _map.end()); + SCYLLA_ASSERT(i != _map.end()); using post_result = decltype(call_helper(std::forward(post), std::move(f))); auto run_post = [this, post = std::forward(post), f = std::move(f), i]() mutable { - assert(i == _map.begin()); + SCYLLA_ASSERT(i == _map.begin()); return call_helper(std::forward(post), std::move(f)).then_wrapped([this, i](post_result f) { if (--i->second.count == 0) { auto pr = std::move(i->second.pr); - assert(i == _map.begin()); + SCYLLA_ASSERT(i == _map.begin()); _map.erase(i); if (f.failed() && _chain_exceptions) { return handle_failed_future(std::move(f), pr); diff --git a/utils/i_filter.cc b/utils/i_filter.cc index c68b328e84..1fad6d606c 100644 --- a/utils/i_filter.cc +++ b/utils/i_filter.cc @@ -10,6 +10,7 @@ #include "log.hh" #include "bloom_filter.hh" #include "bloom_calculations.hh" +#include "utils/assert.hh" #include "utils/murmur_hash.hh" #include @@ -17,7 +18,7 @@ namespace utils { static logging::logger filterlog("bloom_filter"); filter_ptr i_filter::get_filter(int64_t num_elements, double max_false_pos_probability, filter_format fformat) { - assert(seastar::thread::running_in_thread()); + SCYLLA_ASSERT(seastar::thread::running_in_thread()); if (max_false_pos_probability > 1.0) { throw std::invalid_argument(format("Invalid probability {:f}: must be lower than 1.0", max_false_pos_probability)); diff --git 
a/utils/int_range.hh b/utils/int_range.hh index f6fe847eb6..1f891ea366 100644 --- a/utils/int_range.hh +++ b/utils/int_range.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include "interval.hh" #include @@ -17,8 +18,8 @@ using int_range = interval; inline unsigned cardinality(const int_range& r) { - assert(r.start()); - assert(r.end()); + SCYLLA_ASSERT(r.start()); + SCYLLA_ASSERT(r.end()); return r.end()->value() - r.start()->value() + r.start()->is_inclusive() + r.end()->is_inclusive() - 1; } diff --git a/utils/intrusive-array.hh b/utils/intrusive-array.hh index 8ea9a1fa6a..1236f2cce7 100644 --- a/utils/intrusive-array.hh +++ b/utils/intrusive-array.hh @@ -12,6 +12,7 @@ #include #include +#include "utils/assert.hh" #include "utils/allocation_strategy.hh" #include "utils/collection-concepts.hh" @@ -140,7 +141,7 @@ public: new (&_data[i].object) T(std::move(from._data[i - off].object)); } - assert(grow.add_pos <= i && i < max_len); + SCYLLA_ASSERT(grow.add_pos <= i && i < max_len); new (&_data[grow.add_pos].object) T(std::forward(args)...); @@ -212,15 +213,15 @@ public: * altogether if needed */ void erase(int pos) noexcept { - assert(!is_single_element()); - assert(pos < max_len); + SCYLLA_ASSERT(!is_single_element()); + SCYLLA_ASSERT(pos < max_len); bool with_train = _data[0].object.with_train(); bool tail = _data[pos].object.is_tail(); _data[pos].object.~T(); if (tail) { - assert(pos > 0); + SCYLLA_ASSERT(pos > 0); _data[pos - 1].object.set_tail(true); } else { while (!tail) { @@ -233,7 +234,7 @@ public: _data[0].object.set_train(true); unsigned short train_len = with_train ? _data[pos + 1].train_len : 0; - assert(train_len < max_len); + SCYLLA_ASSERT(train_len < max_len); _data[pos].train_len = train_len + 1; } @@ -324,7 +325,7 @@ public: static intrusive_array& from_element(T* ptr, int& idx) noexcept { idx = 0; while (!ptr->is_head()) { - assert(idx < max_len); // may the force be with us... 
+ SCYLLA_ASSERT(idx < max_len); // may the force be with us... idx++; ptr--; } diff --git a/utils/intrusive_btree.hh b/utils/intrusive_btree.hh index b07d545571..aeb63503ed 100644 --- a/utils/intrusive_btree.hh +++ b/utils/intrusive_btree.hh @@ -12,6 +12,7 @@ #include #include #include +#include "utils/assert.hh" #include "utils/collection-concepts.hh" #include "utils/neat-object-id.hh" #include "utils/allocation_strategy.hh" @@ -144,7 +145,7 @@ public: node_base* node() const noexcept { return _node; } void attach_first(node_base& to) noexcept { - assert(to.num_keys == 0); + SCYLLA_ASSERT(to.num_keys == 0); to.num_keys = 1; to.keys[0] = this; _node = &to; @@ -153,7 +154,7 @@ public: member_hook() noexcept = default; member_hook(const member_hook&) = delete; ~member_hook() { - assert(!attached()); + SCYLLA_ASSERT(!attached()); } member_hook(member_hook&& other) noexcept : _node(other._node) { @@ -223,12 +224,12 @@ private: }; static const tree* from_inline(const node_base* n) noexcept { - assert(n->is_inline()); + SCYLLA_ASSERT(n->is_inline()); return boost::intrusive::get_parent_from_member(n, &tree::_inline); } static tree* from_inline(node_base* n) noexcept { - assert(n->is_inline()); + SCYLLA_ASSERT(n->is_inline()); return boost::intrusive::get_parent_from_member(n, &tree::_inline); } @@ -274,7 +275,7 @@ private: bool match; cur.idx = cur.n->index_for(key, cmp, match); - assert(cur.idx <= cur.n->_base.num_keys); + SCYLLA_ASSERT(cur.idx <= cur.n->_base.num_keys); if (match || cur.n->is_leaf()) { return match; } @@ -284,13 +285,13 @@ private: } void do_set_root(node& n) noexcept { - assert(n.is_root()); + SCYLLA_ASSERT(n.is_root()); n._parent.t = this; _root = &n; } void do_set_left(node& n) noexcept { - assert(n.is_leftmost()); + SCYLLA_ASSERT(n.is_leftmost()); if (!n.is_linear()) { n._leaf_tree = this; } @@ -298,7 +299,7 @@ private: } void do_set_right(node& n) noexcept { - assert(n.is_rightmost()); + SCYLLA_ASSERT(n.is_rightmost()); if (!n.is_linear()) { 
n._leaf_tree = this; } @@ -364,10 +365,10 @@ public: tree(const tree& other) = delete; ~tree() noexcept { if (!inline_root()) { - assert(_root->is_leaf()); + SCYLLA_ASSERT(_root->is_leaf()); node::destroy(*_root); } else { - assert(_inline.empty()); + SCYLLA_ASSERT(_inline.empty()); } } @@ -486,7 +487,7 @@ public: cursor cur; match = key_lower_bound(k, cmp, cur); if (!match && cur.idx == cur.n->_base.num_keys) { - assert(cur.idx > 0); + SCYLLA_ASSERT(cur.idx > 0); cur.idx--; return ++const_iterator(cur); } @@ -597,7 +598,7 @@ public: nb->num_keys = 0; } else { node* n = node::from_base(nb); - assert(n->is_leaf()); + SCYLLA_ASSERT(n->is_leaf()); n->remove_leftmost_light_rebalance(); } return hook->to_key(); @@ -669,13 +670,13 @@ public: explicit iterator_base(tree_ptr t) noexcept : _tree(t), _idx(npos) {} iterator_base(key_hook_ptr h, key_index idx) noexcept : _hook(h), _idx(idx) { - assert(!is_end()); - assert(h->attached()); + SCYLLA_ASSERT(!is_end()); + SCYLLA_ASSERT(h->attached()); } explicit iterator_base(const cursor& cur) noexcept : _idx(cur.idx) { - assert(_idx < cur.n->_base.num_keys); + SCYLLA_ASSERT(_idx < cur.n->_base.num_keys); _hook = cur.n->_base.keys[_idx]; - assert(_hook->attached()); + SCYLLA_ASSERT(_hook->attached()); } iterator_base() noexcept : _tree(static_cast(nullptr)), _idx(npos) {} @@ -686,7 +687,7 @@ public: * and returns back the node that points to it. 
*/ node_base_ptr revalidate() noexcept { - assert(!is_end()); + SCYLLA_ASSERT(!is_end()); node_base_ptr n = _hook->node(); /* @@ -759,7 +760,7 @@ public: iterator_base& operator--() noexcept { if (is_end()) { node_base_ptr n = _tree->rightmost_node(); - assert(n->num_keys > 0); + SCYLLA_ASSERT(n->num_keys > 0); _idx = n->num_keys - 1u; _hook = n->keys[_idx]; return *this; @@ -987,7 +988,7 @@ public: public: explicit key_grabber(iterator& it) : _it(it) { - assert(!_it.is_end()); + SCYLLA_ASSERT(!_it.is_end()); } key_grabber(const key_grabber&) = delete; @@ -1086,8 +1087,8 @@ struct searcher { bool ml, mr; key_index rl = searcher::ge(k, node, cmp, ml); key_index rb = searcher::ge(k, node, cmp, mr); - assert(rl == rb); - assert(ml == mr); + SCYLLA_ASSERT(rl == rb); + SCYLLA_ASSERT(ml == mr); match = ml; return rl; } @@ -1171,12 +1172,12 @@ class node { }; tree* corner_tree() const noexcept { - assert(is_leaf()); + SCYLLA_ASSERT(is_leaf()); if (!is_linear()) { return _leaf_tree; } - assert(is_root()); + SCYLLA_ASSERT(is_root()); return _parent.t; } @@ -1254,21 +1255,21 @@ private: } void unlink_corner_leaf() noexcept { - assert(!is_root()); + SCYLLA_ASSERT(!is_root()); node* p = _parent.n, *x; switch (_base.flags & (node_base::NODE_LEFTMOST | node_base::NODE_RIGHTMOST)) { case 0: break; case node_base::NODE_LEFTMOST: - assert(p->_base.num_keys > 0 && p->_kids[0] == this); + SCYLLA_ASSERT(p->_base.num_keys > 0 && p->_kids[0] == this); x = p->_kids[1]; _base.flags &= ~node_base::NODE_LEFTMOST; x->_base.flags |= node_base::NODE_LEFTMOST; _leaf_tree->do_set_left(*x); break; case node_base::NODE_RIGHTMOST: - assert(p->_base.num_keys > 0 && p->_kids[p->_base.num_keys] == this); + SCYLLA_ASSERT(p->_base.num_keys > 0 && p->_kids[p->_base.num_keys] == this); x = p->_kids[p->_base.num_keys - 1]; _base.flags &= ~node_base::NODE_RIGHTMOST; x->_base.flags |= node_base::NODE_RIGHTMOST; @@ -1279,17 +1280,17 @@ private: * Right- and left-most at the same time can only be root, * 
otherwise this would mean we have root with 0 keys. */ - assert(false); + SCYLLA_ASSERT(false); } } static const node* from_base(const node_base* nb) noexcept { - assert(!nb->is_inline()); + SCYLLA_ASSERT(!nb->is_inline()); return boost::intrusive::get_parent_from_member(nb, &node::_base); } static node* from_base(node_base* nb) noexcept { - assert(!nb->is_inline()); + SCYLLA_ASSERT(!nb->is_inline()); return boost::intrusive::get_parent_from_member(nb, &node::_base); } @@ -1331,7 +1332,7 @@ public: node(const node& other) = delete; ~node() { - assert(_base.num_keys == 0); + SCYLLA_ASSERT(_base.num_keys == 0); } size_t storage_size() const noexcept { @@ -1366,12 +1367,12 @@ private: } void drop() noexcept { - assert(!(is_leftmost() || is_rightmost())); + SCYLLA_ASSERT(!(is_leftmost() || is_rightmost())); if (Debug == with_debug::yes && !is_root()) { node* p = _parent.n; if (p->_base.num_keys != 0) { for (kid_index i = 0; i <= p->_base.num_keys; i++) { - assert(p->_kids[i] != this); + SCYLLA_ASSERT(p->_kids[i] != this); } } } @@ -1389,7 +1390,7 @@ private: // Two helpers for raw pointers lookup. 
kid_index index_for(const node* kid) const noexcept { - assert(!is_leaf()); + SCYLLA_ASSERT(!is_leaf()); for (kid_index i = 0; i <= _base.num_keys; i++) { if (_kids[i] == kid) { @@ -1530,7 +1531,7 @@ private: move_key(src++, *root, root->_base.num_keys++); leaf = nodes.pop(true); root->set_kid(root->_base.num_keys, leaf); - assert(src != _base.num_keys); // need more keys for the next leaf + SCYLLA_ASSERT(src != _base.num_keys); // need more keys for the next leaf } } adjust_idx(); @@ -1539,12 +1540,12 @@ private: _base.flags &= ~(node_base::NODE_LEFTMOST | node_base::NODE_RIGHTMOST); drop(); - assert(new_insertion != nullptr); + SCYLLA_ASSERT(new_insertion != nullptr); return new_insertion; } node* check_linear_capacity(kid_index& idx) { - assert(make_linear_root && is_root() && is_leaf()); + SCYLLA_ASSERT(make_linear_root && is_root() && is_leaf()); if (_base.num_keys < _base.capacity) { return this; @@ -1585,7 +1586,7 @@ private: * starts with the leaf. Upper levels get their new keys * only if these come up from the deep. 
*/ - assert(is_leaf()); + SCYLLA_ASSERT(is_leaf()); if (_base.num_keys < _base.capacity) { /* @@ -1656,7 +1657,7 @@ private: */ idx--; } else if (is_leaf()) { - assert(kid == nullptr); + SCYLLA_ASSERT(kid == nullptr); p->move_key(i - 1, *left, left->_base.num_keys); left->_base.num_keys++; p->set_key(i - 1, &key); @@ -1671,7 +1672,7 @@ private: if (idx < _base.num_keys) { right->grab_from_left(this, i + 1); } else if (is_leaf()) { - assert(kid == nullptr); + SCYLLA_ASSERT(kid == nullptr); right->shift_right(0); p->move_key(i, *right, 0); p->set_key(i, &key); @@ -1988,7 +1989,7 @@ private: } void light_refill() noexcept { - assert(_parent.n->_base.num_keys > 0); + SCYLLA_ASSERT(_parent.n->_base.num_keys > 0); node* right = _parent.n->_kids[1]; /* @@ -2128,7 +2129,7 @@ private: size_t external_memory_usage() const noexcept { if (is_linear()) { - assert(is_leaf()); + SCYLLA_ASSERT(is_leaf()); return linear_node_size(_base.capacity); } @@ -2137,7 +2138,7 @@ private: } size_t size = inner_node_size; - assert(_base.num_keys != 0); + SCYLLA_ASSERT(_base.num_keys != 0); for (kid_index i = 0; i <= _base.num_keys; i++) { size += _kids[i]->external_memory_usage(); } @@ -2153,7 +2154,7 @@ private: } else { st.nodes_filled[_base.num_keys]++; st.nodes++; - assert(_base.num_keys != 0); + SCYLLA_ASSERT(_base.num_keys != 0); for (kid_index i = 0; i <= _base.num_keys; i++) { _kids[i]->fill_stats(st); } @@ -2165,7 +2166,7 @@ private: node** _tail = &_nodes; node* pop() noexcept { - assert(!empty()); + SCYLLA_ASSERT(!empty()); node* ret = _nodes; _nodes = ret->_parent.n; if (_tail == &ret->_parent.n) { @@ -2191,7 +2192,7 @@ private: node* pop(bool leaf) noexcept { node* ret = pop(); - assert(leaf == ret->is_leaf()); + SCYLLA_ASSERT(leaf == ret->is_leaf()); return ret; } diff --git a/utils/large_bitset.cc b/utils/large_bitset.cc index 2043ca32d2..950ee4b083 100644 --- a/utils/large_bitset.cc +++ b/utils/large_bitset.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ 
+#include "utils/assert.hh" #include "large_bitset.hh" #include "stall_free.hh" @@ -15,7 +16,7 @@ using namespace seastar; large_bitset::large_bitset(size_t nr_bits) : _nr_bits(nr_bits) { - assert(thread::running_in_thread()); + SCYLLA_ASSERT(thread::running_in_thread()); size_t nr_ints = align_up(nr_bits, bits_per_int()) / bits_per_int(); utils::reserve_gently(_storage, nr_ints).get(); @@ -28,7 +29,7 @@ large_bitset::large_bitset(size_t nr_bits) : _nr_bits(nr_bits) { void large_bitset::clear() { - assert(thread::running_in_thread()); + SCYLLA_ASSERT(thread::running_in_thread()); for (auto&& pos: _storage) { pos = 0; thread::maybe_yield(); diff --git a/utils/lister.cc b/utils/lister.cc index 821ed621fe..869bfc6d6b 100644 --- a/utils/lister.cc +++ b/utils/lister.cc @@ -3,6 +3,7 @@ #include #include #include +#include "utils/assert.hh" #include "utils/lister.hh" #include "checked-file-impl.hh" @@ -90,7 +91,7 @@ future> directory_lister::get() { auto walker = [this] (fs::path dir, directory_entry de) { return _queue.push_eventually(std::make_optional(std::move(de))); }; - assert(!_lister); + SCYLLA_ASSERT(!_lister); _lister = std::make_unique(std::move(dir), _type, std::move(walker), _filter, _dir, _do_show_hidden); _opt_done_fut = _lister->done().then_wrapped([this] (future<> f) { if (f.failed()) [[unlikely]] { diff --git a/utils/loading_cache.hh b/utils/loading_cache.hh index 75aa4956f1..8f0fa5ae71 100644 --- a/utils/loading_cache.hh +++ b/utils/loading_cache.hh @@ -26,6 +26,7 @@ #include #include "exceptions/exceptions.hh" +#include "utils/assert.hh" #include "utils/loading_shared_values.hh" #include "utils/chunked_vector.hh" #include "log.hh" @@ -144,7 +145,7 @@ class loading_cache { timestamped_val(timestamped_val&&) = default; timestamped_val& operator=(value_type new_val) { - assert(_lru_entry_ptr); + SCYLLA_ASSERT(_lru_entry_ptr); _value = std::move(new_val); _loaded = loading_cache_clock_type::now(); @@ -305,7 +306,7 @@ public: requires std::is_invocable_r_v, 
LoadFunc, const key_type&> future get_ptr(const Key& k, LoadFunc&& load) { // We shouldn't be here if caching is disabled - assert(caching_enabled()); + SCYLLA_ASSERT(caching_enabled()); return _loading_values.get_or_load(k, [load = std::forward(load)] (const Key& k) mutable { return load(k).then([] (value_type val) { diff --git a/utils/loading_shared_values.hh b/utils/loading_shared_values.hh index f2920ce320..4b3a1e030c 100644 --- a/utils/loading_shared_values.hh +++ b/utils/loading_shared_values.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include #include @@ -200,7 +201,7 @@ public: loading_shared_values(loading_shared_values&&) = default; loading_shared_values(const loading_shared_values&) = delete; ~loading_shared_values() { - assert(!_set.size()); + SCYLLA_ASSERT(!_set.size()); } /// \brief diff --git a/utils/logalloc.cc b/utils/logalloc.cc index fb7d70e50f..e8c84ee2d4 100644 --- a/utils/logalloc.cc +++ b/utils/logalloc.cc @@ -27,6 +27,7 @@ #include #include +#include "utils/assert.hh" #include "utils/logalloc.hh" #include "log.hh" #include "utils/dynamic_bitset.hh" @@ -60,7 +61,7 @@ template template void poison(const T* addr, size_t size) { // Both values and descriptors must be aligned. 
- assert(uintptr_t(addr) % 8 == 0); + SCYLLA_ASSERT(uintptr_t(addr) % 8 == 0); // This can be followed by // * 8 byte aligned descriptor (this is a value) // * 8 byte aligned value @@ -194,7 +195,7 @@ migrate_fn_type::register_migrator(migrate_fn_type* m) { auto& migrators = *debug::static_migrators; auto idx = migrators.add(m); // object_descriptor encodes 2 * index() + 1 - assert(idx * 2 + 1 < utils::uleb64_express_supreme); + SCYLLA_ASSERT(idx * 2 + 1 < utils::uleb64_express_supreme); m->_migrators = migrators.shared_from_this(); return idx; } @@ -540,7 +541,7 @@ public: void enable_abort_on_bad_alloc() noexcept { _abort_on_bad_alloc = true; } bool should_abort_on_bad_alloc() const noexcept { return _abort_on_bad_alloc; } void setup_background_reclaim(scheduling_group sg) { - assert(!_background_reclaimer); + SCYLLA_ASSERT(!_background_reclaimer); _background_reclaimer.emplace(sg, [this] (size_t target) { reclaim(target, is_preemptible::yes); }); @@ -927,14 +928,14 @@ public: if (_delegate_store) { return _delegate_store->segment_from_idx(idx); } - assert(idx < _segments.size()); + SCYLLA_ASSERT(idx < _segments.size()); return _segments[idx]; } segment* segment_from_idx(size_t idx) noexcept { if (_delegate_store) { return _delegate_store->segment_from_idx(idx); } - assert(idx < _segments.size()); + SCYLLA_ASSERT(idx < _segments.size()); return _segments[idx]; } size_t idx_from_segment(const segment* seg) const noexcept { @@ -958,7 +959,7 @@ public: auto seg = new (p) segment; poison(seg, sizeof(segment)); auto i = find_empty(); - assert(i != _segments.end()); + SCYLLA_ASSERT(i != _segments.end()); *i = seg; size_t ret = i - _segments.begin(); _segment_indexes[seg] = ret; @@ -1103,7 +1104,7 @@ public: _non_lsa_memory_in_use += n; } void subtract_non_lsa_memory_in_use(size_t n) noexcept { - assert(_non_lsa_memory_in_use >= n); + SCYLLA_ASSERT(_non_lsa_memory_in_use >= n); _non_lsa_memory_in_use -= n; } size_t non_lsa_memory_in_use() const noexcept { @@ -1335,7 
+1336,7 @@ segment* segment_pool::allocate_segment(size_t reserve) void segment_pool::deallocate_segment(segment* seg) noexcept { - assert(_lsa_owned_segments_bitmap.test(idx_from_segment(seg))); + SCYLLA_ASSERT(_lsa_owned_segments_bitmap.test(idx_from_segment(seg))); _lsa_free_segments_bitmap.set(idx_from_segment(seg)); _free_segments++; } @@ -1378,7 +1379,7 @@ segment_pool::containing_segment(const void* obj) noexcept { segment* segment_pool::segment_from(const segment_descriptor& desc) noexcept { - assert(desc._region); + SCYLLA_ASSERT(desc._region); auto index = &desc - &_segments[0]; return segment_from_idx(index); } @@ -1898,7 +1899,7 @@ private: if (seg != _buf_active) { if (desc.is_empty()) { - assert(desc._buf_pointers.empty()); + SCYLLA_ASSERT(desc._buf_pointers.empty()); _segment_descs.erase(desc); desc._buf_pointers = std::vector(); free_segment(seg, desc); @@ -1924,7 +1925,7 @@ private: for (entangled& e : _buf_ptrs_for_compact_segment) { if (e) { lsa_buffer* old_ptr = e.get(&lsa_buffer::_link); - assert(&desc == old_ptr->_desc); + SCYLLA_ASSERT(&desc == old_ptr->_desc); lsa_buffer dst = alloc_buf(old_ptr->_size); memcpy(dst._buf, old_ptr->_buf, dst._size); old_ptr->_link = std::move(dst._link); @@ -1959,7 +1960,7 @@ private: // Memory allocation above could allocate active buffer during segment compaction. 
close_buf_active(); } - assert((uintptr_t)new_active->at(0) % buf_align == 0); + SCYLLA_ASSERT((uintptr_t)new_active->at(0) % buf_align == 0); segment_descriptor& desc = segment_pool().descriptor(new_active); desc._buf_pointers = std::move(ptrs); desc.set_kind(segment_kind::bufs); @@ -2004,17 +2005,17 @@ public: while (!_segment_descs.empty()) { auto& desc = _segment_descs.one_of_largest(); _segment_descs.pop_one_of_largest(); - assert(desc.is_empty()); + SCYLLA_ASSERT(desc.is_empty()); free_segment(desc); } _closed_occupancy = {}; if (_active) { - assert(segment_pool().descriptor(_active).is_empty()); + SCYLLA_ASSERT(segment_pool().descriptor(_active).is_empty()); free_segment(_active); _active = nullptr; } if (_buf_active) { - assert(segment_pool().descriptor(_buf_active).is_empty()); + SCYLLA_ASSERT(segment_pool().descriptor(_buf_active).is_empty()); free_segment(_buf_active); _buf_active = nullptr; } @@ -2131,7 +2132,7 @@ private: void on_non_lsa_free(void* obj) noexcept { auto allocated_size = malloc_usable_size(obj); auto cookie = (non_lsa_object_cookie*)((char*)obj + allocated_size) - 1; - assert(cookie->value == non_lsa_object_cookie().value); + SCYLLA_ASSERT(cookie->value == non_lsa_object_cookie().value); _non_lsa_occupancy -= occupancy_stats(0, allocated_size); if (_listener) { _evictable_space -= allocated_size; @@ -2390,7 +2391,7 @@ public: void unreserve(uintptr_t n_segments) noexcept override { auto& pool = segment_pool(); - assert(pool.current_emergency_reserve_goal() >= n_segments); + SCYLLA_ASSERT(pool.current_emergency_reserve_goal() >= n_segments); size_t new_goal = pool.current_emergency_reserve_goal() - n_segments; pool.set_current_emergency_reserve_goal(new_goal); } diff --git a/utils/logalloc.hh b/utils/logalloc.hh index 9df9efbe5a..8e8e3bdcf3 100644 --- a/utils/logalloc.hh +++ b/utils/logalloc.hh @@ -18,6 +18,7 @@ #include "allocation_strategy.hh" #include "seastarx.hh" #include "db/timeout_clock.hh" +#include "utils/assert.hh" #include 
"utils/entangled.hh" #include "utils/memory_limit_reached.hh" @@ -304,7 +305,7 @@ public: tracker& get_tracker() { return _tracker; } void set_reclaiming_enabled(bool enabled) noexcept { - assert(this_shard_id() == _cpu); + SCYLLA_ASSERT(this_shard_id() == _cpu); _reclaiming_enabled = enabled; } @@ -494,7 +495,7 @@ public: // template decltype(auto) with_reclaiming_disabled(logalloc::region& r, Func&& fn) { - assert(r.reclaiming_enabled()); + SCYLLA_ASSERT(r.reclaiming_enabled()); maybe_decay_reserve(); while (true) { try { diff --git a/utils/lru.hh b/utils/lru.hh index db865a41b1..699bd22487 100644 --- a/utils/lru.hh +++ b/utils/lru.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include #include @@ -37,7 +38,7 @@ protected: // in the destructor, we can't perform proper accounting for that without access to the // head of the containing list. ~evictable() { - assert(!_lru_link.is_linked()); + SCYLLA_ASSERT(!_lru_link.is_linked()); } evictable() = default; evictable(evictable&&) noexcept = default; diff --git a/utils/on_internal_error.hh b/utils/on_internal_error.hh index 3b4b8ae2f5..d217c9c619 100644 --- a/utils/on_internal_error.hh +++ b/utils/on_internal_error.hh @@ -6,8 +6,8 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ -// Seastar's on_internal_error() is a replacement for assert(). Instead of -// crashing like assert(), on_internal_error() logs a message with a +// Seastar's on_internal_error() is a replacement for SCYLLA_ASSERT(). Instead of +// crashing like SCYLLA_ASSERT(), on_internal_error() logs a message with a // backtrace and throws an exception (and optionally also crashes - this can // be useful for testing). However, Seastar's function is inconvenient because // it requires specifying a logger. 
This makes it hard to call it from source @@ -19,6 +19,7 @@ #pragma once +#include "utils/assert.hh" #include namespace utils { diff --git a/utils/pretty_printers.cc b/utils/pretty_printers.cc index 2531690f6d..cd6ba10d64 100644 --- a/utils/pretty_printers.cc +++ b/utils/pretty_printers.cc @@ -6,6 +6,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +#include "utils/assert.hh" #include "pretty_printers.hh" #include #include @@ -13,7 +14,7 @@ template static constexpr std::tuple do_format(size_t n, Suffixes suffixes, unsigned scale, unsigned precision, bool bytes) { - assert(scale < precision); + SCYLLA_ASSERT(scale < precision); size_t factor = n; const char* suffix = ""; size_t remainder = 0; diff --git a/utils/result_try.hh b/utils/result_try.hh index 733516db7f..7226a1c80f 100644 --- a/utils/result_try.hh +++ b/utils/result_try.hh @@ -10,6 +10,7 @@ #include #include +#include "utils/assert.hh" #include "utils/result.hh" namespace utils {
diff --git a/utils/reusable_buffer.hh b/utils/reusable_buffer.hh index 1a40a62b16..c3a34d1d03 100644 --- a/utils/reusable_buffer.hh +++ b/utils/reusable_buffer.hh @@ -8,6 +8,7 @@ #pragma once +#include "utils/assert.hh" #include "utils/fragmented_temporary_buffer.hh" #include #include @@ -42,7 +43,7 @@ protected: reusable_buffer_impl() = default; ~reusable_buffer_impl() { - assert(_refcount == 0); + SCYLLA_ASSERT(_refcount == 0); } void resize(size_t new_size) & { @@ -214,7 +215,7 @@ private: period_type _decay_period; void decay() & { - assert(_refcount == 0); + SCYLLA_ASSERT(_refcount == 0); if (_high_watermark <= _buf_size / 16) { // We shrink when the size falls at least by four power-of-2 // notches, instead of just one notch. This adds hysteresis:
@@ -258,7 +259,7 @@ private: bool used = false; private: void mark_used() { - assert(!used); + SCYLLA_ASSERT(!used); used = true; } public: @@ -268,7 +269,7 @@ public: reusable_buffer_guard(reusable_buffer_impl& _buf) : _buf(_buf) { - assert(_buf._refcount == 0); + SCYLLA_ASSERT(_buf._refcount == 0); _buf._refcount += 1; } diff --git a/utils/rjson.hh b/utils/rjson.hh index 2b5a6f0a6e..ef45dedb8d 100644 --- a/utils/rjson.hh +++ b/utils/rjson.hh @@ -29,6 +29,7 @@ #include #include #include +#include "utils/assert.hh" #include "utils/base64.hh" #include
diff --git a/utils/s3/client.cc b/utils/s3/client.cc index fc21f5a5b5..83bdfd4d88 100644 --- a/utils/s3/client.cc +++ b/utils/s3/client.cc @@ -35,6 +35,7 @@ #include #include #include +#include "utils/assert.hh" #include "utils/s3/client.hh" #include "utils/http.hh" #include "utils/memory_data_sink.hh" @@ -287,7 +288,7 @@ future client::get_object_stats(sstring object_name) { static rapidxml::xml_node<>* first_node_of(rapidxml::xml_node<>* root, std::initializer_list names) { - assert(root); + SCYLLA_ASSERT(root); auto* node = root; for (auto name : names) { node = node->first_node(name.data(), name.size()); @@ -577,7 +578,7 @@ future<> dump_multipart_upload_parts(output_stream out, const utils::chunk unsigned nr = 1; for (auto& etag : etags) { - assert(!etag.empty()); + SCYLLA_ASSERT(!etag.empty()); co_await out.write(format(multipart_upload_complete_entry.data(), etag, nr)); nr++; } @@ -981,8 +982,8 @@ class client::do_upload_file { } static size_t div_ceil(size_t x, size_t y) { - assert(std::in_range(x)); - assert(std::in_range(y)); + SCYLLA_ASSERT(std::in_range(x)); + SCYLLA_ASSERT(std::in_range(y)); auto [quot, rem] = std::lldiv(x, y); return rem ? 
quot + 1 : quot; } diff --git a/utils/throttle.hh b/utils/throttle.hh index 320e8bef7d..5150fa3275 100644 --- a/utils/throttle.hh +++ b/utils/throttle.hh @@ -1,5 +1,6 @@ #pragma once +#include "utils/assert.hh" #include #include @@ -51,7 +52,7 @@ public: } void unblock() { - assert(_block_counter); + SCYLLA_ASSERT(_block_counter); if (--_block_counter == 0) { _p.set_value(); } diff --git a/utils/top_k.hh b/utils/top_k.hh index ffaf6f7e31..bf0739e4be 100644 --- a/utils/top_k.hh +++ b/utils/top_k.hh @@ -52,6 +52,7 @@ #include #include +#include "utils/assert.hh" #include "utils/chunked_vector.hh" namespace utils { @@ -146,9 +147,9 @@ public: (*counter_it)->bucket_it = new_bucket_it; } else { buckets_iterator min_bucket = _buckets.begin(); - assert(min_bucket != _buckets.end()); + SCYLLA_ASSERT(min_bucket != _buckets.end()); counter_it = min_bucket->counters.begin(); - assert(counter_it != min_bucket->counters.end()); + SCYLLA_ASSERT(counter_it != min_bucket->counters.end()); counter_ptr ctr = *counter_it; _counters_map.erase(ctr->item); dropped_item = std::exchange(ctr->item, std::move(item));