From ba7a9d2ac3fcd8076fe19ce9b5b3dc106dbddfd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Botond=20D=C3=A9nes?= Date: Fri, 30 Oct 2020 11:11:36 +0200 Subject: [PATCH] imr: switch back to open-coded description of structures Commit aab6b0ee27a7bc3fdf1d879d25291f807b4795a5 introduced the controversial new IMR format, which relied on a very template-heavy infrastructure to generate serialization and deserialization code via template meta-programming. The promise was that this new format, beyond solving the problems the previous open-coded representation had (working on linearized buffers), would speed up migrating other components to this IMR format, as the IMR infrastructure reduces code bloat, makes the code more readable via declarative type descriptions as well as safer. However, the results were almost the opposite. The template meta-programming used by the IMR infrastructure proved very hard to understand. Developers don't want to read or modify it. Maintainers don't want to see it being used anywhere else. In short, nobody wants to touch it. This commit does a conceptual revert of aab6b0ee27a7bc3fdf1d879d25291f807b4795a5. A verbatim revert is not possible because related code evolved a lot since the merge. Also, going back to the previous code would mean we regress as we'd revert the move to fragmented buffers. So this revert is only conceptual: it changes the underlying infrastructure back to the previous open-coded one, but keeps the fragmented buffers, as well as the interface of the related components (to the extent possible). 
Fixes: #5578 --- atomic_cell.cc | 153 +-- atomic_cell.hh | 278 +++-- atomic_cell_hash.hh | 4 +- atomic_cell_or_collection.hh | 33 +- cdc/log.cc | 6 +- collection_mutation.cc | 129 +-- collection_mutation.hh | 12 +- configure.py | 7 - converting_mutation_partition_applier.cc | 4 +- counters.cc | 21 +- counters.hh | 116 +- cql3/expr/expression.cc | 2 +- cql3/functions/error_injection_fcts.cc | 1 + data/cell.cc | 52 - data/cell.hh | 1014 ------------------ data/cell_impl.hh | 75 -- data/schema_info.hh | 65 -- data/value_view.hh | 137 --- data/value_view_impl.hh | 116 -- db/commitlog/commitlog.cc | 1 + db/hints/resource_manager.hh | 1 + db/view/row_locking.cc | 2 + db/view/view.cc | 10 +- imr/alloc.hh | 321 ------ imr/compound.hh | 591 ---------- imr/concepts.hh | 83 -- imr/core.hh | 63 -- imr/fundamental.hh | 308 ------ imr/methods.hh | 169 --- imr/utils.hh | 201 ---- lua.cc | 1 + mutation_partition.cc | 5 +- mutation_partition_serializer.cc | 7 +- repair/repair.hh | 1 + service/migration_manager.hh | 1 + sstables/compaction_manager.hh | 1 + sstables/kl/writer.cc | 3 +- sstables/mx/writer.cc | 7 +- sstables/writer.hh | 20 +- test/boost/counter_test.cc | 78 +- test/boost/imr_test.cc | 847 --------------- test/boost/meta_test.cc | 220 ---- test/boost/multishard_mutation_query_test.cc | 7 +- test/boost/sstable_3_x_test.cc | 5 +- test/boost/sstable_datafile_test.cc | 18 +- test/manual/partition_data_test.cc | 84 +- types.cc | 37 +- types.hh | 11 +- types/collection.hh | 2 +- utils/meta.hh | 175 --- 50 files changed, 481 insertions(+), 5024 deletions(-) delete mode 100644 data/cell.cc delete mode 100644 data/cell.hh delete mode 100644 data/cell_impl.hh delete mode 100644 data/schema_info.hh delete mode 100644 data/value_view.hh delete mode 100644 data/value_view_impl.hh delete mode 100644 imr/alloc.hh delete mode 100644 imr/compound.hh delete mode 100644 imr/concepts.hh delete mode 100644 imr/core.hh delete mode 100644 imr/fundamental.hh delete mode 100644 imr/methods.hh 
delete mode 100644 imr/utils.hh delete mode 100644 test/boost/imr_test.cc delete mode 100644 test/boost/meta_test.cc delete mode 100644 utils/meta.hh diff --git a/atomic_cell.cc b/atomic_cell.cc index 8a2130f087..5371421db6 100644 --- a/atomic_cell.cc +++ b/atomic_cell.cc @@ -24,142 +24,82 @@ #include "counters.hh" #include "types.hh" -/// LSA mirator for cells with irrelevant type -/// -/// -const data::type_imr_descriptor& no_type_imr_descriptor() { - static thread_local data::type_imr_descriptor state(data::type_info::make_variable_size()); - return state; -} - atomic_cell atomic_cell::make_dead(api::timestamp_type timestamp, gc_clock::time_point deletion_time) { - auto& imr_data = no_type_imr_descriptor(); - return atomic_cell( - imr_data.type_info(), - imr_object_type::make(data::cell::make_dead(timestamp, deletion_time), &imr_data.lsa_migrator()) - ); + return atomic_cell_type::make_dead(timestamp, deletion_time); } atomic_cell atomic_cell::make_live(const abstract_type& type, api::timestamp_type timestamp, bytes_view value, atomic_cell::collection_member cm) { - auto& imr_data = type.imr_state(); - return atomic_cell( - imr_data.type_info(), - imr_object_type::make(data::cell::make_live(imr_data.type_info(), timestamp, value, bool(cm)), &imr_data.lsa_migrator()) - ); + return atomic_cell_type::make_live(timestamp, single_fragment_range(value)); +} + +atomic_cell atomic_cell::make_live(const abstract_type& type, api::timestamp_type timestamp, managed_bytes_view value, atomic_cell::collection_member cm) { + return atomic_cell_type::make_live(timestamp, fragment_range(value)); } atomic_cell atomic_cell::make_live(const abstract_type& type, api::timestamp_type timestamp, ser::buffer_view value, atomic_cell::collection_member cm) { - auto& imr_data = type.imr_state(); - return atomic_cell( - imr_data.type_info(), - imr_object_type::make(data::cell::make_live(imr_data.type_info(), timestamp, value, bool(cm)), &imr_data.lsa_migrator()) - ); + return 
atomic_cell_type::make_live(timestamp, value); } atomic_cell atomic_cell::make_live(const abstract_type& type, api::timestamp_type timestamp, const fragmented_temporary_buffer::view& value, collection_member cm) { - auto& imr_data = type.imr_state(); - return atomic_cell( - imr_data.type_info(), - imr_object_type::make(data::cell::make_live(imr_data.type_info(), timestamp, value, bool(cm)), &imr_data.lsa_migrator()) - ); + return atomic_cell_type::make_live(timestamp, value); } atomic_cell atomic_cell::make_live(const abstract_type& type, api::timestamp_type timestamp, bytes_view value, gc_clock::time_point expiry, gc_clock::duration ttl, atomic_cell::collection_member cm) { - auto& imr_data = type.imr_state(); - return atomic_cell( - imr_data.type_info(), - imr_object_type::make(data::cell::make_live(imr_data.type_info(), timestamp, value, expiry, ttl, bool(cm)), &imr_data.lsa_migrator()) - ); + return atomic_cell_type::make_live(timestamp, single_fragment_range(value), expiry, ttl); +} + +atomic_cell atomic_cell::make_live(const abstract_type& type, api::timestamp_type timestamp, managed_bytes_view value, + gc_clock::time_point expiry, gc_clock::duration ttl, atomic_cell::collection_member cm) { + return atomic_cell_type::make_live(timestamp, fragment_range(value), expiry, ttl); } atomic_cell atomic_cell::make_live(const abstract_type& type, api::timestamp_type timestamp, ser::buffer_view value, gc_clock::time_point expiry, gc_clock::duration ttl, atomic_cell::collection_member cm) { - auto& imr_data = type.imr_state(); - return atomic_cell( - imr_data.type_info(), - imr_object_type::make(data::cell::make_live(imr_data.type_info(), timestamp, value, expiry, ttl, bool(cm)), &imr_data.lsa_migrator()) - ); + return atomic_cell_type::make_live(timestamp, value, expiry, ttl); } atomic_cell atomic_cell::make_live(const abstract_type& type, api::timestamp_type timestamp, const fragmented_temporary_buffer::view& value, gc_clock::time_point expiry, gc_clock::duration ttl, 
collection_member cm) { - auto& imr_data = type.imr_state(); - return atomic_cell( - imr_data.type_info(), - imr_object_type::make(data::cell::make_live(imr_data.type_info(), timestamp, value, expiry, ttl, bool(cm)), &imr_data.lsa_migrator()) - ); + return atomic_cell_type::make_live(timestamp, value, expiry, ttl); } atomic_cell atomic_cell::make_live_counter_update(api::timestamp_type timestamp, int64_t value) { - auto& imr_data = no_type_imr_descriptor(); - return atomic_cell( - imr_data.type_info(), - imr_object_type::make(data::cell::make_live_counter_update(timestamp, value), &imr_data.lsa_migrator()) - ); + return atomic_cell_type::make_live_counter_update(timestamp, value); } atomic_cell atomic_cell::make_live_uninitialized(const abstract_type& type, api::timestamp_type timestamp, size_t size) { - auto& imr_data = no_type_imr_descriptor(); - return atomic_cell( - imr_data.type_info(), - imr_object_type::make(data::cell::make_live_uninitialized(imr_data.type_info(), timestamp, size), &imr_data.lsa_migrator()) - ); -} - -static imr::utils::object copy_cell(const data::type_imr_descriptor& imr_data, const uint8_t* ptr) -{ - using imr_object_type = imr::utils::object; - - // If the cell doesn't own any memory it is trivial and can be copied with - // memcpy. - auto f = data::cell::structure::get_member(ptr); - if (!f.template get()) { - data::cell::context ctx(f, imr_data.type_info()); - // XXX: We may be better off storing the total cell size in memory. Measure! 
- auto size = data::cell::structure::serialized_object_size(ptr, ctx); - return imr_object_type::make_raw(size, [&] (uint8_t* dst) noexcept { - std::copy_n(ptr, size, dst); - }, &imr_data.lsa_migrator()); - } - - return imr_object_type::make(data::cell::copy_fn(imr_data.type_info(), ptr), &imr_data.lsa_migrator()); + return atomic_cell_type::make_live_uninitialized(timestamp, size); } atomic_cell::atomic_cell(const abstract_type& type, atomic_cell_view other) - : atomic_cell(type.imr_state().type_info(), - copy_cell(type.imr_state(), other._view.raw_pointer())) -{ } + : _data(other._view) { + set_view(_data); +} atomic_cell_or_collection atomic_cell_or_collection::copy(const abstract_type& type) const { - if (!_data.get()) { + if (_data.empty()) { return atomic_cell_or_collection(); } - auto& imr_data = type.imr_state(); - return atomic_cell_or_collection( - copy_cell(imr_data, _data.get()) - ); + return atomic_cell_or_collection(managed_bytes(_data)); } atomic_cell_or_collection::atomic_cell_or_collection(const abstract_type& type, atomic_cell_view acv) - : _data(copy_cell(type.imr_state(), acv._view.raw_pointer())) + : _data(acv._view) { } bool atomic_cell_or_collection::equals(const abstract_type& type, const atomic_cell_or_collection& other) const { - auto ptr_a = _data.get(); - auto ptr_b = other._data.get(); - - if (!ptr_a || !ptr_b) { - return !ptr_a && !ptr_b; + if (_data.empty() || other._data.empty()) { + return _data.empty() && other._data.empty(); } if (type.is_atomic()) { - auto a = atomic_cell_view::from_bytes(type.imr_state().type_info(), _data); - auto b = atomic_cell_view::from_bytes(type.imr_state().type_info(), other._data); + auto a = atomic_cell_view::from_bytes(type, _data); + auto b = atomic_cell_view::from_bytes(type, other._data); if (a.timestamp() != b.timestamp()) { return false; } @@ -191,28 +131,7 @@ bool atomic_cell_or_collection::equals(const abstract_type& type, const atomic_c size_t 
atomic_cell_or_collection::external_memory_usage(const abstract_type& t) const { - if (!_data.get()) { - return 0; - } - auto ctx = data::cell::context(_data.get(), t.imr_state().type_info()); - - auto view = data::cell::structure::make_view(_data.get(), ctx); - auto flags = view.get(); - - size_t external_value_size = 0; - if (flags.get()) { - if (flags.get()) { - external_value_size = as_collection_mutation().data.size_bytes(); - } else { - auto cell_view = data::cell::atomic_cell_view(t.imr_state().type_info(), view); - external_value_size = cell_view.value_size(); - } - // Add overhead of chunk headers. The last one is a special case. - external_value_size += (external_value_size - 1) / data::cell::effective_external_chunk_length * data::cell::external_chunk_overhead; - external_value_size += data::cell::external_last_chunk_overhead; - } - return data::cell::structure::serialized_object_size(_data.get(), ctx) - + imr_object_type::size_overhead + external_value_size; + return _data.external_memory_usage(); } std::ostream& @@ -221,7 +140,7 @@ operator<<(std::ostream& os, const atomic_cell_view& acv) { return fmt_print(os, "atomic_cell{{{},ts={:d},expiry={:d},ttl={:d}}}", acv.is_counter_update() ? "counter_update_value=" + to_sstring(acv.counter_update_value()) - : to_hex(acv.value().linearize()), + : to_hex(to_bytes(acv.value())), acv.timestamp(), acv.is_live_and_has_ttl() ? acv.expiry().time_since_epoch().count() : -1, acv.is_live_and_has_ttl() ? 
acv.ttl().count() : 0); @@ -247,12 +166,11 @@ operator<<(std::ostream& os, const atomic_cell_view::printer& acvp) { cell_value_string_builder << "counter_update_value=" << acv.counter_update_value(); } else { cell_value_string_builder << "shards: "; - counter_cell_view::with_linearized(acv, [&cell_value_string_builder] (counter_cell_view& ccv) { - cell_value_string_builder << ::join(", ", ccv.shards()); - }); + auto ccv = counter_cell_view(acv); + cell_value_string_builder << ::join(", ", ccv.shards()); } } else { - cell_value_string_builder << type.to_string(acv.value().linearize()); + cell_value_string_builder << type.to_string(to_bytes(acv.value())); } return fmt_print(os, "atomic_cell{{{},ts={:d},expiry={:d},ttl={:d}}}", cell_value_string_builder.str(), @@ -271,12 +189,11 @@ operator<<(std::ostream& os, const atomic_cell::printer& acp) { } std::ostream& operator<<(std::ostream& os, const atomic_cell_or_collection::printer& p) { - if (!p._cell._data.get()) { + if (p._cell._data.empty()) { return os << "{ null atomic_cell_or_collection }"; } - using dc = data::cell; os << "{ "; - if (dc::structure::get_member(p._cell._data.get()).get()) { + if (p._cdef.type->is_collection()) { os << "collection "; auto cmv = p._cell.as_collection_mutation(); os << collection_mutation_view::printer(*p._cdef.type, cmv); diff --git a/atomic_cell.hh b/atomic_cell.hh index e72bd2eb24..870e9a6e5d 100644 --- a/atomic_cell.hh +++ b/atomic_cell.hh @@ -26,12 +26,12 @@ #include "tombstone.hh" #include "gc_clock.hh" #include "utils/managed_bytes.hh" +#include "utils/fragment_range.hh" #include +#include #include #include -#include "data/cell.hh" -#include "data/schema_info.hh" -#include "imr/utils.hh" +#include #include "utils/fragmented_temporary_buffer.hh" #include "serializer.hh" @@ -40,41 +40,191 @@ class abstract_type; class collection_type_impl; class atomic_cell_or_collection; -using atomic_cell_value_view = data::value_view; -using atomic_cell_value_mutable_view = 
data::value_mutable_view; +using atomic_cell_value = managed_bytes; +template +using atomic_cell_value_basic_view = managed_bytes_basic_view; +using atomic_cell_value_view = atomic_cell_value_basic_view; +using atomic_cell_value_mutable_view = atomic_cell_value_basic_view; + +template +requires std::is_trivial_v +static void set_field(atomic_cell_value_mutable_view& out, unsigned offset, T val) { + auto out_view = managed_bytes_mutable_view(out); + out_view.remove_prefix(offset); + write(out_view, val); +} + +template +requires std::is_trivial_v +static void set_field(atomic_cell_value& out, unsigned offset, T val) { + auto out_view = atomic_cell_value_mutable_view(out); + set_field(out_view, offset, val); +} + +template +static void set_value(managed_bytes& b, unsigned value_offset, const Buffer& value) { + auto v = managed_bytes_mutable_view(b).substr(value_offset, value.size_bytes()); + for (auto frag : value) { + write_fragmented(v, single_fragmented_view(frag)); + } +} + +template +requires std::is_trivial_v +static T get_field(Input in, unsigned offset = 0) { + in.remove_prefix(offset); + return read_simple(in); +} + +/* + * Represents atomic cell layout. Works on serialized form. + * + * Layout: + * + * := ()? + * := + */ +class atomic_cell_type final { +private: + static constexpr int8_t LIVE_FLAG = 0x01; + static constexpr int8_t EXPIRY_FLAG = 0x02; // When present, expiry field is present. Set only for live cells + static constexpr int8_t COUNTER_UPDATE_FLAG = 0x08; // Cell is a counter update. 
+ static constexpr unsigned flags_size = 1; + static constexpr unsigned timestamp_offset = flags_size; + static constexpr unsigned timestamp_size = 8; + static constexpr unsigned expiry_offset = timestamp_offset + timestamp_size; + static constexpr unsigned expiry_size = 8; + static constexpr unsigned deletion_time_offset = timestamp_offset + timestamp_size; + static constexpr unsigned deletion_time_size = 8; + static constexpr unsigned ttl_offset = expiry_offset + expiry_size; + static constexpr unsigned ttl_size = 4; + friend class counter_cell_builder; +private: + static bool is_counter_update(atomic_cell_value_view cell) { + return cell.front() & COUNTER_UPDATE_FLAG; + } + static bool is_live(atomic_cell_value_view cell) { + return cell.front() & LIVE_FLAG; + } + static bool is_live_and_has_ttl(atomic_cell_value_view cell) { + return cell.front() & EXPIRY_FLAG; + } + static bool is_dead(atomic_cell_value_view cell) { + return !is_live(cell); + } + // Can be called on live and dead cells + static api::timestamp_type timestamp(atomic_cell_value_view cell) { + return get_field(cell, timestamp_offset); + } + static void set_timestamp(atomic_cell_value_mutable_view& cell, api::timestamp_type ts) { + set_field(cell, timestamp_offset, ts); + } + // Can be called on live cells only +private: + template + static managed_bytes_basic_view do_get_value(managed_bytes_basic_view cell) { + auto expiry_field_size = bool(cell.front() & EXPIRY_FLAG) * (expiry_size + ttl_size); + auto value_offset = flags_size + timestamp_size + expiry_field_size; + cell.remove_prefix(value_offset); + return cell; + } +public: + static atomic_cell_value_view value(managed_bytes_view cell) { + return do_get_value(cell); + } + static atomic_cell_value_mutable_view value(managed_bytes_mutable_view cell) { + return do_get_value(cell); + } + // Can be called on live counter update cells only + static int64_t counter_update_value(atomic_cell_value_view cell) { + return get_field(cell, flags_size + 
timestamp_size); + } + // Can be called only when is_dead() is true. + static gc_clock::time_point deletion_time(atomic_cell_value_view cell) { + assert(is_dead(cell)); + return gc_clock::time_point(gc_clock::duration(get_field(cell, deletion_time_offset))); + } + // Can be called only when is_live_and_has_ttl() is true. + static gc_clock::time_point expiry(atomic_cell_value_view cell) { + assert(is_live_and_has_ttl(cell)); + auto expiry = get_field(cell, expiry_offset); + return gc_clock::time_point(gc_clock::duration(expiry)); + } + // Can be called only when is_live_and_has_ttl() is true. + static gc_clock::duration ttl(atomic_cell_value_view cell) { + assert(is_live_and_has_ttl(cell)); + return gc_clock::duration(get_field(cell, ttl_offset)); + } + static managed_bytes make_dead(api::timestamp_type timestamp, gc_clock::time_point deletion_time) { + managed_bytes b(managed_bytes::initialized_later(), flags_size + timestamp_size + deletion_time_size); + b[0] = 0; + set_field(b, timestamp_offset, timestamp); + set_field(b, deletion_time_offset, static_cast(deletion_time.time_since_epoch().count())); + return b; + } + template + static managed_bytes make_live(api::timestamp_type timestamp, const Buffer& value) { + auto value_offset = flags_size + timestamp_size; + managed_bytes b(managed_bytes::initialized_later(), value_offset + value.size_bytes()); + b[0] = LIVE_FLAG; + set_field(b, timestamp_offset, timestamp); + set_value(b, value_offset, value); + return b; + } + static managed_bytes make_live_counter_update(api::timestamp_type timestamp, int64_t value) { + auto value_offset = flags_size + timestamp_size; + managed_bytes b(managed_bytes::initialized_later(), value_offset + sizeof(value)); + b[0] = LIVE_FLAG | COUNTER_UPDATE_FLAG; + set_field(b, timestamp_offset, timestamp); + set_field(b, value_offset, value); + return b; + } + template + static managed_bytes make_live(api::timestamp_type timestamp, const Buffer& value, gc_clock::time_point expiry, 
gc_clock::duration ttl) { + auto value_offset = flags_size + timestamp_size + expiry_size + ttl_size; + managed_bytes b(managed_bytes::initialized_later(), value_offset + value.size_bytes()); + b[0] = EXPIRY_FLAG | LIVE_FLAG; + set_field(b, timestamp_offset, timestamp); + set_field(b, expiry_offset, static_cast(expiry.time_since_epoch().count())); + set_field(b, ttl_offset, static_cast(ttl.count())); + set_value(b, value_offset, value); + return b; + } + static managed_bytes make_live_uninitialized(api::timestamp_type timestamp, size_t size) { + auto value_offset = flags_size + timestamp_size; + managed_bytes b(managed_bytes::initialized_later(), value_offset + size); + b[0] = LIVE_FLAG; + set_field(b, timestamp_offset, timestamp); + return b; + } + template + friend class basic_atomic_cell_view; + friend class atomic_cell; +}; /// View of an atomic cell template class basic_atomic_cell_view { protected: - data::cell::basic_atomic_cell_view _view; - friend class atomic_cell; -public: - using pointer_type = std::conditional_t; + managed_bytes_basic_view _view; + friend class atomic_cell; protected: - explicit basic_atomic_cell_view(data::cell::basic_atomic_cell_view v) - : _view(std::move(v)) { } - - basic_atomic_cell_view(const data::type_info& ti, pointer_type ptr) - : _view(data::cell::make_atomic_cell_view(ti, ptr)) - { } - + void set_view(managed_bytes_basic_view v) { + _view = v; + } + basic_atomic_cell_view() = default; + explicit basic_atomic_cell_view(managed_bytes_basic_view v) : _view(std::move(v)) { } friend class atomic_cell_or_collection; public: operator basic_atomic_cell_view() const noexcept { return basic_atomic_cell_view(_view); } - void swap(basic_atomic_cell_view& other) noexcept { - using std::swap; - swap(_view, other._view); - } - bool is_counter_update() const { - return _view.is_counter_update(); + return atomic_cell_type::is_counter_update(_view); } bool is_live() const { - return _view.is_live(); + return atomic_cell_type::is_live(_view); 
} bool is_live(tombstone t, bool is_counter) const { return is_live() && !is_covered_by(t, is_counter); @@ -83,73 +233,72 @@ public: return is_live() && !is_covered_by(t, is_counter) && !has_expired(now); } bool is_live_and_has_ttl() const { - return _view.is_expiring(); + return atomic_cell_type::is_live_and_has_ttl(_view); } bool is_dead(gc_clock::time_point now) const { - return !is_live() || has_expired(now); + return atomic_cell_type::is_dead(_view) || has_expired(now); } bool is_covered_by(tombstone t, bool is_counter) const { return timestamp() <= t.timestamp || (is_counter && t.timestamp != api::missing_timestamp); } // Can be called on live and dead cells api::timestamp_type timestamp() const { - return _view.timestamp(); + return atomic_cell_type::timestamp(_view); } void set_timestamp(api::timestamp_type ts) { - _view.set_timestamp(ts); + atomic_cell_type::set_timestamp(_view, ts); } // Can be called on live cells only - data::basic_value_view value() const { - return _view.value(); + atomic_cell_value_basic_view value() const { + return atomic_cell_type::value(_view); } // Can be called on live cells only size_t value_size() const { - return _view.value_size(); + return atomic_cell_type::value(_view).size(); } bool is_value_fragmented() const { - return _view.is_value_fragmented(); + return _view.is_fragmented(); } // Can be called on live counter update cells only int64_t counter_update_value() const { - return _view.counter_update_value(); + return atomic_cell_type::counter_update_value(_view); } // Can be called only when is_dead(gc_clock::time_point) gc_clock::time_point deletion_time() const { - return !is_live() ? _view.deletion_time() : expiry() - ttl(); + return !is_live() ? 
atomic_cell_type::deletion_time(_view) : expiry() - ttl(); } // Can be called only when is_live_and_has_ttl() gc_clock::time_point expiry() const { - return _view.expiry(); + return atomic_cell_type::expiry(_view); } // Can be called only when is_live_and_has_ttl() gc_clock::duration ttl() const { - return _view.ttl(); + return atomic_cell_type::ttl(_view); } // Can be called on live and dead cells bool has_expired(gc_clock::time_point now) const { return is_live_and_has_ttl() && expiry() <= now; } - bytes_view serialize() const { - return _view.serialize(); + managed_bytes_view serialize() const { + return _view; } }; class atomic_cell_view final : public basic_atomic_cell_view { - atomic_cell_view(const data::type_info& ti, const uint8_t* data) - : basic_atomic_cell_view(ti, data) {} + atomic_cell_view(managed_bytes_view v) + : basic_atomic_cell_view(v) {} template - atomic_cell_view(data::cell::basic_atomic_cell_view view) - : basic_atomic_cell_view(view) { } + atomic_cell_view(basic_atomic_cell_view view) + : basic_atomic_cell_view(view) {} friend class atomic_cell; public: - static atomic_cell_view from_bytes(const data::type_info& ti, const imr::utils::object& data) { - return atomic_cell_view(ti, data.get()); + static atomic_cell_view from_bytes(const abstract_type& t, managed_bytes_view v) { + return atomic_cell_view(v); } - - static atomic_cell_view from_bytes(const data::type_info& ti, bytes_view bv) { - return atomic_cell_view(ti, reinterpret_cast(bv.begin())); + static atomic_cell_view from_bytes(const abstract_type& t, bytes_view v) { + return atomic_cell_view(managed_bytes_view(v)); } friend std::ostream& operator<<(std::ostream& os, const atomic_cell_view& acv); @@ -164,11 +313,11 @@ public: }; class atomic_cell_mutable_view final : public basic_atomic_cell_view { - atomic_cell_mutable_view(const data::type_info& ti, uint8_t* data) - : basic_atomic_cell_view(ti, data) {} + atomic_cell_mutable_view(managed_bytes_mutable_view data) + : 
basic_atomic_cell_view(data) {} public: - static atomic_cell_mutable_view from_bytes(const data::type_info& ti, imr::utils::object& data) { - return atomic_cell_mutable_view(ti, data.get()); + static atomic_cell_mutable_view from_bytes(const abstract_type& t, managed_bytes_mutable_view v) { + return atomic_cell_mutable_view(v); } friend class atomic_cell; @@ -177,26 +326,31 @@ public: using atomic_cell_ref = atomic_cell_mutable_view; class atomic_cell final : public basic_atomic_cell_view { - using imr_object_type = imr::utils::object; - imr_object_type _data; - atomic_cell(const data::type_info& ti, imr::utils::object&& data) - : basic_atomic_cell_view(ti, data.get()), _data(std::move(data)) {} + managed_bytes _data; + atomic_cell(managed_bytes b) : _data(std::move(b)) { + set_view(_data); + } + public: class collection_member_tag; using collection_member = bool_class; - atomic_cell(atomic_cell&&) = default; - atomic_cell& operator=(const atomic_cell&) = delete; - atomic_cell& operator=(atomic_cell&&) = default; - void swap(atomic_cell& other) noexcept { - basic_atomic_cell_view::swap(other); - _data.swap(other._data); + atomic_cell(atomic_cell&& o) noexcept : _data(std::move(o._data)) { + set_view(_data); } - operator atomic_cell_view() const { return atomic_cell_view(_view); } + atomic_cell& operator=(const atomic_cell&) = delete; + atomic_cell& operator=(atomic_cell&& o) { + _data = std::move(o._data); + set_view(_data); + return *this; + } + operator atomic_cell_view() const { return atomic_cell_view(managed_bytes_view(_data)); } atomic_cell(const abstract_type& t, atomic_cell_view other); static atomic_cell make_dead(api::timestamp_type timestamp, gc_clock::time_point deletion_time); static atomic_cell make_live(const abstract_type& type, api::timestamp_type timestamp, bytes_view value, collection_member = collection_member::no); + static atomic_cell make_live(const abstract_type& type, api::timestamp_type timestamp, managed_bytes_view value, + 
collection_member = collection_member::no); static atomic_cell make_live(const abstract_type& type, api::timestamp_type timestamp, ser::buffer_view value, collection_member = collection_member::no); static atomic_cell make_live(const abstract_type& type, api::timestamp_type timestamp, const fragmented_temporary_buffer::view& value, @@ -208,6 +362,8 @@ public: static atomic_cell make_live_counter_update(api::timestamp_type timestamp, int64_t value); static atomic_cell make_live(const abstract_type&, api::timestamp_type timestamp, bytes_view value, gc_clock::time_point expiry, gc_clock::duration ttl, collection_member = collection_member::no); + static atomic_cell make_live(const abstract_type&, api::timestamp_type timestamp, managed_bytes_view value, + gc_clock::time_point expiry, gc_clock::duration ttl, collection_member = collection_member::no); static atomic_cell make_live(const abstract_type&, api::timestamp_type timestamp, ser::buffer_view value, gc_clock::time_point expiry, gc_clock::duration ttl, collection_member = collection_member::no); static atomic_cell make_live(const abstract_type&, api::timestamp_type timestamp, const fragmented_temporary_buffer::view& value, diff --git a/atomic_cell_hash.hh b/atomic_cell_hash.hh index 9407b25aa1..7aa9d9da57 100644 --- a/atomic_cell_hash.hh +++ b/atomic_cell_hash.hh @@ -52,9 +52,7 @@ struct appending_hash { feed_hash(h, cell.timestamp()); if (cell.is_live()) { if (cdef.is_counter()) { - counter_cell_view::with_linearized(cell, [&] (counter_cell_view ccv) { - ::feed_hash(h, ccv); - }); + ::feed_hash(h, counter_cell_view(cell)); return; } if (cell.is_live_and_has_ttl()) { diff --git a/atomic_cell_or_collection.hh b/atomic_cell_or_collection.hh index c66fb162f3..6a499b33cf 100644 --- a/atomic_cell_or_collection.hh +++ b/atomic_cell_or_collection.hh @@ -26,20 +26,14 @@ #include "schema.hh" #include "hashing.hh" -#include "imr/utils.hh" - // A variant type that can hold either an atomic_cell, or a serialized collection. 
// Which type is stored is determined by the schema. +// Has an "empty" state. +// Objects moved-from are left in an empty state. class atomic_cell_or_collection final { - // FIXME: This has made us lose small-buffer optimisation. Unfortunately, - // due to the changed cell format it would be less effective now, anyway. - // Measure the actual impact because any attempts to fix this will become - // irrelevant once rows are converted to the IMR as well, so maybe we can - // live with this like that. - using imr_object_type = imr::utils::object; - imr_object_type _data; + managed_bytes _data; private: - atomic_cell_or_collection(imr::utils::object&& data) : _data(std::move(data)) {} + atomic_cell_or_collection(managed_bytes&& data) : _data(std::move(data)) {} public: atomic_cell_or_collection() = default; atomic_cell_or_collection(atomic_cell_or_collection&&) = default; @@ -49,20 +43,16 @@ public: atomic_cell_or_collection(atomic_cell ac) : _data(std::move(ac._data)) {} atomic_cell_or_collection(const abstract_type& at, atomic_cell_view acv); static atomic_cell_or_collection from_atomic_cell(atomic_cell data) { return { std::move(data._data) }; } - atomic_cell_view as_atomic_cell(const column_definition& cdef) const { return atomic_cell_view::from_bytes(cdef.type->imr_state().type_info(), _data); } - atomic_cell_ref as_atomic_cell_ref(const column_definition& cdef) { return atomic_cell_mutable_view::from_bytes(cdef.type->imr_state().type_info(), _data); } - atomic_cell_mutable_view as_mutable_atomic_cell(const column_definition& cdef) { return atomic_cell_mutable_view::from_bytes(cdef.type->imr_state().type_info(), _data); } + atomic_cell_view as_atomic_cell(const column_definition& cdef) const { return atomic_cell_view::from_bytes(*cdef.type, _data); } + atomic_cell_mutable_view as_mutable_atomic_cell(const column_definition& cdef) { return atomic_cell_mutable_view::from_bytes(*cdef.type, _data); } atomic_cell_or_collection(collection_mutation cm) : 
_data(std::move(cm._data)) { } atomic_cell_or_collection copy(const abstract_type&) const; explicit operator bool() const { - return bool(_data); + return !_data.empty(); } static constexpr bool can_use_mutable_view() { return true; } - void swap(atomic_cell_or_collection& other) noexcept { - _data.swap(other._data); - } static atomic_cell_or_collection from_collection_mutation(collection_mutation data) { return std::move(data._data); } collection_mutation_view as_collection_mutation() const; bytes_view serialize() const; @@ -82,12 +72,3 @@ public: }; friend std::ostream& operator<<(std::ostream&, const printer&); }; - -namespace std { - -inline void swap(atomic_cell_or_collection& a, atomic_cell_or_collection& b) noexcept -{ - a.swap(b); -} - -} diff --git a/cdc/log.cc b/cdc/log.cc index a5e7a388b2..ebe74d0768 100644 --- a/cdc/log.cc +++ b/cdc/log.cc @@ -979,13 +979,13 @@ private: }; static bytes get_bytes(const atomic_cell_view& acv) { - return acv.value().linearize(); + return to_bytes(acv.value()); } static bytes_view get_bytes_view(const atomic_cell_view& acv, std::vector& buf) { return acv.value().is_fragmented() - ? bytes_view{buf.emplace_back(acv.value().linearize())} - : acv.value().first_fragment(); + ? 
bytes_view{buf.emplace_back(to_bytes(acv.value()))} + : acv.value().current_fragment(); } static ttl_opt get_ttl(const atomic_cell_view& acv) { diff --git a/collection_mutation.cc b/collection_mutation.cc index 22d65bcc88..9d024edee4 100644 --- a/collection_mutation.cc +++ b/collection_mutation.cc @@ -22,7 +22,6 @@ #include "types/collection.hh" #include "types/user.hh" #include "concrete_types.hh" -#include "atomic_cell_or_collection.hh" #include "mutation_partition.hh" #include "compaction_garbage_collector.hh" #include "combine.hh" @@ -30,40 +29,28 @@ #include "collection_mutation.hh" collection_mutation::collection_mutation(const abstract_type& type, collection_mutation_view v) - : _data(imr_object_type::make(data::cell::make_collection(v.data), &type.imr_state().lsa_migrator())) {} + : _data(v.data) {} -collection_mutation::collection_mutation(const abstract_type& type, const bytes_ostream& data) - : _data(imr_object_type::make(data::cell::make_collection(fragment_range_view(data)), &type.imr_state().lsa_migrator())) {} - -static collection_mutation_view get_collection_mutation_view(const uint8_t* ptr) -{ - auto f = data::cell::structure::get_member(ptr); - auto ti = data::type_info::make_collection(); - data::cell::context ctx(f, ti); - auto view = data::cell::structure::get_member(ptr).as(ctx); - auto dv = data::cell::variable_value::make_view(view, f.get()); - return collection_mutation_view { dv }; -} +collection_mutation::collection_mutation(const abstract_type& type, managed_bytes data) + : _data(std::move(data)) {} collection_mutation::operator collection_mutation_view() const { - return get_collection_mutation_view(_data.get()); + return collection_mutation_view{managed_bytes_view(_data)}; } collection_mutation_view atomic_cell_or_collection::as_collection_mutation() const { - return get_collection_mutation_view(_data.get()); + return collection_mutation_view{managed_bytes_view(_data)}; } bool collection_mutation_view::is_empty() const { - auto in = 
collection_mutation_input_stream(data); + auto in = collection_mutation_input_stream(fragment_range(data)); auto has_tomb = in.read_trivial(); return !has_tomb && in.read_trivial() == 0; } -template -requires std::is_invocable_r_v -static bool is_any_live(const atomic_cell_value_view& data, tombstone tomb, gc_clock::time_point now, F&& read_cell_type_info) { - auto in = collection_mutation_input_stream(data); +bool collection_mutation_view::is_any_live(const abstract_type& type, tombstone tomb, gc_clock::time_point now) const { + auto in = collection_mutation_input_stream(fragment_range(data)); auto has_tomb = in.read_trivial(); if (has_tomb) { auto ts = in.read_trivial(); @@ -73,9 +60,10 @@ static bool is_any_live(const atomic_cell_value_view& data, tombstone tomb, gc_c auto nr = in.read_trivial(); for (uint32_t i = 0; i != nr; ++i) { - auto& type_info = read_cell_type_info(in); + auto key_size = in.read_trivial(); + in.skip(key_size); auto vsize = in.read_trivial(); - auto value = atomic_cell_view::from_bytes(type_info, in.read(vsize)); + auto value = atomic_cell_view::from_bytes(type, in.read(vsize)); if (value.is_live(tomb, now, false)) { return true; } @@ -84,33 +72,8 @@ static bool is_any_live(const atomic_cell_value_view& data, tombstone tomb, gc_c return false; } -bool collection_mutation_view::is_any_live(const abstract_type& type, tombstone tomb, gc_clock::time_point now) const { - return visit(type, make_visitor( - [&] (const collection_type_impl& ctype) { - auto& type_info = ctype.value_comparator()->imr_state().type_info(); - return ::is_any_live(data, tomb, now, [&type_info] (collection_mutation_input_stream& in) -> const data::type_info& { - auto key_size = in.read_trivial(); - in.skip(key_size); - return type_info; - }); - }, - [&] (const user_type_impl& utype) { - return ::is_any_live(data, tomb, now, [&utype] (collection_mutation_input_stream& in) -> const data::type_info& { - auto key_size = in.read_trivial(); - auto key = in.read(key_size); - 
return utype.type(deserialize_field_index(key))->imr_state().type_info(); - }); - }, - [&] (const abstract_type& o) -> bool { - throw std::runtime_error(format("collection_mutation_view::is_any_live: unknown type {}", o.name())); - } - )); -} - -template -requires std::is_invocable_r_v -static api::timestamp_type last_update(const atomic_cell_value_view& data, F&& read_cell_type_info) { - auto in = collection_mutation_input_stream(data); +api::timestamp_type collection_mutation_view::last_update(const abstract_type& type) const { + auto in = collection_mutation_input_stream(fragment_range(data)); api::timestamp_type max = api::missing_timestamp; auto has_tomb = in.read_trivial(); if (has_tomb) { @@ -120,39 +83,16 @@ static api::timestamp_type last_update(const atomic_cell_value_view& data, F&& r auto nr = in.read_trivial(); for (uint32_t i = 0; i != nr; ++i) { - auto& type_info = read_cell_type_info(in); + const auto key_size = in.read_trivial(); + in.skip(key_size); auto vsize = in.read_trivial(); - auto value = atomic_cell_view::from_bytes(type_info, in.read(vsize)); + auto value = atomic_cell_view::from_bytes(type, in.read(vsize)); max = std::max(value.timestamp(), max); } return max; } - -api::timestamp_type collection_mutation_view::last_update(const abstract_type& type) const { - return visit(type, make_visitor( - [&] (const collection_type_impl& ctype) { - auto& type_info = ctype.value_comparator()->imr_state().type_info(); - return ::last_update(data, [&type_info] (collection_mutation_input_stream& in) -> const data::type_info& { - auto key_size = in.read_trivial(); - in.skip(key_size); - return type_info; - }); - }, - [&] (const user_type_impl& utype) { - return ::last_update(data, [&utype] (collection_mutation_input_stream& in) -> const data::type_info& { - auto key_size = in.read_trivial(); - auto key = in.read(key_size); - return utype.type(deserialize_field_index(key))->imr_state().type_info(); - }); - }, - [&] (const abstract_type& o) -> 
api::timestamp_type { - throw std::runtime_error(format("collection_mutation_view::last_update: unknown type {}", o.name())); - } - )); -} - std::ostream& operator<<(std::ostream& os, const collection_mutation_view::printer& cmvp) { fmt::print(os, "{{collection_mutation_view "); cmvp._cmv.with_deserialized(cmvp._type, [&os, &type = cmvp._type] (const collection_mutation_view_description& cmvd) { @@ -278,28 +218,31 @@ static collection_mutation serialize_collection_mutation( auto size = accumulate(cells, (size_t)4, element_size); size += 1; if (tomb) { - size += sizeof(tomb.timestamp) + sizeof(tomb.deletion_time); + size += sizeof(int64_t) + sizeof(int64_t); } - bytes_ostream ret; - ret.reserve(size); - auto out = ret.write_begin(); - *out++ = bool(tomb); + managed_bytes ret(managed_bytes::initialized_later(), size); + managed_bytes_mutable_view out(ret); + write(out, uint8_t(bool(tomb))); if (tomb) { - write(out, tomb.timestamp); - write(out, tomb.deletion_time.time_since_epoch().count()); + write(out, tomb.timestamp); + write(out, tomb.deletion_time.time_since_epoch().count()); } - auto writeb = [&out] (bytes_view v) { - serialize_int32(out, v.size()); - out = std::copy_n(v.begin(), v.size(), out); + auto writek = [&out] (bytes_view v) { + write(out, v.size()); + write_fragmented(out, single_fragmented_view(v)); + }; + auto writev = [&out] (managed_bytes_view v) { + write(out, v.size()); + write_fragmented(out, v); }; // FIXME: overflow? 
- serialize_int32(out, boost::distance(cells)); + write(out, boost::distance(cells)); for (auto&& kv : cells) { auto&& k = kv.first; auto&& v = kv.second; - writeb(k); + writek(k); - writeb(v.serialize()); + writev(v.serialize()); } return collection_mutation(type, ret); } @@ -448,13 +391,12 @@ deserialize_collection_mutation(const abstract_type& type, collection_mutation_i return visit(type, make_visitor( [&] (const collection_type_impl& ctype) { // value_comparator(), ugh - auto& type_info = ctype.value_comparator()->imr_state().type_info(); - return deserialize_collection_mutation(in, [&type_info] (collection_mutation_input_stream& in) { + return deserialize_collection_mutation(in, [&ctype] (collection_mutation_input_stream& in) { // FIXME: we could probably avoid the need for size auto ksize = in.read_trivial(); auto key = in.read(ksize); auto vsize = in.read_trivial(); - auto value = atomic_cell_view::from_bytes(type_info, in.read(vsize)); + auto value = atomic_cell_view::from_bytes(*ctype.value_comparator(), in.read(vsize)); return std::make_pair(key, value); }); }, @@ -464,8 +406,7 @@ deserialize_collection_mutation(const abstract_type& type, collection_mutation_i auto ksize = in.read_trivial(); auto key = in.read(ksize); auto vsize = in.read_trivial(); - auto value = atomic_cell_view::from_bytes( - utype.type(deserialize_field_index(key))->imr_state().type_info(), in.read(vsize)); + auto value = atomic_cell_view::from_bytes(*utype.type(deserialize_field_index(key)), in.read(vsize)); return std::make_pair(key, value); }); }, diff --git a/collection_mutation.hh b/collection_mutation.hh index c65dfee2da..43cf66be81 100644 --- a/collection_mutation.hh +++ b/collection_mutation.hh @@ -31,7 +31,6 @@ #include class abstract_type; -class bytes_ostream; class compaction_garbage_collector; class row_tombstone; @@ -70,7 +69,7 @@ struct collection_mutation_view_description { collection_mutation serialize(const abstract_type&) const; }; -using 
collection_mutation_input_stream = utils::linearizing_input_stream; +using collection_mutation_input_stream = utils::linearizing_input_stream, marshal_exception>; // Given a linearized collection_mutation_view, returns an auxiliary struct allowing the inspection of each cell. // The struct is an observer of the data given by the collection_mutation_view and is only valid while the @@ -80,7 +79,7 @@ collection_mutation_view_description deserialize_collection_mutation(const abstr class collection_mutation_view { public: - atomic_cell_value_view data; + managed_bytes_view data; // Is this a noop mutation? bool is_empty() const; @@ -97,7 +96,7 @@ public: // calls it on the corresponding description of `this`. template inline decltype(auto) with_deserialized(const abstract_type& type, F f) const { - auto stream = collection_mutation_input_stream(data); + auto stream = collection_mutation_input_stream(fragment_range(data)); return f(deserialize_collection_mutation(type, stream)); } @@ -122,12 +121,11 @@ public: // The mutation may also contain a collection-wide tombstone. 
class collection_mutation { public: - using imr_object_type = imr::utils::object; - imr_object_type _data; + managed_bytes _data; collection_mutation() {} collection_mutation(const abstract_type&, collection_mutation_view); - collection_mutation(const abstract_type& type, const bytes_ostream& data); + collection_mutation(const abstract_type&, managed_bytes); operator collection_mutation_view() const; }; diff --git a/configure.py b/configure.py index 613b0e35cb..921d7bd786 100755 --- a/configure.py +++ b/configure.py @@ -338,7 +338,6 @@ scylla_tests = set([ 'test/boost/hash_test', 'test/boost/hashers_test', 'test/boost/idl_test', - 'test/boost/imr_test', 'test/boost/input_stream_test', 'test/boost/json_cql_query_test', 'test/boost/json_test', @@ -356,7 +355,6 @@ scylla_tests = set([ 'test/boost/intrusive_array_test', 'test/boost/map_difference_test', 'test/boost/memtable_test', - 'test/boost/meta_test', 'test/boost/multishard_mutation_query_test', 'test/boost/murmur_hash_test', 'test/boost/mutation_fragment_test', @@ -413,7 +411,6 @@ scylla_tests = set([ 'test/boost/btree_test', 'test/boost/double_decker_test', 'test/boost/stall_free_test', - 'test/boost/imr_test', 'test/boost/raft_sys_table_storage_test', 'test/manual/ec2_snitch_test', 'test/manual/enormous_table_scan_test', @@ -861,7 +858,6 @@ scylla_core = (['database.cc', 'vint-serialization.cc', 'utils/arch/powerpc/crc32-vpmsum/crc32_wrapper.cc', 'querier.cc', - 'data/cell.cc', 'mutation_writer/multishard_writer.cc', 'multishard_mutation_query.cc', 'reader_concurrency_semaphore.cc', @@ -1036,7 +1032,6 @@ pure_boost_tests = set([ 'test/boost/like_matcher_test', 'test/boost/linearizing_input_stream_test', 'test/boost/map_difference_test', - 'test/boost/meta_test', 'test/boost/nonwrapping_range_test', 'test/boost/observable_test', 'test/boost/range_test', @@ -1112,8 +1107,6 @@ deps['test/boost/estimated_histogram_test'] = ['test/boost/estimated_histogram_t deps['test/boost/anchorless_list_test'] = 
['test/boost/anchorless_list_test.cc'] deps['test/perf/perf_fast_forward'] += ['release.cc'] deps['test/perf/perf_simple_query'] += ['release.cc'] -deps['test/boost/meta_test'] = ['test/boost/meta_test.cc'] -deps['test/boost/imr_test'] = ['test/boost/imr_test.cc', 'utils/logalloc.cc', 'utils/dynamic_bitset.cc'] deps['test/boost/reusable_buffer_test'] = [ "test/boost/reusable_buffer_test.cc", "test/lib/log.cc", diff --git a/converting_mutation_partition_applier.cc b/converting_mutation_partition_applier.cc index 233a5c8c34..d317cd7237 100644 --- a/converting_mutation_partition_applier.cc +++ b/converting_mutation_partition_applier.cc @@ -36,9 +36,9 @@ converting_mutation_partition_applier::upgrade_cell(const abstract_type& new_typ atomic_cell::collection_member cm) { if (cell.is_live() && !old_type.is_counter()) { if (cell.is_live_and_has_ttl()) { - return atomic_cell::make_live(new_type, cell.timestamp(), cell.value().linearize(), cell.expiry(), cell.ttl(), cm); + return atomic_cell::make_live(new_type, cell.timestamp(), cell.value(), cell.expiry(), cell.ttl(), cm); } - return atomic_cell::make_live(new_type, cell.timestamp(), cell.value().linearize(), cm); + return atomic_cell::make_live(new_type, cell.timestamp(), cell.value(), cm); } else { return atomic_cell(new_type, cell); } diff --git a/counters.cc b/counters.cc index 5531244f72..30d2147e1a 100644 --- a/counters.cc +++ b/counters.cc @@ -118,16 +118,14 @@ void counter_cell_view::apply(const column_definition& cdef, atomic_cell_or_coll assert(!dst_ac.is_counter_update()); assert(!src_ac.is_counter_update()); - with_linearized(dst_ac, [&] (counter_cell_view dst_ccv) { - with_linearized(src_ac, [&] (counter_cell_view src_ccv) { + auto src_ccv = counter_cell_view(src_ac); + auto dst_ccv = counter_cell_view(dst_ac); if (dst_ccv.shard_count() >= src_ccv.shard_count()) { auto dst_amc = dst.as_mutable_atomic_cell(cdef); auto src_amc = src.as_mutable_atomic_cell(cdef); - if (!dst_amc.is_value_fragmented() && 
!src_amc.is_value_fragmented()) { - if (apply_in_place(cdef, dst_amc, src_amc)) { - return; - } + if (apply_in_place(cdef, dst_amc, src_amc)) { + return; } } @@ -142,8 +140,6 @@ void counter_cell_view::apply(const column_definition& cdef, atomic_cell_or_coll auto cell = result.build(std::max(dst_ac.timestamp(), src_ac.timestamp())); src = std::exchange(dst, atomic_cell_or_collection(std::move(cell))); - }); - }); } std::optional counter_cell_view::difference(atomic_cell_view a, atomic_cell_view b) @@ -158,8 +154,8 @@ std::optional counter_cell_view::difference(atomic_cell_view a, ato return { }; } - return with_linearized(a, [&] (counter_cell_view a_ccv) { - return with_linearized(b, [&] (counter_cell_view b_ccv) { + auto a_ccv = counter_cell_view(a); + auto b_ccv = counter_cell_view(b); auto a_shards = a_ccv.shards(); auto b_shards = b_ccv.shards(); @@ -186,8 +182,6 @@ std::optional counter_cell_view::difference(atomic_cell_view a, ato diff = atomic_cell::make_live(*counter_type, a.timestamp(), bytes_view()); } return diff; - }); - }); } @@ -225,14 +219,13 @@ void transform_counter_updates_to_shards(mutation& m, const mutation* current_st if (!acv.is_live()) { return; // continue -- we are in lambda } - counter_cell_view::with_linearized(acv, [&] (counter_cell_view ccv) { + auto ccv = counter_cell_view(acv); auto cs = ccv.get_shard(counter_id(local_id)); if (!cs) { return; // continue } shards.emplace_back(std::make_pair(id, counter_shard(*cs))); }); - }); transformee.for_each_cell([&] (column_id id, atomic_cell_or_collection& ac_o_c) { auto& cdef = s.column_at(kind, id); diff --git a/counters.hh b/counters.hh index 760c7e0434..358191b194 100644 --- a/counters.hh +++ b/counters.hh @@ -81,21 +81,20 @@ class basic_counter_shard_view { total_size = unsigned(logical_clock) + sizeof(int64_t), }; private: - using pointer_type = std::conditional_t; - pointer_type _base; + managed_bytes_basic_view _base; private: template T read(offset off) const { - T value; - 
std::copy_n(_base + static_cast(off), sizeof(T), reinterpret_cast(&value)); - return value; + auto v = _base; + v.remove_prefix(size_t(off)); + return read_simple_native(v); } public: static constexpr auto size = size_t(offset::total_size); public: basic_counter_shard_view() = default; - explicit basic_counter_shard_view(pointer_type ptr) noexcept - : _base(ptr) { } + explicit basic_counter_shard_view(managed_bytes_basic_view v) noexcept + : _base(v) { } counter_id id() const { return read(offset::id); } int64_t value() const { return read(offset::value); } @@ -106,15 +105,24 @@ public: static constexpr size_t size = size_t(offset::total_size) - off; signed char tmp[size]; - std::copy_n(_base + off, size, tmp); - std::copy_n(other._base + off, size, _base + off); - std::copy_n(tmp, size, other._base + off); + auto tmp_view = single_fragmented_mutable_view(bytes_mutable_view(std::data(tmp), std::size(tmp))); + + managed_bytes_mutable_view this_view = _base.substr(off, size); + managed_bytes_mutable_view other_view = other._base.substr(off, size); + + copy_fragmented_view(tmp_view, this_view); + copy_fragmented_view(this_view, other_view); + copy_fragmented_view(other_view, tmp_view); } void set_value_and_clock(const basic_counter_shard_view& other) noexcept { static constexpr size_t off = size_t(offset::value); static constexpr size_t size = size_t(offset::total_size) - off; - std::copy_n(other._base + off, size, _base + off); + + managed_bytes_mutable_view this_view = _base.substr(off, size); + managed_bytes_mutable_view other_view = other._base.substr(off, size); + + copy_fragmented_view(this_view, other_view); } bool operator==(const basic_counter_shard_view& other) const { @@ -140,11 +148,6 @@ class counter_shard { counter_id _id; int64_t _value; int64_t _logical_clock; -private: - template - static void write(const T& value, bytes::iterator& out) { - out = std::copy_n(reinterpret_cast(&value), sizeof(T), out); - } private: // Shared logic for applying 
counter_shards and counter_shard_views. // T is either counter_shard or basic_counter_shard_view. @@ -195,10 +198,10 @@ public: static constexpr size_t serialized_size() { return counter_shard_view::size; } - void serialize(bytes::iterator& out) const { - write(_id, out); - write(_value, out); - write(_logical_clock, out); + void serialize(atomic_cell_value_mutable_view& out) const { + write_native(out, _id); + write_native(out, _value); + write_native(out, _logical_clock); } }; @@ -235,7 +238,7 @@ public: size_t serialized_size() const { return _shards.size() * counter_shard::serialized_size(); } - void serialize(bytes::iterator& out) const { + void serialize(atomic_cell_value_mutable_view& out) const { for (auto&& cs : _shards) { cs.serialize(out); } @@ -246,31 +249,18 @@ public: } atomic_cell build(api::timestamp_type timestamp) const { - // If we can assume that the counter shards never cross fragment boundaries - // the serialisation code gets much simpler. - static_assert(data::cell::maximum_external_chunk_length % counter_shard::serialized_size() == 0); - auto ac = atomic_cell::make_live_uninitialized(*counter_type, timestamp, serialized_size()); - auto dst_it = ac.value().begin(); - auto dst_current = *dst_it++; + auto dst = ac.value(); for (auto&& cs : _shards) { - if (dst_current.empty()) { - dst_current = *dst_it++; - } - assert(!dst_current.empty()); - auto value_dst = dst_current.data(); - cs.serialize(value_dst); - dst_current.remove_prefix(counter_shard::serialized_size()); + cs.serialize(dst); } return ac; } static atomic_cell from_single_shard(api::timestamp_type timestamp, const counter_shard& cs) { - // We don't really need to bother with fragmentation here. 
- static_assert(data::cell::maximum_external_chunk_length >= counter_shard::serialized_size()); auto ac = atomic_cell::make_live_uninitialized(*counter_type, timestamp, counter_shard::serialized_size()); - auto dst = ac.value().first_fragment().begin(); + auto dst = ac.value(); cs.serialize(dst); return ac; } @@ -309,12 +299,7 @@ public: template class basic_counter_cell_view { protected: - using linearized_value_view = std::conditional_t; - using pointer_type = std::conditional_t; basic_atomic_cell_view _cell; - linearized_value_view _value; private: class shard_iterator { public: @@ -324,12 +309,12 @@ private: using pointer = basic_counter_shard_view*; using reference = basic_counter_shard_view&; private: - pointer_type _current; + managed_bytes_basic_view _current; basic_counter_shard_view _current_view; + size_t _pos = 0; public: - shard_iterator() = default; - shard_iterator(pointer_type ptr) noexcept - : _current(ptr), _current_view(ptr) { } + shard_iterator(managed_bytes_basic_view v, size_t offset) noexcept + : _current(v), _current_view(_current), _pos(offset) { } basic_counter_shard_view& operator*() noexcept { return _current_view; @@ -338,8 +323,8 @@ private: return &_current_view; } shard_iterator& operator++() noexcept { - _current += counter_shard_view::size; - _current_view = basic_counter_shard_view(_current); + _pos += counter_shard_view::size; + _current_view = basic_counter_shard_view(_current.substr(_pos, counter_shard_view::size)); return *this; } shard_iterator operator++(int) noexcept { @@ -348,8 +333,8 @@ private: return it; } shard_iterator& operator--() noexcept { - _current -= counter_shard_view::size; - _current_view = basic_counter_shard_view(_current); + _pos -= counter_shard_view::size; + _current_view = basic_counter_shard_view(_current.substr(_pos, counter_shard_view::size)); return *this; } shard_iterator operator--(int) noexcept { @@ -358,31 +343,29 @@ private: return it; } bool operator==(const shard_iterator& other) const 
noexcept { - return _current == other._current; - } - bool operator!=(const shard_iterator& other) const noexcept { - return !(*this == other); + return _pos == other._pos; } }; public: boost::iterator_range shards() const { - auto begin = shard_iterator(_value.data()); - auto end = shard_iterator(_value.data() + _value.size()); + auto value = _cell.value(); + auto begin = shard_iterator(value, 0); + auto end = shard_iterator(value, value.size()); return boost::make_iterator_range(begin, end); } size_t shard_count() const { - return _cell.value().size_bytes() / counter_shard_view::size; + return _cell.value().size() / counter_shard_view::size; } -protected: +public: // ac must be a live counter cell - explicit basic_counter_cell_view(basic_atomic_cell_view ac, linearized_value_view vv) noexcept - : _cell(ac), _value(vv) + explicit basic_counter_cell_view(basic_atomic_cell_view ac) noexcept + : _cell(ac) { assert(_cell.is_live()); assert(!_cell.is_counter_update()); } -public: + api::timestamp_type timestamp() const { return _cell.timestamp(); } static data_type total_value_type() { return long_type; } @@ -411,14 +394,6 @@ public: struct counter_cell_view : basic_counter_cell_view { using basic_counter_cell_view::basic_counter_cell_view; - template - static decltype(auto) with_linearized(basic_atomic_cell_view ac, Function&& fn) { - return ac.value().with_linearized([&] (bytes_view value_view) { - counter_cell_view ccv(ac, value_view); - return fn(ccv); - }); - } - // Reversibly applies two counter cells, at least one of them must be live. 
static void apply(const column_definition& cdef, atomic_cell_or_collection& dst, atomic_cell_or_collection& src); @@ -433,9 +408,8 @@ struct counter_cell_mutable_view : basic_counter_cell_view { using basic_counter_cell_view::basic_counter_cell_view; explicit counter_cell_mutable_view(atomic_cell_mutable_view ac) noexcept - : basic_counter_cell_view(ac, ac.value().first_fragment()) + : basic_counter_cell_view(ac) { - assert(!ac.value().is_fragmented()); } void set_timestamp(api::timestamp_type ts) { _cell.set_timestamp(ts); } diff --git a/cql3/expr/expression.cc b/cql3/expr/expression.cc index 14fa29bc57..360e81c52d 100644 --- a/cql3/expr/expression.cc +++ b/cql3/expr/expression.cc @@ -63,7 +63,7 @@ bytes_opt do_get_value(const schema& schema, } assert(cdef.is_atomic()); auto c = cell->as_atomic_cell(cdef); - return c.is_dead(now) ? std::nullopt : bytes_opt(c.value().linearize()); + return c.is_dead(now) ? std::nullopt : bytes_opt(to_bytes(c.value())); } } diff --git a/cql3/functions/error_injection_fcts.cc b/cql3/functions/error_injection_fcts.cc index a073bcc6cc..fcdf4f1a5c 100644 --- a/cql3/functions/error_injection_fcts.cc +++ b/cql3/functions/error_injection_fcts.cc @@ -24,6 +24,7 @@ #include "error_injection_fcts.hh" #include "utils/error_injection.hh" #include "types/list.hh" +#include namespace cql3 { diff --git a/data/cell.cc b/data/cell.cc deleted file mode 100644 index 887a84e2fa..0000000000 --- a/data/cell.cc +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (C) 2018 ScyllaDB - */ - -/* - * This file is part of Scylla. - * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . - */ - -#include "data/cell.hh" - -#include "types.hh" - -thread_local imr::alloc::context_factory lcc; -thread_local imr::alloc::lsa_migrate_fn> data::cell::lsa_last_chunk_migrate_fn(lcc); -thread_local imr::alloc::context_factory ecc; -thread_local imr::alloc::lsa_migrate_fn> data::cell::lsa_chunk_migrate_fn(ecc); - -int compare_unsigned(data::value_view lhs, data::value_view rhs) noexcept -{ - auto it1 = lhs.begin(); - auto it2 = rhs.begin(); - while (it1 != lhs.end() && it2 != rhs.end()) { - auto r = ::compare_unsigned(*it1, *it2); - if (r) { - return r; - } - ++it1; - ++it2; - } - if (it1 != lhs.end()) { - return 1; - } else if (it2 != rhs.end()) { - return -1; - } - return 0; -} - diff --git a/data/cell.hh b/data/cell.hh deleted file mode 100644 index 4c6173af97..0000000000 --- a/data/cell.hh +++ /dev/null @@ -1,1014 +0,0 @@ -/* - * Copyright (C) 2018 ScyllaDB - */ - -/* - * This file is part of Scylla. - * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . 
- */ - -#pragma once - -#include -#include - -#include - -#include "imr/compound.hh" -#include "imr/fundamental.hh" -#include "imr/alloc.hh" -#include "imr/utils.hh" -#include "imr/concepts.hh" - -#include "data/schema_info.hh" -#include "data/value_view.hh" - -#include "gc_clock.hh" -#include "timestamp.hh" - -namespace data { - -struct cell { - // We make the internal storage 1KB smaller for 2 reasons: - // * To ensure the cell storage doesn't exceed 8KB in total (and hence - // reverts to a 16KB allocation, wasting a lot of space). - // * To make sure we can distinguish between external and internal storage - // values based on size. External storage will practically store less than - // 8KB in chunks due to chunk header overhead. So in order to be able to - // decide on size alone we need to leave sufficient room between the two. - static constexpr size_t maximum_internal_storage_length = 7 * 1024; - static constexpr size_t maximum_external_chunk_length = 8 * 1024; - - - struct tags { - class cell; - class atomic_cell; - class collection; - - class flags; - class live; - class expiring; - class counter_update; - class external_data; - - class ttl; - class expiry; - class empty; - class timestamp; - class value; - class dead; - class counter_update; - class fixed_value; - class variable_value; - class value_size; - class value_data; - class pointer; - class data; - class external_data; - - class chunk_back_pointer; - class chunk_next; - class chunk_data; - class last_chunk_size; - }; - - using flags = imr::flags< - tags::collection, - tags::live, - tags::expiring, - tags::counter_update, - tags::empty, - tags::external_data - >; - - using fixed_value = imr::buffer; - - using variable_value_data_variant = imr::variant>>, - imr::member> - >; - - using variable_value_structure = imr::structure< - imr::member>, - imr::member - >; - - /// Cell value - /// - /// The cell value can be either a deletion time (if the cell is dead), - /// a delta (counter update cell), 
fixed-size value or variable-sized value. - using value_variant = imr::variant>, - imr::member>, - imr::member, - imr::member - >; - - template - class value_writer { - FragmentRange _value; - - typename FragmentRange::const_iterator _value_it; - typename FragmentRange::const_iterator _value_end; - bytes_view _value_current; - - size_t _value_size; - bool _force_internal; - private: - // Distinguishes between cell::make_live_uninitialized() and other - // cell::make_live*() variants. - static constexpr bool initialize_value() { - return !std::is_same_v; - } - - auto write_all_to_destination() { - if constexpr (initialize_value()) { - return [this] (uint8_t* out) noexcept { - auto dst = reinterpret_cast(out); - while (_value_it != _value_end) { - _value_current = *_value_it++; - dst = std::copy_n(_value_current.data(), _value_current.size(), dst); - } - }; - } else { - return [] (uint8_t*) noexcept { }; - } - } - - auto write_to_destination(size_t n) { - if constexpr (initialize_value()) { - return [this, n] (uint8_t* out) mutable noexcept { - auto dst = reinterpret_cast(out); - while (n) { - if (_value_current.empty()) { - ++_value_it; - _value_current = *_value_it; - } - auto this_size = std::min(_value_current.size(), n); - dst = std::copy_n(_value_current.data(), this_size, dst); - _value_current.remove_prefix(this_size); - n -= this_size; - } - }; - } else { - return [] (uint8_t*) noexcept { }; - } - } - public: - value_writer(FragmentRange value, size_t value_size, bool force_internal) - : _value(std::move(value)) - , _value_it(_value.begin()) - , _value_end(_value.end()) - , _value_current(_value.empty() ? 
bytes_view() : *_value_it) - , _value_size(value_size) - , _force_internal(force_internal) - { } - - template - requires (imr::is_sizer_for_v - && std::is_same_v) - || (imr::is_serializer_for_v - && std::is_same_v) - auto operator()(Serializer serializer, Allocator allocations) { - auto after_size = serializer.serialize(_value_size); - if (_force_internal || _value_size <= cell::maximum_internal_storage_length) { - return after_size - .template serialize_as(_value_size, write_all_to_destination()) - .done(); - } - - imr::placeholder> next_pointer_phldr; - auto next_pointer_position = after_size.position(); - auto cell_ser = after_size.template serialize_as(next_pointer_phldr); - - auto offset = 0; - auto migrate_fn_ptr = &cell::lsa_chunk_migrate_fn; - while (_value_size - offset > cell::effective_external_chunk_length) { - imr::placeholder> phldr; - auto chunk_ser = allocations.template allocate_nested(migrate_fn_ptr) - .serialize(next_pointer_position); - next_pointer_position = chunk_ser.position(); - next_pointer_phldr.serialize( - chunk_ser.serialize(phldr) - .serialize(cell::effective_external_chunk_length, write_to_destination(cell::effective_external_chunk_length)) - .done() - ); - next_pointer_phldr = phldr; - offset += cell::effective_external_chunk_length; - } - - size_t left = _value_size - offset; - auto ptr = allocations.template allocate_nested(&cell::lsa_last_chunk_migrate_fn) - .serialize(next_pointer_position) - .serialize(left) - .serialize(left, write_to_destination(left)) - .done(); - next_pointer_phldr.serialize(ptr); - return cell_ser.done(); - } - }; - - /// Variable-length cell value - /// - /// This is a definition of the IMR structure of a variable-length value. - /// It is used both by collections, counters and regular cells which type - /// is variable-sized. The data can be stored internally, if its size is - /// smaller or equal maximum_internal_storage_length or externally if it - /// larger. 
- struct variable_value { - using data_variant = variable_value_data_variant; - using structure = variable_value_structure; - - /// Create writer of a variable-size value - /// - /// Returns a function object that can be used as a writer of a variable - /// value. The first argument is expected to be either IMR sizer or - /// serializer and the second is an appropriate IMR allocator helper - /// object. - /// \arg force_internal if set to true stores the value internally - /// regardless of its size (used by collection members). - template - static value_writer> write(FragmentRange&& value, bool force_internal = false) noexcept; - static auto write(bytes_view value, bool force_internal = false) noexcept; - - /// Create writer of an uninitialised variable-size value - static value_writer write(size_t size, bool force_internal = false) noexcept; - - class context { - bool _external_storage; - uint32_t _value_size; - public: - explicit context(bool external_storage, uint32_t value_size) noexcept - : _external_storage(external_storage), _value_size(value_size) { } - template - auto active_alternative_of() const noexcept { - if (_external_storage) { - return data_variant::index_for(); - } else { - return data_variant::index_for(); - } - } - template - size_t size_of() const noexcept { - return _value_size; - } - template - auto context_for(Args&&...) const noexcept { - return *this; - } - }; - - template - static basic_value_view do_make_view(structure::basic_view view, bool external_storage); - - static data::value_view make_view(structure::view view, bool external_storage) { - return do_make_view(view, external_storage); - } - static data::value_mutable_view make_view(structure::mutable_view view, bool external_storage) { - return do_make_view(view, external_storage); - } - }; - - /// Atomic cell - /// - /// Atomic cells can be either regular cells or counters. Moreover, the - /// cell may be live or dead and the regular cells may have expiration time. 
- /// Counter cells may be either sets of shards or a delta. The former is not - /// fully converted to the IMR yet and still use a custom serilalisation - /// format. The IMR treats such cells the same way it handles regular blobs. - using atomic_cell = imr::structure< - imr::member>, - imr::optional_member>, - imr::member> - >>, - imr::member - >; - using atomic_cell_or_collection = imr::variant, - imr::member - >; - - /// Top IMR definition of a cell - /// - /// A cell in Scylla's data model can be either atomic (a regular cell, - /// a counter or a frozen collection) or an unfrozen collection. As for now - /// only regular cells are fully utilising the IMR. Collections are still - /// using custom serialisation format and from the IMR point of view are - /// just opaque values. - using structure = imr::structure< - imr::member, - imr::member - >; - - /// An fragment of externally stored value - /// - /// If a cell value size is above maximum_internal_storage_length it is - /// stored externally. Moreover, in order to avoid stressing the memory - /// allocators with large allocations values are fragmented in chunks - /// no larger than maximum_external_chunk_length. The gross size (including - /// the chunk header) of all chunks, but the last one is always - /// maximum_external_chunk_length. - using external_chunk = imr::structure< - imr::member>>, - imr::member>, - imr::member> - >; - static constexpr size_t external_chunk_overhead = sizeof(uint8_t*) * 2; - static constexpr size_t effective_external_chunk_length = maximum_external_chunk_length - external_chunk_overhead; - - using external_last_chunk_size = imr::pod; - /// The last fragment of an externally stored value - /// - /// The size of the last fragment of a value stored externally may vary. - /// Due to the requirements the LSA imposes on migrators we need to store - /// the size inside it so that it can be retrieved when the LSA migrates - /// object. 
- using external_last_chunk = imr::structure< - imr::member>>, - imr::member, - imr::member> - >; - static constexpr size_t external_last_chunk_overhead = sizeof(uint8_t*) + sizeof(uint16_t); - - class context; - class minimal_context; - - /// Value fragment deserialisation context - /// - /// This is a deserialization context for all, but last, value fragments. - /// Their size is fixed. - struct chunk_context { - explicit constexpr chunk_context(const uint8_t*) noexcept { } - - template - static constexpr size_t size_of() noexcept { - return cell::effective_external_chunk_length; - } - template - auto context_for(Args&&...) const noexcept { - return *this; - } - }; - - /// Last value fragment deserialisation context - class last_chunk_context { - uint16_t _size; - public: - explicit last_chunk_context(const uint8_t* ptr) noexcept - : _size(external_last_chunk::get_member(ptr).load()) - { } - - template - size_t size_of() const noexcept { - return _size; - } - - template - auto context_for(Args&&...) const noexcept { - return *this; - } - }; - - template - class basic_atomic_cell_view; - - using atomic_cell_view = basic_atomic_cell_view; - using mutable_atomic_cell_view = basic_atomic_cell_view; -private: - static thread_local imr::alloc::lsa_migrate_fn> lsa_last_chunk_migrate_fn; - static thread_local imr::alloc::lsa_migrate_fn> lsa_chunk_migrate_fn; - -public: - /// Make a writer that copies a cell - /// - /// This function creates a writer that copies a cell. It can be either - /// atomic or a collection. - /// - /// \arg ptr needs to remain valid as long as the writer is in use. - /// \returns imr::WriterAllocator for cell::structure. - static auto copy_fn(const type_info& ti, const uint8_t* ptr); - - /// Make a writer for a collection - /// - /// \arg data needs to remain valid as long as the writer is in use. - /// \returns imr::WriterAllocator for cell::structure. 
- template>>> - requires std::is_nothrow_move_constructible_v> && - std::is_nothrow_copy_constructible_v> && - std::is_nothrow_copy_assignable_v> && - std::is_nothrow_move_assignable_v> - static auto make_collection(FragmentRange data) noexcept { - return [data = std::move(data)] (auto&& serializer, auto&& allocations) noexcept { - return serializer - .serialize(imr::set_flag(), - imr::set_flag(data.size_bytes() > maximum_internal_storage_length)) - .template serialize_as(variable_value::write(data), allocations) - .done(); - }; - } - - static auto make_collection(bytes_view data) noexcept { - return make_collection(single_fragment_range(data)); - } - - /// Make a writer for a dead cell - /// - /// This function returns a generic lambda that is a writer for a dead - /// cell with the specified timestamp and deletion time. - /// - /// \returns imr::WriterAllocator for cell::structure. - static auto make_dead(api::timestamp_type ts, gc_clock::time_point deletion_time) noexcept { - return [ts, deletion_time] (auto&& serializer, auto&&...) noexcept { - return serializer - .serialize() - .template serialize_as_nested() - .serialize(ts) - .skip() - .template serialize_as(deletion_time.time_since_epoch().count()) - .done() - .done(); - }; - } - static auto make_live_counter_update(api::timestamp_type ts, int64_t delta) noexcept { - return [ts, delta] (auto&& serializer, auto&&...) noexcept { - return serializer - .serialize(imr::set_flag(), - imr::set_flag()) - .template serialize_as_nested() - .serialize(ts) - .skip() - .template serialize_as(delta) - .done() - .done(); - }; - } - - /// Make a writer for a live non-expiring cell - /// - /// \arg value needs to remain valid as long as the writer is in use. - /// \arg force_internal always store the value internally regardless of its - /// size. This is a temporary (hopefully, sorry if you are reading this in - /// 2020) hack to make integration with collections easier. 
- /// - /// \returns imr::WriterAllocator for cell::structure. - template>>> - static auto make_live(const type_info& ti, api::timestamp_type ts, FragmentRange&& value, bool force_internal = false) noexcept { - return [&ti, ts, value, force_internal] (auto&& serializer, auto&& allocations) noexcept { - auto after_expiring = serializer - .serialize(imr::set_flag(), - imr::set_flag(value.empty()), - imr::set_flag(!force_internal && !ti.is_fixed_size() && value.size_bytes() > maximum_internal_storage_length)) - .template serialize_as_nested() - .serialize(ts) - .skip(); - return [&] { - if (ti.is_fixed_size()) { - return after_expiring.template serialize_as(value); - } else { - return after_expiring - .template serialize_as(variable_value::write(value, force_internal), allocations); - } - }().done().done(); - }; - } - - static auto make_live(const type_info& ti, api::timestamp_type ts, bytes_view value, bool force_internal = false) noexcept { - return make_live(ti, ts, single_fragment_range(value), force_internal); - } - - template>>> - static auto make_live(const type_info& ti, api::timestamp_type ts, FragmentRange&& value, gc_clock::time_point expiry, gc_clock::duration ttl, bool force_internal = false) noexcept - { - return [&ti, ts, value, expiry, ttl, force_internal] (auto&& serializer, auto&& allocations) noexcept { - auto after_expiring = serializer - .serialize(imr::set_flag(), - imr::set_flag(), - imr::set_flag(value.empty()), - imr::set_flag(!force_internal && !ti.is_fixed_size() && value.size_bytes() > maximum_internal_storage_length)) - .template serialize_as_nested() - .serialize(ts) - .serialize_nested() - .serialize(gc_clock::as_int32(ttl)) - .serialize(expiry.time_since_epoch().count()) - .done(); - return [&] { - if (ti.is_fixed_size()) { - return after_expiring.template serialize_as(value); - } else { - return after_expiring - .template serialize_as(variable_value::write(value, force_internal), allocations); - } - }().done().done(); - }; - } - - 
static auto make_live(const type_info& ti, api::timestamp_type ts, bytes_view value, gc_clock::time_point expiry, gc_clock::duration ttl, bool force_internal = false) noexcept { - return make_live(ti, ts, single_fragment_range(value), expiry, ttl, force_internal); - } - - /// Make a writer of a live cell with uninitialised value - /// - /// This function returns a function object which is a writer of a live - /// cell. The space for value is allocated but not initialised. This can be - /// used if the value is a result of some IMR-independent serialisation - /// (e.g. counters). - /// - /// \returns imr::WriterAllocator for cell::structure. - static auto make_live_uninitialized(const type_info& ti, api::timestamp_type ts, size_t size) noexcept { - return [&ti, ts, size] (auto&& serializer, auto&& allocations) noexcept { - auto after_expiring = serializer - .serialize(imr::set_flag(), - imr::set_flag(!size), - imr::set_flag(!ti.is_fixed_size() && size > maximum_internal_storage_length)) - .template serialize_as_nested() - .serialize(ts) - .skip(); - return [&] { - if (ti.is_fixed_size()) { - return after_expiring.template serialize_as(size, [] (uint8_t*) noexcept { }); - } else { - return after_expiring - .template serialize_as(variable_value::write(size, false), allocations); - } - }().done().done(); - }; - } - - template - static size_t size_of(Builder&& builder, imr::alloc::object_allocator& allocator) noexcept { - return structure::size_when_serialized(std::forward(builder), allocator.get_sizer()); - } - - template - static size_t serialize(uint8_t* ptr, Builder&& builder, imr::alloc::object_allocator& allocator) noexcept { - return structure::serialize(ptr, std::forward(builder), allocator.get_serializer()); - } - - static atomic_cell_view make_atomic_cell_view(const type_info& ti, const uint8_t* ptr) noexcept; - static mutable_atomic_cell_view make_atomic_cell_view(const type_info& ti, uint8_t* ptr) noexcept; - - static void destroy(uint8_t* ptr) noexcept; -}; 
- -/// Minimal cell deserialisation context -/// -/// This is a minimal deserialisation context that doesn't require the cell -/// type to be known, but allows only some operations to be performed. In -/// particular it is able to provide sufficient information to destroy a cell. -class cell::minimal_context { -protected: - cell::flags::view _flags; -public: - explicit minimal_context(cell::flags::view flags) noexcept - : _flags(flags) { } - - template - bool is_present() const noexcept; - - template - auto active_alternative_of() const noexcept; - - template - size_t size_of() const noexcept; - - template - auto context_for(const uint8_t*) const noexcept { - return *this; - } -}; - -template<> -inline bool cell::minimal_context::is_present() const noexcept { - return _flags.get(); -} - -template<> -inline auto cell::minimal_context::active_alternative_of() const noexcept { - if (_flags.get()) { - return cell::atomic_cell_or_collection::index_for(); - } else { - return cell::atomic_cell_or_collection::index_for(); - } -} - -/// Cell deserialisation context -/// -/// This class combines schema-dependnent and instance-specific information -/// and provides an appropriate interface for the IMR deserialisation routines -/// to read a cell. 
-class cell::context : public cell::minimal_context { - type_info _type; -public: - explicit context(const uint8_t* ptr, const type_info& tinfo) noexcept - : context(structure::get_member(ptr), tinfo) { } - - explicit context(cell::flags::view flags, const type_info& tinfo) noexcept - : minimal_context(flags), _type(tinfo) { } - - template - bool is_present() const noexcept { - return minimal_context::is_present(); - } - - template - auto active_alternative_of() const noexcept { - return minimal_context::active_alternative_of(); - } - - template - size_t size_of() const noexcept; - - template - auto context_for(const uint8_t*) const noexcept { - return *this; - } -}; - -template<> -inline auto cell::context::context_for(const uint8_t* ptr) const noexcept { - auto length = variable_value::structure::get_member(ptr); - return variable_value::context(_flags.get(), length.load()); -} - -template<> -inline auto cell::context::context_for(const uint8_t* ptr) const noexcept { - auto length = variable_value::structure::get_member(ptr); - return variable_value::context(_flags.get(), length.load()); -} - - -template<> -inline auto cell::context::active_alternative_of() const noexcept { - if (_flags.get()) { - if (__builtin_expect(_flags.get(), false)) { - return cell::value_variant::index_for(); - } - if (_type.is_fixed_size()) { - return cell::value_variant::index_for(); - } else { - return cell::value_variant::index_for(); - } - } else { - return cell::value_variant::index_for(); - } -} - -template<> -inline size_t cell::context::size_of() const noexcept { - return _flags.get() ? 0 : _type.value_size(); -} - -/// Atomic cell view -/// -/// This is a, possibly mutable, view of an atomic cell. It is a wrapper on top -/// of IMR-generated view that provides more convenient interface which doesn't -/// depend on the actual cell structure. -/// -/// \note Instances of this class are being copied and passed by value a lot. 
-/// It is desireable that it remains small and trivial, so that the compiler -/// can try to keep it in registers at all times. We also should not worry too -/// much about computing the same thing more than once (unless the profiler -/// tells otherwise, of course). Most of the IMR code and its direct users rely -/// heavily on inlining which would allow the compiler remove duplicated -/// computations. -template -class cell::basic_atomic_cell_view { -public: - using view_type = structure::basic_view; -private: - type_info _type; - view_type _view; -private: - flags::view flags_view() const noexcept { - return _view.template get(); - } - atomic_cell::basic_view cell_view() const noexcept { - return _view.template get().template as(); - } - context make_context() const noexcept { - return context(flags_view(), _type); - } -public: - basic_atomic_cell_view(const type_info& ti, view_type v) noexcept - : _type(ti), _view(std::move(v)) { } - - operator basic_atomic_cell_view() const noexcept { - return basic_atomic_cell_view(_type, _view); - } - - const uint8_t* raw_pointer() const { return _view.raw_pointer(); } - - bytes_view serialize() const noexcept { - assert(!flags_view().template get()); - auto ptr = raw_pointer(); - auto len = structure::serialized_object_size(ptr, make_context()); - return bytes_view(reinterpret_cast(ptr), len); - } - - bool is_live() const noexcept { - return flags_view().template get(); - } - bool is_expiring() const noexcept { - return flags_view().template get(); - } - bool is_counter_update() const noexcept { - return flags_view().template get(); - } - - api::timestamp_type timestamp() const noexcept { - return cell_view().template get().load(); - } - void set_timestamp(api::timestamp_type ts) noexcept { - cell_view().template get().store(ts); - } - - gc_clock::time_point expiry() const noexcept { - auto v = cell_view().template get().get().template get().load(); - return gc_clock::time_point(gc_clock::duration(v)); - } - 
gc_clock::duration ttl() const noexcept { - auto v = cell_view().template get().get().template get().load(); - return gc_clock::duration(v); - } - - gc_clock::time_point deletion_time() const noexcept { - auto v = cell_view().template get(make_context()).template as().load(); - return gc_clock::time_point(gc_clock::duration(v)); - } - - int64_t counter_update_value() const noexcept { - return cell_view().template get(make_context()).template as().load(); - } - - basic_value_view value() const noexcept { - auto ctx = make_context(); - return cell_view().template get(ctx).visit(make_visitor( - [] (fixed_value::basic_view view) { return basic_value_view(view, 0, nullptr); }, - [&] (variable_value::structure::basic_view view) { - return variable_value::make_view(view, flags_view().template get()); - }, - [] (...) -> basic_value_view { abort(); } - ), ctx); - } - - size_t value_size() const noexcept { - auto ctx = make_context(); - return cell_view().template get(ctx).visit(make_visitor( - [] (fixed_value::view view) -> size_t { return view.size(); }, - [] (variable_value::structure::view view) -> size_t { - return view.template get().load(); - }, - [] (...) 
-> size_t { abort(); } - ), ctx); - } - - bool is_value_fragmented() const noexcept { - return flags_view().template get() && value_size() > cell::effective_external_chunk_length; - } -}; - -inline auto cell::copy_fn(const type_info& ti, const uint8_t* ptr) -{ - // Slow path - return [&ti, ptr] (auto&& serializer, auto&& allocations) noexcept { - auto f = structure::get_member(ptr); - context ctx(ptr, ti); - if (f.get()) { - auto view = structure::get_member(ptr).as(ctx); - auto dv = variable_value::make_view(view, f.get()); - return make_collection(dv)(serializer, allocations); - } else { - auto acv = atomic_cell_view(ti, structure::make_view(ptr, ti)); - if (acv.is_live()) { - if (acv.is_counter_update()) { - return make_live_counter_update(acv.timestamp(), acv.counter_update_value())(serializer, allocations); - } else if (acv.is_expiring()) { - return make_live(ti, acv.timestamp(), acv.value(), acv.expiry(), acv.ttl())(serializer, allocations); - } - return make_live(ti, acv.timestamp(), acv.value())(serializer, allocations); - } else { - return make_dead(acv.timestamp(), acv.deletion_time())(serializer, allocations); - } - } - }; -} - -inline cell::atomic_cell_view cell::make_atomic_cell_view(const type_info& ti, const uint8_t* ptr) noexcept { - return atomic_cell_view(ti, structure::make_view(ptr)); -} - -inline cell::mutable_atomic_cell_view cell::make_atomic_cell_view(const type_info& ti, uint8_t* ptr) noexcept { - return mutable_atomic_cell_view(ti, structure::make_view(ptr)); -} - -/// Context for external value destruction -/// -/// When a cell value is stored externally as a list of fragments we need to -/// know when we reach the last fragment. The way to do that is to read the -/// total value size from the parent cell object and use the fact that the size -/// of all fragments except the last one is cell::effective_external_chunk_length. 
-class fragment_chain_destructor_context : public imr::no_context_t { - size_t _total_length; -public: - explicit fragment_chain_destructor_context(size_t total_length) noexcept - : _total_length(total_length) { } - - void next_chunk() noexcept { _total_length -= data::cell::effective_external_chunk_length; } - bool is_last_chunk() const noexcept { return _total_length <= data::cell::effective_external_chunk_length; } -}; - -} - -namespace imr { -namespace methods { - -/// Cell destructor -/// -/// If the cell value exceeds certain thresholds its value is stored externally -/// (possibly fragmented). This requires a destructor so that the owned memory -/// can be freed when the cell is destroyed. -/// Note that we don't need to know the actual type of the cell to destroy it, -/// since all the necessary information is stored in each instance. This means -/// that IMR cells can be owned by C++ objects without the problem of passing -/// arguments to C++ destructors. -template<> -struct destructor { - static void run(uint8_t* ptr, ...) { - auto flags = data::cell::structure::get_member(ptr); - if (flags.get()) { - auto cell_offset = data::cell::structure::offset_of(ptr); - auto variable_value_ptr = [&] { - if (flags.get()) { - return ptr + cell_offset; - } else { - auto ctx = data::cell::minimal_context(flags); - auto offset = data::cell::atomic_cell::offset_of(ptr + cell_offset, ctx); - return ptr + cell_offset + offset; - } - }(); - imr::methods::destroy(variable_value_ptr); - } - } -}; - -/// Cell mover -template<> -struct mover { - static void run(uint8_t* ptr, ...) 
{ - auto flags = data::cell::structure::get_member(ptr); - if (flags.get()) { - auto cell_offset = data::cell::structure::offset_of(ptr); - auto variable_value_ptr = [&] { - if (flags.get()) { - return ptr + cell_offset; - } else { - auto ctx = data::cell::minimal_context(flags); - auto offset = data::cell::atomic_cell::offset_of(ptr + cell_offset, ctx); - return ptr + cell_offset + offset; - } - }(); - variable_value_ptr += data::cell::variable_value::structure::offset_of(variable_value_ptr); - imr::methods::move>>(variable_value_ptr); - } - } -}; - -template<> -struct destructor { - static void run(uint8_t* ptr, ...) { - auto varval = data::cell::variable_value::structure::make_view(ptr); - auto total_length = varval.template get().load(); - if (total_length <= data::cell::maximum_internal_storage_length) { - return; - } - auto ctx = data::fragment_chain_destructor_context(total_length); - auto ptr_view = varval.get().as(); - if (ctx.is_last_chunk()) { - imr::methods::destroy(ptr_view.load()); - } else { - imr::methods::destroy(ptr_view.load(), ctx); - } - current_allocator().free(ptr_view.load()); - } -}; - -template<> -struct mover>> { - static void run(uint8_t* ptr, ...) { - auto ptr_view = imr::pod::make_view(ptr); - auto chk_ptr = ptr_view.load(); - auto chk = data::cell::external_last_chunk::make_view(chk_ptr, data::cell::last_chunk_context(chk_ptr)); - chk.get().store(ptr); - } -}; - -template<> -struct mover>> { - static void run(uint8_t* bptr, ...) 
{ - auto bptr_view = imr::pod::make_view(bptr); - auto ptr_ptr = bptr_view.load(); - auto ptr = imr::pod::make_view(ptr_ptr); - ptr.store(bptr); - - } -}; - -/// External chunk destructor -template<> -struct destructor { - static void run(uint8_t* ptr, data::fragment_chain_destructor_context ctx) { - bool first = true; - while (true) { - ctx.next_chunk(); - - auto echk_view = data::cell::external_chunk::make_view(ptr); - auto ptr_view = echk_view.get(); - if (ctx.is_last_chunk()) { - imr::methods::destroy(ptr_view.load()); - current_allocator().free(ptr_view.load()); - if (!first) { - current_allocator().free(ptr); - } - break; - } else { - auto last = ptr; - ptr = ptr_view.load(); - if (!first) { - current_allocator().free(last); - } else { - first = false; - } - } - - } - } -}; - -template<> -struct mover { - static void run(uint8_t* ptr, ...) { - auto echk_view = data::cell::external_chunk::make_view(ptr, data::cell::chunk_context(ptr)); - auto next_ptr = echk_view.get().load(); - auto bptr = imr::pod::make_view(next_ptr); - bptr.store(ptr + echk_view.offset_of()); - - auto back_ptr = echk_view.get().load(); - auto nptr = imr::pod::make_view(back_ptr); - nptr.store(ptr); - } -}; - -} -} - -template<> -struct appending_hash { - template - void operator()(Hasher& h, data::value_view v) const { - feed_hash(h, v.size_bytes()); - using boost::range::for_each; - for_each(v, [&h] (auto&& chk) { - h.update(reinterpret_cast(chk.data()), chk.size()); - }); - } -}; - -int compare_unsigned(data::value_view lhs, data::value_view rhs) noexcept; - -namespace data { - -struct type_imr_descriptor { - using context_factory = imr::alloc::context_factory, data::type_info>; - using lsa_migrate_fn = imr::alloc::lsa_migrate_fn::structure, context_factory>; -private: - data::type_info _type_info; - lsa_migrate_fn _lsa_migrator; -public: - explicit type_imr_descriptor(data::type_info ti) - : _type_info(ti) - , _lsa_migrator(context_factory(ti)) - { } - - const data::type_info& 
type_info() const { return _type_info; } - const lsa_migrate_fn& lsa_migrator() const { return _lsa_migrator; } -}; - -} - -#include "value_view_impl.hh" -#include "cell_impl.hh" diff --git a/data/cell_impl.hh b/data/cell_impl.hh deleted file mode 100644 index 8f523d6095..0000000000 --- a/data/cell_impl.hh +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (C) 2018 ScyllaDB - */ - -/* - * This file is part of Scylla. - * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . 
- */ - -#pragma once - -#include "data/cell.hh" - -namespace data { - -template -using value_writer = cell::value_writer; - -inline value_writer cell::variable_value::write(size_t value_size, bool force_internal) noexcept -{ - static_assert(imr::WriterAllocator, structure>); - return value_writer(empty_fragment_range(), value_size, force_internal); -} - -template -inline value_writer> cell::variable_value::write(FragmentRange&& value, bool force_internal) noexcept -{ - static_assert(imr::WriterAllocator>, structure>); - return value_writer>(std::forward(value), value.size_bytes(), force_internal); -} - -inline auto cell::variable_value::write(bytes_view value, bool force_internal) noexcept -{ - return write(single_fragment_range(value), force_internal); -} - -template -inline basic_value_view cell::variable_value::do_make_view(structure::basic_view view, bool external_storage) -{ - auto size = view.template get().load(); - context ctx(external_storage, size); - return view.template get().visit(make_visitor( - [&] (imr::pod::view ptr) { - auto ex_ptr = static_cast(ptr.load()); - if (size > cell::effective_external_chunk_length) { - auto ex_ctx = chunk_context(ex_ptr); - auto ex_view = external_chunk::make_view(ex_ptr, ex_ctx); - auto next = static_cast(ex_view.get().load()); - return basic_value_view(ex_view.get(ex_ctx), size - cell::effective_external_chunk_length, next); - } else { - auto ex_ctx = last_chunk_context(ex_ptr); - auto ex_view = external_last_chunk::make_view(ex_ptr, ex_ctx); - assert(ex_view.get(ex_ctx).size() == size); - return basic_value_view(ex_view.get(ex_ctx), 0, nullptr); - } - }, - [] (imr::buffer::basic_view data) { - return basic_value_view(data, 0, nullptr); - } - ), ctx); -} - -} diff --git a/data/schema_info.hh b/data/schema_info.hh deleted file mode 100644 index 5d3053fea5..0000000000 --- a/data/schema_info.hh +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (C) 2018 ScyllaDB - */ - -/* - * This file is part of Scylla. 
- * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . - */ - -#pragma once - -#include - -namespace data { - -/// Type information -/// -/// `type_info` keeps the type information relevant for the serialisation code. -/// In particular we need to distinguish between fixed-size and variable-sized -/// types. Collections and counters are considered to be variable-sized types. -/// -/// \note Even if the type is fixed-size (e.g. `int32_type`) the value can be -/// empty and its length will be 0. This is a special (and rare) case handled -/// by the cell implementation and ignored by `type_info`. -class type_info { - size_t _fixed_size; -private: - explicit type_info(size_t size) noexcept : _fixed_size(size) { } -public: - static type_info make_fixed_size(size_t size) noexcept { - return type_info { size_t(size) }; - } - static type_info make_variable_size() noexcept { - return type_info { 0 }; - } - static type_info make_collection() noexcept { - return type_info { 0 }; - } - - /// Check whether the type is fixed-size. - bool is_fixed_size() const noexcept { - return _fixed_size > 0; - } - - /// Get the size of the value of a fixed-size type. - /// - /// Valid only if `is_fixed_size()` returns `true`. 
- size_t value_size() const noexcept { - return _fixed_size; - } -}; - -} diff --git a/data/value_view.hh b/data/value_view.hh deleted file mode 100644 index d62d08ac23..0000000000 --- a/data/value_view.hh +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright (C) 2018 ScyllaDB - */ - -/* - * This file is part of Scylla. - * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . - */ - -#pragma once - -#include "utils/fragment_range.hh" - -namespace data { - -/// View of a cell value -/// -/// `basic_value_view` is a non-owning reference to a, possibly fragmented, -/// opaque value of a cell. It behaves like an immutable range of fragments. -/// -/// Moreover, there are functions that linearise the value in order to ease the -/// integration with the pre-existing code. Nevertheless, using them should be -/// avoided. -/// -/// \note For now `basic_value_view` is used by regular atomic cells, counters -/// and collections. This is due to the fact that counters and collections -/// haven't been fully transitioned to the IMR yet and still use custom -/// serialisation formats. Once this is resolved `value_view` can be used -/// exclusively by regular atomic cells. 
-template -class basic_value_view { -public: - using fragment_type = std::conditional_t; - using raw_pointer_type = std::conditional_t; -private: - size_t _remaining_size; - fragment_type _first_fragment; - raw_pointer_type _next; -public: - basic_value_view(fragment_type first, size_t remaining_size, raw_pointer_type next) - : _remaining_size(remaining_size), _first_fragment(first), _next(next) - { } - - explicit basic_value_view(fragment_type first) - : basic_value_view(first, 0, nullptr) - { } - - /// Iterator over fragments - class iterator { - fragment_type _view; - raw_pointer_type _next; - size_t _left; - public: - using iterator_category = std::forward_iterator_tag; - using value_type = fragment_type; - using pointer = const fragment_type*; - using reference = const fragment_type&; - using difference_type = std::ptrdiff_t; - - iterator(fragment_type bv, size_t total, raw_pointer_type next) noexcept - : _view(bv), _next(next), _left(total) { } - - const fragment_type& operator*() const { - return _view; - } - const fragment_type* operator->() const { - return &_view; - } - - iterator& operator++(); - iterator operator++(int) { - auto it = *this; - operator++(); - return it; - } - - bool operator==(const iterator& other) const { - return _view.data() == other._view.data(); - } - bool operator!=(const iterator& other) const { - return !(*this == other); - } - }; - - using const_iterator = iterator; - - auto begin() const { - return iterator(_first_fragment, _remaining_size, _next); - } - auto end() const { - return iterator(fragment_type(), 0, nullptr); - } - - bool operator==(const basic_value_view& other) const noexcept; - bool operator==(bytes_view bv) const noexcept; - - /// Total size of the value - size_t size_bytes() const noexcept { - return _first_fragment.size() + _remaining_size; - } - - bool empty() const noexcept { - return _first_fragment.empty(); - } - - bool is_fragmented() const noexcept { - return bool(_next); - } - - fragment_type 
first_fragment() const noexcept { - return _first_fragment; - } - - bytes linearize() const; - - template - decltype(auto) with_linearized(Function&& fn) const; -}; - -using value_view = basic_value_view; -using value_mutable_view = basic_value_view; - -} diff --git a/data/value_view_impl.hh b/data/value_view_impl.hh deleted file mode 100644 index 40f716f110..0000000000 --- a/data/value_view_impl.hh +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright (C) 2018 ScyllaDB - */ - -/* - * This file is part of Scylla. - * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . - */ - -#pragma once - -#include "data/cell.hh" - -namespace data { - -template -inline typename basic_value_view::iterator& basic_value_view::iterator::operator++() -{ - if (!_next) { - _view = fragment_type(); - } else if (_left > cell::effective_external_chunk_length) { - cell::chunk_context ctx(_next); - auto v = cell::external_chunk::make_view(_next, ctx); - _next = static_cast(v.template get(ctx).load()); - _view = v.template get(ctx); - _left -= cell::effective_external_chunk_length; - } else { - cell::last_chunk_context ctx(_next); - auto v = cell::external_last_chunk::make_view(_next, ctx); - _view = v.template get(ctx); - _next = nullptr; - } - return *this; -} - -template -inline bool basic_value_view::operator==(const basic_value_view& other) const noexcept -{ - // We can assume that all values are fragmented exactly in the same way. 
- auto it1 = begin(); - auto it2 = other.begin(); - while (it1 != end() && it2 != other.end()) { - if (*it1 != *it2) { - return false; - } - ++it1; - ++it2; - } - return it1 == end() && it2 == other.end(); -} - -template -inline bool basic_value_view::operator==(bytes_view bv) const noexcept -{ - bool equal = true; - using boost::range::for_each; - for_each(*this, [&] (bytes_view fragment) { - if (fragment.size() > bv.size()) { - equal = false; - } else { - auto bv_frag = bv.substr(0, fragment.size()); - equal = equal && fragment == bv_frag; - bv.remove_prefix(fragment.size()); - } - }); - return equal && bv.empty(); -} - -template -inline bytes basic_value_view::linearize() const -{ - bytes b(bytes::initialized_later(), size_bytes()); - auto it = b.begin(); - for (auto fragment : *this) { - it = boost::copy(fragment, it); - } - return b; -} - -template -template -inline decltype(auto) basic_value_view::with_linearized(Function&& fn) const -{ - bytes b; - bytes_view bv; - if (is_fragmented()) { - b = linearize(); - bv = b; - } else { - bv = _first_fragment; - } - return fn(bv); -} - -inline std::ostream& operator<<(std::ostream& os, value_view vv) -{ - using boost::range::for_each; - for_each(vv, [&os] (bytes_view fragment) { - os << fragment; - }); - return os; -} - -} diff --git a/db/commitlog/commitlog.cc b/db/commitlog/commitlog.cc index 5902cbc4db..8f20e19d62 100644 --- a/db/commitlog/commitlog.cc +++ b/db/commitlog/commitlog.cc @@ -79,6 +79,7 @@ #include "commitlog_entry.hh" #include "commitlog_extensions.hh" #include "service/priority_manager.hh" +#include "serializer.hh" #include #include diff --git a/db/hints/resource_manager.hh b/db/hints/resource_manager.hh index 895c91c808..f0e258e3ee 100644 --- a/db/hints/resource_manager.hh +++ b/db/hints/resource_manager.hh @@ -32,6 +32,7 @@ #include "gms/gossiper.hh" #include "utils/small_vector.hh" #include "lister.hh" +#include "enum_set.hh" namespace service { class storage_proxy; diff --git 
a/db/view/row_locking.cc b/db/view/row_locking.cc index 6e1d1e61b5..ee9d15b765 100644 --- a/db/view/row_locking.cc +++ b/db/view/row_locking.cc @@ -22,6 +22,8 @@ #include "row_locking.hh" #include "log.hh" +#include + static logging::logger mylog("row_locking"); row_locker::row_locker(schema_ptr s) diff --git a/db/view/view.cc b/db/view/view.cc index 98f319f5ea..cf658a489b 100644 --- a/db/view/view.cc +++ b/db/view/view.cc @@ -433,15 +433,7 @@ deletable_row& view_updates::get_view_row(const partition_key& base_key, const c default: auto& c = update.cells().cell_at(base_col->id); auto value_view = base_col->is_atomic() ? c.as_atomic_cell(cdef).value() : c.as_collection_mutation().data; - // FIXME: don't linearize. - // This is hard right now, because we are dealing with two different types: - // managed_bytes_view and data::basic_value_view, and we can't put both types in one - // container. - // If IMR transitions to managed_bytes_view, this should be revisited. - if (value_view.is_fragmented()) { - return managed_bytes_view(linearized_values.emplace_back(value_view.linearize())); - } - return value_view.first_fragment(); + return value_view; } }); auto& partition = partition_for(partition_key::from_range(_view->partition_key_columns() | get_value)); diff --git a/imr/alloc.hh b/imr/alloc.hh deleted file mode 100644 index 2535bce5d4..0000000000 --- a/imr/alloc.hh +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Copyright (C) 2018 ScyllaDB - */ - -/* - * This file is part of Scylla. - * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . - */ - -#pragma once - -#include "utils/chunked_vector.hh" -#include "utils/logalloc.hh" - -#include "imr/core.hh" -#include "imr/methods.hh" - -namespace imr { -namespace alloc { - -static const struct no_context_factory_t { - static no_context_t create(const void*) noexcept { return no_context; } -} no_context_factory; - -/// Deserialisation context factory -/// -/// Deserialisation contexts provide the IMR code with additional information -/// needed to deserialise an IMR object. Often the sources of that information -/// are both the object itself as well as some external state shared by multiple -/// IMR objects of the same type. -/// `context_factory` is a helper class for creating contexts it keeps the -/// shared state (e.g. per-schema information) and when given a pointer to a -/// IMR object creates a deserialisation context for it. -template -class context_factory { - std::tuple _state; -private: - template - Context create(const uint8_t* ptr, std::index_sequence) const noexcept { - return Context(ptr, std::get(_state)...); - } -public: - template - context_factory(Args&&... args) : _state(std::forward(args)...) { } - - context_factory(context_factory&) = default; - context_factory(const context_factory&) = default; - context_factory(context_factory&&) = default; - - Context create(const uint8_t* ptr) const noexcept { - return create(ptr, std::index_sequence_for()); - } -}; - -template -concept ContextFactory = requires(const T factory, const uint8_t* ptr) { - { factory.create(ptr) } noexcept; -}; - -static_assert(ContextFactory, - "no_context_factory_t has to meet ContextFactory constraints"); - -/// LSA migrator for IMR objects -/// -/// IMR objects may own memory and therefore moving and destroying them may -/// be non-trivial. 
This class implements an LSA migrator for an IMR objects -/// of type `Structure`. The deserialisation context needed to invoke the mover -/// is going to be created by the provided context factory `CtxFactory`. -template -requires ContextFactory -class lsa_migrate_fn final : public migrate_fn_type, CtxFactory { -public: - using structure = Structure; - - explicit lsa_migrate_fn(CtxFactory context_factory) - : migrate_fn_type(1) - , CtxFactory(std::move(context_factory)) - { } - - lsa_migrate_fn(lsa_migrate_fn&&) = delete; - lsa_migrate_fn(const lsa_migrate_fn&) = delete; - - lsa_migrate_fn& operator=(lsa_migrate_fn&&) = delete; - lsa_migrate_fn& operator=(const lsa_migrate_fn&) = delete; - - virtual void migrate(void* src_ptr, void* dst_ptr, size_t size) const noexcept override { - std::memcpy(dst_ptr, src_ptr, size); - auto dst = static_cast(dst_ptr); - methods::move(dst, CtxFactory::create(dst)); - } - - virtual size_t size(const void* obj_ptr) const noexcept override { - auto ptr = static_cast(obj_ptr); - return Structure::serialized_object_size(ptr, CtxFactory::create(ptr)); - } -}; - -// LSA migrator for objects which mover doesn't require a deserialisation context -template -struct default_lsa_migrate_fn { - static lsa_migrate_fn migrate_fn; -}; - -template -lsa_migrate_fn default_lsa_migrate_fn::migrate_fn(no_context_factory); - -/// IMR object allocator -/// -/// This is a helper class that helps creating IMR objects that may own memory. -/// The serialisation of IMR objects is done in two phases: -/// 1. IMR figures out the size of the object. `sizer` provided by `get_sizer()` -/// records the size of all necessary memory allocations. -/// `allocate_all()` is called and allocates memory for all owned objects. -/// 2. Data is written to the allocated memory. `serializer` returned by -/// `get_serializer()` provides pointers to the allocated buffers and handles -/// their serialisation. 
-class object_allocator { - union allocation { - static_assert(std::is_trivially_destructible_v>); - static_assert(std::is_trivially_destructible_v>); - private: - std::pair _allocation_request; - std::pair _allocated_object; - public: - explicit allocation(size_t n, allocation_strategy::migrate_fn fn) noexcept - : _allocation_request(std::make_pair(n, fn)) { } - - void allocate(allocation_strategy& allocator) { - auto ptr = allocator.alloc(_allocation_request.second, _allocation_request.first, 1); - _allocated_object = std::make_pair(_allocation_request.first, ptr); - } - - void free(allocation_strategy& allocator) noexcept { - allocator.free(_allocated_object.second, _allocated_object.first); - } - - void set_request_size(size_t n) noexcept { - _allocation_request.first = n; - } - - void* pointer() const noexcept { return _allocated_object.second; } - size_t size() const noexcept { return _allocated_object.first; } - }; - - allocation_strategy& _allocator; - std::vector _allocations; - size_t _position = 0; - bool _failed = false; -private: - size_t request(size_t n, allocation_strategy::migrate_fn migrate) noexcept { - auto id = _allocations.size(); - try { - _allocations.emplace_back(n, migrate); - } catch (...) 
{ - _failed = true; - } - return id; - } - void set_request_size(size_t id, size_t n) noexcept { - if (__builtin_expect(!_failed, true)) { - _allocations[id].set_request_size(n); - } - } - uint8_t* next_object() noexcept { - return static_cast(_allocations[_position++].pointer()); - } -public: - class sizer { - object_allocator& _parent; - public: - class continuation { - object_allocator& _parent; - size_t _idx; - public: - continuation(object_allocator& parent, size_t idx) noexcept - : _parent(parent), _idx(idx) { } - uint8_t* run(size_t size) noexcept { - _parent.set_request_size(_idx, size); - return nullptr; - } - }; - public: - explicit sizer(object_allocator& parent) noexcept - : _parent(parent) { } - - /// Request allocation of an IMR object - /// - /// This method request an allocation of an IMR object of type T. The - /// arguments are passed to `T::size_when_serialized`. - /// - /// \return null pointer of type `uint8_t*`. - template - uint8_t* allocate(MigrateFn* migrate_fn, Args&&... args) noexcept { - static_assert(std::is_same_v); - return do_allocate(migrate_fn, std::forward(args)...); - } - - template - auto allocate_nested(MigrateFn* migrate_fn, Args&&... args) noexcept { - static_assert(std::is_same_v); - return do_allocate_nested(migrate_fn, std::forward(args)...); - } - - private: - template - uint8_t* do_allocate(migrate_fn_type* migrate_fn, Args&&... args) noexcept { - auto size = T::size_when_serialized(std::forward(args)...); - _parent.request(size, migrate_fn); - - // We are in the sizing phase and only collect information about - // the size of the required objects. The serializer will return - // the real pointer to the memory buffer requested here, but since - // both sizer and serializer need to expose the same interface we - // need to return something from sizer as well even though the - // value will be ignored. - return nullptr; - } - - template - auto do_allocate_nested(migrate_fn_type* migrate_fn, Args&& ... 
args) noexcept { - auto n = _parent.request(0, migrate_fn); - return T::get_sizer(continuation(_parent, n), - std::forward(args)...); - } - }; - - class serializer { - object_allocator& _parent; - public: - class continuation { - uint8_t* _ptr; - public: - explicit continuation(uint8_t* ptr) noexcept : _ptr(ptr) { } - uint8_t* run(uint8_t*) noexcept { - return _ptr; - } - }; - public: - explicit serializer(object_allocator& parent) noexcept - : _parent(parent) { } - - /// Writes an IMR object to the preallocated buffer - /// - /// In the second serialisation phase this method writes an IMR object - /// to the buffer requested in the sizing phase. Arguments are passed - /// to `T::serialize`. - /// \return pointer to the IMR object - template - uint8_t* allocate(MigrateFn* migrate_fn, Args&&... args) noexcept { - static_assert(std::is_same_v); - return do_allocate(migrate_fn, std::forward(args)...); - } - - template - auto allocate_nested(MigrateFn* migrate_fn, Args&&... args) noexcept { - static_assert(std::is_same_v); - return do_allocate_nested(migrate_fn, std::forward(args)...); - } - - private: - template - uint8_t* do_allocate(migrate_fn_type* migrate_fn, Args&&... args) noexcept { - auto ptr = _parent.next_object(); - T::serialize(ptr, std::forward(args)...); - return ptr; - } - - template - auto do_allocate_nested(migrate_fn_type*, Args&& ... args) noexcept { - auto ptr = _parent.next_object(); - return T::get_serializer(ptr, - continuation(ptr), - std::forward(args)...); - } - }; - -public: - explicit object_allocator(allocation_strategy& allocator = current_allocator()) - : _allocator(allocator) { } - - size_t requested_allocations_count() const noexcept { return _allocations.size(); } - - /// Allocates all buffers requested in the sizing phase. - void allocate_all() { - if (__builtin_expect(_failed, false)) { - throw std::bad_alloc(); - } - auto it = _allocations.begin(); - try { - // TODO: Send a batch of allocations to the allocation strategy. 
- while (it != _allocations.end()) { - it->allocate(_allocator); - ++it; - } - } catch (...) { - while (it != _allocations.begin()) { - --it; - it->free(_allocator); - } - throw; - } - } - - sizer get_sizer() noexcept { return sizer(*this); } - serializer get_serializer() noexcept { return serializer(*this); } -}; - -} -} diff --git a/imr/compound.hh b/imr/compound.hh deleted file mode 100644 index c8928f084d..0000000000 --- a/imr/compound.hh +++ /dev/null @@ -1,591 +0,0 @@ -/* - * Copyright (C) 2018 ScyllaDB - */ - -/* - * This file is part of Scylla. - * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . - */ - -#pragma once - -#include -#include -#include - -#include "utils/meta.hh" - -#include "imr/core.hh" - -namespace imr { - -/// Optionally present object -/// -/// Represents a value that may be not present. Information whether or not -/// the optional is engaged is not stored and must be provided by external -/// context. 
-template -struct optional { - using underlying = Type; -public: - template<::mutable_view is_mutable> - class basic_view { - using pointer_type = std::conditional_t; - pointer_type _ptr; - public: - explicit basic_view(pointer_type ptr) noexcept : _ptr(ptr) { } - - operator basic_view<::mutable_view::no>() const noexcept { - return basic_view<::mutable_view::no>(_ptr); - } - - template - requires requires(const Context& ctx) { - { ctx.template context_for() } noexcept; - } - auto get(const Context& ctx = no_context) noexcept { - return Type::make_view(_ptr, ctx.template context_for(_ptr)); - } - }; - - using view = basic_view<::mutable_view::no>; - using mutable_view = basic_view<::mutable_view::yes>; -public: - template - static auto make_view(const uint8_t* in, const Context& ctx = no_context) noexcept { - return view(in); - } - template - static auto make_view(uint8_t* in, const Context& ctx = no_context) noexcept { - return mutable_view(in); - } -public: - template - requires requires(const Context& ctx) { - { ctx.template is_present() } noexcept -> std::same_as; - } - static size_t serialized_object_size(const uint8_t* in, const Context& context) noexcept { - return context.template is_present() - ? Type::serialized_object_size(in, context) - : 0; - } - - template - static size_t size_when_serialized(Args&&... args) noexcept { - return Type::size_when_serialized(std::forward(args)...); - } - - template - static size_t serialize(uint8_t* out, Args&&... 
args) noexcept { - return Type::serialize(out, std::forward(args)...); - } - - template - static auto get_sizer(Continuation cont = no_op_continuation()) { - return Type::get_sizer(std::move(cont)); - } - - template - static auto get_serializer(uint8_t* out, Continuation cont = no_op_continuation()) { - return Type::get_serializer(out, std::move(cont)); - } - -}; - -template -struct member { - using tag = Tag; - using type = Type; -}; - -namespace internal { - -template -struct do_find_member { - template - using type = std::is_same; -}; - -template -static constexpr auto get_member_index = meta::find_if::template type, Members...>; - -template -using get_member = meta::get, Members...>; - -template typename Function> -struct do_generate_branch_tree { - template - static decltype(auto) run(size_t n, Args&&... args) { - if constexpr (N == 1) { - return Function::run(std::forward(args)...); - } else if (N >= 2) { - if (n < Offset + N / 2) { - return do_generate_branch_tree::run(n, std::forward(args)...); - } else { - return do_generate_branch_tree::run(n, std::forward(args)...); - } - } - } -}; - -template typename Function> -using generate_branch_tree = do_generate_branch_tree<0, N, Function>; - -} - -template -struct variant { - class alternative_index { - size_t _index; - private: - constexpr explicit alternative_index(size_t idx) noexcept - : _index(idx) { } - - friend class variant; - public: - constexpr size_t index() const noexcept { return _index; } - }; - - template - constexpr static alternative_index index_for() noexcept { - return alternative_index(internal::get_member_index); - } -private: - template - struct alternative_visitor { - template - static decltype(auto) run(Visitor&& visitor) { - using member = typename meta::get; - return visitor(static_cast(nullptr)); - } - }; - - template - static decltype(auto) choose_alternative(alternative_index index, Visitor&& visitor) { - // For large sizeof...(Alternatives) a jump table may be the better option. 
- return internal::generate_branch_tree::run(index.index(), std::forward(visitor)); - } -public: - template<::mutable_view is_mutable> - class basic_view { - using pointer_type = std::conditional_t; - pointer_type _ptr; - public: - explicit basic_view(pointer_type ptr) noexcept - : _ptr(ptr) - { } - - pointer_type raw_pointer() const noexcept { return _ptr; } - - operator basic_view<::mutable_view::no>() const noexcept { - return basic_view<::mutable_view::no>(_ptr); - } - - template - auto as(const Context& context = no_context) noexcept { - using member = internal::get_member; - return member::type::make_view(_ptr, context.template context_for(_ptr)); - } - - template - decltype(auto) visit(Visitor&& visitor, const Context& context) { - auto alt_idx = context.template active_alternative_of(); - return choose_alternative(alt_idx, [&] (auto object) { - using type = std::remove_pointer_t; - return visitor(type::type::make_view(_ptr, context.template context_for(_ptr))); - }); - } - - template - decltype(auto) visit_type(Visitor&& visitor, const Context& context) { - auto alt_idx = context.template active_alternative_of(); - return choose_alternative(alt_idx, [&] (auto object) { - using type = std::remove_pointer_t; - return visitor(static_cast(nullptr)); - }); - } - }; - - using view = basic_view<::mutable_view::no>; - using mutable_view = basic_view<::mutable_view::yes>; - -public: - template - static view make_view(const uint8_t* in, const Context& context) noexcept { - return view(in); - } - - template - static mutable_view make_view(uint8_t* in, const Context& context) noexcept { - return mutable_view(in); - } - -public: - template - requires requires(const Context& ctx) { - { ctx.template active_alternative_of() } noexcept -> std::same_as; - } - static size_t serialized_object_size(const uint8_t* in, const Context& context) noexcept { - return choose_alternative(context.template active_alternative_of(), [&] (auto object) noexcept { - using alternative = 
std::remove_pointer_t; - return alternative::type::serialized_object_size(in, context.template context_for(in)); - }); - } - - template - static size_t size_when_serialized(Args&&... args) noexcept { - using member = internal::get_member; - return member::type::size_when_serialized(std::forward(args)...); - } - - template - static size_t serialize(uint8_t* out, Args&&... args) noexcept { - using member = internal::get_member; - return member::type::serialize(out, std::forward(args)...); - } - - template - static auto get_sizer(Continuation cont = no_op_continuation()) { - using member = internal::get_member; - return member::type::get_sizer(std::move(cont)); - } - - template - static auto get_serializer(uint8_t* out, Continuation cont = no_op_continuation()) { - using member = internal::get_member; - return member::type::get_serializer(out, std::move(cont)); - } -}; - -template -using optional_member = member>; - -template -using variant_member = member>; - -namespace internal { - -template -class structure_sizer : Continuation { - size_t _size; -public: - explicit structure_sizer(size_t size, Continuation&& cont) noexcept - : Continuation(std::move(cont)), _size(size) {} - - uint8_t* position() const noexcept { - // We are in the sizing phase and there is no object to point to yet. - // The serializer will return a real position in the destination buffer, - // but since sizer and serializer need to expose the same interface we - // need to return something even though the value will be ignored. 
- return nullptr; - } - - auto done() noexcept { return Continuation::run(_size); } -}; - -template -class structure_sizer_continuation : NestedContinuation { - size_t _size; -public: - explicit structure_sizer_continuation(size_t size, NestedContinuation&& cont) noexcept - : NestedContinuation(std::move(cont)), _size(size) {} - - structure_sizer run(size_t size) noexcept { - return structure_sizer(size + _size, - std::move(*static_cast(this))); - } -}; - -template -class basic_structure_sizer : protected Continuation { -protected: - size_t _size; - - using continuation = structure_sizer_continuation; -public: - explicit basic_structure_sizer(size_t size, Continuation&& cont) noexcept - : Continuation(std::move(cont)), _size(size) {} - uint8_t* position() const noexcept { return nullptr; } - template - structure_sizer serialize(Args&& ... args) noexcept { - auto size = Member::type::size_when_serialized(std::forward(args)...); - return structure_sizer(size + _size, std::move(*static_cast(this))); - } - template - auto serialize_nested(Args&& ... args) noexcept { - return Member::type::get_sizer(continuation(_size, std::move(*static_cast(this))), - std::forward(args)...); - } -}; - -template -struct structure_sizer - : basic_structure_sizer { - - using basic_structure_sizer::basic_structure_sizer; -}; - -template -struct structure_sizer, Members...> - : basic_structure_sizer, Members...> { - - using basic_structure_sizer, Members...>::basic_structure_sizer; - - structure_sizer skip() noexcept { - return structure_sizer(this->_size, std::move(*static_cast(this))); - } -}; - -template -struct structure_sizer, Members...> - : basic_structure_sizer, Members...> { - - using basic_structure_sizer, Members...>::basic_structure_sizer; - - template - structure_sizer serialize(Args&& ... args) noexcept = delete; - template - auto serialize_nested(Args&& ... args) noexcept = delete; - - template - structure_sizer serialize_as(Args&& ... 
args) noexcept { - using type = variant; - auto size = type::template size_when_serialized(std::forward(args)...); - return structure_sizer(size + this->_size, std::move(*static_cast(this))); - } - template - auto serialize_as_nested(Args&& ... args) noexcept { - using type = variant; - using cont_type = typename basic_structure_sizer, Members...>::continuation; - auto cont = cont_type(this->_size, std::move(*static_cast(this))); - return type::template get_sizer(std::move(cont), - std::forward(args)...); - } -}; - -template -class structure_serializer : Continuation { - uint8_t* _out; -public: - explicit structure_serializer(uint8_t* out, Continuation&& cont) noexcept - : Continuation(std::move(cont)), _out(out) {} - uint8_t* position() const noexcept { return _out; } - auto done() noexcept { return Continuation::run(_out); } -}; - -template -struct structure_serializer_continuation : private NestedContinuation { - explicit structure_serializer_continuation(NestedContinuation&& cont) noexcept - : NestedContinuation(std::move(cont)) {} - - structure_serializer run(uint8_t* out) noexcept { - return structure_serializer(out, - std::move(*static_cast(this))); - } -}; - -template -class basic_structure_serializer : protected Continuation { -protected: - uint8_t* _out; - - using continuation = structure_serializer_continuation; -public: - explicit basic_structure_serializer(uint8_t* out, Continuation&& cont) noexcept - : Continuation(std::move(cont)), _out(out) {} - uint8_t* position() const noexcept { return _out; } - template - structure_serializer serialize(Args&& ... args) noexcept { - auto size = Member::type::serialize(_out, std::forward(args)...); - return structure_serializer(_out + size, std::move(*static_cast(this))); - } - template - auto serialize_nested(Args&& ... 
args) noexcept { - return Member::type::get_serializer(_out, - continuation(std::move(*static_cast(this))), - std::forward(args)...); - } -}; - -template -struct structure_serializer - : basic_structure_serializer { - - using basic_structure_serializer::basic_structure_serializer; -}; - -template -struct structure_serializer, Members...> - : basic_structure_serializer, Members...> { - - using basic_structure_serializer, Members...>::basic_structure_serializer; - - structure_serializer skip() noexcept { - return structure_serializer(this->_out, - std::move(*static_cast(this))); - } - -}; - -template -struct structure_serializer, Members...> - : basic_structure_serializer, Members...> { - - using basic_structure_serializer, Members...>::basic_structure_serializer; - - template - structure_serializer serialize(Args&& ... args) noexcept = delete; - template - auto serialize_nested(Args&& ... args) noexcept = delete; - - template - structure_serializer serialize_as(Args&& ... args) noexcept { - using type = variant; - auto size = type::template serialize(this->_out, std::forward(args)...); - return structure_serializer(this->_out + size, - std::move(*static_cast(this))); - } - template - auto serialize_as_nested(Args&& ... args) noexcept { - using type = variant; - using cont_type = typename basic_structure_serializer, Members...>::continuation; - auto cont = cont_type(std::move(*static_cast(this))); - return type::template get_serializer(this->_out, - std::move(cont), - std::forward(args)...); - } -}; - -} - -// Represents a compound type. 
-template -struct structure { - template<::mutable_view is_mutable> - class basic_view { - using pointer_type = std::conditional_t; - pointer_type _ptr; - public: - template - explicit basic_view(pointer_type ptr, const Context& context) noexcept : _ptr(ptr) { } - - pointer_type raw_pointer() const noexcept { return _ptr; } - - operator basic_view<::mutable_view::no>() const noexcept { - return basic_view<::mutable_view::no>(_ptr, no_context); - } - - template - auto offset_of(const Context& context = no_context) const noexcept { - static constexpr auto idx = internal::get_member_index; - size_t total_size = 0; - meta::for_each>([&] (auto ptr) { - using member = std::remove_pointer_t; - auto offset = _ptr + total_size; - auto this_size = member::type::serialized_object_size(offset, context.template context_for(offset)); - total_size += this_size; - }); - return total_size; - } - - template - auto get(const Context& context = no_context) const noexcept { - using member = internal::get_member; - auto offset = _ptr + offset_of(context); - return member::type::make_view(offset, context.template context_for(offset)); - } - }; - - using view = basic_view<::mutable_view::no>; - using mutable_view = basic_view<::mutable_view::yes>; -public: - template - static view make_view(const uint8_t* in, const Context& context = no_context) noexcept { - return view(in, context); - } - template - static mutable_view make_view(uint8_t* in, const Context& context = no_context) noexcept { - return mutable_view(in, context); - } - -public: - template - static size_t serialized_object_size(const uint8_t* in, const Context& context = no_context) noexcept { - size_t total_size = 0; - meta::for_each([&] (auto ptr) noexcept { - using member = std::remove_pointer_t; - auto offset = in + total_size; - auto this_size = member::type::serialized_object_size(offset, context.template context_for(offset)); - total_size += this_size; - }); - return total_size; - } - - template - static 
internal::structure_sizer get_sizer(Continuation cont = no_op_continuation()) { - return internal::structure_sizer(0, std::move(cont)); - } - - template - static internal::structure_serializer get_serializer(uint8_t* out, Continuation cont = no_op_continuation()) { - return internal::structure_serializer(out, std::move(cont)); - } - - template - static size_t size_when_serialized(Writer&& writer, Args&&... args) noexcept { - return std::forward(writer)(get_sizer(), std::forward(args)...); - } - - template - static size_t serialize(uint8_t* out, Writer&& writer, Args&&... args) noexcept { - auto ptr = std::forward(writer)(get_serializer(out), std::forward(args)...); - return ptr - out; - } - - template - static size_t offset_of(const uint8_t* in, const Context& context = no_context) noexcept { - static constexpr auto idx = internal::get_member_index; - size_t total_size = 0; - meta::for_each>([&] (auto ptr) noexcept { - using member = std::remove_pointer_t; - auto offset = in + total_size; - auto this_size = member::type::serialized_object_size(offset, context.template context_for(offset)); - total_size += this_size; - }); - return total_size; - } - - template - static auto get_member(const uint8_t* in, const Context& context = no_context) noexcept { - auto off = offset_of(in, context); - using member = internal::get_member; - return member::type::make_view(in + off, context.template context_for(in + off)); - } - - template - static auto get_member(uint8_t* in, const Context& context = no_context) noexcept { - auto off = offset_of(in, context); - using member = internal::get_member; - return member::type::make_view(in + off, context.template context_for(in + off)); - } -}; - -template -struct tagged_type : T { }; - -} diff --git a/imr/concepts.hh b/imr/concepts.hh deleted file mode 100644 index b8650937e2..0000000000 --- a/imr/concepts.hh +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (C) 2018 ScyllaDB - */ - -/* - * This file is part of Scylla. 
- * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . - */ - -#pragma once - -#include -#include "imr/alloc.hh" -#include "imr/compound.hh" -#include "imr/fundamental.hh" - -namespace imr { - -/// Check if a type T is a sizer for Structure. -template -struct is_sizer_for : std::false_type { }; - -template -struct is_sizer_for, - internal::structure_sizer> - : std::true_type { }; - -template -constexpr bool is_sizer_for_v = is_sizer_for::value; - -/// Check if a type T is a serializer for Structure. -template -struct is_serializer_for : std::false_type { }; - -template -struct is_serializer_for, - internal::structure_serializer> - : std::true_type { }; - -template -constexpr bool is_serializer_for_v = is_serializer_for::value; - -/// The default sizer for Structure. -template -using default_sizer_t = decltype(Structure::get_sizer()); - -/// The default serializer for Structure. -template -using default_serializer_t = decltype(Structure::get_serializer(nullptr)); - -/// A simple writer that accepts only sizer or serializer as an argument. -template -concept WriterSimple = requires(Writer writer, default_sizer_t sizer, - default_serializer_t serializer) -{ - writer(sizer); - writer(serializer); -}; - -/// A writer that accepts both sizer or serializer and a memory allocator. 
-template -concept WriterAllocator = requires(Writer writer, default_sizer_t sizer, - default_serializer_t serializer, - imr::alloc::object_allocator::sizer alloc_sizer, - imr::alloc::object_allocator::serializer alloc_serializer) -{ - writer(sizer, alloc_sizer); - writer(serializer, alloc_serializer); -}; - -} diff --git a/imr/core.hh b/imr/core.hh deleted file mode 100644 index 8f70f8aa4f..0000000000 --- a/imr/core.hh +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (C) 2018 ScyllaDB - */ - -/* - * This file is part of Scylla. - * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . - */ - -#pragma once - -#include "utils/fragment_range.hh" - -namespace imr { - -/// No-op deserialisation context -/// -/// This is a dummy deserialisation context to be used when there is no need -/// for one, but the interface expects a context object. -static const struct no_context_t { - template - const no_context_t& context_for(Args&&...) const noexcept { return *this; } -} no_context; - -struct no_op_continuation { - template - static T run(T value) noexcept { - return value; - } -}; - -template -class placeholder { - uint8_t* _pointer = nullptr; -public: - placeholder() = default; - explicit placeholder(uint8_t* ptr) noexcept : _pointer(ptr) { } - void set_pointer(uint8_t* ptr) noexcept { _pointer = ptr; } - - template - void serialize(Args&&... 
args) noexcept { - if (!_pointer) { - // We lose the information whether we are in the sizing or - // serializing phase, hence the need for this run-time check. - return; - } - T::serialize(_pointer, std::forward(args)...); - } -}; - -} diff --git a/imr/fundamental.hh b/imr/fundamental.hh deleted file mode 100644 index 78fc876142..0000000000 --- a/imr/fundamental.hh +++ /dev/null @@ -1,308 +0,0 @@ -/* - * Copyright (C) 2018 ScyllaDB - */ - -/* - * This file is part of Scylla. - * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . - */ - -#pragma once - -#include - -#include -#include - -#include "bytes.hh" -#include "utils/meta.hh" - -#include "imr/core.hh" - -namespace imr { - -namespace internal { - -template -requires std::is_standard_layout_v && std::is_trivial_v && (sizeof(CharT) == 1) -inline T read_pod(const CharT* in) noexcept { - T obj; - std::copy_n(in, sizeof(T), reinterpret_cast(&obj)); - return obj; -} - -template -requires std::is_standard_layout_v && std::is_trivial_v && (sizeof(CharT) == 1) -inline void write_pod(T obj, CharT* out) noexcept { - std::copy_n(reinterpret_cast(&obj), sizeof(T), out); -} - -} - -template -class set_flag { - bool _value = true; -public: - set_flag() = default; - explicit set_flag(bool v) noexcept : _value(v) { } - bool value() const noexcept { return _value; } -}; - -/// Set of flags -/// -/// Represents a fixed-size set of tagged flags. 
-template -class flags { - static constexpr auto object_size = seastar::align_up(sizeof...(Tags), 8) / 8; -private: - template - static void do_set_or_clear(uint8_t* ptr, bool set) noexcept { - const auto idx = meta::find; - const auto byte_idx = idx / 8; - const auto bit_idx = idx % 8; - - auto value = ptr[byte_idx]; - value &= ~uint8_t(1 << bit_idx); - value |= uint8_t(set) << bit_idx; - ptr[byte_idx] = value; - } - - template - static bool do_get(const uint8_t* ptr) noexcept { - const auto idx = meta::find; - const auto byte_idx = idx / 8; - const auto bit_idx = idx % 8; - - return ptr[byte_idx] & (1 << bit_idx); - } -public: - template<::mutable_view is_mutable> - class basic_view { - using pointer_type = std::conditional_t; - pointer_type _ptr; - public: - explicit basic_view(pointer_type ptr) noexcept : _ptr(ptr) { } - - operator basic_view<::mutable_view::no>() const noexcept { - return basic_view<::mutable_view::no>(_ptr); - } - - template - bool get() const noexcept { - return do_get(_ptr); - } - - template - void set(bool value = true) noexcept { - do_set_or_clear(_ptr, value); - } - }; - - using view = basic_view<::mutable_view::no>; - using mutable_view = basic_view<::mutable_view::yes>; - -public: - template - static view make_view(const uint8_t* in, const Context& = no_context) noexcept { - return view(in); - } - template - static mutable_view make_view(uint8_t* in, const Context& = no_context) noexcept { - return mutable_view(in); - } - -public: - template - static size_t serialized_object_size(const uint8_t*, const Context& = no_context) noexcept { - return object_size; - } - - template - static size_t size_when_serialized(set_flag...) noexcept { - return object_size; - } - - template - static size_t serialize(uint8_t* out, set_flag... 
sfs) noexcept { - std::fill_n(out, object_size, 0); - (do_set_or_clear(out, sfs.value()), ...); - return object_size; - } - - static size_t size_when_serialized(placeholder>&) noexcept { - return object_size; - } - - static size_t serialize(uint8_t* out, placeholder>& phldr) noexcept { - phldr.set_pointer(out); - return object_size; - } -}; - -/// POD object -/// -/// Represents a fixed-size POD value. -template -requires std::is_standard_layout_v && std::is_trivial_v -struct pod { - using underlying = Type; - enum : size_t { - size = sizeof(Type), - }; - - template<::mutable_view is_mutable> - class basic_view { - using pointer_type = std::conditional_t; - pointer_type _ptr; - public: - explicit basic_view(pointer_type ptr) noexcept : _ptr(ptr) { } - - operator basic_view<::mutable_view::no>() const noexcept { - return basic_view<::mutable_view::no>(_ptr); - } - - Type load() const noexcept { - return internal::read_pod(_ptr); - } - - void store(const Type& object) noexcept { - internal::write_pod(object, _ptr); - } - }; - - using view = basic_view<::mutable_view::no>; - using mutable_view = basic_view<::mutable_view::yes>; - -public: - template - static view make_view(const uint8_t* in, const Context& = no_context) noexcept { - return view(in); - } - template - static mutable_view make_view(uint8_t* in, const Context& = no_context) noexcept { - return mutable_view(in); - } - -public: - template - static size_t serialized_object_size(const uint8_t*, const Context& = no_context) noexcept { - return sizeof(Type); - } - - static size_t size_when_serialized(const Type&) noexcept { - return sizeof(Type); - } - - static size_t serialize(uint8_t* out, const Type& value) noexcept { - internal::write_pod(value, out); - return sizeof(Type); - } - - static size_t size_when_serialized(placeholder>&) noexcept { - return sizeof(Type); - } - - static size_t serialize(uint8_t* out, placeholder>& phldr) noexcept { - phldr.set_pointer(out); - return sizeof(Type); - } -}; - -/// 
Buffer -/// -/// Represents an opaque buffer. The size of the buffer is not stored and must -/// be provided by external context. -/// A buffer can be created from a bytes_view, a fragments range or a -/// (size, serializer) pair. -template -struct buffer { - using view = bytes_view; - using mutable_view = bytes_mutable_view; - template<::mutable_view is_mutable> - using basic_view = std::conditional_t; - - template - requires requires(const Context& ctx) { - { ctx.template size_of() } noexcept -> std::same_as; - } - static view make_view(const uint8_t* in, const Context& context) noexcept { - auto ptr = reinterpret_cast(in); - return bytes_view(ptr, context.template size_of()); - } - - template - requires requires(const Context& ctx) { - { ctx.template size_of() } noexcept -> std::same_as; - } - static mutable_view make_view(uint8_t* in, const Context& context) noexcept { - auto ptr = reinterpret_cast(in); - return bytes_mutable_view(ptr, context.template size_of()); - } - -public: - template - requires requires(const Context& ctx) { - { ctx.template size_of() } noexcept -> std::same_as; - } - static size_t serialized_object_size(const uint8_t*, const Context& context) noexcept { - return context.template size_of(); - } - - static size_t size_when_serialized(bytes_view src) noexcept { - return src.size(); - } - - template - requires requires (Serializer ser, uint8_t* ptr) { - { ser(ptr) } noexcept; - } - static size_t size_when_serialized(size_t size, Serializer&&) noexcept { - return size; - } - - template>>> - static size_t size_when_serialized(FragmentRange&& fragments) { - return fragments.size_bytes(); - } - - static size_t serialize(uint8_t* out, bytes_view src) { - std::copy_n(src.begin(), src.size(), - reinterpret_cast(out)); - return src.size(); - } - - template>>> - static size_t serialize(uint8_t* out, FragmentRange&& fragments) { - auto dst = reinterpret_cast(out); - using boost::range::for_each; - for_each(fragments, [&] (bytes_view fragment) { - dst 
= std::copy(fragment.begin(), fragment.end(), dst); - }); - return fragments.size_bytes(); - } - - template - requires requires (Serializer ser, uint8_t* ptr) { - { ser(ptr) } noexcept; - } - static size_t serialize(uint8_t* out, size_t size, Serializer&& serializer) noexcept { - std::forward(serializer)(out); - return size; - } -}; - -} diff --git a/imr/methods.hh b/imr/methods.hh deleted file mode 100644 index 297852000d..0000000000 --- a/imr/methods.hh +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright (C) 2018 ScyllaDB - */ - -/* - * This file is part of Scylla. - * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . - */ - -#pragma once - -#include "compound.hh" - -namespace imr { -namespace methods { - -template typename Method> -struct trivial_method { - template - static void run(Args&&...) noexcept { } -}; - -template typename Method, typename T> -using has_trivial_method = std::is_base_of, Method>; - -namespace internal { - -template typename Method, typename...> -struct generate_method : trivial_method { }; - -template typename Method, typename Structure, typename... Tags, typename... Types> -struct generate_method...> { - template - static void run(uint8_t* ptr, const Context& context, Args&&... 
args) noexcept { - auto view = Structure::make_view(ptr, context); - meta::for_each...>([&] (auto member_type) { - using member = std::remove_pointer_t; - auto member_ptr = ptr + view.template offset_of(); - Method::run(member_ptr, - context.template context_for(member_ptr), - std::forward(args)...); - }); - } -}; - -template typename Method, typename Tag, typename Type> -struct generate_method> { - template - static void run(uint8_t* ptr, const Context& context, Args&&... args) noexcept { - if (context.template is_present()) { - Method::run(ptr, - context.template context_for(ptr), - std::forward(args)...); - } - } -}; - -template typename Method, typename Tag, typename... Members> -struct generate_method> { - template - static void run(uint8_t* ptr, const Context& context, Args&&... args) noexcept { - auto view = variant::make_view(ptr, context); - view.visit_type([&] (auto alternative_type) { - using member = std::remove_pointer_t; - Method::run(ptr, - context.template context_for(ptr), - std::forward(args)...); - }, context); - } -}; - -template typename Method> -struct member_has_trivial_method { - template - struct type; -}; -template typename Method> -template -struct member_has_trivial_method::type> : has_trivial_method { }; - -template typename Method, typename T> -struct get_method; - -template typename Method, typename... Members> -struct get_method> - : std::conditional_t::template type, Members...>, - trivial_method, - generate_method, Members...>> -{ }; - -template typename Method, typename Tag, typename Type> -struct get_method> - : std::conditional_t::value, - trivial_method, - generate_method>> -{ }; - -template typename Method, typename Tag, typename... 
Members> -struct get_method> - : std::conditional_t::template type, Members...>, - trivial_method, - generate_method>> -{ }; - -template typename Method, typename Tag, typename Type> -struct get_method> - : std::conditional_t::value, - trivial_method, - Method> -{ }; - -} - -template -struct destructor : trivial_method { }; -using trivial_destructor = trivial_method; - -template -using is_trivially_destructible = has_trivial_method; - -template -struct destructor> : internal::get_method> { }; - -template -struct destructor> : internal::get_method> { }; - -template -struct destructor> : internal::get_method> { }; - -template -void destroy(uint8_t* ptr, const Context& context = no_context) { - destructor::run(ptr, context); -} - -template -struct mover : trivial_method { }; -using trivial_mover = trivial_method; - -template -using is_trivially_movable = has_trivial_method; - -template -struct mover> : internal::get_method> { }; - -template -struct mover> : internal::get_method> { }; - -template -struct mover> : internal::get_method> { }; - -template -void move(uint8_t* ptr, const Context& context = no_context) { - mover::run(ptr, context); -} - -} -} diff --git a/imr/utils.hh b/imr/utils.hh deleted file mode 100644 index d169e97ce9..0000000000 --- a/imr/utils.hh +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Copyright (C) 2018 ScyllaDB - */ - -/* - * This file is part of Scylla. - * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. 
If not, see . - */ - -#pragma once - -#include - -#include "imr/core.hh" -#include "imr/alloc.hh" -#include "imr/concepts.hh" - -namespace imr { -namespace utils { - -class basic_object { -public: - struct tags { - class back_pointer { }; - class object { }; - }; -protected: - uint8_t* _data = nullptr; - - friend struct methods::mover>>; -protected: - explicit basic_object(uint8_t* ptr) noexcept : _data(ptr) { } - - void set_data(uint8_t* ptr) noexcept { _data = ptr; } -public: - basic_object() = default; - basic_object(basic_object&& other) noexcept : _data(std::exchange(other._data, nullptr)) { } - basic_object(const basic_object&) = delete; -}; - -template -class object_context { - std::tuple _state; -private: - template - Context create(const uint8_t* ptr, std::index_sequence) const noexcept { - return Context(ptr, std::get(_state)...); - } -public: - object_context(const uint8_t*, State... state) : _state { state... } { } - template - auto context_for(const uint8_t* ptr, Args&&... args) const noexcept { - if constexpr (std::is_same_v) { - return no_context_t(); - } else { - return create(ptr, std::index_sequence_for()); - } - } -}; - -} - -namespace methods { - -template<> -struct mover>> { - static void run(uint8_t* ptr, ...) { - auto bptr = imr::tagged_type>::make_view(ptr).load(); - bptr->_data = ptr; - } -}; - -} - -namespace utils { - -/// Unique pointer to an IMR object -/// -/// This is an LSA-aware unique-owner pointer to an IMR object. 
-template -class object : public basic_object { -public: - using structure = imr::structure< - imr::member>>, - imr::member - >; - static constexpr size_t size_overhead = sizeof(basic_object*); -private: - explicit object(uint8_t* ptr) noexcept - : basic_object(ptr) - { - structure::template get_member(_data).store(this); - } -public: - object() = default; - object(object&& other) noexcept : basic_object(std::move(other)) { - if (_data) { - structure::template get_member(_data).store(this); - } - } - - object& operator=(object&& other) noexcept { - swap(other); - return *this; - } - - ~object() { - if (_data) { - imr::methods::destroy(_data); - current_allocator().free(_data); - } - } - - void swap(object& other) noexcept { - std::swap(_data, other._data); - if (_data) { - structure::template get_member(_data).store(this); - } - if (other._data) { - structure::template get_member(other._data).store(&other); - } - } - - explicit operator bool() const noexcept { return bool(_data); } - - uint8_t* get() noexcept { return _data ? _data + structure::template offset_of(_data) : nullptr; } - const uint8_t* get() const noexcept { return _data ? _data + structure::template offset_of(_data) : nullptr; } - - /// Creates an IMR object from a raw writer - /// - /// This low-level function creates an IMR object owned by `object` using - /// a raw writer (i.e. does not necessarily follow the standard IMR - /// serialisation process). This is useful for fast copying of trivial - /// IMR objects. - /// - /// \note This function could be deprecated once the IMR starts supporting - /// copying IMR objects. 
- template - requires requires (RawWriter wr, uint8_t* ptr) { - { wr(ptr) } noexcept; - } - static object make_raw(size_t len, RawWriter&& wr, allocation_strategy::migrate_fn migrate = &imr::alloc::default_lsa_migrate_fn::migrate_fn) { - object obj; - auto ptr = static_cast(current_allocator().alloc(migrate, sizeof(void*) + len, 1)); - wr(ptr + sizeof(void*)); - auto view = structure::make_view(ptr); - view.template get().store(&obj); - obj.set_data(ptr); - return obj; - } - - /// Create an IMR objects - template - requires WriterAllocator - static object make(Writer&& object_writer, - MigrateFn* migrate = &imr::alloc::default_lsa_migrate_fn::migrate_fn) { - static_assert(std::is_same_v); - return do_make(std::forward(object_writer), migrate); - } -private: - template - requires WriterAllocator - static object do_make(Writer&& object_writer, allocation_strategy::migrate_fn migrate) { - struct alloc_deleter { - size_t _size; - - void operator()(uint8_t* ptr) { - current_allocator().free(ptr, _size); - } - }; - using alloc_unique_ptr = std::unique_ptr; - - auto writer = [&object_writer] (auto&& ser, auto&& alloc) { - return object_writer(ser.serialize(nullptr).serialize_nested(), alloc).done(); - }; - - auto& alloc = current_allocator(); - alloc::object_allocator allocator(alloc); - auto obj_size = structure::size_when_serialized(writer, allocator.get_sizer()); - auto ptr = alloc_unique_ptr(static_cast(alloc.alloc(migrate, obj_size, 1)), alloc_deleter { obj_size }); - allocator.allocate_all(); - structure::serialize(ptr.get(), writer, allocator.get_serializer()); - return object(ptr.release()); - } -}; - -} - -} diff --git a/lua.cc b/lua.cc index 0ba75d77cf..2fc14cf3b6 100644 --- a/lua.cc +++ b/lua.cc @@ -25,6 +25,7 @@ #include "utils/utf8.hh" #include "utils/ascii.hh" #include "utils/date.h" +#include #include // Lua 5.4 added an extra parameter to lua_resume diff --git a/mutation_partition.cc b/mutation_partition.cc index 5f7ecb4315..ee89e1f58e 100644 --- 
a/mutation_partition.cc +++ b/mutation_partition.cc @@ -711,7 +711,7 @@ void write_cell(RowWriter& w, const query::partition_slice& slice, ::atomic_cell } else { return std::move(wr).skip_expiry(); } - }().write_fragmented_value(c.value()); + }().write_fragmented_value(fragment_range(c.value())); [&, wr = std::move(after_value)] () mutable { if (slice.options.contains() && c.is_live_and_has_ttl()) { return std::move(wr).write_ttl(c.ttl()); @@ -738,7 +738,7 @@ void write_cell(RowWriter& w, const query::partition_slice& slice, data_type typ template void write_counter_cell(RowWriter& w, const query::partition_slice& slice, ::atomic_cell_view c) { assert(c.is_live()); - counter_cell_view::with_linearized(c, [&] (counter_cell_view ccv) { + auto ccv = counter_cell_view(c); auto wr = w.add().write(); [&, wr = std::move(wr)] () mutable { if (slice.options.contains()) { @@ -750,7 +750,6 @@ void write_counter_cell(RowWriter& w, const query::partition_slice& slice, ::ato .write_value(counter_cell_view::total_value_type()->decompose(ccv.total_value())) .skip_ttl() .end_qr_cell(); - }); } template diff --git a/mutation_partition_serializer.cc b/mutation_partition_serializer.cc index af33aa0cbd..9e0ea234de 100644 --- a/mutation_partition_serializer.cc +++ b/mutation_partition_serializer.cc @@ -45,7 +45,7 @@ template auto write_live_cell(Writer&& writer, atomic_cell_view c) { return std::move(writer).write_created_at(c.timestamp()) - .write_fragmented_value(c.value()) + .write_fragmented_value(fragment_range(c.value())) .end_live_cell(); } @@ -60,14 +60,13 @@ auto write_counter_cell(Writer&& writer, atomic_cell_view c) .write_delta(delta) .end_counter_cell_update(); } else { - return counter_cell_view::with_linearized(c, [&] (counter_cell_view ccv) { + auto ccv = counter_cell_view(c); auto shards = std::move(value).start_value_counter_cell_full() .start_shards(); for (auto csv : ccv.shards()) { shards.add_shards(counter_shard(csv)); } return 
std::move(shards).end_shards().end_counter_cell_full(); - }); } }().end_counter_cell(); } @@ -79,7 +78,7 @@ auto write_expiring_cell(Writer&& writer, atomic_cell_view c) .write_expiry(c.expiry()) .start_c() .write_created_at(c.timestamp()) - .write_fragmented_value(c.value()) + .write_fragmented_value(fragment_range(c.value())) .end_c() .end_expiring_cell(); } diff --git a/repair/repair.hh b/repair/repair.hh index 13a0b6b392..8c3dd6f72d 100644 --- a/repair/repair.hh +++ b/repair/repair.hh @@ -28,6 +28,7 @@ #include #include #include +#include #include "database_fwd.hh" #include "frozen_mutation.hh" diff --git a/service/migration_manager.hh b/service/migration_manager.hh index 67568ddc1c..5cf9f2c4d2 100644 --- a/service/migration_manager.hh +++ b/service/migration_manager.hh @@ -46,6 +46,7 @@ #include "gms/endpoint_state.hh" #include #include +#include #include "gms/inet_address.hh" #include "gms/feature.hh" #include "message/msg_addr.hh" diff --git a/sstables/compaction_manager.hh b/sstables/compaction_manager.hh index df43b21d0f..5b20e0f462 100644 --- a/sstables/compaction_manager.hh +++ b/sstables/compaction_manager.hh @@ -30,6 +30,7 @@ #include #include #include +#include #include "log.hh" #include "utils/exponential_backoff_retry.hh" #include diff --git a/sstables/kl/writer.cc b/sstables/kl/writer.cc index 76925c4a24..20fde50caa 100644 --- a/sstables/kl/writer.cc +++ b/sstables/kl/writer.cc @@ -182,13 +182,12 @@ void sstable_writer_k_l::write_cell(file_writer& out, atomic_cell_view cell, con column_mask mask = column_mask::counter; write(_version, out, mask, int64_t(0), timestamp); - counter_cell_view::with_linearized(cell, [&] (counter_cell_view ccv) { + auto ccv = counter_cell_view(cell); write_counter_value(ccv, out, _version, [v = _version] (file_writer& out, uint32_t value) { return write(v, out, value); }); _c_stats.update_local_deletion_time(std::numeric_limits::max()); - }); } else if (cell.is_live_and_has_ttl()) { // expiring cell diff --git 
a/sstables/mx/writer.cc b/sstables/mx/writer.cc index 61c438a0a7..02178ad547 100644 --- a/sstables/mx/writer.cc +++ b/sstables/mx/writer.cc @@ -1133,10 +1133,9 @@ void writer::write_cell(bytes_ostream& writer, const clustering_key_prefix* clus if (cdef.is_counter()) { if (!is_deleted) { assert(!cell.is_counter_update()); - counter_cell_view::with_linearized(cell, [&] (counter_cell_view ccv) { - write_counter_value(ccv, writer, _sst.get_version(), [] (bytes_ostream& out, uint32_t value) { - return write_vint(out, value); - }); + auto ccv = counter_cell_view(cell); + write_counter_value(ccv, writer, _sst.get_version(), [] (bytes_ostream& out, uint32_t value) { + return write_vint(out, value); }); } } else { diff --git a/sstables/writer.hh b/sstables/writer.hh index fce35e2014..02c83dfb7e 100644 --- a/sstables/writer.hh +++ b/sstables/writer.hh @@ -29,7 +29,6 @@ #include "vint-serialization.hh" #include #include "version.hh" -#include "data/value_view.hh" #include "counters.hh" #include "service/storage_service.hh" @@ -375,10 +374,11 @@ inline void write(sstable_version_types v, file_writer& out, const disk_string_v template inline void write(sstable_version_types ver, file_writer& out, const disk_data_value_view& v) { SizeType length; - check_truncate_and_assign(length, v.value.size_bytes()); + check_truncate_and_assign(length, v.value.size()); write(ver, out, length); - using boost::range::for_each; - for_each(v.value, [&] (bytes_view fragment) { write(ver, out, fragment); }); + for (bytes_view frag : fragment_range(v.value)) { + write(ver, out, frag); + } } template @@ -576,18 +576,6 @@ void write_cell_value(sstable_version_types v, W& out, const abstract_type& type } } -template -requires Writer -void write_cell_value(sstable_version_types v, W& out, const abstract_type& type, atomic_cell_value_view value) { - if (!value.empty()) { - if (!type.value_length_if_fixed()) { - write_vint(out, value.size_bytes()); - } - using boost::range::for_each; - for_each(value, 
[&] (bytes_view fragment) { write(v, out, fragment); }); - } -} - template requires Writer void write_counter_value(counter_cell_view ccv, W& out, sstable_version_types v, WriteLengthFunc&& write_len_func) { diff --git a/test/boost/counter_test.cc b/test/boost/counter_test.cc index 3b2f141662..389eaac100 100644 --- a/test/boost/counter_test.cc +++ b/test/boost/counter_test.cc @@ -70,7 +70,8 @@ SEASTAR_TEST_CASE(test_counter_cell) { auto c1 = atomic_cell_or_collection(b1.build(0)); atomic_cell_or_collection c2; - counter_cell_view::with_linearized(c1.as_atomic_cell(cdef), [&] (counter_cell_view cv) { + { + counter_cell_view cv(c1.as_atomic_cell(cdef)); BOOST_REQUIRE_EQUAL(cv.total_value(), 1); verify_shard_order(cv); @@ -78,18 +79,20 @@ SEASTAR_TEST_CASE(test_counter_cell) { b2.add_shard(counter_shard(*cv.get_shard(id[0])).update(2, 1)); b2.add_shard(counter_shard(id[2], 1, 1)); c2 = atomic_cell_or_collection(b2.build(0)); - }); + } - counter_cell_view::with_linearized(c2.as_atomic_cell(cdef), [&] (counter_cell_view cv) { + { + counter_cell_view cv(c2.as_atomic_cell(cdef)); BOOST_REQUIRE_EQUAL(cv.total_value(), 8); verify_shard_order(cv); - }); + } counter_cell_view::apply(cdef, c1, c2); - counter_cell_view::with_linearized(c1.as_atomic_cell(cdef), [&] (counter_cell_view cv) { + { + counter_cell_view cv(c1.as_atomic_cell(cdef)); BOOST_REQUIRE_EQUAL(cv.total_value(), 4); verify_shard_order(cv); - }); + } }); } @@ -102,10 +105,11 @@ SEASTAR_TEST_CASE(test_apply) { auto src = b.copy(*cdef.type); counter_cell_view::apply(cdef, dst, src); - counter_cell_view::with_linearized(dst.as_atomic_cell(cdef), [&] (counter_cell_view cv) { + { + counter_cell_view cv(dst.as_atomic_cell(cdef)); BOOST_REQUIRE_EQUAL(cv.total_value(), value); BOOST_REQUIRE_EQUAL(cv.timestamp(), std::max(dst.as_atomic_cell(cdef).timestamp(), src.as_atomic_cell(cdef).timestamp())); - }); + } }; auto id = generate_ids(5); @@ -241,17 +245,19 @@ SEASTAR_TEST_CASE(test_counter_mutations) { m.apply(m2); auto 
ac = get_counter_cell(m); BOOST_REQUIRE(ac.is_live()); - counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) { + { + counter_cell_view ccv(ac); BOOST_REQUIRE_EQUAL(ccv.total_value(), -102); verify_shard_order(ccv); - }); + } ac = get_static_counter_cell(m); BOOST_REQUIRE(ac.is_live()); - counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) { + { + counter_cell_view ccv(ac); BOOST_REQUIRE_EQUAL(ccv.total_value(), 20); verify_shard_order(ccv); - }); + } m.apply(m3); ac = get_counter_cell(m); @@ -271,32 +277,36 @@ SEASTAR_TEST_CASE(test_counter_mutations) { m = mutation(s, m1.decorated_key(), m1.partition().difference(s, m2.partition())); ac = get_counter_cell(m); BOOST_REQUIRE(ac.is_live()); - counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) { + { + counter_cell_view ccv(ac); BOOST_REQUIRE_EQUAL(ccv.total_value(), 2); verify_shard_order(ccv); - }); + } ac = get_static_counter_cell(m); BOOST_REQUIRE(ac.is_live()); - counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) { + { + counter_cell_view ccv(ac); BOOST_REQUIRE_EQUAL(ccv.total_value(), 11); verify_shard_order(ccv); - }); + } m = mutation(s, m1.decorated_key(), m2.partition().difference(s, m1.partition())); ac = get_counter_cell(m); BOOST_REQUIRE(ac.is_live()); - counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) { + { + counter_cell_view ccv(ac); BOOST_REQUIRE_EQUAL(ccv.total_value(), -105); verify_shard_order(ccv); - }); + } ac = get_static_counter_cell(m); BOOST_REQUIRE(ac.is_live()); - counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) { + { + counter_cell_view ccv(ac); BOOST_REQUIRE_EQUAL(ccv.total_value(), 9); verify_shard_order(ccv); - }); + } m = mutation(s, m1.decorated_key(), m1.partition().difference(s, m3.partition())); BOOST_REQUIRE_EQUAL(m.partition().clustered_rows().calculate_size(), 0); @@ -434,34 +444,38 @@ SEASTAR_TEST_CASE(test_transfer_updates_to_shards) { auto ac = get_counter_cell(m); 
BOOST_REQUIRE(ac.is_live()); - counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) { + { + counter_cell_view ccv(ac); BOOST_REQUIRE_EQUAL(ccv.total_value(), 5); verify_shard_order(ccv); - }); + } ac = get_static_counter_cell(m); BOOST_REQUIRE(ac.is_live()); - counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) { + { + counter_cell_view ccv(ac); BOOST_REQUIRE_EQUAL(ccv.total_value(), 4); verify_shard_order(ccv); - }); + } m = m2; transform_counter_updates_to_shards(m, &m0, 0, utils::UUID{}); ac = get_counter_cell(m); BOOST_REQUIRE(ac.is_live()); - counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) { + { + counter_cell_view ccv(ac); BOOST_REQUIRE_EQUAL(ccv.total_value(), 14); verify_shard_order(ccv); - }); + } ac = get_static_counter_cell(m); BOOST_REQUIRE(ac.is_live()); - counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) { + { + counter_cell_view ccv(ac); BOOST_REQUIRE_EQUAL(ccv.total_value(), 12); verify_shard_order(ccv); - }); + } m = m3; transform_counter_updates_to_shards(m, &m0, 0, utils::UUID{}); @@ -519,14 +533,14 @@ SEASTAR_TEST_CASE(test_sanitize_corrupted_cells) { auto c2 = atomic_cell_or_collection(b2.build(0)); // Compare - counter_cell_view::with_linearized(c1.as_atomic_cell(cdef), [&] (counter_cell_view cv1) { - counter_cell_view::with_linearized(c2.as_atomic_cell(cdef), [&] (counter_cell_view cv2) { + { + counter_cell_view cv1(c1.as_atomic_cell(cdef)); + counter_cell_view cv2(c2.as_atomic_cell(cdef)); BOOST_REQUIRE_EQUAL(cv1, cv2); BOOST_REQUIRE_EQUAL(cv1.total_value(), cv2.total_value()); verify_shard_order(cv1); verify_shard_order(cv2); - }); - }); + } } }); } diff --git a/test/boost/imr_test.cc b/test/boost/imr_test.cc deleted file mode 100644 index a8e91906b1..0000000000 --- a/test/boost/imr_test.cc +++ /dev/null @@ -1,847 +0,0 @@ -/* - * Copyright (C) 2018 ScyllaDB - */ -/* - * This file is part of Scylla. 
- * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . - */ - -#include -#include - -#include -#include - -#include -#include -#include - -#include - -#include "imr/fundamental.hh" -#include "imr/compound.hh" -#include "imr/methods.hh" -#include "imr/utils.hh" - -#include "test/lib/failure_injecting_allocation_strategy.hh" -#include "utils/logalloc.hh" - -#include "test/lib/random_utils.hh" - -static constexpr auto random_test_iteration_count = 20; - -class A; -class B; -class C; -class D; - -BOOST_AUTO_TEST_SUITE(fundamental); - -template -struct generate_flags_type; - -template -struct generate_flags_type, std::index_sequence> { - using type = imr::flags..., - B, std::integral_constant..., C>; -}; - -SEASTAR_THREAD_TEST_CASE(test_flags) { - using flags_type = generate_flags_type, std::make_index_sequence<8>>::type; - static constexpr size_t expected_size = 3; - - BOOST_CHECK_EQUAL(flags_type::size_when_serialized(), expected_size); - BOOST_CHECK_EQUAL(flags_type::size_when_serialized(imr::set_flag(), - imr::set_flag(), - imr::set_flag()), expected_size); - - uint8_t buffer[expected_size]; - std::fill_n(buffer, expected_size, 0xbe); - BOOST_CHECK_EQUAL(flags_type::serialize(buffer, imr::set_flag()), expected_size); - - auto mview = flags_type::make_view(buffer); - BOOST_CHECK(!mview.get()); - BOOST_CHECK(mview.get()); - BOOST_CHECK(!mview.get()); - - mview.set(); - mview.set(false); - BOOST_CHECK(mview.get()); 
- BOOST_CHECK(!mview.get()); - BOOST_CHECK(!mview.get()); - - flags_type::view view = mview; - mview.set(); - BOOST_CHECK(view.get()); - BOOST_CHECK(!view.get()); - BOOST_CHECK(view.get()); - - BOOST_CHECK_EQUAL(flags_type::serialized_object_size(buffer), expected_size); - - int some_context; - BOOST_CHECK_EQUAL(flags_type::serialized_object_size(buffer, some_context), expected_size); - - std::fill_n(buffer, expected_size, 0xff); - BOOST_CHECK_EQUAL(flags_type::serialize(buffer), expected_size); - BOOST_CHECK(!mview.get()); - BOOST_CHECK(!mview.get()); - BOOST_CHECK(!mview.get()); -} - -struct test_pod_type { - int32_t x; - uint64_t y; - - friend bool operator==(const test_pod_type& a, const test_pod_type& b) { - return a.x == b.x && a.y == b.y; - } - friend std::ostream& operator<<(std::ostream& os, const test_pod_type& obj) { - return os << "test_pod_type { x: " << obj.x << ", y: " << obj.y << " }"; - } -}; - -SEASTAR_THREAD_TEST_CASE(test_pod) { - auto generate_object = [] { - std::uniform_int_distribution dist_x; - std::uniform_int_distribution dist_y; - return test_pod_type { dist_x(tests::random::gen()), dist_y(tests::random::gen()) }; - }; - using pod_type = imr::pod; - - uint8_t buffer[pod_type::size]; - for (auto i = 0; i < random_test_iteration_count; i++) { - auto obj = generate_object(); - - BOOST_CHECK_EQUAL(pod_type::size_when_serialized(obj), pod_type::size); - BOOST_CHECK_EQUAL(pod_type::serialize(buffer, obj), pod_type::size); - - - BOOST_CHECK_EQUAL(pod_type::serialized_object_size(buffer), pod_type::size); - int some_context; - BOOST_CHECK_EQUAL(pod_type::serialized_object_size(buffer, some_context), pod_type::size); - - auto mview = pod_type::make_view(buffer); - pod_type::view view = mview; - - BOOST_CHECK_EQUAL(mview.load(), obj); - BOOST_CHECK_EQUAL(view.load(), obj); - - auto obj2 = generate_object(); - mview.store(obj2); - - BOOST_CHECK_EQUAL(mview.load(), obj2); - BOOST_CHECK_EQUAL(view.load(), obj2); - } -} - -class test_buffer_context { 
- size_t _size; -public: - explicit test_buffer_context(size_t sz) : _size(sz) { } - - template - size_t size_of() const noexcept; -}; - -template<> -size_t test_buffer_context::size_of() const noexcept { - return _size; -} - -SEASTAR_THREAD_TEST_CASE(test_buffer) { - using buffer_type = imr::buffer; - - auto test = [] (auto serialize) { - auto data = tests::random::get_bytes(); - auto size = data.size(); - - auto buffer = std::make_unique(size); - - serialize(buffer.get(), size, data); - - const auto ctx = test_buffer_context(size); - BOOST_CHECK_EQUAL(buffer_type::serialized_object_size(buffer.get(), ctx), size); - - BOOST_CHECK(boost::range::equal(buffer_type::make_view(buffer.get(), ctx), data)); - BOOST_CHECK(boost::range::equal(buffer_type::make_view(const_cast(buffer.get()), ctx), data)); - - BOOST_CHECK_EQUAL(buffer_type::make_view(buffer.get(), ctx).size(), size); - }; - - for (auto i = 0; i < random_test_iteration_count; i++) { - test([] (uint8_t* out, size_t size, const bytes& data) { - BOOST_CHECK_EQUAL(buffer_type::size_when_serialized(data), size); - BOOST_CHECK_EQUAL(buffer_type::serialize(out, data), size); - }); - - test([] (uint8_t* out, size_t size, const bytes& data) { - auto serializer = [&data] (uint8_t* out) noexcept { - boost::range::copy(data, out); - }; - BOOST_CHECK_EQUAL(buffer_type::size_when_serialized(size, serializer), size); - BOOST_CHECK_EQUAL(buffer_type::serialize(out, size, serializer), size); - }); - } -} - -BOOST_AUTO_TEST_SUITE_END(); - -BOOST_AUTO_TEST_SUITE(compound); - -struct test_optional_context { - template - bool is_present() const noexcept; - - template - decltype(auto) context_for(Args&&...) 
const noexcept { return *this; } -}; -template<> -bool test_optional_context::is_present() const noexcept { - return true; -} -template<> -bool test_optional_context::is_present() const noexcept { - return false; -} - -SEASTAR_THREAD_TEST_CASE(test_optional) { - using optional_type1 = imr::optional>; - using optional_type2 = imr::optional>; - - for (auto i = 0; i < random_test_iteration_count; i++) { - auto value = tests::random::get_int(); - auto expected_size = imr::pod::size_when_serialized(value); - - auto buffer = std::make_unique(expected_size); - - BOOST_CHECK_EQUAL(optional_type1::size_when_serialized(value), expected_size); - BOOST_CHECK_EQUAL(optional_type1::serialize(buffer.get(), value), expected_size); - - BOOST_CHECK_EQUAL(optional_type1::serialized_object_size(buffer.get(), test_optional_context()), expected_size); - BOOST_CHECK_EQUAL(optional_type2::serialized_object_size(buffer.get(), test_optional_context()), 0); - - auto view = optional_type1::make_view(buffer.get()); - BOOST_CHECK_EQUAL(view.get().load(), value); - } -} - - -static constexpr auto data_size = 128; -using variant_type = imr::variant>, - imr::member>, - imr::member>>; - -struct test_variant_context { - unsigned _alternative_idx; -public: - template - size_t size_of() const noexcept; - - template - auto active_alternative_of() const noexcept; - - template - decltype(auto) context_for(Args&&...) 
const noexcept { return *this; } -}; - -template<> -size_t test_variant_context::size_of() const noexcept { - return data_size; -} - -template<> -auto test_variant_context::active_alternative_of() const noexcept { - switch (_alternative_idx) { - case 0: - return variant_type::index_for(); - case 1: - return variant_type::index_for(); - case 2: - return variant_type::index_for(); - default: - BOOST_FAIL("should not reach"); - abort(); - } -} - -SEASTAR_THREAD_TEST_CASE(test_variant) { - for (auto i = 0; i < random_test_iteration_count; i++) { - unsigned alternative_idx = tests::random::get_int(2); - - uint64_t uinteger = tests::random::get_int(); - int64_t integer = tests::random::get_int(); - bytes data = tests::random::get_bytes(data_size); - - const size_t expected_size = alternative_idx == 0 - ? imr::pod::size_when_serialized(uinteger) - : (alternative_idx == 1 ? data_size : sizeof(int64_t)); - - auto buffer = std::make_unique(expected_size); - - if (!alternative_idx) { - BOOST_CHECK_EQUAL(variant_type::size_when_serialized(uinteger), expected_size); - BOOST_CHECK_EQUAL(variant_type::serialize(buffer.get(), uinteger), expected_size); - } else if (alternative_idx == 1) { - BOOST_CHECK_EQUAL(variant_type::size_when_serialized(data), expected_size); - BOOST_CHECK_EQUAL(variant_type::serialize(buffer.get(), data), expected_size); - } else { - BOOST_CHECK_EQUAL(variant_type::size_when_serialized(integer), expected_size); - BOOST_CHECK_EQUAL(variant_type::serialize(buffer.get(), integer), expected_size); - } - - auto ctx = test_variant_context { alternative_idx }; - - BOOST_CHECK_EQUAL(variant_type::serialized_object_size(buffer.get(), ctx), expected_size); - - auto view = variant_type::make_view(buffer.get(), ctx); - bool visitor_was_called = false; - view.visit(make_visitor( - [&] (imr::pod::view val) { - visitor_was_called = true; - if (alternative_idx == 0) { - BOOST_CHECK_EQUAL(val.load(), uinteger); - } else { - BOOST_FAIL("wrong variant alternative (B)"); - } - 
}, - [&] (imr::buffer::view buf) { - visitor_was_called = true; - if (alternative_idx == 1) { - BOOST_CHECK(boost::equal(data, buf)); - } else { - BOOST_FAIL("wrong variant alternative (C)"); - } - }, - [&] (imr::pod::view val) { - visitor_was_called = true; - if (alternative_idx == 2) { - BOOST_CHECK_EQUAL(val.load(), integer); - } else { - BOOST_FAIL("wrong variant alternative (D)"); - } - } - ), ctx); - BOOST_CHECK(visitor_was_called); - } -} - -SEASTAR_THREAD_TEST_CASE(test_structure_with_fixed) { - using S = imr::structure>, - imr::member>, - imr::member>>; - static constexpr auto expected_size = sizeof(uint8_t) + sizeof(uint64_t) - + sizeof(uint32_t); - - for (auto i = 0; i < random_test_iteration_count; i++) { - auto a = tests::random::get_int(); - auto b = tests::random::get_int(); - auto c = tests::random::get_int(); - - auto writer = [&] (auto&& serializer) noexcept { - return serializer - .serialize(a) - .serialize(b) - .serialize(c) - .done(); - }; - - uint8_t buffer[expected_size]; - - BOOST_CHECK_EQUAL(S::size_when_serialized(writer), expected_size); - BOOST_CHECK_EQUAL(S::serialize(buffer, writer), expected_size); - BOOST_CHECK_EQUAL(S::serialized_object_size(buffer), expected_size); - - auto mview = S::make_view(buffer); - BOOST_CHECK_EQUAL(mview.get().load(), a); - BOOST_CHECK_EQUAL(mview.get().load(), b); - BOOST_CHECK_EQUAL(mview.get().load(), c); - - auto view = S::make_view(const_cast(buffer)); - BOOST_CHECK_EQUAL(view.get().load(), a); - BOOST_CHECK_EQUAL(view.get().load(), b); - BOOST_CHECK_EQUAL(view.get().load(), c); - - a = tests::random::get_int(); - b = tests::random::get_int(); - c = tests::random::get_int(); - mview.get().store(a); - mview.get().store(b); - mview.get().store(c); - - BOOST_CHECK_EQUAL(view.get().load(), a); - BOOST_CHECK_EQUAL(view.get().load(), b); - BOOST_CHECK_EQUAL(view.get().load(), c); - } -} - -class test_structure_context { - bool _b_is_present; - size_t _c_size_of; -public: - test_structure_context(bool 
b_is_present, size_t c_size_of) noexcept - : _b_is_present(b_is_present), _c_size_of(c_size_of) { } - - template - bool is_present() const noexcept; - - template - size_t size_of() const noexcept; - - template - decltype(auto) context_for(Args&&...) const noexcept { return *this; } -}; - -template<> -bool test_structure_context::is_present() const noexcept { - return _b_is_present; -} - -template<> -size_t test_structure_context::size_of() const noexcept { - return _c_size_of; -} - -SEASTAR_THREAD_TEST_CASE(test_structure_with_context) { - using S = imr::structure>, - imr::optional_member>, - imr::member>>; - - for (auto i = 0; i < random_test_iteration_count; i++) { - auto b_value = tests::random::get_int(); - auto c_data = tests::random::get_bytes(); - - const auto expected_size = 1 + imr::pod::size_when_serialized(b_value) - + c_data.size(); - - auto writer = [&] (auto&& serializer) noexcept { - return serializer - .serialize(imr::set_flag()) - .serialize(b_value) - .serialize(c_data) - .done(); - }; - - BOOST_CHECK_EQUAL(S::size_when_serialized(writer), expected_size); - - auto buffer = std::make_unique(expected_size); - BOOST_CHECK_EQUAL(S::serialize(buffer.get(), writer), expected_size); - - auto ctx = test_structure_context(true, c_data.size()); - BOOST_CHECK_EQUAL(S::serialized_object_size(buffer.get(), ctx), expected_size); - - auto mview = S::make_view(buffer.get(), ctx); - BOOST_CHECK(mview.get().get()); - BOOST_CHECK(!mview.get().get()); - BOOST_CHECK_EQUAL(mview.get().get().load(), b_value); - BOOST_CHECK(boost::range::equal(mview.get(ctx), c_data)); - - auto view = S::view(mview); - BOOST_CHECK(view.get().get()); - BOOST_CHECK(!view.get().get()); - BOOST_CHECK_EQUAL(view.get().get().load(), b_value); - BOOST_CHECK(boost::range::equal(view.get(ctx), c_data)); - } -} - -SEASTAR_THREAD_TEST_CASE(test_structure_get_element_without_view) { - using S = imr::structure>, - imr::member>, - imr::optional_member>>; - - auto uinteger = tests::random::get_int(); - 
- static constexpr auto expected_size = 1 + sizeof(uint64_t); - - auto writer = [&] (auto&& serializer) noexcept { - return serializer - .serialize(imr::set_flag()) - .serialize(uinteger) - .skip() - .done(); - }; - - BOOST_CHECK_EQUAL(S::size_when_serialized(writer), expected_size); - - uint8_t buffer[expected_size]; - BOOST_CHECK_EQUAL(S::serialize(buffer, writer), expected_size); - - auto fview = S::get_member(buffer); - BOOST_CHECK(fview.get()); - BOOST_CHECK(!fview.get()); - - auto uview = S::get_member(buffer); - BOOST_CHECK_EQUAL(uview.load(), uinteger); - // FIXME test offset -} - -SEASTAR_THREAD_TEST_CASE(test_nested_structure) { - using S1 = imr::structure>, - imr::member>, - imr::member>>; - - using S = imr::structure>, - imr::member, - imr::member>>; - - for (auto i = 0; i < random_test_iteration_count; i++) { - auto b1_value = tests::random::get_int(); - auto c1_data = tests::random::get_bytes(); - auto a1_value = tests::random::get_int(); - - const auto expected_size1 = imr::pod::size_when_serialized(b1_value) - + c1_data.size() + sizeof(uint8_t); - - auto a_value = tests::random::get_int(); - auto c_value = tests::random::get_int(); - - const auto expected_size = sizeof(uint16_t) + expected_size1 + sizeof(uint32_t); - - auto writer1 = [&] (auto&& serializer) noexcept { - return serializer - .serialize(b1_value) - .serialize(c1_data) - .serialize(a1_value) - .done(); - }; - - auto writer = [&] (auto&& serializer) noexcept { - return serializer - .serialize(a_value) - .serialize(writer1) - .serialize(c_value) - .done(); - }; - - BOOST_CHECK_EQUAL(S::size_when_serialized(writer), expected_size); - - auto buffer = std::make_unique(expected_size); - BOOST_CHECK_EQUAL(S::serialize(buffer.get(), writer), expected_size); - - auto ctx = test_structure_context(true, c1_data.size()); - BOOST_CHECK_EQUAL(S::serialized_object_size(buffer.get(), ctx), expected_size); - - auto view = S::make_view(buffer.get(), ctx); - BOOST_CHECK_EQUAL(view.get().load(), a_value); 
- BOOST_CHECK_EQUAL(view.get(ctx).get().get().load(), b1_value); - BOOST_CHECK(boost::range::equal(view.get(ctx).get(ctx), c1_data)); - BOOST_CHECK_EQUAL(view.get(ctx).load(), c_value); - } -} - -BOOST_AUTO_TEST_SUITE_END(); - -struct object_with_destructor { - static size_t destruction_count; - static uint64_t last_destroyed_one; - - static void reset() { - destruction_count = 0; - last_destroyed_one = 0; - } - - uint64_t value; -}; - -size_t object_with_destructor::destruction_count = 0; -uint64_t object_with_destructor::last_destroyed_one = 0; - -struct object_without_destructor { - uint64_t value; -}; - -namespace imr { -namespace methods { - -template<> -struct destructor> { - template - static void run(uint8_t* ptr, Args&&...) noexcept { - object_with_destructor::destruction_count++; - - auto view = imr::pod::make_view(ptr); - object_with_destructor::last_destroyed_one = view.load().value; - } -}; - -} -} - -BOOST_AUTO_TEST_SUITE(methods); - -SEASTAR_THREAD_TEST_CASE(test_simple_destructor) { - object_with_destructor::reset(); - - using O1 = imr::pod; - using O2 = imr::pod; - - BOOST_CHECK(!imr::methods::is_trivially_destructible::value); - BOOST_CHECK(imr::methods::is_trivially_destructible::value); - - static constexpr auto expected_size = sizeof(object_with_destructor); - uint8_t buffer[expected_size]; - - auto value = tests::random::get_int(); - BOOST_CHECK_EQUAL(O1::serialize(buffer, object_with_destructor { value }), expected_size); - imr::methods::destroy(buffer); - BOOST_CHECK_EQUAL(object_with_destructor::destruction_count, 1); - BOOST_CHECK_EQUAL(object_with_destructor::last_destroyed_one, value); - - imr::methods::destroy(buffer); - BOOST_CHECK_EQUAL(object_with_destructor::destruction_count, 1); - BOOST_CHECK_EQUAL(object_with_destructor::last_destroyed_one, value); -} - -SEASTAR_THREAD_TEST_CASE(test_structure_destructor) { - object_with_destructor::reset(); - - using S = imr::structure>, - imr::member>, - imr::member>>; - - using S1 = 
imr::structure>, - imr::member>, - imr::member>>; - - BOOST_CHECK(!imr::methods::is_trivially_destructible::value); - BOOST_CHECK(imr::methods::is_trivially_destructible::value); - - static constexpr auto expected_size = sizeof(object_with_destructor) * 3; - uint8_t buffer[expected_size]; - - auto a = tests::random::get_int(); - auto b = tests::random::get_int(); - auto c = tests::random::get_int(); - - BOOST_CHECK_EQUAL(S::serialize(buffer, [&] (auto serializer) noexcept { - return serializer - .serialize(object_with_destructor { a }) - .serialize(object_without_destructor { b }) - .serialize(object_with_destructor { c }) - .done(); - }), expected_size); - - imr::methods::destroy(buffer); - BOOST_CHECK_EQUAL(object_with_destructor::destruction_count, 2); - BOOST_CHECK_EQUAL(object_with_destructor::last_destroyed_one, c); - - imr::methods::destroy(buffer); - BOOST_CHECK_EQUAL(object_with_destructor::destruction_count, 2); - BOOST_CHECK_EQUAL(object_with_destructor::last_destroyed_one, c); -} - -SEASTAR_THREAD_TEST_CASE(test_optional_destructor) { - object_with_destructor::reset(); - - using O1 = imr::optional>; - using O2 = imr::optional>; - using O3 = imr::optional>; - - BOOST_CHECK(!imr::methods::is_trivially_destructible::value); - BOOST_CHECK(!imr::methods::is_trivially_destructible::value); - BOOST_CHECK(imr::methods::is_trivially_destructible::value); - - static constexpr auto expected_size = sizeof(object_with_destructor); - uint8_t buffer[expected_size]; - - auto value = tests::random::get_int(); - - BOOST_CHECK_EQUAL(O1::serialize(buffer, object_with_destructor { value }), expected_size); - - imr::methods::destroy(buffer, compound::test_optional_context()); - BOOST_CHECK_EQUAL(object_with_destructor::destruction_count, 0); - BOOST_CHECK_EQUAL(object_with_destructor::last_destroyed_one, 0); - - imr::methods::destroy(buffer, compound::test_optional_context()); - BOOST_CHECK_EQUAL(object_with_destructor::destruction_count, 1); - 
BOOST_CHECK_EQUAL(object_with_destructor::last_destroyed_one, value); - - imr::methods::destroy(buffer, compound::test_optional_context()); - BOOST_CHECK_EQUAL(object_with_destructor::destruction_count, 1); - BOOST_CHECK_EQUAL(object_with_destructor::last_destroyed_one, value); -} - -using V = imr::variant>, - imr::member>>; - -struct test_variant_context { - bool _alternative_b; -public: - template - auto active_alternative_of() const noexcept; - - template - decltype(auto) context_for(...) const noexcept { return *this; } -}; - -template<> -auto test_variant_context::active_alternative_of() const noexcept { - if (_alternative_b) { - return V::index_for(); - } else { - return V::index_for(); - } -} - -SEASTAR_THREAD_TEST_CASE(test_variant_destructor) { - object_with_destructor::reset(); - - using V1 = imr::variant>>; - - BOOST_CHECK(!imr::methods::is_trivially_destructible::value); - BOOST_CHECK(imr::methods::is_trivially_destructible::value); - - static constexpr auto expected_size = sizeof(object_with_destructor); - uint8_t buffer[expected_size]; - - auto value = tests::random::get_int(); - - BOOST_CHECK_EQUAL(V::serialize(buffer, object_with_destructor { value }), expected_size); - - imr::methods::destroy(buffer, test_variant_context { false }); - BOOST_CHECK_EQUAL(object_with_destructor::destruction_count, 0); - BOOST_CHECK_EQUAL(object_with_destructor::last_destroyed_one, 0); - - imr::methods::destroy(buffer, test_variant_context { true }); - BOOST_CHECK_EQUAL(object_with_destructor::destruction_count, 1); - BOOST_CHECK_EQUAL(object_with_destructor::last_destroyed_one, value); -} - -BOOST_AUTO_TEST_SUITE_END(); - -namespace object_exception_safety { - -using nested_structure = imr::structure< - imr::member>, - imr::member> ->; - -using structure = imr::structure< - imr::member>, - imr::member>>, - imr::member>>, - imr::member> ->; - -struct structue_context { - size_t _size; - - structue_context(const uint8_t* ptr) - : _size(imr::pod::make_view(ptr).load()) - 
{ - BOOST_CHECK_EQUAL(_size, 4); - } - - template - size_t size_of() const noexcept { - return _size; - } - - template - decltype(auto) context_for(Args&&...) const noexcept { return *this; } -}; - -struct nested_structue_context { - size_t _size; - - nested_structue_context(const uint8_t* ptr) - : _size(imr::pod::make_view(ptr).load()) - { - BOOST_CHECK_NE(_size, 0); - } - - template - size_t size_of() const noexcept { - return _size; - } - - template - decltype(auto) context_for(Args&&...) const noexcept { return *this; } -}; - -} - -namespace imr::methods { - -template<> -struct destructor>> { - static void run(uint8_t* ptr, ...) { - using namespace object_exception_safety; - auto obj_ptr = imr::pod::make_view(ptr).load(); - imr::methods::destroy(obj_ptr, nested_structue_context(obj_ptr)); - current_allocator().free(obj_ptr); - } -}; - -} - -SEASTAR_THREAD_TEST_CASE(test_object_exception_safety) { - using namespace object_exception_safety; - - using context_factory_for_structure = imr::alloc::context_factory>; - using lsa_migrator_fn_for_structure = imr::alloc::lsa_migrate_fn::structure, context_factory_for_structure>; - auto migrator_for_structure = lsa_migrator_fn_for_structure(context_factory_for_structure()); - - using context_factory_for_nested_structure = imr::alloc::context_factory; - using lsa_migrator_fn_for_nested_structure = imr::alloc::lsa_migrate_fn; - auto migrator_for_nested_structure = lsa_migrator_fn_for_nested_structure(context_factory_for_nested_structure()); - - auto writer_fn = [&] (auto serializer, auto& allocator) { - return serializer - .serialize(4) - .serialize(allocator.template allocate( - &migrator_for_nested_structure, - [&] (auto nested_serializer) { - return nested_serializer - .serialize(128) - .serialize(128, [] (auto&&...) 
noexcept { }) - .done(); - } - )) - .serialize(allocator.template allocate( - &migrator_for_nested_structure, - [&] (auto nested_serializer) { - return nested_serializer - .serialize(1024) - .serialize(1024, [] (auto&&...) noexcept { }) - .done(); - } - )) - .serialize(bytes(4, 'a')) - .done(); - }; - - logalloc::region reg; - - size_t fail_offset = 0; - auto allocator = failure_injecting_allocation_strategy(reg.allocator()); - with_allocator(allocator, [&] { - while (true) { - allocator.fail_after(fail_offset++); - try { - imr::utils::object::make(writer_fn, &migrator_for_structure); - } catch (const std::bad_alloc&) { - BOOST_CHECK_EQUAL(reg.occupancy().used_space(), 0); - continue; - } - BOOST_CHECK_EQUAL(reg.occupancy().used_space(), 0); - break; - } - }); - - BOOST_CHECK_EQUAL(fail_offset, 4); -} - diff --git a/test/boost/meta_test.cc b/test/boost/meta_test.cc deleted file mode 100644 index 86f3467209..0000000000 --- a/test/boost/meta_test.cc +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Copyright (C) 2018 ScyllaDB - */ - -/* - * This file is part of Scylla. - * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . 
- */ - -#define BOOST_TEST_MODULE meta -#include - -#include - -#include "utils/meta.hh" - -namespace internal { - -template -struct check_constexpr { - template - struct check { - enum : T { - value = N, - }; - }; -}; - -template -struct first_argument { }; - -template -struct first_argument { - using type = T; -}; - -} - -#define INTERNAL_STATIC_CHECK_EQUAL(expected, actual, actual_str) \ - BOOST_CHECK_MESSAGE(internal::check_constexpr>::check<(actual)>::value == (expected), \ - actual_str " expected to be equal " #expected " [actual: " << (actual) << ", expected: " << (expected) << "]") - -#define INTERNAL_STATIC_CHECK_SAME(expr, expected, actual, actual_str) \ - BOOST_CHECK_MESSAGE(expr, actual_str " expected to be the same as " #expected \ - " [actual: " << seastar::pretty_type_name(typeid(typename internal::first_argument::type)) << ", expected: " \ - << seastar::pretty_type_name(typeid(internal::first_argument::type)) << "]") - -#define STATIC_CHECK_EQUAL(expected, ...) \ - INTERNAL_STATIC_CHECK_EQUAL(expected, (__VA_ARGS__), #__VA_ARGS__) - -#define STATIC_CHECK_SAME(expected, ...) 
\ - INTERNAL_STATIC_CHECK_SAME((std::is_same<__VA_ARGS__, typename internal::first_argument::type>::value), expected, (__VA_ARGS__), #__VA_ARGS__) - -class A { }; -class B { }; -class C { }; -class D { }; - -BOOST_AUTO_TEST_CASE(find) { - STATIC_CHECK_EQUAL(0, meta::find); - STATIC_CHECK_EQUAL(1, meta::find); - STATIC_CHECK_EQUAL(2, meta::find); - STATIC_CHECK_EQUAL(3, meta::find); - - STATIC_CHECK_EQUAL(0, meta::find); - STATIC_CHECK_EQUAL(0, meta::find); - STATIC_CHECK_EQUAL(1, meta::find); - - STATIC_CHECK_EQUAL(0, meta::find>); - STATIC_CHECK_EQUAL(1, meta::find>); - STATIC_CHECK_EQUAL(2, meta::find>); - STATIC_CHECK_EQUAL(3, meta::find>); - - STATIC_CHECK_EQUAL(0, meta::find>); - STATIC_CHECK_EQUAL(0, meta::find>); - STATIC_CHECK_EQUAL(1, meta::find>); - - STATIC_CHECK_EQUAL(1, meta::find, meta::list, meta::list>); - STATIC_CHECK_EQUAL(1, meta::find, meta::list, meta::list>>); -} - -BOOST_AUTO_TEST_CASE(get) { - STATIC_CHECK_SAME(A, meta::get<0, A, B, C, D>); - STATIC_CHECK_SAME(B, meta::get<1, A, B, C, D>); - STATIC_CHECK_SAME(C, meta::get<2, A, B, C, D>); - STATIC_CHECK_SAME(D, meta::get<3, A, B, C, D>); - - STATIC_CHECK_SAME(A, meta::get<0, meta::list>); - STATIC_CHECK_SAME(B, meta::get<1, meta::list>); - STATIC_CHECK_SAME(C, meta::get<2, meta::list>); - STATIC_CHECK_SAME(D, meta::get<3, meta::list>); - - STATIC_CHECK_SAME(A, meta::get<0, meta::list>); - STATIC_CHECK_SAME(meta::list, meta::get<0, meta::list>>); -} - -BOOST_AUTO_TEST_CASE(take) { - STATIC_CHECK_SAME(meta::list, meta::take<1, A, B, C, D>); - STATIC_CHECK_SAME((meta::list), meta::take<2, A, B, C, D>); - STATIC_CHECK_SAME((meta::list), meta::take<3, A, B, C, D>); - STATIC_CHECK_SAME((meta::list), meta::take<4, A, B, C, D>); - - STATIC_CHECK_SAME(meta::list, meta::take<1, meta::list>); - STATIC_CHECK_SAME((meta::list), meta::take<2, meta::list>); - STATIC_CHECK_SAME((meta::list), meta::take<3, meta::list>); - STATIC_CHECK_SAME((meta::list), meta::take<4, meta::list>); - - 
STATIC_CHECK_SAME(meta::list, meta::take<1, meta::list>); - STATIC_CHECK_SAME(meta::list>, meta::take<1, meta::list>>); - STATIC_CHECK_SAME((meta::list>), meta::take<1, meta::list>>); -} - -BOOST_AUTO_TEST_CASE(size) { - STATIC_CHECK_EQUAL(0, meta::size<>); - STATIC_CHECK_EQUAL(1, meta::size); - STATIC_CHECK_EQUAL(2, meta::size); - STATIC_CHECK_EQUAL(3, meta::size); - STATIC_CHECK_EQUAL(4, meta::size); - - STATIC_CHECK_EQUAL(0, meta::size>); - STATIC_CHECK_EQUAL(1, meta::size>); - STATIC_CHECK_EQUAL(2, meta::size>); - STATIC_CHECK_EQUAL(3, meta::size>); - STATIC_CHECK_EQUAL(4, meta::size>); - - STATIC_CHECK_EQUAL(1, meta::size>>); - STATIC_CHECK_EQUAL(3, meta::size, C, D>); - STATIC_CHECK_EQUAL(3, meta::size, C, D>>); -} - -class constexpr_count_all_fn { - size_t _n = 0; -public: - constexpr constexpr_count_all_fn() = default; - template - constexpr void operator()(T) { _n++; } - constexpr size_t get() { return _n; } -}; - -template -constexpr size_t constexpr_count_all() -{ - constexpr_count_all_fn constexpr_fn; - meta::for_each(constexpr_fn); - return constexpr_fn.get(); -} - -BOOST_AUTO_TEST_CASE(for_each) { - STATIC_CHECK_EQUAL(0, constexpr_count_all<>()); - STATIC_CHECK_EQUAL(4, constexpr_count_all()); - - size_t n = 0; - meta::for_each([&] (auto&& ptr) { - using type = std::remove_pointer_t>; - switch (n) { - case 0: STATIC_CHECK_SAME(A, type); break; - case 1: STATIC_CHECK_SAME(B, type); break; - case 2: STATIC_CHECK_SAME(C, type); break; - case 3: STATIC_CHECK_SAME(D, type); break; - default: BOOST_FAIL("should not reach"); break; - } - n++; - }); - BOOST_CHECK_EQUAL(4, n); - - STATIC_CHECK_EQUAL(0, constexpr_count_all>()); - STATIC_CHECK_EQUAL(4, constexpr_count_all>()); - - n = 0; - meta::for_each>([&] (auto ptr) { - using type = std::remove_pointer_t; - switch (n) { - case 0: STATIC_CHECK_SAME(A, type); break; - case 1: STATIC_CHECK_SAME(B, type); break; - case 2: STATIC_CHECK_SAME(C, type); break; - case 3: STATIC_CHECK_SAME(D, type); break; - default: 
BOOST_FAIL("should not reach"); break; - } - n++; - }); - BOOST_CHECK_EQUAL(4, n); - - n = 0; - meta::for_each>([&] (auto ptr) { - using type = std::remove_pointer_t; - switch (n) { - case 0: STATIC_CHECK_SAME(A, type); break; - case 1: STATIC_CHECK_SAME(B, type); break; - default: BOOST_FAIL("should not reach"); break; - } - n++; - }); - BOOST_CHECK_EQUAL(2, n); - - n = 0; - using list = meta::list; - meta::for_each - 1, list>>([&] (auto ptr) { - using type = std::remove_pointer_t; - switch (n) { - case 0: STATIC_CHECK_SAME(A, type); break; - case 1: STATIC_CHECK_SAME(B, type); break; - case 2: STATIC_CHECK_SAME(C, type); break; - default: BOOST_FAIL("should not reach"); break; - } - n++; - }); - BOOST_CHECK_EQUAL(3, n); -} diff --git a/test/boost/multishard_mutation_query_test.cc b/test/boost/multishard_mutation_query_test.cc index e9deb0e3b9..7cbeddaae3 100644 --- a/test/boost/multishard_mutation_query_test.cc +++ b/test/boost/multishard_mutation_query_test.cc @@ -432,12 +432,11 @@ static bytes make_payload(const schema& schema, size_t size, const partition_key return std::move(buf_os).detach(); } -static bool validate_payload(const schema& schema, data::value_view payload_view, const partition_key& pk, const clustering_key* const ck) { - auto istream = fragmented_memory_input_stream(payload_view.begin(), payload_view.size_bytes()); - +static bool validate_payload(const schema& schema, atomic_cell_value_view payload_view, const partition_key& pk, const clustering_key* const ck) { + auto istream = fragmented_memory_input_stream(fragment_range(payload_view).begin(), payload_view.size()); auto head = ser::deserialize(istream, boost::type{}); - const size_t actual_size = payload_view.size_bytes(); + const size_t actual_size = payload_view.size(); if (head.size != actual_size) { testlog.error("Validating payload for pk={}, ck={} failed, sizes differ: stored={}, actual={}", pk, seastar::lazy_deref(ck), head.size, diff --git a/test/boost/sstable_3_x_test.cc 
b/test/boost/sstable_3_x_test.cc index 77efd91292..aa940068ec 100644 --- a/test/boost/sstable_3_x_test.cc +++ b/test/boost/sstable_3_x_test.cc @@ -1607,14 +1607,15 @@ SEASTAR_THREAD_TEST_CASE(test_uncompressed_counters_read) { assertions.push_back([&, timestamp, value, clock] (const column_definition& def, const atomic_cell_or_collection* cell) { BOOST_REQUIRE(def.is_counter()); - counter_cell_view::with_linearized(cell->as_atomic_cell(def), [&] (counter_cell_view cv) { + { + counter_cell_view cv(cell->as_atomic_cell(def)); BOOST_REQUIRE_EQUAL(timestamp, cv.timestamp()); BOOST_REQUIRE_EQUAL(1, cv.shard_count()); auto shard = cv.get_shard(HOST_ID); BOOST_REQUIRE(shard); BOOST_REQUIRE_EQUAL(value, shard->value()); BOOST_REQUIRE_EQUAL(clock, shard->logical_clock()); - }); + } }); return assertions; diff --git a/test/boost/sstable_datafile_test.cc b/test/boost/sstable_datafile_test.cc index f170c6cb37..6921532235 100644 --- a/test/boost/sstable_datafile_test.cc +++ b/test/boost/sstable_datafile_test.cc @@ -1156,8 +1156,8 @@ SEASTAR_TEST_CASE(compact) { auto &cells = row.cells(); auto& cdef1 = *s->get_column_definition("age"); auto& cdef2 = *s->get_column_definition("height"); - BOOST_REQUIRE(cells.cell_at(cdef1.id).as_atomic_cell(cdef1).value() == bytes({0,0,0,40})); - BOOST_REQUIRE(cells.cell_at(cdef2.id).as_atomic_cell(cdef2).value() == bytes({0,0,0,(int8_t)170})); + BOOST_REQUIRE(cells.cell_at(cdef1.id).as_atomic_cell(cdef1).value() == managed_bytes({0,0,0,40})); + BOOST_REQUIRE(cells.cell_at(cdef2.id).as_atomic_cell(cdef2).value() == managed_bytes({0,0,0,(int8_t)170})); return read_mutation_from_flat_mutation_reader(*reader, db::no_timeout); }).then([reader, s] (mutation_opt m) { BOOST_REQUIRE(m); @@ -1170,8 +1170,8 @@ SEASTAR_TEST_CASE(compact) { auto &cells = row.cells(); auto& cdef1 = *s->get_column_definition("age"); auto& cdef2 = *s->get_column_definition("height"); - BOOST_REQUIRE(cells.cell_at(cdef1.id).as_atomic_cell(cdef1).value() == bytes({0,0,0,20})); - 
BOOST_REQUIRE(cells.cell_at(cdef2.id).as_atomic_cell(cdef2).value() == bytes({0,0,0,(int8_t)180})); + BOOST_REQUIRE(cells.cell_at(cdef1.id).as_atomic_cell(cdef1).value() == managed_bytes({0,0,0,20})); + BOOST_REQUIRE(cells.cell_at(cdef2.id).as_atomic_cell(cdef2).value() == managed_bytes({0,0,0,(int8_t)180})); return read_mutation_from_flat_mutation_reader(*reader, db::no_timeout); }).then([reader, s] (mutation_opt m) { BOOST_REQUIRE(m); @@ -1184,7 +1184,7 @@ SEASTAR_TEST_CASE(compact) { auto &cells = row.cells(); auto& cdef1 = *s->get_column_definition("age"); auto& cdef2 = *s->get_column_definition("height"); - BOOST_REQUIRE(cells.cell_at(cdef1.id).as_atomic_cell(cdef1).value() == bytes({0,0,0,20})); + BOOST_REQUIRE(cells.cell_at(cdef1.id).as_atomic_cell(cdef1).value() == managed_bytes({0,0,0,20})); BOOST_REQUIRE(cells.find_cell(cdef2.id) == nullptr); return read_mutation_from_flat_mutation_reader(*reader, db::no_timeout); }).then([reader, s] (mutation_opt m) { @@ -2450,7 +2450,7 @@ SEASTAR_TEST_CASE(check_multi_schema) { auto& cells = row.cells(); BOOST_REQUIRE_EQUAL(cells.size(), 1); auto& cdef = *s->get_column_definition("e"); - BOOST_REQUIRE_EQUAL(cells.cell_at(cdef.id).as_atomic_cell(cdef).value(), int32_type->decompose(5)); + BOOST_REQUIRE_EQUAL(cells.cell_at(cdef.id).as_atomic_cell(cdef).value(), managed_bytes(int32_type->decompose(5))); return (*reader)(db::no_timeout); }).then([reader, s] (mutation_fragment_opt m) { BOOST_REQUIRE(!m); @@ -2761,7 +2761,7 @@ SEASTAR_TEST_CASE(test_counter_read) { BOOST_REQUIRE(mfopt->is_clustering_row()); const clustering_row* cr = &mfopt->as_clustering_row(); cr->cells().for_each_cell([&] (column_id id, const atomic_cell_or_collection& c) { - counter_cell_view::with_linearized(c.as_atomic_cell(s->regular_column_at(id)), [&] (counter_cell_view ccv) { + counter_cell_view ccv(c.as_atomic_cell(s->regular_column_at(id))); auto& col = s->column_at(column_kind::regular_column, id); if (col.name_as_text() == "c1") { 
BOOST_REQUIRE_EQUAL(ccv.total_value(), 13); @@ -2782,7 +2782,6 @@ SEASTAR_TEST_CASE(test_counter_read) { } else { BOOST_FAIL(format("Unexpected column \'{}\'", col.name_as_text())); } - }); }); mfopt = reader(db::no_timeout).get0(); @@ -4959,12 +4958,11 @@ SEASTAR_TEST_CASE(test_wrong_counter_shard_order) { size_t n = 0; row.cells().for_each_cell([&] (column_id id, const atomic_cell_or_collection& ac_o_c) { auto acv = ac_o_c.as_atomic_cell(s->regular_column_at(id)); - counter_cell_view::with_linearized(acv, [&] (counter_cell_view ccv) { + counter_cell_view ccv(acv); counter_shard_view::less_compare_by_id cmp; BOOST_REQUIRE_MESSAGE(boost::algorithm::is_sorted(ccv.shards(), cmp), ccv << " is expected to be sorted"); BOOST_REQUIRE_EQUAL(ccv.total_value(), expected_value); n++; - }); }); BOOST_REQUIRE_EQUAL(n, 5); }; diff --git a/test/manual/partition_data_test.cc b/test/manual/partition_data_test.cc index ee987baf23..cee5a11095 100644 --- a/test/manual/partition_data_test.cc +++ b/test/manual/partition_data_test.cc @@ -27,10 +27,10 @@ #include #include -#include "data/cell.hh" - #include "test/lib/random_utils.hh" #include "utils/disk-error-handler.hh" +#include "atomic_cell.hh" +#include "types.hh" BOOST_AUTO_TEST_CASE(test_atomic_cell) { struct test_case { @@ -44,16 +44,14 @@ BOOST_AUTO_TEST_CASE(test_atomic_cell) { auto cases = std::vector { // Live, fixed-size, empty cell { true, true, bytes(), false }, - // Live, fixed-size cell - { true, true, tests::random::get_bytes(data::cell::maximum_internal_storage_length / 2), false, false }, - // Live, variable-size (small), cell - { true, false, tests::random::get_bytes(data::cell::maximum_internal_storage_length / 2), false, false }, - // Live, variable-size (large), cell - { true, false, tests::random::get_bytes(data::cell::maximum_external_chunk_length * 5), false, false }, - // Live, variable-size, empty cell + // Live, small cell + { true, false, tests::random::get_bytes(1024), false, false }, + // Live, large cell 
+ { true, false, tests::random::get_bytes(129 * 1024), false, false }, + // Live, empty cell { true, false, bytes(), false, false }, - // Live, expiring, variable-size cell - { true, false, tests::random::get_bytes(data::cell::maximum_internal_storage_length / 2), true, false }, + // Live, expiring cell + { true, false, tests::random::get_bytes(1024), true, false }, // Dead cell { false, false, bytes(), false, false }, // Counter update cell @@ -67,32 +65,12 @@ BOOST_AUTO_TEST_CASE(test_atomic_cell) { auto& expiring = tc.expiring; auto& counter_update = tc.counter_update; auto timestamp = tests::random::get_int(); - auto ti = [&] { - if (fixed_size) { - return data::type_info::make_fixed_size(value.size()); - } else { - return data::type_info::make_variable_size(); - } - }(); auto ttl = gc_clock::duration(tests::random::get_int(1, std::numeric_limits::max())); auto expiry_time = gc_clock::time_point(gc_clock::duration(tests::random::get_int(1, std::numeric_limits::max()))); auto deletion_time = expiry_time; auto counter_update_value = tests::random::get_int(); - std::optional allocator; - allocator.emplace(); - - auto test_cell = [&] (auto builder) { - auto expected_size = data::cell::size_of(builder, *allocator); - if (fixed_size) { - BOOST_CHECK_GE(expected_size, value.size()); - } - - allocator->allocate_all(); - - auto buffer = std::make_unique(expected_size); - BOOST_CHECK_EQUAL(data::cell::serialize(buffer.get(), builder, *allocator), expected_size); - + auto test_cell = [&] (auto cell) { auto verify_cell = [&] (auto view) { if (!live) { BOOST_CHECK(!view.is_live()); @@ -106,57 +84,29 @@ BOOST_AUTO_TEST_CASE(test_atomic_cell) { BOOST_CHECK_EQUAL(view.counter_update_value(), counter_update_value); } else { BOOST_CHECK(!view.is_counter_update()); - BOOST_CHECK(view.value() == value); + BOOST_CHECK(view.value() == managed_bytes_view(bytes_view(value))); } - BOOST_CHECK_EQUAL(view.is_expiring(), expiring); + BOOST_CHECK_EQUAL(view.is_live_and_has_ttl(), 
expiring); if (expiring) { BOOST_CHECK(view.ttl() == ttl); BOOST_CHECK(view.expiry() == expiry_time); } }; - auto view = data::cell::make_atomic_cell_view(ti, buffer.get()); + auto view = atomic_cell_view(cell); verify_cell(view); - - allocator.emplace(); - - auto copier = data::cell::copy_fn(ti, buffer.get()); - BOOST_CHECK_EQUAL(data::cell::size_of(copier, *allocator), expected_size); - - allocator->allocate_all(); - - auto copied = std::make_unique(expected_size); - BOOST_CHECK_EQUAL(data::cell::serialize(copied.get(), copier, *allocator), expected_size); - - auto view2 = data::cell::make_atomic_cell_view(ti, copied.get()); - verify_cell(view2); - - auto ctx = data::cell::context(buffer.get(), ti); - BOOST_CHECK_EQUAL(data::cell::structure::serialized_object_size(buffer.get(), ctx), expected_size); - auto moved = std::make_unique(expected_size); - std::copy_n(buffer.get(), expected_size, moved.get()); - imr::methods::move(moved.get()); - - auto view3 = data::cell::make_atomic_cell_view(ti, moved.get()); - verify_cell(view3); - - imr::methods::destroy(moved.get()); - imr::methods::destroy(copied.get()); }; if (live) { if (counter_update) { - test_cell(data::cell::make_live_counter_update(timestamp, counter_update_value)); + test_cell(atomic_cell::make_live_counter_update(timestamp, counter_update_value)); } else if (expiring) { - test_cell(data::cell::make_live(ti, timestamp, value, expiry_time, ttl)); + test_cell(atomic_cell::make_live(*bytes_type, timestamp, value, expiry_time, ttl)); } else { - test_cell(data::cell::make_live(ti, timestamp, value)); + test_cell(atomic_cell::make_live(*bytes_type, timestamp, value)); } } else { - test_cell(data::cell::make_dead(timestamp, deletion_time)); + test_cell(atomic_cell::make_dead(timestamp, deletion_time)); } } } - - - diff --git a/types.cc b/types.cc index 48b12e60df..32a9611d86 100644 --- a/types.cc +++ b/types.cc @@ -167,8 +167,7 @@ struct simple_type_traits { template 
simple_type_impl::simple_type_impl(abstract_type::kind k, sstring name, std::optional value_length_if_fixed) - : concrete_type(k, std::move(name), std::move(value_length_if_fixed), - data::type_info::make_fixed_size(simple_type_traits::serialized_size)) {} + : concrete_type(k, std::move(name), std::move(value_length_if_fixed)) {} template integer_type_impl::integer_type_impl( @@ -206,22 +205,22 @@ int32_type_impl::int32_type_impl() : integer_type_impl{kind::int32, int32_type_n long_type_impl::long_type_impl() : integer_type_impl{kind::long_kind, long_type_name, 8} {} string_type_impl::string_type_impl(kind k, sstring name) - : concrete_type(k, name, {}, data::type_info::make_variable_size()) {} + : concrete_type(k, name, {}) {} ascii_type_impl::ascii_type_impl() : string_type_impl(kind::ascii, ascii_type_name) {} utf8_type_impl::utf8_type_impl() : string_type_impl(kind::utf8, utf8_type_name) {} bytes_type_impl::bytes_type_impl() - : concrete_type(kind::bytes, bytes_type_name, {}, data::type_info::make_variable_size()) {} + : concrete_type(kind::bytes, bytes_type_name, {}) {} boolean_type_impl::boolean_type_impl() : simple_type_impl(kind::boolean, boolean_type_name, 1) {} -date_type_impl::date_type_impl() : concrete_type(kind::date, date_type_name, 8, data::type_info::make_fixed_size(sizeof(uint64_t))) {} +date_type_impl::date_type_impl() : concrete_type(kind::date, date_type_name, 8) {} timeuuid_type_impl::timeuuid_type_impl() : concrete_type( - kind::timeuuid, timeuuid_type_name, 16, data::type_info::make_fixed_size(sizeof(uint64_t) * 2)) {} + kind::timeuuid, timeuuid_type_name, 16) {} timestamp_type_impl::timestamp_type_impl() : simple_type_impl(kind::timestamp, timestamp_type_name, 8) {} @@ -409,12 +408,12 @@ int64_t time_type_impl::from_sstring(sstring_view s) { } uuid_type_impl::uuid_type_impl() - : concrete_type(kind::uuid, uuid_type_name, 16, data::type_info::make_fixed_size(sizeof(uint64_t) * 2)) {} + : concrete_type(kind::uuid, uuid_type_name, 16) {} using 
inet_address = seastar::net::inet_address; inet_addr_type_impl::inet_addr_type_impl() - : concrete_type(kind::inet, inet_addr_type_name, {}, data::type_info::make_variable_size()) {} + : concrete_type(kind::inet, inet_addr_type_name, {}) {} // Integer of same length of a given type. This is useful because our // ntoh functions only know how to operate on integers. @@ -454,12 +453,12 @@ double_type_impl::double_type_impl() : floating_type_impl{kind::double_kind, dou float_type_impl::float_type_impl() : floating_type_impl{kind::float_kind, float_type_name, 4} {} -varint_type_impl::varint_type_impl() : concrete_type{kind::varint, varint_type_name, { }, data::type_info::make_variable_size()} { } +varint_type_impl::varint_type_impl() : concrete_type{kind::varint, varint_type_name, { }} { } -decimal_type_impl::decimal_type_impl() : concrete_type{kind::decimal, decimal_type_name, { }, data::type_info::make_variable_size()} { } +decimal_type_impl::decimal_type_impl() : concrete_type{kind::decimal, decimal_type_name, { }} { } counter_type_impl::counter_type_impl() - : abstract_type{kind::counter, counter_type_name, {}, data::type_info::make_variable_size()} {} + : abstract_type{kind::counter, counter_type_name, {}} {} // TODO(jhaberku): Move this to Seastar. 
template @@ -472,7 +471,7 @@ auto generate_tuple_from_index(std::index_sequence, Function&& f) { } duration_type_impl::duration_type_impl() - : concrete_type(kind::duration, duration_type_name, {}, data::type_info::make_variable_size()) {} + : concrete_type(kind::duration, duration_type_name, {}) {} using common_counter_type = cql_duration::common_counter_type; static std::tuple deserialize_counters(bytes_view v) { @@ -492,7 +491,7 @@ static std::tuple } empty_type_impl::empty_type_impl() - : abstract_type(kind::empty, empty_type_name, 0, data::type_info::make_fixed_size(0)) {} + : abstract_type(kind::empty, empty_type_name, 0) {} logging::logger collection_type_impl::_logger("collection_type_impl"); const size_t collection_type_impl::max_elements; @@ -689,7 +688,7 @@ void write_simple(bytes_ostream& out, std::type_identity_t val) { out.write(bytes_view(val_ptr, sizeof(T))); } -void write_collection_value(bytes_ostream& out, cql_serialization_format sf, data::value_view val) { +void write_collection_value(bytes_ostream& out, cql_serialization_format sf, atomic_cell_value_view val) { if (sf.using_32_bits_for_collections()) { write_simple(out, int32_t(val.size_bytes())); } else { @@ -700,7 +699,7 @@ void write_collection_value(bytes_ostream& out, cql_serialization_format sf, dat } write_simple(out, uint16_t(val.size_bytes())); } - for (auto&& frag : val) { + for (auto&& frag : fragment_range(val)) { out.write(frag); } } @@ -1393,7 +1392,7 @@ static std::optional update_listlike( } tuple_type_impl::tuple_type_impl(kind k, sstring name, std::vector types, bool freeze_inner) - : concrete_type(k, std::move(name), { }, data::type_info::make_variable_size()), _types(std::move(types)) { + : concrete_type(k, std::move(name), { }), _types(std::move(types)) { if (freeze_inner) { for (auto& t : _types) { t = t->freeze(); @@ -3057,7 +3056,7 @@ static bytes_ostream serialize_for_cql_aux(const map_type_impl&, collection_muta int elements = 0; for (auto&& e : mut.cells) { if 
(e.second.is_live(mut.tomb, false)) { - write_collection_value(out, sf, data::value_view(e.first)); + write_collection_value(out, sf, atomic_cell_value_view(e.first)); write_collection_value(out, sf, e.second.value()); elements += 1; } @@ -3072,7 +3071,7 @@ static bytes_ostream serialize_for_cql_aux(const set_type_impl&, collection_muta int elements = 0; for (auto&& e : mut.cells) { if (e.second.is_live(mut.tomb, false)) { - write_collection_value(out, sf, data::value_view(e.first)); + write_collection_value(out, sf, atomic_cell_value_view(e.first)); elements += 1; } } @@ -3114,7 +3113,7 @@ static bytes_ostream serialize_for_cql_aux(const user_type_impl& type, collectio if (e.second.is_live(mut.tomb, false)) { auto value = e.second.value(); write_simple(out, int32_t(value.size_bytes())); - for (auto&& frag : value) { + for (auto&& frag : fragment_range(value)) { out.write(frag); } } else { diff --git a/types.hh b/types.hh index 03ee31d078..9963207eb3 100644 --- a/types.hh +++ b/types.hh @@ -24,7 +24,6 @@ #include #include #include -#include "data/cell.hh" #include #include @@ -35,7 +34,6 @@ #include "db_clock.hh" #include "bytes.hh" #include "log.hh" -#include "atomic_cell.hh" #include "cql_serialization_format.hh" #include "tombstone.hh" #include "to_string.hh" @@ -51,6 +49,7 @@ #include "hashing.hh" #include "utils/fragmented_temporary_buffer.hh" #include "utils/exceptions.hh" +#include "utils/managed_bytes.hh" class tuple_type_impl; class big_decimal; @@ -466,7 +465,6 @@ class user_type_impl; class abstract_type : public enable_shared_from_this { sstring _name; std::optional _value_length_if_fixed; - data::type_imr_descriptor _imr_state; public: enum class kind : int8_t { ascii, @@ -504,10 +502,9 @@ private: public: kind get_kind() const { return _kind; } - abstract_type(kind k, sstring name, std::optional value_length_if_fixed, data::type_info ti) - : _name(name), _value_length_if_fixed(std::move(value_length_if_fixed)), _imr_state(ti), _kind(k) {} + 
abstract_type(kind k, sstring name, std::optional value_length_if_fixed) + : _name(name), _value_length_if_fixed(std::move(value_length_if_fixed)), _kind(k) {} virtual ~abstract_type() {} - const data::type_imr_descriptor& imr_state() const { return _imr_state; } bool less(bytes_view v1, bytes_view v2) const { return compare(v1, v2) < 0; } // returns a callable that can be called with two byte_views, and calls this->less() on them. serialized_compare as_less_comparator() const ; @@ -834,7 +831,7 @@ class reversed_type_impl : public abstract_type { data_type _underlying_type; reversed_type_impl(data_type t) : abstract_type(kind::reversed, "org.apache.cassandra.db.marshal.ReversedType(" + t->name() + ")", - t->value_length_if_fixed(), t->imr_state().type_info()) + t->value_length_if_fixed()) , _underlying_type(t) {} public: diff --git a/types/collection.hh b/types/collection.hh index 080cb40124..9500d549b2 100644 --- a/types/collection.hh +++ b/types/collection.hh @@ -44,7 +44,7 @@ public: protected: bool _is_multi_cell; explicit collection_type_impl(kind k, sstring name, bool is_multi_cell) - : abstract_type(k, std::move(name), {}, data::type_info::make_collection()), _is_multi_cell(is_multi_cell) {} + : abstract_type(k, std::move(name), {}), _is_multi_cell(is_multi_cell) {} public: bool is_multi_cell() const { return _is_multi_cell; } virtual data_type name_comparator() const = 0; diff --git a/utils/meta.hh b/utils/meta.hh deleted file mode 100644 index 268ef1224b..0000000000 --- a/utils/meta.hh +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright (C) 2018 ScyllaDB - */ - -/* - * This file is part of Scylla. - * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . - */ - -#pragma once - -#include -#include -#include -#include - -namespace meta { - -// Wrappers that allows returning a list of types. All helpers defined in this -// file accept both unpacked and packed lists of types. -template -struct list { }; - -namespace internal { - -template -constexpr ssize_t do_find_if_unpacked() { - ssize_t i = -1; - ssize_t j = 0; - (..., ((Vs && i == -1) ? i = j : j++)); - return i; -} - -template -struct negative_to_empty : std::integral_constant { }; - -template<> -struct negative_to_empty<-1> { }; - -template -struct is_same_as { - template - using type = std::is_same; -}; - -template typename Predicate, typename... Ts> -struct do_find_if : internal::negative_to_empty::value...>()> { }; - -template typename Predicate, typename... Ts> -struct do_find_if> : internal::negative_to_empty::value...>()> { }; - -} - -// Returns the index of the first type in the list of types list of types Ts for -// which Predicate typename Predicate, typename... Ts> -constexpr size_t find_if = internal::do_find_if::value; - -// Returns the index of the first occurrence of type T in the list of types Ts. -template -constexpr size_t find = find_if::template type, Ts...>; - -namespace internal { - -template -struct do_get_unpacked { }; - -template -struct do_get_unpacked : do_get_unpacked { }; - -template -struct do_get_unpacked<0, T, Ts...> { - using type = T; -}; - -template -struct do_get : do_get_unpacked { }; - -template -struct do_get> : do_get_unpacked { }; - -} - -// Returns the Nth type in the provided list of types. 
-template -using get = typename internal::do_get::type; - -namespace internal { - -template -struct do_take_unpacked { }; - -template -struct do_take_unpacked<0, list> { - using type = list; -}; - -template -struct do_take_unpacked<0, list, U, Us...> { - using type = list; -}; - -template -struct do_take_unpacked, U, Us...> { - using type = typename do_take_unpacked, Us...>::type; -}; - -template -struct do_take : do_take_unpacked { }; - - -template -struct do_take> : do_take_unpacked { }; - -} - -// Returns a list containing N first elements of the provided list of types. -template -using take = typename internal::do_take, Ts...>::type; - -namespace internal { - -template -struct do_for_each_unpacked { - template - static constexpr void run(Function&& fn) { - (..., fn(static_cast(nullptr))); - } -}; - -template -struct do_for_each : do_for_each_unpacked { }; - -template -struct do_for_each> : do_for_each_unpacked { }; - -} - -// Executes the provided function for each element in the provided list of -// types. For each type T the Function is called with an argument of type T*. -template -constexpr void for_each(Function&& fn) { - internal::do_for_each::run(std::forward(fn)); -}; - -namespace internal { - -template -struct get_size : std::integral_constant { }; - -template -struct get_size> : std::integral_constant { }; - -} - -// Returns the size of a list of types. -template -constexpr size_t size = internal::get_size::value; - -template